README updated. #16

Closed
nehlichp wants to merge 366 commits from main into pn
135 changed files with 15456 additions and 3489 deletions

1
.gitignore vendored
View File

@@ -1,4 +1,5 @@
/target
/Cargo.lock
/.idea/*
!/.idea/runConfigurations

3
.gitmodules vendored
View File

@@ -1,3 +0,0 @@
[submodule "spacepackets"]
path = spacepackets
url = https://egit.irs.uni-stuttgart.de/rust/spacepackets.git

View File

@@ -1,12 +1,13 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Docs" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
<option name="command" value="doc" />
<option name="command" value="doc --all-features" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="channel" value="DEFAULT" />
<option name="requiredFeatures" value="true" />
<option name="allFeatures" value="false" />
<option name="emulateTerminal" value="false" />
<option name="withSudo" value="false" />
<option name="buildTarget" value="REMOTE" />
<option name="backtrace" value="SHORT" />
<envs />
<option name="isRedirectInput" value="false" />

View File

@@ -1,6 +1,6 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Run obsw example" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
<option name="command" value="run --package fsrc-example --bin obsw" />
<option name="command" value="run -p satrs-example --bin satrs-example" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="channel" value="DEFAULT" />
<option name="requiredFeatures" value="true" />

View File

@@ -1,5 +1,5 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Run obsw client example" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
<configuration default="false" name="Run obsw simple client" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
<option name="command" value="run --package fsrc-example --bin client" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="channel" value="DEFAULT" />

View File

@@ -1,19 +0,0 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Run spacepackets" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
<option name="command" value="run --package spacepackets --bin spacepackets" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="channel" value="DEFAULT" />
<option name="requiredFeatures" value="true" />
<option name="allFeatures" value="false" />
<option name="emulateTerminal" value="false" />
<option name="withSudo" value="false" />
<option name="buildTarget" value="REMOTE" />
<option name="backtrace" value="SHORT" />
<envs />
<option name="isRedirectInput" value="false" />
<option name="redirectInputPath" value="" />
<method v="2">
<option name="CARGO.BUILD_TASK_PROVIDER" enabled="true" />
</method>
</configuration>
</component>

View File

@@ -1,12 +1,13 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Test spacepackets" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
<option name="command" value="test -p spacepackets" />
<configuration default="false" name="Test satrs-core" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
<option name="command" value="test -p satrs-core --all-features" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="channel" value="DEFAULT" />
<option name="requiredFeatures" value="true" />
<option name="allFeatures" value="true" />
<option name="emulateTerminal" value="false" />
<option name="withSudo" value="false" />
<option name="buildTarget" value="REMOTE" />
<option name="backtrace" value="SHORT" />
<envs />
<option name="isRedirectInput" value="false" />

View File

@@ -1,7 +1,11 @@
[workspace]
members = [
"fsrc-core",
"spacepackets",
"fsrc-example"
"satrs-core",
"satrs-mib",
"satrs-example",
]
exclude = [
"satrs-example-stm32f3-disco",
]

4
NOTICE
View File

@@ -1,3 +1 @@
Workspace for the Flight Software Rust Components (FSRC) development.
This software contains code developed at the University of Stuttgart.
This software contains code developed at the University of Stuttgart's Institute of Space Systems.

View File

@@ -1,5 +1,34 @@
Rust Flight Software Framework Launchpad
sat-rs launchpad
=========
This is the prototyping repository for the initial version of a
Rust Flight Software Framework.
This is the prototyping repository for the initial version of the sat-rs framework.
Its primary goal is to provide re-usable components to write on-board software for remote
systems like rovers or satellites. It is specifically written for the special requirements
of these systems.
# Overview
This project currently contains the following crates:
* [`satrs-core`](https://egit.irs.uni-stuttgart.de/rust/satrs-launchpad/src/branch/main/satrs-core):
Core components of sat-rs.
* [`satrs-example`](https://egit.irs.uni-stuttgart.de/rust/satrs-launchpad/src/branch/main/satrs-example):
A simple example on-board software using various sat-rs components which can be run
on a host computer or on any system with a standard runtime like a Raspberry Pi.
* [`satrs-mib`](https://egit.irs.uni-stuttgart.de/rust/satrs-launchpad/src/branch/main/satrs-mib):
Components to build a mission information base from the on-board software directly.
* [`satrs-example-stm32f3-disco`](https://egit.irs.uni-stuttgart.de/rust/satrs-example-stm32f3-disco):
A simple example on-board software using sat-rs components on a bare-metal system
with constrained resources.
Each project has its own `CHANGELOG.md`.
# Related projects
In addition to the crates in this repository, the sat-rs project also maintains other libraries.
* [`spacepackets`](https://egit.irs.uni-stuttgart.de/rust/spacepackets): Basic ECSS and CCSDS
packet protocol implementations. This crate is re-exported by the
[`satrs-core`](https://egit.irs.uni-stuttgart.de/rust/satrs-launchpad/src/branch/main/satrs-core)
crate.
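Since `spacepackets` is re-exported, downstream code can use its types through `satrs-core` without a direct dependency. Below is a minimal sketch of what that could look like; the `satrs_core::spacepackets` re-export path is an assumption based on the note above, and the `SpHeader::tc` constructor form mirrors its usage elsewhere in this changeset.

```rust
// Minimal sketch: the `satrs_core::spacepackets` re-export path is assumed from the
// README note above; the constructor form mirrors `SpHeader::tc` usage in this diff.
use satrs_core::spacepackets::SpHeader;

fn main() {
    // Telecommand space packet header with APID 0x02, sequence count 0, data length 0.
    let _sp_header = SpHeader::tc(0x02, 0, 0).expect("creating SP header failed");
}
```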

View File

@@ -1,82 +0,0 @@
/*
pub mod deku {
pub use ccsds_spacepacket::PrimaryHeader as SpHeader;
use crate::sp::{self, PacketId, PacketSequenceCtrl};
use crate::sp::{CcsdsPrimaryHeader, PacketType, SequenceFlags};
impl CcsdsPrimaryHeader for SpHeader {
fn from_composite_fields(packet_id: PacketId, psc: PacketSequenceCtrl, data_len: u16, version: Option<u8>) -> Self {
let mut version_to_set = 0b000;
if let Some(version) = version {
version_to_set = version;
}
let packet_type = match packet_id.ptype {
PacketType::Tm => ccsds_spacepacket::types::PacketType::Data,
PacketType::Tc => ccsds_spacepacket::types::PacketType::Command
};
let sec_header_flag = match packet_id.sec_header_flag {
true => ccsds_spacepacket::types::SecondaryHeaderFlag::Present,
false => ccsds_spacepacket::types::SecondaryHeaderFlag::NotPresent
};
let sequence_flags = match psc.seq_flags {
SequenceFlags::ContinuationSegment => ccsds_spacepacket::types::SeqFlag::Continuation,
SequenceFlags::FirstSegment => ccsds_spacepacket::types::SeqFlag::FirstSegment,
SequenceFlags::LastSegment => ccsds_spacepacket::types::SeqFlag::LastSegment,
SequenceFlags::Unsegmented => ccsds_spacepacket::types::SeqFlag::Unsegmented
};
SpHeader {
version: version_to_set,
packet_type,
sec_header_flag,
app_proc_id: packet_id.apid,
sequence_flags,
sequence_count: psc.ssc,
data_length: data_len
}
}
#[inline]
fn version(&self) -> u8 {
self.version
}
#[inline]
fn packet_id(&self) -> PacketId {
PacketId {
ptype: PacketType::try_from(self.packet_type as u8).unwrap(),
apid: self.app_proc_id,
sec_header_flag: self.sec_header_flag as u8 != 0
}
}
#[inline]
fn psc(&self) -> PacketSequenceCtrl {
PacketSequenceCtrl {
seq_flags: SequenceFlags::try_from(self.sequence_flags as u8).unwrap(),
ssc: self.sequence_count
}
}
#[inline]
fn data_len(&self) -> u16 {
self.data_length
}
}
sph_from_other!(SpHeader, sp::srd::SpHeader);
sph_from_other!(SpHeader, sp::zc::SpHeader);
}
*/
/*
#[test]
fn test_deser_to_raw_packed_deku() {
let sp_header = SpHeader::tc(0x42, 12).expect("Error creating SP header");
// TODO: Wait with these tests until KubOS merged
// https://github.com/KubOS-Preservation-Group/ccsds-spacepacket/pull/14
let _deku_header =
deku::SpHeader::try_from(sp_header).expect("Error creating Deku Sp Header");
// deku_header.to_bytes().unwrap();
}
*/

View File

@@ -1,43 +0,0 @@
[package]
name = "fsrc-core"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
delegate = "0.8.0"
hashbrown = "0.12.3"
[dependencies.num-traits]
version = "0.2"
default-features = false
[dependencies.downcast-rs]
version = "1.2.0"
default-features = false
[dependencies.bus]
version = "2.2.3"
optional = true
[dependencies.crossbeam-channel]
version= "0.5"
default-features = false
[dependencies.spacepackets]
path = "../spacepackets"
[dev-dependencies]
serde = "1.0.143"
zerocopy = "0.6.1"
once_cell = "1.13.1"
[dev-dependencies.postcard]
version = "1.0.1"
[features]
default = ["std"]
std = ["downcast-rs/std", "alloc", "bus", "postcard/use-std", "crossbeam-channel/std"]
alloc = []

View File

@@ -1,318 +0,0 @@
//! [Event][crate::events::Event] management and forwarding
use crate::events::{Event, EventRaw, GroupId};
use alloc::boxed::Box;
use alloc::vec;
use alloc::vec::Vec;
use hashbrown::HashMap;
#[derive(PartialEq, Eq, Hash, Copy, Clone)]
enum ListenerType {
Single(EventRaw),
Group(GroupId),
}
pub trait EventListener {
type Error;
fn id(&self) -> u32;
fn send_to(&mut self, event: Event) -> Result<(), Self::Error>;
}
struct Listener<E> {
ltype: ListenerType,
dest: Box<dyn EventListener<Error = E>>,
}
pub trait ReceivesAllEvent {
fn receive(&mut self) -> Option<Event>;
}
pub struct EventManager<E> {
listeners: HashMap<ListenerType, Vec<Listener<E>>>,
event_receiver: Box<dyn ReceivesAllEvent>,
}
pub enum HandlerResult {
Empty,
Handled(u32, Event),
}
impl<E> EventManager<E> {
pub fn new(event_receiver: Box<dyn ReceivesAllEvent>) -> Self {
EventManager {
listeners: HashMap::new(),
event_receiver,
}
}
pub fn subscribe_single(
&mut self,
event: Event,
dest: impl EventListener<Error = E> + 'static,
) {
self.update_listeners(ListenerType::Single(event.raw()), dest);
}
pub fn subscribe_group(
&mut self,
group_id: GroupId,
dest: impl EventListener<Error = E> + 'static,
) {
self.update_listeners(ListenerType::Group(group_id), dest);
}
fn update_listeners(
&mut self,
key: ListenerType,
dest: impl EventListener<Error = E> + 'static,
) {
if !self.listeners.contains_key(&key) {
self.listeners.insert(
key,
vec![Listener {
ltype: key,
dest: Box::new(dest),
}],
);
} else {
let vec = self.listeners.get_mut(&key).unwrap();
// To prevent double insertions
for entry in vec.iter() {
if entry.ltype == key && entry.dest.id() == dest.id() {
return;
}
}
vec.push(Listener {
ltype: key,
dest: Box::new(dest),
});
}
}
pub fn try_event_handling(&mut self) -> Result<HandlerResult, E> {
let mut err_status = None;
let mut num_recipients = 0;
let mut send_handler = |event, llist: &mut Vec<Listener<E>>| {
for listener in llist.iter_mut() {
if let Err(e) = listener.dest.send_to(event) {
err_status = Some(Err(e));
} else {
num_recipients += 1;
}
}
};
if let Some(event) = self.event_receiver.receive() {
let single_key = ListenerType::Single(event.raw());
if self.listeners.contains_key(&single_key) {
send_handler(event, self.listeners.get_mut(&single_key).unwrap());
}
let group_key = ListenerType::Group(event.group_id());
if self.listeners.contains_key(&group_key) {
send_handler(event, self.listeners.get_mut(&group_key).unwrap());
}
if let Some(err) = err_status {
return err;
}
return Ok(HandlerResult::Handled(num_recipients, event));
}
Ok(HandlerResult::Empty)
}
}
#[cfg(test)]
mod tests {
use super::{EventListener, HandlerResult, ReceivesAllEvent};
use crate::event_man::EventManager;
use crate::events::{Event, Severity};
use alloc::boxed::Box;
use std::sync::mpsc::{channel, Receiver, SendError, Sender};
use std::thread;
use std::time::Duration;
struct EventReceiver {
mpsc_receiver: Receiver<Event>,
}
impl ReceivesAllEvent for EventReceiver {
fn receive(&mut self) -> Option<Event> {
self.mpsc_receiver.try_recv().ok()
}
}
#[derive(Clone)]
struct MpscEventSenderQueue {
id: u32,
mpsc_sender: Sender<Event>,
}
impl EventListener for MpscEventSenderQueue {
type Error = SendError<Event>;
fn id(&self) -> u32 {
self.id
}
fn send_to(&mut self, event: Event) -> Result<(), Self::Error> {
self.mpsc_sender.send(event)
}
}
fn check_next_event(expected: Event, receiver: &Receiver<Event>) {
for _ in 0..5 {
if let Ok(event) = receiver.try_recv() {
assert_eq!(event, expected);
break;
}
thread::sleep(Duration::from_millis(1));
}
}
fn check_handled_event(res: HandlerResult, expected: Event, expected_num_sent: u32) {
assert!(matches!(res, HandlerResult::Handled { .. }));
if let HandlerResult::Handled(num_recipients, event) = res {
assert_eq!(event, expected);
assert_eq!(num_recipients, expected_num_sent);
}
}
#[test]
fn test_basic() {
let (event_sender, manager_queue) = channel();
let event_man_receiver = EventReceiver {
mpsc_receiver: manager_queue,
};
let mut event_man: EventManager<SendError<Event>> =
EventManager::new(Box::new(event_man_receiver));
let event_grp_0 = Event::new(Severity::INFO, 0, 0).unwrap();
let event_grp_1_0 = Event::new(Severity::HIGH, 1, 0).unwrap();
let (single_event_sender, single_event_receiver) = channel();
let single_event_listener = MpscEventSenderQueue {
id: 0,
mpsc_sender: single_event_sender,
};
event_man.subscribe_single(event_grp_0, single_event_listener);
let (group_event_sender_0, group_event_receiver_0) = channel();
let group_event_listener = MpscEventSenderQueue {
id: 1,
mpsc_sender: group_event_sender_0,
};
event_man.subscribe_group(event_grp_1_0.group_id(), group_event_listener);
// Test event with one listener
event_sender
.send(event_grp_0)
.expect("Sending single error failed");
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_grp_0, 1);
check_next_event(event_grp_0, &single_event_receiver);
// Test event which is sent to all group listeners
event_sender
.send(event_grp_1_0)
.expect("Sending group error failed");
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_grp_1_0, 1);
check_next_event(event_grp_1_0, &group_event_receiver_0);
}
/// Test listening for multiple groups
#[test]
fn test_multi_group() {
let (event_sender, manager_queue) = channel();
let event_man_receiver = EventReceiver {
mpsc_receiver: manager_queue,
};
let mut event_man: EventManager<SendError<Event>> =
EventManager::new(Box::new(event_man_receiver));
let res = event_man.try_event_handling();
assert!(res.is_ok());
let hres = res.unwrap();
assert!(matches!(hres, HandlerResult::Empty));
let event_grp_0 = Event::new(Severity::INFO, 0, 0).unwrap();
let event_grp_1_0 = Event::new(Severity::HIGH, 1, 0).unwrap();
let (event_grp_0_sender, event_grp_0_receiver) = channel();
let event_grp_0_and_1_listener = MpscEventSenderQueue {
id: 0,
mpsc_sender: event_grp_0_sender,
};
event_man.subscribe_group(event_grp_0.group_id(), event_grp_0_and_1_listener.clone());
event_man.subscribe_group(event_grp_1_0.group_id(), event_grp_0_and_1_listener);
event_sender
.send(event_grp_0)
.expect("Sending Event Group 0 failed");
event_sender
.send(event_grp_1_0)
.expect("Sendign Event Group 1 failed");
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_grp_0, 1);
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_grp_1_0, 1);
check_next_event(event_grp_0, &event_grp_0_receiver);
check_next_event(event_grp_1_0, &event_grp_0_receiver);
}
/// Test listening to the same event from multiple listeners. Also test listening
/// to both group and single events from one listener
#[test]
fn test_listening_to_same_event_and_multi_type() {
let (event_sender, manager_queue) = channel();
let event_man_receiver = EventReceiver {
mpsc_receiver: manager_queue,
};
let mut event_man: EventManager<SendError<Event>> =
EventManager::new(Box::new(event_man_receiver));
let event_0 = Event::new(Severity::INFO, 0, 5).unwrap();
let event_1 = Event::new(Severity::HIGH, 1, 0).unwrap();
let (event_0_tx_0, event_0_rx_0) = channel();
let (event_0_tx_1, event_0_rx_1) = channel();
let event_listener_0 = MpscEventSenderQueue {
id: 0,
mpsc_sender: event_0_tx_0,
};
let event_listener_1 = MpscEventSenderQueue {
id: 1,
mpsc_sender: event_0_tx_1,
};
event_man.subscribe_single(event_0, event_listener_0.clone());
event_man.subscribe_single(event_0, event_listener_1);
event_sender
.send(event_0)
.expect("Triggering Event 0 failed");
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_0, 2);
check_next_event(event_0, &event_0_rx_0);
check_next_event(event_0, &event_0_rx_1);
event_man.subscribe_group(event_1.group_id(), event_listener_0.clone());
event_sender
.send(event_0)
.expect("Triggering Event 0 failed");
event_sender
.send(event_1)
.expect("Triggering Event 1 failed");
// 3 Events messages will be sent now
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_0, 2);
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_1, 1);
// Both the single event and the group event should arrive now
check_next_event(event_0, &event_0_rx_0);
check_next_event(event_1, &event_0_rx_0);
// Double insertion should be detected, result should remain the same
event_man.subscribe_group(event_1.group_id(), event_listener_0);
event_sender
.send(event_1)
.expect("Triggering Event 1 failed");
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_1, 1);
}
}

View File

@@ -1,132 +0,0 @@
//! Event support module
pub type GroupId = u16;
pub type UniqueId = u16;
pub type EventRaw = u32;
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Severity {
INFO = 1,
LOW = 2,
MEDIUM = 3,
HIGH = 4,
}
impl TryFrom<u8> for Severity {
type Error = ();
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
x if x == Severity::INFO as u8 => Ok(Severity::INFO),
x if x == Severity::LOW as u8 => Ok(Severity::LOW),
x if x == Severity::MEDIUM as u8 => Ok(Severity::MEDIUM),
x if x == Severity::HIGH as u8 => Ok(Severity::HIGH),
_ => Err(()),
}
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Event {
severity: Severity,
group_id: GroupId,
unique_id: UniqueId,
}
impl Event {
/// Generate an event. The raw representation of an event has 32 bits.
/// If the passed group ID is invalid (too large), None will be returned
///
/// # Parameter
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 3 bits of the raw event ID
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 13 bits after the severity. Therefore, its value is limited to 8191 (0x1FFF).
/// * `unique_id`: Each event has a unique 16 bit ID occupying the last 16 bits of the
/// raw event ID
pub fn new(severity: Severity, group_id: GroupId, unique_id: UniqueId) -> Option<Event> {
if group_id > (2u16.pow(13) - 1) {
return None;
}
Some(Event {
severity,
group_id,
unique_id,
})
}
/// Retrieve the severity of an event. Returns None if that severity bit field of the raw event
/// ID is invalid
pub fn severity(&self) -> Severity {
self.severity
}
pub fn group_id(&self) -> GroupId {
self.group_id
}
pub fn unique_id(&self) -> UniqueId {
self.unique_id
}
pub fn raw(&self) -> EventRaw {
(((self.severity as u32) << 29) as u32
| ((self.group_id as u32) << 16) as u32
| self.unique_id as u32) as EventRaw
}
}
impl TryFrom<EventRaw> for Event {
type Error = ();
fn try_from(raw: u32) -> Result<Self, Self::Error> {
let severity: Option<Severity> = (((raw >> 29) & 0b111) as u8).try_into().ok();
if severity.is_none() {
return Err(());
}
let group_id = ((raw >> 16) & 0x1FFF) as u16;
let unique_id = (raw & 0xFFFF) as u16;
Event::new(severity.unwrap(), group_id, unique_id).ok_or(())
}
}
#[cfg(test)]
mod tests {
use super::Event;
use crate::events::Severity;
#[test]
fn test_events() {
let event = Event::new(Severity::INFO, 0, 0).unwrap();
assert_eq!(event.severity(), Severity::INFO);
assert_eq!(event.unique_id(), 0);
assert_eq!(event.group_id(), 0);
let raw_event = event.raw();
assert_eq!(raw_event, 0x20000000);
let conv_from_raw = Event::try_from(raw_event);
assert!(conv_from_raw.is_ok());
let opt_event = conv_from_raw.ok();
assert!(opt_event.is_some());
let event = opt_event.unwrap();
assert_eq!(event.severity(), Severity::INFO);
assert_eq!(event.unique_id(), 0);
assert_eq!(event.group_id(), 0);
let event = Event::new(Severity::HIGH, 0x1FFF, 0xFFFF).unwrap();
assert_eq!(event.severity(), Severity::HIGH);
assert_eq!(event.group_id(), 0x1FFF);
assert_eq!(event.unique_id(), 0xFFFF);
let raw_event = event.raw();
assert_eq!(raw_event, 0x9FFFFFFF);
let conv_from_raw = Event::try_from(raw_event);
assert!(conv_from_raw.is_ok());
let opt_event = conv_from_raw.ok();
assert!(opt_event.is_some());
let event = opt_event.unwrap();
assert_eq!(event.severity(), Severity::HIGH);
assert_eq!(event.group_id(), 0x1FFF);
assert_eq!(event.unique_id(), 0xFFFF);
}
}

View File

@@ -1,29 +0,0 @@
//! # Core components of the Flight Software Rust Crate (FSRC) collection
//!
//! This is a collection of Rust crates which can be used to build On-Board Software for remote
//! systems like satellites or rovers. It has special support for space tailored protocols
//! like the ones provided by CCSDS and ECSS.
//!
//! The crates can generally be used in a `no_std` environment, but some crates expect that the
//! [alloc](https://doc.rust-lang.org/alloc) crate is available to allow boxed trait objects.
//! These are used to supply user code to the crates.
#![no_std]
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(any(feature = "std", test))]
extern crate std;
pub mod error;
#[cfg(feature = "alloc")]
pub mod event_man;
pub mod events;
#[cfg(feature = "std")]
pub mod executable;
pub mod hal;
pub mod objects;
#[cfg(feature = "alloc")]
pub mod pool;
pub mod pus;
pub mod tmtc;
extern crate downcast_rs;

View File

@@ -1,7 +0,0 @@
//! All PUS support modules
//!
//! Currently includes:
//!
//! 1. PUS Verification Service 1 module inside [verification]. Requires [alloc] support.
#[cfg(feature = "alloc")]
pub mod verification;

File diff suppressed because it is too large.

View File

@@ -1,71 +0,0 @@
//! Telemetry and Telecommanding (TMTC) module. Contains packet routing components with special
//! support for CCSDS and ECSS packets.
//!
//! The distributor modules provided by this module use trait objects provided by the user to
//! directly dispatch received packets to packet listeners based on packet fields like the CCSDS
//! Application Process ID (APID) or the ECSS PUS service type. This allows for fast packet
//! routing without the overhead and complication of using message queues. However, it also requires
use crate::error::{FsrcErrorRaw, FsrcGroupIds};
use downcast_rs::{impl_downcast, Downcast};
use spacepackets::tc::PusTc;
use spacepackets::SpHeader;
#[cfg(feature = "alloc")]
pub mod ccsds_distrib;
#[cfg(feature = "alloc")]
pub mod pus_distrib;
pub mod tm_helper;
pub use ccsds_distrib::{CcsdsDistributor, CcsdsError, CcsdsPacketHandler};
pub use pus_distrib::{PusDistributor, PusServiceProvider};
const _RAW_PACKET_ERROR: &str = "raw-tmtc";
const _CCSDS_ERROR: &str = "ccsds-tmtc";
const _PUS_ERROR: &str = "pus-tmtc";
// TODO: A macro for general and unknown errors would be nice
const _FROM_BYTES_SLICE_TOO_SMALL_ERROR: FsrcErrorRaw = FsrcErrorRaw::new(
FsrcGroupIds::Tmtc as u8,
0,
_RAW_PACKET_ERROR,
"FROM_BYTES_SLICE_TOO_SMALL_ERROR",
);
const _FROM_BYTES_ZEROCOPY_ERROR: FsrcErrorRaw = FsrcErrorRaw::new(
FsrcGroupIds::Tmtc as u8,
1,
_RAW_PACKET_ERROR,
"FROM_BYTES_ZEROCOPY_ERROR",
);
/// Generic trait for object which can receive any telecommands in form of a raw bytestream, with
/// no assumptions about the received protocol.
///
/// This trait is implemented by both the [crate::tmtc::pus_distrib::PusDistributor] and the
/// [crate::tmtc::ccsds_distrib::CcsdsDistributor] which allows to pass the respective packets in
/// raw byte format into them.
pub trait ReceivesTc: Downcast {
type Error;
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error>;
}
impl_downcast!(ReceivesTc assoc Error);
/// Generic trait for object which can receive CCSDS space packets, for example ECSS PUS packets
/// for CCSDS File Delivery Protocol (CFDP) packets.
///
/// This trait is implemented by both the [crate::tmtc::pus_distrib::PusDistributor] and the
/// [crate::tmtc::ccsds_distrib::CcsdsDistributor] which allows
/// to pass the respective packets in raw byte format or in CCSDS format into them.
pub trait ReceivesCcsdsTc {
type Error;
fn pass_ccsds(&mut self, header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error>;
}
/// Generic trait for objects which can receive ECSS PUS telecommands. This trait is
/// implemented by the [crate::tmtc::pus_distrib::PusDistributor] objects to allow passing PUS TC
/// packets into it.
pub trait ReceivesEcssPusTc {
type Error;
fn pass_pus_tc(&mut self, header: &SpHeader, pus_tc: &PusTc) -> Result<(), Self::Error>;
}

View File

@@ -1,51 +0,0 @@
use spacepackets::time::{CdsShortTimeProvider, TimeWriter};
use spacepackets::tm::{PusTm, PusTmSecondaryHeader};
use spacepackets::SpHeader;
pub struct PusTmWithCdsShortHelper {
apid: u16,
cds_short_buf: [u8; 7],
}
impl PusTmWithCdsShortHelper {
pub fn new(apid: u16) -> Self {
Self {
apid,
cds_short_buf: [0; 7],
}
}
#[cfg(feature = "std")]
pub fn create_pus_tm_timestamp_now<'a>(
&'a mut self,
service: u8,
subservice: u8,
source_data: Option<&'a [u8]>,
) -> PusTm {
let time_stamp = CdsShortTimeProvider::from_now().unwrap();
time_stamp.write_to_bytes(&mut self.cds_short_buf).unwrap();
self.create_pus_tm_common(service, subservice, source_data)
}
pub fn create_pus_tm_with_stamp<'a>(
&'a mut self,
service: u8,
subservice: u8,
source_data: Option<&'a [u8]>,
stamper: &CdsShortTimeProvider,
) -> PusTm {
stamper.write_to_bytes(&mut self.cds_short_buf).unwrap();
self.create_pus_tm_common(service, subservice, source_data)
}
fn create_pus_tm_common<'a>(
&'a self,
service: u8,
subservice: u8,
source_data: Option<&'a [u8]>,
) -> PusTm {
let mut reply_header = SpHeader::tm(self.apid, 0, 0).unwrap();
let tc_header = PusTmSecondaryHeader::new_simple(service, subservice, &self.cds_short_buf);
PusTm::new(&mut reply_header, tc_header, source_data, true)
}
}

View File

@@ -1,188 +0,0 @@
use fsrc_core::pool::{LocalPool, PoolCfg, PoolProvider, SharedPool};
use fsrc_core::pus::verification::{
CrossbeamVerifSender, FailParams, RequestId, VerificationReporterCfg,
VerificationReporterWithSender,
};
use hashbrown::HashMap;
use spacepackets::ecss::{EcssEnumU16, EcssEnumU8, PusPacket};
use spacepackets::tc::{PusTc, PusTcSecondaryHeader};
use spacepackets::tm::PusTm;
use spacepackets::SpHeader;
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
use std::time::Duration;
const TEST_APID: u16 = 0x03;
const FIXED_STAMP: [u8; 7] = [0; 7];
const PACKETS_SENT: u8 = 8;
/// This test also shows how the verification report could be used in a multi-threaded context,
/// wrapping it into an [Arc] and [Mutex] and then passing it to two threads.
///
/// - The first thread generates an acceptance, a start, two steps and one completion report
/// - The second generates an acceptance and start success report and a completion failure
/// - The third thread is the verification receiver. In the test case, it verifies the other two
/// threads have sent the correct expected verification reports
#[test]
fn test_shared_reporter() {
let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8);
// Shared pool object to store the verification PUS telemetry
let pool_cfg = PoolCfg::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
let shared_tm_pool: SharedPool =
Arc::new(RwLock::new(Box::new(LocalPool::new(pool_cfg.clone()))));
let shared_tc_pool_0 = Arc::new(RwLock::new(LocalPool::new(pool_cfg)));
let shared_tc_pool_1 = shared_tc_pool_0.clone();
let (tx, rx) = crossbeam_channel::bounded(5);
let sender = CrossbeamVerifSender::new(shared_tm_pool.clone(), tx.clone());
let reporter_with_sender_0 = Arc::new(Mutex::new(VerificationReporterWithSender::new(
cfg,
Box::new(sender),
)));
let reporter_with_sender_1 = reporter_with_sender_0.clone();
// For test purposes, we retrieve the request ID from the TCs and pass them to the receiver
// thread.
let req_id_0;
let req_id_1;
let (tx_tc_0, rx_tc_0) = crossbeam_channel::bounded(3);
let (tx_tc_1, rx_tc_1) = crossbeam_channel::bounded(3);
{
let mut tc_guard = shared_tc_pool_0.write().unwrap();
let mut sph = SpHeader::tc(TEST_APID, 0, 0).unwrap();
let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
let pus_tc_0 = PusTc::new(&mut sph, tc_header, None, true);
req_id_0 = RequestId::new(&pus_tc_0);
let (addr, mut buf) = tc_guard.free_element(pus_tc_0.len_packed()).unwrap();
pus_tc_0.write_to(&mut buf).unwrap();
tx_tc_0.send(addr).unwrap();
let mut sph = SpHeader::tc(TEST_APID, 1, 0).unwrap();
let tc_header = PusTcSecondaryHeader::new_simple(5, 1);
let pus_tc_1 = PusTc::new(&mut sph, tc_header, None, true);
req_id_1 = RequestId::new(&pus_tc_1);
let (addr, mut buf) = tc_guard.free_element(pus_tc_0.len_packed()).unwrap();
pus_tc_1.write_to(&mut buf).unwrap();
tx_tc_1.send(addr).unwrap();
}
let verif_sender_0 = thread::spawn(move || {
let mut tc_buf: [u8; 1024] = [0; 1024];
let tc_addr = rx_tc_0
.recv_timeout(Duration::from_millis(20))
.expect("Receive timeout");
let tc_len;
{
let mut tc_guard = shared_tc_pool_0.write().unwrap();
let pg = tc_guard.read_with_guard(tc_addr);
let buf = pg.read().unwrap();
tc_len = buf.len();
tc_buf[0..tc_len].copy_from_slice(buf);
}
let (_tc, _) = PusTc::new_from_raw_slice(&tc_buf[0..tc_len]).unwrap();
let accepted_token;
{
let mut mg = reporter_with_sender_0.lock().expect("Locking mutex failed");
let token = mg.add_tc_with_req_id(req_id_0);
accepted_token = mg
.acceptance_success(token, &FIXED_STAMP)
.expect("Acceptance success failed");
}
// Do some start handling here
let started_token;
{
let mut mg = reporter_with_sender_0.lock().expect("Locking mutex failed");
started_token = mg
.start_success(accepted_token, &FIXED_STAMP)
.expect("Start success failed");
// Do some step handling here
mg.step_success(&started_token, &FIXED_STAMP, EcssEnumU8::new(0))
.expect("Start success failed");
}
// Finish up
let mut mg = reporter_with_sender_0.lock().expect("Locking mutex failed");
mg.step_success(&started_token, &FIXED_STAMP, EcssEnumU8::new(1))
.expect("Start success failed");
mg.completion_success(started_token, &FIXED_STAMP)
.expect("Completion success failed");
});
let verif_sender_1 = thread::spawn(move || {
let mut tc_buf: [u8; 1024] = [0; 1024];
let tc_addr = rx_tc_1
.recv_timeout(Duration::from_millis(20))
.expect("Receive timeout");
let tc_len;
{
let mut tc_guard = shared_tc_pool_1.write().unwrap();
let pg = tc_guard.read_with_guard(tc_addr);
let buf = pg.read().unwrap();
tc_len = buf.len();
tc_buf[0..tc_len].copy_from_slice(buf);
}
let (tc, _) = PusTc::new_from_raw_slice(&tc_buf[0..tc_len]).unwrap();
let mut mg = reporter_with_sender_1
.lock()
.expect("Locking reporter failed");
let token = mg.add_tc(&tc);
let accepted_token = mg
.acceptance_success(token, &FIXED_STAMP)
.expect("Acceptance success failed");
let started_token = mg
.start_success(accepted_token, &FIXED_STAMP)
.expect("Start success failed");
let fail_code = EcssEnumU16::new(2);
let params = FailParams::new(&FIXED_STAMP, &fail_code, None);
mg.completion_failure(started_token, params)
.expect("Completion success failed");
});
let verif_receiver = thread::spawn(move || {
let mut packet_counter = 0;
let mut tm_buf: [u8; 1024] = [0; 1024];
let mut verif_map = HashMap::new();
while packet_counter < PACKETS_SENT {
let verif_addr = rx
.recv_timeout(Duration::from_millis(50))
.expect("Packet reception timeout");
let tm_len;
{
let mut rg = shared_tm_pool.write().expect("Error locking shared pool");
let store_guard = rg.read_with_guard(verif_addr);
let slice = store_guard.read().expect("Error reading TM slice");
tm_len = slice.len();
tm_buf[0..tm_len].copy_from_slice(slice);
}
let (pus_tm, _) = PusTm::new_from_raw_slice(&tm_buf[0..tm_len], 7)
.expect("Error reading verification TM");
let req_id = RequestId::from_bytes(
&pus_tm.source_data().expect("Invalid TM source data")[0..RequestId::SIZE_AS_BYTES],
)
.unwrap();
if !verif_map.contains_key(&req_id) {
let mut content = Vec::new();
content.push(pus_tm.subservice());
verif_map.insert(req_id, content);
} else {
let content = verif_map.get_mut(&req_id).unwrap();
content.push(pus_tm.subservice())
}
packet_counter += 1;
}
for (req_id, content) in verif_map {
if req_id == req_id_1 {
assert_eq!(content[0], 1);
assert_eq!(content[1], 3);
assert_eq!(content[2], 8);
} else if req_id == req_id_0 {
assert_eq!(content[0], 1);
assert_eq!(content[1], 3);
assert_eq!(content[2], 5);
assert_eq!(content[3], 5);
assert_eq!(content[4], 7);
} else {
panic!("Unexpected request ID {:?}", req_id);
}
}
});
verif_sender_0.join().expect("Joining thread 0 failed");
verif_sender_1.join().expect("Joining thread 1 failed");
verif_receiver.join().expect("Joining thread 2 failed");
}

View File

@@ -1,15 +0,0 @@
[package]
name = "fsrc-example"
version = "0.1.0"
edition = "2021"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
[dependencies]
crossbeam-channel = "0.5"
delegate = "0.8"
[dependencies.spacepackets]
path = "../spacepackets"
[dependencies.fsrc-core]
path = "../fsrc-core"

View File

@@ -1,73 +0,0 @@
mod ccsds;
mod pus;
mod tmtc;
use crate::tmtc::{core_tmtc_task, TmStore, PUS_APID};
use fsrc_core::hal::host::udp_server::UdpTcServer;
use fsrc_core::pool::{LocalPool, PoolCfg, SharedPool, StoreAddr};
use fsrc_core::pus::verification::{
MpscVerifSender, VerificationReporterCfg, VerificationReporterWithSender,
};
use fsrc_core::tmtc::CcsdsError;
use fsrc_example::{OBSW_SERVER_ADDR, SERVER_PORT};
use std::net::{IpAddr, SocketAddr};
use std::sync::{mpsc, Arc, Mutex, RwLock};
use std::thread;
struct TmFunnel {
tm_funnel_rx: mpsc::Receiver<StoreAddr>,
tm_server_tx: mpsc::Sender<StoreAddr>,
}
struct UdpTmtcServer {
udp_tc_server: UdpTcServer<CcsdsError<()>>,
tm_rx: mpsc::Receiver<StoreAddr>,
tm_store: SharedPool,
}
unsafe impl Send for UdpTmtcServer {}
fn main() {
println!("Running OBSW example");
let pool_cfg = PoolCfg::new(vec![(8, 32), (4, 64), (2, 128)]);
let tm_pool = LocalPool::new(pool_cfg);
let tm_store: SharedPool = Arc::new(RwLock::new(Box::new(tm_pool)));
let tm_store_helper = TmStore {
pool: tm_store.clone(),
};
let addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel();
let (tm_server_tx, tm_server_rx) = mpsc::channel();
let sender = MpscVerifSender::new(tm_store.clone(), tm_funnel_tx.clone());
let verif_cfg = VerificationReporterCfg::new(PUS_APID, 1, 2, 8);
let reporter_with_sender_0 = Arc::new(Mutex::new(VerificationReporterWithSender::new(
verif_cfg,
Box::new(sender),
)));
let jh0 = thread::spawn(move || {
core_tmtc_task(
tm_funnel_tx.clone(),
tm_server_rx,
tm_store_helper.clone(),
addr,
reporter_with_sender_0,
);
});
let jh1 = thread::spawn(move || {
let tm_funnel = TmFunnel {
tm_server_tx,
tm_funnel_rx,
};
loop {
if let Ok(addr) = tm_funnel.tm_funnel_rx.recv() {
tm_funnel
.tm_server_tx
.send(addr)
.expect("Sending TM to server failed");
}
}
});
jh0.join().expect("Joining UDP TMTC server thread failed");
jh1.join().expect("Joining TM Funnel thread failed");
}

View File

@@ -1,93 +0,0 @@
use crate::tmtc::TmStore;
use fsrc_core::pool::StoreAddr;
use fsrc_core::pus::verification::{
SharedStdVerifReporterWithSender, StateAccepted, VerificationToken,
};
use fsrc_core::tmtc::tm_helper::PusTmWithCdsShortHelper;
use fsrc_core::tmtc::PusServiceProvider;
use spacepackets::tc::{PusTc, PusTcSecondaryHeaderT};
use spacepackets::time::{CdsShortTimeProvider, TimeWriter};
use spacepackets::SpHeader;
use std::sync::mpsc;
pub struct PusReceiver {
pub tm_helper: PusTmWithCdsShortHelper,
pub tm_tx: mpsc::Sender<StoreAddr>,
pub tm_store: TmStore,
pub verif_reporter: SharedStdVerifReporterWithSender,
stamper: CdsShortTimeProvider,
time_stamp: [u8; 7],
}
impl PusReceiver {
pub fn new(
apid: u16,
tm_tx: mpsc::Sender<StoreAddr>,
tm_store: TmStore,
verif_reporter: SharedStdVerifReporterWithSender,
) -> Self {
Self {
tm_helper: PusTmWithCdsShortHelper::new(apid),
tm_tx,
tm_store,
verif_reporter,
stamper: CdsShortTimeProvider::default(),
time_stamp: [0; 7],
}
}
}
impl PusServiceProvider for PusReceiver {
type Error = ();
fn handle_pus_tc_packet(
&mut self,
service: u8,
_header: &SpHeader,
pus_tc: &PusTc,
) -> Result<(), Self::Error> {
let mut reporter = self
.verif_reporter
.lock()
.expect("Locking Verification Reporter failed");
let init_token = reporter.add_tc(pus_tc);
self.stamper
.update_from_now()
.expect("Updating time for time stamp failed");
self.stamper
.write_to_bytes(&mut self.time_stamp)
.expect("Writing time stamp failed");
let accepted_token = reporter
.acceptance_success(init_token, &self.time_stamp)
.expect("Acceptance success failure");
drop(reporter);
if service == 17 {
self.handle_test_service(pus_tc, accepted_token);
}
Ok(())
}
}
impl PusReceiver {
fn handle_test_service(&mut self, pus_tc: &PusTc, token: VerificationToken<StateAccepted>) {
if pus_tc.subservice() == 1 {
println!("Received PUS ping command TC[17,1]");
println!("Sending ping reply PUS TM[17,2]");
let ping_reply = self.tm_helper.create_pus_tm_timestamp_now(17, 2, None);
let addr = self.tm_store.add_pus_tm(&ping_reply);
let mut reporter = self
.verif_reporter
.lock()
.expect("Error locking verification reporter");
let start_token = reporter
.start_success(token, &self.time_stamp)
.expect("Error sending start success");
self.tm_tx
.send(addr)
.expect("Sending TM to TM funnel failed");
reporter
.completion_success(start_token, &self.time_stamp)
.expect("Error sending completion success");
}
}
}

View File

@@ -1,110 +0,0 @@
use fsrc_core::hal::host::udp_server::{ReceiveResult, UdpTcServer};
use std::net::SocketAddr;
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
use crate::ccsds::CcsdsReceiver;
use crate::pus::PusReceiver;
use crate::UdpTmtcServer;
use fsrc_core::pool::{SharedPool, StoreAddr};
use fsrc_core::pus::verification::SharedStdVerifReporterWithSender;
use fsrc_core::tmtc::{CcsdsDistributor, CcsdsError, PusDistributor};
use spacepackets::tm::PusTm;
pub const PUS_APID: u16 = 0x02;
#[derive(Clone)]
pub struct TmStore {
pub pool: SharedPool,
}
impl TmStore {
pub fn add_pus_tm(&mut self, pus_tm: &PusTm) -> StoreAddr {
let mut pg = self.pool.write().expect("Error locking TM store");
let (addr, buf) = pg.free_element(pus_tm.len_packed()).expect("Store error");
pus_tm
.write_to(buf)
.expect("Writing PUS TM to store failed");
addr
}
}
pub fn core_tmtc_task(
tm_creator_tx: mpsc::Sender<StoreAddr>,
tm_server_rx: mpsc::Receiver<StoreAddr>,
tm_store_helper: TmStore,
addr: SocketAddr,
verif_reporter: SharedStdVerifReporterWithSender,
) {
let pus_receiver = PusReceiver::new(
PUS_APID,
tm_creator_tx,
tm_store_helper.clone(),
verif_reporter,
);
let pus_distributor = PusDistributor::new(Box::new(pus_receiver));
let ccsds_receiver = CcsdsReceiver {
pus_handler: pus_distributor,
};
let ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver));
let udp_tc_server = UdpTcServer::new(addr, 2048, Box::new(ccsds_distributor))
.expect("Creating UDP TMTC server failed");
let mut udp_tmtc_server = UdpTmtcServer {
udp_tc_server,
tm_rx: tm_server_rx,
tm_store: tm_store_helper.pool.clone(),
};
loop {
core_tmtc_loop(&mut udp_tmtc_server);
thread::sleep(Duration::from_millis(400));
}
}
fn core_tmtc_loop(udp_tmtc_server: &mut UdpTmtcServer) {
while core_tc_handling(udp_tmtc_server) {}
if let Some(recv_addr) = udp_tmtc_server.udp_tc_server.last_sender() {
core_tm_handling(udp_tmtc_server, &recv_addr);
}
}
fn core_tc_handling(udp_tmtc_server: &mut UdpTmtcServer) -> bool {
match udp_tmtc_server.udp_tc_server.try_recv_tc() {
Ok(_) => true,
Err(e) => match e {
ReceiveResult::ReceiverError(e) => match e {
CcsdsError::PacketError(e) => {
println!("Got packet error: {e:?}");
true
}
CcsdsError::CustomError(_) => {
println!("Unknown receiver error");
true
}
},
ReceiveResult::IoError(e) => {
println!("IO error {e}");
false
}
ReceiveResult::NothingReceived => false,
},
}
}
fn core_tm_handling(udp_tmtc_server: &mut UdpTmtcServer, recv_addr: &SocketAddr) {
while let Ok(addr) = udp_tmtc_server.tm_rx.try_recv() {
let mut store_lock = udp_tmtc_server
.tm_store
.write()
.expect("Locking TM store failed");
let pg = store_lock.read_with_guard(addr);
let buf = pg.read().expect("Error reading TM pool data");
println!("Sending TM");
udp_tmtc_server
.udp_tc_server
.socket
.send_to(buf, recv_addr)
.expect("Sending TM failed");
}
}

View File

@@ -1,4 +0,0 @@
use std::net::Ipv4Addr;
pub const OBSW_SERVER_ADDR: Ipv4Addr = Ipv4Addr::new(127, 0, 0, 1);
pub const SERVER_PORT: u16 = 7301;

5
satrs-core/.gitignore vendored Normal file
View File

@@ -0,0 +1,5 @@
/target
# CLion
/.idea/*
!/.idea/runConfigurations

9
satrs-core/CHANGELOG.md Normal file
View File

@@ -0,0 +1,9 @@
Change Log
=======
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]

106
satrs-core/Cargo.toml Normal file
View File

@@ -0,0 +1,106 @@
[package]
name = "satrs-core"
version = "0.1.0-alpha.0"
edition = "2021"
rust-version = "1.61"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
description = "Core components of the sat-rs framework to build software for remote systems"
homepage = "https://egit.irs.uni-stuttgart.de/rust/satrs-core"
repository = "https://egit.irs.uni-stuttgart.de/rust/satrs-core"
license = "Apache-2.0"
keywords = ["no-std", "space", "aerospace"]
categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-support", "embedded"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
delegate = ">=0.8, <0.11"
paste = "1"
embed-doc-image = "0.1"
[dependencies.num_enum]
version = "0.6"
default-features = false
[dependencies.dyn-clone]
version = "1"
optional = true
[dependencies.hashbrown]
version = "0.14"
optional = true
[dependencies.heapless]
version = "0.7"
optional = true
[dependencies.num-traits]
version = "0.2"
default-features = false
[dependencies.downcast-rs]
version = "1.2"
default-features = false
optional = true
[dependencies.bus]
version = "2.2"
optional = true
[dependencies.crossbeam-channel]
version= "0.5"
default-features = false
optional = true
[dependencies.thiserror]
version = "1"
optional = true
[dependencies.serde]
version = "1"
default-features = false
optional = true
[dependencies.spacepackets]
# version = "0.6"
# path = "../spacepackets"
git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git"
rev = "784564a20ed"
default-features = false
[dev-dependencies]
serde = "1"
zerocopy = "0.6"
once_cell = "1.13"
serde_json = "1"
[dev-dependencies.postcard]
version = "1"
[features]
default = ["std"]
std = [
"downcast-rs/std",
"alloc",
"bus",
"postcard/use-std",
"crossbeam-channel/std",
"serde/std",
"spacepackets/std",
"num_enum/std",
"thiserror"
]
alloc = [
"serde/alloc",
"spacepackets/alloc",
"hashbrown",
"dyn-clone",
"downcast-rs"
]
serde = ["dep:serde", "spacepackets/serde"]
crossbeam = ["crossbeam-channel"]
heapless = ["dep:heapless"]
doc-images = []
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "doc_cfg"]

201
satrs-core/LICENSE-APACHE Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

1
satrs-core/NOTICE Normal file
View File

@@ -0,0 +1 @@
This software contains code developed at the University of Stuttgart's Institute of Space Systems.

5
satrs-core/README.md Normal file
View File

@@ -0,0 +1,5 @@
satrs-core
======
This crate contains the core components of the sat-rs framework.
You can find more information on the [homepage](https://egit.irs.uni-stuttgart.de/rust/sat-rs).

View File

@@ -0,0 +1,259 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:java="http://www.yworks.com/xml/yfiles-common/1.0/java" xmlns:sys="http://www.yworks.com/xml/yfiles-common/markup/primitives/2.0" xmlns:x="http://www.yworks.com/xml/yfiles-common/markup/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:y="http://www.yworks.com/xml/graphml" xmlns:yed="http://www.yworks.com/xml/yed/3" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd">
<!--Created by yEd 3.22-->
<key attr.name="Description" attr.type="string" for="graph" id="d0"/>
<key for="port" id="d1" yfiles.type="portgraphics"/>
<key for="port" id="d2" yfiles.type="portgeometry"/>
<key for="port" id="d3" yfiles.type="portuserdata"/>
<key attr.name="url" attr.type="string" for="node" id="d4"/>
<key attr.name="description" attr.type="string" for="node" id="d5"/>
<key for="node" id="d6" yfiles.type="nodegraphics"/>
<key for="graphml" id="d7" yfiles.type="resources"/>
<key attr.name="url" attr.type="string" for="edge" id="d8"/>
<key attr.name="description" attr.type="string" for="edge" id="d9"/>
<key for="edge" id="d10" yfiles.type="edgegraphics"/>
<graph edgedefault="directed" id="G">
<data key="d0" xml:space="preserve"/>
<node id="n0">
<data key="d6">
<y:ShapeNode>
<y:Geometry height="509.9999999999999" width="768.7000000000003" x="579.3105418719211" y="304.7"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="16" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="21.936037063598633" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="150.1282958984375" x="26.197490701913352" xml:space="preserve" y="24.234711021505348">Example Event Flow<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="-0.5" labelRatioY="-0.5" nodeRatioX="-0.46591974671274444" nodeRatioY="-0.452480958781362" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n1">
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="203.0" x="814.0" y="506.6799999999999"/>
<y:Fill color="#FFFF00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.452094078063965" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="86.21258544921875" x="58.393707275390625" xml:space="preserve" y="21.27395296096796">Event Manager<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n2">
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="82.0" x="617.6" y="413.23"/>
<y:Fill color="#FF9900" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="55.120361328125" x="13.4398193359375" xml:space="preserve" y="14.547905921936035">Event
Creator 0<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n3">
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="76.55999999999995" x="988.5" y="335.62999999999994"/>
<y:Fill color="#FF9900" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="55.120361328125" x="10.719819335937473" xml:space="preserve" y="14.547905921936035">Event
Creator 2<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n4">
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="72.55999999999983" x="860.6610837438426" y="335.62999999999994"/>
<y:Fill color="#FF9900" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="55.120361328125" x="8.719819335937359" xml:space="preserve" y="14.547905921936035">Event
Creator 1<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n5">
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="87.27999999999997" x="1112.52" y="335.62999999999994"/>
<y:Fill color="#FF9900" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="55.120361328125" x="16.079819335937373" xml:space="preserve" y="14.547905921936035">Event
Creator 3<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n6">
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="126.0" x="781.0" y="620.26"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="92.78865051269531" x="16.605674743652344" xml:space="preserve" y="14.547905921936035">PUS Service 5
Event Reporting
<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n7">
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="118.63999999999987" x="928.2" y="620.26"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="84.08859252929688" x="17.2757037353515" xml:space="preserve" y="14.547905921936035">PUS Service 19
Event Action<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n8">
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="87.27999999999997" x="792.1260377358491" y="733.8400000000001"/>
<y:Fill color="#FFCC99" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="59.932403564453125" x="13.673798217773424" xml:space="preserve" y="14.547905921936035">Telemetry
Sink<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n9">
<data key="d6">
<y:ShapeNode>
<y:Geometry height="170.79999999999995" width="210.80000000000018" x="1076.84" y="601.88"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="left" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="143.6875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="181.591796875" x="8.373079774614325" xml:space="preserve" y="7.444138124199753">Subscriptions
1. Event Creator 0 subscribes
for event 0
2. Event Creator 1 subscribes
for event group 2
3. PUS Service 5 handler
subscribes for all events
4. PUS Service 19 handler
subscribes for all events<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="-0.5" labelRatioY="-0.5" nodeRatioX="-0.4602795077105583" nodeRatioY="-0.45641605313700395" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<edge id="e0" source="n4" target="n1">
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="8.058916256157545" sy="0.0" tx="-10.5" ty="0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="53.92036437988281" x="8.639817810058275" xml:space="preserve" y="29.00100609374465">event 1
(group 1)<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="35.59999999999969" distanceToCenter="true" position="left" ratio="0.34252387409930674" segment="-1"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e1" source="n2" target="n1">
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="0.0" sy="11.93999999999994" tx="-83.5" ty="0.0">
<y:Point x="832.0" y="455.16999999999996"/>
</y:Path>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="53.92036437988281" x="25.334655000000453" xml:space="preserve" y="-40.972107505798476">event 0
(group 0)<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="25.520000000000095" distanceToCenter="true" position="left" ratio="0.20267159489379444" segment="0"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e2" source="n3" target="n1">
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="-23.719999999999914" sy="5.5" tx="87.56000000000006" ty="0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="53.92036437988281" x="5.6761352539062955" xml:space="preserve" y="27.551854405966765">event 2
(group 3)<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="5.676132812499983" distanceToCenter="false" position="left" ratio="0.3219761157957032" segment="-1"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e3" source="n5" target="n1">
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="-6.275467980295616" sy="0.0" tx="57.5" ty="8.5">
<y:Point x="1149.8845320197042" y="545.1799999999998"/>
</y:Path>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="97.38468933105469" x="26.667665869801795" xml:space="preserve" y="43.287014528669715">event 3 (group 2)
event 4 (group 2)<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="75.3599999999999" distanceToCenter="true" position="left" ratio="0.2967848459873102" segment="0"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e4" source="n1" target="n6">
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="-65.0" sy="0.0" tx="6.5" ty="0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.452094078063965" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="83.16456604003906" x="-98.78228302001958" xml:space="preserve" y="16.63042580701972">&lt;&lt;all events&gt;&gt;<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="57.20000000000004" distanceToCenter="true" position="right" ratio="0.4441995640590947" segment="-1"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e5" source="n1" target="n7">
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="42.660000000000196" sy="0.0" tx="-29.359999999999786" ty="0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.452094078063965" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="83.16456604003906" x="20.4177438354493" xml:space="preserve" y="17.885881494816203">&lt;&lt;all events&gt;&gt;<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="62.0" distanceToCenter="true" position="left" ratio="0.492249939452652" segment="-1"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e6" source="n1" target="n2">
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="0.0" sy="0.0" tx="0.0" ty="0.0">
<y:Point x="658.6" y="536.6799999999998"/>
</y:Path>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="44.69230651855469" x="-131.99129340961497" xml:space="preserve" y="-45.45208675384538">event 1
event 2<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="0.0" distance="30.0" distanceToCenter="true" position="right" ratio="0.6426904695623505" segment="0"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e7" source="n1" target="n4">
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="-35.69940886699487" sy="0.0" tx="-17.140492610837327" ty="1.5"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.452094078063965" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="46.14430236816406" x="-54.352158195608126" xml:space="preserve" y="-79.29459128622307">group 2<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="31.279999999999973" distanceToCenter="true" position="left" ratio="0.6800790648728832" segment="-1"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e8" source="n6" target="n8">
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="0.0" sy="0.0" tx="8.233962264150945" ty="-21.42352238805968"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="87.40060424804688" x="-100.50030212402339" xml:space="preserve" y="11.337896156311103">enabled Events
as PUS 5 TM<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="56.79999999999995" distanceToCenter="true" position="right" ratio="0.5" segment="0"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
</graph>
<data key="d7">
<y:Resources/>
</data>
</graphml>

Binary file not shown.

After

Width:  |  Height:  |  Size: 70 KiB

0
satrs-core/src/device.rs Normal file
View File

718
satrs-core/src/event_man.rs Normal file
View File

@@ -0,0 +1,718 @@
//! Event management and forwarding
//!
//! This module provides components to perform event routing. The most important component for this
//! task is the [EventManager]. It receives all events and then routes them to event subscribers
//! where appropriate.
#![cfg_attr(feature = "doc-images",
cfg_attr(all(),
doc = ::embed_doc_image::embed_image!("event_man_arch", "images/event_man_arch.png"
)))]
#![cfg_attr(
not(feature = "doc-images"),
doc = "**Doc images not enabled**. Compile with feature `doc-images` and Rust version >= 1.54 \
to enable."
)]
//! One common use case for satellite systems is to offer a light-weight publish-subscribe and IPC
//! mechanism for software and hardware events, which can also be packaged as telemetry (TM) or
//! trigger a system response.
//!
//! The following graph shows what the event flow for such a setup could look like:
//!
//! ![Event flow][event_man_arch]
//!
//! The event manager has a listener table abstracted by the [ListenerTable], which maps
//! listener groups identified by [ListenerKey]s to a [sender ID][ChannelId].
//! It also contains a sender table abstracted by the [SenderTable] which maps these sender IDs
//! to concrete [SendEventProvider]s. A simple approach would be to use one send event provider
//! for each OBSW thread and then subscribe for all interesting events for a particular thread
//! using the send event provider ID.
//!
//! This can be done with the [EventManager] like this:
//!
//! 1. Provide a concrete [EventReceiver] implementation. This abstraction allows using different
//! message queue backends. A straightforward implementation for cases where dynamic memory
//! allocation is not a big concern could use [std::sync::mpsc::channel], and one is provided in
//! the form of the [MpscEventReceiver].
//! 2. To set up event creators, create channel pairs using some message queue implementation.
//! Each event creator gets a (cloned) sender component which allows it to send events to the
//! manager.
//! 3. The event manager receives the receiver component as part of an [EventReceiver]
//! implementation so that all events are routed to the manager.
//! 4. Create the [send event providers][SendEventProvider] which allow routing events to
//! subscribers. You can now use their [sender IDs][SendEventProvider::id] to subscribe for
//! single events or event groups, for example by using the [EventManager::subscribe_single] method.
//! 5. Add the send provider as well using the [EventManager::add_sender] call so the event
//! manager can route listener groups to the send provider.
//!
//! Some components like a PUS Event Service or PUS Event Action Service might require all
//! events in order to package them as telemetry or to trigger actions where applicable.
//! Other components might only be interested in certain events. For example, a thermal system
//! handler might only be interested in temperature events generated by a thermal sensor component.
//!
//! # Examples
//!
//! You can check the [integration test](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-core/tests/pus_events.rs)
//! for a concrete example using multi-threading where events are routed to
//! different threads.
use crate::events::{EventU16, EventU32, GenericEvent, LargestEventRaw, LargestGroupIdRaw};
use crate::params::{Params, ParamsHeapless};
#[cfg(feature = "alloc")]
use alloc::boxed::Box;
#[cfg(feature = "alloc")]
use alloc::vec;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use core::slice::Iter;
#[cfg(feature = "alloc")]
use hashbrown::HashMap;
use crate::ChannelId;
#[cfg(feature = "std")]
pub use stdmod::*;
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
pub enum ListenerKey {
Single(LargestEventRaw),
Group(LargestGroupIdRaw),
All,
}
pub type EventWithHeaplessAuxData<Event> = (Event, Option<ParamsHeapless>);
pub type EventU32WithHeaplessAuxData = EventWithHeaplessAuxData<EventU32>;
pub type EventU16WithHeaplessAuxData = EventWithHeaplessAuxData<EventU16>;
pub type EventWithAuxData<Event> = (Event, Option<Params>);
pub type EventU32WithAuxData = EventWithAuxData<EventU32>;
pub type EventU16WithAuxData = EventWithAuxData<EventU16>;
pub trait SendEventProvider<Provider: GenericEvent, AuxDataProvider = Params> {
type Error;
fn id(&self) -> ChannelId;
fn send_no_data(&self, event: Provider) -> Result<(), Self::Error> {
self.send(event, None)
}
fn send(&self, event: Provider, aux_data: Option<AuxDataProvider>) -> Result<(), Self::Error>;
}
/// Generic abstraction for an event receiver.
pub trait EventReceiver<Event: GenericEvent, AuxDataProvider = Params> {
/// This function has to be provided by any event receiver. A receive call may or may not return
/// an event.
///
/// Any optional auxiliary data belonging to the event is returned as the second entry of the
/// returned tuple.
fn receive(&self) -> Option<(Event, Option<AuxDataProvider>)>;
}
pub trait ListenerTable {
fn get_listeners(&self) -> Vec<ListenerKey>;
fn contains_listener(&self, key: &ListenerKey) -> bool;
fn get_listener_ids(&self, key: &ListenerKey) -> Option<Iter<ChannelId>>;
fn add_listener(&mut self, key: ListenerKey, sender_id: ChannelId) -> bool;
fn remove_duplicates(&mut self, key: &ListenerKey);
}
pub trait SenderTable<SendProviderError, Event: GenericEvent = EventU32, AuxDataProvider = Params> {
fn contains_send_event_provider(&self, id: &ChannelId) -> bool;
fn get_send_event_provider(
&mut self,
id: &ChannelId,
) -> Option<&mut Box<dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>>>;
fn add_send_event_provider(
&mut self,
send_provider: Box<
dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>,
>,
) -> bool;
}
/// Generic event manager implementation.
///
/// # Generics
///
/// * `SendProviderError`: [SendEventProvider] error type
/// * `Event`: Concrete event provider, currently either [EventU32] or [EventU16]
/// * `AuxDataProvider`: Concrete auxiliary data provider, currently either [Params] or
/// [ParamsHeapless]
pub struct EventManager<SendProviderError, Event: GenericEvent = EventU32, AuxDataProvider = Params>
{
listener_table: Box<dyn ListenerTable>,
sender_table: Box<dyn SenderTable<SendProviderError, Event, AuxDataProvider>>,
event_receiver: Box<dyn EventReceiver<Event, AuxDataProvider>>,
}
/// Safety: It is safe to implement [Send] because all fields in the [EventManager] are [Send]
/// as well
#[cfg(feature = "std")]
unsafe impl<E, Event: GenericEvent + Send, AuxDataProvider: Send> Send
for EventManager<E, Event, AuxDataProvider>
{
}
#[cfg(feature = "std")]
pub type EventManagerWithMpscQueue<Event, AuxDataProvider> = EventManager<
std::sync::mpsc::SendError<(Event, Option<AuxDataProvider>)>,
Event,
AuxDataProvider,
>;
#[derive(Debug)]
pub enum EventRoutingResult<Event: GenericEvent, AuxDataProvider> {
/// No event was received
Empty,
/// An event was received and routed.
/// The first tuple entry will contain the number of recipients.
Handled(u32, Event, Option<AuxDataProvider>),
}
#[derive(Debug)]
pub enum EventRoutingError<E> {
SendError(E),
NoSendersForKey(ListenerKey),
NoSenderForId(ChannelId),
}
#[derive(Debug)]
pub struct EventRoutingErrorsWithResult<Event: GenericEvent, AuxDataProvider, E> {
pub result: EventRoutingResult<Event, AuxDataProvider>,
pub errors: [Option<EventRoutingError<E>>; 3],
}
impl<E, Event: GenericEvent + Copy> EventManager<E, Event> {
pub fn remove_duplicates(&mut self, key: &ListenerKey) {
self.listener_table.remove_duplicates(key)
}
/// Subscribe for a unique event.
pub fn subscribe_single(&mut self, event: &Event, sender_id: ChannelId) {
self.update_listeners(ListenerKey::Single(event.raw_as_largest_type()), sender_id);
}
/// Subscribe for an event group.
pub fn subscribe_group(&mut self, group_id: LargestGroupIdRaw, sender_id: ChannelId) {
self.update_listeners(ListenerKey::Group(group_id), sender_id);
}
/// Subscribe for all events received by the manager.
///
/// For example, this can be useful for a handler component which sends every event as
/// a telemetry packet.
pub fn subscribe_all(&mut self, sender_id: ChannelId) {
self.update_listeners(ListenerKey::All, sender_id);
}
}
impl<E: 'static, Event: GenericEvent + Copy + 'static, AuxDataProvider: Clone + 'static>
EventManager<E, Event, AuxDataProvider>
{
/// Create an event manager where the sender table will be the [DefaultSenderTableProvider]
/// and the listener table will be the [DefaultListenerTableProvider].
pub fn new(event_receiver: Box<dyn EventReceiver<Event, AuxDataProvider>>) -> Self {
let listener_table: Box<DefaultListenerTableProvider> = Box::default();
let sender_table: Box<DefaultSenderTableProvider<E, Event, AuxDataProvider>> =
Box::default();
Self::new_custom_tables(listener_table, sender_table, event_receiver)
}
}
impl<E, Event: GenericEvent + Copy, AuxDataProvider: Clone>
EventManager<E, Event, AuxDataProvider>
{
pub fn new_custom_tables(
listener_table: Box<dyn ListenerTable>,
sender_table: Box<dyn SenderTable<E, Event, AuxDataProvider>>,
event_receiver: Box<dyn EventReceiver<Event, AuxDataProvider>>,
) -> Self {
EventManager {
listener_table,
sender_table,
event_receiver,
}
}
pub fn add_sender(
&mut self,
send_provider: impl SendEventProvider<Event, AuxDataProvider, Error = E> + 'static,
) {
if !self
.sender_table
.contains_send_event_provider(&send_provider.id())
{
self.sender_table
.add_send_event_provider(Box::new(send_provider));
}
}
fn update_listeners(&mut self, key: ListenerKey, sender_id: ChannelId) {
self.listener_table.add_listener(key, sender_id);
}
/// This function will use the cached event receiver and try to receive one event.
/// If an event was received, it will try to route that event to all subscribed event listeners.
/// If this works without any issues, the [EventRoutingResult] will contain context information
/// about the routed event.
///
/// This function will track up to 3 errors returned as part of the
/// [EventRoutingErrorsWithResult] error struct.
pub fn try_event_handling(
&mut self,
) -> Result<
EventRoutingResult<Event, AuxDataProvider>,
EventRoutingErrorsWithResult<Event, AuxDataProvider, E>,
> {
let mut err_idx = 0;
let mut err_slice = [None, None, None];
let mut num_recipients = 0;
let mut add_error = |error: EventRoutingError<E>| {
if err_idx < 3 {
err_slice[err_idx] = Some(error);
err_idx += 1;
}
};
let mut send_handler =
|key: &ListenerKey, event: Event, aux_data: &Option<AuxDataProvider>| {
if self.listener_table.contains_listener(key) {
if let Some(ids) = self.listener_table.get_listener_ids(key) {
for id in ids {
if let Some(sender) = self.sender_table.get_send_event_provider(id) {
if let Err(e) = sender.send(event, aux_data.clone()) {
add_error(EventRoutingError::SendError(e));
} else {
num_recipients += 1;
}
} else {
add_error(EventRoutingError::NoSenderForId(*id));
}
}
} else {
add_error(EventRoutingError::NoSendersForKey(*key));
}
}
};
if let Some((event, aux_data)) = self.event_receiver.receive() {
let single_key = ListenerKey::Single(event.raw_as_largest_type());
send_handler(&single_key, event, &aux_data);
let group_key = ListenerKey::Group(event.group_id_as_largest_type());
send_handler(&group_key, event, &aux_data);
send_handler(&ListenerKey::All, event, &aux_data);
if err_idx > 0 {
return Err(EventRoutingErrorsWithResult {
result: EventRoutingResult::Handled(num_recipients, event, aux_data),
errors: err_slice,
});
}
return Ok(EventRoutingResult::Handled(num_recipients, event, aux_data));
}
Ok(EventRoutingResult::Empty)
}
}
#[derive(Default)]
pub struct DefaultListenerTableProvider {
listeners: HashMap<ListenerKey, Vec<ChannelId>>,
}
pub struct DefaultSenderTableProvider<
SendProviderError,
Event: GenericEvent = EventU32,
AuxDataProvider = Params,
> {
senders: HashMap<
ChannelId,
Box<dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>>,
>,
}
impl<SendProviderError, Event: GenericEvent, AuxDataProvider> Default
for DefaultSenderTableProvider<SendProviderError, Event, AuxDataProvider>
{
fn default() -> Self {
Self {
senders: HashMap::new(),
}
}
}
impl ListenerTable for DefaultListenerTableProvider {
fn get_listeners(&self) -> Vec<ListenerKey> {
let mut key_list = Vec::new();
for key in self.listeners.keys() {
key_list.push(*key);
}
key_list
}
fn contains_listener(&self, key: &ListenerKey) -> bool {
self.listeners.contains_key(key)
}
fn get_listener_ids(&self, key: &ListenerKey) -> Option<Iter<ChannelId>> {
self.listeners.get(key).map(|vec| vec.iter())
}
fn add_listener(&mut self, key: ListenerKey, sender_id: ChannelId) -> bool {
if let Some(existing_list) = self.listeners.get_mut(&key) {
existing_list.push(sender_id);
} else {
let new_list = vec![sender_id];
self.listeners.insert(key, new_list);
}
true
}
fn remove_duplicates(&mut self, key: &ListenerKey) {
if let Some(list) = self.listeners.get_mut(key) {
list.sort_unstable();
list.dedup();
}
}
}
impl<SendProviderError, Event: GenericEvent, AuxDataProvider>
SenderTable<SendProviderError, Event, AuxDataProvider>
for DefaultSenderTableProvider<SendProviderError, Event, AuxDataProvider>
{
fn contains_send_event_provider(&self, id: &ChannelId) -> bool {
self.senders.contains_key(id)
}
fn get_send_event_provider(
&mut self,
id: &ChannelId,
) -> Option<&mut Box<dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>>>
{
self.senders.get_mut(id).filter(|sender| sender.id() == *id)
}
fn add_send_event_provider(
&mut self,
send_provider: Box<
dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>,
>,
) -> bool {
let id = send_provider.id();
if self.senders.contains_key(&id) {
return false;
}
self.senders.insert(id, send_provider).is_none()
}
}
#[cfg(feature = "std")]
pub mod stdmod {
use super::*;
use crate::event_man::{EventReceiver, EventWithAuxData};
use crate::events::{EventU16, EventU32, GenericEvent};
use crate::params::Params;
use std::sync::mpsc::{Receiver, SendError, Sender};
pub struct MpscEventReceiver<Event: GenericEvent + Send = EventU32> {
mpsc_receiver: Receiver<(Event, Option<Params>)>,
}
impl<Event: GenericEvent + Send> MpscEventReceiver<Event> {
pub fn new(receiver: Receiver<(Event, Option<Params>)>) -> Self {
Self {
mpsc_receiver: receiver,
}
}
}
impl<Event: GenericEvent + Send> EventReceiver<Event> for MpscEventReceiver<Event> {
fn receive(&self) -> Option<EventWithAuxData<Event>> {
if let Ok(event_and_data) = self.mpsc_receiver.try_recv() {
return Some(event_and_data);
}
None
}
}
pub type MpscEventU32Receiver = MpscEventReceiver<EventU32>;
pub type MpscEventU16Receiver = MpscEventReceiver<EventU16>;
#[derive(Clone)]
pub struct MpscEventSendProvider<Event: GenericEvent + Send> {
id: u32,
sender: Sender<(Event, Option<Params>)>,
}
impl<Event: GenericEvent + Send> MpscEventSendProvider<Event> {
pub fn new(id: u32, sender: Sender<(Event, Option<Params>)>) -> Self {
Self { id, sender }
}
}
impl<Event: GenericEvent + Send> SendEventProvider<Event> for MpscEventSendProvider<Event> {
type Error = SendError<(Event, Option<Params>)>;
fn id(&self) -> u32 {
self.id
}
fn send(&self, event: Event, aux_data: Option<Params>) -> Result<(), Self::Error> {
self.sender.send((event, aux_data))
}
}
pub type MpscEventU32SendProvider = MpscEventSendProvider<EventU32>;
pub type MpscEventU16SendProvider = MpscEventSendProvider<EventU16>;
}
#[cfg(test)]
mod tests {
use super::*;
use crate::event_man::EventManager;
use crate::events::{EventU32, GenericEvent, Severity};
use crate::params::ParamsRaw;
use alloc::boxed::Box;
use std::format;
use std::sync::mpsc::{channel, Receiver, SendError, Sender};
#[derive(Clone)]
struct MpscEventSenderQueue {
id: u32,
mpsc_sender: Sender<EventU32WithAuxData>,
}
impl MpscEventSenderQueue {
fn new(id: u32, mpsc_sender: Sender<EventU32WithAuxData>) -> Self {
Self { id, mpsc_sender }
}
}
impl SendEventProvider<EventU32> for MpscEventSenderQueue {
type Error = SendError<EventU32WithAuxData>;
fn id(&self) -> u32 {
self.id
}
fn send(&self, event: EventU32, aux_data: Option<Params>) -> Result<(), Self::Error> {
self.mpsc_sender.send((event, aux_data))
}
}
fn check_next_event(
expected: EventU32,
receiver: &Receiver<EventU32WithAuxData>,
) -> Option<Params> {
if let Ok(event) = receiver.try_recv() {
assert_eq!(event.0, expected);
return event.1;
}
None
}
fn check_handled_event(
res: EventRoutingResult<EventU32, Params>,
expected: EventU32,
expected_num_sent: u32,
) {
assert!(matches!(res, EventRoutingResult::Handled { .. }));
if let EventRoutingResult::Handled(num_recipients, event, _aux_data) = res {
assert_eq!(event, expected);
assert_eq!(num_recipients, expected_num_sent);
}
}
fn generic_event_man() -> (
Sender<EventU32WithAuxData>,
EventManager<SendError<EventU32WithAuxData>>,
) {
let (event_sender, manager_queue) = channel();
let event_man_receiver = MpscEventReceiver::new(manager_queue);
(
event_sender,
EventManager::new(Box::new(event_man_receiver)),
)
}
#[test]
fn test_basic() {
let (event_sender, mut event_man) = generic_event_man();
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap();
let event_grp_1_0 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
let (single_event_sender, single_event_receiver) = channel();
let single_event_listener = MpscEventSenderQueue::new(0, single_event_sender);
event_man.subscribe_single(&event_grp_0, single_event_listener.id());
event_man.add_sender(single_event_listener);
let (group_event_sender_0, group_event_receiver_0) = channel();
let group_event_listener = MpscEventSenderQueue {
id: 1,
mpsc_sender: group_event_sender_0,
};
event_man.subscribe_group(event_grp_1_0.group_id(), group_event_listener.id());
event_man.add_sender(group_event_listener);
// Test event with one listener
event_sender
.send((event_grp_0, None))
.expect("Sending single error failed");
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_grp_0, 1);
check_next_event(event_grp_0, &single_event_receiver);
// Test event which is sent to all group listeners
event_sender
.send((event_grp_1_0, None))
.expect("Sending group error failed");
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_grp_1_0, 1);
check_next_event(event_grp_1_0, &group_event_receiver_0);
}
#[test]
fn test_with_basic_aux_data() {
let (event_sender, mut event_man) = generic_event_man();
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap();
let (single_event_sender, single_event_receiver) = channel();
let single_event_listener = MpscEventSenderQueue::new(0, single_event_sender);
event_man.subscribe_single(&event_grp_0, single_event_listener.id());
event_man.add_sender(single_event_listener);
event_sender
.send((event_grp_0, Some(Params::Heapless((2_u32, 3_u32).into()))))
.expect("Sending group error failed");
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_grp_0, 1);
let aux = check_next_event(event_grp_0, &single_event_receiver);
assert!(aux.is_some());
let aux = aux.unwrap();
if let Params::Heapless(ParamsHeapless::Raw(ParamsRaw::U32Pair(pair))) = aux {
assert_eq!(pair.0, 2);
assert_eq!(pair.1, 3);
} else {
panic!("{}", format!("Unexpected auxiliary value type {:?}", aux));
}
}
/// Test listening for multiple groups
#[test]
fn test_multi_group() {
let (event_sender, mut event_man) = generic_event_man();
let res = event_man.try_event_handling();
assert!(res.is_ok());
let hres = res.unwrap();
assert!(matches!(hres, EventRoutingResult::Empty));
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap();
let event_grp_1_0 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
let (event_grp_0_sender, event_grp_0_receiver) = channel();
let event_grp_0_and_1_listener = MpscEventSenderQueue {
id: 0,
mpsc_sender: event_grp_0_sender,
};
event_man.subscribe_group(event_grp_0.group_id(), event_grp_0_and_1_listener.id());
event_man.subscribe_group(event_grp_1_0.group_id(), event_grp_0_and_1_listener.id());
event_man.add_sender(event_grp_0_and_1_listener);
event_sender
.send((event_grp_0, None))
.expect("Sending Event Group 0 failed");
event_sender
.send((event_grp_1_0, None))
.expect("Sendign Event Group 1 failed");
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_grp_0, 1);
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_grp_1_0, 1);
check_next_event(event_grp_0, &event_grp_0_receiver);
check_next_event(event_grp_1_0, &event_grp_0_receiver);
}
/// Test listening to the same event from multiple listeners. Also test listening
/// to both group and single events from one listener
#[test]
fn test_listening_to_same_event_and_multi_type() {
let (event_sender, mut event_man) = generic_event_man();
let event_0 = EventU32::new(Severity::INFO, 0, 5).unwrap();
let event_1 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
let (event_0_tx_0, event_0_rx_0) = channel();
let (event_0_tx_1, event_0_rx_1) = channel();
let event_listener_0 = MpscEventSenderQueue {
id: 0,
mpsc_sender: event_0_tx_0,
};
let event_listener_1 = MpscEventSenderQueue {
id: 1,
mpsc_sender: event_0_tx_1,
};
let event_listener_0_sender_id = event_listener_0.id();
event_man.subscribe_single(&event_0, event_listener_0_sender_id);
event_man.add_sender(event_listener_0);
let event_listener_1_sender_id = event_listener_1.id();
event_man.subscribe_single(&event_0, event_listener_1_sender_id);
event_man.add_sender(event_listener_1);
event_sender
.send((event_0, None))
.expect("Triggering Event 0 failed");
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_0, 2);
check_next_event(event_0, &event_0_rx_0);
check_next_event(event_0, &event_0_rx_1);
event_man.subscribe_group(event_1.group_id(), event_listener_0_sender_id);
event_sender
.send((event_0, None))
.expect("Triggering Event 0 failed");
event_sender
.send((event_1, None))
.expect("Triggering Event 1 failed");
// 3 Events messages will be sent now
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_0, 2);
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_1, 1);
// Both the single event and the group event should arrive now
check_next_event(event_0, &event_0_rx_0);
check_next_event(event_1, &event_0_rx_0);
// Do double insertion and then remove duplicates
event_man.subscribe_group(event_1.group_id(), event_listener_0_sender_id);
event_man.remove_duplicates(&ListenerKey::Group(event_1.group_id()));
event_sender
.send((event_1, None))
.expect("Triggering Event 1 failed");
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_1, 1);
}
#[test]
fn test_all_events_listener() {
let (event_sender, manager_queue) = channel();
let event_man_receiver = MpscEventReceiver::new(manager_queue);
let mut event_man: EventManager<SendError<EventU32WithAuxData>> =
EventManager::new(Box::new(event_man_receiver));
let event_0 = EventU32::new(Severity::INFO, 0, 5).unwrap();
let event_1 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
let (event_0_tx_0, all_events_rx) = channel();
let all_events_listener = MpscEventSenderQueue {
id: 0,
mpsc_sender: event_0_tx_0,
};
event_man.subscribe_all(all_events_listener.id());
event_man.add_sender(all_events_listener);
event_sender
.send((event_0, None))
.expect("Triggering event 0 failed");
event_sender
.send((event_1, None))
.expect("Triggering event 1 failed");
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_0, 1);
let res = event_man.try_event_handling();
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_1, 1);
check_next_event(event_0, &all_events_rx);
check_next_event(event_1, &all_events_rx);
}
}
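
The module documentation above describes the setup in five steps. A condensed sketch of that wiring, modeled on the unit tests in this file, might look as follows (the channel ID, the example event, and the `satrs_core` import paths are assumptions for illustration, not a verified build):

```rust
use std::sync::mpsc::{channel, SendError};

use satrs_core::event_man::{
    EventManager, EventU32WithAuxData, MpscEventReceiver, MpscEventU32SendProvider,
    SendEventProvider,
};
use satrs_core::events::{EventU32, Severity};

fn main() {
    // Steps 2 and 3: the manager owns the receiving end of a channel, event creators
    // get (cloned) senders to report their events.
    let (event_sender, manager_queue) = channel();
    let event_man_receiver = MpscEventReceiver::new(manager_queue);
    // Step 1: construct the manager with the default listener and sender tables.
    let mut event_man: EventManager<SendError<EventU32WithAuxData>> =
        EventManager::new(Box::new(event_man_receiver));

    // Step 4: a send provider which forwards routed events to a subscriber queue.
    let (sub_tx, sub_rx) = channel();
    let subscriber = MpscEventU32SendProvider::new(0, sub_tx);
    let some_event = EventU32::new(Severity::INFO, 0, 0).unwrap();
    event_man.subscribe_single(&some_event, subscriber.id());
    // Step 5: register the send provider so the manager can route events to it.
    event_man.add_sender(subscriber);

    // An event creator triggers the event, the manager routes it to the subscriber.
    event_sender.send((some_event, None)).unwrap();
    event_man.try_event_handling().expect("event routing failed");
    assert_eq!(sub_rx.try_recv().unwrap().0, some_event);
}
```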

826
satrs-core/src/events.rs Normal file
View File

@@ -0,0 +1,826 @@
//! Event support module
//!
//! This module includes the basic event structs [EventU32] and [EventU16] and versions with the
//! ECSS severity levels as a type parameter. These structs are simple abstractions on top of the
//! [u32] and [u16] types where the raw value is the unique identifier for a particular event.
//! The abstraction also allows to group related events using a group ID, and the severity
//! of an event is encoded inside the raw value itself with four possible [Severity] levels:
//!
//! - INFO
//! - LOW
//! - MEDIUM
//! - HIGH
//!
//! All event structs implement the [EcssEnumeration] trait and can be created as constants.
//! This makes it easy to create a static list of constant events which can then be used to generate
//! event telemetry using the PUS event manager modules.
//!
//! # Examples
//!
//! ```
//! use satrs_core::events::{EventU16, EventU32, EventU32TypedSev, Severity, SeverityHigh, SeverityInfo};
//!
//! const MSG_RECVD: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::const_new(1, 0);
//! const MSG_FAILED: EventU32 = EventU32::const_new(Severity::LOW, 1, 1);
//!
//! const TEMPERATURE_HIGH: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::const_new(2, 0);
//!
//! let small_event = EventU16::new(Severity::INFO, 3, 0);
//! ```
use core::fmt::Debug;
use core::hash::Hash;
use core::marker::PhantomData;
use delegate::delegate;
use spacepackets::ecss::EcssEnumeration;
use spacepackets::util::{ToBeBytes, UnsignedEnum};
use spacepackets::{ByteConversionError, SizeMissmatch};
/// Using a type definition makes it easier to change this to u64 in the future
pub type LargestEventRaw = u32;
/// Using a type definition makes it easier to change this to u32 in the future
pub type LargestGroupIdRaw = u16;
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub enum Severity {
INFO = 0,
LOW = 1,
MEDIUM = 2,
HIGH = 3,
}
pub trait HasSeverity: Debug + PartialEq + Eq + Copy + Clone {
const SEVERITY: Severity;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityInfo {}
impl HasSeverity for SeverityInfo {
const SEVERITY: Severity = Severity::INFO;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityLow {}
impl HasSeverity for SeverityLow {
const SEVERITY: Severity = Severity::LOW;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityMedium {}
impl HasSeverity for SeverityMedium {
const SEVERITY: Severity = Severity::MEDIUM;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityHigh {}
impl HasSeverity for SeverityHigh {
const SEVERITY: Severity = Severity::HIGH;
}
pub trait GenericEvent: EcssEnumeration {
type Raw;
type GroupId;
type UniqueId;
fn raw(&self) -> Self::Raw;
fn severity(&self) -> Severity;
fn group_id(&self) -> Self::GroupId;
fn unique_id(&self) -> Self::UniqueId;
fn raw_as_largest_type(&self) -> LargestEventRaw;
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw;
}
impl TryFrom<u8> for Severity {
type Error = ();
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
x if x == Severity::INFO as u8 => Ok(Severity::INFO),
x if x == Severity::LOW as u8 => Ok(Severity::LOW),
x if x == Severity::MEDIUM as u8 => Ok(Severity::MEDIUM),
x if x == Severity::HIGH as u8 => Ok(Severity::HIGH),
_ => Err(()),
}
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct EventBase<RAW, GID, UID> {
severity: Severity,
group_id: GID,
unique_id: UID,
phantom: PhantomData<RAW>,
}
impl<RAW: ToBeBytes, GID, UID> EventBase<RAW, GID, UID> {
fn write_to_bytes(
&self,
raw: RAW,
buf: &mut [u8],
width: usize,
) -> Result<usize, ByteConversionError> {
if buf.len() < width {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: width,
}));
}
buf[0..width].copy_from_slice(raw.to_be_bytes().as_ref());
Ok(raw.written_len())
}
}
impl EventBase<u32, u16, u16> {
#[inline]
fn raw(&self) -> u32 {
((self.severity as u32) << 30) | ((self.group_id as u32) << 16) | self.unique_id as u32
}
}
impl EventBase<u16, u8, u8> {
#[inline]
fn raw(&self) -> u16 {
((self.severity as u16) << 14) | ((self.group_id as u16) << 8) | self.unique_id as u16
}
}
impl<RAW, GID, UID> EventBase<RAW, GID, UID> {
#[inline]
pub fn severity(&self) -> Severity {
self.severity
}
}
impl<RAW, GID> EventBase<RAW, GID, u16> {
#[inline]
pub fn unique_id(&self) -> u16 {
self.unique_id
}
}
impl<RAW, GID> EventBase<RAW, GID, u8> {
#[inline]
pub fn unique_id(&self) -> u8 {
self.unique_id
}
}
impl<RAW, UID> EventBase<RAW, u16, UID> {
#[inline]
pub fn group_id(&self) -> u16 {
self.group_id
}
}
impl<RAW, UID> EventBase<RAW, u8, UID> {
#[inline]
pub fn group_id(&self) -> u8 {
self.group_id
}
}
macro_rules! event_provider_impl {
() => {
#[inline]
fn raw(&self) -> Self::Raw {
self.base.raw()
}
/// Retrieve the severity of an event.
#[inline]
fn severity(&self) -> Severity {
self.base.severity()
}
#[inline]
fn group_id(&self) -> Self::GroupId {
self.base.group_id()
}
#[inline]
fn unique_id(&self) -> Self::UniqueId {
self.base.unique_id()
}
};
}
macro_rules! impl_event_provider {
($BaseIdent: ident, $TypedIdent: ident, $raw: ty, $gid: ty, $uid: ty) => {
impl GenericEvent for $BaseIdent {
type Raw = $raw;
type GroupId = $gid;
type UniqueId = $uid;
event_provider_impl!();
fn raw_as_largest_type(&self) -> LargestEventRaw {
self.raw().into()
}
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw {
self.group_id().into()
}
}
impl<SEVERITY: HasSeverity> GenericEvent for $TypedIdent<SEVERITY> {
type Raw = $raw;
type GroupId = $gid;
type UniqueId = $uid;
delegate!(to self.event {
fn raw(&self) -> Self::Raw;
fn severity(&self) -> Severity;
fn group_id(&self) -> Self::GroupId;
fn unique_id(&self) -> Self::UniqueId;
fn raw_as_largest_type(&self) -> LargestEventRaw;
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw;
});
}
}
}
macro_rules! try_from_impls {
($SevIdent: ident, $severity: path, $raw: ty, $TypedSevIdent: ident) => {
impl TryFrom<$raw> for $TypedSevIdent<$SevIdent> {
type Error = Severity;
fn try_from(raw: $raw) -> Result<Self, Self::Error> {
Self::try_from_generic($severity, raw)
}
}
};
}
macro_rules! const_from_fn {
($from_fn_name: ident, $TypedIdent: ident, $SevIdent: ident) => {
pub const fn $from_fn_name(event: $TypedIdent<$SevIdent>) -> Self {
Self {
base: event.event.base,
}
}
};
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EventU32 {
base: EventBase<u32, u16, u16>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EventU32TypedSev<SEVERITY> {
event: EventU32,
phantom: PhantomData<SEVERITY>,
}
impl<SEVERITY: HasSeverity> From<EventU32TypedSev<SEVERITY>> for EventU32 {
fn from(e: EventU32TypedSev<SEVERITY>) -> Self {
Self { base: e.event.base }
}
}
impl<Severity: HasSeverity> AsRef<EventU32> for EventU32TypedSev<Severity> {
fn as_ref(&self) -> &EventU32 {
&self.event
}
}
impl<Severity: HasSeverity> AsMut<EventU32> for EventU32TypedSev<Severity> {
fn as_mut(&mut self) -> &mut EventU32 {
&mut self.event
}
}
impl_event_provider!(EventU32, EventU32TypedSev, u32, u16, u16);
impl EventU32 {
/// Generate an event. The raw representation of an event has 32 bits.
/// If the passed group ID is invalid (too large), [None] will be returned
///
/// # Parameter
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 2 bits of the raw event ID
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 14 bits after the severity. Therefore, the group ID value is limited to 16383 (0x3FFF).
/// * `unique_id`: Each event has a unique 16 bit ID occupying the last 16 bits of the
/// raw event ID
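///
/// # Example
///
/// A short illustration of the resulting raw value, derived from the bit layout described above:
///
/// ```
/// use satrs_core::events::{EventU32, GenericEvent, Severity};
///
/// let event = EventU32::new(Severity::LOW, 1, 1).unwrap();
/// // severity LOW (1) in the uppermost 2 bits, group ID 1 in the next 14 bits,
/// // unique ID 1 in the lowest 16 bits
/// assert_eq!(event.raw(), 0x4001_0001);
/// ```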
pub fn new(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
if group_id > (2u16.pow(14) - 1) {
return None;
}
Some(Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
})
}
pub const fn const_new(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
if group_id > (2u16.pow(14) - 1) {
panic!("Group ID too large");
}
Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
}
}
const_from_fn!(const_from_info, EventU32TypedSev, SeverityInfo);
const_from_fn!(const_from_low, EventU32TypedSev, SeverityLow);
const_from_fn!(const_from_medium, EventU32TypedSev, SeverityMedium);
const_from_fn!(const_from_high, EventU32TypedSev, SeverityHigh);
}
impl<SEVERITY: HasSeverity> EventU32TypedSev<SEVERITY> {
/// This is similar to [EventU32::new], but the severity is a type generic, which allows
/// distinct types for events with different severities
pub fn new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
let event = EventU32::new(SEVERITY::SEVERITY, group_id, unique_id)?;
Some(Self {
event,
phantom: PhantomData,
})
}
/// Const version of [Self::new], but panics on invalid group ID input values.
pub const fn const_new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
let event = EventU32::const_new(SEVERITY::SEVERITY, group_id, unique_id);
Self {
event,
phantom: PhantomData,
}
}
fn try_from_generic(expected: Severity, raw: u32) -> Result<Self, Severity> {
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
if severity != expected {
return Err(severity);
}
Ok(Self::const_new(
((raw >> 16) & 0x3FFF) as u16,
(raw & 0xFFFF) as u16,
))
}
}
impl From<u32> for EventU32 {
fn from(raw: u32) -> Self {
// Severity conversion from u8 should never fail
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
let group_id = ((raw >> 16) & 0x3FFF) as u16;
let unique_id = (raw & 0xFFFF) as u16;
// Sanitized input, should never fail
Self::const_new(severity, group_id, unique_id)
}
}
try_from_impls!(SeverityInfo, Severity::INFO, u32, EventU32TypedSev);
try_from_impls!(SeverityLow, Severity::LOW, u32, EventU32TypedSev);
try_from_impls!(SeverityMedium, Severity::MEDIUM, u32, EventU32TypedSev);
try_from_impls!(SeverityHigh, Severity::HIGH, u32, EventU32TypedSev);
impl UnsignedEnum for EventU32 {
fn size(&self) -> usize {
core::mem::size_of::<u32>()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.base.write_to_bytes(self.raw(), buf, self.size())
}
}
impl EcssEnumeration for EventU32 {
fn pfc(&self) -> u8 {
u32::BITS as u8
}
}
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU32TypedSev<SEVERITY> {
delegate!(to self.event {
fn size(&self) -> usize;
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
});
}
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> EcssEnumeration for EventU32TypedSev<SEVERITY> {
delegate!(to self.event {
fn pfc(&self) -> u8;
});
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EventU16 {
base: EventBase<u16, u8, u8>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EventU16TypedSev<SEVERITY> {
event: EventU16,
phantom: PhantomData<SEVERITY>,
}
impl<Severity: HasSeverity> AsRef<EventU16> for EventU16TypedSev<Severity> {
fn as_ref(&self) -> &EventU16 {
&self.event
}
}
impl<Severity: HasSeverity> AsMut<EventU16> for EventU16TypedSev<Severity> {
fn as_mut(&mut self) -> &mut EventU16 {
&mut self.event
}
}
impl EventU16 {
/// Generate a small event. The raw representation of a small event has 16 bits.
/// If the passed group ID is invalid (too large), [None] will be returned.
///
/// # Parameter
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 2 bits of the raw event ID
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 6 bits after the severity, so the maximum group ID is 63 (0x3F).
/// * `unique_id`: Each event has a unique 8 bit ID occupying the last 8 bits of the
/// raw event ID
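///
/// A minimal sketch, mirroring the [EventU32::new] example for the 16 bit variant:
///
/// ```ignore
/// use satrs_core::events::{EventU16, GenericEvent, Severity};
///
/// let event = EventU16::new(Severity::LOW, 3, 7).expect("invalid group ID");
/// assert_eq!(event.group_id(), 3);
/// assert_eq!(event.unique_id(), 7);
/// ```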
pub fn new(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
if group_id > (2u8.pow(6) - 1) {
return None;
}
Some(Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: Default::default(),
},
})
}
/// Const version of [Self::new], but panics on invalid group ID input values.
pub const fn const_new(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
if group_id > (2u8.pow(6) - 1) {
panic!("Group ID too large");
}
Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
}
}
const_from_fn!(const_from_info, EventU16TypedSev, SeverityInfo);
const_from_fn!(const_from_low, EventU16TypedSev, SeverityLow);
const_from_fn!(const_from_medium, EventU16TypedSev, SeverityMedium);
const_from_fn!(const_from_high, EventU16TypedSev, SeverityHigh);
}
impl<SEVERITY: HasSeverity> EventU16TypedSev<SEVERITY> {
/// This is similar to [EventU16::new], but the severity is a type generic, which allows
/// having distinct types for events with different severities.
pub fn new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
let event = EventU16::new(SEVERITY::SEVERITY, group_id, unique_id)?;
Some(Self {
event,
phantom: PhantomData,
})
}
/// Const version of [Self::new], but panics on invalid group ID input values.
pub const fn const_new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
let event = EventU16::const_new(SEVERITY::SEVERITY, group_id, unique_id);
Self {
event,
phantom: PhantomData,
}
}
fn try_from_generic(expected: Severity, raw: u16) -> Result<Self, Severity> {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
if severity != expected {
return Err(severity);
}
Ok(Self::const_new(
((raw >> 8) & 0x3F) as u8,
(raw & 0xFF) as u8,
))
}
}
impl_event_provider!(EventU16, EventU16TypedSev, u16, u8, u8);
impl UnsignedEnum for EventU16 {
fn size(&self) -> usize {
core::mem::size_of::<u16>()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.base.write_to_bytes(self.raw(), buf, self.size())
}
}
impl EcssEnumeration for EventU16 {
#[inline]
fn pfc(&self) -> u8 {
u16::BITS as u8
}
}
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU16TypedSev<SEVERITY> {
delegate!(to self.event {
fn size(&self) -> usize;
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
});
}
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> EcssEnumeration for EventU16TypedSev<SEVERITY> {
delegate!(to self.event {
fn pfc(&self) -> u8;
});
}
impl From<u16> for EventU16 {
fn from(raw: <Self as GenericEvent>::Raw) -> Self {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
let group_id = ((raw >> 8) & 0x3F) as u8;
let unique_id = (raw & 0xFF) as u8;
// Sanitized input, new call should never fail
Self::const_new(severity, group_id, unique_id)
}
}
try_from_impls!(SeverityInfo, Severity::INFO, u16, EventU16TypedSev);
try_from_impls!(SeverityLow, Severity::LOW, u16, EventU16TypedSev);
try_from_impls!(SeverityMedium, Severity::MEDIUM, u16, EventU16TypedSev);
try_from_impls!(SeverityHigh, Severity::HIGH, u16, EventU16TypedSev);
impl<Severity: HasSeverity> PartialEq<EventU32> for EventU32TypedSev<Severity> {
#[inline]
fn eq(&self, other: &EventU32) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU32TypedSev<Severity>> for EventU32 {
#[inline]
fn eq(&self, other: &EventU32TypedSev<Severity>) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU16> for EventU16TypedSev<Severity> {
#[inline]
fn eq(&self, other: &EventU16) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU16TypedSev<Severity>> for EventU16 {
#[inline]
fn eq(&self, other: &EventU16TypedSev<Severity>) -> bool {
self.raw() == other.raw()
}
}
#[cfg(test)]
mod tests {
use super::EventU32TypedSev;
use super::*;
use spacepackets::ByteConversionError;
use std::mem::size_of;
fn assert_size<T>(_: T, val: usize) {
assert_eq!(size_of::<T>(), val);
}
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::const_new(0, 0);
const INFO_EVENT_SMALL: EventU16TypedSev<SeverityInfo> = EventU16TypedSev::const_new(0, 0);
const HIGH_SEV_EVENT: EventU32TypedSev<SeverityHigh> =
EventU32TypedSev::const_new(0x3FFF, 0xFFFF);
const HIGH_SEV_EVENT_SMALL: EventU16TypedSev<SeverityHigh> =
EventU16TypedSev::const_new(0x3F, 0xff);
/// The fact that this compiles is a test in itself.
const INFO_REDUCED: EventU32 = EventU32::const_from_info(INFO_EVENT);
#[test]
fn test_normal_from_raw_conversion() {
let conv_from_raw = EventU32TypedSev::<SeverityInfo>::try_from(INFO_EVENT.raw())
.expect("Creating typed EventU32 failed");
assert_eq!(conv_from_raw, INFO_EVENT);
}
#[test]
fn test_small_from_raw_conversion() {
let conv_from_raw = EventU16TypedSev::<SeverityInfo>::try_from(INFO_EVENT_SMALL.raw())
.expect("Creating typed EventU16 failed");
assert_eq!(conv_from_raw, INFO_EVENT_SMALL);
}
#[test]
fn verify_normal_size() {
assert_size(INFO_EVENT.raw(), 4)
}
#[test]
fn verify_small_size() {
assert_size(INFO_EVENT_SMALL.raw(), 2)
}
#[test]
fn test_normal_event_getters() {
assert_eq!(INFO_EVENT.severity(), Severity::INFO);
assert_eq!(INFO_EVENT.unique_id(), 0);
assert_eq!(INFO_EVENT.group_id(), 0);
let raw_event = INFO_EVENT.raw();
assert_eq!(raw_event, 0x00000000);
}
#[test]
fn test_small_event_getters() {
assert_eq!(INFO_EVENT_SMALL.severity(), Severity::INFO);
assert_eq!(INFO_EVENT_SMALL.unique_id(), 0);
assert_eq!(INFO_EVENT_SMALL.group_id(), 0);
let raw_event = INFO_EVENT_SMALL.raw();
assert_eq!(raw_event, 0x00000000);
}
#[test]
fn all_ones_event_regular() {
assert_eq!(HIGH_SEV_EVENT.severity(), Severity::HIGH);
assert_eq!(HIGH_SEV_EVENT.group_id(), 0x3FFF);
assert_eq!(HIGH_SEV_EVENT.unique_id(), 0xFFFF);
let raw_event = HIGH_SEV_EVENT.raw();
assert_eq!(raw_event, 0xFFFFFFFF);
}
#[test]
fn all_ones_event_small() {
assert_eq!(HIGH_SEV_EVENT_SMALL.severity(), Severity::HIGH);
assert_eq!(HIGH_SEV_EVENT_SMALL.group_id(), 0x3F);
assert_eq!(HIGH_SEV_EVENT_SMALL.unique_id(), 0xFF);
let raw_event = HIGH_SEV_EVENT_SMALL.raw();
assert_eq!(raw_event, 0xFFFF);
}
#[test]
fn invalid_group_id_normal() {
assert!(EventU32TypedSev::<SeverityMedium>::new(2_u16.pow(14), 0).is_none());
}
#[test]
fn invalid_group_id_small() {
assert!(EventU16TypedSev::<SeverityMedium>::new(2_u8.pow(6), 0).is_none());
}
#[test]
fn regular_new() {
assert_eq!(
EventU32TypedSev::<SeverityInfo>::new(0, 0).expect("Creating regular event failed"),
INFO_EVENT
);
}
#[test]
fn small_new() {
assert_eq!(
EventU16TypedSev::<SeverityInfo>::new(0, 0).expect("Creating regular event failed"),
INFO_EVENT_SMALL
);
}
#[test]
fn as_largest_type() {
let event_raw = HIGH_SEV_EVENT.raw_as_largest_type();
assert_size(event_raw, 4);
assert_eq!(event_raw, 0xFFFFFFFF);
}
#[test]
fn as_largest_type_for_small_event() {
let event_raw = HIGH_SEV_EVENT_SMALL.raw_as_largest_type();
assert_size(event_raw, 4);
assert_eq!(event_raw, 0xFFFF);
}
#[test]
fn as_largest_group_id() {
let group_id = HIGH_SEV_EVENT.group_id_as_largest_type();
assert_size(group_id, 2);
assert_eq!(group_id, 0x3FFF);
}
#[test]
fn as_largest_group_id_small_event() {
let group_id = HIGH_SEV_EVENT_SMALL.group_id_as_largest_type();
assert_size(group_id, 2);
assert_eq!(group_id, 0x3F);
}
#[test]
fn write_to_buf() {
let mut buf: [u8; 4] = [0; 4];
assert!(HIGH_SEV_EVENT.write_to_be_bytes(&mut buf).is_ok());
let val_from_raw = u32::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFFFFFF);
}
#[test]
fn write_to_buf_small() {
let mut buf: [u8; 2] = [0; 2];
assert!(HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf).is_ok());
let val_from_raw = u16::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFF);
}
#[test]
fn write_to_buf_insufficient_buf() {
let mut buf: [u8; 3] = [0; 3];
let err = HIGH_SEV_EVENT.write_to_be_bytes(&mut buf);
assert!(err.is_err());
let err = err.unwrap_err();
if let ByteConversionError::ToSliceTooSmall(missmatch) = err {
assert_eq!(missmatch.expected, 4);
assert_eq!(missmatch.found, 3);
} else {
panic!("Unexpected error {:?}", err);
}
}
#[test]
fn write_to_buf_small_insufficient_buf() {
let mut buf: [u8; 1] = [0; 1];
let err = HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf);
assert!(err.is_err());
let err = err.unwrap_err();
if let ByteConversionError::ToSliceTooSmall(missmatch) = err {
assert_eq!(missmatch.expected, 2);
assert_eq!(missmatch.found, 1);
} else {
panic!("Unexpected error {:?}", err);
}
}
#[test]
fn severity_from_invalid_raw_val() {
let invalid = 0xFF;
assert!(Severity::try_from(invalid).is_err());
let invalid = Severity::HIGH as u8 + 1;
assert!(Severity::try_from(invalid).is_err());
}
#[test]
fn reduction() {
let event = EventU32TypedSev::<SeverityInfo>::const_new(1, 1);
let raw = event.raw();
let reduced: EventU32 = event.into();
assert_eq!(reduced.group_id(), 1);
assert_eq!(reduced.unique_id(), 1);
assert_eq!(raw, reduced.raw());
}
#[test]
fn const_reduction() {
assert_eq!(INFO_REDUCED.raw(), INFO_EVENT.raw());
}
}


@@ -1,5 +1,5 @@
//! UDP server helper components
use crate::tmtc::ReceivesTc;
use crate::tmtc::{ReceivesTc, ReceivesTcCore};
use std::boxed::Box;
use std::io::{Error, ErrorKind};
use std::net::{SocketAddr, ToSocketAddrs, UdpSocket};
@@ -12,20 +12,21 @@ use std::vec::Vec;
/// be declared upfront. This avoids dynamic allocation during run-time. The user can specify a TC
/// receiver in form of a special trait object which implements [ReceivesTc]. Please note that the
/// receiver should copy out the received data if the data is required past the
/// [ReceivesTc::pass_tc] call.
/// [ReceivesTcCore::pass_tc] call.
///
/// # Examples
///
/// ```
/// use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
/// use fsrc_core::hal::host::udp_server::UdpTcServer;
/// use fsrc_core::tmtc::ReceivesTc;
/// use spacepackets::ecss::SerializablePusPacket;
/// use satrs_core::hal::host::udp_server::UdpTcServer;
/// use satrs_core::tmtc::{ReceivesTc, ReceivesTcCore};
/// use spacepackets::SpHeader;
/// use spacepackets::tc::PusTc;
///
/// #[derive (Default)]
/// struct PingReceiver {}
/// impl ReceivesTc for PingReceiver {
/// impl ReceivesTcCore for PingReceiver {
/// type Error = ();
/// fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
/// assert_eq!(tc_raw.len(), 13);
@@ -38,10 +39,10 @@ use std::vec::Vec;
/// let ping_receiver = PingReceiver::default();
/// let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver))
/// .expect("Creating UDP TMTC server failed");
/// let mut sph = SpHeader::tc(0x02, 0, 0).unwrap();
/// let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
/// let pus_tc = PusTc::new_simple(&mut sph, 17, 1, None, true);
/// let len = pus_tc
/// .write_to(&mut buf)
/// .write_to_bytes(&mut buf)
/// .expect("Error writing PUS TC packet");
/// assert_eq!(len, 13);
/// let client = UdpSocket::bind("127.0.0.1:7778").expect("Connecting to UDP server failed");
@@ -89,6 +90,14 @@ impl<E: PartialEq> PartialEq for ReceiveResult<E> {
impl<E: Eq + PartialEq> Eq for ReceiveResult<E> {}
impl<E: 'static> ReceivesTcCore for UdpTcServer<E> {
type Error = E;
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
self.tc_receiver.pass_tc(tc_raw)
}
}
impl<E: 'static> UdpTcServer<E> {
pub fn new<A: ToSocketAddrs>(
addr: A,
@@ -132,7 +141,8 @@ impl<E: 'static> UdpTcServer<E> {
#[cfg(test)]
mod tests {
use crate::hal::host::udp_server::{ReceiveResult, UdpTcServer};
use crate::tmtc::ReceivesTc;
use crate::tmtc::ReceivesTcCore;
use spacepackets::ecss::SerializablePusPacket;
use spacepackets::tc::PusTc;
use spacepackets::SpHeader;
use std::boxed::Box;
@@ -140,12 +150,14 @@ mod tests {
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::vec::Vec;
fn is_send<T: Send>(_: &T) {}
#[derive(Default)]
struct PingReceiver {
pub sent_cmds: VecDeque<Vec<u8>>,
}
impl ReceivesTc for PingReceiver {
impl ReceivesTcCore for PingReceiver {
type Error = ();
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
@@ -161,12 +173,14 @@ mod tests {
let mut buf = [0; 32];
let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7777);
let ping_receiver = PingReceiver::default();
is_send(&ping_receiver);
let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver))
.expect("Creating UDP TMTC server failed");
let mut sph = SpHeader::tc(0x02, 0, 0).unwrap();
is_send(&udp_tc_server);
let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
let pus_tc = PusTc::new_simple(&mut sph, 17, 1, None, true);
let len = pus_tc
.write_to(&mut buf)
.write_to_bytes(&mut buf)
.expect("Error writing PUS TC packet");
let client = UdpSocket::bind("127.0.0.1:7778").expect("Connecting to UDP server failed");
client


@@ -1,3 +1,4 @@
//! # Hardware Abstraction Layer module
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod host;

16
satrs-core/src/hk.rs Normal file

@@ -0,0 +1,16 @@
pub type CollectionIntervalFactor = u32;
pub type UniqueId = u32;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum HkRequest {
OneShot(UniqueId),
Enable(UniqueId),
Disable(UniqueId),
ModifyCollectionInterval(UniqueId, CollectionIntervalFactor),
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct TargetedHkRequest {
target: u32,
hk_request: HkRequest,
}

49
satrs-core/src/lib.rs Normal file

@@ -0,0 +1,49 @@
//! # Core components of the sat-rs framework
//!
//! You can find more information about the sat-rs framework on the
//! [homepage](https://egit.irs.uni-stuttgart.de/rust/sat-rs).
//!
//! ## Overview
//!
//! The core modules of this crate include
//!
//! - The [event manager][event_man] module which provides a publish and subscribe
//! mechanism to route events.
//! - The [pus] module which provides special support for projects using
//! the [ECSS PUS C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
#![no_std]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
extern crate downcast_rs;
#[cfg(any(feature = "std", test))]
extern crate std;
pub mod error;
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod event_man;
pub mod events;
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod executable;
pub mod hal;
pub mod hk;
pub mod mode;
pub mod objects;
pub mod params;
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod pool;
pub mod power;
pub mod pus;
pub mod request;
pub mod res_code;
pub mod seq_count;
pub mod tmtc;
pub use spacepackets;
// Generic channel ID type.
pub type ChannelId = u32;

93
satrs-core/src/mode.rs Normal file

@@ -0,0 +1,93 @@
use crate::tmtc::TargetId;
use core::mem::size_of;
use serde::{Deserialize, Serialize};
use spacepackets::{ByteConversionError, SizeMissmatch};
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ModeAndSubmode {
mode: u32,
submode: u16,
}
impl ModeAndSubmode {
pub const fn new_mode_only(mode: u32) -> Self {
Self { mode, submode: 0 }
}
pub const fn new(mode: u32, submode: u16) -> Self {
Self { mode, submode }
}
pub fn raw_len() -> usize {
size_of::<u32>() + size_of::<u16>()
}
pub fn from_be_bytes(buf: &[u8]) -> Result<Self, ByteConversionError> {
if buf.len() < 6 {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
expected: 6,
found: buf.len(),
}));
}
Ok(Self {
mode: u32::from_be_bytes(buf[0..4].try_into().unwrap()),
submode: u16::from_be_bytes(buf[4..6].try_into().unwrap()),
})
}
pub fn mode(&self) -> u32 {
self.mode
}
pub fn submode(&self) -> u16 {
self.submode
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ModeCommand {
pub address: TargetId,
pub mode_submode: ModeAndSubmode,
}
impl ModeCommand {
pub const fn new(address: TargetId, mode_submode: ModeAndSubmode) -> Self {
Self {
address,
mode_submode,
}
}
pub fn address(&self) -> TargetId {
self.address
}
pub fn mode_submode(&self) -> ModeAndSubmode {
self.mode_submode
}
pub fn mode(&self) -> u32 {
self.mode_submode.mode
}
pub fn submode(&self) -> u16 {
self.mode_submode.submode
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum ModeRequest {
SetMode(ModeAndSubmode),
ReadMode,
AnnounceMode,
AnnounceModeRecursive,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct TargetedModeRequest {
target_id: TargetId,
mode_request: ModeRequest,
}


@@ -10,7 +10,7 @@
//! ```rust
//! use std::any::Any;
//! use std::error::Error;
//! use fsrc_core::objects::{ManagedSystemObject, ObjectId, ObjectManager, SystemObject};
//! use satrs_core::objects::{ManagedSystemObject, ObjectId, ObjectManager, SystemObject};
//!
//! struct ExampleSysObj {
//! id: ObjectId,
@@ -51,87 +51,99 @@
//! assert_eq!(example_obj.id, obj_id);
//! assert_eq!(example_obj.dummy, 42);
//! ```
use crate::tmtc::TargetId;
#[cfg(feature = "alloc")]
use alloc::boxed::Box;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
#[cfg(feature = "alloc")]
use downcast_rs::Downcast;
#[cfg(feature = "alloc")]
use hashbrown::HashMap;
#[cfg(feature = "std")]
use std::error::Error;
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
pub struct ObjectId {
pub id: u32,
pub id: TargetId,
pub name: &'static str,
}
/// Each object which is stored inside the [object manager][ObjectManager] needs to implement
/// this trait.
pub trait SystemObject: Downcast {
type Error;
fn get_object_id(&self) -> &ObjectId;
fn initialize(&mut self) -> Result<(), Self::Error>;
}
downcast_rs::impl_downcast!(SystemObject assoc Error);
pub trait ManagedSystemObject: SystemObject + Send {}
downcast_rs::impl_downcast!(ManagedSystemObject assoc Error);
/// Helper module to manage multiple [ManagedSystemObjects][ManagedSystemObject] by mapping them
/// using an [object ID][ObjectId]
#[cfg(feature = "alloc")]
pub struct ObjectManager<E> {
obj_map: HashMap<ObjectId, Box<dyn ManagedSystemObject<Error = E>>>,
}
pub mod alloc_mod {
use super::*;
#[cfg(feature = "alloc")]
impl<E: 'static> Default for ObjectManager<E> {
fn default() -> Self {
Self::new()
/// Each object which is stored inside the [object manager][ObjectManager] needs to implement
/// this trait.
pub trait SystemObject: Downcast {
type Error;
fn get_object_id(&self) -> &ObjectId;
fn initialize(&mut self) -> Result<(), Self::Error>;
}
}
downcast_rs::impl_downcast!(SystemObject assoc Error);
#[cfg(feature = "alloc")]
impl<E: 'static> ObjectManager<E> {
pub fn new() -> Self {
ObjectManager {
obj_map: HashMap::new(),
pub trait ManagedSystemObject: SystemObject + Send {}
downcast_rs::impl_downcast!(ManagedSystemObject assoc Error);
/// Helper module to manage multiple [ManagedSystemObjects][ManagedSystemObject] by mapping them
/// using an [object ID][ObjectId]
#[cfg(feature = "alloc")]
pub struct ObjectManager<E> {
obj_map: HashMap<ObjectId, Box<dyn ManagedSystemObject<Error = E>>>,
}
#[cfg(feature = "alloc")]
impl<E: 'static> Default for ObjectManager<E> {
fn default() -> Self {
Self::new()
}
}
pub fn insert(&mut self, sys_obj: Box<dyn ManagedSystemObject<Error = E>>) -> bool {
let obj_id = sys_obj.get_object_id();
if self.obj_map.contains_key(obj_id) {
return false;
}
self.obj_map.insert(*obj_id, sys_obj).is_none()
}
/// Initializes all System Objects in the hash map and returns the number of successful
/// initializations
pub fn initialize(&mut self) -> Result<u32, Box<dyn Error>> {
let mut init_success = 0;
for val in self.obj_map.values_mut() {
if val.initialize().is_ok() {
init_success += 1
#[cfg(feature = "alloc")]
impl<E: 'static> ObjectManager<E> {
pub fn new() -> Self {
ObjectManager {
obj_map: HashMap::new(),
}
}
Ok(init_success)
}
pub fn insert(&mut self, sys_obj: Box<dyn ManagedSystemObject<Error = E>>) -> bool {
let obj_id = sys_obj.get_object_id();
if self.obj_map.contains_key(obj_id) {
return false;
}
self.obj_map.insert(*obj_id, sys_obj).is_none()
}
/// Retrieve a reference to an object stored inside the manager. The type to retrieve needs to
/// be explicitly passed as a generic parameter or specified on the left hand side of the
/// expression.
pub fn get_ref<T: ManagedSystemObject<Error = E>>(&self, key: &ObjectId) -> Option<&T> {
self.obj_map.get(key).and_then(|o| o.downcast_ref::<T>())
}
/// Initializes all System Objects in the hash map and returns the number of successful
/// initializations
pub fn initialize(&mut self) -> Result<u32, Box<dyn Error>> {
let mut init_success = 0;
for val in self.obj_map.values_mut() {
if val.initialize().is_ok() {
init_success += 1
}
}
Ok(init_success)
}
/// Retrieve a mutable reference to an object stored inside the manager. The type to retrieve
/// needs to be explicitly passed as a generic parameter or specified on the left hand side
/// of the expression.
pub fn get_mut<T: ManagedSystemObject<Error = E>>(&mut self, key: &ObjectId) -> Option<&mut T> {
self.obj_map
.get_mut(key)
.and_then(|o| o.downcast_mut::<T>())
/// Retrieve a reference to an object stored inside the manager. The type to retrieve needs to
/// be explicitly passed as a generic parameter or specified on the left hand side of the
/// expression.
pub fn get_ref<T: ManagedSystemObject<Error = E>>(&self, key: &ObjectId) -> Option<&T> {
self.obj_map.get(key).and_then(|o| o.downcast_ref::<T>())
}
/// Retrieve a mutable reference to an object stored inside the manager. The type to retrieve
/// needs to be explicitly passed as a generic parameter or specified on the left hand side
/// of the expression.
pub fn get_mut<T: ManagedSystemObject<Error = E>>(
&mut self,
key: &ObjectId,
) -> Option<&mut T> {
self.obj_map
.get_mut(key)
.and_then(|o| o.downcast_mut::<T>())
}
}
}

680
satrs-core/src/params.rs Normal file

@@ -0,0 +1,680 @@
//! Parameter types and enums.
//!
//! This module contains various helper types.
//!
//! # Primitive Parameter Wrappers and Enumeration
//!
//! This module includes wrappers for primitive Rust types using the newtype pattern.
//! This was also done for pairs and triplets of these primitive types.
//! The [WritableToBeBytes] trait was implemented for all of those types as well, which makes it
//! easy to convert them into a network-friendly raw byte format. The [ParamsRaw] enumeration
//! groups all newtypes and implements the [WritableToBeBytes] trait itself.
//!
//! ## Example for primitive type wrapper
//!
//! ```
//! use satrs_core::params::{ParamsRaw, ToBeBytes, U32Pair, WritableToBeBytes};
//!
//! let u32_pair = U32Pair(0x1010, 25);
//! assert_eq!(u32_pair.0, 0x1010);
//! assert_eq!(u32_pair.1, 25);
//! // Convert to raw stream
//! let raw_buf = u32_pair.to_be_bytes();
//! assert_eq!(raw_buf, [0, 0, 0x10, 0x10, 0, 0, 0, 25]);
//!
//! // Convert to enum variant
//! let params_raw: ParamsRaw = u32_pair.into();
//! assert_eq!(params_raw, (0x1010_u32, 25_u32).into());
//!
//! // Convert to stream using the enum variant
//! let mut other_raw_buf: [u8; 8] = [0; 8];
//! params_raw.write_to_be_bytes(&mut other_raw_buf).expect("Writing parameter to buffer failed");
//! assert_eq!(other_raw_buf, [0, 0, 0x10, 0x10, 0, 0, 0, 25]);
//!
//! // Create a pair from a raw stream
//! let u32_pair_from_stream: U32Pair = raw_buf.as_slice().try_into().unwrap();
//! assert_eq!(u32_pair_from_stream.0, 0x1010);
//! assert_eq!(u32_pair_from_stream.1, 25);
//! ```
//!
//! # Generic Parameter Enumeration
//!
//! The module also contains generic parameter enumerations.
//! This includes the [ParamsHeapless] enumeration for contained values which do not require heap
//! allocation, and the [Params] enumeration, which wraps [ParamsHeapless] and some additional
//! types which require [alloc] support but allow for more flexibility.
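//!
//! ## Example for the generic enumerations
//!
//! A minimal sketch of the [From] conversions provided by this module, assuming the `alloc`
//! feature is enabled:
//!
//! ```ignore
//! use satrs_core::params::Params;
//!
//! // Both conversions copy the passed data into an owned [Params] variant
//! let from_str: Params = "hello".into();
//! assert!(matches!(from_str, Params::String(_)));
//! let from_slice: Params = [0_u8, 1, 2, 3].as_slice().into();
//! assert!(matches!(from_slice, Params::Vec(_)));
//! ```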
#[cfg(feature = "alloc")]
use crate::pool::StoreAddr;
#[cfg(feature = "alloc")]
use alloc::string::{String, ToString};
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use core::fmt::Debug;
use core::mem::size_of;
use paste::paste;
use spacepackets::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU64, EcssEnumU8};
use spacepackets::util::UnsignedEnum;
use spacepackets::ByteConversionError;
use spacepackets::SizeMissmatch;
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
pub use spacepackets::util::ToBeBytes;
/// Generic trait for objects which can be converted into a raw network (big endian) byte
/// format.
pub trait WritableToBeBytes {
fn raw_len(&self) -> usize;
/// Writes the object to a raw buffer in network endianness (big)
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
}
macro_rules! param_to_be_bytes_impl {
($Newtype: ident) => {
impl WritableToBeBytes for $Newtype {
#[inline]
fn raw_len(&self) -> usize {
size_of::<<Self as ToBeBytes>::ByteArray>()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
let raw_len = self.raw_len();
if buf.len() < raw_len {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: raw_len,
}));
}
buf[0..raw_len].copy_from_slice(&self.to_be_bytes());
Ok(raw_len)
}
}
};
}
macro_rules! primitive_newtypes_with_eq {
($($ty: ty,)+) => {
$(
paste! {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct [<$ty:upper>](pub $ty);
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct [<$ty:upper Pair>](pub $ty, pub $ty);
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct [<$ty:upper Triplet>](pub $ty, pub $ty, pub $ty);
param_to_be_bytes_impl!([<$ty:upper>]);
param_to_be_bytes_impl!([<$ty:upper Pair>]);
param_to_be_bytes_impl!([<$ty:upper Triplet>]);
impl From<$ty> for [<$ty:upper>] {
fn from(v: $ty) -> Self {
Self(v)
}
}
impl From<($ty, $ty)> for [<$ty:upper Pair>] {
fn from(v: ($ty, $ty)) -> Self {
Self(v.0, v.1)
}
}
impl From<($ty, $ty, $ty)> for [<$ty:upper Triplet>] {
fn from(v: ($ty, $ty, $ty)) -> Self {
Self(v.0, v.1, v.2)
}
}
}
)+
}
}
macro_rules! primitive_newtypes {
($($ty: ty,)+) => {
$(
paste! {
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct [<$ty:upper>](pub $ty);
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct [<$ty:upper Pair>](pub $ty, pub $ty);
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct [<$ty:upper Triplet>](pub $ty, pub $ty, pub $ty);
param_to_be_bytes_impl!([<$ty:upper>]);
param_to_be_bytes_impl!([<$ty:upper Pair>]);
param_to_be_bytes_impl!([<$ty:upper Triplet>]);
impl From<$ty> for [<$ty:upper>] {
fn from(v: $ty) -> Self {
Self(v)
}
}
impl From<($ty, $ty)> for [<$ty:upper Pair>] {
fn from(v: ($ty, $ty)) -> Self {
Self(v.0, v.1)
}
}
impl From<($ty, $ty, $ty)> for [<$ty:upper Triplet>] {
fn from(v: ($ty, $ty, $ty)) -> Self {
Self(v.0, v.1, v.2)
}
}
}
)+
}
}
primitive_newtypes_with_eq!(u8, u16, u32, u64, i8, i16, i32, i64,);
primitive_newtypes!(f32, f64,);
macro_rules! scalar_byte_conversions_impl {
($($ty: ty,)+) => {
$(
paste! {
impl ToBeBytes for [<$ty:upper>] {
type ByteArray = [u8; size_of::<$ty>()];
fn written_len(&self) -> usize {
size_of::<Self::ByteArray>()
}
fn to_be_bytes(&self) -> Self::ByteArray {
self.0.to_be_bytes()
}
}
impl TryFrom<&[u8]> for [<$ty:upper>] {
type Error = ByteConversionError;
fn try_from(v: &[u8]) -> Result<Self, Self::Error> {
if v.len() < size_of::<$ty>() {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
expected: size_of::<$ty>(),
found: v.len()
}));
}
Ok([<$ty:upper>]($ty::from_be_bytes(v[0..size_of::<$ty>()].try_into().unwrap())))
}
}
}
)+
}
}
macro_rules! pair_byte_conversions_impl {
($($ty: ty,)+) => {
$(
paste! {
impl ToBeBytes for [<$ty:upper Pair>] {
type ByteArray = [u8; size_of::<$ty>() * 2];
fn written_len(&self) -> usize {
size_of::<Self::ByteArray>()
}
fn to_be_bytes(&self) -> Self::ByteArray {
let mut array = [0; size_of::<$ty>() * 2];
array[0..size_of::<$ty>()].copy_from_slice(&self.0.to_be_bytes());
array[
size_of::<$ty>()..2 * size_of::<$ty>()
].copy_from_slice(&self.1.to_be_bytes());
array
}
}
impl TryFrom<&[u8]> for [<$ty:upper Pair>] {
type Error = ByteConversionError;
fn try_from(v: &[u8]) -> Result<Self, Self::Error> {
if v.len() < 2 * size_of::<$ty>() {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
expected: 2 * size_of::<$ty>(),
found: v.len()
}));
}
Ok([<$ty:upper Pair>](
$ty::from_be_bytes(v[0..size_of::<$ty>()].try_into().unwrap()),
$ty::from_be_bytes(v[size_of::<$ty>()..2 * size_of::<$ty>()].try_into().unwrap())
))
}
}
}
)+
}
}
macro_rules! triplet_to_be_bytes_impl {
($($ty: ty,)+) => {
$(
paste! {
impl ToBeBytes for [<$ty:upper Triplet>] {
type ByteArray = [u8; size_of::<$ty>() * 3];
fn written_len(&self) -> usize {
size_of::<Self::ByteArray>()
}
fn to_be_bytes(&self) -> Self::ByteArray {
let mut array = [0; size_of::<$ty>() * 3];
array[0..size_of::<$ty>()].copy_from_slice(&self.0.to_be_bytes());
array[
size_of::<$ty>()..2 * size_of::<$ty>()
].copy_from_slice(&self.1.to_be_bytes());
array[
2 * size_of::<$ty>()..3 * size_of::<$ty>()
].copy_from_slice(&self.2.to_be_bytes());
array
}
}
impl TryFrom<&[u8]> for [<$ty:upper Triplet>] {
type Error = ByteConversionError;
fn try_from(v: &[u8]) -> Result<Self, Self::Error> {
if v.len() < 3 * size_of::<$ty>() {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
expected: 3 * size_of::<$ty>(),
found: v.len()
}));
}
Ok([<$ty:upper Triplet>](
$ty::from_be_bytes(v[0..size_of::<$ty>()].try_into().unwrap()),
$ty::from_be_bytes(v[size_of::<$ty>()..2 * size_of::<$ty>()].try_into().unwrap()),
$ty::from_be_bytes(v[2 * size_of::<$ty>()..3 * size_of::<$ty>()].try_into().unwrap())
))
}
}
}
)+
}
}
scalar_byte_conversions_impl!(u8, u16, u32, u64, i8, i16, i32, i64, f32, f64,);
impl ToBeBytes for U8Pair {
type ByteArray = [u8; 2];
fn written_len(&self) -> usize {
size_of::<Self::ByteArray>()
}
fn to_be_bytes(&self) -> Self::ByteArray {
let mut array = [0; 2];
array[0] = self.0;
array[1] = self.1;
array
}
}
impl ToBeBytes for I8Pair {
type ByteArray = [u8; 2];
fn written_len(&self) -> usize {
size_of::<Self::ByteArray>()
}
fn to_be_bytes(&self) -> Self::ByteArray {
let mut array = [0; 2];
array[0] = self.0 as u8;
array[1] = self.1 as u8;
array
}
}
impl ToBeBytes for U8Triplet {
type ByteArray = [u8; 3];
fn written_len(&self) -> usize {
size_of::<Self::ByteArray>()
}
fn to_be_bytes(&self) -> Self::ByteArray {
let mut array = [0; 3];
array[0] = self.0;
array[1] = self.1;
array[2] = self.2;
array
}
}
impl ToBeBytes for I8Triplet {
type ByteArray = [u8; 3];
fn written_len(&self) -> usize {
size_of::<Self::ByteArray>()
}
fn to_be_bytes(&self) -> Self::ByteArray {
let mut array = [0; 3];
array[0] = self.0 as u8;
array[1] = self.1 as u8;
array[2] = self.2 as u8;
array
}
}
pair_byte_conversions_impl!(u16, u32, u64, i16, i32, i64, f32, f64,);
triplet_to_be_bytes_impl!(u16, u32, u64, i16, i32, i64, f32, f64,);
/// Generic enumeration for additional parameters which only consist of primitive data types.
///
/// All contained variants and the enum itself implement the [WritableToBeBytes] trait, which
/// makes it easy to convert them into a network-friendly format.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum ParamsRaw {
U8(U8),
U8Pair(U8Pair),
U8Triplet(U8Triplet),
I8(I8),
I8Pair(I8Pair),
I8Triplet(I8Triplet),
U16(U16),
U16Pair(U16Pair),
U16Triplet(U16Triplet),
I16(I16),
I16Pair(I16Pair),
I16Triplet(I16Triplet),
U32(U32),
U32Pair(U32Pair),
U32Triplet(U32Triplet),
I32(I32),
I32Pair(I32Pair),
I32Triplet(I32Triplet),
F32(F32),
F32Pair(F32Pair),
F32Triplet(F32Triplet),
U64(U64),
I64(I64),
F64(F64),
}
impl WritableToBeBytes for ParamsRaw {
fn raw_len(&self) -> usize {
match self {
ParamsRaw::U8(v) => v.raw_len(),
ParamsRaw::U8Pair(v) => v.raw_len(),
ParamsRaw::U8Triplet(v) => v.raw_len(),
ParamsRaw::I8(v) => v.raw_len(),
ParamsRaw::I8Pair(v) => v.raw_len(),
ParamsRaw::I8Triplet(v) => v.raw_len(),
ParamsRaw::U16(v) => v.raw_len(),
ParamsRaw::U16Pair(v) => v.raw_len(),
ParamsRaw::U16Triplet(v) => v.raw_len(),
ParamsRaw::I16(v) => v.raw_len(),
ParamsRaw::I16Pair(v) => v.raw_len(),
ParamsRaw::I16Triplet(v) => v.raw_len(),
ParamsRaw::U32(v) => v.raw_len(),
ParamsRaw::U32Pair(v) => v.raw_len(),
ParamsRaw::U32Triplet(v) => v.raw_len(),
ParamsRaw::I32(v) => v.raw_len(),
ParamsRaw::I32Pair(v) => v.raw_len(),
ParamsRaw::I32Triplet(v) => v.raw_len(),
ParamsRaw::F32(v) => v.raw_len(),
ParamsRaw::F32Pair(v) => v.raw_len(),
ParamsRaw::F32Triplet(v) => v.raw_len(),
ParamsRaw::U64(v) => v.raw_len(),
ParamsRaw::I64(v) => v.raw_len(),
ParamsRaw::F64(v) => v.raw_len(),
}
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
match self {
ParamsRaw::U8(v) => v.write_to_be_bytes(buf),
ParamsRaw::U8Pair(v) => v.write_to_be_bytes(buf),
ParamsRaw::U8Triplet(v) => v.write_to_be_bytes(buf),
ParamsRaw::I8(v) => v.write_to_be_bytes(buf),
ParamsRaw::I8Pair(v) => v.write_to_be_bytes(buf),
ParamsRaw::I8Triplet(v) => v.write_to_be_bytes(buf),
ParamsRaw::U16(v) => v.write_to_be_bytes(buf),
ParamsRaw::U16Pair(v) => v.write_to_be_bytes(buf),
ParamsRaw::U16Triplet(v) => v.write_to_be_bytes(buf),
ParamsRaw::I16(v) => v.write_to_be_bytes(buf),
ParamsRaw::I16Pair(v) => v.write_to_be_bytes(buf),
ParamsRaw::I16Triplet(v) => v.write_to_be_bytes(buf),
ParamsRaw::U32(v) => v.write_to_be_bytes(buf),
ParamsRaw::U32Pair(v) => v.write_to_be_bytes(buf),
ParamsRaw::U32Triplet(v) => v.write_to_be_bytes(buf),
ParamsRaw::I32(v) => v.write_to_be_bytes(buf),
ParamsRaw::I32Pair(v) => v.write_to_be_bytes(buf),
ParamsRaw::I32Triplet(v) => v.write_to_be_bytes(buf),
ParamsRaw::F32(v) => v.write_to_be_bytes(buf),
ParamsRaw::F32Pair(v) => v.write_to_be_bytes(buf),
ParamsRaw::F32Triplet(v) => v.write_to_be_bytes(buf),
ParamsRaw::U64(v) => v.write_to_be_bytes(buf),
ParamsRaw::I64(v) => v.write_to_be_bytes(buf),
ParamsRaw::F64(v) => v.write_to_be_bytes(buf),
}
}
}
macro_rules! params_raw_from_newtype {
($($newtype: ident,)+) => {
$(
impl From<$newtype> for ParamsRaw {
fn from(v: $newtype) -> Self {
Self::$newtype(v)
}
}
)+
}
}
params_raw_from_newtype!(
U8, U8Pair, U8Triplet, U16, U16Pair, U16Triplet, U32, U32Pair, U32Triplet, I8, I8Pair,
I8Triplet, I16, I16Pair, I16Triplet, I32, I32Pair, I32Triplet, F32, F32Pair, F32Triplet, U64,
I64, F64,
);
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum EcssEnumParams {
U8(EcssEnumU8),
U16(EcssEnumU16),
U32(EcssEnumU32),
U64(EcssEnumU64),
}
macro_rules! writable_as_be_bytes_ecss_enum_impl {
($EnumIdent: ident) => {
impl WritableToBeBytes for $EnumIdent {
fn raw_len(&self) -> usize {
self.size()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
<Self as UnsignedEnum>::write_to_be_bytes(self, buf).map(|_| self.raw_len())
}
}
};
}
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU8);
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU16);
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU32);
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU64);
impl WritableToBeBytes for EcssEnumParams {
fn raw_len(&self) -> usize {
match self {
EcssEnumParams::U8(e) => e.raw_len(),
EcssEnumParams::U16(e) => e.raw_len(),
EcssEnumParams::U32(e) => e.raw_len(),
EcssEnumParams::U64(e) => e.raw_len(),
}
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
match self {
EcssEnumParams::U8(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
EcssEnumParams::U16(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
EcssEnumParams::U32(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
EcssEnumParams::U64(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
}
}
}
/// Generic enumeration for parameters which do not rely on heap allocations.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum ParamsHeapless {
Raw(ParamsRaw),
EcssEnum(EcssEnumParams),
}
macro_rules! from_conversions_for_raw {
($(($raw_ty: ty, $TargetPath: path),)+) => {
$(
impl From<$raw_ty> for ParamsRaw {
fn from(val: $raw_ty) -> Self {
$TargetPath(val.into())
}
}
impl From<$raw_ty> for ParamsHeapless {
fn from(val: $raw_ty) -> Self {
ParamsHeapless::Raw(val.into())
}
}
)+
};
}
from_conversions_for_raw!(
(u8, Self::U8),
((u8, u8), Self::U8Pair),
((u8, u8, u8), Self::U8Triplet),
(i8, Self::I8),
((i8, i8), Self::I8Pair),
((i8, i8, i8), Self::I8Triplet),
(u16, Self::U16),
((u16, u16), Self::U16Pair),
((u16, u16, u16), Self::U16Triplet),
(i16, Self::I16),
((i16, i16), Self::I16Pair),
((i16, i16, i16), Self::I16Triplet),
(u32, Self::U32),
((u32, u32), Self::U32Pair),
((u32, u32, u32), Self::U32Triplet),
(i32, Self::I32),
((i32, i32), Self::I32Pair),
((i32, i32, i32), Self::I32Triplet),
(f32, Self::F32),
((f32, f32), Self::F32Pair),
((f32, f32, f32), Self::F32Triplet),
(u64, Self::U64),
(f64, Self::F64),
);
#[cfg(feature = "alloc")]
mod alloc_mod {
use super::*;
/// Generic enumeration for additional parameters, including parameters which rely on heap
/// allocations.
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[derive(Debug, Clone)]
pub enum Params {
Heapless(ParamsHeapless),
Store(StoreAddr),
Vec(Vec<u8>),
String(String),
}
impl From<StoreAddr> for Params {
fn from(x: StoreAddr) -> Self {
Self::Store(x)
}
}
impl From<ParamsHeapless> for Params {
fn from(x: ParamsHeapless) -> Self {
Self::Heapless(x)
}
}
impl From<Vec<u8>> for Params {
fn from(val: Vec<u8>) -> Self {
Self::Vec(val)
}
}
/// Converts a byte slice into the [Params::Vec] variant
impl From<&[u8]> for Params {
fn from(val: &[u8]) -> Self {
Self::Vec(val.to_vec())
}
}
impl From<String> for Params {
fn from(val: String) -> Self {
Self::String(val)
}
}
/// Converts a string slice into the [Params::String] variant
impl From<&str> for Params {
fn from(val: &str) -> Self {
Self::String(val.to_string())
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_basic_u32_pair() {
let u32_pair = U32Pair(4, 8);
assert_eq!(u32_pair.0, 4);
assert_eq!(u32_pair.1, 8);
let raw = u32_pair.to_be_bytes();
let mut u32_conv_back = u32::from_be_bytes(raw[0..4].try_into().unwrap());
assert_eq!(u32_conv_back, 4);
u32_conv_back = u32::from_be_bytes(raw[4..8].try_into().unwrap());
assert_eq!(u32_conv_back, 8);
}
#[test]
fn basic_signed_test_pair() {
let i8_pair = I8Pair(-3, -16);
assert_eq!(i8_pair.0, -3);
assert_eq!(i8_pair.1, -16);
let raw = i8_pair.to_be_bytes();
let mut i8_conv_back = i8::from_be_bytes(raw[0..1].try_into().unwrap());
assert_eq!(i8_conv_back, -3);
i8_conv_back = i8::from_be_bytes(raw[1..2].try_into().unwrap());
assert_eq!(i8_conv_back, -16);
}
#[test]
fn basic_signed_test_triplet() {
let i8_triplet = I8Triplet(-3, -16, -126);
assert_eq!(i8_triplet.0, -3);
assert_eq!(i8_triplet.1, -16);
assert_eq!(i8_triplet.2, -126);
let raw = i8_triplet.to_be_bytes();
let mut i8_conv_back = i8::from_be_bytes(raw[0..1].try_into().unwrap());
assert_eq!(i8_conv_back, -3);
i8_conv_back = i8::from_be_bytes(raw[1..2].try_into().unwrap());
assert_eq!(i8_conv_back, -16);
i8_conv_back = i8::from_be_bytes(raw[2..3].try_into().unwrap());
assert_eq!(i8_conv_back, -126);
}
#[test]
fn conversion_test_string() {
let param: Params = "Test String".into();
if let Params::String(str) = param {
assert_eq!(str, String::from("Test String"));
} else {
panic!("Params type is not String")
}
}
#[test]
fn conversion_from_slice() {
let test_slice: [u8; 5] = [0; 5];
let vec_param: Params = test_slice.as_slice().into();
if let Params::Vec(vec) = vec_param {
assert_eq!(vec, test_slice.to_vec());
} else {
panic!("Params type is not a vector")
}
}
}


@@ -13,7 +13,7 @@
//! # Example
//!
//! ```
//! use fsrc_core::pool::{LocalPool, PoolCfg, PoolProvider};
//! use satrs_core::pool::{LocalPool, PoolCfg, PoolProvider};
//!
//! // 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes
//! let pool_cfg = PoolCfg::new(vec![(4, 4), (2, 8), (1, 16)]);
@@ -77,10 +77,15 @@ use alloc::format;
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use core::fmt::{Display, Formatter};
use delegate::delegate;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::boxed::Box;
#[cfg(feature = "std")]
use std::error::Error;
#[cfg(feature = "std")]
use std::sync::{Arc, RwLock};
type NumBlocks = u16;
@@ -127,26 +132,55 @@ pub struct LocalPool {
/// Simple address type used for transactions with the local pool.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct StoreAddr {
pool_idx: u16,
packet_idx: NumBlocks,
pub(crate) pool_idx: u16,
pub(crate) packet_idx: NumBlocks,
}
impl Display for StoreAddr {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
write!(
f,
"StoreAddr(pool index: {}, packet index: {})",
self.pool_idx, self.packet_idx
)
}
}
impl StoreAddr {
pub const INVALID_ADDR: u32 = 0xFFFFFFFF;
pub fn raw(&self) -> u32 {
((self.pool_idx as u32) << 16) as u32 | self.packet_idx as u32
((self.pool_idx as u32) << 16) | self.packet_idx as u32
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum StoreIdError {
InvalidSubpool(u16),
InvalidPacketIdx(u16),
}
impl Display for StoreIdError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
StoreIdError::InvalidSubpool(pool) => {
write!(f, "invalid subpool, index: {pool}")
}
StoreIdError::InvalidPacketIdx(packet_idx) => {
write!(f, "invalid packet index: {packet_idx}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for StoreIdError {}
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum StoreError {
/// Requested data block is too large
DataTooLarge(usize),
@@ -160,6 +194,38 @@ pub enum StoreError {
InternalError(String),
}
impl Display for StoreError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
StoreError::DataTooLarge(size) => {
write!(f, "data to store with size {size} is too large")
}
StoreError::StoreFull(subpool) => {
write!(f, "store is too full. index for full subpool: {subpool}")
}
StoreError::InvalidStoreId(id_e, addr) => {
write!(f, "invalid store ID: {id_e}, address: {addr:?}")
}
StoreError::DataDoesNotExist(addr) => {
write!(f, "no data exists at address {addr:?}")
}
StoreError::InternalError(e) => {
write!(f, "internal error: {e}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for StoreError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
if let StoreError::InvalidStoreId(e, _) = self {
return Some(e);
}
None
}
}
pub trait PoolProvider {
/// Add new data to the pool. The provider should attempt to reserve a memory block with the
/// appropriate size and then copy the given data to the block. Yields a [StoreAddr] which can
@@ -201,6 +267,14 @@ pub trait PoolProvider {
/// Delete data inside the pool given a [StoreAddr]
fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError>;
fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError>;
/// Retrieve the length of the data at the given store address.
fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
if !self.has_element_at(addr)? {
return Err(StoreError::DataDoesNotExist(*addr));
}
Ok(self.read(addr)?.len())
}
}
impl LocalPool {
@@ -239,7 +313,7 @@ impl LocalPool {
fn validate_addr(&self, addr: &StoreAddr) -> Result<(), StoreError> {
let pool_idx = addr.pool_idx as usize;
if pool_idx as usize >= self.pool_cfg.cfg.len() {
if pool_idx >= self.pool_cfg.cfg.len() {
return Err(StoreError::InvalidStoreId(
StoreIdError::InvalidSubpool(addr.pool_idx),
Some(*addr),
@@ -279,14 +353,12 @@ impl LocalPool {
fn write(&mut self, addr: &StoreAddr, data: &[u8]) -> Result<(), StoreError> {
let packet_pos = self.raw_pos(addr).ok_or_else(|| {
StoreError::InternalError(format!(
"write: Error in raw_pos func with address {:?}",
addr
"write: Error in raw_pos func with address {addr:?}"
))
})?;
let subpool = self.pool.get_mut(addr.pool_idx as usize).ok_or_else(|| {
StoreError::InternalError(format!(
"write: Error retrieving pool slice with address {:?}",
addr
"write: Error retrieving pool slice with address {addr:?}"
))
})?;
let pool_slice = &mut subpool[packet_pos..packet_pos + data.len()];
@@ -340,7 +412,8 @@ impl PoolProvider for LocalPool {
fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError> {
let curr_size = self.addr_check(addr)?;
let raw_pos = self.raw_pos(addr).unwrap();
let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..curr_size];
let block =
&mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + curr_size];
Ok(block)
}
@@ -405,6 +478,8 @@ impl<'a> PoolGuard<'a> {
self.pool.read(&self.addr)
}
/// Releasing the pool guard will disable the automatic deletion of the data when the guard
/// is dropped.
pub fn release(&mut self) {
self.no_deletion = true;
}
@@ -438,6 +513,8 @@ impl<'a> PoolRwGuard<'a> {
delegate!(
to self.guard {
pub fn read(&self) -> Result<&[u8], StoreError>;
/// Releasing the pool guard will disable the automatic deletion of the data when the guard
/// is dropped.
pub fn release(&mut self);
}
);
@@ -713,4 +790,25 @@ mod tests {
drop(rw_guard);
assert!(!local_pool.has_element_at(&addr).expect("Invalid address"));
}
#[test]
fn modify_pool_index_above_0() {
let mut local_pool = basic_small_pool();
let test_buf_0: [u8; 4] = [1; 4];
let test_buf_1: [u8; 4] = [2; 4];
let test_buf_2: [u8; 4] = [3; 4];
let test_buf_3: [u8; 4] = [4; 4];
let addr0 = local_pool.add(&test_buf_0).expect("Adding data failed");
let addr1 = local_pool.add(&test_buf_1).expect("Adding data failed");
let addr2 = local_pool.add(&test_buf_2).expect("Adding data failed");
let addr3 = local_pool.add(&test_buf_3).expect("Adding data failed");
let tm0_raw = local_pool.modify(&addr0).expect("Modifying data failed");
assert_eq!(tm0_raw, test_buf_0);
let tm1_raw = local_pool.modify(&addr1).expect("Modifying data failed");
assert_eq!(tm1_raw, test_buf_1);
let tm2_raw = local_pool.modify(&addr2).expect("Modifying data failed");
assert_eq!(tm2_raw, test_buf_2);
let tm3_raw = local_pool.modify(&addr3).expect("Modifying data failed");
assert_eq!(tm3_raw, test_buf_3);
}
}

102
satrs-core/src/power.rs Normal file

@@ -0,0 +1,102 @@
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// Generic trait for a device capable of switching itself on or off.
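///
/// Minimal implementor sketch. The `DummySwitch` type is hypothetical and only serves to
/// illustrate the trait contract:
///
/// ```ignore
/// use satrs_core::power::{PowerSwitch, SwitchState};
///
/// struct DummySwitch {
///     state: SwitchState,
/// }
///
/// impl PowerSwitch for DummySwitch {
///     type Error = ();
///
///     fn switch_on(&mut self) -> Result<(), Self::Error> {
///         self.state = SwitchState::On;
///         Ok(())
///     }
///
///     fn switch_off(&mut self) -> Result<(), Self::Error> {
///         self.state = SwitchState::Off;
///         Ok(())
///     }
///
///     fn switch_state(&self) -> SwitchState {
///         self.state
///     }
/// }
///
/// let mut switch = DummySwitch { state: SwitchState::Off };
/// switch.switch_on().unwrap();
/// assert!(switch.is_switch_on());
/// ```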
pub trait PowerSwitch {
type Error;
fn switch_on(&mut self) -> Result<(), Self::Error>;
fn switch_off(&mut self) -> Result<(), Self::Error>;
fn is_switch_on(&self) -> bool {
self.switch_state() == SwitchState::On
}
fn switch_state(&self) -> SwitchState;
}
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum SwitchState {
Off = 0,
On = 1,
Unknown = 2,
Faulty = 3,
}
pub type SwitchId = u16;
/// Generic trait for a device capable of turning on and off switches.
pub trait PowerSwitcherCommandSender {
type Error;
fn send_switch_on_cmd(&mut self, switch_id: SwitchId) -> Result<(), Self::Error>;
fn send_switch_off_cmd(&mut self, switch_id: SwitchId) -> Result<(), Self::Error>;
}
pub trait PowerSwitchInfo {
type Error;
/// Retrieve the switch state
fn get_switch_state(&mut self, switch_id: SwitchId) -> Result<SwitchState, Self::Error>;
fn get_is_switch_on(&mut self, switch_id: SwitchId) -> Result<bool, Self::Error> {
Ok(self.get_switch_state(switch_id)? == SwitchState::On)
}
/// The maximum delay it will take to change a switch.
///
/// This may take into account the time to send a command, wait for it to be executed, and
/// observe the switch state change.
fn switch_delay_ms(&self) -> u32;
}
#[cfg(test)]
mod tests {
#![allow(dead_code)]
use super::*;
use std::boxed::Box;
struct Pcdu {
switch_rx: std::sync::mpsc::Receiver<(SwitchId, u16)>,
}
#[derive(Eq, PartialEq)]
enum DeviceState {
OFF,
SwitchingPower,
ON,
SETUP,
IDLE,
}
struct MyComplexDevice {
power_switcher: Box<dyn PowerSwitcherCommandSender<Error = ()>>,
power_info: Box<dyn PowerSwitchInfo<Error = ()>>,
switch_id: SwitchId,
some_state: u16,
dev_state: DeviceState,
mode: u32,
submode: u16,
}
impl MyComplexDevice {
pub fn periodic_op(&mut self) {
// .. mode command coming in
let mode = 1;
if mode == 1 {
if self.dev_state == DeviceState::OFF {
self.power_switcher
.send_switch_on_cmd(self.switch_id)
.expect("sending switch cmd failed");
self.dev_state = DeviceState::SwitchingPower;
}
if self.dev_state == DeviceState::SwitchingPower {
if self.power_info.get_is_switch_on(0).unwrap() {
self.dev_state = DeviceState::ON;
self.mode = 1;
}
}
}
}
}
}

449
satrs-core/src/pus/event.rs Normal file

@@ -0,0 +1,449 @@
use crate::pus::{source_buffer_large_enough, EcssTmtcError};
use spacepackets::ecss::{EcssEnumeration, PusError};
use spacepackets::tm::PusTm;
use spacepackets::tm::PusTmSecondaryHeader;
use spacepackets::{SpHeader, MAX_APID};
use crate::pus::EcssTmSenderCore;
#[cfg(feature = "alloc")]
pub use alloc_mod::EventReporter;
pub use spacepackets::ecss::event::*;
pub struct EventReporterBase {
msg_count: u16,
apid: u16,
pub dest_id: u16,
}
impl EventReporterBase {
pub fn new(apid: u16) -> Option<Self> {
if apid > MAX_APID {
return None;
}
Some(Self {
msg_count: 0,
dest_id: 0,
apid,
})
}
pub fn event_info(
&mut self,
buf: &mut [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.generate_and_send_generic_tm(
buf,
Subservice::TmInfoReport,
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_low_severity(
&mut self,
buf: &mut [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.generate_and_send_generic_tm(
buf,
Subservice::TmLowSeverityReport,
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_medium_severity(
&mut self,
buf: &mut [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.generate_and_send_generic_tm(
buf,
Subservice::TmMediumSeverityReport,
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_high_severity(
&mut self,
buf: &mut [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.generate_and_send_generic_tm(
buf,
Subservice::TmHighSeverityReport,
sender,
time_stamp,
event_id,
aux_data,
)
}
fn generate_and_send_generic_tm(
&mut self,
buf: &mut [u8],
subservice: Subservice,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
let tm = self.generate_generic_event_tm(buf, subservice, time_stamp, event_id, aux_data)?;
sender.send_tm(tm.into())?;
self.msg_count += 1;
Ok(())
}
fn generate_generic_event_tm<'a>(
&'a self,
buf: &'a mut [u8],
subservice: Subservice,
time_stamp: &'a [u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<PusTm, EcssTmtcError> {
let mut src_data_len = event_id.size();
if let Some(aux_data) = aux_data {
src_data_len += aux_data.len();
}
source_buffer_large_enough(buf.len(), src_data_len)?;
let mut sp_header = SpHeader::tm_unseg(self.apid, 0, 0).unwrap();
let sec_header = PusTmSecondaryHeader::new(
5,
subservice.into(),
self.msg_count,
self.dest_id,
Some(time_stamp),
);
let mut current_idx = 0;
event_id
.write_to_be_bytes(&mut buf[0..event_id.size()])
.map_err(PusError::ByteConversion)?;
current_idx += event_id.size();
if let Some(aux_data) = aux_data {
buf[current_idx..current_idx + aux_data.len()].copy_from_slice(aux_data);
current_idx += aux_data.len();
}
Ok(PusTm::new(
&mut sp_header,
sec_header,
Some(&buf[0..current_idx]),
true,
))
}
}
#[cfg(feature = "alloc")]
mod alloc_mod {
use super::*;
use alloc::vec;
use alloc::vec::Vec;
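/// Wrapper around [EventReporterBase] which owns the source data buffer used for TM generation.
///
/// Minimal usage sketch (not part of this PR), assuming `sender` implements the
/// `EcssTmSenderCore` trait and `stamp` holds a raw timestamp:
///
/// ```ignore
/// use satrs_core::events::{EventU32, Severity};
/// use satrs_core::pus::event::EventReporter;
///
/// let mut reporter = EventReporter::new(0x02, 64).expect("invalid APID");
/// let event = EventU32::new(Severity::INFO, 1, 1).unwrap();
/// reporter
///     .event_info(&mut sender, &stamp, event, None)
///     .expect("sending info event TM failed");
/// ```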
pub struct EventReporter {
source_data_buf: Vec<u8>,
pub reporter: EventReporterBase,
}
impl EventReporter {
pub fn new(apid: u16, max_event_id_and_aux_data_size: usize) -> Option<Self> {
let reporter = EventReporterBase::new(apid)?;
Some(Self {
source_data_buf: vec![0; max_event_id_and_aux_data_size],
reporter,
})
}
pub fn event_info(
&mut self,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.reporter.event_info(
self.source_data_buf.as_mut_slice(),
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_low_severity(
&mut self,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.reporter.event_low_severity(
self.source_data_buf.as_mut_slice(),
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_medium_severity(
&mut self,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.reporter.event_medium_severity(
self.source_data_buf.as_mut_slice(),
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_high_severity(
&mut self,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.reporter.event_high_severity(
self.source_data_buf.as_mut_slice(),
sender,
time_stamp,
event_id,
aux_data,
)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::events::{EventU32, Severity};
use crate::pus::tests::CommonTmInfo;
use crate::pus::{EcssChannel, PusTmWrapper};
use crate::ChannelId;
use spacepackets::ByteConversionError;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::vec::Vec;
const EXAMPLE_APID: u16 = 0xee;
const EXAMPLE_GROUP_ID: u16 = 2;
const EXAMPLE_EVENT_ID_0: u16 = 1;
#[allow(dead_code)]
const EXAMPLE_EVENT_ID_1: u16 = 2;
#[derive(Debug, Eq, PartialEq, Clone)]
struct TmInfo {
pub common: CommonTmInfo,
pub event: EventU32,
pub aux_data: Vec<u8>,
}
#[derive(Default, Clone)]
struct TestSender {
pub service_queue: RefCell<VecDeque<TmInfo>>,
}
impl EcssChannel for TestSender {
fn id(&self) -> ChannelId {
0
}
}
impl EcssTmSenderCore for TestSender {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
match tm {
PusTmWrapper::InStore(_) => {
panic!("TestSender: unexpected call with address");
}
PusTmWrapper::Direct(tm) => {
assert!(tm.source_data().is_some());
let src_data = tm.source_data().unwrap();
assert!(src_data.len() >= 4);
let event =
EventU32::from(u32::from_be_bytes(src_data[0..4].try_into().unwrap()));
let mut aux_data = Vec::new();
if src_data.len() > 4 {
aux_data.extend_from_slice(&src_data[4..]);
}
self.service_queue.borrow_mut().push_back(TmInfo {
common: CommonTmInfo::new_from_tm(&tm),
event,
aux_data,
});
Ok(())
}
}
}
}
fn severity_to_subservice(severity: Severity) -> Subservice {
match severity {
Severity::INFO => Subservice::TmInfoReport,
Severity::LOW => Subservice::TmLowSeverityReport,
Severity::MEDIUM => Subservice::TmMediumSeverityReport,
Severity::HIGH => Subservice::TmHighSeverityReport,
}
}
fn report_basic_event(
reporter: &mut EventReporter,
sender: &mut TestSender,
time_stamp: &[u8],
event: EventU32,
severity: Severity,
aux_data: Option<&[u8]>,
) {
match severity {
Severity::INFO => {
reporter
.event_info(sender, time_stamp, event, aux_data)
.expect("Error reporting info event");
}
Severity::LOW => {
reporter
.event_low_severity(sender, time_stamp, event, aux_data)
.expect("Error reporting low event");
}
Severity::MEDIUM => {
reporter
.event_medium_severity(sender, time_stamp, event, aux_data)
.expect("Error reporting medium event");
}
Severity::HIGH => {
reporter
.event_high_severity(sender, time_stamp, event, aux_data)
.expect("Error reporting high event");
}
}
}
fn basic_event_test(
max_event_aux_data_buf: usize,
severity: Severity,
error_data: Option<&[u8]>,
) {
let mut sender = TestSender::default();
let reporter = EventReporter::new(EXAMPLE_APID, max_event_aux_data_buf);
assert!(reporter.is_some());
let mut reporter = reporter.unwrap();
let time_stamp_empty: [u8; 7] = [0; 7];
let mut error_copy = Vec::new();
if let Some(err_data) = error_data {
error_copy.extend_from_slice(err_data);
}
let event = EventU32::new(severity, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
.expect("Error creating example event");
report_basic_event(
&mut reporter,
&mut sender,
&time_stamp_empty,
event,
severity,
error_data,
);
let mut service_queue = sender.service_queue.borrow_mut();
assert_eq!(service_queue.len(), 1);
let tm_info = service_queue.pop_front().unwrap();
assert_eq!(
tm_info.common.subservice,
severity_to_subservice(severity) as u8
);
assert_eq!(tm_info.common.dest_id, 0);
assert_eq!(tm_info.common.time_stamp, time_stamp_empty);
assert_eq!(tm_info.common.msg_counter, 0);
assert_eq!(tm_info.common.apid, EXAMPLE_APID);
assert_eq!(tm_info.event, event);
assert_eq!(tm_info.aux_data, error_copy);
}
#[test]
fn basic_info_event_generation() {
basic_event_test(4, Severity::INFO, None);
}
#[test]
fn basic_low_severity_event() {
basic_event_test(4, Severity::LOW, None);
}
#[test]
fn basic_medium_severity_event() {
basic_event_test(4, Severity::MEDIUM, None);
}
#[test]
fn basic_high_severity_event() {
basic_event_test(4, Severity::HIGH, None);
}
#[test]
fn event_with_info_string() {
let info_string = "Test Information";
basic_event_test(32, Severity::INFO, Some(info_string.as_bytes()));
}
#[test]
fn low_severity_with_raw_err_data() {
let raw_err_param: i32 = -1;
let raw_err = raw_err_param.to_be_bytes();
basic_event_test(8, Severity::LOW, Some(&raw_err))
}
fn check_buf_too_small(
reporter: &mut EventReporter,
sender: &mut TestSender,
expected_found_len: usize,
) {
let time_stamp_empty: [u8; 7] = [0; 7];
let event = EventU32::new(Severity::INFO, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
.expect("Error creating example event");
let err = reporter.event_info(sender, &time_stamp_empty, event, None);
assert!(err.is_err());
let err = err.unwrap_err();
if let EcssTmtcError::Pus(PusError::ByteConversion(ByteConversionError::ToSliceTooSmall(
missmatch,
))) = err
{
assert_eq!(missmatch.expected, 4);
assert_eq!(missmatch.found, expected_found_len);
} else {
panic!("Unexpected error {:?}", err);
}
}
#[test]
fn insufficient_buffer() {
let mut sender = TestSender::default();
for i in 0..3 {
let reporter = EventReporter::new(EXAMPLE_APID, i);
assert!(reporter.is_some());
let mut reporter = reporter.unwrap();
check_buf_too_small(&mut reporter, &mut sender, i);
}
}
}

View File

@@ -0,0 +1,309 @@
use crate::events::{EventU32, GenericEvent, Severity};
#[cfg(feature = "alloc")]
use crate::events::{EventU32TypedSev, HasSeverity};
#[cfg(feature = "alloc")]
use alloc::boxed::Box;
#[cfg(feature = "alloc")]
use core::hash::Hash;
#[cfg(feature = "alloc")]
use hashbrown::HashSet;
#[cfg(feature = "alloc")]
pub use crate::pus::event::EventReporter;
use crate::pus::verification::TcStateToken;
#[cfg(feature = "alloc")]
use crate::pus::EcssTmSenderCore;
use crate::pus::EcssTmtcError;
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub use alloc_mod::*;
#[cfg(feature = "heapless")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "heapless")))]
pub use heapless_mod::*;
/// This trait allows the PUS event manager implementation to stay generic over various types
/// of backend containers.
///
/// These backend containers keep track of whether a particular event is enabled or disabled for
/// reporting and also expose a simple API to enable or disable the event reporting.
///
/// For example, a straightforward implementation for host systems could use a
/// [hash set](https://docs.rs/hashbrown/latest/hashbrown/struct.HashSet.html)
/// structure to track disabled events. A more primitive and embedded-friendly
/// solution could track this information in a static or pre-allocated list which contains
/// the disabled events. A sketch of such a list-based backend follows the trait definition below.
pub trait PusEventMgmtBackendProvider<Provider: GenericEvent> {
type Error;
fn event_enabled(&self, event: &Provider) -> bool;
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error>;
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error>;
}
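// A minimal sketch of the pre-allocated list approach mentioned in the trait
// documentation above. The type name `StaticPusMgmtBackend` and the fixed
// capacity `N` are purely illustrative assumptions and not part of the crate
// API. Disabled events are tracked in a plain array, so no dynamic allocation
// is required.
pub struct StaticPusMgmtBackend<const N: usize, Provider: GenericEvent> {
    disabled: [Option<Provider>; N],
}
impl<const N: usize, Provider: GenericEvent + PartialEq + Copy>
    PusEventMgmtBackendProvider<Provider> for StaticPusMgmtBackend<N, Provider>
{
    // The unit error type signals that the fixed list of disabled events is full.
    type Error = ();
    fn event_enabled(&self, event: &Provider) -> bool {
        // An event is enabled as long as it is not present in the disabled list.
        !self.disabled.iter().flatten().any(|e| e == event)
    }
    fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
        for slot in self.disabled.iter_mut() {
            if *slot == Some(*event) {
                *slot = None;
                return Ok(true);
            }
        }
        Ok(false)
    }
    fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
        if !self.event_enabled(event) {
            return Ok(false);
        }
        for slot in self.disabled.iter_mut() {
            if slot.is_none() {
                *slot = Some(*event);
                return Ok(true);
            }
        }
        Err(())
    }
}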
#[cfg(feature = "heapless")]
pub mod heapless_mod {
use super::*;
use crate::events::{GenericEvent, LargestEventRaw};
use std::marker::PhantomData;
#[cfg_attr(doc_cfg, doc(cfg(feature = "heapless")))]
// TODO: After a new version of heapless is released which uses hash32 version 0.3, try using
// regular Event type again.
#[derive(Default)]
pub struct HeaplessPusMgmtBackendProvider<const N: usize, Provider: GenericEvent> {
disabled: heapless::FnvIndexSet<LargestEventRaw, N>,
phantom: PhantomData<Provider>,
}
/// Safety: All contained fields are [Send] as well
unsafe impl<const N: usize, Event: GenericEvent + Send> Send
for HeaplessPusMgmtBackendProvider<N, Event>
{
}
impl<const N: usize, Provider: GenericEvent> PusEventMgmtBackendProvider<Provider>
for HeaplessPusMgmtBackendProvider<N, Provider>
{
type Error = ();
fn event_enabled(&self, event: &Provider) -> bool {
self.disabled.contains(&event.raw_as_largest_type())
}
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
self.disabled
.insert(event.raw_as_largest_type())
.map_err(|_| ())
}
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
Ok(self.disabled.remove(&event.raw_as_largest_type()))
}
}
}
#[derive(Debug)]
pub enum EventRequest<Event: GenericEvent = EventU32> {
Enable(Event),
Disable(Event),
}
#[derive(Debug)]
pub struct EventRequestWithToken<Event: GenericEvent = EventU32> {
pub request: EventRequest<Event>,
pub token: TcStateToken,
}
#[derive(Debug)]
pub enum EventManError {
EcssTmtcError(EcssTmtcError),
SeverityMissmatch(Severity, Severity),
}
impl From<EcssTmtcError> for EventManError {
fn from(v: EcssTmtcError) -> Self {
Self::EcssTmtcError(v)
}
}
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use super::*;
/// Default backend provider which uses a hash set as the event reporting status container
/// as mentioned in the example of the [PusEventMgmtBackendProvider] documentation.
///
/// This provider is a good option for host systems or larger embedded systems where
/// the expected occasional memory allocation performed by the [HashSet] is not an issue.
pub struct DefaultPusMgmtBackendProvider<Event: GenericEvent = EventU32> {
disabled: HashSet<Event>,
}
/// Safety: All contained fields are [Send] as well
unsafe impl<Event: GenericEvent + Send> Send for DefaultPusMgmtBackendProvider<Event> {}
impl<Event: GenericEvent> Default for DefaultPusMgmtBackendProvider<Event> {
fn default() -> Self {
Self {
disabled: HashSet::default(),
}
}
}
impl<Provider: GenericEvent + PartialEq + Eq + Hash + Copy + Clone>
PusEventMgmtBackendProvider<Provider> for DefaultPusMgmtBackendProvider<Provider>
{
type Error = ();
fn event_enabled(&self, event: &Provider) -> bool {
!self.disabled.contains(event)
}
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
Ok(self.disabled.remove(event))
}
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
Ok(self.disabled.insert(*event))
}
}
pub struct PusEventDispatcher<BackendError, Provider: GenericEvent> {
reporter: EventReporter,
backend: Box<dyn PusEventMgmtBackendProvider<Provider, Error = BackendError>>,
}
/// Safety: All contained fields are [Send] as well.
unsafe impl<E: Send, Event: GenericEvent + Send> Send for PusEventDispatcher<E, Event> {}
impl<BackendError, Provider: GenericEvent> PusEventDispatcher<BackendError, Provider> {
pub fn new(
reporter: EventReporter,
backend: Box<dyn PusEventMgmtBackendProvider<Provider, Error = BackendError>>,
) -> Self {
Self { reporter, backend }
}
}
impl<BackendError, Event: GenericEvent> PusEventDispatcher<BackendError, Event> {
pub fn enable_tm_for_event(&mut self, event: &Event) -> Result<bool, BackendError> {
self.backend.enable_event_reporting(event)
}
pub fn disable_tm_for_event(&mut self, event: &Event) -> Result<bool, BackendError> {
self.backend.disable_event_reporting(event)
}
pub fn generate_pus_event_tm_generic(
&mut self,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event: Event,
aux_data: Option<&[u8]>,
) -> Result<bool, EventManError> {
if !self.backend.event_enabled(&event) {
return Ok(false);
}
match event.severity() {
Severity::INFO => self
.reporter
.event_info(sender, time_stamp, event, aux_data)
.map(|_| true)
.map_err(|e| e.into()),
Severity::LOW => self
.reporter
.event_low_severity(sender, time_stamp, event, aux_data)
.map(|_| true)
.map_err(|e| e.into()),
Severity::MEDIUM => self
.reporter
.event_medium_severity(sender, time_stamp, event, aux_data)
.map(|_| true)
.map_err(|e| e.into()),
Severity::HIGH => self
.reporter
.event_high_severity(sender, time_stamp, event, aux_data)
.map(|_| true)
.map_err(|e| e.into()),
}
}
}
impl<BackendError> PusEventDispatcher<BackendError, EventU32> {
pub fn enable_tm_for_event_with_sev<Severity: HasSeverity>(
&mut self,
event: &EventU32TypedSev<Severity>,
) -> Result<bool, BackendError> {
self.backend.enable_event_reporting(event.as_ref())
}
pub fn disable_tm_for_event_with_sev<Severity: HasSeverity>(
&mut self,
event: &EventU32TypedSev<Severity>,
) -> Result<bool, BackendError> {
self.backend.disable_event_reporting(event.as_ref())
}
pub fn generate_pus_event_tm<Severity: HasSeverity>(
&mut self,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event: EventU32TypedSev<Severity>,
aux_data: Option<&[u8]>,
) -> Result<bool, EventManError> {
self.generate_pus_event_tm_generic(sender, time_stamp, event.into(), aux_data)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::events::SeverityInfo;
use crate::pus::MpscTmAsVecSender;
use std::sync::mpsc::{channel, TryRecvError};
const INFO_EVENT: EventU32TypedSev<SeverityInfo> =
EventU32TypedSev::<SeverityInfo>::const_new(1, 0);
const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5);
const EMPTY_STAMP: [u8; 7] = [0; 7];
fn create_basic_man() -> PusEventDispatcher<(), EventU32> {
let reporter = EventReporter::new(0x02, 128).expect("Creating event reporter failed");
let backend = DefaultPusMgmtBackendProvider::<EventU32>::default();
PusEventDispatcher::new(reporter, Box::new(backend))
}
#[test]
fn test_basic() {
let mut event_man = create_basic_man();
let (event_tx, event_rx) = channel();
let mut sender = MpscTmAsVecSender::new(0, "test_sender", event_tx);
let event_sent = event_man
.generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
.expect("Sending info event failed");
assert!(event_sent);
// Will not check packet here, correctness of packet was tested somewhere else
event_rx.try_recv().expect("Receiving event TM failed");
}
#[test]
fn test_disable_event() {
let mut event_man = create_basic_man();
let (event_tx, event_rx) = channel();
let mut sender = MpscTmAsVecSender::new(0, "test", event_tx);
let res = event_man.disable_tm_for_event(&LOW_SEV_EVENT);
assert!(res.is_ok());
assert!(res.unwrap());
let mut event_sent = event_man
.generate_pus_event_tm_generic(&mut sender, &EMPTY_STAMP, LOW_SEV_EVENT, None)
.expect("Sending low severity event failed");
assert!(!event_sent);
let res = event_rx.try_recv();
assert!(res.is_err());
assert!(matches!(res.unwrap_err(), TryRecvError::Empty));
// Check that only the low severity event was disabled
event_sent = event_man
.generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
.expect("Sending info event failed");
assert!(event_sent);
event_rx.try_recv().expect("No info event received");
}
#[test]
fn test_reenable_event() {
let mut event_man = create_basic_man();
let (event_tx, event_rx) = channel();
let mut sender = MpscTmAsVecSender::new(0, "test", event_tx);
let mut res = event_man.disable_tm_for_event_with_sev(&INFO_EVENT);
assert!(res.is_ok());
assert!(res.unwrap());
res = event_man.enable_tm_for_event_with_sev(&INFO_EVENT);
assert!(res.is_ok());
assert!(res.unwrap());
let event_sent = event_man
.generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
.expect("Sending info event failed");
assert!(event_sent);
event_rx.try_recv().expect("No info event received");
}
}

View File

@@ -0,0 +1,133 @@
use crate::events::EventU32;
use crate::pool::{SharedPool, StoreAddr};
use crate::pus::event_man::{EventRequest, EventRequestWithToken};
use crate::pus::verification::{
StdVerifReporterWithSender, TcStateAccepted, TcStateToken, VerificationToken,
};
use crate::pus::{
EcssTcReceiver, EcssTmSender, PartialPusHandlingError, PusPacketHandlerResult,
PusPacketHandlingError, PusServiceBase, PusServiceHandler,
};
use spacepackets::ecss::event::Subservice;
use spacepackets::ecss::PusPacket;
use spacepackets::tc::PusTc;
use std::boxed::Box;
use std::sync::mpsc::Sender;
pub struct PusService5EventHandler {
psb: PusServiceBase,
event_request_tx: Sender<EventRequestWithToken>,
}
impl PusService5EventHandler {
pub fn new(
tc_receiver: Box<dyn EcssTcReceiver>,
shared_tc_store: SharedPool,
tm_sender: Box<dyn EcssTmSender>,
tm_apid: u16,
verification_handler: StdVerifReporterWithSender,
event_request_tx: Sender<EventRequestWithToken>,
) -> Self {
Self {
psb: PusServiceBase::new(
tc_receiver,
shared_tc_store,
tm_sender,
tm_apid,
verification_handler,
),
event_request_tx,
}
}
}
impl PusServiceHandler for PusService5EventHandler {
fn psb_mut(&mut self) -> &mut PusServiceBase {
&mut self.psb
}
fn psb(&self) -> &PusServiceBase {
&self.psb
}
fn handle_one_tc(
&mut self,
addr: StoreAddr,
token: VerificationToken<TcStateAccepted>,
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
self.copy_tc_to_buf(addr)?;
let (tc, _) = PusTc::from_bytes(&self.psb.pus_buf)?;
let subservice = tc.subservice();
let srv = Subservice::try_from(subservice);
if srv.is_err() {
return Ok(PusPacketHandlerResult::CustomSubservice(
tc.subservice(),
token,
));
}
let handle_enable_disable_request = |enable: bool, stamp: [u8; 7]| {
if tc.user_data().is_none() || tc.user_data().unwrap().len() < 4 {
return Err(PusPacketHandlingError::NotEnoughAppData(
"At least 4 bytes event ID expected".into(),
));
}
let user_data = tc.user_data().unwrap();
let event_u32 = EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap()));
let start_token = self
.psb
.verification_handler
.borrow_mut()
.start_success(token, Some(&stamp))
.map_err(|_| PartialPusHandlingError::Verification);
let partial_error = start_token.clone().err();
let mut token: TcStateToken = token.into();
if let Ok(start_token) = start_token {
token = start_token.into();
}
let event_req_with_token = if enable {
EventRequestWithToken {
request: EventRequest::Enable(event_u32),
token,
}
} else {
EventRequestWithToken {
request: EventRequest::Disable(event_u32),
token,
}
};
self.event_request_tx
.send(event_req_with_token)
.map_err(|_| {
PusPacketHandlingError::Other("Forwarding event request failed".into())
})?;
if let Some(partial_error) = partial_error {
return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
partial_error,
));
}
Ok(PusPacketHandlerResult::RequestHandled)
};
let mut partial_error = None;
let time_stamp = self.psb().get_current_timestamp(&mut partial_error);
match srv.unwrap() {
Subservice::TmInfoReport
| Subservice::TmLowSeverityReport
| Subservice::TmMediumSeverityReport
| Subservice::TmHighSeverityReport => {
return Err(PusPacketHandlingError::InvalidSubservice(tc.subservice()))
}
Subservice::TcEnableEventGeneration => {
handle_enable_disable_request(true, time_stamp)?;
}
Subservice::TcDisableEventGeneration => {
handle_enable_disable_request(false, time_stamp)?;
}
Subservice::TcReportDisabledList | Subservice::TmDisabledEventsReport => {
return Ok(PusPacketHandlerResult::SubserviceNotImplemented(
subservice, token,
));
}
}
Ok(PusPacketHandlerResult::RequestHandled)
}
}

1
satrs-core/src/pus/hk.rs Normal file
View File

@@ -0,0 +1 @@
pub use spacepackets::ecss::hk::*;

669
satrs-core/src/pus/mod.rs Normal file
View File

@@ -0,0 +1,669 @@
//! # PUS support modules
//!
//! This module contains structures to make working with the PUS C standard easier.
//! The satrs-example application contains various usage examples of these components.
use crate::ChannelId;
use core::fmt::{Display, Formatter};
#[cfg(feature = "alloc")]
use downcast_rs::{impl_downcast, Downcast};
#[cfg(feature = "alloc")]
use dyn_clone::DynClone;
use spacepackets::ecss::PusError;
use spacepackets::tc::PusTc;
use spacepackets::tm::PusTm;
use spacepackets::{ByteConversionError, SizeMissmatch, SpHeader};
use std::error::Error;
pub mod event;
pub mod event_man;
pub mod event_srv;
pub mod hk;
pub mod mode;
pub mod scheduler;
pub mod scheduler_srv;
#[cfg(feature = "std")]
pub mod test;
pub mod verification;
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
use crate::pool::{StoreAddr, StoreError};
use crate::pus::verification::{TcStateAccepted, TcStateToken, VerificationToken};
#[cfg(feature = "std")]
pub use std_mod::*;
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum PusTmWrapper<'tm> {
InStore(StoreAddr),
Direct(PusTm<'tm>),
}
impl From<StoreAddr> for PusTmWrapper<'_> {
fn from(value: StoreAddr) -> Self {
Self::InStore(value)
}
}
impl<'tm> From<PusTm<'tm>> for PusTmWrapper<'tm> {
fn from(value: PusTm<'tm>) -> Self {
Self::Direct(value)
}
}
pub type TcAddrWithToken = (StoreAddr, TcStateToken);
/// Generic abstraction for a telecommand being sent around after it has been accepted.
/// The actual telecommand is stored inside a pre-allocated pool structure.
pub type AcceptedTc = (StoreAddr, VerificationToken<TcStateAccepted>);
/// Generic error type for sending something via a message queue.
#[derive(Debug, Copy, Clone)]
pub enum GenericSendError {
RxDisconnected,
QueueFull(u32),
}
impl Display for GenericSendError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
GenericSendError::RxDisconnected => {
write!(f, "rx side has disconnected")
}
GenericSendError::QueueFull(max_cap) => {
write!(f, "queue with max capacity of {max_cap} is full")
}
}
}
}
#[cfg(feature = "std")]
impl Error for GenericSendError {}
/// Generic error type for sending something via a message queue.
#[derive(Debug, Copy, Clone)]
pub enum GenericRecvError {
Empty,
TxDisconnected,
}
impl Display for GenericRecvError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
Self::TxDisconnected => {
write!(f, "tx side has disconnected")
}
Self::Empty => {
write!(f, "nothing to receive")
}
}
}
}
#[cfg(feature = "std")]
impl Error for GenericRecvError {}
#[derive(Debug, Clone)]
pub enum EcssTmtcError {
StoreLock,
Store(StoreError),
Pus(PusError),
CantSendAddr(StoreAddr),
Send(GenericSendError),
Recv(GenericRecvError),
}
impl Display for EcssTmtcError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
EcssTmtcError::StoreLock => {
write!(f, "store lock error")
}
EcssTmtcError::Store(store) => {
write!(f, "store error: {store}")
}
EcssTmtcError::Pus(pus_e) => {
write!(f, "PUS error: {pus_e}")
}
EcssTmtcError::CantSendAddr(addr) => {
write!(f, "can not send address {addr}")
}
EcssTmtcError::Send(send_e) => {
write!(f, "send error {send_e}")
}
EcssTmtcError::Recv(recv_e) => {
write!(f, "recv error {recv_e}")
}
}
}
}
impl From<StoreError> for EcssTmtcError {
fn from(value: StoreError) -> Self {
Self::Store(value)
}
}
impl From<PusError> for EcssTmtcError {
fn from(value: PusError) -> Self {
Self::Pus(value)
}
}
impl From<GenericSendError> for EcssTmtcError {
fn from(value: GenericSendError) -> Self {
Self::Send(value)
}
}
impl From<GenericRecvError> for EcssTmtcError {
fn from(value: GenericRecvError) -> Self {
Self::Recv(value)
}
}
#[cfg(feature = "std")]
impl Error for EcssTmtcError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
EcssTmtcError::Store(e) => Some(e),
EcssTmtcError::Pus(e) => Some(e),
EcssTmtcError::Send(e) => Some(e),
_ => None,
}
}
}
pub trait EcssChannel: Send {
/// Each sender can have an ID associated with it
fn id(&self) -> ChannelId;
fn name(&self) -> &'static str {
"unset"
}
}
/// Generic trait for a user supplied sender object.
///
/// This sender object is responsible for sending PUS telemetry to a TM sink.
pub trait EcssTmSenderCore: EcssChannel {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError>;
}
/// Generic trait for a user supplied sender object.
///
/// This sender object is responsible for sending PUS telecommands to a TC recipient. Each
/// telecommand can optionally have a token which contains its verification state.
pub trait EcssTcSenderCore: EcssChannel {
fn send_tc(&self, tc: PusTc, token: Option<TcStateToken>) -> Result<(), EcssTmtcError>;
}
pub struct ReceivedTcWrapper {
pub store_addr: StoreAddr,
pub token: Option<TcStateToken>,
}
#[derive(Debug, Clone)]
pub enum TryRecvTmtcError {
Error(EcssTmtcError),
Empty,
}
impl From<EcssTmtcError> for TryRecvTmtcError {
fn from(value: EcssTmtcError) -> Self {
Self::Error(value)
}
}
impl From<PusError> for TryRecvTmtcError {
fn from(value: PusError) -> Self {
Self::Error(value.into())
}
}
impl From<StoreError> for TryRecvTmtcError {
fn from(value: StoreError) -> Self {
Self::Error(value.into())
}
}
/// Generic trait for a user supplied receiver object.
pub trait EcssTcReceiverCore: EcssChannel {
fn recv_tc(&self) -> Result<ReceivedTcWrapper, TryRecvTmtcError>;
}
/// Generic trait for objects which can receive ECSS PUS telecommands. This trait is
/// implemented by the [crate::tmtc::pus_distrib::PusDistributor] objects to allow passing PUS TC
/// packets into it.
pub trait ReceivesEcssPusTc {
type Error;
fn pass_pus_tc(&mut self, header: &SpHeader, pus_tc: &PusTc) -> Result<(), Self::Error>;
}
#[cfg(feature = "alloc")]
mod alloc_mod {
use super::*;
/// Extension trait for [EcssTmSenderCore].
///
/// It provides additional functionality, for example by implementing the [Downcast] trait
/// and the [DynClone] trait.
///
/// [Downcast] is implemented to allow passing the sender as a boxed trait object and still
/// retrieve the concrete type at a later point.
///
/// [DynClone] allows cloning the trait object as long as the boxed object implements
/// [Clone].
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub trait EcssTmSender: EcssTmSenderCore + Downcast + DynClone {}
/// Blanket implementation for all types which implement [EcssTmSenderCore] and are clonable.
impl<T> EcssTmSender for T where T: EcssTmSenderCore + Clone + 'static {}
dyn_clone::clone_trait_object!(EcssTmSender);
impl_downcast!(EcssTmSender);
/// Extension trait for [EcssTcSenderCore].
///
/// It provides additional functionality, for example by implementing the [Downcast] trait
/// and the [DynClone] trait.
///
/// [Downcast] is implemented to allow passing the sender as a boxed trait object and still
/// retrieve the concrete type at a later point.
///
/// [DynClone] allows cloning the trait object as long as the boxed object implements
/// [Clone].
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub trait EcssTcSender: EcssTcSenderCore + Downcast + DynClone {}
/// Blanket implementation for all types which implement [EcssTcSenderCore] and are clonable.
impl<T> EcssTcSender for T where T: EcssTcSenderCore + Clone + 'static {}
dyn_clone::clone_trait_object!(EcssTcSender);
impl_downcast!(EcssTcSender);
/// Extension trait for [EcssTcReceiverCore].
///
/// It provides additional functionality, for example by implementing the [Downcast] trait.
///
/// [Downcast] is implemented to allow passing the receiver as a boxed trait object and still
/// retrieve the concrete type at a later point.
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub trait EcssTcReceiver: EcssTcReceiverCore + Downcast {}
/// Blanket implementation for all types which implement [EcssTcReceiverCore].
impl<T> EcssTcReceiver for T where T: EcssTcReceiverCore + 'static {}
impl_downcast!(EcssTcReceiver);
}
#[cfg(feature = "std")]
pub mod std_mod {
use crate::pool::{SharedPool, StoreAddr};
use crate::pus::verification::{
StdVerifReporterWithSender, TcStateAccepted, VerificationToken,
};
use crate::pus::{
EcssChannel, EcssTcReceiver, EcssTcReceiverCore, EcssTmSender, EcssTmSenderCore,
EcssTmtcError, GenericRecvError, GenericSendError, PusTmWrapper, ReceivedTcWrapper,
TcAddrWithToken, TryRecvTmtcError,
};
use crate::tmtc::tm_helper::SharedTmStore;
use crate::ChannelId;
use alloc::boxed::Box;
use alloc::vec::Vec;
use spacepackets::ecss::PusError;
use spacepackets::time::cds::TimeProvider;
use spacepackets::time::StdTimestampError;
use spacepackets::time::TimeWriter;
use spacepackets::tm::PusTm;
use std::cell::RefCell;
use std::string::String;
use std::sync::mpsc;
use std::sync::mpsc::{SendError, TryRecvError};
use thiserror::Error;
#[derive(Clone)]
pub struct MpscTmInStoreSender {
id: ChannelId,
name: &'static str,
shared_tm_store: SharedTmStore,
sender: mpsc::Sender<StoreAddr>,
pub ignore_poison_errors: bool,
}
impl EcssChannel for MpscTmInStoreSender {
fn id(&self) -> ChannelId {
self.id
}
fn name(&self) -> &'static str {
self.name
}
}
impl From<SendError<StoreAddr>> for EcssTmtcError {
fn from(_: SendError<StoreAddr>) -> Self {
Self::Send(GenericSendError::RxDisconnected)
}
}
impl MpscTmInStoreSender {
pub fn send_direct_tm(&self, tm: PusTm) -> Result<(), EcssTmtcError> {
let addr = self.shared_tm_store.add_pus_tm(&tm)?;
self.sender
.send(addr)
.map_err(|_| EcssTmtcError::Send(GenericSendError::RxDisconnected))
}
}
impl EcssTmSenderCore for MpscTmInStoreSender {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
match tm {
PusTmWrapper::InStore(addr) => self.sender.send(addr).map_err(|e| e.into()),
PusTmWrapper::Direct(tm) => self.send_direct_tm(tm),
}
}
}
impl MpscTmInStoreSender {
pub fn new(
id: ChannelId,
name: &'static str,
shared_tm_store: SharedTmStore,
sender: mpsc::Sender<StoreAddr>,
) -> Self {
Self {
id,
name,
shared_tm_store,
sender,
ignore_poison_errors: false,
}
}
}
pub struct MpscTcInStoreReceiver {
id: ChannelId,
name: &'static str,
receiver: mpsc::Receiver<TcAddrWithToken>,
pub ignore_poison_errors: bool,
}
impl EcssChannel for MpscTcInStoreReceiver {
fn id(&self) -> ChannelId {
self.id
}
fn name(&self) -> &'static str {
self.name
}
}
impl EcssTcReceiverCore for MpscTcInStoreReceiver {
fn recv_tc(&self) -> Result<ReceivedTcWrapper, TryRecvTmtcError> {
let (store_addr, token) = self.receiver.try_recv().map_err(|e| match e {
TryRecvError::Empty => TryRecvTmtcError::Empty,
TryRecvError::Disconnected => {
TryRecvTmtcError::Error(EcssTmtcError::from(GenericRecvError::TxDisconnected))
}
})?;
Ok(ReceivedTcWrapper {
store_addr,
token: Some(token),
})
}
}
impl MpscTcInStoreReceiver {
pub fn new(
id: ChannelId,
name: &'static str,
receiver: mpsc::Receiver<TcAddrWithToken>,
) -> Self {
Self {
id,
name,
receiver,
ignore_poison_errors: false,
}
}
}
/// This class can be used if frequent heap allocations during run-time are not an issue.
/// PUS TM packets will be sent around as [Vec]s. Please note that the current implementation
/// of this class cannot deal with store addresses, so it is assumed that it is always
/// going to be called with direct packets. A usage sketch follows the sender implementation
/// below.
#[derive(Clone)]
pub struct MpscTmAsVecSender {
id: ChannelId,
sender: mpsc::Sender<Vec<u8>>,
name: &'static str,
}
impl From<SendError<Vec<u8>>> for EcssTmtcError {
fn from(_: SendError<Vec<u8>>) -> Self {
Self::Send(GenericSendError::RxDisconnected)
}
}
impl MpscTmAsVecSender {
pub fn new(id: u32, name: &'static str, sender: mpsc::Sender<Vec<u8>>) -> Self {
Self { id, sender, name }
}
}
impl EcssChannel for MpscTmAsVecSender {
fn id(&self) -> ChannelId {
self.id
}
fn name(&self) -> &'static str {
self.name
}
}
impl EcssTmSenderCore for MpscTmAsVecSender {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
match tm {
PusTmWrapper::InStore(addr) => Err(EcssTmtcError::CantSendAddr(addr)),
PusTmWrapper::Direct(tm) => {
let mut vec = Vec::new();
tm.append_to_vec(&mut vec).map_err(EcssTmtcError::Pus)?;
self.sender.send(vec)?;
Ok(())
}
}
}
}
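// Hedged usage sketch for the vector based sender above. The module name, the
// test name and the channel variable names are illustrative assumptions and
// not part of the original code.
#[cfg(test)]
mod mpsc_tm_as_vec_sender_usage {
    use super::*;
    use spacepackets::tm::PusTmSecondaryHeader;
    use spacepackets::SpHeader;
    #[test]
    fn send_direct_tm_as_byte_vec() {
        let (tm_tx, tm_rx) = mpsc::channel();
        let sender = MpscTmAsVecSender::new(0, "tm_as_vec", tm_tx);
        // Build a minimal PUS TM packet (service 17, subservice 2) with an
        // empty seven byte CDS short timestamp.
        let time_stamp: [u8; 7] = [0; 7];
        let mut sp_header = SpHeader::tm_unseg(0x02, 0, 0).unwrap();
        let sec_header = PusTmSecondaryHeader::new_simple(17, 2, &time_stamp);
        let tm = PusTm::new(&mut sp_header, sec_header, None, true);
        sender
            .send_tm(PusTmWrapper::Direct(tm))
            .expect("sending TM failed");
        // The receiving end gets the full raw telemetry packet as a byte vector.
        let raw_tm = tm_rx.recv().expect("no TM received");
        assert!(!raw_tm.is_empty());
    }
}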
#[derive(Debug, Clone, Error)]
pub enum PusPacketHandlingError {
#[error("generic PUS error: {0}")]
Pus(#[from] PusError),
#[error("wrong service number {0} for packet handler")]
WrongService(u8),
#[error("invalid subservice {0}")]
InvalidSubservice(u8),
#[error("not enough application data available: {0}")]
NotEnoughAppData(String),
#[error("invalid application data")]
InvalidAppData(String),
#[error("generic ECSS tmtc error: {0}")]
EcssTmtc(#[from] EcssTmtcError),
#[error("invalid verification token")]
InvalidVerificationToken,
#[error("other error {0}")]
Other(String),
}
#[derive(Debug, Clone, Error)]
pub enum PartialPusHandlingError {
#[error("generic timestamp generation error")]
Time(#[from] StdTimestampError),
#[error("error sending telemetry: {0}")]
TmSend(#[from] EcssTmtcError),
#[error("error sending verification message")]
Verification,
#[error("invalid verification token")]
NoVerificationToken,
}
/// Generic result type for handlers which can process PUS packets.
#[derive(Debug, Clone)]
pub enum PusPacketHandlerResult {
RequestHandled,
RequestHandledPartialSuccess(PartialPusHandlingError),
SubserviceNotImplemented(u8, VerificationToken<TcStateAccepted>),
CustomSubservice(u8, VerificationToken<TcStateAccepted>),
Empty,
}
impl From<PartialPusHandlingError> for PusPacketHandlerResult {
fn from(value: PartialPusHandlingError) -> Self {
Self::RequestHandledPartialSuccess(value)
}
}
/// Base class for handlers which can handle PUS TC packets. Right now, the verification
/// reporter is constrained to the [StdVerifReporterWithSender] and the service handler
/// relies on TMTC packets being exchanged via a [SharedPool].
pub struct PusServiceBase {
pub tc_receiver: Box<dyn EcssTcReceiver>,
pub shared_tc_store: SharedPool,
pub tm_sender: Box<dyn EcssTmSender>,
pub tm_apid: u16,
/// The verification handler is wrapped in a [RefCell] to allow the interior mutability
/// pattern. This makes it easier to write methods which only take a shared `&self` reference.
pub verification_handler: RefCell<StdVerifReporterWithSender>,
pub pus_buf: [u8; 2048],
pub pus_size: usize,
}
impl PusServiceBase {
pub fn new(
tc_receiver: Box<dyn EcssTcReceiver>,
shared_tc_store: SharedPool,
tm_sender: Box<dyn EcssTmSender>,
tm_apid: u16,
verification_handler: StdVerifReporterWithSender,
) -> Self {
Self {
tc_receiver,
shared_tc_store,
tm_apid,
tm_sender,
verification_handler: RefCell::new(verification_handler),
pus_buf: [0; 2048],
pus_size: 0,
}
}
pub fn get_current_timestamp(
&self,
partial_error: &mut Option<PartialPusHandlingError>,
) -> [u8; 7] {
let mut time_stamp: [u8; 7] = [0; 7];
let time_provider =
TimeProvider::from_now_with_u16_days().map_err(PartialPusHandlingError::Time);
if let Ok(time_provider) = time_provider {
// Can't fail, we have a buffer with the exact required size.
time_provider.write_to_bytes(&mut time_stamp).unwrap();
} else {
*partial_error = Some(time_provider.unwrap_err());
}
time_stamp
}
pub fn get_current_timestamp_ignore_error(&self) -> [u8; 7] {
let mut dummy = None;
self.get_current_timestamp(&mut dummy)
}
}
pub trait PusServiceHandler {
fn psb_mut(&mut self) -> &mut PusServiceBase;
fn psb(&self) -> &PusServiceBase;
fn handle_one_tc(
&mut self,
addr: StoreAddr,
token: VerificationToken<TcStateAccepted>,
) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
fn copy_tc_to_buf(&mut self, addr: StoreAddr) -> Result<(), PusPacketHandlingError> {
// Keep locked section as short as possible.
let psb_mut = self.psb_mut();
let mut tc_pool = psb_mut
.shared_tc_store
.write()
.map_err(|_| PusPacketHandlingError::EcssTmtc(EcssTmtcError::StoreLock))?;
let tc_guard = tc_pool.read_with_guard(addr);
let tc_raw = tc_guard.read().unwrap();
psb_mut.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw);
Ok(())
}
fn handle_next_packet(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
match self.psb().tc_receiver.recv_tc() {
Ok(ReceivedTcWrapper { store_addr, token }) => {
if token.is_none() {
return Err(PusPacketHandlingError::InvalidVerificationToken);
}
let token = token.unwrap();
let accepted_token = VerificationToken::<TcStateAccepted>::try_from(token)
.map_err(|_| PusPacketHandlingError::InvalidVerificationToken)?;
self.handle_one_tc(store_addr, accepted_token)
}
Err(e) => match e {
TryRecvTmtcError::Error(e) => Err(PusPacketHandlingError::EcssTmtc(e)),
TryRecvTmtcError::Empty => Ok(PusPacketHandlerResult::Empty),
},
}
}
}
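// Hedged helper sketch: a typical way to drive a [PusServiceHandler] from a
// periodic task is to call [PusServiceHandler::handle_next_packet] until the
// underlying receiver reports that it is empty. The function name is an
// illustrative assumption and not part of the original code.
#[allow(dead_code)]
fn handle_all_pending_tcs(
    handler: &mut impl PusServiceHandler,
) -> Result<u32, PusPacketHandlingError> {
    let mut handled_tcs = 0;
    loop {
        match handler.handle_next_packet()? {
            // No more telecommands are queued, the caller can yield again.
            PusPacketHandlerResult::Empty => return Ok(handled_tcs),
            _ => handled_tcs += 1,
        }
    }
}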
}
pub(crate) fn source_buffer_large_enough(cap: usize, len: usize) -> Result<(), EcssTmtcError> {
if len > cap {
return Err(
PusError::ByteConversion(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: cap,
expected: len,
}))
.into(),
);
}
Ok(())
}
#[cfg(test)]
pub(crate) mod tests {
use spacepackets::tm::{GenericPusTmSecondaryHeader, PusTm};
use spacepackets::CcsdsPacket;
#[derive(Debug, Eq, PartialEq, Clone)]
pub(crate) struct CommonTmInfo {
pub subservice: u8,
pub apid: u16,
pub msg_counter: u16,
pub dest_id: u16,
pub time_stamp: [u8; 7],
}
impl CommonTmInfo {
pub fn new_from_tm(tm: &PusTm) -> Self {
let mut time_stamp = [0; 7];
time_stamp.clone_from_slice(&tm.timestamp().unwrap()[0..7]);
Self {
subservice: tm.subservice(),
apid: tm.apid(),
msg_counter: tm.msg_counter(),
dest_id: tm.dest_id(),
time_stamp,
}
}
}
}

View File

@@ -0,0 +1,16 @@
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum Subservice {
TcSetMode = 1,
TcReadMode = 3,
TcAnnounceMode = 4,
TcAnnounceModeRecursive = 5,
TmModeReply = 6,
TmCantReachMode = 7,
TmWrongModeReply = 8,
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,184 @@
use crate::pool::{SharedPool, StoreAddr};
use crate::pus::scheduler::PusScheduler;
use crate::pus::verification::{StdVerifReporterWithSender, TcStateAccepted, VerificationToken};
use crate::pus::{
EcssTcReceiver, EcssTmSender, PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase,
PusServiceHandler,
};
use spacepackets::ecss::{scheduling, PusPacket};
use spacepackets::tc::PusTc;
use spacepackets::time::cds::TimeProvider;
use std::boxed::Box;
/// This is a helper class for [std] environments to handle generic PUS 11 (scheduling service)
/// packets. This handler is constrained to using the [PusScheduler], but is able to process
/// the most important PUS requests for a scheduling service.
///
/// Please note that this class does not perform the regular periodic handling itself, such as
/// releasing any telecommands stored inside the scheduler. The user can retrieve the wrapped
/// scheduler via the [Self::scheduler] and [Self::scheduler_mut] functions and then use the
/// scheduler API to release telecommands when applicable. A periodic-handling sketch can be
/// found at the end of this file.
pub struct PusService11SchedHandler {
psb: PusServiceBase,
scheduler: PusScheduler,
}
impl PusService11SchedHandler {
pub fn new(
tc_receiver: Box<dyn EcssTcReceiver>,
shared_tc_store: SharedPool,
tm_sender: Box<dyn EcssTmSender>,
tm_apid: u16,
verification_handler: StdVerifReporterWithSender,
scheduler: PusScheduler,
) -> Self {
Self {
psb: PusServiceBase::new(
tc_receiver,
shared_tc_store,
tm_sender,
tm_apid,
verification_handler,
),
scheduler,
}
}
pub fn scheduler_mut(&mut self) -> &mut PusScheduler {
&mut self.scheduler
}
pub fn scheduler(&self) -> &PusScheduler {
&self.scheduler
}
}
impl PusServiceHandler for PusService11SchedHandler {
fn psb_mut(&mut self) -> &mut PusServiceBase {
&mut self.psb
}
fn psb(&self) -> &PusServiceBase {
&self.psb
}
fn handle_one_tc(
&mut self,
addr: StoreAddr,
token: VerificationToken<TcStateAccepted>,
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
self.copy_tc_to_buf(addr)?;
let (tc, _) = PusTc::from_bytes(&self.psb.pus_buf)?;
let subservice = tc.subservice();
let std_service = scheduling::Subservice::try_from(subservice);
if std_service.is_err() {
return Ok(PusPacketHandlerResult::CustomSubservice(
tc.subservice(),
token,
));
}
let mut partial_error = None;
let time_stamp = self.psb().get_current_timestamp(&mut partial_error);
match std_service.unwrap() {
scheduling::Subservice::TcEnableScheduling => {
let start_token = self
.psb
.verification_handler
.get_mut()
.start_success(token, Some(&time_stamp))
.expect("Error sending start success");
self.scheduler.enable();
if self.scheduler.is_enabled() {
self.psb
.verification_handler
.get_mut()
.completion_success(start_token, Some(&time_stamp))
.expect("Error sending completion success");
} else {
panic!("Failed to enable scheduler");
}
}
scheduling::Subservice::TcDisableScheduling => {
let start_token = self
.psb
.verification_handler
.get_mut()
.start_success(token, Some(&time_stamp))
.expect("Error sending start success");
self.scheduler.disable();
if !self.scheduler.is_enabled() {
self.psb
.verification_handler
.get_mut()
.completion_success(start_token, Some(&time_stamp))
.expect("Error sending completion success");
} else {
panic!("Failed to disable scheduler");
}
}
scheduling::Subservice::TcResetScheduling => {
let start_token = self
.psb
.verification_handler
.get_mut()
.start_success(token, Some(&time_stamp))
.expect("Error sending start success");
let mut pool = self
.psb
.shared_tc_store
.write()
.expect("Locking pool failed");
self.scheduler
.reset(pool.as_mut())
.expect("Error resetting TC Pool");
self.psb
.verification_handler
.get_mut()
.completion_success(start_token, Some(&time_stamp))
.expect("Error sending completion success");
}
scheduling::Subservice::TcInsertActivity => {
let start_token = self
.psb
.verification_handler
.get_mut()
.start_success(token, Some(&time_stamp))
.expect("error sending start success");
let mut pool = self
.psb
.shared_tc_store
.write()
.expect("locking pool failed");
self.scheduler
.insert_wrapped_tc::<TimeProvider>(&tc, pool.as_mut())
.expect("insertion of activity into pool failed");
self.psb
.verification_handler
.get_mut()
.completion_success(start_token, Some(&time_stamp))
.expect("sending completion success failed");
}
_ => {
return Ok(PusPacketHandlerResult::CustomSubservice(
tc.subservice(),
token,
));
}
}
if let Some(partial_error) = partial_error {
return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
partial_error,
));
}
Ok(PusPacketHandlerResult::CustomSubservice(
tc.subservice(),
token,
))
}
}
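// Hedged periodic-handling sketch: the handler above only processes incoming
// service 11 telecommands; releasing due telecommands must be done by the
// user via the wrapped scheduler. The function name and the task structure are
// illustrative assumptions, and the concrete release call is omitted because
// its API is not part of this file.
#[allow(dead_code)]
fn periodic_sched_handling(handler: &mut PusService11SchedHandler) {
    // First, process all queued service 11 telecommands.
    loop {
        match handler
            .handle_next_packet()
            .expect("handling service 11 TC failed")
        {
            PusPacketHandlerResult::Empty => break,
            _ => continue,
        }
    }
    if handler.scheduler().is_enabled() {
        // Release due telecommands from the shared TC pool here, using the
        // scheduler API exposed via [PusService11SchedHandler::scheduler_mut].
    }
}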

218
satrs-core/src/pus/test.rs Normal file
View File

@@ -0,0 +1,218 @@
use crate::pool::{SharedPool, StoreAddr};
use crate::pus::verification::{StdVerifReporterWithSender, TcStateAccepted, VerificationToken};
use crate::pus::{
EcssTcReceiver, EcssTmSender, PartialPusHandlingError, PusPacketHandlerResult,
PusPacketHandlingError, PusServiceBase, PusServiceHandler, PusTmWrapper,
};
use spacepackets::ecss::PusPacket;
use spacepackets::tc::PusTc;
use spacepackets::tm::{PusTm, PusTmSecondaryHeader};
use spacepackets::SpHeader;
use std::boxed::Box;
/// This is a helper class for [std] environments to handle generic PUS 17 (test service) packets.
/// This handler only processes ping requests and generates a ping reply for them accordingly.
pub struct PusService17TestHandler {
psb: PusServiceBase,
}
impl PusService17TestHandler {
pub fn new(
tc_receiver: Box<dyn EcssTcReceiver>,
shared_tc_store: SharedPool,
tm_sender: Box<dyn EcssTmSender>,
tm_apid: u16,
verification_handler: StdVerifReporterWithSender,
) -> Self {
Self {
psb: PusServiceBase::new(
tc_receiver,
shared_tc_store,
tm_sender,
tm_apid,
verification_handler,
),
}
}
}
impl PusServiceHandler for PusService17TestHandler {
fn psb_mut(&mut self) -> &mut PusServiceBase {
&mut self.psb
}
fn psb(&self) -> &PusServiceBase {
&self.psb
}
fn handle_one_tc(
&mut self,
addr: StoreAddr,
token: VerificationToken<TcStateAccepted>,
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
self.copy_tc_to_buf(addr)?;
let (tc, _) = PusTc::from_bytes(&self.psb.pus_buf)?;
if tc.service() != 17 {
return Err(PusPacketHandlingError::WrongService(tc.service()));
}
if tc.subservice() == 1 {
let mut partial_error = None;
let time_stamp = self.psb().get_current_timestamp(&mut partial_error);
let result = self
.psb
.verification_handler
.get_mut()
.start_success(token, Some(&time_stamp))
.map_err(|_| PartialPusHandlingError::Verification);
let start_token = if let Ok(result) = result {
Some(result)
} else {
partial_error = Some(result.unwrap_err());
None
};
// Sequence count will be handled centrally in TM funnel.
let mut reply_header = SpHeader::tm_unseg(self.psb.tm_apid, 0, 0).unwrap();
let tc_header = PusTmSecondaryHeader::new_simple(17, 2, &time_stamp);
let ping_reply = PusTm::new(&mut reply_header, tc_header, None, true);
let result = self
.psb
.tm_sender
.send_tm(PusTmWrapper::Direct(ping_reply))
.map_err(PartialPusHandlingError::TmSend);
if let Err(err) = result {
partial_error = Some(err);
}
if let Some(start_token) = start_token {
if self
.psb
.verification_handler
.get_mut()
.completion_success(start_token, Some(&time_stamp))
.is_err()
{
partial_error = Some(PartialPusHandlingError::Verification)
}
}
if let Some(partial_error) = partial_error {
return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
partial_error,
));
};
return Ok(PusPacketHandlerResult::RequestHandled);
}
Ok(PusPacketHandlerResult::CustomSubservice(
tc.subservice(),
token,
))
}
}
#[cfg(test)]
mod tests {
use crate::pool::{LocalPool, PoolCfg, SharedPool};
use crate::pus::test::PusService17TestHandler;
use crate::pus::verification::{
RequestId, StdVerifReporterWithSender, VerificationReporterCfg,
};
use crate::pus::{MpscTcInStoreReceiver, MpscTmInStoreSender, PusServiceHandler};
use crate::tmtc::tm_helper::SharedTmStore;
use spacepackets::ecss::{PusPacket, SerializablePusPacket};
use spacepackets::tc::{PusTc, PusTcSecondaryHeader};
use spacepackets::tm::PusTm;
use spacepackets::{SequenceFlags, SpHeader};
use std::boxed::Box;
use std::sync::{mpsc, RwLock};
use std::vec;
const TEST_APID: u16 = 0x101;
#[test]
fn test_basic_ping_processing() {
let mut pus_buf: [u8; 64] = [0; 64];
let pool_cfg = PoolCfg::new(vec![(16, 16), (8, 32), (4, 64)]);
let tc_pool = LocalPool::new(pool_cfg.clone());
let tm_pool = LocalPool::new(pool_cfg);
let tc_pool_shared = SharedPool::new(RwLock::new(Box::new(tc_pool)));
let shared_tm_store = SharedTmStore::new(Box::new(tm_pool));
let tm_pool_shared = shared_tm_store.clone_backing_pool();
let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::channel();
let (tm_tx, tm_rx) = mpsc::channel();
let verif_sender =
MpscTmInStoreSender::new(0, "verif_sender", shared_tm_store.clone(), tm_tx.clone());
let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
let mut verification_handler =
StdVerifReporterWithSender::new(&verif_cfg, Box::new(verif_sender));
let test_srv_tm_sender = MpscTmInStoreSender::new(0, "TEST_SENDER", shared_tm_store, tm_tx);
let test_srv_tc_receiver = MpscTcInStoreReceiver::new(0, "TEST_RECEIVER", test_srv_tc_rx);
let mut pus_17_handler = PusService17TestHandler::new(
Box::new(test_srv_tc_receiver),
tc_pool_shared.clone(),
Box::new(test_srv_tm_sender),
TEST_APID,
verification_handler.clone(),
);
// Create a ping TC, verify acceptance.
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTc::new(&mut sp_header, sec_header, None, true);
let token = verification_handler.add_tc(&ping_tc);
let token = verification_handler
.acceptance_success(token, None)
.unwrap();
let tc_size = ping_tc.write_to_bytes(&mut pus_buf).unwrap();
let mut tc_pool = tc_pool_shared.write().unwrap();
let addr = tc_pool.add(&pus_buf[..tc_size]).unwrap();
drop(tc_pool);
// Send accepted TC to test service handler.
test_srv_tc_tx.send((addr, token.into())).unwrap();
let result = pus_17_handler.handle_next_packet();
assert!(result.is_ok());
// We should see 4 replies in the TM queue now: Acceptance TM, Start TM, ping reply and
// Completion TM
let mut next_msg = tm_rx.try_recv();
assert!(next_msg.is_ok());
let mut tm_addr = next_msg.unwrap();
let tm_pool = tm_pool_shared.read().unwrap();
let tm_raw = tm_pool.read(&tm_addr).unwrap();
let (tm, _) = PusTm::from_bytes(&tm_raw, 0).unwrap();
assert_eq!(tm.service(), 1);
assert_eq!(tm.subservice(), 1);
let req_id = RequestId::from_bytes(tm.user_data().unwrap()).unwrap();
assert_eq!(req_id, token.req_id());
// Start TM
next_msg = tm_rx.try_recv();
assert!(next_msg.is_ok());
tm_addr = next_msg.unwrap();
let tm_raw = tm_pool.read(&tm_addr).unwrap();
// Is generated with CDS short timestamp.
let (tm, _) = PusTm::from_bytes(&tm_raw, 7).unwrap();
assert_eq!(tm.service(), 1);
assert_eq!(tm.subservice(), 3);
let req_id = RequestId::from_bytes(tm.user_data().unwrap()).unwrap();
assert_eq!(req_id, token.req_id());
// Ping reply
next_msg = tm_rx.try_recv();
assert!(next_msg.is_ok());
tm_addr = next_msg.unwrap();
let tm_raw = tm_pool.read(&tm_addr).unwrap();
// Is generated with CDS short timestamp.
let (tm, _) = PusTm::from_bytes(&tm_raw, 7).unwrap();
assert_eq!(tm.service(), 17);
assert_eq!(tm.subservice(), 2);
assert!(tm.user_data().is_none());
// TM completion
next_msg = tm_rx.try_recv();
assert!(next_msg.is_ok());
tm_addr = next_msg.unwrap();
let tm_raw = tm_pool.read(&tm_addr).unwrap();
// Is generated with CDS short timestamp.
let (tm, _) = PusTm::from_bytes(&tm_raw, 7).unwrap();
assert_eq!(tm.service(), 1);
assert_eq!(tm.subservice(), 7);
let req_id = RequestId::from_bytes(tm.user_data().unwrap()).unwrap();
assert_eq!(req_id, token.req_id());
}
}

File diff suppressed because it is too large

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,60 @@
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use spacepackets::ecss::{EcssEnumU16, EcssEnumeration};
use spacepackets::util::UnsignedEnum;
use spacepackets::{ByteConversionError, SizeMissmatch};
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ResultU16 {
group_id: u8,
unique_id: u8,
}
impl ResultU16 {
pub const fn const_new(group_id: u8, unique_id: u8) -> Self {
Self {
group_id,
unique_id,
}
}
pub fn raw(&self) -> u16 {
((self.group_id as u16) << 8) | self.unique_id as u16
}
pub fn group_id(&self) -> u8 {
self.group_id
}
pub fn unique_id(&self) -> u8 {
self.unique_id
}
}
impl From<ResultU16> for EcssEnumU16 {
fn from(v: ResultU16) -> Self {
EcssEnumU16::new(v.raw())
}
}
impl UnsignedEnum for ResultU16 {
fn size(&self) -> usize {
core::mem::size_of::<u16>()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < 2 {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: 2,
}));
}
buf[0] = self.group_id;
buf[1] = self.unique_id;
Ok(self.size())
}
}
impl EcssEnumeration for ResultU16 {
fn pfc(&self) -> u8 {
16
}
}

250
satrs-core/src/seq_count.rs Normal file
View File

@@ -0,0 +1,250 @@
use core::cell::Cell;
#[cfg(feature = "alloc")]
use dyn_clone::DynClone;
use paste::paste;
use spacepackets::MAX_SEQ_COUNT;
#[cfg(feature = "std")]
pub use stdmod::*;
/// Core trait for objects which can provide a sequence count.
///
/// The core functions are not mutable on purpose to allow easier usage with
/// static structs when using the interior mutability pattern. This can be achieved by using
/// [Cell], [core::cell::RefCell] or atomic types.
pub trait SequenceCountProviderCore<Raw> {
fn get(&self) -> Raw;
fn increment(&self);
fn get_and_increment(&self) -> Raw {
let val = self.get();
self.increment();
val
}
}
/// Extension trait which allows cloning a sequence count provider after it was turned into
/// a trait object.
#[cfg(feature = "alloc")]
pub trait SequenceCountProvider<Raw>: SequenceCountProviderCore<Raw> + DynClone {}
#[cfg(feature = "alloc")]
dyn_clone::clone_trait_object!(SequenceCountProvider<u16>);
#[cfg(feature = "alloc")]
impl<T, Raw> SequenceCountProvider<Raw> for T where T: SequenceCountProviderCore<Raw> + Clone {}
#[derive(Default, Clone)]
pub struct SeqCountProviderSimple<T: Copy> {
seq_count: Cell<T>,
max_val: T,
}
macro_rules! impl_for_primitives {
($($ty: ident,)+) => {
$(
paste! {
impl SeqCountProviderSimple<$ty> {
pub fn [<new_ $ty _max_val>](max_val: $ty) -> Self {
Self {
seq_count: Cell::new(0),
max_val,
}
}
pub fn [<new_ $ty>]() -> Self {
Self {
seq_count: Cell::new(0),
max_val: $ty::MAX
}
}
}
impl SequenceCountProviderCore<$ty> for SeqCountProviderSimple<$ty> {
fn get(&self) -> $ty {
self.seq_count.get()
}
fn increment(&self) {
self.get_and_increment();
}
fn get_and_increment(&self) -> $ty {
let curr_count = self.seq_count.get();
if curr_count == self.max_val {
self.seq_count.set(0);
} else {
self.seq_count.set(curr_count + 1);
}
curr_count
}
}
}
)+
}
}
impl_for_primitives!(u8, u16, u32, u64,);
/// This is a sequence count provider which wraps around at [MAX_SEQ_COUNT].
pub struct CcsdsSimpleSeqCountProvider {
provider: SeqCountProviderSimple<u16>,
}
impl CcsdsSimpleSeqCountProvider {
pub fn new() -> Self {
Self {
provider: SeqCountProviderSimple::new_u16_max_val(MAX_SEQ_COUNT),
}
}
}
impl Default for CcsdsSimpleSeqCountProvider {
fn default() -> Self {
Self::new()
}
}
impl SequenceCountProviderCore<u16> for CcsdsSimpleSeqCountProvider {
delegate::delegate! {
to self.provider {
fn get(&self) -> u16;
fn increment(&self);
fn get_and_increment(&self) -> u16;
}
}
}
#[cfg(feature = "std")]
pub mod stdmod {
use super::*;
use std::sync::{Arc, Mutex};
macro_rules! sync_clonable_seq_counter_impl {
($($ty: ident,)+) => {
$(paste! {
/// These sequence counters can be shared between threads and can also be
/// configured to wrap around at specified maximum values. Please note that
/// the API provided by this class will not panic under [Mutex] lock errors,
/// but will instead yield 0 from the getter functions.
#[derive(Clone, Default)]
pub struct [<SeqCountProviderSync $ty:upper>] {
seq_count: Arc<Mutex<$ty>>,
max_val: $ty
}
impl [<SeqCountProviderSync $ty:upper>] {
pub fn new() -> Self {
Self::new_with_max_val($ty::MAX)
}
pub fn new_with_max_val(max_val: $ty) -> Self {
Self {
seq_count: Arc::default(),
max_val
}
}
}
impl SequenceCountProviderCore<$ty> for [<SeqCountProviderSync $ty:upper>] {
fn get(&self) -> $ty {
match self.seq_count.lock() {
Ok(counter) => *counter,
Err(_) => 0
}
}
fn increment(&self) {
self.get_and_increment();
}
fn get_and_increment(&self) -> $ty {
match self.seq_count.lock() {
Ok(mut counter) => {
let val = *counter;
if val == self.max_val {
*counter = 0;
} else {
*counter += 1;
}
val
}
Err(_) => 0,
}
}
}
})+
}
}
sync_clonable_seq_counter_impl!(u8, u16, u32, u64,);
}
#[cfg(test)]
mod tests {
use crate::seq_count::{
CcsdsSimpleSeqCountProvider, SeqCountProviderSimple, SeqCountProviderSyncU8,
SequenceCountProviderCore,
};
use spacepackets::MAX_SEQ_COUNT;
#[test]
fn test_u8_counter() {
let u8_counter = SeqCountProviderSimple::new_u8();
assert_eq!(u8_counter.get(), 0);
assert_eq!(u8_counter.get_and_increment(), 0);
assert_eq!(u8_counter.get_and_increment(), 1);
assert_eq!(u8_counter.get(), 2);
}
#[test]
fn test_u8_counter_overflow() {
let u8_counter = SeqCountProviderSimple::new_u8();
for _ in 0..256 {
u8_counter.increment();
}
assert_eq!(u8_counter.get(), 0);
}
#[test]
fn test_ccsds_counter() {
let ccsds_counter = CcsdsSimpleSeqCountProvider::default();
assert_eq!(ccsds_counter.get(), 0);
assert_eq!(ccsds_counter.get_and_increment(), 0);
assert_eq!(ccsds_counter.get_and_increment(), 1);
assert_eq!(ccsds_counter.get(), 2);
}
#[test]
fn test_ccsds_counter_overflow() {
let ccsds_counter = CcsdsSimpleSeqCountProvider::default();
for _ in 0..MAX_SEQ_COUNT + 1 {
ccsds_counter.increment();
}
assert_eq!(ccsds_counter.get(), 0);
}
#[test]
fn test_atomic_ref_counters() {
let sync_u8_counter = SeqCountProviderSyncU8::new();
assert_eq!(sync_u8_counter.get(), 0);
assert_eq!(sync_u8_counter.get_and_increment(), 0);
assert_eq!(sync_u8_counter.get_and_increment(), 1);
assert_eq!(sync_u8_counter.get(), 2);
}
#[test]
fn test_atomic_ref_counters_overflow() {
let sync_u8_counter = SeqCountProviderSyncU8::new();
for _ in 0..u8::MAX as u16 + 1 {
sync_u8_counter.increment();
}
assert_eq!(sync_u8_counter.get(), 0);
}
#[test]
fn test_atomic_ref_counters_overflow_custom_max_val() {
let sync_u8_counter = SeqCountProviderSyncU8::new_with_max_val(128);
for _ in 0..129 {
sync_u8_counter.increment();
}
assert_eq!(sync_u8_counter.get(), 0);
}
}

View File

@@ -4,7 +4,7 @@
//! 1. [CcsdsDistributor] component which dispatches received packets to a user-provided handler
//! 2. [CcsdsPacketHandler] trait which should be implemented by the user-provided packet handler.
//!
//! The [CcsdsDistributor] implements the [ReceivesCcsdsTc] and [ReceivesTc] trait which allows to
//! The [CcsdsDistributor] implements the [ReceivesCcsdsTc] and [ReceivesTcCore] trait which allows to
//! pass raw or CCSDS packets to it. Upon receiving a packet, it performs the following steps:
//!
//! 1. It tries to identify the target Application Process Identifier (APID) based on the
@@ -18,9 +18,10 @@
//! # Example
//!
//! ```rust
//! use fsrc_core::tmtc::ccsds_distrib::{CcsdsPacketHandler, CcsdsDistributor};
//! use fsrc_core::tmtc::ReceivesTc;
//! use satrs_core::tmtc::ccsds_distrib::{CcsdsPacketHandler, CcsdsDistributor};
//! use satrs_core::tmtc::{ReceivesTc, ReceivesTcCore};
//! use spacepackets::{CcsdsPacket, SpHeader};
//! use spacepackets::ecss::SerializablePusPacket;
//! use spacepackets::tc::PusTc;
//!
//! #[derive (Default)]
@@ -54,11 +55,11 @@
//! let mut ccsds_distributor = CcsdsDistributor::new(Box::new(apid_handler));
//!
//! // Create and pass PUS telecommand with a valid APID
//! let mut space_packet_header = SpHeader::tc(0x002, 0x34, 0).unwrap();
//! let mut space_packet_header = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
//! let mut pus_tc = PusTc::new_simple(&mut space_packet_header, 17, 1, None, true);
//! let mut test_buf: [u8; 32] = [0; 32];
//! let mut size = pus_tc
//! .write_to(test_buf.as_mut_slice())
//! .write_to_bytes(test_buf.as_mut_slice())
//! .expect("Error writing TC to buffer");
//! let tc_slice = &test_buf[0..size];
//! ccsds_distributor.pass_tc(&tc_slice).expect("Passing TC slice failed");
@@ -66,7 +67,7 @@
//! // Now pass a packet with an unknown APID to the distributor
//! pus_tc.set_apid(0x003);
//! size = pus_tc
//! .write_to(test_buf.as_mut_slice())
//! .write_to_bytes(test_buf.as_mut_slice())
//! .expect("Error writing TC to buffer");
//! let tc_slice = &test_buf[0..size];
//! ccsds_distributor.pass_tc(&tc_slice).expect("Passing TC slice failed");
@@ -84,10 +85,13 @@
//! .expect("Casting back to concrete type failed");
//! mutable_ref.mutable_foo();
//! ```
use crate::tmtc::{ReceivesCcsdsTc, ReceivesTc};
use crate::tmtc::{ReceivesCcsdsTc, ReceivesTcCore};
use alloc::boxed::Box;
use core::fmt::{Display, Formatter};
use downcast_rs::Downcast;
use spacepackets::{ByteConversionError, CcsdsPacket, SizeMissmatch, SpHeader};
#[cfg(feature = "std")]
use std::error::Error;
/// Generic trait for a handler or dispatcher object handling CCSDS packets.
///
@@ -114,18 +118,46 @@ pub trait CcsdsPacketHandler: Downcast {
downcast_rs::impl_downcast!(CcsdsPacketHandler assoc Error);
pub trait SendableCcsdsPacketHandler: CcsdsPacketHandler + Send {}
impl<T: CcsdsPacketHandler + Send> SendableCcsdsPacketHandler for T {}
downcast_rs::impl_downcast!(SendableCcsdsPacketHandler assoc Error);
/// The CCSDS distributor dispatches received CCSDS packets to a user provided packet handler.
///
/// The passed APID handler is required to be [Send]able to allow more ergonomic usage with
/// threads.
pub struct CcsdsDistributor<E> {
/// User provided APID handler stored as a generic trait object.
/// It can be cast back to the original concrete type using the [Self::apid_handler_ref] or
/// the [Self::apid_handler_mut] method.
pub apid_handler: Box<dyn CcsdsPacketHandler<Error = E>>,
pub apid_handler: Box<dyn SendableCcsdsPacketHandler<Error = E>>,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum CcsdsError<E> {
CustomError(E),
PacketError(ByteConversionError),
ByteConversionError(ByteConversionError),
}
impl<E: Display> Display for CcsdsError<E> {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
Self::CustomError(e) => write!(f, "{e}"),
Self::ByteConversionError(e) => write!(f, "{e}"),
}
}
}
#[cfg(feature = "std")]
impl<E: Error> Error for CcsdsError<E> {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
Self::CustomError(e) => e.source(),
Self::ByteConversionError(e) => e.source(),
}
}
}
impl<E: 'static> ReceivesCcsdsTc for CcsdsDistributor<E> {
@@ -136,38 +168,39 @@ impl<E: 'static> ReceivesCcsdsTc for CcsdsDistributor<E> {
}
}
impl<E: 'static> ReceivesTc for CcsdsDistributor<E> {
impl<E: 'static> ReceivesTcCore for CcsdsDistributor<E> {
type Error = CcsdsError<E>;
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
if tc_raw.len() < 7 {
return Err(CcsdsError::PacketError(
return Err(CcsdsError::ByteConversionError(
ByteConversionError::FromSliceTooSmall(SizeMissmatch {
found: tc_raw.len(),
expected: 7,
}),
));
}
let sp_header = SpHeader::from_raw_slice(tc_raw).map_err(|e| CcsdsError::PacketError(e))?;
let (sp_header, _) =
SpHeader::from_be_bytes(tc_raw).map_err(|e| CcsdsError::ByteConversionError(e))?;
self.dispatch_ccsds(&sp_header, tc_raw)
}
}
impl<E: 'static> CcsdsDistributor<E> {
pub fn new(apid_handler: Box<dyn CcsdsPacketHandler<Error = E>>) -> Self {
pub fn new(apid_handler: Box<dyn SendableCcsdsPacketHandler<Error = E>>) -> Self {
CcsdsDistributor { apid_handler }
}
/// This function can be used to retrieve a reference to the concrete instance of the APID
/// handler after it was passed to the distributor. See the
/// [module documentation][crate::tmtc::ccsds_distrib] for an example.
pub fn apid_handler_ref<T: CcsdsPacketHandler<Error = E>>(&self) -> Option<&T> {
pub fn apid_handler_ref<T: SendableCcsdsPacketHandler<Error = E>>(&self) -> Option<&T> {
self.apid_handler.downcast_ref::<T>()
}
/// This function can be used to retrieve a mutable reference to the concrete instance of the
/// APID handler after it was passed to the distributor.
pub fn apid_handler_mut<T: CcsdsPacketHandler<Error = E>>(&mut self) -> Option<&mut T> {
pub fn apid_handler_mut<T: SendableCcsdsPacketHandler<Error = E>>(&mut self) -> Option<&mut T> {
self.apid_handler.downcast_mut::<T>()
}
@@ -192,16 +225,21 @@ impl<E: 'static> CcsdsDistributor<E> {
pub(crate) mod tests {
use super::*;
use crate::tmtc::ccsds_distrib::{CcsdsDistributor, CcsdsPacketHandler};
use spacepackets::ecss::SerializablePusPacket;
use spacepackets::tc::PusTc;
use spacepackets::CcsdsPacket;
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use std::vec::Vec;
fn is_send<T: Send>(_: &T) {}
pub fn generate_ping_tc(buf: &mut [u8]) -> &[u8] {
let mut sph = SpHeader::tc(0x002, 0x34, 0).unwrap();
let mut sph = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
let pus_tc = PusTc::new_simple(&mut sph, 17, 1, None, true);
let size = pus_tc.write_to(buf).expect("Error writing TC to buffer");
let size = pus_tc
.write_to_bytes(buf)
.expect("Error writing TC to buffer");
assert_eq!(size, 13);
&buf[0..size]
}
@@ -289,6 +327,7 @@ pub(crate) mod tests {
unknown_packet_queue: unknown_packet_queue.clone(),
};
let mut ccsds_distrib = CcsdsDistributor::new(Box::new(apid_handler));
is_send(&ccsds_distrib);
let mut test_buf: [u8; 32] = [0; 32];
let tc_slice = generate_ping_tc(test_buf.as_mut_slice());
@@ -310,11 +349,11 @@ pub(crate) mod tests {
unknown_packet_queue: unknown_packet_queue.clone(),
};
let mut ccsds_distrib = CcsdsDistributor::new(Box::new(apid_handler));
let mut sph = SpHeader::tc(0x004, 0x34, 0).unwrap();
let mut sph = SpHeader::tc_unseg(0x004, 0x34, 0).unwrap();
let pus_tc = PusTc::new_simple(&mut sph, 17, 1, None, true);
let mut test_buf: [u8; 32] = [0; 32];
pus_tc
.write_to(test_buf.as_mut_slice())
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
ccsds_distrib.pass_tc(&test_buf).expect("Passing TC failed");
let recvd = unknown_packet_queue.lock().unwrap().pop_front();

View File

@@ -0,0 +1,94 @@
//! Telemetry and Telecommanding (TMTC) module. Contains packet routing components with special
//! support for CCSDS and ECSS packets.
//!
//! The distributor modules provided by this module use trait objects provided by the user to
//! directly dispatch received packets to packet listeners based on packet fields like the CCSDS
//! Application Process ID (APID) or the ECSS PUS service type. This allows for fast packet
//! routing without the overhead and complication of using message queues. However, it also requires
#[cfg(feature = "alloc")]
use downcast_rs::{impl_downcast, Downcast};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use spacepackets::{ByteConversionError, SizeMissmatch, SpHeader};
#[cfg(feature = "alloc")]
pub mod ccsds_distrib;
#[cfg(feature = "alloc")]
pub mod pus_distrib;
pub mod tm_helper;
#[cfg(feature = "alloc")]
pub use ccsds_distrib::{CcsdsDistributor, CcsdsError, CcsdsPacketHandler};
#[cfg(feature = "alloc")]
pub use pus_distrib::{PusDistributor, PusServiceProvider};
pub type TargetId = u32;
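/// Address type which combines a generic [TargetId] with an additional unique u32 ID.
///
/// A minimal round-trip sketch for the byte conversion helpers:
///
/// ```rust
/// use satrs_core::tmtc::AddressableId;
///
/// let id = AddressableId {
///     target_id: 5,
///     unique_id: 10,
/// };
/// let mut buf: [u8; 8] = [0; 8];
/// id.write_to_be_bytes(&mut buf).unwrap();
/// assert_eq!(AddressableId::from_raw_be(&buf).unwrap(), id);
/// ```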
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct AddressableId {
pub target_id: TargetId,
pub unique_id: u32,
}
impl AddressableId {
pub fn from_raw_be(buf: &[u8]) -> Result<Self, ByteConversionError> {
if buf.len() < 8 {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: 8,
}));
}
Ok(Self {
target_id: u32::from_be_bytes(buf[0..4].try_into().unwrap()),
unique_id: u32::from_be_bytes(buf[4..8].try_into().unwrap()),
})
}
pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < 8 {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: 8,
}));
}
buf[0..4].copy_from_slice(&self.target_id.to_be_bytes());
buf[4..8].copy_from_slice(&self.unique_id.to_be_bytes());
Ok(8)
}
}
/// Generic trait for objects which can receive any telecommand in the form of a raw bytestream,
/// with no assumptions about the underlying protocol.
///
/// This trait is implemented by both the [crate::tmtc::pus_distrib::PusDistributor] and the
/// [crate::tmtc::ccsds_distrib::CcsdsDistributor], which allows passing the respective packets
/// into them in raw byte format.
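///
/// A minimal sketch of an implementor (the `TcBuffer` type below is purely illustrative and
/// not part of this crate):
///
/// ```rust
/// use satrs_core::tmtc::ReceivesTcCore;
///
/// #[derive(Default)]
/// struct TcBuffer {
///     tcs: Vec<Vec<u8>>,
/// }
///
/// impl ReceivesTcCore for TcBuffer {
///     type Error = ();
///
///     fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
///         self.tcs.push(tc_raw.to_vec());
///         Ok(())
///     }
/// }
///
/// let mut buffer = TcBuffer::default();
/// buffer.pass_tc(&[0x01, 0x02, 0x03]).expect("passing TC failed");
/// assert_eq!(buffer.tcs.len(), 1);
/// ```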
pub trait ReceivesTcCore {
type Error;
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error>;
}
/// Extension trait of [ReceivesTcCore] which allows downcasting by implementing [Downcast] and
/// is also sendable.
#[cfg(feature = "alloc")]
pub trait ReceivesTc: ReceivesTcCore + Downcast + Send {}
/// Blanket implementation to automatically implement [ReceivesTc] when the [alloc] feature
/// is enabled.
#[cfg(feature = "alloc")]
impl<T> ReceivesTc for T where T: ReceivesTcCore + Send + 'static {}
#[cfg(feature = "alloc")]
impl_downcast!(ReceivesTc assoc Error);
/// Generic trait for objects which can receive CCSDS space packets, for example ECSS PUS packets
/// or CCSDS File Delivery Protocol (CFDP) packets.
///
/// This trait is implemented by both the [crate::tmtc::pus_distrib::PusDistributor] and the
/// [crate::tmtc::ccsds_distrib::CcsdsDistributor], which allows passing the respective packets
/// into them in raw byte format or in CCSDS format.
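///
/// A minimal sketch of an implementor (the `ApidRecorder` type below is purely illustrative
/// and not part of this crate):
///
/// ```rust
/// use satrs_core::tmtc::ReceivesCcsdsTc;
/// use spacepackets::{CcsdsPacket, SpHeader};
///
/// #[derive(Default)]
/// struct ApidRecorder {
///     apids: Vec<u16>,
/// }
///
/// impl ReceivesCcsdsTc for ApidRecorder {
///     type Error = ();
///
///     fn pass_ccsds(&mut self, header: &SpHeader, _tc_raw: &[u8]) -> Result<(), Self::Error> {
///         self.apids.push(header.apid());
///         Ok(())
///     }
/// }
///
/// let mut recorder = ApidRecorder::default();
/// let header = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
/// recorder.pass_ccsds(&header, &[]).expect("passing CCSDS TC failed");
/// assert_eq!(recorder.apids[0], 0x02);
/// ```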
pub trait ReceivesCcsdsTc {
type Error;
fn pass_ccsds(&mut self, header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error>;
}

View File

@@ -5,9 +5,9 @@
//! 2. [PusServiceProvider] trait which should be implemented by the user-provided PUS packet
//! handler.
//!
//! The [PusDistributor] implements the [ReceivesEcssPusTc], [ReceivesCcsdsTc] and the [ReceivesTc]
//! trait which allows to pass raw packets, CCSDS packets and PUS TC packets into it.
//! Upon receiving a packet, it performs the following steps:
//! The [PusDistributor] implements the [ReceivesEcssPusTc], [ReceivesCcsdsTc] and the
//! [ReceivesTcCore] trait which allows to pass raw packets, CCSDS packets and PUS TC packets into
//! it. Upon receiving a packet, it performs the following steps:
//!
//! 1. It tries to extract the [SpHeader] and [PusTc] objects from the raw bytestream. If this
//! process fails, a [PusDistribError::PusError] is returned to the user.
@@ -17,8 +17,9 @@
//! # Example
//!
//! ```rust
//! use fsrc_core::tmtc::pus_distrib::{PusDistributor, PusServiceProvider};
//! use fsrc_core::tmtc::ReceivesTc;
//! use spacepackets::ecss::SerializablePusPacket;
//! use satrs_core::tmtc::pus_distrib::{PusDistributor, PusServiceProvider};
//! use satrs_core::tmtc::{ReceivesTc, ReceivesTcCore};
//! use spacepackets::SpHeader;
//! use spacepackets::tc::PusTc;
//! struct ConcretePusHandler {
@@ -43,11 +44,11 @@
//! let mut pus_distributor = PusDistributor::new(Box::new(service_handler));
//!
//! // Create and pass PUS ping telecommand with a valid APID
//! let mut space_packet_header = SpHeader::tc(0x002, 0x34, 0).unwrap();
//! let mut space_packet_header = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
//! let mut pus_tc = PusTc::new_simple(&mut space_packet_header, 17, 1, None, true);
//! let mut test_buf: [u8; 32] = [0; 32];
//! let mut size = pus_tc
//! .write_to(test_buf.as_mut_slice())
//! .write_to_bytes(test_buf.as_mut_slice())
//! .expect("Error writing TC to buffer");
//! let tc_slice = &test_buf[0..size];
//!
@@ -60,12 +61,16 @@
//! .expect("Casting back to concrete type failed");
//! assert_eq!(concrete_handler_ref.handler_call_count, 1);
//! ```
use crate::tmtc::{ReceivesCcsdsTc, ReceivesEcssPusTc, ReceivesTc};
use crate::pus::ReceivesEcssPusTc;
use crate::tmtc::{ReceivesCcsdsTc, ReceivesTcCore};
use alloc::boxed::Box;
use core::fmt::{Display, Formatter};
use downcast_rs::Downcast;
use spacepackets::ecss::{PusError, PusPacket};
use spacepackets::tc::PusTc;
use spacepackets::SpHeader;
#[cfg(feature = "std")]
use std::error::Error;
pub trait PusServiceProvider: Downcast {
type Error;
@@ -78,12 +83,22 @@ pub trait PusServiceProvider: Downcast {
}
downcast_rs::impl_downcast!(PusServiceProvider assoc Error);
pub trait SendablePusServiceProvider: PusServiceProvider + Send {}
impl<T: Send + PusServiceProvider> SendablePusServiceProvider for T {}
downcast_rs::impl_downcast!(SendablePusServiceProvider assoc Error);
/// Generic distributor object which dispatches received packets to a user provided handler.
///
/// This distributor expects the passed trait object to be [Send]able to allow more ergonomic
/// usage with threads.
pub struct PusDistributor<E> {
pub service_provider: Box<dyn PusServiceProvider<Error = E>>,
pub service_provider: Box<dyn SendablePusServiceProvider<Error = E>>,
}
impl<E> PusDistributor<E> {
pub fn new(service_provider: Box<dyn PusServiceProvider<Error = E>>) -> Self {
pub fn new(service_provider: Box<dyn SendablePusServiceProvider<Error = E>>) -> Self {
PusDistributor { service_provider }
}
}
@@ -94,12 +109,31 @@ pub enum PusDistribError<E> {
PusError(PusError),
}
impl<E: 'static> ReceivesTc for PusDistributor<E> {
impl<E: Display> Display for PusDistribError<E> {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
PusDistribError::CustomError(e) => write!(f, "{e}"),
PusDistribError::PusError(e) => write!(f, "{e}"),
}
}
}
#[cfg(feature = "std")]
impl<E: Error> Error for PusDistribError<E> {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
Self::CustomError(e) => e.source(),
Self::PusError(e) => e.source(),
}
}
}
impl<E: 'static> ReceivesTcCore for PusDistributor<E> {
type Error = PusDistribError<E>;
fn pass_tc(&mut self, tm_raw: &[u8]) -> Result<(), Self::Error> {
// Convert to ccsds and call pass_ccsds
let sp_header = SpHeader::from_raw_slice(tm_raw)
.map_err(|e| PusDistribError::PusError(PusError::PacketError(e)))?;
let (sp_header, _) = SpHeader::from_be_bytes(tm_raw)
.map_err(|e| PusDistribError::PusError(PusError::ByteConversion(e)))?;
self.pass_ccsds(&sp_header, tm_raw)
}
}
@@ -107,8 +141,7 @@ impl<E: 'static> ReceivesTc for PusDistributor<E> {
impl<E: 'static> ReceivesCcsdsTc for PusDistributor<E> {
type Error = PusDistribError<E>;
fn pass_ccsds(&mut self, header: &SpHeader, tm_raw: &[u8]) -> Result<(), Self::Error> {
let (tc, _) =
PusTc::new_from_raw_slice(tm_raw).map_err(|e| PusDistribError::PusError(e))?;
let (tc, _) = PusTc::from_bytes(tm_raw).map_err(|e| PusDistribError::PusError(e))?;
self.pass_pus_tc(header, &tc)
}
}
@@ -123,11 +156,13 @@ impl<E: 'static> ReceivesEcssPusTc for PusDistributor<E> {
}
impl<E: 'static> PusDistributor<E> {
pub fn service_provider_ref<T: PusServiceProvider<Error = E>>(&self) -> Option<&T> {
pub fn service_provider_ref<T: SendablePusServiceProvider<Error = E>>(&self) -> Option<&T> {
self.service_provider.downcast_ref::<T>()
}
pub fn service_provider_mut<T: PusServiceProvider<Error = E>>(&mut self) -> Option<&mut T> {
pub fn service_provider_mut<T: SendablePusServiceProvider<Error = E>>(
&mut self,
) -> Option<&mut T> {
self.service_provider.downcast_mut::<T>()
}
}
@@ -148,6 +183,8 @@ mod tests {
#[cfg(feature = "std")]
use std::sync::{Arc, Mutex};
fn is_send<T: Send>(_: &T) {}
struct PusHandlerSharedQueue {
pub pus_queue: Arc<Mutex<VecDeque<(u8, u16, Vec<u8>)>>>,
}
@@ -264,7 +301,7 @@ mod tests {
let pus_distrib = PusDistributor {
service_provider: Box::new(pus_handler),
};
is_send(&pus_distrib);
let apid_handler = ApidHandlerShared {
pus_distrib,
handler_base,

View File

@@ -0,0 +1,93 @@
use spacepackets::time::cds::TimeProvider;
use spacepackets::time::TimeWriter;
use spacepackets::tm::{PusTm, PusTmSecondaryHeader};
use spacepackets::SpHeader;
#[cfg(feature = "std")]
pub use std_mod::*;
#[cfg(feature = "std")]
pub mod std_mod {
use crate::pool::{ShareablePoolProvider, SharedPool, StoreAddr};
use crate::pus::EcssTmtcError;
use spacepackets::ecss::SerializablePusPacket;
use spacepackets::tm::PusTm;
use std::sync::{Arc, RwLock};
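/// Clonable helper which wraps the telemetry pool behind an [Arc] and [RwLock] so that it
/// can be shared between threads.
///
/// A minimal sketch of storing a PUS TM packet, with an arbitrarily chosen pool
/// configuration and APID:
///
/// ```rust
/// use satrs_core::pool::{LocalPool, PoolCfg};
/// use satrs_core::tmtc::tm_helper::SharedTmStore;
/// use spacepackets::tm::{PusTm, PusTmSecondaryHeader};
/// use spacepackets::SpHeader;
///
/// let pool_cfg = PoolCfg::new(vec![(10, 32), (10, 64)]);
/// let tm_store = SharedTmStore::new(Box::new(LocalPool::new(pool_cfg)));
/// let time_stamp: [u8; 7] = [0; 7];
/// let mut sp_header = SpHeader::tm_unseg(0x02, 0, 0).unwrap();
/// let sec_header = PusTmSecondaryHeader::new_simple(17, 2, &time_stamp);
/// let tm = PusTm::new(&mut sp_header, sec_header, None, true);
/// let addr = tm_store.add_pus_tm(&tm).expect("adding TM to shared store failed");
/// ```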
#[derive(Clone)]
pub struct SharedTmStore {
pool: SharedPool,
}
impl SharedTmStore {
pub fn new(backing_pool: ShareablePoolProvider) -> Self {
Self {
pool: Arc::new(RwLock::new(backing_pool)),
}
}
pub fn clone_backing_pool(&self) -> SharedPool {
self.pool.clone()
}
pub fn add_pus_tm(&self, pus_tm: &PusTm) -> Result<StoreAddr, EcssTmtcError> {
let mut pg = self.pool.write().map_err(|_| EcssTmtcError::StoreLock)?;
let (addr, buf) = pg.free_element(pus_tm.len_packed())?;
pus_tm
.write_to_bytes(buf)
.expect("writing PUS TM to store failed");
Ok(addr)
}
}
}
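/// Helper which generates PUS telemetry packets with a CDS short timestamp for a fixed APID.
///
/// A minimal usage sketch with an arbitrarily chosen APID and sequence count, assuming the
/// `std` feature of the underlying time provider is available:
///
/// ```rust
/// use satrs_core::tmtc::tm_helper::PusTmWithCdsShortHelper;
/// use spacepackets::ecss::PusPacket;
/// use spacepackets::time::cds::TimeProvider;
///
/// let mut helper = PusTmWithCdsShortHelper::new(0x02);
/// let stamper = TimeProvider::from_now_with_u16_days().unwrap();
/// let tm = helper.create_pus_tm_with_stamper(17, 2, None, &stamper, 0);
/// assert_eq!(tm.service(), 17);
/// assert_eq!(tm.subservice(), 2);
/// ```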
pub struct PusTmWithCdsShortHelper {
apid: u16,
cds_short_buf: [u8; 7],
}
impl PusTmWithCdsShortHelper {
pub fn new(apid: u16) -> Self {
Self {
apid,
cds_short_buf: [0; 7],
}
}
#[cfg(feature = "std")]
pub fn create_pus_tm_timestamp_now<'a>(
&'a mut self,
service: u8,
subservice: u8,
source_data: Option<&'a [u8]>,
seq_count: u16,
) -> PusTm {
let time_stamp = TimeProvider::from_now_with_u16_days().unwrap();
time_stamp.write_to_bytes(&mut self.cds_short_buf).unwrap();
self.create_pus_tm_common(service, subservice, source_data, seq_count)
}
pub fn create_pus_tm_with_stamper<'a>(
&'a mut self,
service: u8,
subservice: u8,
source_data: Option<&'a [u8]>,
stamper: &TimeProvider,
seq_count: u16,
) -> PusTm {
stamper.write_to_bytes(&mut self.cds_short_buf).unwrap();
self.create_pus_tm_common(service, subservice, source_data, seq_count)
}
fn create_pus_tm_common<'a>(
&'a self,
service: u8,
subservice: u8,
source_data: Option<&'a [u8]>,
seq_count: u16,
) -> PusTm {
let mut reply_header = SpHeader::tm_unseg(self.apid, seq_count, 0).unwrap();
let tc_header = PusTmSecondaryHeader::new_simple(service, subservice, &self.cds_short_buf);
PusTm::new(&mut reply_header, tc_header, source_data, true)
}
}

View File

@@ -0,0 +1,164 @@
#![allow(dead_code)]
use core::mem::size_of;
use serde::{Deserialize, Serialize};
use spacepackets::ecss::{Ptc, RealPfc, UnsignedPfc};
use spacepackets::time::cds::TimeProvider;
use spacepackets::time::{CcsdsTimeProvider, TimeWriter};
enum NumOfParamsInfo {
/// The parameter entry is a scalar field
Scalar = 0b00,
/// The parameter entry is a vector, and its length field is one byte wide (max. 255 entries)
VecLenFieldOneByte = 0b01,
/// The parameter entry is a vector, and its length field is two bytes wide (max. 65535 entries)
VecLenFieldTwoBytes = 0b10,
/// The parameter entry is a matrix, and its length field contains a one byte row number
/// and a one byte column number.
MatrixRowsAndColumns = 0b11,
}
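/// Purely illustrative helper (not used by the code below) which shows how the leading info
/// byte of an individually validated parameter entry is composed from the validity flag and
/// the parameter structure information.
fn compose_info_byte(valid: bool, num_info: NumOfParamsInfo) -> u8 {
    HAS_VALIDITY_MASK | (valid as u8) << 6 | (num_info as u8) << 4
}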
const HAS_VALIDITY_MASK: u8 = 1 << 7;
struct ParamWithValidity<T> {
valid: bool,
val: T,
}
struct TestMgmHk {
temp: f32,
mgm_vals: [u16; 3],
}
struct TestMgmHkWithIndividualValidity {
temp: ParamWithValidity<f32>,
mgm_vals: ParamWithValidity<[u16; 3]>,
}
#[derive(Serialize, Deserialize)]
struct TestMgmHkWithGroupValidity {
last_valid_stamp: TimeProvider,
valid: bool,
temp: f32,
mgm_vals: [u16; 3],
}
impl TestMgmHk {
pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ()> {
let mut curr_idx = 0;
buf[curr_idx..curr_idx + size_of::<f32>()].copy_from_slice(&self.temp.to_be_bytes());
curr_idx += size_of::<f32>();
for val in self.mgm_vals {
buf[curr_idx..curr_idx + size_of::<u16>()].copy_from_slice(&val.to_be_bytes());
curr_idx += size_of::<u16>();
}
Ok(curr_idx)
}
}
/// This could in principle be auto-generated.
impl TestMgmHkWithIndividualValidity {
pub fn write_to_be_bytes_self_describing(&self, buf: &mut [u8]) -> Result<usize, ()> {
let mut curr_idx = 0;
buf[curr_idx] = 0;
buf[curr_idx] |= HAS_VALIDITY_MASK | (self.temp.valid as u8) << 6;
curr_idx += 1;
buf[curr_idx] = Ptc::Real as u8;
curr_idx += 1;
buf[curr_idx] = RealPfc::Float as u8;
curr_idx += 1;
buf[curr_idx..curr_idx + size_of::<f32>()].copy_from_slice(&self.temp.val.to_be_bytes());
curr_idx += size_of::<f32>();
buf[curr_idx] = 0;
buf[curr_idx] |= HAS_VALIDITY_MASK
| (self.mgm_vals.valid as u8) << 6
| (NumOfParamsInfo::VecLenFieldOneByte as u8) << 4;
curr_idx += 1;
buf[curr_idx] = Ptc::UnsignedInt as u8;
curr_idx += 1;
buf[curr_idx] = UnsignedPfc::TwoBytes as u8;
curr_idx += 1;
buf[curr_idx] = 3;
curr_idx += 1;
for val in self.mgm_vals.val {
buf[curr_idx..curr_idx + size_of::<u16>()].copy_from_slice(&val.to_be_bytes());
curr_idx += size_of::<u16>();
}
Ok(curr_idx)
}
}
impl TestMgmHkWithGroupValidity {
pub fn write_to_be_bytes_self_describing(&self, buf: &mut [u8]) -> Result<usize, ()> {
let mut curr_idx = 0;
buf[curr_idx] = self.valid as u8;
curr_idx += 1;
self.last_valid_stamp
.write_to_bytes(&mut buf[curr_idx..curr_idx + self.last_valid_stamp.len_as_bytes()])
.unwrap();
curr_idx += self.last_valid_stamp.len_as_bytes();
buf[curr_idx] = 0;
curr_idx += 1;
buf[curr_idx] = Ptc::Real as u8;
curr_idx += 1;
buf[curr_idx] = RealPfc::Float as u8;
curr_idx += 1;
buf[curr_idx..curr_idx + size_of::<f32>()].copy_from_slice(&self.temp.to_be_bytes());
curr_idx += size_of::<f32>();
buf[curr_idx] = 0;
buf[curr_idx] |= (NumOfParamsInfo::VecLenFieldOneByte as u8) << 4;
curr_idx += 1;
buf[curr_idx] = Ptc::UnsignedInt as u8;
curr_idx += 1;
buf[curr_idx] = UnsignedPfc::TwoBytes as u8;
curr_idx += 1;
buf[curr_idx] = 3;
for val in self.mgm_vals {
buf[curr_idx..curr_idx + size_of::<u16>()].copy_from_slice(&val.to_be_bytes());
curr_idx += size_of::<u16>();
}
Ok(curr_idx)
}
}
#[test]
pub fn main() {
let mut raw_buf: [u8; 32] = [0; 32];
let mgm_hk = TestMgmHk {
temp: 20.0,
mgm_vals: [0x1f1f, 0x2f2f, 0x3f3f],
};
// 4 byte float + 3 * 2 bytes MGM values
let written = mgm_hk.write_to_be_bytes(&mut raw_buf).unwrap();
assert_eq!(written, 10);
let mgm_hk_individual_validity = TestMgmHkWithIndividualValidity {
temp: ParamWithValidity {
valid: true,
val: 20.0,
},
mgm_vals: ParamWithValidity {
valid: true,
val: [0x1f1f, 0x2f2f, 0x3f3f],
},
};
let written = mgm_hk_individual_validity
.write_to_be_bytes_self_describing(&mut raw_buf)
.unwrap();
// 3 byte float description, 4 byte float, 4 byte MGM val description, 3 * 2 bytes MGM values
assert_eq!(written, 17);
// The easiest and probably most flexible approach, trading TM downlink capacity for
// convenience: use a JSON format.
let mgm_hk_group_validity = TestMgmHkWithGroupValidity {
last_valid_stamp: TimeProvider::from_now_with_u16_days().unwrap(),
valid: false,
temp: 20.0,
mgm_vals: [0x1f1f, 0x2f2f, 0x3f3f],
};
let mgm_as_json_str = serde_json::to_string(&mgm_hk_group_validity).unwrap();
println!(
"JSON string with length {}: {}",
mgm_as_json_str.len(),
mgm_as_json_str
);
}

View File

@@ -1,4 +1,4 @@
use fsrc_core::pool::{LocalPool, PoolCfg, PoolGuard, PoolProvider, StoreAddr};
use satrs_core::pool::{LocalPool, PoolCfg, PoolGuard, PoolProvider, StoreAddr};
use std::ops::DerefMut;
use std::sync::mpsc;
use std::sync::mpsc::{Receiver, Sender};
@@ -20,14 +20,15 @@ fn threaded_usage() {
});
let jh1 = thread::spawn(move || {
let mut pool_access = shared_clone.write().unwrap();
let addr;
{
addr = rx.recv().expect("Receiving store address failed");
let mut pool_access = shared_clone.write().unwrap();
let pg = PoolGuard::new(pool_access.deref_mut(), addr);
let read_res = pg.read().expect("Reading failed");
assert_eq!(read_res, DUMMY_DATA);
}
let pool_access = shared_clone.read().unwrap();
assert!(!pool_access.has_element_at(&addr).expect("Invalid address"));
});
jh0.join().unwrap();

View File

@@ -0,0 +1,94 @@
#![allow(dead_code, unused_imports)]
use satrs_core::events::{
EventU32, EventU32TypedSev, GenericEvent, HasSeverity, LargestEventRaw, LargestGroupIdRaw,
Severity, SeverityInfo, SeverityLow, SeverityMedium,
};
use std::convert::AsRef;
#[derive(Debug)]
struct GroupIdIntrospection {
name: &'static str,
id: LargestGroupIdRaw,
}
#[derive(Debug)]
struct EventIntrospection {
name: &'static str,
group_id: GroupIdIntrospection,
event: &'static EventU32,
info: &'static str,
}
//#[event(descr="This is some info event")]
const INFO_EVENT_0: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::const_new(0, 0);
const INFO_EVENT_0_ERASED: EventU32 = EventU32::const_from_info(INFO_EVENT_0);
// This is ideally auto-generated
const INFO_EVENT_0_INTROSPECTION: EventIntrospection = EventIntrospection {
name: "INFO_EVENT_0",
group_id: GroupIdIntrospection {
id: 0,
name: "Group ID 0 without name",
},
event: &INFO_EVENT_0_ERASED,
info: "This is some info event",
};
//#[event(descr="This is some low severity event")]
const SOME_LOW_SEV_EVENT: EventU32TypedSev<SeverityLow> = EventU32TypedSev::const_new(0, 12);
//const EVENT_LIST: [&'static Event; 2] = [&INFO_EVENT_0, &SOME_LOW_SEV_EVENT];
//#[event_group]
const TEST_GROUP_NAME: u16 = 1;
// Auto-generated?
const TEST_GROUP_NAME_NAME: &str = "TEST_GROUP_NAME";
//#[event(desc="Some medium severity event")]
const MEDIUM_SEV_EVENT_IN_OTHER_GROUP: EventU32TypedSev<SeverityMedium> =
EventU32TypedSev::const_new(TEST_GROUP_NAME, 0);
const MEDIUM_SEV_EVENT_IN_OTHER_GROUP_REDUCED: EventU32 =
EventU32::const_from_medium(MEDIUM_SEV_EVENT_IN_OTHER_GROUP);
// Also auto-generated
const MEDIUM_SEV_EVENT_IN_OTHER_GROUP_INTROSPECTION: EventIntrospection = EventIntrospection {
name: "MEDIUM_SEV_EVENT_IN_OTHER_GROUP",
group_id: GroupIdIntrospection {
name: TEST_GROUP_NAME_NAME,
id: TEST_GROUP_NAME,
},
event: &MEDIUM_SEV_EVENT_IN_OTHER_GROUP_REDUCED,
info: "Some medium severity event",
};
const CONST_SLICE: &'static [u8] = &[0, 1, 2, 3];
const INTROSPECTION_FOR_TEST_GROUP_0: [&EventIntrospection; 2] =
[&INFO_EVENT_0_INTROSPECTION, &INFO_EVENT_0_INTROSPECTION];
//const INTROSPECTION_FOR_TABLE: &'static [&EventIntrospection] = &INTROSPECTION_FOR_TEST_GROUP_0;
const INTROSPECTION_FOR_TEST_GROUP_NAME: [&EventIntrospection; 1] =
[&MEDIUM_SEV_EVENT_IN_OTHER_GROUP_INTROSPECTION];
//const BLAH: &'static [&EventIntrospection] = &INTROSPECTION_FOR_TEST_GROUP_NAME;
const ALL_EVENTS: [&[&EventIntrospection]; 2] = [
&INTROSPECTION_FOR_TEST_GROUP_0,
&INTROSPECTION_FOR_TEST_GROUP_NAME,
];
#[test]
fn main() {
//let test = stringify!(INFO_EVENT);
//println!("{:?}", test);
//for event in EVENT_LIST {
// println!("{:?}", event);
//}
//for events in ALL_EVENTS.into_iter().flatten() {
// dbg!("{:?}", events);
//}
//for introspection_info in INTROSPECTION_FOR_TEST_GROUP {
// dbg!("{:?}", introspection_info);
//}
//let test_struct =
}

View File

@@ -0,0 +1,162 @@
use satrs_core::event_man::{
EventManagerWithMpscQueue, MpscEventU32Receiver, MpscEventU32SendProvider, SendEventProvider,
};
use satrs_core::events::{EventU32, EventU32TypedSev, Severity, SeverityInfo};
use satrs_core::params::U32Pair;
use satrs_core::params::{Params, ParamsHeapless, WritableToBeBytes};
use satrs_core::pus::event_man::{
DefaultPusMgmtBackendProvider, EventReporter, PusEventDispatcher,
};
use satrs_core::pus::MpscTmAsVecSender;
use spacepackets::ecss::{PusError, PusPacket};
use spacepackets::tm::PusTm;
use std::sync::mpsc::{channel, SendError, TryRecvError};
use std::thread;
const INFO_EVENT: EventU32TypedSev<SeverityInfo> =
EventU32TypedSev::<SeverityInfo>::const_new(1, 0);
const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5);
const EMPTY_STAMP: [u8; 7] = [0; 7];
#[derive(Debug, Clone)]
pub enum CustomTmSenderError {
SendError(SendError<Vec<u8>>),
PusError(PusError),
}
#[test]
fn test_threaded_usage() {
let (event_sender, event_man_receiver) = channel();
let event_receiver = MpscEventU32Receiver::new(event_man_receiver);
let mut event_man = EventManagerWithMpscQueue::new(Box::new(event_receiver));
let (pus_event_man_tx, pus_event_man_rx) = channel();
let pus_event_man_send_provider = MpscEventU32SendProvider::new(1, pus_event_man_tx);
event_man.subscribe_all(pus_event_man_send_provider.id());
event_man.add_sender(pus_event_man_send_provider);
let (event_tx, event_rx) = channel();
let reporter = EventReporter::new(0x02, 128).expect("Creating event reporter failed");
let backend = DefaultPusMgmtBackendProvider::<EventU32>::default();
let mut pus_event_man = PusEventDispatcher::new(reporter, Box::new(backend));
// PUS + Generic event manager thread
let jh0 = thread::spawn(move || {
let mut sender = MpscTmAsVecSender::new(0, "event_sender", event_tx);
let mut event_cnt = 0;
let mut params_array: [u8; 128] = [0; 128];
loop {
let res = event_man.try_event_handling();
assert!(res.is_ok());
match pus_event_man_rx.try_recv() {
Ok((event, aux_data)) => {
let mut gen_event = |aux_data| {
pus_event_man.generate_pus_event_tm_generic(
&mut sender,
&EMPTY_STAMP,
event,
aux_data,
)
};
let res = if let Some(aux_data) = aux_data {
match aux_data {
Params::Heapless(heapless) => match heapless {
ParamsHeapless::Raw(raw) => {
raw.write_to_be_bytes(&mut params_array)
.expect("Writing raw parameter failed");
gen_event(Some(&params_array[0..raw.raw_len()]))
}
ParamsHeapless::EcssEnum(e) => {
e.write_to_be_bytes(&mut params_array)
.expect("Writing ECSS enum failed");
gen_event(Some(&params_array[0..e.raw_len()]))
}
},
Params::Vec(vec) => gen_event(Some(vec.as_slice())),
Params::String(str) => gen_event(Some(str.as_bytes())),
Params::Store(_) => gen_event(None),
}
} else {
gen_event(None)
};
event_cnt += 1;
assert!(res.is_ok());
assert!(res.unwrap());
if event_cnt == 2 {
break;
}
}
Err(e) => {
if let TryRecvError::Disconnected = e {
panic!("Event receiver disconnected!")
}
}
}
}
});
// Event sender and TM checker thread
let jh1 = thread::spawn(move || {
event_sender
.send((INFO_EVENT.into(), None))
.expect("Sending info event failed");
loop {
match event_rx.try_recv() {
// Event TM received successfully
Ok(event_tm) => {
let tm =
PusTm::from_bytes(event_tm.as_slice(), 7).expect("Deserializing TM failed");
assert_eq!(tm.0.service(), 5);
assert_eq!(tm.0.subservice(), 1);
let src_data = tm.0.source_data();
assert!(src_data.is_some());
let src_data = src_data.unwrap();
assert_eq!(src_data.len(), 4);
let event =
EventU32::from(u32::from_be_bytes(src_data[0..4].try_into().unwrap()));
assert_eq!(event, INFO_EVENT);
break;
}
Err(e) => {
if let TryRecvError::Disconnected = e {
panic!("Event sender disconnected!")
}
}
}
}
event_sender
.send((
LOW_SEV_EVENT.into(),
Some(Params::Heapless((2_u32, 3_u32).into())),
))
.expect("Sending low severity event failed");
loop {
match event_rx.try_recv() {
// Event TM received successfully
Ok(event_tm) => {
let tm =
PusTm::from_bytes(event_tm.as_slice(), 7).expect("Deserializing TM failed");
assert_eq!(tm.0.service(), 5);
assert_eq!(tm.0.subservice(), 2);
let src_data = tm.0.source_data();
assert!(src_data.is_some());
let src_data = src_data.unwrap();
assert_eq!(src_data.len(), 12);
let event =
EventU32::from(u32::from_be_bytes(src_data[0..4].try_into().unwrap()));
assert_eq!(event, LOW_SEV_EVENT);
let u32_pair: U32Pair =
src_data[4..].try_into().expect("Creating U32Pair failed");
assert_eq!(u32_pair.0, 2);
assert_eq!(u32_pair.1, 3);
break;
}
Err(e) => {
if let TryRecvError::Disconnected = e {
panic!("Event sender disconnected!")
}
}
}
}
});
jh0.join().expect("Joining manager thread failed");
jh1.join().expect("Joining creator thread failed");
}

View File

@@ -0,0 +1,194 @@
// TODO: Refactor this to also test the STD impl using mpsc
// TODO: Change back to cross-beam as soon as STD impl was added back for TM.
#[cfg(feature = "crossbeam")]
pub mod crossbeam_test {
use hashbrown::HashMap;
use satrs_core::pool::{LocalPool, PoolCfg, PoolProvider};
use satrs_core::pus::verification::{
FailParams, RequestId, VerificationReporterCfg, VerificationReporterWithSender,
};
use satrs_core::pus::MpscTmInStoreSender;
use satrs_core::tmtc::tm_helper::SharedTmStore;
use spacepackets::ecss::{EcssEnumU16, EcssEnumU8, PusPacket, SerializablePusPacket};
use spacepackets::tc::{PusTc, PusTcSecondaryHeader};
use spacepackets::tm::PusTm;
use spacepackets::SpHeader;
use std::sync::{mpsc, Arc, RwLock};
use std::thread;
use std::time::Duration;
const TEST_APID: u16 = 0x03;
const FIXED_STAMP: [u8; 7] = [0; 7];
const PACKETS_SENT: u8 = 8;
/// This test also shows how the verification reporter can be used in a multi-threaded context
/// by cloning it and moving each clone into a separate sender thread.
///
/// - The first thread generates an acceptance, a start, two step and one completion success report
/// - The second thread generates an acceptance and start success report and a completion failure
/// - The third thread is the verification receiver. In the test case, it verifies that the other
/// two threads have sent the expected verification reports
#[test]
fn test_shared_reporter() {
// We use a synced sequence count provider here because both verification reporters have
// the same APID. If they had distinct APIDs, the more correct approach would be to give
// each reporter its own sequence count provider.
let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
// Shared pool object to store the verification PUS telemetry
let pool_cfg = PoolCfg::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
let shared_tm_store = SharedTmStore::new(Box::new(LocalPool::new(pool_cfg.clone())));
let shared_tc_pool_0 = Arc::new(RwLock::new(LocalPool::new(pool_cfg)));
let shared_tc_pool_1 = shared_tc_pool_0.clone();
let (tx, rx) = mpsc::channel();
let sender =
MpscTmInStoreSender::new(0, "verif_sender", shared_tm_store.clone(), tx.clone());
let mut reporter_with_sender_0 =
VerificationReporterWithSender::new(&cfg, Box::new(sender));
let mut reporter_with_sender_1 = reporter_with_sender_0.clone();
// For test purposes, we retrieve the request IDs from the TCs and pass them to the receiver
// thread.
let req_id_0;
let req_id_1;
let (tx_tc_0, rx_tc_0) = crossbeam_channel::bounded(3);
let (tx_tc_1, rx_tc_1) = crossbeam_channel::bounded(3);
{
let mut tc_guard = shared_tc_pool_0.write().unwrap();
let mut sph = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap();
let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
let pus_tc_0 = PusTc::new(&mut sph, tc_header, None, true);
req_id_0 = RequestId::new(&pus_tc_0);
let (addr, mut buf) = tc_guard.free_element(pus_tc_0.len_packed()).unwrap();
pus_tc_0.write_to_bytes(&mut buf).unwrap();
tx_tc_0.send(addr).unwrap();
let mut sph = SpHeader::tc_unseg(TEST_APID, 1, 0).unwrap();
let tc_header = PusTcSecondaryHeader::new_simple(5, 1);
let pus_tc_1 = PusTc::new(&mut sph, tc_header, None, true);
req_id_1 = RequestId::new(&pus_tc_1);
let (addr, mut buf) = tc_guard.free_element(pus_tc_1.len_packed()).unwrap();
pus_tc_1.write_to_bytes(&mut buf).unwrap();
tx_tc_1.send(addr).unwrap();
}
let verif_sender_0 = thread::spawn(move || {
let mut tc_buf: [u8; 1024] = [0; 1024];
let tc_addr = rx_tc_0
.recv_timeout(Duration::from_millis(20))
.expect("Receive timeout");
let tc_len;
{
let mut tc_guard = shared_tc_pool_0.write().unwrap();
let pg = tc_guard.read_with_guard(tc_addr);
let buf = pg.read().unwrap();
tc_len = buf.len();
tc_buf[0..tc_len].copy_from_slice(buf);
}
let (_tc, _) = PusTc::from_bytes(&tc_buf[0..tc_len]).unwrap();
let token = reporter_with_sender_0.add_tc_with_req_id(req_id_0);
let accepted_token = reporter_with_sender_0
.acceptance_success(token, Some(&FIXED_STAMP))
.expect("Acceptance success failed");
// Do some start handling here
let started_token = reporter_with_sender_0
.start_success(accepted_token, Some(&FIXED_STAMP))
.expect("Start success failed");
// Do some step handling here
reporter_with_sender_0
.step_success(&started_token, Some(&FIXED_STAMP), EcssEnumU8::new(0))
.expect("Step 0 success failed");
// Finish up
reporter_with_sender_0
.step_success(&started_token, Some(&FIXED_STAMP), EcssEnumU8::new(1))
.expect("Step 1 success failed");
reporter_with_sender_0
.completion_success(started_token, Some(&FIXED_STAMP))
.expect("Completion success failed");
});
let verif_sender_1 = thread::spawn(move || {
let mut tc_buf: [u8; 1024] = [0; 1024];
let tc_addr = rx_tc_1
.recv_timeout(Duration::from_millis(20))
.expect("Receive timeout");
let tc_len;
{
let mut tc_guard = shared_tc_pool_1.write().unwrap();
let pg = tc_guard.read_with_guard(tc_addr);
let buf = pg.read().unwrap();
tc_len = buf.len();
tc_buf[0..tc_len].copy_from_slice(buf);
}
let (tc, _) = PusTc::from_bytes(&tc_buf[0..tc_len]).unwrap();
let token = reporter_with_sender_1.add_tc(&tc);
let accepted_token = reporter_with_sender_1
.acceptance_success(token, Some(&FIXED_STAMP))
.expect("Acceptance success failed");
let started_token = reporter_with_sender_1
.start_success(accepted_token, Some(&FIXED_STAMP))
.expect("Start success failed");
let fail_code = EcssEnumU16::new(2);
let params = FailParams::new(Some(&FIXED_STAMP), &fail_code, None);
reporter_with_sender_1
.completion_failure(started_token, params)
.expect("Completion success failed");
});
let verif_receiver = thread::spawn(move || {
let mut packet_counter = 0;
let mut tm_buf: [u8; 1024] = [0; 1024];
let mut verif_map = HashMap::new();
while packet_counter < PACKETS_SENT {
let verif_addr = rx
.recv_timeout(Duration::from_millis(50))
.expect("Packet reception timeout");
let tm_len;
let shared_tm_store = shared_tm_store.clone_backing_pool();
{
let mut rg = shared_tm_store.write().expect("Error locking shared pool");
let store_guard = rg.read_with_guard(verif_addr);
let slice = store_guard.read().expect("Error reading TM slice");
tm_len = slice.len();
tm_buf[0..tm_len].copy_from_slice(slice);
}
let (pus_tm, _) = PusTm::from_bytes(&tm_buf[0..tm_len], 7)
.expect("Error reading verification TM");
let req_id = RequestId::from_bytes(
&pus_tm.source_data().expect("Invalid TM source data")
[0..RequestId::SIZE_AS_BYTES],
)
.unwrap();
if !verif_map.contains_key(&req_id) {
let mut content = Vec::new();
content.push(pus_tm.subservice());
verif_map.insert(req_id, content);
} else {
let content = verif_map.get_mut(&req_id).unwrap();
content.push(pus_tm.subservice())
}
packet_counter += 1;
}
for (req_id, content) in verif_map {
if req_id == req_id_1 {
assert_eq!(content[0], 1);
assert_eq!(content[1], 3);
assert_eq!(content[2], 8);
} else if req_id == req_id_0 {
assert_eq!(content[0], 1);
assert_eq!(content[1], 3);
assert_eq!(content[2], 5);
assert_eq!(content[3], 5);
assert_eq!(content[4], 7);
} else {
panic!("Unexpected request ID {:?}", req_id);
}
}
});
verif_sender_0.join().expect("Joining thread 0 failed");
verif_sender_1.join().expect("Joining thread 1 failed");
verif_receiver.join().expect("Joining thread 2 failed");
}
}

View File

@@ -0,0 +1,3 @@
/target
/itm.txt
/.cargo/config*

View File

@@ -0,0 +1,2 @@
/settings.json
/.cortex-debug.*

View File

@@ -0,0 +1,12 @@
{
// See https://go.microsoft.com/fwlink/?LinkId=827846 to learn about workspace recommendations.
// Extension identifier format: ${publisher}.${name}. Example: vscode.csharp
// List of extensions which should be recommended for users of this workspace.
"recommendations": [
"rust-lang.rust",
"marus25.cortex-debug",
],
// List of extensions recommended by VS Code that should not be recommended for users of this workspace.
"unwantedRecommendations": []
}

View File

@@ -0,0 +1,66 @@
{
/*
* Requires the Rust Language Server (RLS) and Cortex-Debug extensions
* https://marketplace.visualstudio.com/items?itemName=rust-lang.rust
* https://marketplace.visualstudio.com/items?itemName=marus25.cortex-debug
*/
"version": "0.2.0",
"configurations": [
{
/* Launches debug session for currently open example */
"type": "cortex-debug",
"request": "launch",
"name": "Debug",
"servertype": "openocd",
"cwd": "${workspaceRoot}",
"preLaunchTask": "cargo build",
"runToEntryPoint": "true",
"executable": "./target/thumbv7em-none-eabihf/debug/satrs-example-stm32f3-disco",
"preLaunchCommands": ["break rust_begin_unwind"],
"device": "STM32F303VCT6",
"configFiles": [
"${workspaceRoot}/.vscode/openocd-helpers.tcl",
"interface/stlink.cfg",
"target/stm32f3x.cfg"
],
"svdFile": "${env:HOME}/.svd/STM32F303.svd",
"swoConfig": {
"enabled": true,
"cpuFrequency": 8000000,
"swoFrequency": 2000000,
"source": "probe",
"decoders": [
{ "type": "console", "label": "ITM", "port": 0 }
]
}
},
{
/* Launches debug session for currently open example */
"type": "cortex-debug",
"request": "launch",
"name": "Release",
"servertype": "openocd",
"cwd": "${workspaceRoot}",
"preLaunchTask": "cargo build",
"runToEntryPoint": "true",
"executable": "./target/thumbv7em-none-eabihf/release/satrs-example-stm32f3-disco",
"preLaunchCommands": ["break rust_begin_unwind"],
"device": "STM32F303VCT6",
"configFiles": [
"${workspaceRoot}/.vscode/openocd-helpers.tcl",
"interface/stlink.cfg",
"target/stm32f3x.cfg"
],
"svdFile": "${env:HOME}/.svd/STM32F303.svd",
"swoConfig": {
"enabled": true,
"cpuFrequency": 8000000,
"swoFrequency": 2000000,
"source": "probe",
"decoders": [
{ "type": "console", "label": "ITM", "port": 0 }
]
}
}
]
}

View File

@@ -0,0 +1,17 @@
#
# Cortex-Debug extension calls this function during initialization. You can copy this
# file, modify it and specify it as one of the config files supplied in launch.json
# preferably at the beginning.
#
# Note that this file simply defines a function for use later when it is time to configure
# for SWO.
#
set USE_SWO 0
proc CDSWOConfigure { CDCPUFreqHz CDSWOFreqHz CDSWOOutput } {
# Alternative option: Pipe ITM output into itm.txt file
# tpiu config internal itm.txt uart off $CDCPUFreqHz
# Default option so SWO display of VS code works.
tpiu config internal $CDSWOOutput uart off $CDCPUFreqHz $CDSWOFreqHz
itm port 0 on
}

View File

@@ -0,0 +1,3 @@
{
"cortex-debug.gdbPath.linux": "gdb-multiarch"
}

View File

@@ -0,0 +1,20 @@
{
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"tasks": [
{
"label": "cargo build",
"type": "shell",
"command": "~/.cargo/bin/cargo", // note: full path to the cargo
"args": [
"build"
],
"group": {
"kind": "build",
"isDefault": true
}
},
]
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,59 @@
[package]
name = "satrs-example-stm32f3-disco"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
cortex-m = "0.7"
cortex-m-rt = "0.7"
embedded-hal = "0.2.6"
cortex-m-rtic = "1.0"
enumset = "1.0"
heapless = "0.7"
systick-monotonic = "1.0"
[dependencies.cobs]
git = "https://github.com/robamu/cobs.rs.git"
branch = "all_features"
default-features = false
[dependencies.panic-itm]
version = "0.4"
[dependencies.itm_logger]
git = "https://github.com/robamu/itm_logger.rs.git"
branch = "all_features"
version = "0.1.3-alpha.0"
[dependencies.stm32f3xx-hal]
git = "https://github.com/robamu/stm32f3xx-hal"
version = "0.10.0-alpha.0"
features = ["stm32f303xc", "rt", "enumset"]
branch = "all_features"
# Can be used in workspace to develop and update HAL
# path = "../stm32f3xx-hal"
[dependencies.stm32f3-discovery]
git = "https://github.com/robamu/stm32f3-discovery"
version = "0.8.0-alpha.0"
branch = "all_features"
# Can be used in workspace to develop and update BSP
# path = "../stm32f3-discovery"
[dependencies.satrs-core]
git = "https://egit.irs.uni-stuttgart.de/rust/satrs-core.git"
version = "0.1.0-alpha.0"
default-features = false
# this lets you use `cargo fix`!
# [[bin]]
# name = "stm32f3-blinky"
# test = false
# bench = false
[profile.release]
codegen-units = 1 # better optimizations
debug = true # symbols are nice and they don't increase the size on Flash
lto = true # better optimizations

View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1 @@
This software contains code developed at the University of Stuttgart's Institute of Space Systems.

View File

@@ -0,0 +1,75 @@
sat-rs example for the STM32F3-Discovery board
=======
This example application shows how the [sat-rs framework](https://egit.irs.uni-stuttgart.de/rust/satrs-launchpad)
can be used on an embedded target. It also shows how a relatively simple OBSW could be built when no
standard runtime is available. It uses [RTIC](https://rtic.rs/1/book/en/) as the concurrency
framework.
The STM32F3-Discovery device was picked because it is a cheap Cortex-M4 based device which is also
used by the [Rust Embedded Book](https://docs.rust-embedded.org/book/intro/hardware.html) and the
[Rust Discovery](https://docs.rust-embedded.org/discovery/f3discovery/) book as an introduction
to embedded Rust.
## Preparing Rust and the repository
Building an application requires the `thumbv7em-none-eabihf` cross-compiler toolchain.
If you have not installed it yet, you can do so with
```sh
rustup target add thumbv7em-none-eabihf
```
A default `.cargo` config file is provided for this project, but it needs to be copied to the
correct name first. This keeps the active config file untracked by git, so it can be updated or
edited for custom needs.
```sh
cp def_config.toml config.toml
```
The configuration file will also set the target so it does not always have to be specified with
the `--target` argument.
## Building
After that, assuming that you have a `.cargo/config.toml` setting the correct build target,
you can simply build the application with
```sh
cargo build
```
## Flashing and Debugging from the command line
TODO
## Debugging with VS Code
The STM32F3-Discovery comes with an on-board ST-Link so all that is required to flash and debug
the board is a Mini-USB cable. The code in this repository was debugged using `openocd`
and the VS Code [`Cortex-Debug` plugin](https://marketplace.visualstudio.com/items?itemName=marus25.cortex-debug).
Sample configuration files for VS Code are provided as well. You can simply use `Run` and `Debug`
to automatically rebuild and flash your application.
The `tasks.json` and `launch.json` files are generic and you can use them immediately by opening
the folder in VS code or adding it to a workspace.
If you would like to use a custom GDB application, you can specify the gdb binary in the following
configuration variables in your `settings.json`:
- `"cortex-debug.gdbPath"`
- `"cortex-debug.gdbPath.linux"`
- `"cortex-debug.gdbPath.windows"`
- `"cortex-debug.gdbPath.osx"`
## Commanding with Python
When the SW is running on the Discovery board, you can command the MCU via a serial interface,
using COBS encoded CCSDS packets.
TODO:
- How and where to connect serial interface on the MCU
- How to set up Python venv (or at least strongly recommend it) and install deps
- How to copy `def_tmtc_conf.json` to `tmtc_conf.json` and adapt it for custom serial port

View File

@@ -0,0 +1,18 @@
use std::env;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
fn main() {
// Put the linker script somewhere the linker can find it
let out = &PathBuf::from(env::var_os("OUT_DIR").unwrap());
File::create(out.join("memory.x"))
.unwrap()
.write_all(include_bytes!("memory.x"))
.unwrap();
println!("cargo:rustc-link-search={}", out.display());
// Only re-run the build script when memory.x is changed,
// instead of when any part of the source code changes.
println!("cargo:rerun-if-changed=memory.x");
}

View File

@@ -0,0 +1,10 @@
target extended-remote localhost:2331
monitor reset
# *try* to stop at the user entry point (it might be gone due to inlining)
break main
load
continue

View File

@@ -0,0 +1,33 @@
/* Linker script for the STM32F303VCT6 */
MEMORY
{
/* NOTE 1 K = 1 KiBi = 1024 bytes */
FLASH : ORIGIN = 0x08000000, LENGTH = 256K
RAM : ORIGIN = 0x20000000, LENGTH = 40K
}
/* This is where the call stack will be allocated. */
/* The stack is of the full descending type. */
/* You may want to use this variable to locate the call stack and static
variables in different memory regions. Below is shown the default value */
/* _stack_start = ORIGIN(RAM) + LENGTH(RAM); */
/* You can use this symbol to customize the location of the .text section */
/* If omitted the .text section will be placed right after the .vector_table
section */
/* This is required only on microcontrollers that store some configuration right
after the vector table */
/* _stext = ORIGIN(FLASH) + 0x400; */
/* Example of putting non-initialized variables into custom RAM locations. */
/* This assumes you have defined a region RAM2 above, and in the Rust
sources added the attribute `#[link_section = ".ram2bss"]` to the data
you want to place there. */
/* Note that the section will not be zero-initialized by the runtime! */
/* SECTIONS {
.ram2bss (NOLOAD) : ALIGN(4) {
*(.ram2bss);
. = ALIGN(4);
} > RAM2
} INSERT AFTER .bss;
*/

View File

@@ -0,0 +1,12 @@
# Sample OpenOCD configuration for the STM32F3DISCOVERY development board
# Depending on the hardware revision you got you'll have to pick ONE of these
# interfaces. At any time only one interface should be commented out.
# Revision C (newer revision)
source [find interface/stlink.cfg]
# Revision A and B (older revisions)
# source [find interface/stlink-v2.cfg]
source [find target/stm32f3x.cfg]

View File

@@ -0,0 +1,40 @@
target extended-remote :3333
# print demangled symbols
set print asm-demangle on
# set backtrace limit to not have infinite backtrace loops
set backtrace limit 32
# detect unhandled exceptions, hard faults and panics
break DefaultHandler
break HardFault
break rust_begin_unwind
# # run the next few lines so the panic message is printed immediately
# # the number needs to be adjusted for your panic handler
# commands $bpnum
# next 4
# end
# *try* to stop at the user entry point (it might be gone due to inlining)
break main
# monitor arm semihosting enable
# # send captured ITM to the file itm.fifo
# # (the microcontroller SWO pin must be connected to the programmer SWO pin)
# # 8000000 must match the core clock frequency
monitor tpiu config internal itm.txt uart off 8000000
# # OR: make the microcontroller SWO pin output compatible with UART (8N1)
# # 8000000 must match the core clock frequency
# # 2000000 is the frequency of the SWO pin
# monitor tpiu config external uart off 8000000 2000000
# # enable ITM port 0
monitor itm port 0 on
load
# start the process but immediately halt the processor
stepi

View File

@@ -0,0 +1,7 @@
/venv
/log
/.idea/*
!/.idea/runConfigurations
/seqcnt.txt
/tmtc_conf.json

View File

@@ -0,0 +1,4 @@
{
"com_if": "serial_cobs",
"serial_baudrate": 115200
}

View File

@@ -0,0 +1,307 @@
#!/usr/bin/env python3
"""Example client for the sat-rs example application"""
import enum
import struct
import sys
import time
from typing import Any, Optional, cast
import tmtccmd
from spacepackets.ecss import PusTelemetry, PusTelecommand, PusVerificator
from spacepackets.ecss.pus_17_test import Service17Tm
from spacepackets.ecss.pus_1_verification import UnpackParams, Service1Tm
from tmtccmd import CcsdsTmtcBackend, TcHandlerBase, ProcedureParamsWrapper
from tmtccmd.core.base import BackendRequest
from tmtccmd.pus import VerificationWrapper
from tmtccmd.tm import CcsdsTmHandler, SpecificApidHandlerBase
from tmtccmd.com_if import ComInterface
from tmtccmd.config import (
default_json_path,
SetupParams,
TmTcCfgHookBase,
TmtcDefinitionWrapper,
CoreServiceList,
OpCodeEntry,
params_to_procedure_conversion,
)
from tmtccmd.config.com_if import SerialCfgWrapper
from tmtccmd.config import PreArgsParsingWrapper, SetupWrapper
from tmtccmd.logging import get_console_logger
from tmtccmd.logging.pus import (
RegularTmtcLogWrapper,
RawTmtcTimedLogWrapper,
TimedLogWhen,
)
from tmtccmd.tc import (
TcQueueEntryType,
ProcedureWrapper,
TcProcedureType,
FeedWrapper,
SendCbParams,
DefaultPusQueueHelper,
)
from tmtccmd.tm.pus_5_event import Service5Tm
from tmtccmd.util import FileSeqCountProvider, PusFileSeqCountProvider
from tmtccmd.util.obj_id import ObjectIdDictT
from tmtccmd.util.tmtc_printer import FsfwTmTcPrinter
LOGGER = get_console_logger()
EXAMPLE_PUS_APID = 0x02
class SatRsConfigHook(TmTcCfgHookBase):
def __init__(self, json_cfg_path: str):
super().__init__(json_cfg_path=json_cfg_path)
def assign_communication_interface(self, com_if_key: str) -> Optional[ComInterface]:
from tmtccmd.config.com_if import (
create_com_interface_default,
create_com_interface_cfg_default,
)
cfg = create_com_interface_cfg_default(
com_if_key=com_if_key,
json_cfg_path=self.cfg_path,
space_packet_ids=None,
)
if cfg is None:
raise ValueError(
f"No valid configuration could be retrieved for the COM IF with key {com_if_key}"
)
if cfg.com_if_key == "serial_cobs":
cfg = cast(SerialCfgWrapper, cfg)
cfg.serial_cfg.serial_timeout = 0.5
return create_com_interface_default(cfg)
def get_tmtc_definitions(self) -> TmtcDefinitionWrapper:
from tmtccmd.config.globals import get_default_tmtc_defs
defs = get_default_tmtc_defs()
srv_5 = OpCodeEntry()
srv_5.add("0", "Event Test")
defs.add_service(
name=CoreServiceList.SERVICE_5.value,
info="PUS Service 5 Event",
op_code_entry=srv_5,
)
srv_17 = OpCodeEntry()
srv_17.add("0", "Ping Test")
defs.add_service(
name=CoreServiceList.SERVICE_17_ALT,
info="PUS Service 17 Test",
op_code_entry=srv_17,
)
srv_3 = OpCodeEntry()
defs.add_service(
name=CoreServiceList.SERVICE_3,
info="PUS Service 3 Housekeeping",
op_code_entry=srv_3,
)
return defs
def perform_mode_operation(self, tmtc_backend: CcsdsTmtcBackend, mode: int):
LOGGER.info("Mode operation hook was called")
pass
def get_object_ids(self) -> ObjectIdDictT:
from tmtccmd.config.objects import get_core_object_ids
return get_core_object_ids()
class PusHandler(SpecificApidHandlerBase):
def __init__(
self,
verif_wrapper: VerificationWrapper,
printer: FsfwTmTcPrinter,
raw_logger: RawTmtcTimedLogWrapper,
):
super().__init__(EXAMPLE_PUS_APID, None)
self.printer = printer
self.raw_logger = raw_logger
self.verif_wrapper = verif_wrapper
def handle_tm(self, packet: bytes, _user_args: Any):
try:
tm_packet = PusTelemetry.unpack(packet)
except ValueError as e:
LOGGER.warning("Could not generate PUS TM object from raw data")
LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
raise e
service = tm_packet.service
dedicated_handler = False
if service == 1:
tm_packet = Service1Tm.unpack(data=packet, params=UnpackParams(1, 2))
res = self.verif_wrapper.add_tm(tm_packet)
if res is None:
LOGGER.info(
f"Received Verification TM[{tm_packet.service}, {tm_packet.subservice}] "
f"with Request ID {tm_packet.tc_req_id.as_u32():#08x}"
)
LOGGER.warning(
f"No matching telecommand found for {tm_packet.tc_req_id}"
)
else:
self.verif_wrapper.log_to_console(tm_packet, res)
self.verif_wrapper.log_to_file(tm_packet, res)
dedicated_handler = True
if service == 3:
LOGGER.info("No handling for HK packets implemented")
LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
pus_tm = PusTelemetry.unpack(packet)
if pus_tm.subservice == 25:
if len(pus_tm.source_data) < 8:
raise ValueError("No addressable ID in HK packet")
json_str = pus_tm.source_data[8:]
dedicated_handler = True
if service == 5:
tm_packet = Service5Tm.unpack(packet)
if service == 17:
tm_packet = Service17Tm.unpack(packet)
dedicated_handler = True
if tm_packet.subservice == 2:
self.printer.file_logger.info("Received Ping Reply TM[17,2]")
LOGGER.info("Received Ping Reply TM[17,2]")
else:
self.printer.file_logger.info(
f"Received Test Packet with unknown subservice {tm_packet.subservice}"
)
LOGGER.info(
f"Received Test Packet with unknown subservice {tm_packet.subservice}"
)
if tm_packet is None:
LOGGER.info(
f"The service {service} is not implemented in Telemetry Factory"
)
tm_packet = PusTelemetry.unpack(packet)
self.raw_logger.log_tm(tm_packet)
if not dedicated_handler and tm_packet is not None:
self.printer.handle_long_tm_print(packet_if=tm_packet, info_if=tm_packet)
def make_addressable_id(target_id: int, unique_id: int) -> bytes:
byte_string = bytearray(struct.pack("!I", target_id))
byte_string.extend(struct.pack("!I", unique_id))
return byte_string
class TcHandler(TcHandlerBase):
def __init__(
self,
seq_count_provider: FileSeqCountProvider,
verif_wrapper: VerificationWrapper,
):
super(TcHandler, self).__init__()
self.seq_count_provider = seq_count_provider
self.verif_wrapper = verif_wrapper
self.queue_helper = DefaultPusQueueHelper(
queue_wrapper=None,
seq_cnt_provider=seq_count_provider,
)
def send_cb(self, send_params: SendCbParams):
entry_helper = send_params.entry
if entry_helper.is_tc:
if entry_helper.entry_type == TcQueueEntryType.PUS_TC:
pus_tc_wrapper = entry_helper.to_pus_tc_entry()
pus_tc_wrapper.pus_tc.seq_count = (
self.seq_count_provider.get_and_increment()
)
self.verif_wrapper.add_tc(pus_tc_wrapper.pus_tc)
raw_tc = pus_tc_wrapper.pus_tc.pack()
LOGGER.info(f"Sending {pus_tc_wrapper.pus_tc}")
send_params.com_if.send(raw_tc)
elif entry_helper.entry_type == TcQueueEntryType.LOG:
log_entry = entry_helper.to_log_entry()
LOGGER.info(log_entry.log_str)
def queue_finished_cb(self, helper: ProcedureWrapper):
if helper.proc_type == TcProcedureType.DEFAULT:
def_proc = helper.to_def_procedure()
LOGGER.info(
f"Queue handling finished for service {def_proc.service} and "
f"op code {def_proc.op_code}"
)
def feed_cb(self, helper: ProcedureWrapper, wrapper: FeedWrapper):
q = self.queue_helper
q.queue_wrapper = wrapper.queue_wrapper
if helper.proc_type == TcProcedureType.DEFAULT:
def_proc = helper.to_def_procedure()
service = def_proc.service
op_code = def_proc.op_code
if (
service == CoreServiceList.SERVICE_17
or service == CoreServiceList.SERVICE_17_ALT
):
q.add_log_cmd("Sending PUS ping telecommand")
return q.add_pus_tc(PusTelecommand(service=17, subservice=1))
def main():
tmtccmd.init_printout(False)
hook_obj = SatRsConfigHook(json_cfg_path=default_json_path())
parser_wrapper = PreArgsParsingWrapper()
parser_wrapper.create_default_parent_parser()
parser_wrapper.create_default_parser()
parser_wrapper.add_def_proc_args()
post_args_wrapper = parser_wrapper.parse(hook_obj)
params = SetupParams()
proc_wrapper = ProcedureParamsWrapper()
if post_args_wrapper.use_gui:
post_args_wrapper.set_params_without_prompts(params, proc_wrapper)
else:
post_args_wrapper.set_params_with_prompts(params, proc_wrapper)
params.apid = EXAMPLE_PUS_APID
setup_args = SetupWrapper(
hook_obj=hook_obj, setup_params=params, proc_param_wrapper=proc_wrapper
)
# Create console logger helper and file loggers
tmtc_logger = RegularTmtcLogWrapper()
printer = FsfwTmTcPrinter(tmtc_logger.logger)
raw_logger = RawTmtcTimedLogWrapper(when=TimedLogWhen.PER_HOUR, interval=1)
verificator = PusVerificator()
verification_wrapper = VerificationWrapper(verificator, LOGGER, printer.file_logger)
# Create primary TM handler and add it to the CCSDS Packet Handler
tm_handler = PusHandler(verification_wrapper, printer, raw_logger)
ccsds_handler = CcsdsTmHandler(generic_handler=None)
ccsds_handler.add_apid_handler(tm_handler)
# Create TC handler
seq_count_provider = PusFileSeqCountProvider()
tc_handler = TcHandler(seq_count_provider, verification_wrapper)
tmtccmd.setup(setup_args=setup_args)
init_proc = params_to_procedure_conversion(setup_args.proc_param_wrapper)
tmtc_backend = tmtccmd.create_default_tmtc_backend(
setup_wrapper=setup_args,
tm_handler=ccsds_handler,
tc_handler=tc_handler,
init_procedure=init_proc,
)
tmtccmd.start(tmtc_backend=tmtc_backend, hook_obj=hook_obj)
try:
while True:
state = tmtc_backend.periodic_op(None)
if state.request == BackendRequest.TERMINATION_NO_ERROR:
sys.exit(0)
elif state.request == BackendRequest.DELAY_IDLE:
LOGGER.info("TMTC Client in IDLE mode")
time.sleep(3.0)
elif state.request == BackendRequest.DELAY_LISTENER:
time.sleep(0.8)
elif state.request == BackendRequest.DELAY_CUSTOM:
if state.next_delay.total_seconds() <= 0.4:
time.sleep(state.next_delay.total_seconds())
else:
time.sleep(0.4)
elif state.request == BackendRequest.CALL_NEXT:
pass
except KeyboardInterrupt:
sys.exit(0)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,2 @@
tmtccmd == 4.0.0a0
# -e git+https://github.com/robamu-org/tmtccmd.git@main#egg=tmtccmd

View File

@@ -0,0 +1,78 @@
#![no_std]
#![no_main]
extern crate panic_itm;
use cortex_m_rt::entry;
use stm32f3_discovery::stm32f3xx_hal::delay::Delay;
use stm32f3_discovery::stm32f3xx_hal::{pac, prelude::*};
use stm32f3_discovery::leds::Leds;
use stm32f3_discovery::switch_hal::{OutputSwitch, ToggleableOutputSwitch};
#[entry]
fn main() -> ! {
let dp = pac::Peripherals::take().unwrap();
let mut rcc = dp.RCC.constrain();
let cp = cortex_m::Peripherals::take().unwrap();
let mut flash = dp.FLASH.constrain();
let clocks = rcc.cfgr.freeze(&mut flash.acr);
let mut delay = Delay::new(cp.SYST, clocks);
let mut gpioe = dp.GPIOE.split(&mut rcc.ahb);
let mut leds = Leds::new(
gpioe.pe8,
gpioe.pe9,
gpioe.pe10,
gpioe.pe11,
gpioe.pe12,
gpioe.pe13,
gpioe.pe14,
gpioe.pe15,
&mut gpioe.moder,
&mut gpioe.otyper
);
let delay_ms = 200u16;
loop {
leds.ld3.toggle().ok();
delay.delay_ms(delay_ms);
leds.ld3.toggle().ok();
delay.delay_ms(delay_ms);
//explicit on/off
leds.ld4.on().ok();
delay.delay_ms(delay_ms);
leds.ld4.off().ok();
delay.delay_ms(delay_ms);
leds.ld5.on().ok();
delay.delay_ms(delay_ms);
leds.ld5.off().ok();
delay.delay_ms(delay_ms);
leds.ld6.on().ok();
delay.delay_ms(delay_ms);
leds.ld6.off().ok();
delay.delay_ms(delay_ms);
leds.ld7.on().ok();
delay.delay_ms(delay_ms);
leds.ld7.off().ok();
delay.delay_ms(delay_ms);
leds.ld8.on().ok();
delay.delay_ms(delay_ms);
leds.ld8.off().ok();
delay.delay_ms(delay_ms);
leds.ld9.on().ok();
delay.delay_ms(delay_ms);
leds.ld9.off().ok();
delay.delay_ms(delay_ms);
leds.ld10.on().ok();
delay.delay_ms(delay_ms);
leds.ld10.off().ok();
delay.delay_ms(delay_ms);
}
}

View File

@@ -0,0 +1,604 @@
#![no_std]
#![no_main]
extern crate panic_itm;
use rtic::app;
use heapless::{
mpmc::Q16,
pool,
pool::singleton::{Box, Pool},
};
#[allow(unused_imports)]
use itm_logger::{debug, info, logger_init, warn};
use satrs_core::spacepackets::{ecss::PusPacket, tm::PusTm};
use satrs_core::{
pus::{EcssTmErrorWithSend, EcssTmSenderCore},
seq_count::SequenceCountProviderCore,
};
use stm32f3xx_hal::dma::dma1;
use stm32f3xx_hal::gpio::{PushPull, AF7, PA2, PA3};
use stm32f3xx_hal::pac::USART2;
use stm32f3xx_hal::serial::{Rx, RxEvent, Serial, SerialDmaRx, SerialDmaTx, Tx, TxEvent};
use systick_monotonic::{fugit::Duration, Systick};
const UART_BAUD: u32 = 115200;
const BLINK_FREQ_MS: u64 = 1000;
const TX_HANDLER_FREQ_MS: u64 = 20;
const MIN_DELAY_BETWEEN_TX_PACKETS_MS: u16 = 5;
const MAX_TC_LEN: usize = 200;
const MAX_TM_LEN: usize = 200;
pub const PUS_APID: u16 = 0x02;
type TxType = Tx<USART2, PA2<AF7<PushPull>>>;
type RxType = Rx<USART2, PA3<AF7<PushPull>>>;
type MsDuration = Duration<u64, 1, 1000>;
type TxDmaTransferType = SerialDmaTx<&'static [u8], dma1::C7, TxType>;
type RxDmaTransferType = SerialDmaRx<&'static mut [u8], dma1::C6, RxType>;
// This is the predictable maximum overhead of the COBS encoding scheme.
// It is simply the maximum packet length divided by 254, rounded up.
const COBS_TC_OVERHEAD: usize = (MAX_TC_LEN + 254 - 1) / 254;
const COBS_TM_OVERHEAD: usize = (MAX_TM_LEN + 254 - 1) / 254;
const TC_BUF_LEN: usize = MAX_TC_LEN + COBS_TC_OVERHEAD;
const TM_BUF_LEN: usize = MAX_TM_LEN + COBS_TM_OVERHEAD;
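// Worked example: with MAX_TC_LEN = 200, the overhead is (200 + 253) / 254 = 1 byte,
// so TC_BUF_LEN = 201; the TM buffer is sized the same way.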
// This is a static buffer which should ONLY (!) be used as the TX DMA
// transfer buffer.
static mut DMA_TX_BUF: [u8; TM_BUF_LEN] = [0; TM_BUF_LEN];
// This is a static buffer which should ONLY (!) be used as the RX DMA
// transfer buffer.
static mut DMA_RX_BUF: [u8; TC_BUF_LEN] = [0; TC_BUF_LEN];
static TX_REQUESTS: Q16<(Box<poolmod::TM>, usize)> = Q16::new();
const TC_POOL_SLOTS: usize = 12;
const TM_POOL_SLOTS: usize = 12;
use core::sync::atomic::{AtomicU16, Ordering};
pub struct SeqCountProviderAtomicRef {
atomic: AtomicU16,
ordering: Ordering,
}
impl SeqCountProviderAtomicRef {
pub const fn new(ordering: Ordering) -> Self {
Self {
atomic: AtomicU16::new(0),
ordering,
}
}
}
impl SequenceCountProviderCore<u16> for SeqCountProviderAtomicRef {
fn get(&self) -> u16 {
self.atomic.load(self.ordering)
}
fn increment(&self) {
self.atomic.fetch_add(1, self.ordering);
}
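// Note: fetch_add wraps on overflow, so the sequence count rolls over from u16::MAX back to 0.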
fn get_and_increment(&self) -> u16 {
self.atomic.fetch_add(1, self.ordering)
}
}
static SEQ_COUNT_PROVIDER: SeqCountProviderAtomicRef =
SeqCountProviderAtomicRef::new(Ordering::Relaxed);
// The heapless pool macro generates non-camel-case type names, hence the allow below.
#[allow(non_camel_case_types)]
mod poolmod {
use super::*;
// Must hold full TC length including COBS overhead.
pool!(TC: [u8; TC_BUF_LEN]);
// Only encoded at the end, so no need to account for COBS overhead.
pool!(TM: [u8; MAX_TM_LEN]);
}
pub struct TxIdle {
tx: TxType,
dma_channel: dma1::C7,
}
#[derive(Debug)]
pub enum TmStoreError {
StoreFull,
StoreSlotsTooSmall,
}
impl From<TmStoreError> for EcssTmErrorWithSend<TmStoreError> {
fn from(value: TmStoreError) -> Self {
Self::SendError(value)
}
}
pub struct TmSender {
mem_block: Option<Box<poolmod::TM>>,
ctx: &'static str,
}
impl TmSender {
pub fn new(mem_block: Box<poolmod::TM>, ctx: &'static str) -> Self {
Self {
mem_block: Some(mem_block),
ctx,
}
}
}
impl EcssTmSenderCore for TmSender {
type Error = TmStoreError;
fn send_tm(
&mut self,
tm: PusTm,
) -> Result<(), satrs_core::pus::EcssTmErrorWithSend<Self::Error>> {
let mem_block = self.mem_block.take();
if mem_block.is_none() {
panic!("send_tm should only be called once");
}
let mut mem_block = mem_block.unwrap();
if tm.len_packed() > MAX_TM_LEN {
return Err(EcssTmErrorWithSend::SendError(
TmStoreError::StoreSlotsTooSmall,
));
}
tm.write_to_bytes(mem_block.as_mut_slice())
.map_err(|e| EcssTmErrorWithSend::EcssTmError(e.into()))?;
info!(target: self.ctx, "Sending TM[{},{}] with size {}", tm.service(), tm.subservice(), tm.len_packed());
TX_REQUESTS
.enqueue((mem_block, tm.len_packed()))
.map_err(|_| TmStoreError::StoreFull)?;
Ok(())
}
}
pub enum UartTxState {
// Wrapped in an option because we need an owned type later.
Idle(Option<TxIdle>),
// Same as above
Transmitting(Option<TxDmaTransferType>),
}
#[app(device = stm32f3xx_hal::pac, peripherals = true, dispatchers = [TIM20_BRK, TIM20_UP, TIM20_TRG_COM])]
mod app {
use super::*;
use core::slice::Iter;
use cortex_m::iprintln;
use satrs_core::pus::verification::FailParams;
use satrs_core::pus::verification::VerificationReporterCore;
use satrs_core::spacepackets::{
ecss::EcssEnumU16,
tc::PusTc,
time::cds::P_FIELD_BASE,
tm::{PusTm, PusTmSecondaryHeader},
CcsdsPacket, SpHeader,
};
#[allow(unused_imports)]
use stm32f3_discovery::leds::Direction;
use stm32f3_discovery::leds::Leds;
use stm32f3xx_hal::prelude::*;
use stm32f3xx_hal::Toggle;
use stm32f3_discovery::switch_hal::OutputSwitch;
#[allow(dead_code)]
type SerialType = Serial<USART2, (PA2<AF7<PushPull>>, PA3<AF7<PushPull>>)>;
#[shared]
struct Shared {
tx_transfer: UartTxState,
rx_transfer: Option<RxDmaTransferType>,
}
#[local]
struct Local {
leds: Leds,
last_dir: Direction,
verif_reporter: VerificationReporterCore,
curr_dir: Iter<'static, Direction>,
}
#[monotonic(binds = SysTick, default = true)]
type MonoTimer = Systick<1000>;
#[init(local = [
tc_pool_mem: [u8; TC_BUF_LEN * TC_POOL_SLOTS] = [0; TC_BUF_LEN * TC_POOL_SLOTS],
tm_pool_mem: [u8; MAX_TM_LEN * TM_POOL_SLOTS] = [0; MAX_TM_LEN * TM_POOL_SLOTS]
])]
fn init(mut cx: init::Context) -> (Shared, Local, init::Monotonics) {
let mut rcc = cx.device.RCC.constrain();
let mono = Systick::new(cx.core.SYST, 8_000_000);
logger_init();
let mut flash = cx.device.FLASH.constrain();
let clocks = rcc
.cfgr
.use_hse(8.MHz())
.sysclk(8.MHz())
.pclk1(8.MHz())
.freeze(&mut flash.acr);
// setup ITM output
iprintln!(
&mut cx.core.ITM.stim[0],
"Starting sat-rs demo application for the STM32F3-Discovery"
);
let mut gpioe = cx.device.GPIOE.split(&mut rcc.ahb);
// Assign memory to the pools.
poolmod::TC::grow(cx.local.tc_pool_mem);
poolmod::TM::grow(cx.local.tm_pool_mem);
let verif_reporter = VerificationReporterCore::new(PUS_APID).unwrap();
let leds = Leds::new(
gpioe.pe8,
gpioe.pe9,
gpioe.pe10,
gpioe.pe11,
gpioe.pe12,
gpioe.pe13,
gpioe.pe14,
gpioe.pe15,
&mut gpioe.moder,
&mut gpioe.otyper,
);
let mut gpioa = cx.device.GPIOA.split(&mut rcc.ahb);
// USART2 pins
let mut pins = (
// TX pin: PA2
gpioa
.pa2
.into_af_push_pull(&mut gpioa.moder, &mut gpioa.otyper, &mut gpioa.afrl),
// RX pin: PA3
gpioa
.pa3
.into_af_push_pull(&mut gpioa.moder, &mut gpioa.otyper, &mut gpioa.afrl),
);
pins.1.internal_pull_up(&mut gpioa.pupdr, true);
let mut usart2 = Serial::new(
cx.device.USART2,
pins,
UART_BAUD.Bd(),
clocks,
&mut rcc.apb1,
);
usart2.configure_rx_interrupt(RxEvent::Idle, Toggle::On);
// This interrupt is enabled to re-schedule new transfers in the interrupt handler immediately.
usart2.configure_tx_interrupt(TxEvent::TransmissionComplete, Toggle::On);
let dma1 = cx.device.DMA1.split(&mut rcc.ahb);
let (tx_serial, mut rx_serial) = usart2.split();
// This interrupt is immediately triggered, clear it. It will only be reset
// by the hardware when data is received on RX (RXNE event)
rx_serial.clear_event(RxEvent::Idle);
let rx_transfer = rx_serial.read_exact(unsafe { DMA_RX_BUF.as_mut_slice() }, dma1.ch6);
info!(target: "init", "Spawning tasks");
blink::spawn().unwrap();
serial_tx_handler::spawn().unwrap();
(
Shared {
tx_transfer: UartTxState::Idle(Some(TxIdle {
tx: tx_serial,
dma_channel: dma1.ch7,
})),
rx_transfer: Some(rx_transfer),
},
Local {
leds,
last_dir: Direction::North,
curr_dir: Direction::iter(),
verif_reporter,
},
init::Monotonics(mono),
)
}
#[task(local = [leds, curr_dir, last_dir])]
fn blink(cx: blink::Context) {
let toggle_leds = |dir: &Direction| {
let leds = cx.local.leds;
let last_led = leds.for_direction(*cx.local.last_dir);
last_led.off().ok();
let led = leds.for_direction(*dir);
led.on().ok();
*cx.local.last_dir = *dir;
};
match cx.local.curr_dir.next() {
Some(dir) => {
toggle_leds(dir);
}
None => {
*cx.local.curr_dir = Direction::iter();
toggle_leds(cx.local.curr_dir.next().unwrap());
}
}
blink::spawn_after(MsDuration::from_ticks(BLINK_FREQ_MS)).unwrap();
}
#[task(
shared = [tx_transfer],
local = []
)]
fn serial_tx_handler(mut cx: serial_tx_handler::Context) {
if let Some((buf, len)) = TX_REQUESTS.dequeue() {
cx.shared.tx_transfer.lock(|tx_state| match tx_state {
UartTxState::Idle(tx) => {
//debug!(target: "serial_tx_handler", "bytes: {:x?}", &buf[0..len]);
// Safety: We only copy the data into the TX DMA buffer in this task.
// If the DMA is active, another branch will be taken.
let mut_tx_dma_buf = unsafe { &mut DMA_TX_BUF };
// 0 sentinel value as start marker
mut_tx_dma_buf[0] = 0;
// Should never panic, we accounted for the overhead.
// Write into transfer buffer directly, no need for intermediate
// encoding buffer.
let encoded_len = cobs::encode(&buf[0..len], &mut mut_tx_dma_buf[1..]);
// 0 end marker
mut_tx_dma_buf[encoded_len + 1] = 0;
//debug!(target: "serial_tx_handler", "Sending {} bytes", encoded_len + 2);
//debug!("sent: {:x?}", &mut_tx_dma_buf[0..encoded_len + 2]);
let tx_idle = tx.take().unwrap();
// Transfer completion and re-scheduling of new TX transfers will be done
// by the IRQ handler.
let transfer = tx_idle
.tx
.write_all(&mut_tx_dma_buf[0..encoded_len + 2], tx_idle.dma_channel);
*tx_state = UartTxState::Transmitting(Some(transfer));
// The memory block is automatically returned to the pool when it is dropped.
}
UartTxState::Transmitting(_) => {
// This is a SW configuration error. Only the ISR which
// detects transfer completion should be able to spawn a new
// task, and that ISR should set the state to IDLE.
panic!("invalid internal tx state detected")
}
})
} else {
cx.shared.tx_transfer.lock(|tx_state| {
if let UartTxState::Idle(_) = tx_state {
serial_tx_handler::spawn_after(MsDuration::from_ticks(TX_HANDLER_FREQ_MS))
.unwrap();
}
});
}
}
#[task(
local = [
stamp_buf: [u8; 7] = [0; 7],
decode_buf: [u8; MAX_TC_LEN] = [0; MAX_TC_LEN],
src_data_buf: [u8; MAX_TM_LEN] = [0; MAX_TM_LEN],
verif_reporter
],
)]
fn serial_rx_handler(
cx: serial_rx_handler::Context,
received_packet: Box<poolmod::TC>,
rx_len: usize,
) {
let tgt: &'static str = "serial_rx_handler";
cx.local.stamp_buf[0] = P_FIELD_BASE;
info!(target: tgt, "Received packet with {} bytes", rx_len);
let decode_buf = cx.local.decode_buf;
let packet = received_packet.as_slice();
let mut start_idx = None;
for (idx, byte) in packet.iter().enumerate() {
if *byte != 0 {
start_idx = Some(idx);
break;
}
}
if start_idx.is_none() {
warn!(
target: tgt,
"decoding error, can only process cobs encoded frames, data is all 0"
);
return;
}
let start_idx = start_idx.unwrap();
match cobs::decode(&received_packet.as_slice()[start_idx..], decode_buf) {
Ok(len) => {
info!(target: tgt, "Decoded packet length: {}", len);
let pus_tc = PusTc::from_bytes(decode_buf);
let verif_reporter = cx.local.verif_reporter;
match pus_tc {
Ok((tc, tc_len)) => handle_tc(
tc,
tc_len,
verif_reporter,
cx.local.src_data_buf,
cx.local.stamp_buf,
tgt,
),
Err(e) => {
warn!(target: tgt, "Error unpacking PUS TC: {}", e);
}
}
}
Err(_) => {
warn!(
target: tgt,
"decoding error, can only process cobs encoded frames"
)
}
}
}
fn handle_tc(
tc: PusTc,
tc_len: usize,
verif_reporter: &mut VerificationReporterCore,
src_data_buf: &mut [u8; MAX_TM_LEN],
stamp_buf: &[u8; 7],
tgt: &'static str,
) {
info!(
target: tgt,
"Found PUS TC [{},{}] with length {}",
tc.service(),
tc.subservice(),
tc_len
);
let token = verif_reporter.add_tc(&tc);
if tc.apid() != PUS_APID {
warn!(target: tgt, "Received tc with unknown APID {}", tc.apid());
let sendable = verif_reporter
.acceptance_failure(
src_data_buf,
token,
&SEQ_COUNT_PROVIDER,
FailParams::new(stamp_buf, &EcssEnumU16::new(0), None),
)
.unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let mut sender = TmSender::new(mem_block, tgt);
if let Err(e) =
verif_reporter.send_acceptance_failure(sendable, &SEQ_COUNT_PROVIDER, &mut sender)
{
warn!(target: tgt, "Sending acceptance failure failed: {:?}", e.0);
};
return;
}
let sendable = verif_reporter
.acceptance_success(src_data_buf, token, &SEQ_COUNT_PROVIDER, stamp_buf)
.unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let mut sender = TmSender::new(mem_block, tgt);
let accepted_token = match verif_reporter.send_acceptance_success(
sendable,
&SEQ_COUNT_PROVIDER,
&mut sender,
) {
Ok(token) => token,
Err(e) => {
warn!(target: "serial_rx_handler", "Sending acceptance success failed: {:?}", e.0);
return;
}
};
if tc.service() == 17 {
if tc.subservice() == 1 {
let sendable = verif_reporter
.start_success(src_data_buf, accepted_token, &SEQ_COUNT_PROVIDER, stamp_buf)
.unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let mut sender = TmSender::new(mem_block, tgt);
let started_token = match verif_reporter.send_start_success(
sendable,
&SEQ_COUNT_PROVIDER,
&mut sender,
) {
Ok(token) => token,
Err(e) => {
warn!(target: tgt, "Sending acceptance success failed: {:?}", e.0);
return;
}
};
info!(
target: tgt,
"Received PUS ping telecommand, sending ping reply TM[17,2]"
);
let mut sp_header =
SpHeader::tc_unseg(PUS_APID, SEQ_COUNT_PROVIDER.get(), 0).unwrap();
let sec_header = PusTmSecondaryHeader::new_simple(17, 2, stamp_buf);
let ping_reply = PusTm::new(&mut sp_header, sec_header, None, true);
let mut mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let reply_len = ping_reply.write_to_bytes(mem_block.as_mut_slice()).unwrap();
if TX_REQUESTS.enqueue((mem_block, reply_len)).is_err() {
warn!(target: tgt, "TC queue full");
return;
}
SEQ_COUNT_PROVIDER.increment();
let sendable = verif_reporter
.completion_success(src_data_buf, started_token, &SEQ_COUNT_PROVIDER, stamp_buf)
.unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let mut sender = TmSender::new(mem_block, tgt);
if let Err(e) = verif_reporter.send_step_or_completion_success(
sendable,
&SEQ_COUNT_PROVIDER,
&mut sender,
) {
warn!(target: tgt, "Sending completion success failed: {:?}", e.0);
}
} else {
// TODO: Invalid subservice
}
}
}
#[task(binds = DMA1_CH6, shared = [rx_transfer])]
fn rx_dma_isr(mut cx: rx_dma_isr::Context) {
cx.shared.rx_transfer.lock(|rx_transfer| {
let rx_ref = rx_transfer.as_ref().unwrap();
if rx_ref.is_complete() {
let uart_rx_owned = rx_transfer.take().unwrap();
let (buf, c, rx) = uart_rx_owned.stop();
// The received data is transferred to another task now to avoid any processing overhead
// during the interrupt. There are multiple ways to do this, we use a memory pool here
// to do this.
let mut mem_block = poolmod::TC::alloc()
.expect("allocating memory block for rx failed")
.init([0u8; TC_BUF_LEN]);
// Copy data into memory pool.
mem_block.copy_from_slice(buf);
*rx_transfer = Some(rx.read_exact(buf, c));
// Only send owning pointer to pool memory and the received packet length.
serial_rx_handler::spawn(mem_block, TC_BUF_LEN)
.expect("spawning rx handler task failed");
// If this happens, there is a high chance that the maximum packet length was
// exceeded. Circular mode is not used here, so data might be missed.
warn!(
"rx transfer with maximum length {}, might miss data",
TC_BUF_LEN
);
}
});
}
#[task(binds = USART2_EXTI26, shared = [rx_transfer, tx_transfer])]
fn serial_isr(mut cx: serial_isr::Context) {
cx.shared.tx_transfer.lock(|tx_state| match tx_state {
UartTxState::Idle(_) => (),
UartTxState::Transmitting(transfer) => {
let transfer_ref = transfer.as_ref().unwrap();
if transfer_ref.is_complete() {
let transfer = transfer.take().unwrap();
let (_, dma_channel, tx) = transfer.stop();
*tx_state = UartTxState::Idle(Some(TxIdle { tx, dma_channel }));
serial_tx_handler::spawn_after(MsDuration::from_ticks(
MIN_DELAY_BETWEEN_TX_PACKETS_MS.into(),
))
.unwrap();
}
}
});
cx.shared.rx_transfer.lock(|rx_transfer| {
let rx_transfer_ref = rx_transfer.as_ref().unwrap();
// Received a partial packet.
if rx_transfer_ref.is_event_triggered(RxEvent::Idle) {
let rx_transfer_owned = rx_transfer.take().unwrap();
let (buf, ch, mut rx, rx_len) = rx_transfer_owned.stop_and_return_received_bytes();
// The received data is transferred to another task now to avoid any processing overhead
// during the interrupt. There are multiple ways to do this, we use a memory pool here
// to do this.
let mut mem_block = poolmod::TC::alloc()
.expect("allocating memory block for rx failed")
.init([0u8; TC_BUF_LEN]);
// Copy data into memory pool.
mem_block[0..rx_len as usize].copy_from_slice(&buf[0..rx_len as usize]);
rx.clear_event(RxEvent::Idle);
// Only send owning pointer to pool memory and the received packet length.
serial_rx_handler::spawn(mem_block, rx_len as usize)
.expect("spawning rx handler task failed");
*rx_transfer = Some(rx.read_exact(buf, ch));
}
});
}
}

View File

@@ -0,0 +1,9 @@
Change Log
=======
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]

23
satrs-example/Cargo.toml Normal file
View File

@@ -0,0 +1,23 @@
[package]
name = "satrs-example"
version = "0.1.0"
edition = "2021"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
default-run = "satrs-example"
[dependencies]
fern = "0.6"
chrono = "0.4"
log = "0.4"
crossbeam-channel = "0.5"
delegate = "0.10"
zerocopy = "0.6"
csv = "1"
num_enum = "0.6"
thiserror = "1"
[dependencies.satrs-core]
path = "../satrs-core"
[dependencies.satrs-mib]
path = "../satrs-mib"

View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

1
satrs-example/NOTICE Normal file
View File

@@ -0,0 +1 @@
This software contains code developed at the University of Stuttgart's Institute of Space Systems.

53
satrs-example/README.md Normal file
View File

@@ -0,0 +1,53 @@
sat-rs example
======
This crate contains an example application which simulates on-board software.
It uses various components provided by the sat-rs framework to do this. As such, it shows how
a more complex, real on-board software application could be built from these components.
The application opens a UDP server on port 7301 to receive telecommands.
You can run the application using `cargo run`. The `simpleclient` binary target sends a
ping telecommand and then verifies the telemetry generated by the example application.
It can be run like this:
```sh
cargo run --bin simpleclient
```
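If you just want to poke the UDP interface directly without a dedicated client, a few lines of
Python are enough. This is a hedged sketch: the port 7301 and the APID 0x02 are taken from this
example, and it assumes the application sends telemetry back to the sender address, as the
`simpleclient` flow suggests.

```py
# Minimal sketch: send a PUS TC[17,1] ping to the example's UDP server and wait for a reply.
import socket
from spacepackets.ecss import PusTelecommand

ping_tc = PusTelecommand(service=17, subservice=1, apid=0x02)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(2.0)
sock.sendto(ping_tc.pack(), ("127.0.0.1", 7301))
try:
    data, _ = sock.recvfrom(4096)
    print(f"received {len(data)} bytes of telemetry")
except socket.timeout:
    print("no reply received")
```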
This repository also contains a more complex client using the
[Python tmtccmd](https://github.com/robamu-org/tmtccmd) module.
# Using the tmtccmd Python client
The Python client requires a valid installation of the
[tmtccmd package](https://github.com/robamu-org/tmtccmd).
It is recommended to install it inside a virtual environment. To set one up on the command line,
you can use `python3 -m venv venv` on Unix systems or `py -m venv venv` on Windows systems.
After doing this, you can check the [venv tutorial](https://docs.python.org/3/tutorial/venv.html)
on how to activate the environment and then use the following command to install the required
dependency:
```sh
pip install -r requirements.txt
```
Alternatively, if you would like to use the GUI functionality provided by `tmtccmd`, you can also
install it manually with
```sh
pip install tmtccmd[gui]
```
After setting up the dependencies, you can simply run the `main.py` script to send commands
to the OBSW example and to view and handle incoming telemetry. The script and the `tmtccmd`
framework it uses make it easy to add and expose additional telecommand and telemetry handling
as Python code. For example, you can use the following command to send a ping as done with
the `simpleclient`:
```sh
./main.py -s test -o ping
```
You can also simply call the script without any arguments to view a list of services (`-s` flag)
and corresponding op codes (`-o` flag) for each service.

8
satrs-example/pyclient/.gitignore vendored Normal file
View File

@@ -0,0 +1,8 @@
__pycache__
/venv
/log
/.idea/*
!/.idea/runConfigurations
/seqcnt.txt

View File

@@ -0,0 +1,50 @@
from __future__ import annotations
import dataclasses
import enum
import struct
EXAMPLE_PUS_APID = 0x02
class EventSeverity(enum.IntEnum):
INFO = 0
LOW = 1
MEDIUM = 2
HIGH = 3
@dataclasses.dataclass
class EventU32:
severity: EventSeverity
group_id: int
unique_id: int
@classmethod
def unpack(cls, data: bytes) -> EventU32:
if len(data) < 4:
raise ValueError("passed data too short")
event_raw = struct.unpack("!I", data[0:4])[0]
return cls(
severity=EventSeverity((event_raw >> 30) & 0b11),
group_id=(event_raw >> 16) & 0x3FFF,
unique_id=event_raw & 0xFFFF,
)
class RequestTargetId(enum.IntEnum):
ACS = 1
class AcsHkIds(enum.IntEnum):
MGM_SET = 1
class HkOpCodes:
GENERATE_ONE_SHOT = ["0", "oneshot"]
def make_addressable_id(target_id: int, unique_id: int) -> bytes:
byte_string = bytearray(struct.pack("!I", target_id))
byte_string.extend(struct.pack("!I", unique_id))
return byte_string
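As a quick, purely illustrative check of the bit layout and helper defined above (the values
are made up, and the snippet assumes the definitions from this module are in scope):

```py
# Illustrative check of the EventU32 bit layout and make_addressable_id (made-up values).
import struct

raw = struct.pack("!I", (EventSeverity.MEDIUM << 30) | (0x10 << 16) | 0x0042)
event = EventU32.unpack(raw)
assert event.severity == EventSeverity.MEDIUM
assert event.group_id == 0x10 and event.unique_id == 0x42

# Two big-endian u32 values concatenated: target ID first, unique ID second.
assert make_addressable_id(0x01, 0x02) == b"\x00\x00\x00\x01\x00\x00\x00\x02"
```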

Some files were not shown because too many files have changed in this diff.