Re-structure sat-rs
All checks were successful (Rust/sat-rs/pipeline/pr-main: This commit looks good)

- Add new shared subcrate satrs-shared to split off some shared
  components not expected to change very often.
- Rename `satrs-core` to `satrs`. sat-rs is expected to remain the primary
  crate, so the `core` qualifier is superfluous; `core` also implies a
  stability that the crate will not have for some time. For downstream
  users this is mostly a dependency and import path rename, as sketched
  below.
2024-02-12 15:51:37 +01:00
parent f58a4eaee5
commit de4e6183b3
93 changed files with 466 additions and 189 deletions
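For reference, a minimal sketch of the impact of the rename on downstream code (not part of this commit), assuming the pus::event module layout shown in the added files below, the default alloc/std features, and that only the crate name changes:

// Cargo.toml: replace the `satrs-core` dependency entry with `satrs`.
// Rust sources: only the crate prefix of existing imports changes, e.g.:
// use satrs_core::pus::event::EventReporter; // before the rename
use satrs::pus::event::EventReporter;         // after the rename

fn main() {
    // EventReporter::new takes the APID and the maximum event ID plus auxiliary
    // data size, as defined in satrs/src/pus/event.rs below.
    let _reporter = EventReporter::new(0x02, 128).expect("invalid APID");
}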

satrs/src/pus/event.rs Normal file

@@ -0,0 +1,449 @@
use crate::pus::{source_buffer_large_enough, EcssTmtcError};
use spacepackets::ecss::tm::PusTmCreator;
use spacepackets::ecss::tm::PusTmSecondaryHeader;
use spacepackets::ecss::{EcssEnumeration, PusError};
use spacepackets::{SpHeader, MAX_APID};
use crate::pus::EcssTmSenderCore;
#[cfg(feature = "alloc")]
pub use alloc_mod::EventReporter;
pub use spacepackets::ecss::event::*;
pub struct EventReporterBase {
msg_count: u16,
apid: u16,
pub dest_id: u16,
}
impl EventReporterBase {
pub fn new(apid: u16) -> Option<Self> {
if apid > MAX_APID {
return None;
}
Some(Self {
msg_count: 0,
dest_id: 0,
apid,
})
}
pub fn event_info(
&mut self,
buf: &mut [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.generate_and_send_generic_tm(
buf,
Subservice::TmInfoReport,
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_low_severity(
&mut self,
buf: &mut [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.generate_and_send_generic_tm(
buf,
Subservice::TmLowSeverityReport,
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_medium_severity(
&mut self,
buf: &mut [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.generate_and_send_generic_tm(
buf,
Subservice::TmMediumSeverityReport,
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_high_severity(
&mut self,
buf: &mut [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.generate_and_send_generic_tm(
buf,
Subservice::TmHighSeverityReport,
sender,
time_stamp,
event_id,
aux_data,
)
}
fn generate_and_send_generic_tm(
&mut self,
buf: &mut [u8],
subservice: Subservice,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
let tm = self.generate_generic_event_tm(buf, subservice, time_stamp, event_id, aux_data)?;
sender.send_tm(tm.into())?;
self.msg_count += 1;
Ok(())
}
fn generate_generic_event_tm<'a>(
&'a self,
buf: &'a mut [u8],
subservice: Subservice,
time_stamp: &'a [u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<PusTmCreator, EcssTmtcError> {
let mut src_data_len = event_id.size();
if let Some(aux_data) = aux_data {
src_data_len += aux_data.len();
}
source_buffer_large_enough(buf.len(), src_data_len)?;
let mut sp_header = SpHeader::tm_unseg(self.apid, 0, 0).unwrap();
let sec_header = PusTmSecondaryHeader::new(
5,
subservice.into(),
self.msg_count,
self.dest_id,
Some(time_stamp),
);
let mut current_idx = 0;
event_id
.write_to_be_bytes(&mut buf[0..event_id.size()])
.map_err(PusError::ByteConversion)?;
current_idx += event_id.size();
if let Some(aux_data) = aux_data {
buf[current_idx..current_idx + aux_data.len()].copy_from_slice(aux_data);
current_idx += aux_data.len();
}
Ok(PusTmCreator::new(
&mut sp_header,
sec_header,
&buf[0..current_idx],
true,
))
}
}
#[cfg(feature = "alloc")]
mod alloc_mod {
use super::*;
use alloc::vec;
use alloc::vec::Vec;
pub struct EventReporter {
source_data_buf: Vec<u8>,
pub reporter: EventReporterBase,
}
impl EventReporter {
pub fn new(apid: u16, max_event_id_and_aux_data_size: usize) -> Option<Self> {
let reporter = EventReporterBase::new(apid)?;
Some(Self {
source_data_buf: vec![0; max_event_id_and_aux_data_size],
reporter,
})
}
pub fn event_info(
&mut self,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.reporter.event_info(
self.source_data_buf.as_mut_slice(),
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_low_severity(
&mut self,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.reporter.event_low_severity(
self.source_data_buf.as_mut_slice(),
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_medium_severity(
&mut self,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.reporter.event_medium_severity(
self.source_data_buf.as_mut_slice(),
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_high_severity(
&mut self,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.reporter.event_high_severity(
self.source_data_buf.as_mut_slice(),
sender,
time_stamp,
event_id,
aux_data,
)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::events::{EventU32, Severity};
use crate::pus::tests::CommonTmInfo;
use crate::pus::{EcssChannel, PusTmWrapper};
use crate::ChannelId;
use spacepackets::ByteConversionError;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::vec::Vec;
const EXAMPLE_APID: u16 = 0xee;
const EXAMPLE_GROUP_ID: u16 = 2;
const EXAMPLE_EVENT_ID_0: u16 = 1;
#[allow(dead_code)]
const EXAMPLE_EVENT_ID_1: u16 = 2;
#[derive(Debug, Eq, PartialEq, Clone)]
struct TmInfo {
pub common: CommonTmInfo,
pub event: EventU32,
pub aux_data: Vec<u8>,
}
#[derive(Default, Clone)]
struct TestSender {
pub service_queue: RefCell<VecDeque<TmInfo>>,
}
impl EcssChannel for TestSender {
fn id(&self) -> ChannelId {
0
}
}
impl EcssTmSenderCore for TestSender {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
match tm {
PusTmWrapper::InStore(_) => {
panic!("TestSender: unexpected call with address");
}
PusTmWrapper::Direct(tm) => {
assert!(!tm.source_data().is_empty());
let src_data = tm.source_data();
assert!(src_data.len() >= 4);
let event =
EventU32::from(u32::from_be_bytes(src_data[0..4].try_into().unwrap()));
let mut aux_data = Vec::new();
if src_data.len() > 4 {
aux_data.extend_from_slice(&src_data[4..]);
}
self.service_queue.borrow_mut().push_back(TmInfo {
common: CommonTmInfo::new_from_tm(&tm),
event,
aux_data,
});
Ok(())
}
}
}
}
fn severity_to_subservice(severity: Severity) -> Subservice {
match severity {
Severity::INFO => Subservice::TmInfoReport,
Severity::LOW => Subservice::TmLowSeverityReport,
Severity::MEDIUM => Subservice::TmMediumSeverityReport,
Severity::HIGH => Subservice::TmHighSeverityReport,
}
}
fn report_basic_event(
reporter: &mut EventReporter,
sender: &mut TestSender,
time_stamp: &[u8],
event: EventU32,
severity: Severity,
aux_data: Option<&[u8]>,
) {
match severity {
Severity::INFO => {
reporter
.event_info(sender, time_stamp, event, aux_data)
.expect("Error reporting info event");
}
Severity::LOW => {
reporter
.event_low_severity(sender, time_stamp, event, aux_data)
.expect("Error reporting low event");
}
Severity::MEDIUM => {
reporter
.event_medium_severity(sender, time_stamp, event, aux_data)
.expect("Error reporting medium event");
}
Severity::HIGH => {
reporter
.event_high_severity(sender, time_stamp, event, aux_data)
.expect("Error reporting high event");
}
}
}
fn basic_event_test(
max_event_aux_data_buf: usize,
severity: Severity,
error_data: Option<&[u8]>,
) {
let mut sender = TestSender::default();
let reporter = EventReporter::new(EXAMPLE_APID, max_event_aux_data_buf);
assert!(reporter.is_some());
let mut reporter = reporter.unwrap();
let time_stamp_empty: [u8; 7] = [0; 7];
let mut error_copy = Vec::new();
if let Some(err_data) = error_data {
error_copy.extend_from_slice(err_data);
}
let event = EventU32::new(severity, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
.expect("Error creating example event");
report_basic_event(
&mut reporter,
&mut sender,
&time_stamp_empty,
event,
severity,
error_data,
);
let mut service_queue = sender.service_queue.borrow_mut();
assert_eq!(service_queue.len(), 1);
let tm_info = service_queue.pop_front().unwrap();
assert_eq!(
tm_info.common.subservice,
severity_to_subservice(severity) as u8
);
assert_eq!(tm_info.common.dest_id, 0);
assert_eq!(tm_info.common.time_stamp, time_stamp_empty);
assert_eq!(tm_info.common.msg_counter, 0);
assert_eq!(tm_info.common.apid, EXAMPLE_APID);
assert_eq!(tm_info.event, event);
assert_eq!(tm_info.aux_data, error_copy);
}
#[test]
fn basic_info_event_generation() {
basic_event_test(4, Severity::INFO, None);
}
#[test]
fn basic_low_severity_event() {
basic_event_test(4, Severity::LOW, None);
}
#[test]
fn basic_medium_severity_event() {
basic_event_test(4, Severity::MEDIUM, None);
}
#[test]
fn basic_high_severity_event() {
basic_event_test(4, Severity::HIGH, None);
}
#[test]
fn event_with_info_string() {
let info_string = "Test Information";
basic_event_test(32, Severity::INFO, Some(info_string.as_bytes()));
}
#[test]
fn low_severity_with_raw_err_data() {
let raw_err_param: i32 = -1;
let raw_err = raw_err_param.to_be_bytes();
basic_event_test(8, Severity::LOW, Some(&raw_err))
}
fn check_buf_too_small(
reporter: &mut EventReporter,
sender: &mut TestSender,
expected_found_len: usize,
) {
let time_stamp_empty: [u8; 7] = [0; 7];
let event = EventU32::new(Severity::INFO, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
.expect("Error creating example event");
let err = reporter.event_info(sender, &time_stamp_empty, event, None);
assert!(err.is_err());
let err = err.unwrap_err();
if let EcssTmtcError::Pus(PusError::ByteConversion(
ByteConversionError::ToSliceTooSmall { found, expected },
)) = err
{
assert_eq!(expected, 4);
assert_eq!(found, expected_found_len);
} else {
panic!("Unexpected error {:?}", err);
}
}
#[test]
fn insufficient_buffer() {
let mut sender = TestSender::default();
for i in 0..3 {
let reporter = EventReporter::new(EXAMPLE_APID, i);
assert!(reporter.is_some());
let mut reporter = reporter.unwrap();
check_buf_too_small(&mut reporter, &mut sender, i);
}
}
}

satrs/src/pus/event_man.rs Normal file

@@ -0,0 +1,309 @@
use crate::events::{EventU32, GenericEvent, Severity};
#[cfg(feature = "alloc")]
use crate::events::{EventU32TypedSev, HasSeverity};
#[cfg(feature = "alloc")]
use alloc::boxed::Box;
#[cfg(feature = "alloc")]
use core::hash::Hash;
#[cfg(feature = "alloc")]
use hashbrown::HashSet;
#[cfg(feature = "alloc")]
pub use crate::pus::event::EventReporter;
use crate::pus::verification::TcStateToken;
#[cfg(feature = "alloc")]
use crate::pus::EcssTmSenderCore;
use crate::pus::EcssTmtcError;
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub use alloc_mod::*;
#[cfg(feature = "heapless")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "heapless")))]
pub use heapless_mod::*;
/// This trait allows the PUS event manager implementation to stay generic over various types
/// of backend containers.
///
/// These backend containers keep track of whether a particular event is enabled or disabled for
/// reporting and also expose a simple API to enable or disable the event reporting.
///
/// For example, a straightforward implementation for host systems could use a
/// [hash set](https://docs.rs/hashbrown/latest/hashbrown/struct.HashSet.html)
/// structure to track disabled events. A more primitive and embedded-friendly
/// solution could track this information in a static or pre-allocated list which contains
/// the disabled events.
pub trait PusEventMgmtBackendProvider<Provider: GenericEvent> {
type Error;
fn event_enabled(&self, event: &Provider) -> bool;
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error>;
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error>;
}
#[cfg(feature = "heapless")]
pub mod heapless_mod {
use super::*;
use crate::events::{GenericEvent, LargestEventRaw};
use std::marker::PhantomData;
#[cfg_attr(doc_cfg, doc(cfg(feature = "heapless")))]
// TODO: After a new version of heapless is released which uses hash32 version 0.3, try using
// regular Event type again.
#[derive(Default)]
pub struct HeaplessPusMgmtBackendProvider<const N: usize, Provider: GenericEvent> {
disabled: heapless::FnvIndexSet<LargestEventRaw, N>,
phantom: PhantomData<Provider>,
}
/// Safety: All contained fields are [Send] as well
unsafe impl<const N: usize, Event: GenericEvent + Send> Send
for HeaplessPusMgmtBackendProvider<N, Event>
{
}
impl<const N: usize, Provider: GenericEvent> PusEventMgmtBackendProvider<Provider>
for HeaplessPusMgmtBackendProvider<N, Provider>
{
type Error = ();
fn event_enabled(&self, event: &Provider) -> bool {
self.disabled.contains(&event.raw_as_largest_type())
}
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
self.disabled
.insert(event.raw_as_largest_type())
.map_err(|_| ())
}
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
Ok(self.disabled.remove(&event.raw_as_largest_type()))
}
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum EventRequest<Event: GenericEvent = EventU32> {
Enable(Event),
Disable(Event),
}
#[derive(Debug)]
pub struct EventRequestWithToken<Event: GenericEvent = EventU32> {
pub request: EventRequest<Event>,
pub token: TcStateToken,
}
#[derive(Debug)]
pub enum EventManError {
EcssTmtcError(EcssTmtcError),
SeverityMissmatch(Severity, Severity),
}
impl From<EcssTmtcError> for EventManError {
fn from(v: EcssTmtcError) -> Self {
Self::EcssTmtcError(v)
}
}
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use super::*;
/// Default backend provider which uses a hash set as the event reporting status container
/// as mentioned in the example of the [PusEventMgmtBackendProvider] documentation.
///
/// This provider is a good option for host systems or larger embedded systems where
/// the expected occasional memory allocation performed by the [HashSet] is not an issue.
pub struct DefaultPusMgmtBackendProvider<Event: GenericEvent = EventU32> {
disabled: HashSet<Event>,
}
/// Safety: All contained fields are [Send] as well
unsafe impl<Event: GenericEvent + Send> Send for DefaultPusMgmtBackendProvider<Event> {}
impl<Event: GenericEvent> Default for DefaultPusMgmtBackendProvider<Event> {
fn default() -> Self {
Self {
disabled: HashSet::default(),
}
}
}
impl<Provider: GenericEvent + PartialEq + Eq + Hash + Copy + Clone>
PusEventMgmtBackendProvider<Provider> for DefaultPusMgmtBackendProvider<Provider>
{
type Error = ();
fn event_enabled(&self, event: &Provider) -> bool {
!self.disabled.contains(event)
}
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
Ok(self.disabled.remove(event))
}
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
Ok(self.disabled.insert(*event))
}
}
pub struct PusEventDispatcher<BackendError, Provider: GenericEvent> {
reporter: EventReporter,
backend: Box<dyn PusEventMgmtBackendProvider<Provider, Error = BackendError>>,
}
/// Safety: All contained fields are [Send] as well.
unsafe impl<E: Send, Event: GenericEvent + Send> Send for PusEventDispatcher<E, Event> {}
impl<BackendError, Provider: GenericEvent> PusEventDispatcher<BackendError, Provider> {
pub fn new(
reporter: EventReporter,
backend: Box<dyn PusEventMgmtBackendProvider<Provider, Error = BackendError>>,
) -> Self {
Self { reporter, backend }
}
}
impl<BackendError, Event: GenericEvent> PusEventDispatcher<BackendError, Event> {
pub fn enable_tm_for_event(&mut self, event: &Event) -> Result<bool, BackendError> {
self.backend.enable_event_reporting(event)
}
pub fn disable_tm_for_event(&mut self, event: &Event) -> Result<bool, BackendError> {
self.backend.disable_event_reporting(event)
}
pub fn generate_pus_event_tm_generic(
&mut self,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event: Event,
aux_data: Option<&[u8]>,
) -> Result<bool, EventManError> {
if !self.backend.event_enabled(&event) {
return Ok(false);
}
match event.severity() {
Severity::INFO => self
.reporter
.event_info(sender, time_stamp, event, aux_data)
.map(|_| true)
.map_err(|e| e.into()),
Severity::LOW => self
.reporter
.event_low_severity(sender, time_stamp, event, aux_data)
.map(|_| true)
.map_err(|e| e.into()),
Severity::MEDIUM => self
.reporter
.event_medium_severity(sender, time_stamp, event, aux_data)
.map(|_| true)
.map_err(|e| e.into()),
Severity::HIGH => self
.reporter
.event_high_severity(sender, time_stamp, event, aux_data)
.map(|_| true)
.map_err(|e| e.into()),
}
}
}
impl<BackendError> PusEventDispatcher<BackendError, EventU32> {
pub fn enable_tm_for_event_with_sev<Severity: HasSeverity>(
&mut self,
event: &EventU32TypedSev<Severity>,
) -> Result<bool, BackendError> {
self.backend.enable_event_reporting(event.as_ref())
}
pub fn disable_tm_for_event_with_sev<Severity: HasSeverity>(
&mut self,
event: &EventU32TypedSev<Severity>,
) -> Result<bool, BackendError> {
self.backend.disable_event_reporting(event.as_ref())
}
pub fn generate_pus_event_tm<Severity: HasSeverity>(
&mut self,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event: EventU32TypedSev<Severity>,
aux_data: Option<&[u8]>,
) -> Result<bool, EventManError> {
self.generate_pus_event_tm_generic(sender, time_stamp, event.into(), aux_data)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::events::SeverityInfo;
use crate::pus::MpscTmAsVecSender;
use std::sync::mpsc::{channel, TryRecvError};
const INFO_EVENT: EventU32TypedSev<SeverityInfo> =
EventU32TypedSev::<SeverityInfo>::const_new(1, 0);
const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5);
const EMPTY_STAMP: [u8; 7] = [0; 7];
fn create_basic_man() -> PusEventDispatcher<(), EventU32> {
let reporter = EventReporter::new(0x02, 128).expect("Creating event reporter failed");
let backend = DefaultPusMgmtBackendProvider::<EventU32>::default();
PusEventDispatcher::new(reporter, Box::new(backend))
}
#[test]
fn test_basic() {
let mut event_man = create_basic_man();
let (event_tx, event_rx) = channel();
let mut sender = MpscTmAsVecSender::new(0, "test_sender", event_tx);
let event_sent = event_man
.generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
.expect("Sending info event failed");
assert!(event_sent);
// Will not check packet here, correctness of packet was tested somewhere else
event_rx.try_recv().expect("Receiving event TM failed");
}
#[test]
fn test_disable_event() {
let mut event_man = create_basic_man();
let (event_tx, event_rx) = channel();
let mut sender = MpscTmAsVecSender::new(0, "test", event_tx);
let res = event_man.disable_tm_for_event(&LOW_SEV_EVENT);
assert!(res.is_ok());
assert!(res.unwrap());
let mut event_sent = event_man
.generate_pus_event_tm_generic(&mut sender, &EMPTY_STAMP, LOW_SEV_EVENT, None)
.expect("Sending low severity event failed");
assert!(!event_sent);
let res = event_rx.try_recv();
assert!(res.is_err());
assert!(matches!(res.unwrap_err(), TryRecvError::Empty));
// Check that only the low severity event was disabled
event_sent = event_man
.generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
.expect("Sending info event failed");
assert!(event_sent);
event_rx.try_recv().expect("No info event received");
}
#[test]
fn test_reenable_event() {
let mut event_man = create_basic_man();
let (event_tx, event_rx) = channel();
let mut sender = MpscTmAsVecSender::new(0, "test", event_tx);
let mut res = event_man.disable_tm_for_event_with_sev(&INFO_EVENT);
assert!(res.is_ok());
assert!(res.unwrap());
res = event_man.enable_tm_for_event_with_sev(&INFO_EVENT);
assert!(res.is_ok());
assert!(res.unwrap());
let event_sent = event_man
.generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
.expect("Sending info event failed");
assert!(event_sent);
event_rx.try_recv().expect("No info event received");
}
}
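A usage sketch for the event dispatcher added above (not part of this commit). It mirrors the unit tests: the hash-set based default backend and an EventReporter are wired into a PusEventDispatcher, and one low severity event TM is generated through an mpsc based TM sender. The import paths and the availability of MpscTmAsVecSender assume the alloc/std features are enabled.

use std::sync::mpsc::channel;

use satrs::events::{EventU32, Severity};
use satrs::pus::event::EventReporter;
use satrs::pus::event_man::{DefaultPusMgmtBackendProvider, PusEventDispatcher};
use satrs::pus::MpscTmAsVecSender;

fn main() {
    // Reporter for APID 0x02 with room for the event ID plus auxiliary data.
    let reporter = EventReporter::new(0x02, 128).expect("invalid APID");
    // Default backend: a hash set tracking which events are disabled for reporting.
    let backend = DefaultPusMgmtBackendProvider::<EventU32>::default();
    let mut dispatcher = PusEventDispatcher::new(reporter, Box::new(backend));

    // TM sink: the mpsc based sender also used by the tests above.
    let (tm_tx, tm_rx) = channel();
    let mut sender = MpscTmAsVecSender::new(0, "event_tm_sender", tm_tx);

    let event = EventU32::new(Severity::LOW, 1, 5).expect("invalid event");
    // Returns Ok(false) if reporting was disabled for this event beforehand.
    let sent = dispatcher
        .generate_pus_event_tm_generic(&mut sender, &[0; 7], event, None)
        .expect("generating event TM failed");
    assert!(sent);
    assert!(tm_rx.try_recv().is_ok());
}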

satrs/src/pus/event_srv.rs Normal file

@@ -0,0 +1,280 @@
use crate::events::EventU32;
use crate::pus::event_man::{EventRequest, EventRequestWithToken};
use crate::pus::verification::TcStateToken;
use crate::pus::{PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError};
use spacepackets::ecss::event::Subservice;
use spacepackets::ecss::PusPacket;
use std::sync::mpsc::Sender;
use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
pub struct PusService5EventHandler<TcInMemConverter: EcssTcInMemConverter> {
pub service_helper: PusServiceHelper<TcInMemConverter>,
event_request_tx: Sender<EventRequestWithToken>,
}
impl<TcInMemConverter: EcssTcInMemConverter> PusService5EventHandler<TcInMemConverter> {
pub fn new(
service_handler: PusServiceHelper<TcInMemConverter>,
event_request_tx: Sender<EventRequestWithToken>,
) -> Self {
Self {
service_helper: service_handler,
event_request_tx,
}
}
pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(PusPacketHandlerResult::Empty);
}
let ecss_tc_and_token = possible_packet.unwrap();
let tc = self
.service_helper
.tc_in_mem_converter
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
let subservice = tc.subservice();
let srv = Subservice::try_from(subservice);
if srv.is_err() {
return Ok(PusPacketHandlerResult::CustomSubservice(
tc.subservice(),
ecss_tc_and_token.token,
));
}
let handle_enable_disable_request = |enable: bool, stamp: [u8; 7]| {
if tc.user_data().len() < 4 {
return Err(PusPacketHandlingError::NotEnoughAppData(
"at least 4 bytes event ID expected".into(),
));
}
let user_data = tc.user_data();
let event_u32 = EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap()));
let start_token = self
.service_helper
.common
.verification_handler
.borrow_mut()
.start_success(ecss_tc_and_token.token, Some(&stamp))
.map_err(|_| PartialPusHandlingError::Verification);
let partial_error = start_token.clone().err();
let mut token: TcStateToken = ecss_tc_and_token.token.into();
if let Ok(start_token) = start_token {
token = start_token.into();
}
let event_req_with_token = if enable {
EventRequestWithToken {
request: EventRequest::Enable(event_u32),
token,
}
} else {
EventRequestWithToken {
request: EventRequest::Disable(event_u32),
token,
}
};
self.event_request_tx
.send(event_req_with_token)
.map_err(|_| {
PusPacketHandlingError::Other("Forwarding event request failed".into())
})?;
if let Some(partial_error) = partial_error {
return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
partial_error,
));
}
Ok(PusPacketHandlerResult::RequestHandled)
};
let mut partial_error = None;
let time_stamp = PusServiceBase::get_current_timestamp(&mut partial_error);
match srv.unwrap() {
Subservice::TmInfoReport
| Subservice::TmLowSeverityReport
| Subservice::TmMediumSeverityReport
| Subservice::TmHighSeverityReport => {
return Err(PusPacketHandlingError::InvalidSubservice(tc.subservice()))
}
Subservice::TcEnableEventGeneration => {
handle_enable_disable_request(true, time_stamp)?;
}
Subservice::TcDisableEventGeneration => {
handle_enable_disable_request(false, time_stamp)?;
}
Subservice::TcReportDisabledList | Subservice::TmDisabledEventsReport => {
return Ok(PusPacketHandlerResult::SubserviceNotImplemented(
subservice,
ecss_tc_and_token.token,
));
}
}
Ok(PusPacketHandlerResult::RequestHandled)
}
}
#[cfg(test)]
mod tests {
use delegate::delegate;
use spacepackets::ecss::event::Subservice;
use spacepackets::util::UnsignedEnum;
use spacepackets::{
ecss::{
tc::{PusTcCreator, PusTcSecondaryHeader},
tm::PusTmReader,
},
SequenceFlags, SpHeader,
};
use std::sync::mpsc::{self, Sender};
use crate::pus::event_man::EventRequest;
use crate::pus::tests::SimplePusPacketHandler;
use crate::pus::verification::RequestId;
use crate::{
events::EventU32,
pus::{
event_man::EventRequestWithToken,
tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness, TEST_APID},
verification::{TcStateAccepted, VerificationToken},
EcssTcInSharedStoreConverter, PusPacketHandlerResult, PusPacketHandlingError,
},
};
use super::PusService5EventHandler;
const TEST_EVENT_0: EventU32 = EventU32::const_new(crate::events::Severity::INFO, 5, 25);
struct Pus5HandlerWithStoreTester {
common: PusServiceHandlerWithSharedStoreCommon,
handler: PusService5EventHandler<EcssTcInSharedStoreConverter>,
}
impl Pus5HandlerWithStoreTester {
pub fn new(event_request_tx: Sender<EventRequestWithToken>) -> Self {
let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
Self {
common,
handler: PusService5EventHandler::new(srv_handler, event_request_tx),
}
}
}
impl PusTestHarness for Pus5HandlerWithStoreTester {
delegate! {
to self.common {
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
fn read_next_tm(&mut self) -> PusTmReader<'_>;
fn check_no_tm_available(&self) -> bool;
fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId);
}
}
}
impl SimplePusPacketHandler for Pus5HandlerWithStoreTester {
delegate! {
to self.handler {
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
}
}
}
fn event_test(
test_harness: &mut (impl PusTestHarness + SimplePusPacketHandler),
subservice: Subservice,
expected_event_req: EventRequest,
event_req_receiver: mpsc::Receiver<EventRequestWithToken>,
) {
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
let sec_header = PusTcSecondaryHeader::new_simple(5, subservice as u8);
let mut app_data = [0; 4];
TEST_EVENT_0
.write_to_be_bytes(&mut app_data)
.expect("writing test event failed");
let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true);
let token = test_harness.send_tc(&ping_tc);
let request_id = token.req_id();
test_harness.handle_one_tc().unwrap();
test_harness.check_next_verification_tm(1, request_id);
test_harness.check_next_verification_tm(3, request_id);
// Completion TM is not generated for us.
assert!(test_harness.check_no_tm_available());
let event_request = event_req_receiver
.try_recv()
.expect("no event request received");
assert_eq!(expected_event_req, event_request.request);
}
#[test]
fn test_enabling_event_reporting() {
let (event_request_tx, event_request_rx) = mpsc::channel();
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
event_test(
&mut test_harness,
Subservice::TcEnableEventGeneration,
EventRequest::Enable(TEST_EVENT_0),
event_request_rx,
);
}
#[test]
fn test_disabling_event_reporting() {
let (event_request_tx, event_request_rx) = mpsc::channel();
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
event_test(
&mut test_harness,
Subservice::TcDisableEventGeneration,
EventRequest::Disable(TEST_EVENT_0),
event_request_rx,
);
}
#[test]
fn test_empty_tc_queue() {
let (event_request_tx, _) = mpsc::channel();
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
let result = result.unwrap();
if let PusPacketHandlerResult::Empty = result {
} else {
panic!("unexpected result type {result:?}")
}
}
#[test]
fn test_sending_custom_subservice() {
let (event_request_tx, _) = mpsc::channel();
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
let sec_header = PusTcSecondaryHeader::new_simple(5, 200);
let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
test_harness.send_tc(&ping_tc);
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
let result = result.unwrap();
if let PusPacketHandlerResult::CustomSubservice(subservice, _) = result {
assert_eq!(subservice, 200);
} else {
panic!("unexpected result type {result:?}")
}
}
#[test]
fn test_sending_invalid_app_data() {
let (event_request_tx, _) = mpsc::channel();
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
let sec_header =
PusTcSecondaryHeader::new_simple(5, Subservice::TcEnableEventGeneration as u8);
let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &[0, 1, 2], true);
test_harness.send_tc(&ping_tc);
let result = test_harness.handle_one_tc();
assert!(result.is_err());
let result = result.unwrap_err();
if let PusPacketHandlingError::NotEnoughAppData(string) = result {
assert_eq!(string, "at least 4 bytes event ID expected");
} else {
panic!("unexpected result type {result:?}")
}
}
}

satrs/src/pus/hk.rs Normal file

@@ -0,0 +1 @@
pub use spacepackets::ecss::hk::*;

satrs/src/pus/mod.rs Normal file

File diff suppressed because it is too large

satrs/src/pus/mode.rs Normal file

@@ -0,0 +1,16 @@
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum Subservice {
TcSetMode = 1,
TcReadMode = 3,
TcAnnounceMode = 4,
TcAnnounceModeRecursive = 5,
TmModeReply = 6,
TmCantReachMode = 7,
TmWrongModeReply = 8,
}

satrs/src/pus/scheduler.rs Normal file

File diff suppressed because it is too large


@@ -0,0 +1,355 @@
use super::scheduler::PusSchedulerProvider;
use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
use crate::pool::PoolProvider;
use crate::pus::{PusPacketHandlerResult, PusPacketHandlingError};
use alloc::string::ToString;
use spacepackets::ecss::{scheduling, PusPacket};
use spacepackets::time::cds::TimeProvider;
/// This is a helper class for [std] environments to handle generic PUS 11 (scheduling service)
/// packets. This handler is able to handle the most important PUS requests for a scheduling
/// service which provides the [PusSchedulerProvider].
///
/// Please note that this class does not perform the regular periodic handling, such as releasing
/// due telecommands from the scheduler. The user can retrieve the wrapped scheduler via the
/// [Self::scheduler] and [Self::scheduler_mut] functions and then use the scheduler API to release
/// telecommands when applicable.
pub struct PusService11SchedHandler<
TcInMemConverter: EcssTcInMemConverter,
PusScheduler: PusSchedulerProvider,
> {
pub service_helper: PusServiceHelper<TcInMemConverter>,
scheduler: PusScheduler,
}
impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerProvider>
PusService11SchedHandler<TcInMemConverter, Scheduler>
{
pub fn new(service_helper: PusServiceHelper<TcInMemConverter>, scheduler: Scheduler) -> Self {
Self {
service_helper,
scheduler,
}
}
pub fn scheduler_mut(&mut self) -> &mut Scheduler {
&mut self.scheduler
}
pub fn scheduler(&self) -> &Scheduler {
&self.scheduler
}
pub fn handle_one_tc(
&mut self,
sched_tc_pool: &mut (impl PoolProvider + ?Sized),
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(PusPacketHandlerResult::Empty);
}
let ecss_tc_and_token = possible_packet.unwrap();
let tc = self
.service_helper
.tc_in_mem_converter
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
let subservice = PusPacket::subservice(&tc);
let standard_subservice = scheduling::Subservice::try_from(subservice);
if standard_subservice.is_err() {
return Ok(PusPacketHandlerResult::CustomSubservice(
subservice,
ecss_tc_and_token.token,
));
}
let mut partial_error = None;
let time_stamp = PusServiceBase::get_current_timestamp(&mut partial_error);
match standard_subservice.unwrap() {
scheduling::Subservice::TcEnableScheduling => {
let start_token = self
.service_helper
.common
.verification_handler
.get_mut()
.start_success(ecss_tc_and_token.token, Some(&time_stamp))
.expect("Error sending start success");
self.scheduler.enable();
if self.scheduler.is_enabled() {
self.service_helper
.common
.verification_handler
.get_mut()
.completion_success(start_token, Some(&time_stamp))
.expect("Error sending completion success");
} else {
return Err(PusPacketHandlingError::Other(
"failed to enabled scheduler".to_string(),
));
}
}
scheduling::Subservice::TcDisableScheduling => {
let start_token = self
.service_helper
.common
.verification_handler
.get_mut()
.start_success(ecss_tc_and_token.token, Some(&time_stamp))
.expect("Error sending start success");
self.scheduler.disable();
if !self.scheduler.is_enabled() {
self.service_helper
.common
.verification_handler
.get_mut()
.completion_success(start_token, Some(&time_stamp))
.expect("Error sending completion success");
} else {
return Err(PusPacketHandlingError::Other(
"failed to disable scheduler".to_string(),
));
}
}
scheduling::Subservice::TcResetScheduling => {
let start_token = self
.service_helper
.common
.verification_handler
.get_mut()
.start_success(ecss_tc_and_token.token, Some(&time_stamp))
.expect("Error sending start success");
self.scheduler
.reset(sched_tc_pool)
.expect("Error resetting TC Pool");
self.service_helper
.common
.verification_handler
.get_mut()
.completion_success(start_token, Some(&time_stamp))
.expect("Error sending completion success");
}
scheduling::Subservice::TcInsertActivity => {
let start_token = self
.service_helper
.common
.verification_handler
.get_mut()
.start_success(ecss_tc_and_token.token, Some(&time_stamp))
.expect("error sending start success");
// let mut pool = self.sched_tc_pool.write().expect("locking pool failed");
self.scheduler
.insert_wrapped_tc::<TimeProvider>(&tc, sched_tc_pool)
.expect("insertion of activity into pool failed");
self.service_helper
.common
.verification_handler
.get_mut()
.completion_success(start_token, Some(&time_stamp))
.expect("sending completion success failed");
}
_ => {
// Treat unhandled standard subservices as custom subservices for now.
return Ok(PusPacketHandlerResult::CustomSubservice(
subservice,
ecss_tc_and_token.token,
));
}
}
if let Some(partial_error) = partial_error {
return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
partial_error,
));
}
Ok(PusPacketHandlerResult::RequestHandled)
}
}
#[cfg(test)]
mod tests {
use crate::pool::{StaticMemoryPool, StaticPoolConfig};
use crate::pus::tests::TEST_APID;
use crate::pus::{
scheduler::{self, PusSchedulerProvider, TcInfo},
tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness},
verification::{RequestId, TcStateAccepted, VerificationToken},
EcssTcInSharedStoreConverter,
};
use alloc::collections::VecDeque;
use delegate::delegate;
use spacepackets::ecss::scheduling::Subservice;
use spacepackets::ecss::tc::PusTcSecondaryHeader;
use spacepackets::ecss::WritablePusPacket;
use spacepackets::time::TimeWriter;
use spacepackets::SpHeader;
use spacepackets::{
ecss::{tc::PusTcCreator, tm::PusTmReader},
time::cds,
};
use super::PusService11SchedHandler;
struct Pus11HandlerWithStoreTester {
common: PusServiceHandlerWithSharedStoreCommon,
handler: PusService11SchedHandler<EcssTcInSharedStoreConverter, TestScheduler>,
sched_tc_pool: StaticMemoryPool,
}
impl Pus11HandlerWithStoreTester {
pub fn new() -> Self {
let test_scheduler = TestScheduler::default();
let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)], false);
let sched_tc_pool = StaticMemoryPool::new(pool_cfg.clone());
let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
Self {
common,
handler: PusService11SchedHandler::new(srv_handler, test_scheduler),
sched_tc_pool,
}
}
}
impl PusTestHarness for Pus11HandlerWithStoreTester {
delegate! {
to self.common {
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
fn read_next_tm(&mut self) -> PusTmReader<'_>;
fn check_no_tm_available(&self) -> bool;
fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId);
}
}
}
#[derive(Default)]
pub struct TestScheduler {
reset_count: u32,
enabled: bool,
enabled_count: u32,
disabled_count: u32,
inserted_tcs: VecDeque<TcInfo>,
}
impl PusSchedulerProvider for TestScheduler {
type TimeProvider = cds::TimeProvider;
fn reset(
&mut self,
_store: &mut (impl crate::pool::PoolProvider + ?Sized),
) -> Result<(), crate::pool::StoreError> {
self.reset_count += 1;
Ok(())
}
fn is_enabled(&self) -> bool {
self.enabled
}
fn enable(&mut self) {
self.enabled_count += 1;
self.enabled = true;
}
fn disable(&mut self) {
self.disabled_count += 1;
self.enabled = false;
}
fn insert_unwrapped_and_stored_tc(
&mut self,
_time_stamp: spacepackets::time::UnixTimestamp,
info: crate::pus::scheduler::TcInfo,
) -> Result<(), crate::pus::scheduler::ScheduleError> {
self.inserted_tcs.push_back(info);
Ok(())
}
}
fn generic_subservice_send(
test_harness: &mut Pus11HandlerWithStoreTester,
subservice: Subservice,
) {
let mut reply_header = SpHeader::tm_unseg(TEST_APID, 0, 0).unwrap();
let tc_header = PusTcSecondaryHeader::new_simple(11, subservice as u8);
let enable_scheduling = PusTcCreator::new(&mut reply_header, tc_header, &[0; 7], true);
let token = test_harness.send_tc(&enable_scheduling);
let request_id = token.req_id();
test_harness
.handler
.handle_one_tc(&mut test_harness.sched_tc_pool)
.unwrap();
test_harness.check_next_verification_tm(1, request_id);
test_harness.check_next_verification_tm(3, request_id);
test_harness.check_next_verification_tm(7, request_id);
}
#[test]
fn test_scheduling_enabling_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
test_harness.handler.scheduler_mut().disable();
assert!(!test_harness.handler.scheduler().is_enabled());
generic_subservice_send(&mut test_harness, Subservice::TcEnableScheduling);
assert!(test_harness.handler.scheduler().is_enabled());
assert_eq!(test_harness.handler.scheduler().enabled_count, 1);
}
#[test]
fn test_scheduling_disabling_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
test_harness.handler.scheduler_mut().enable();
assert!(test_harness.handler.scheduler().is_enabled());
generic_subservice_send(&mut test_harness, Subservice::TcDisableScheduling);
assert!(!test_harness.handler.scheduler().is_enabled());
assert_eq!(test_harness.handler.scheduler().disabled_count, 1);
}
#[test]
fn test_reset_scheduler_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
generic_subservice_send(&mut test_harness, Subservice::TcResetScheduling);
assert_eq!(test_harness.handler.scheduler().reset_count, 1);
}
#[test]
fn test_insert_activity_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
let mut reply_header = SpHeader::tm_unseg(TEST_APID, 0, 0).unwrap();
let mut sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new(&mut reply_header, sec_header, &[], true);
let req_id_ping_tc = scheduler::RequestId::from_tc(&ping_tc);
let stamper = cds::TimeProvider::from_now_with_u16_days().expect("time provider failed");
let mut sched_app_data: [u8; 64] = [0; 64];
let mut written_len = stamper.write_to_bytes(&mut sched_app_data).unwrap();
let ping_raw = ping_tc.to_vec().expect("generating raw tc failed");
sched_app_data[written_len..written_len + ping_raw.len()].copy_from_slice(&ping_raw);
written_len += ping_raw.len();
reply_header = SpHeader::tm_unseg(TEST_APID, 1, 0).unwrap();
sec_header = PusTcSecondaryHeader::new_simple(11, Subservice::TcInsertActivity as u8);
let enable_scheduling = PusTcCreator::new(
&mut reply_header,
sec_header,
&sched_app_data[..written_len],
true,
);
let token = test_harness.send_tc(&enable_scheduling);
let request_id = token.req_id();
test_harness
.handler
.handle_one_tc(&mut test_harness.sched_tc_pool)
.unwrap();
test_harness.check_next_verification_tm(1, request_id);
test_harness.check_next_verification_tm(3, request_id);
test_harness.check_next_verification_tm(7, request_id);
let tc_info = test_harness
.handler
.scheduler_mut()
.inserted_tcs
.pop_front()
.unwrap();
assert_eq!(tc_info.request_id(), req_id_ping_tc);
}
}
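A rough service-loop sketch for the scheduling handler above (not part of this commit). It shows the split of responsibilities described in the doc comment: the loop only drains the TC queue, while releasing due telecommands stays with the caller through the wrapped scheduler. The helper name and the import path of PusService11SchedHandler are assumptions, since the handler's file name is not visible in this excerpt.

use satrs::pool::PoolProvider;
use satrs::pus::scheduler::PusSchedulerProvider;
use satrs::pus::{EcssTcInMemConverter, PusPacketHandlerResult};
// Assumed import path; the actual module name of the PUS 11 handler is not shown above.
use satrs::pus::scheduler_srv::PusService11SchedHandler;

// Hypothetical helper: drain all currently queued PUS 11 TCs for one service cycle.
fn handle_pus11_cycle<Conv: EcssTcInMemConverter, Sched: PusSchedulerProvider>(
    handler: &mut PusService11SchedHandler<Conv, Sched>,
    sched_tc_pool: &mut impl PoolProvider,
) {
    loop {
        // Explicit reborrow keeps the pool reference usable across loop iterations.
        match handler.handle_one_tc(&mut *sched_tc_pool) {
            Ok(PusPacketHandlerResult::Empty) => break,
            Ok(_) => continue,
            Err(e) => {
                eprintln!("PUS 11 handler error: {e}");
                break;
            }
        }
    }
    // The handler does not release telecommands itself: call the concrete scheduler's
    // release API here via handler.scheduler_mut(), e.g. once per scheduling cycle.
}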

satrs/src/pus/test.rs Normal file

@@ -0,0 +1,272 @@
use crate::pus::{
PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, PusTmWrapper,
};
use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use spacepackets::ecss::PusPacket;
use spacepackets::SpHeader;
use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
/// This is a helper class for [std] environments to handle generic PUS 17 (test service) packets.
/// This handler only processes ping requests and generates a ping reply for them accordingly.
pub struct PusService17TestHandler<TcInMemConverter: EcssTcInMemConverter> {
pub service_helper: PusServiceHelper<TcInMemConverter>,
}
impl<TcInMemConverter: EcssTcInMemConverter> PusService17TestHandler<TcInMemConverter> {
pub fn new(service_helper: PusServiceHelper<TcInMemConverter>) -> Self {
Self { service_helper }
}
pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(PusPacketHandlerResult::Empty);
}
let ecss_tc_and_token = possible_packet.unwrap();
let tc = self
.service_helper
.tc_in_mem_converter
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
if tc.service() != 17 {
return Err(PusPacketHandlingError::WrongService(tc.service()));
}
if tc.subservice() == 1 {
let mut partial_error = None;
let time_stamp = PusServiceBase::get_current_timestamp(&mut partial_error);
let result = self
.service_helper
.common
.verification_handler
.get_mut()
.start_success(ecss_tc_and_token.token, Some(&time_stamp))
.map_err(|_| PartialPusHandlingError::Verification);
let start_token = if let Ok(result) = result {
Some(result)
} else {
partial_error = Some(result.unwrap_err());
None
};
// Sequence count will be handled centrally in TM funnel.
let mut reply_header =
SpHeader::tm_unseg(self.service_helper.common.tm_apid, 0, 0).unwrap();
let tc_header = PusTmSecondaryHeader::new_simple(17, 2, &time_stamp);
let ping_reply = PusTmCreator::new(&mut reply_header, tc_header, &[], true);
let result = self
.service_helper
.common
.tm_sender
.send_tm(PusTmWrapper::Direct(ping_reply))
.map_err(PartialPusHandlingError::TmSend);
if let Err(err) = result {
partial_error = Some(err);
}
if let Some(start_token) = start_token {
if self
.service_helper
.common
.verification_handler
.get_mut()
.completion_success(start_token, Some(&time_stamp))
.is_err()
{
partial_error = Some(PartialPusHandlingError::Verification)
}
}
if let Some(partial_error) = partial_error {
return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
partial_error,
));
};
} else {
return Ok(PusPacketHandlerResult::CustomSubservice(
tc.subservice(),
ecss_tc_and_token.token,
));
}
Ok(PusPacketHandlerResult::RequestHandled)
}
}
#[cfg(test)]
mod tests {
use crate::pus::tests::{
PusServiceHandlerWithSharedStoreCommon, PusServiceHandlerWithVecCommon, PusTestHarness,
SimplePusPacketHandler, TEST_APID,
};
use crate::pus::verification::RequestId;
use crate::pus::verification::{TcStateAccepted, VerificationToken};
use crate::pus::{
EcssTcInSharedStoreConverter, EcssTcInVecConverter, PusPacketHandlerResult,
PusPacketHandlingError,
};
use delegate::delegate;
use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
use spacepackets::ecss::tm::PusTmReader;
use spacepackets::ecss::PusPacket;
use spacepackets::{SequenceFlags, SpHeader};
use super::PusService17TestHandler;
struct Pus17HandlerWithStoreTester {
common: PusServiceHandlerWithSharedStoreCommon,
handler: PusService17TestHandler<EcssTcInSharedStoreConverter>,
}
impl Pus17HandlerWithStoreTester {
pub fn new() -> Self {
let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
let pus_17_handler = PusService17TestHandler::new(srv_handler);
Self {
common,
handler: pus_17_handler,
}
}
}
impl PusTestHarness for Pus17HandlerWithStoreTester {
delegate! {
to self.common {
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
fn read_next_tm(&mut self) -> PusTmReader<'_>;
fn check_no_tm_available(&self) -> bool;
fn check_next_verification_tm(
&self,
subservice: u8,
expected_request_id: RequestId
);
}
}
}
impl SimplePusPacketHandler for Pus17HandlerWithStoreTester {
delegate! {
to self.handler {
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
}
}
}
struct Pus17HandlerWithVecTester {
common: PusServiceHandlerWithVecCommon,
handler: PusService17TestHandler<EcssTcInVecConverter>,
}
impl Pus17HandlerWithVecTester {
pub fn new() -> Self {
let (common, srv_handler) = PusServiceHandlerWithVecCommon::new();
Self {
common,
handler: PusService17TestHandler::new(srv_handler),
}
}
}
impl PusTestHarness for Pus17HandlerWithVecTester {
delegate! {
to self.common {
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
fn read_next_tm(&mut self) -> PusTmReader<'_>;
fn check_no_tm_available(&self) -> bool;
fn check_next_verification_tm(
&self,
subservice: u8,
expected_request_id: RequestId,
);
}
}
}
impl SimplePusPacketHandler for Pus17HandlerWithVecTester {
delegate! {
to self.handler {
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
}
}
}
fn ping_test(test_harness: &mut (impl PusTestHarness + SimplePusPacketHandler)) {
// Create a ping TC, verify acceptance.
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
let token = test_harness.send_tc(&ping_tc);
let request_id = token.req_id();
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
// We should see 4 replies in the TM queue now: Acceptance TM, Start TM, ping reply and
// Completion TM
// Acceptance TM
test_harness.check_next_verification_tm(1, request_id);
// Start TM
test_harness.check_next_verification_tm(3, request_id);
// Ping reply
let tm = test_harness.read_next_tm();
assert_eq!(tm.service(), 17);
assert_eq!(tm.subservice(), 2);
assert!(tm.user_data().is_empty());
// TM completion
test_harness.check_next_verification_tm(7, request_id);
}
#[test]
fn test_basic_ping_processing_using_store() {
let mut test_harness = Pus17HandlerWithStoreTester::new();
ping_test(&mut test_harness);
}
#[test]
fn test_basic_ping_processing_using_vec() {
let mut test_harness = Pus17HandlerWithVecTester::new();
ping_test(&mut test_harness);
}
#[test]
fn test_empty_tc_queue() {
let mut test_harness = Pus17HandlerWithStoreTester::new();
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
let result = result.unwrap();
if let PusPacketHandlerResult::Empty = result {
} else {
panic!("unexpected result type {result:?}")
}
}
#[test]
fn test_sending_unsupported_service() {
let mut test_harness = Pus17HandlerWithStoreTester::new();
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
let sec_header = PusTcSecondaryHeader::new_simple(3, 1);
let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
test_harness.send_tc(&ping_tc);
let result = test_harness.handle_one_tc();
assert!(result.is_err());
let error = result.unwrap_err();
if let PusPacketHandlingError::WrongService(num) = error {
assert_eq!(num, 3);
} else {
panic!("unexpected error type {error}")
}
}
#[test]
fn test_sending_custom_subservice() {
let mut test_harness = Pus17HandlerWithStoreTester::new();
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
let sec_header = PusTcSecondaryHeader::new_simple(17, 200);
let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
test_harness.send_tc(&ping_tc);
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
let result = result.unwrap();
if let PusPacketHandlerResult::CustomSubservice(subservice, _) = result {
assert_eq!(subservice, 200);
} else {
panic!("unexpected result type {result:?}")
}
}
}
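A similar polling sketch for the test (ping) service handler (not part of this commit), assuming the module is exposed as satrs::pus::test as the file path suggests. Construction of the underlying PusServiceHelper is omitted, just like in the test harnesses above.

use satrs::pus::test::PusService17TestHandler;
use satrs::pus::{EcssTcInMemConverter, PusPacketHandlerResult};

// Hypothetical helper: process all queued PUS 17 TCs and report non-standard outcomes.
fn handle_pus17_cycle<Conv: EcssTcInMemConverter>(handler: &mut PusService17TestHandler<Conv>) {
    loop {
        match handler.handle_one_tc() {
            Ok(PusPacketHandlerResult::Empty) => break,
            Ok(PusPacketHandlerResult::RequestHandled) => continue,
            // Custom subservices and partial successes are handed back to the caller
            // for mission-specific treatment.
            Ok(other) => eprintln!("PUS 17: non-standard result {other:?}"),
            Err(e) => {
                eprintln!("PUS 17 handler error: {e}");
                break;
            }
        }
    }
}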

File diff suppressed because it is too large