diff --git a/.gitignore b/.gitignore index fba2216..cf44893 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ target/ +output.log /Cargo.lock output.log diff --git a/README.md b/README.md index aaaee5c..ae2912d 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,11 @@ A lot of the architecture and general design considerations are based on the through the 2 missions [FLP](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/flying-laptop/) and [EIVE](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/EIVE/). +This framework is in the early stages of development. Important features are missing. New releases +with breaking changes are published regularly, with all changes documented inside respective +changelog files. You should only use this framework if you are willing to work in this +environment. + # Overview This project currently contains following crates: diff --git a/coverage.py b/coverage.py index 3b1c3c4..c932d9a 100755 --- a/coverage.py +++ b/coverage.py @@ -47,7 +47,7 @@ def main(): parser.add_argument( "-p", "--package", - choices=["satrs", "satrs-minisim"], + choices=["satrs", "satrs-minisim", "satrs-example"], default="satrs", help="Choose project to generate coverage for", ) diff --git a/satrs-example/Cargo.toml b/satrs-example/Cargo.toml index d874a9c..1893db8 100644 --- a/satrs-example/Cargo.toml +++ b/satrs-example/Cargo.toml @@ -18,10 +18,12 @@ csv = "1" num_enum = "0.7" thiserror = "1" derive-new = "0.5" +serde = { version = "1", features = ["derive"] } +serde_json = "1" [dependencies.satrs] -# version = "0.2.0-rc.0" path = "../satrs" +features = ["test_util"] [dependencies.satrs-mib] version = "0.1.1" @@ -30,3 +32,6 @@ path = "../satrs-mib" [features] dyn_tmtc = [] default = ["dyn_tmtc"] + +[dev-dependencies] +env_logger = "0.11" diff --git a/satrs-example/src/acs.rs b/satrs-example/src/acs.rs deleted file mode 100644 index d7add5e..0000000 --- a/satrs-example/src/acs.rs +++ /dev/null @@ -1,118 +0,0 @@ -use std::sync::mpsc::{self, TryRecvError}; - -use log::{info, warn}; -use satrs::pus::verification::VerificationReportingProvider; -use satrs::pus::{EcssTmSender, PusTmWrapper}; -use satrs::request::TargetAndApidId; -use satrs::spacepackets::ecss::hk::Subservice as HkSubservice; -use satrs::{ - hk::HkRequest, - spacepackets::{ - ecss::tm::{PusTmCreator, PusTmSecondaryHeader}, - time::cds::{CdsTime, DaysLen16Bits}, - SequenceFlags, SpHeader, - }, -}; -use satrs_example::config::{RequestTargetId, PUS_APID}; - -use crate::{ - hk::{AcsHkIds, HkUniqueId}, - requests::{Request, RequestWithToken}, - update_time, -}; - -pub struct AcsTask { - timestamp: [u8; 7], - time_provider: CdsTime, - verif_reporter: VerificationReporter, - tm_sender: Box, - request_rx: mpsc::Receiver, -} - -impl AcsTask { - pub fn new( - tm_sender: impl EcssTmSender, - request_rx: mpsc::Receiver, - verif_reporter: VerificationReporter, - ) -> Self { - Self { - timestamp: [0; 7], - time_provider: CdsTime::new_with_u16_days(0, 0), - verif_reporter, - tm_sender: Box::new(tm_sender), - request_rx, - } - } - - fn handle_hk_request(&mut self, target_id: u32, unique_id: u32) { - assert_eq!(target_id, RequestTargetId::AcsSubsystem as u32); - if unique_id == AcsHkIds::TestMgmSet as u32 { - let mut sp_header = SpHeader::tm(PUS_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); - let sec_header = PusTmSecondaryHeader::new_simple( - 3, - HkSubservice::TmHkPacket as u8, - &self.timestamp, - ); - let mut buf: [u8; 
8] = [0; 8]; - let hk_id = HkUniqueId::new(target_id, unique_id); - hk_id.write_to_be_bytes(&mut buf).unwrap(); - let pus_tm = PusTmCreator::new(&mut sp_header, sec_header, &buf, true); - self.tm_sender - .send_tm(PusTmWrapper::Direct(pus_tm)) - .expect("Sending HK TM failed"); - } - // TODO: Verification failure for invalid unique IDs. - } - - pub fn try_reading_one_request(&mut self) -> bool { - match self.request_rx.try_recv() { - Ok(request) => { - info!( - "ACS thread: Received HK request {:?}", - request.targeted_request - ); - let target_and_apid_id = TargetAndApidId::from(request.targeted_request.target_id); - match request.targeted_request.request { - Request::Hk(hk_req) => match hk_req { - HkRequest::OneShot(unique_id) => { - self.handle_hk_request(target_and_apid_id.target(), unique_id) - } - HkRequest::Enable(_) => {} - HkRequest::Disable(_) => {} - HkRequest::ModifyCollectionInterval(_, _) => {} - }, - Request::Mode(_mode_req) => { - warn!("mode request handling not implemented yet") - } - Request::Action(_action_req) => { - warn!("action request handling not implemented yet") - } - } - let started_token = self - .verif_reporter - .start_success(request.token, &self.timestamp) - .expect("Sending start success failed"); - self.verif_reporter - .completion_success(started_token, &self.timestamp) - .expect("Sending completion success failed"); - true - } - Err(e) => match e { - TryRecvError::Empty => false, - TryRecvError::Disconnected => { - warn!("ACS thread: Message Queue TX disconnected!"); - false - } - }, - } - } - - pub fn periodic_operation(&mut self) { - update_time(&mut self.time_provider, &mut self.timestamp); - loop { - if !self.try_reading_one_request() { - break; - } - } - } -} diff --git a/satrs-example/src/acs/mgm.rs b/satrs-example/src/acs/mgm.rs new file mode 100644 index 0000000..3505f95 --- /dev/null +++ b/satrs-example/src/acs/mgm.rs @@ -0,0 +1,188 @@ +// TODO: Remove this at a later stage. 
+#![allow(dead_code)] +use derive_new::new; +use satrs::hk::HkRequestVariant; +use satrs::spacepackets::ecss::hk; +use satrs::spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader}; +use satrs::spacepackets::SpHeader; +use satrs_example::TimeStampHelper; +use std::sync::mpsc::{self}; +use std::sync::{Arc, Mutex}; + +use satrs::mode::{ + ModeAndSubmode, ModeError, ModeProvider, ModeReply, ModeRequest, ModeRequestHandler, +}; +use satrs::pus::{EcssTmSenderCore, PusTmVariant}; +use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId}; +use satrs_example::config::components::PUS_MODE_SERVICE; + +use crate::pus::hk::{HkReply, HkReplyVariant}; +use crate::requests::CompositeRequest; + +use serde::{Deserialize, Serialize}; + +pub trait SpiInterface { + type Error; + fn transfer(&mut self, data: &mut [u8]) -> Result<(), Self::Error>; +} + +#[derive(Default)] +pub struct SpiDummyInterface {} + +impl SpiInterface for SpiDummyInterface { + type Error = (); + + fn transfer(&mut self, _data: &mut [u8]) -> Result<(), Self::Error> { + Ok(()) + } +} + +#[derive(Default, Debug, Copy, Clone, Serialize, Deserialize)] +pub struct MgmData { + pub x: f32, + pub y: f32, + pub z: f32, +} + +pub struct MpscModeLeafInterface { + pub request_rx: mpsc::Receiver>, + pub reply_tx_to_pus: mpsc::Sender>, + pub reply_tx_to_parent: mpsc::Sender>, +} + +#[derive(new)] +#[allow(clippy::too_many_arguments)] +pub struct MgmHandler { + id: UniqueApidTargetId, + dev_str: &'static str, + mode_interface: MpscModeLeafInterface, + composite_request_receiver: mpsc::Receiver>, + hk_reply_sender: mpsc::Sender>, + tm_sender: TmSender, + com_interface: ComInterface, + shared_mgm_set: Arc>, + #[new(value = "ModeAndSubmode::new(satrs_example::DeviceMode::Off as u32, 0)")] + mode: ModeAndSubmode, + #[new(default)] + stamp_helper: TimeStampHelper, +} + +impl MgmHandler { + pub fn periodic_operation(&mut self) { + self.stamp_helper.update_from_now(); + // Handle messages. + match self.composite_request_receiver.try_recv() { + Ok(ref msg) => match &msg.message { + CompositeRequest::Hk(hk_req) => match hk_req.variant { + HkRequestVariant::OneShot => { + self.hk_reply_sender + .send(GenericMessage::new( + msg.requestor_info, + HkReply::new(hk_req.unique_id, HkReplyVariant::Ack), + )) + .expect("failed to send HK reply"); + let mut sp_header = SpHeader::tm_unseg(self.id.apid, 0, 0).unwrap(); + let sec_header = PusTmSecondaryHeader::new( + 3, + hk::Subservice::TmHkPacket as u8, + 0, + 0, + Some(self.stamp_helper.stamp()), + ); + // Let's serialize it as JSON for now.. This is a lot simpler than binary + // serialization. + let mgm_data_serialized = + serde_json::to_vec(&*self.shared_mgm_set.lock().unwrap()).unwrap(); + let hk_tm = PusTmCreator::new( + &mut sp_header, + sec_header, + &mgm_data_serialized, + true, + ); + self.tm_sender + .send_tm(self.id.id(), PusTmVariant::Direct(hk_tm)) + .expect("failed to send HK TM"); + } + HkRequestVariant::EnablePeriodic => todo!(), + HkRequestVariant::DisablePeriodic => todo!(), + HkRequestVariant::ModifyCollectionInterval(_) => todo!(), + }, + // TODO: This object does not have actions (yet).. Still send back completion failure + // reply. + CompositeRequest::Action(_action_req) => {} + }, + Err(_) => todo!(), + } + match self.mode_interface.request_rx.try_recv() { + Ok(msg) => { + let result = self.handle_mode_request(msg); + // TODO: Trigger event? 
+ if result.is_err() { + log::warn!( + "{}: mode request failed with error {:?}", + self.dev_str, + result.err().unwrap() + ); + } + } + Err(_) => todo!(), + } + } +} + +impl ModeProvider + for MgmHandler +{ + fn mode_and_submode(&self) -> ModeAndSubmode { + self.mode + } +} + +impl ModeRequestHandler + for MgmHandler +{ + type Error = ModeError; + fn start_transition( + &mut self, + requestor: MessageMetadata, + mode_and_submode: ModeAndSubmode, + ) -> Result<(), satrs::mode::ModeError> { + self.mode = mode_and_submode; + self.handle_mode_reached(Some(requestor))?; + Ok(()) + } + + fn announce_mode(&self, _requestor_info: MessageMetadata, _recursive: bool) { + log::info!("{} announcing mode: {:?}", self.dev_str, self.mode); + } + + fn handle_mode_reached( + &mut self, + requestor: Option, + ) -> Result<(), Self::Error> { + if let Some(requestor) = requestor { + if requestor.sender_id() == PUS_MODE_SERVICE.raw() { + // self.mode_reply_sender_to_pus.send( + //GenericMessage::new(requestor.request_id, requestor.sender_id, ModeReply::ModeReply(self.mode)) + // )?; + } + } + Ok(()) + } + + fn send_mode_reply( + &self, + _requestor: MessageMetadata, + _reply: ModeReply, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn handle_mode_info( + &mut self, + _requestor_info: MessageMetadata, + _info: ModeAndSubmode, + ) -> Result<(), Self::Error> { + Ok(()) + } +} diff --git a/satrs-example/src/acs/mod.rs b/satrs-example/src/acs/mod.rs new file mode 100644 index 0000000..bd61e8b --- /dev/null +++ b/satrs-example/src/acs/mod.rs @@ -0,0 +1 @@ +pub mod mgm; diff --git a/satrs-example/src/ccsds.rs b/satrs-example/src/ccsds.rs index e61172e..7f15fb0 100644 --- a/satrs-example/src/ccsds.rs +++ b/satrs-example/src/ccsds.rs @@ -1,7 +1,7 @@ use satrs::pus::ReceivesEcssPusTc; use satrs::spacepackets::{CcsdsPacket, SpHeader}; use satrs::tmtc::{CcsdsPacketHandler, ReceivesCcsdsTc}; -use satrs_example::config::PUS_APID; +use satrs_example::config::components::Apid; #[derive(Clone)] pub struct CcsdsReceiver< @@ -19,7 +19,12 @@ impl< type Error = E; fn valid_apids(&self) -> &'static [u16] { - &[PUS_APID] + &[ + Apid::GenericPus as u16, + Apid::Acs as u16, + Apid::Sched as u16, + Apid::EventTm as u16, + ] } fn handle_known_apid( @@ -27,7 +32,8 @@ impl< sp_header: &SpHeader, tc_raw: &[u8], ) -> Result<(), Self::Error> { - if sp_header.apid() == PUS_APID { + if sp_header.apid() == Apid::Cfdp as u16 { + } else { return self.tc_source.pass_ccsds(sp_header, tc_raw); } Ok(()) diff --git a/satrs-example/src/config.rs b/satrs-example/src/config.rs index 9d04403..07d01ed 100644 --- a/satrs-example/src/config.rs +++ b/satrs-example/src/config.rs @@ -9,8 +9,6 @@ use satrs::{ pool::{StaticMemoryPool, StaticPoolConfig}, }; -pub const PUS_APID: u16 = 0x02; - #[derive(Copy, Clone, PartialEq, Eq, Debug, TryFromPrimitive, IntoPrimitive)] #[repr(u8)] pub enum CustomPusServiceId { @@ -29,6 +27,7 @@ pub const AOCS_APID: u16 = 1; pub enum GroupId { Tmtc = 0, Hk = 1, + Mode = 2, } pub const OBSW_SERVER_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED; @@ -53,6 +52,8 @@ pub mod tmtc_err { pub const UNKNOWN_TARGET_ID: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 4); #[resultcode] pub const ROUTING_ERROR: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 5); + #[resultcode(info = "Request timeout for targeted PUS request. P1: Request ID. P2: Target ID")] + pub const REQUEST_TIMEOUT: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 6); #[resultcode( info = "Not enough data inside the TC application data field. 
Optionally includes: \ @@ -92,27 +93,60 @@ pub mod hk_err { ]; } -#[allow(clippy::enum_variant_names)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub enum TmSenderId { - PusVerification = 0, - PusTest = 1, - PusEvent = 2, - PusHk = 3, - PusAction = 4, - PusSched = 5, - AllEvents = 6, - AcsSubsystem = 7, +pub mod mode_err { + use super::*; + + #[resultcode] + pub const WRONG_MODE: ResultU16 = ResultU16::new(GroupId::Mode as u8, 0); } -#[derive(Copy, Clone, PartialEq, Eq)] -pub enum TcReceiverId { - PusTest = 1, - PusEvent = 2, - PusHk = 3, - PusAction = 4, - PusSched = 5, +pub mod components { + use satrs::request::UniqueApidTargetId; + + #[derive(Copy, Clone, PartialEq, Eq)] + pub enum Apid { + VerificationTm = 1, + Sched = 2, + EventTm = 3, + HkTm = 4, + GenericPus = 5, + Acs = 6, + Cfdp = 7, + } + + // Component IDs for components with the PUS APID. + #[derive(Copy, Clone, PartialEq, Eq)] + pub enum PusId { + PusRouting = 0, + PusTest = 1, + PusAction = 2, + PusMode = 3, + PusHk = 4, + } + + #[derive(Copy, Clone, PartialEq, Eq)] + pub enum AcsId { + Mgm0 = 0, + } + + pub const PUS_ACTION_SERVICE: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusAction as u32); + pub const PUS_EVENT_MANAGEMENT: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::EventTm as u16, 0); + pub const PUS_ROUTING_SERVICE: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusRouting as u32); + pub const PUS_TEST_SERVICE: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusTest as u32); + pub const PUS_MODE_SERVICE: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusMode as u32); + pub const PUS_HK_SERVICE: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusHk as u32); + pub const PUS_SCHED_SERVICE: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::Sched as u16, 0); + pub const MGM_HANDLER_0: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::Acs as u16, AcsId::Mgm0 as u32); } + pub mod pool { use super::*; pub fn create_static_pools() -> (StaticMemoryPool, StaticMemoryPool) { diff --git a/satrs-example/src/events.rs b/satrs-example/src/events.rs index 4cb2cda..9c9c993 100644 --- a/satrs-example/src/events.rs +++ b/satrs-example/src/events.rs @@ -1,5 +1,8 @@ use std::sync::mpsc::{self}; +use crate::pus::create_verification_reporter; +use satrs::pus::verification::VerificationReporter; +use satrs::pus::EcssTmSenderCore; use satrs::{ event_man::{ EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded, @@ -12,55 +15,67 @@ use satrs::{ DefaultPusEventU32Dispatcher, EventReporter, EventRequest, EventRequestWithToken, }, verification::{TcStateStarted, VerificationReportingProvider, VerificationToken}, - EcssTmSender, }, - spacepackets::time::cds::{self, CdsTime}, + spacepackets::time::cds::CdsTime, + ComponentId, }; -use satrs_example::config::PUS_APID; +use satrs_example::config::components; +use satrs_example::config::components::PUS_EVENT_MANAGEMENT; use crate::update_time; -pub struct PusEventHandler { +/// The PUS event handler subscribes for all events and converts them into ECSS PUS 5 event +/// packets. It also handles the verification completion of PUS event service requests. 
+pub struct PusEventHandler { + id: ComponentId, event_request_rx: mpsc::Receiver, pus_event_dispatcher: DefaultPusEventU32Dispatcher<()>, pus_event_man_rx: mpsc::Receiver<(EventU32, Option)>, - tm_sender: Box, + tm_sender: TmSender, time_provider: CdsTime, timestamp: [u8; 7], verif_handler: VerificationReporter, } -/* -*/ -impl PusEventHandler { +impl PusEventHandler { pub fn new( + id: ComponentId, + tm_sender: TmSender, verif_handler: VerificationReporter, event_manager: &mut EventManagerWithBoundedMpsc, event_request_rx: mpsc::Receiver, - tm_sender: impl EcssTmSender, ) -> Self { let event_queue_cap = 30; let (pus_event_man_tx, pus_event_man_rx) = mpsc::sync_channel(event_queue_cap); // All events sent to the manager are routed to the PUS event manager, which generates PUS event // telemetry for each event. - let event_reporter = EventReporter::new(PUS_APID, 128).unwrap(); + let event_reporter = EventReporter::new( + PUS_EVENT_MANAGEMENT.raw(), + components::Apid::EventTm as u16, + 128, + ) + .unwrap(); let pus_event_dispatcher = DefaultPusEventU32Dispatcher::new_with_default_backend(event_reporter); - let pus_event_man_send_provider = - EventU32SenderMpscBounded::new(1, pus_event_man_tx, event_queue_cap); + let pus_event_man_send_provider = EventU32SenderMpscBounded::new( + PUS_EVENT_MANAGEMENT.raw(), + pus_event_man_tx, + event_queue_cap, + ); - event_manager.subscribe_all(pus_event_man_send_provider.channel_id()); + event_manager.subscribe_all(pus_event_man_send_provider.target_id()); event_manager.add_sender(pus_event_man_send_provider); Self { + id, event_request_rx, pus_event_dispatcher, pus_event_man_rx, - time_provider: cds::CdsTime::new_with_u16_days(0, 0), + time_provider: CdsTime::new_with_u16_days(0, 0), timestamp: [0; 7], verif_handler, - tm_sender: Box::new(tm_sender), + tm_sender, } } @@ -71,7 +86,7 @@ impl PusEventHandler PusEventHandler)>, @@ -128,6 +140,7 @@ impl EventManagerWrapper { } } + // Returns a cached event sender to send events to the event manager for routing. 
pub fn clone_event_sender(&self) -> mpsc::Sender<(EventU32, Option)> { self.event_sender.clone() } @@ -144,23 +157,23 @@ impl EventManagerWrapper { } } -pub struct EventHandler { +pub struct EventHandler { pub event_man_wrapper: EventManagerWrapper, - pub pus_event_handler: PusEventHandler, + pub pus_event_handler: PusEventHandler, } -impl EventHandler { +impl EventHandler { pub fn new( - tm_sender: impl EcssTmSender, - verif_handler: VerificationReporter, + tm_sender: TmSender, event_request_rx: mpsc::Receiver, ) -> Self { let mut event_man_wrapper = EventManagerWrapper::new(); let pus_event_handler = PusEventHandler::new( - verif_handler, + PUS_EVENT_MANAGEMENT.raw(), + tm_sender, + create_verification_reporter(PUS_EVENT_MANAGEMENT.apid), event_man_wrapper.event_manager(), event_request_rx, - tm_sender, ); Self { event_man_wrapper, diff --git a/satrs-example/src/hk.rs b/satrs-example/src/hk.rs index 3147cbf..0852d04 100644 --- a/satrs-example/src/hk.rs +++ b/satrs-example/src/hk.rs @@ -1,27 +1,25 @@ use derive_new::new; +use satrs::hk::UniqueId; +use satrs::request::UniqueApidTargetId; use satrs::spacepackets::ByteConversionError; -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum AcsHkIds { - TestMgmSet = 1, -} - #[derive(Debug, new, Copy, Clone)] pub struct HkUniqueId { - target_id: u32, - set_id: u32, + target_id: UniqueApidTargetId, + set_id: UniqueId, } impl HkUniqueId { #[allow(dead_code)] - pub fn target_id(&self) -> u32 { + pub fn target_id(&self) -> UniqueApidTargetId { self.target_id } #[allow(dead_code)] - pub fn set_id(&self) -> u32 { + pub fn set_id(&self) -> UniqueId { self.set_id } + #[allow(dead_code)] pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result { if buf.len() < 8 { return Err(ByteConversionError::ToSliceTooSmall { @@ -29,7 +27,7 @@ impl HkUniqueId { expected: 8, }); } - buf[0..4].copy_from_slice(&self.target_id.to_be_bytes()); + buf[0..4].copy_from_slice(&self.target_id.unique_id.to_be_bytes()); buf[4..8].copy_from_slice(&self.set_id.to_be_bytes()); Ok(8) diff --git a/satrs-example/src/lib.rs b/satrs-example/src/lib.rs index ef68c36..a224fe5 100644 --- a/satrs-example/src/lib.rs +++ b/satrs-example/src/lib.rs @@ -1 +1,39 @@ +use satrs::spacepackets::time::{cds::CdsTime, TimeWriter}; + pub mod config; + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub enum DeviceMode { + Off = 0, + On = 1, + Normal = 2, +} + +pub struct TimeStampHelper { + stamper: CdsTime, + time_stamp: [u8; 7], +} + +impl TimeStampHelper { + pub fn stamp(&self) -> &[u8] { + &self.time_stamp + } + + pub fn update_from_now(&mut self) { + self.stamper + .update_from_now() + .expect("Updating timestamp failed"); + self.stamper + .write_to_bytes(&mut self.time_stamp) + .expect("Writing timestamp failed"); + } +} + +impl Default for TimeStampHelper { + fn default() -> Self { + Self { + stamper: CdsTime::now_with_u16_days().expect("creating time stamper failed"), + time_stamp: Default::default(), + } + } +} diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index b4cbe74..dc6223c 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -17,52 +17,44 @@ use log::info; use pus::test::create_test_service_dynamic; use satrs::hal::std::tcp_server::ServerConfig; use satrs::hal::std::udp_server::UdpTcServer; -use satrs::request::TargetAndApidId; +use satrs::request::GenericMessage; use satrs::tmtc::tm_helper::SharedTmPool; use satrs_example::config::pool::{create_sched_tc_pool, create_static_pools}; use satrs_example::config::tasks::{ FREQ_MS_AOCS, 
FREQ_MS_EVENT_HANDLING, FREQ_MS_PUS_STACK, FREQ_MS_UDP_TMTC, }; -use satrs_example::config::{RequestTargetId, TmSenderId, OBSW_SERVER_ADDR, PUS_APID, SERVER_PORT}; +use satrs_example::config::{OBSW_SERVER_ADDR, SERVER_PORT}; use tmtc::PusTcSourceProviderDynamic; use udp::DynamicUdpTmHandler; -use crate::acs::AcsTask; +use crate::acs::mgm::{MgmHandler, MpscModeLeafInterface, SpiDummyInterface}; use crate::ccsds::CcsdsReceiver; use crate::logger::setup_logger; use crate::pus::action::{create_action_service_dynamic, create_action_service_static}; use crate::pus::event::{create_event_service_dynamic, create_event_service_static}; use crate::pus::hk::{create_hk_service_dynamic, create_hk_service_static}; +use crate::pus::mode::{create_mode_service_dynamic, create_mode_service_static}; use crate::pus::scheduler::{create_scheduler_service_dynamic, create_scheduler_service_static}; use crate::pus::test::create_test_service_static; use crate::pus::{PusReceiver, PusTcMpscRouter}; -use crate::requests::{GenericRequestRouter, RequestWithToken}; +use crate::requests::{CompositeRequest, GenericRequestRouter}; use crate::tcp::{SyncTcpTmSource, TcpTask}; use crate::tmtc::{ PusTcSourceProviderSharedPool, SharedTcPool, TcSourceTaskDynamic, TcSourceTaskStatic, }; use crate::udp::{StaticUdpTmHandler, UdpTmtcServer}; +use satrs::mode::ModeRequest; use satrs::pus::event_man::EventRequestWithToken; -use satrs::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender}; -use satrs::pus::{EcssTmSender, TmAsVecSenderWithId, TmInSharedPoolSenderWithId}; +use satrs::pus::TmInSharedPoolSender; use satrs::spacepackets::{time::cds::CdsTime, time::TimeWriter}; use satrs::tmtc::CcsdsDistributor; -use satrs::ChannelId; +use satrs_example::config::components::MGM_HANDLER_0; use std::net::{IpAddr, SocketAddr}; -use std::sync::mpsc::{self, channel}; +use std::sync::mpsc; use std::sync::{Arc, RwLock}; use std::thread; use std::time::Duration; -fn create_verification_reporter( - verif_sender: Sender, -) -> VerificationReporterWithSender { - let verif_cfg = VerificationReporterCfg::new(PUS_APID, 1, 2, 8).unwrap(); - // Every software component which needs to generate verification telemetry, gets a cloned - // verification reporter. - VerificationReporterWithSender::new(&verif_cfg, verif_sender) -} - #[allow(dead_code)] fn static_tmtc_pool_main() { let (tm_pool, tc_pool) = create_static_pools(); @@ -74,20 +66,21 @@ fn static_tmtc_pool_main() { let (tm_funnel_tx, tm_funnel_rx) = mpsc::sync_channel(50); let (tm_server_tx, tm_server_rx) = mpsc::sync_channel(50); - // Every software component which needs to generate verification telemetry, receives a cloned - // verification reporter. - let verif_reporter = create_verification_reporter(TmInSharedPoolSenderWithId::new( - TmSenderId::PusVerification as ChannelId, - "verif_sender", - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - )); + let tm_funnel_tx_sender = + TmInSharedPoolSender::new(shared_tm_pool.clone(), tm_funnel_tx.clone()); + + let (mgm_handler_composite_tx, mgm_handler_composite_rx) = + mpsc::channel::>(); + let (mgm_handler_mode_tx, mgm_handler_mode_rx) = mpsc::channel::>(); - let acs_target_id = TargetAndApidId::new(PUS_APID, RequestTargetId::AcsSubsystem as u32); - let (acs_thread_tx, acs_thread_rx) = channel::(); // Some request are targetable. This map is used to retrieve sender handles based on a target ID. 
let mut request_map = GenericRequestRouter::default(); - request_map.0.insert(acs_target_id.into(), acs_thread_tx); + request_map + .composite_router_map + .insert(MGM_HANDLER_0.id(), mgm_handler_composite_tx); + request_map + .mode_router_map + .insert(MGM_HANDLER_0.id(), mgm_handler_mode_tx); // This helper structure is used by all telecommand providers which need to send telecommands // to the TC source. @@ -103,82 +96,80 @@ fn static_tmtc_pool_main() { // The event task is the core handler to perform the event routing and TM handling as specified // in the sat-rs documentation. - let mut event_handler = EventHandler::new( - TmInSharedPoolSenderWithId::new( - TmSenderId::AllEvents as ChannelId, - "ALL_EVENTS_TX", - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - ), - verif_reporter.clone(), - event_request_rx, - ); + let mut event_handler = EventHandler::new(tm_funnel_tx.clone(), event_request_rx); + + let (pus_test_tx, pus_test_rx) = mpsc::channel(); + let (pus_event_tx, pus_event_rx) = mpsc::channel(); + let (pus_sched_tx, pus_sched_rx) = mpsc::channel(); + let (pus_hk_tx, pus_hk_rx) = mpsc::channel(); + let (pus_action_tx, pus_action_rx) = mpsc::channel(); + let (pus_mode_tx, pus_mode_rx) = mpsc::channel(); + + let (_pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel(); + let (pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::channel(); + let (pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::channel(); - let (pus_test_tx, pus_test_rx) = channel(); - let (pus_event_tx, pus_event_rx) = channel(); - let (pus_sched_tx, pus_sched_rx) = channel(); - let (pus_hk_tx, pus_hk_rx) = channel(); - let (pus_action_tx, pus_action_rx) = channel(); let pus_router = PusTcMpscRouter { - test_service_receiver: pus_test_tx, - event_service_receiver: pus_event_tx, - sched_service_receiver: pus_sched_tx, - hk_service_receiver: pus_hk_tx, - action_service_receiver: pus_action_tx, + test_tc_sender: pus_test_tx, + event_tc_sender: pus_event_tx, + sched_tc_sender: pus_sched_tx, + hk_tc_sender: pus_hk_tx, + action_tc_sender: pus_action_tx, + mode_tc_sender: pus_mode_tx, }; let pus_test_service = create_test_service_static( - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - verif_reporter.clone(), + tm_funnel_tx_sender.clone(), shared_tc_pool.pool.clone(), event_handler.clone_event_sender(), pus_test_rx, ); let pus_scheduler_service = create_scheduler_service_static( - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - verif_reporter.clone(), + tm_funnel_tx_sender.clone(), tc_source.clone(), pus_sched_rx, create_sched_tc_pool(), ); let pus_event_service = create_event_service_static( - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - verif_reporter.clone(), + tm_funnel_tx_sender.clone(), shared_tc_pool.pool.clone(), pus_event_rx, event_request_tx, ); let pus_action_service = create_action_service_static( - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - verif_reporter.clone(), + tm_funnel_tx_sender.clone(), shared_tc_pool.pool.clone(), pus_action_rx, request_map.clone(), + pus_action_reply_rx, ); let pus_hk_service = create_hk_service_static( - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - verif_reporter.clone(), + tm_funnel_tx_sender.clone(), shared_tc_pool.pool.clone(), pus_hk_rx, + request_map.clone(), + pus_hk_reply_rx, + ); + let pus_mode_service = create_mode_service_static( + tm_funnel_tx_sender.clone(), + shared_tc_pool.pool.clone(), + pus_mode_rx, request_map, + pus_mode_reply_rx, ); let mut pus_stack = PusStack::new( + pus_test_service, pus_hk_service, pus_event_service, pus_action_service, 
pus_scheduler_service, - pus_test_service, + pus_mode_service, ); let ccsds_receiver = CcsdsReceiver { tc_source }; let mut tmtc_task = TcSourceTaskStatic::new( shared_tc_pool.clone(), tc_source_rx, - PusReceiver::new(verif_reporter.clone(), pus_router), + PusReceiver::new(tm_funnel_tx_sender, pus_router), ); let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT); @@ -203,17 +194,6 @@ fn static_tmtc_pool_main() { ) .expect("tcp server creation failed"); - let mut acs_task = AcsTask::new( - TmInSharedPoolSenderWithId::new( - TmSenderId::AcsSubsystem as ChannelId, - "ACS_TASK_SENDER", - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - ), - acs_thread_rx, - verif_reporter, - ); - let mut tm_funnel = TmFunnelStatic::new( shared_tm_pool, sync_tm_tcp_source, @@ -221,6 +201,27 @@ fn static_tmtc_pool_main() { tm_server_tx, ); + let (mgm_handler_mode_reply_to_parent_tx, _mgm_handler_mode_reply_to_parent_rx) = + mpsc::channel(); + + let dummy_spi_interface = SpiDummyInterface::default(); + let shared_mgm_set = Arc::default(); + let mode_leaf_interface = MpscModeLeafInterface { + request_rx: mgm_handler_mode_rx, + reply_tx_to_pus: pus_mode_reply_tx, + reply_tx_to_parent: mgm_handler_mode_reply_to_parent_tx, + }; + let mut mgm_handler = MgmHandler::new( + MGM_HANDLER_0, + "MGM_0", + mode_leaf_interface, + mgm_handler_composite_rx, + pus_hk_reply_tx, + tm_funnel_tx, + dummy_spi_interface, + shared_mgm_set, + ); + info!("Starting TMTC and UDP task"); let jh_udp_tmtc = thread::Builder::new() .name("TMTC and UDP".to_string()) @@ -266,7 +267,7 @@ fn static_tmtc_pool_main() { let jh_aocs = thread::Builder::new() .name("AOCS".to_string()) .spawn(move || loop { - acs_task.periodic_operation(); + mgm_handler.periodic_operation(); thread::sleep(Duration::from_millis(FREQ_MS_AOCS)); }) .unwrap(); @@ -300,22 +301,23 @@ fn static_tmtc_pool_main() { #[allow(dead_code)] fn dyn_tmtc_pool_main() { - let (tc_source_tx, tc_source_rx) = channel(); - let (tm_funnel_tx, tm_funnel_rx) = channel(); - let (tm_server_tx, tm_server_rx) = channel(); - // Every software component which needs to generate verification telemetry, gets a cloned - // verification reporter. - let verif_reporter = create_verification_reporter(TmAsVecSenderWithId::new( - TmSenderId::PusVerification as ChannelId, - "verif_sender", - tm_funnel_tx.clone(), - )); + let (tc_source_tx, tc_source_rx) = mpsc::channel(); + let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel(); + let (tm_server_tx, tm_server_rx) = mpsc::channel(); + + // Some request are targetable. This map is used to retrieve sender handles based on a target ID. + let (mgm_handler_composite_tx, mgm_handler_composite_rx) = + mpsc::channel::>(); + let (mgm_handler_mode_tx, mgm_handler_mode_rx) = mpsc::channel::>(); - let acs_target_id = TargetAndApidId::new(PUS_APID, RequestTargetId::AcsSubsystem as u32); - let (acs_thread_tx, acs_thread_rx) = channel::(); // Some request are targetable. This map is used to retrieve sender handles based on a target ID. 
let mut request_map = GenericRequestRouter::default(); - request_map.0.insert(acs_target_id.into(), acs_thread_tx); + request_map + .composite_router_map + .insert(MGM_HANDLER_0.raw(), mgm_handler_composite_tx); + request_map + .mode_router_map + .insert(MGM_HANDLER_0.raw(), mgm_handler_mode_tx); let tc_source = PusTcSourceProviderDynamic(tc_source_tx); @@ -325,74 +327,74 @@ fn dyn_tmtc_pool_main() { let (event_request_tx, event_request_rx) = mpsc::channel::(); // The event task is the core handler to perform the event routing and TM handling as specified // in the sat-rs documentation. - let mut event_handler = EventHandler::new( - TmAsVecSenderWithId::new( - TmSenderId::AllEvents as ChannelId, - "ALL_EVENTS_TX", - tm_funnel_tx.clone(), - ), - verif_reporter.clone(), - event_request_rx, - ); + let mut event_handler = EventHandler::new(tm_funnel_tx.clone(), event_request_rx); + + let (pus_test_tx, pus_test_rx) = mpsc::channel(); + let (pus_event_tx, pus_event_rx) = mpsc::channel(); + let (pus_sched_tx, pus_sched_rx) = mpsc::channel(); + let (pus_hk_tx, pus_hk_rx) = mpsc::channel(); + let (pus_action_tx, pus_action_rx) = mpsc::channel(); + let (pus_mode_tx, pus_mode_rx) = mpsc::channel(); + + let (_pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel(); + let (pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::channel(); + let (pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::channel(); - let (pus_test_tx, pus_test_rx) = channel(); - let (pus_event_tx, pus_event_rx) = channel(); - let (pus_sched_tx, pus_sched_rx) = channel(); - let (pus_hk_tx, pus_hk_rx) = channel(); - let (pus_action_tx, pus_action_rx) = channel(); let pus_router = PusTcMpscRouter { - test_service_receiver: pus_test_tx, - event_service_receiver: pus_event_tx, - sched_service_receiver: pus_sched_tx, - hk_service_receiver: pus_hk_tx, - action_service_receiver: pus_action_tx, + test_tc_sender: pus_test_tx, + event_tc_sender: pus_event_tx, + sched_tc_sender: pus_sched_tx, + hk_tc_sender: pus_hk_tx, + action_tc_sender: pus_action_tx, + mode_tc_sender: pus_mode_tx, }; let pus_test_service = create_test_service_dynamic( tm_funnel_tx.clone(), - verif_reporter.clone(), event_handler.clone_event_sender(), pus_test_rx, ); let pus_scheduler_service = create_scheduler_service_dynamic( tm_funnel_tx.clone(), - verif_reporter.clone(), tc_source.0.clone(), pus_sched_rx, create_sched_tc_pool(), ); - let pus_event_service = create_event_service_dynamic( - tm_funnel_tx.clone(), - verif_reporter.clone(), - pus_event_rx, - event_request_tx, - ); + let pus_event_service = + create_event_service_dynamic(tm_funnel_tx.clone(), pus_event_rx, event_request_tx); let pus_action_service = create_action_service_dynamic( tm_funnel_tx.clone(), - verif_reporter.clone(), pus_action_rx, request_map.clone(), + pus_action_reply_rx, ); let pus_hk_service = create_hk_service_dynamic( tm_funnel_tx.clone(), - verif_reporter.clone(), pus_hk_rx, + request_map.clone(), + pus_hk_reply_rx, + ); + let pus_mode_service = create_mode_service_dynamic( + tm_funnel_tx.clone(), + pus_mode_rx, request_map, + pus_mode_reply_rx, ); let mut pus_stack = PusStack::new( + pus_test_service, pus_hk_service, pus_event_service, pus_action_service, pus_scheduler_service, - pus_test_service, + pus_mode_service, ); let ccsds_receiver = CcsdsReceiver { tc_source }; let mut tmtc_task = TcSourceTaskDynamic::new( tc_source_rx, - PusReceiver::new(verif_reporter.clone(), pus_router), + PusReceiver::new(tm_funnel_tx.clone(), pus_router), ); let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 
SERVER_PORT); @@ -416,17 +418,28 @@ fn dyn_tmtc_pool_main() { ) .expect("tcp server creation failed"); - let mut acs_task = AcsTask::new( - TmAsVecSenderWithId::new( - TmSenderId::AcsSubsystem as ChannelId, - "ACS_TASK_SENDER", - tm_funnel_tx.clone(), - ), - acs_thread_rx, - verif_reporter, - ); let mut tm_funnel = TmFunnelDynamic::new(sync_tm_tcp_source, tm_funnel_rx, tm_server_tx); + let (mgm_handler_mode_reply_to_parent_tx, _mgm_handler_mode_reply_to_parent_rx) = + mpsc::channel(); + let dummy_spi_interface = SpiDummyInterface::default(); + let shared_mgm_set = Arc::default(); + let mode_leaf_interface = MpscModeLeafInterface { + request_rx: mgm_handler_mode_rx, + reply_tx_to_pus: pus_mode_reply_tx, + reply_tx_to_parent: mgm_handler_mode_reply_to_parent_tx, + }; + let mut mgm_handler = MgmHandler::new( + MGM_HANDLER_0, + "MGM_0", + mode_leaf_interface, + mgm_handler_composite_rx, + pus_hk_reply_tx, + tm_funnel_tx, + dummy_spi_interface, + shared_mgm_set, + ); + info!("Starting TMTC and UDP task"); let jh_udp_tmtc = thread::Builder::new() .name("TMTC and UDP".to_string()) @@ -472,7 +485,7 @@ fn dyn_tmtc_pool_main() { let jh_aocs = thread::Builder::new() .name("AOCS".to_string()) .spawn(move || loop { - acs_task.periodic_operation(); + mgm_handler.periodic_operation(); thread::sleep(Duration::from_millis(FREQ_MS_AOCS)); }) .unwrap(); diff --git a/satrs-example/src/pus/action.rs b/satrs-example/src/pus/action.rs index ef23786..f2ab77b 100644 --- a/satrs-example/src/pus/action.rs +++ b/satrs-example/src/pus/action.rs @@ -1,181 +1,290 @@ use log::{error, warn}; -use satrs::action::ActionRequest; -use satrs::pool::{SharedStaticMemoryPool, StoreAddr}; -use satrs::pus::action::{PusActionToRequestConverter, PusService8ActionHandler}; -use satrs::pus::verification::std_mod::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, +use satrs::action::{ActionRequest, ActionRequestVariant}; +use satrs::params::WritableToBeBytes; +use satrs::pool::SharedStaticMemoryPool; +use satrs::pus::action::{ + ActionReplyVariant, ActivePusActionRequestStd, DefaultActiveActionRequestMap, PusActionReply, }; use satrs::pus::verification::{ - FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken, + FailParams, FailParamsWithStep, TcStateAccepted, TcStateStarted, VerificationReporter, + VerificationReportingProvider, VerificationToken, }; use satrs::pus::{ - EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, - EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, - PusPacketHandlingError, PusServiceHelper, TmAsVecSenderWithId, TmAsVecSenderWithMpsc, - TmInSharedPoolSenderWithBoundedMpsc, TmInSharedPoolSenderWithId, + ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, + EcssTcInVecConverter, EcssTmSenderCore, EcssTmtcError, GenericConversionError, MpscTcReceiver, + MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, PusReplyHandler, + PusServiceHelper, PusTcToRequestConverter, PusTmAsVec, PusTmInPool, TmInSharedPoolSender, }; -use satrs::request::TargetAndApidId; +use satrs::request::{GenericMessage, UniqueApidTargetId}; use satrs::spacepackets::ecss::tc::PusTcReader; -use satrs::spacepackets::ecss::PusPacket; -use satrs::tmtc::tm_helper::SharedTmPool; -use satrs::{ChannelId, TargetId}; -use satrs_example::config::{tmtc_err, TcReceiverId, TmSenderId, PUS_APID}; -use std::sync::mpsc::{self}; +use 
satrs::spacepackets::ecss::{EcssEnumU16, PusPacket}; +use satrs::ComponentId; +use satrs_example::config::components::PUS_ACTION_SERVICE; +use satrs_example::config::tmtc_err; +use std::sync::mpsc; +use std::time::Duration; use crate::requests::GenericRequestRouter; -use super::GenericRoutingErrorHandler; +use super::{ + create_verification_reporter, generic_pus_request_timeout_handler, PusTargetedRequestService, + TargetedPusService, +}; + +pub struct ActionReplyHandler { + fail_data_buf: [u8; 128], +} + +impl Default for ActionReplyHandler { + fn default() -> Self { + Self { + fail_data_buf: [0; 128], + } + } +} + +impl PusReplyHandler for ActionReplyHandler { + type Error = EcssTmtcError; + + fn handle_unrequested_reply( + &mut self, + _caller_id: ComponentId, + reply: &GenericMessage, + _tm_sender: &impl EcssTmSenderCore, + ) -> Result<(), Self::Error> { + warn!("received unexpected reply for service 8: {reply:?}"); + Ok(()) + } + + fn handle_reply( + &mut self, + caller_id: ComponentId, + reply: &GenericMessage, + active_request: &ActivePusActionRequestStd, + tm_sender: &(impl EcssTmSenderCore + ?Sized), + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result { + let verif_token: VerificationToken = active_request + .token() + .try_into() + .expect("invalid token state"); + let remove_entry = match &reply.message.variant { + ActionReplyVariant::CompletionFailed { error_code, params } => { + let mut fail_data_len = 0; + if let Some(params) = params { + fail_data_len = params.write_to_be_bytes(&mut self.fail_data_buf)?; + } + verification_handler.completion_failure( + caller_id, + tm_sender, + verif_token, + FailParams::new(time_stamp, error_code, &self.fail_data_buf[..fail_data_len]), + )?; + true + } + ActionReplyVariant::StepFailed { + error_code, + step, + params, + } => { + let mut fail_data_len = 0; + if let Some(params) = params { + fail_data_len = params.write_to_be_bytes(&mut self.fail_data_buf)?; + } + verification_handler.step_failure( + caller_id, + tm_sender, + verif_token, + FailParamsWithStep::new( + time_stamp, + &EcssEnumU16::new(*step), + error_code, + &self.fail_data_buf[..fail_data_len], + ), + )?; + true + } + ActionReplyVariant::Completed => { + verification_handler.completion_success( + caller_id, + tm_sender, + verif_token, + time_stamp, + )?; + true + } + ActionReplyVariant::StepSuccess { step } => { + verification_handler.step_success( + caller_id, + tm_sender, + &verif_token, + time_stamp, + EcssEnumU16::new(*step), + )?; + false + } + _ => false, + }; + Ok(remove_entry) + } + + fn handle_request_timeout( + &mut self, + caller_id: ComponentId, + active_request: &ActivePusActionRequestStd, + tm_sender: &impl EcssTmSenderCore, + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result<(), Self::Error> { + generic_pus_request_timeout_handler( + caller_id, + tm_sender, + active_request, + verification_handler, + time_stamp, + "action", + ) + } +} #[derive(Default)] -pub struct ExampleActionRequestConverter {} +pub struct ActionRequestConverter {} -impl PusActionToRequestConverter for ExampleActionRequestConverter { - type Error = PusPacketHandlingError; +impl PusTcToRequestConverter for ActionRequestConverter { + type Error = GenericConversionError; fn convert( &mut self, + caller_id: ComponentId, token: VerificationToken, tc: &PusTcReader, - time_stamp: &[u8], + tm_sender: &(impl EcssTmSenderCore + ?Sized), verif_reporter: &impl VerificationReportingProvider, - ) -> Result<(TargetId, 
ActionRequest), Self::Error> { + time_stamp: &[u8], + ) -> Result<(ActivePusActionRequestStd, ActionRequest), Self::Error> { let subservice = tc.subservice(); let user_data = tc.user_data(); if user_data.len() < 8 { verif_reporter .start_failure( + caller_id, + tm_sender, token, FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA), ) .expect("Sending start failure failed"); - return Err(PusPacketHandlingError::NotEnoughAppData { + return Err(GenericConversionError::NotEnoughAppData { expected: 8, found: user_data.len(), }); } - let target_id = TargetAndApidId::from_pus_tc(tc).unwrap(); + let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).unwrap(); let action_id = u32::from_be_bytes(user_data[4..8].try_into().unwrap()); if subservice == 128 { + let req_variant = if user_data.len() == 8 { + ActionRequestVariant::NoData + } else { + ActionRequestVariant::VecData(user_data[8..].to_vec()) + }; Ok(( - target_id.raw(), - ActionRequest::UnsignedIdAndVecData { + ActivePusActionRequestStd::new( action_id, - data: user_data[8..].to_vec(), - }, + target_id_and_apid.into(), + token.into(), + Duration::from_secs(30), + ), + ActionRequest::new(action_id, req_variant), )) } else { verif_reporter .start_failure( + caller_id, + tm_sender, token, FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE), ) .expect("Sending start failure failed"); - Err(PusPacketHandlingError::InvalidSubservice(subservice)) + Err(GenericConversionError::InvalidSubservice(subservice)) } } } pub fn create_action_service_static( - shared_tm_store: SharedTmPool, - tm_funnel_tx: mpsc::SyncSender, - verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender, + tm_sender: TmInSharedPoolSender>, tc_pool: SharedStaticMemoryPool, pus_action_rx: mpsc::Receiver, action_router: GenericRequestRouter, -) -> Pus8Wrapper< - MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, - EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, -> { - let action_srv_tm_sender = TmInSharedPoolSenderWithId::new( - TmSenderId::PusAction as ChannelId, - "PUS_8_TM_SENDER", - shared_tm_store.clone(), - tm_funnel_tx.clone(), - ); - let action_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusAction as ChannelId, - "PUS_8_TC_RECV", - pus_action_rx, - ); - let pus_8_handler = PusService8ActionHandler::new( + reply_receiver: mpsc::Receiver>, +) -> ActionServiceWrapper { + let action_request_handler = PusTargetedRequestService::new( PusServiceHelper::new( - action_srv_receiver, - action_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_ACTION_SERVICE.raw(), + pus_action_rx, + tm_sender, + create_verification_reporter(PUS_ACTION_SERVICE.apid), EcssTcInSharedStoreConverter::new(tc_pool.clone(), 2048), ), - ExampleActionRequestConverter::default(), + ActionRequestConverter::default(), + // TODO: Implementation which does not use run-time allocation? Maybe something like + // a bounded wrapper which pre-allocates using [HashMap::with_capacity].. 
+ DefaultActiveActionRequestMap::default(), + ActionReplyHandler::default(), action_router, - GenericRoutingErrorHandler::<8>::default(), + reply_receiver, ); - Pus8Wrapper { pus_8_handler } + ActionServiceWrapper { + service: action_request_handler, + } } pub fn create_action_service_dynamic( - tm_funnel_tx: mpsc::Sender>, - verif_reporter: VerificationReporterWithVecMpscSender, + tm_funnel_tx: mpsc::Sender, pus_action_rx: mpsc::Receiver, action_router: GenericRequestRouter, -) -> Pus8Wrapper< - MpscTcReceiver, - TmAsVecSenderWithMpsc, - EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, -> { - let action_srv_tm_sender = TmAsVecSenderWithId::new( - TmSenderId::PusAction as ChannelId, - "PUS_8_TM_SENDER", - tm_funnel_tx.clone(), - ); - let action_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusAction as ChannelId, - "PUS_8_TC_RECV", - pus_action_rx, - ); - let pus_8_handler = PusService8ActionHandler::new( + reply_receiver: mpsc::Receiver>, +) -> ActionServiceWrapper { + let action_request_handler = PusTargetedRequestService::new( PusServiceHelper::new( - action_srv_receiver, - action_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_ACTION_SERVICE.raw(), + pus_action_rx, + tm_funnel_tx, + create_verification_reporter(PUS_ACTION_SERVICE.apid), EcssTcInVecConverter::default(), ), - ExampleActionRequestConverter::default(), + ActionRequestConverter::default(), + DefaultActiveActionRequestMap::default(), + ActionReplyHandler::default(), action_router, - GenericRoutingErrorHandler::<8>::default(), + reply_receiver, ); - Pus8Wrapper { pus_8_handler } + ActionServiceWrapper { + service: action_request_handler, + } } -pub struct Pus8Wrapper< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, -> { - pub(crate) pus_8_handler: PusService8ActionHandler< - TcReceiver, +pub struct ActionServiceWrapper +{ + pub(crate) service: PusTargetedRequestService< + MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter, - ExampleActionRequestConverter, - GenericRequestRouter, - GenericRoutingErrorHandler<8>, + ActionRequestConverter, + ActionReplyHandler, + DefaultActiveActionRequestMap, + ActivePusActionRequestStd, + ActionRequest, + PusActionReply, >, } -impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - > Pus8Wrapper +impl TargetedPusService + for ActionServiceWrapper { - pub fn handle_next_packet(&mut self) -> bool { - match self.pus_8_handler.handle_one_tc() { + /// Returns [true] if the packet handling is finished. 
+ fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool { + match self.service.poll_and_handle_next_tc(time_stamp) { Ok(result) => match result { PusPacketHandlerResult::RequestHandled => {} PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { @@ -197,4 +306,436 @@ impl< } false } + + fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> bool { + self.service + .poll_and_check_next_reply(time_stamp) + .unwrap_or_else(|e| { + warn!("PUS 8: Handling reply failed with error {e:?}"); + false + }) + } + + fn check_for_request_timeouts(&mut self) { + self.service.check_for_request_timeouts(); + } +} + +#[cfg(test)] +mod tests { + use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID, TEST_UNIQUE_ID}; + use satrs::pus::verification; + use satrs::pus::verification::test_util::TestVerificationReporter; + use satrs::request::MessageMetadata; + use satrs::{ + res_code::ResultU16, + spacepackets::{ + ecss::{ + tc::{PusTcCreator, PusTcSecondaryHeader}, + tm::PusTmReader, + WritablePusPacket, + }, + SpHeader, + }, + }; + + use crate::{ + pus::tests::{PusConverterTestbench, ReplyHandlerTestbench, TargetedPusRequestTestbench}, + requests::CompositeRequest, + }; + + use super::*; + + impl + TargetedPusRequestTestbench< + ActionRequestConverter, + ActionReplyHandler, + DefaultActiveActionRequestMap, + ActivePusActionRequestStd, + ActionRequest, + PusActionReply, + > + { + pub fn new_for_action() -> Self { + let _ = env_logger::builder().is_test(true).try_init(); + let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel(); + let (pus_action_tx, pus_action_rx) = mpsc::channel(); + let (action_reply_tx, action_reply_rx) = mpsc::channel(); + let (action_req_tx, action_req_rx) = mpsc::channel(); + let verif_reporter = TestVerificationReporter::default(); + let mut generic_req_router = GenericRequestRouter::default(); + generic_req_router + .composite_router_map + .insert(TEST_COMPONENT_ID.id(), action_req_tx); + Self { + service: PusTargetedRequestService::new( + PusServiceHelper::new( + 0, + pus_action_rx, + tm_funnel_tx.clone(), + verif_reporter, + EcssTcInVecConverter::default(), + ), + ActionRequestConverter::default(), + DefaultActiveActionRequestMap::default(), + ActionReplyHandler::default(), + generic_req_router, + action_reply_rx, + ), + request_id: None, + pus_packet_tx: pus_action_tx, + tm_funnel_rx, + reply_tx: action_reply_tx, + request_rx: action_req_rx, + } + } + + pub fn verify_packet_started(&self) { + self.service + .service_helper + .common + .verif_reporter + .check_next_is_started_success( + self.service.service_helper.id(), + self.request_id.expect("request ID not set").into(), + ); + } + + pub fn verify_packet_completed(&self) { + self.service + .service_helper + .common + .verif_reporter + .check_next_is_completion_success( + self.service.service_helper.id(), + self.request_id.expect("request ID not set").into(), + ); + } + + pub fn verify_tm_empty(&self) { + let packet = self.tm_funnel_rx.try_recv(); + if let Err(mpsc::TryRecvError::Empty) = packet { + } else { + let tm = packet.unwrap(); + let unexpected_tm = PusTmReader::new(&tm.packet, 7).unwrap().0; + panic!("unexpected TM packet {unexpected_tm:?}"); + } + } + + pub fn verify_next_tc_is_handled_properly(&mut self, time_stamp: &[u8]) { + let result = self.service.poll_and_handle_next_tc(time_stamp); + if let Err(e) = result { + panic!("unexpected error {:?}", e); + } + let result = result.unwrap(); + match result { + PusPacketHandlerResult::RequestHandled => (), + _ => panic!("unexpected result {result:?}"), + 
} + } + + pub fn verify_all_tcs_handled(&mut self, time_stamp: &[u8]) { + let result = self.service.poll_and_handle_next_tc(time_stamp); + if let Err(e) = result { + panic!("unexpected error {:?}", e); + } + let result = result.unwrap(); + match result { + PusPacketHandlerResult::Empty => (), + _ => panic!("unexpected result {result:?}"), + } + } + + pub fn verify_next_reply_is_handled_properly(&mut self, time_stamp: &[u8]) { + let result = self.service.poll_and_check_next_reply(time_stamp); + assert!(result.is_ok()); + assert!(!result.unwrap()); + } + + pub fn verify_all_replies_handled(&mut self, time_stamp: &[u8]) { + let result = self.service.poll_and_check_next_reply(time_stamp); + assert!(result.is_ok()); + assert!(result.unwrap()); + } + + pub fn add_tc(&mut self, tc: &PusTcCreator) { + self.request_id = Some(verification::RequestId::new(tc).into()); + let token = self.service.service_helper.verif_reporter_mut().add_tc(tc); + let accepted_token = self + .service + .service_helper + .verif_reporter() + .acceptance_success( + self.service.service_helper.id(), + self.service.service_helper.tm_sender(), + token, + &[0; 7], + ) + .expect("TC acceptance failed"); + self.service + .service_helper + .verif_reporter() + .check_next_was_added(accepted_token.request_id()); + let id = self.service.service_helper.id(); + self.service + .service_helper + .verif_reporter() + .check_next_is_acceptance_success(id, accepted_token.request_id()); + self.pus_packet_tx + .send(EcssTcAndToken::new(tc.to_vec().unwrap(), accepted_token)) + .unwrap(); + } + } + + #[test] + fn basic_request() { + let mut testbench = TargetedPusRequestTestbench::new_for_action(); + // Create a basic action request and verify forwarding. + let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); + let sec_header = PusTcSecondaryHeader::new_simple(8, 128); + let action_id = 5_u32; + let mut app_data: [u8; 8] = [0; 8]; + app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID.to_be_bytes()); + app_data[4..8].copy_from_slice(&action_id.to_be_bytes()); + let pus8_packet = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true); + testbench.add_tc(&pus8_packet); + let time_stamp: [u8; 7] = [0; 7]; + testbench.verify_next_tc_is_handled_properly(&time_stamp); + testbench.verify_all_tcs_handled(&time_stamp); + + testbench.verify_packet_started(); + + let possible_req = testbench.request_rx.try_recv(); + assert!(possible_req.is_ok()); + let req = possible_req.unwrap(); + if let CompositeRequest::Action(action_req) = req.message { + assert_eq!(action_req.action_id, action_id); + assert_eq!(action_req.variant, ActionRequestVariant::NoData); + let action_reply = PusActionReply::new(action_id, ActionReplyVariant::Completed); + testbench + .reply_tx + .send(GenericMessage::new(req.requestor_info, action_reply)) + .unwrap(); + } else { + panic!("unexpected request type"); + } + testbench.verify_next_reply_is_handled_properly(&time_stamp); + testbench.verify_all_replies_handled(&time_stamp); + + testbench.verify_packet_completed(); + testbench.verify_tm_empty(); + } + + #[test] + fn basic_request_routing_error() { + let mut testbench = TargetedPusRequestTestbench::new_for_action(); + // Create a basic action request and verify forwarding. + let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); + let sec_header = PusTcSecondaryHeader::new_simple(8, 128); + let action_id = 5_u32; + let mut app_data: [u8; 8] = [0; 8]; + // Invalid ID, routing should fail. 
+ app_data[0..4].copy_from_slice(&(TEST_UNIQUE_ID + 1).to_be_bytes()); + app_data[4..8].copy_from_slice(&action_id.to_be_bytes()); + let pus8_packet = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true); + testbench.add_tc(&pus8_packet); + let time_stamp: [u8; 7] = [0; 7]; + + let result = testbench.service.poll_and_handle_next_tc(&time_stamp); + assert!(result.is_err()); + // Verify the correct result and completion failure. + } + + #[test] + fn converter_action_req_no_data() { + let mut testbench = PusConverterTestbench::new(ActionRequestConverter::default()); + let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); + let sec_header = PusTcSecondaryHeader::new_simple(8, 128); + let action_id = 5_u32; + let mut app_data: [u8; 8] = [0; 8]; + // Invalid ID, routing should fail. + app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID.to_be_bytes()); + app_data[4..8].copy_from_slice(&action_id.to_be_bytes()); + let pus8_packet = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true); + let token = testbench.add_tc(&pus8_packet); + let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID); + assert!(result.is_ok()); + let (active_req, request) = result.unwrap(); + if let ActionRequestVariant::NoData = request.variant { + assert_eq!(request.action_id, action_id); + assert_eq!(active_req.action_id, action_id); + assert_eq!( + active_req.target_id(), + UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID).raw() + ); + assert_eq!( + active_req.token().request_id(), + testbench.request_id().unwrap() + ); + } else { + panic!("unexpected action request variant"); + } + } + + #[test] + fn converter_action_req_with_data() { + let mut testbench = PusConverterTestbench::new(ActionRequestConverter::default()); + let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); + let sec_header = PusTcSecondaryHeader::new_simple(8, 128); + let action_id = 5_u32; + let mut app_data: [u8; 16] = [0; 16]; + // Invalid ID, routing should fail. 
+ app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID.to_be_bytes()); + app_data[4..8].copy_from_slice(&action_id.to_be_bytes()); + for i in 0..8 { + app_data[i + 8] = i as u8; + } + let pus8_packet = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true); + let token = testbench.add_tc(&pus8_packet); + let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID); + assert!(result.is_ok()); + let (active_req, request) = result.unwrap(); + if let ActionRequestVariant::VecData(vec) = request.variant { + assert_eq!(request.action_id, action_id); + assert_eq!(active_req.action_id, action_id); + assert_eq!(vec, app_data[8..].to_vec()); + } else { + panic!("unexpected action request variant"); + } + } + + #[test] + fn reply_handling_completion_success() { + let mut testbench = ReplyHandlerTestbench::new(ActionReplyHandler::default()); + let action_id = 5_u32; + let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID, &[]); + let active_action_req = + ActivePusActionRequestStd::new_from_common_req(action_id, active_req); + let reply = PusActionReply::new(action_id, ActionReplyVariant::Completed); + let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply); + let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]); + assert!(result.is_ok()); + assert!(result.unwrap()); + testbench.verif_reporter.assert_full_completion_success( + TEST_COMPONENT_ID.id(), + req_id, + None, + ); + } + + #[test] + fn reply_handling_completion_failure() { + let mut testbench = ReplyHandlerTestbench::new(ActionReplyHandler::default()); + let action_id = 5_u32; + let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID, &[]); + let active_action_req = + ActivePusActionRequestStd::new_from_common_req(action_id, active_req); + let error_code = ResultU16::new(2, 3); + let reply = PusActionReply::new( + action_id, + ActionReplyVariant::CompletionFailed { + error_code, + params: None, + }, + ); + let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply); + let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]); + assert!(result.is_ok()); + assert!(result.unwrap()); + testbench.verif_reporter.assert_completion_failure( + TEST_COMPONENT_ID.into(), + req_id, + None, + error_code.raw() as u64, + ); + } + + #[test] + fn reply_handling_step_success() { + let mut testbench = ReplyHandlerTestbench::new(ActionReplyHandler::default()); + let action_id = 5_u32; + let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID, &[]); + let active_action_req = + ActivePusActionRequestStd::new_from_common_req(action_id, active_req); + let reply = PusActionReply::new(action_id, ActionReplyVariant::StepSuccess { step: 1 }); + let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply); + let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]); + assert!(result.is_ok()); + // Entry should not be removed, completion not done yet. 
+ assert!(!result.unwrap()); + testbench.verif_reporter.check_next_was_added(req_id); + testbench + .verif_reporter + .check_next_is_acceptance_success(TEST_COMPONENT_ID.raw(), req_id); + testbench + .verif_reporter + .check_next_is_started_success(TEST_COMPONENT_ID.raw(), req_id); + testbench + .verif_reporter + .check_next_is_step_success(TEST_COMPONENT_ID.raw(), req_id, 1); + } + + #[test] + fn reply_handling_step_failure() { + let mut testbench = ReplyHandlerTestbench::new(ActionReplyHandler::default()); + let action_id = 5_u32; + let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID, &[]); + let active_action_req = + ActivePusActionRequestStd::new_from_common_req(action_id, active_req); + let error_code = ResultU16::new(2, 3); + let reply = PusActionReply::new( + action_id, + ActionReplyVariant::StepFailed { + error_code, + step: 1, + params: None, + }, + ); + let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply); + let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]); + assert!(result.is_ok()); + assert!(result.unwrap()); + testbench.verif_reporter.check_next_was_added(req_id); + testbench + .verif_reporter + .check_next_is_acceptance_success(TEST_COMPONENT_ID.id(), req_id); + testbench + .verif_reporter + .check_next_is_started_success(TEST_COMPONENT_ID.id(), req_id); + testbench.verif_reporter.check_next_is_step_failure( + TEST_COMPONENT_ID.id(), + req_id, + error_code.raw().into(), + ); + } + + #[test] + fn reply_handling_unrequested_reply() { + let mut testbench = ReplyHandlerTestbench::new(ActionReplyHandler::default()); + let action_reply = PusActionReply::new(5_u32, ActionReplyVariant::Completed); + let unrequested_reply = + GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply); + // Right now this function does not do a lot. We simply check that it does not panic or do + // weird stuff. 
+ let result = testbench.handle_unrequested_reply(&unrequested_reply); + assert!(result.is_ok()); + } + + #[test] + fn reply_handling_reply_timeout() { + let mut testbench = ReplyHandlerTestbench::new(ActionReplyHandler::default()); + let action_id = 5_u32; + let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID, &[]); + let result = testbench.handle_request_timeout( + &ActivePusActionRequestStd::new_from_common_req(action_id, active_request), + &[], + ); + assert!(result.is_ok()); + testbench.verif_reporter.assert_completion_failure( + TEST_COMPONENT_ID.raw(), + req_id, + None, + tmtc_err::REQUEST_TIMEOUT.raw() as u64, + ); + } } diff --git a/satrs-example/src/pus/event.rs b/satrs-example/src/pus/event.rs index 1d16f5c..53fb65b 100644 --- a/satrs-example/src/pus/event.rs +++ b/satrs-example/src/pus/event.rs @@ -1,113 +1,69 @@ use std::sync::mpsc; +use crate::pus::create_verification_reporter; use log::{error, warn}; -use satrs::pool::{SharedStaticMemoryPool, StoreAddr}; +use satrs::pool::SharedStaticMemoryPool; use satrs::pus::event_man::EventRequestWithToken; -use satrs::pus::event_srv::PusService5EventHandler; -use satrs::pus::verification::std_mod::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, -}; -use satrs::pus::verification::VerificationReportingProvider; +use satrs::pus::event_srv::PusEventServiceHandler; +use satrs::pus::verification::VerificationReporter; use satrs::pus::{ EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, - EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, PusServiceHelper, - TmAsVecSenderWithId, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc, - TmInSharedPoolSenderWithId, + EcssTmSenderCore, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, + PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender, }; -use satrs::tmtc::tm_helper::SharedTmPool; -use satrs::ChannelId; -use satrs_example::config::{TcReceiverId, TmSenderId, PUS_APID}; +use satrs_example::config::components::PUS_EVENT_MANAGEMENT; pub fn create_event_service_static( - shared_tm_store: SharedTmPool, - tm_funnel_tx: mpsc::SyncSender, - verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender, + tm_sender: TmInSharedPoolSender>, tc_pool: SharedStaticMemoryPool, pus_event_rx: mpsc::Receiver, event_request_tx: mpsc::Sender, -) -> Pus5Wrapper< - MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, - EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, -> { - let event_srv_tm_sender = TmInSharedPoolSenderWithId::new( - TmSenderId::PusEvent as ChannelId, - "PUS_5_TM_SENDER", - shared_tm_store.clone(), - tm_funnel_tx.clone(), - ); - let event_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusEvent as ChannelId, - "PUS_5_TC_RECV", - pus_event_rx, - ); - let pus_5_handler = PusService5EventHandler::new( +) -> EventServiceWrapper { + let pus_5_handler = PusEventServiceHandler::new( PusServiceHelper::new( - event_srv_receiver, - event_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_EVENT_MANAGEMENT.raw(), + pus_event_rx, + tm_sender, + create_verification_reporter(PUS_EVENT_MANAGEMENT.apid), EcssTcInSharedStoreConverter::new(tc_pool.clone(), 2048), ), event_request_tx, ); - Pus5Wrapper { pus_5_handler } + EventServiceWrapper { + handler: pus_5_handler, + } } pub fn create_event_service_dynamic( - tm_funnel_tx: mpsc::Sender>, - verif_reporter: 
VerificationReporterWithVecMpscSender, + tm_funnel_tx: mpsc::Sender, pus_event_rx: mpsc::Receiver, event_request_tx: mpsc::Sender, -) -> Pus5Wrapper< - MpscTcReceiver, - TmAsVecSenderWithMpsc, - EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, -> { - let event_srv_tm_sender = TmAsVecSenderWithId::new( - TmSenderId::PusEvent as ChannelId, - "PUS_5_TM_SENDER", - tm_funnel_tx, - ); - let event_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusEvent as ChannelId, - "PUS_5_TC_RECV", - pus_event_rx, - ); - let pus_5_handler = PusService5EventHandler::new( +) -> EventServiceWrapper { + let pus_5_handler = PusEventServiceHandler::new( PusServiceHelper::new( - event_srv_receiver, - event_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_EVENT_MANAGEMENT.raw(), + pus_event_rx, + tm_funnel_tx, + create_verification_reporter(PUS_EVENT_MANAGEMENT.apid), EcssTcInVecConverter::default(), ), event_request_tx, ); - Pus5Wrapper { pus_5_handler } + EventServiceWrapper { + handler: pus_5_handler, + } } -pub struct Pus5Wrapper< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, -> { - pub pus_5_handler: - PusService5EventHandler, +pub struct EventServiceWrapper { + pub handler: + PusEventServiceHandler, } -impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - > Pus5Wrapper +impl + EventServiceWrapper { - pub fn handle_next_packet(&mut self) -> bool { - match self.pus_5_handler.handle_one_tc() { + pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool { + match self.handler.poll_and_handle_next_tc(time_stamp) { Ok(result) => match result { PusPacketHandlerResult::RequestHandled => {} PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { diff --git a/satrs-example/src/pus/hk.rs b/satrs-example/src/pus/hk.rs index 48a54be..76dc659 100644 --- a/satrs-example/src/pus/hk.rs +++ b/satrs-example/src/pus/hk.rs @@ -1,50 +1,134 @@ +use derive_new::new; use log::{error, warn}; -use satrs::hk::{CollectionIntervalFactor, HkRequest}; -use satrs::pool::{SharedStaticMemoryPool, StoreAddr}; -use satrs::pus::hk::{PusHkToRequestConverter, PusService3HkHandler}; -use satrs::pus::verification::std_mod::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, -}; +use satrs::hk::{CollectionIntervalFactor, HkRequest, HkRequestVariant, UniqueId}; +use satrs::pool::SharedStaticMemoryPool; use satrs::pus::verification::{ - FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken, + FailParams, TcStateAccepted, TcStateStarted, VerificationReporter, + VerificationReportingProvider, VerificationToken, }; use satrs::pus::{ - EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, - EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, - PusPacketHandlingError, PusServiceHelper, TmAsVecSenderWithId, TmAsVecSenderWithMpsc, - TmInSharedPoolSenderWithBoundedMpsc, TmInSharedPoolSenderWithId, + ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap, EcssTcAndToken, + EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSenderCore, + EcssTmtcError, GenericConversionError, MpscTcReceiver, MpscTmAsVecSender, + MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, PusReplyHandler, PusServiceHelper, + 
PusTcToRequestConverter, PusTmAsVec, PusTmInPool, TmInSharedPoolSender, }; -use satrs::request::TargetAndApidId; +use satrs::request::{GenericMessage, UniqueApidTargetId}; use satrs::spacepackets::ecss::tc::PusTcReader; use satrs::spacepackets::ecss::{hk, PusPacket}; -use satrs::tmtc::tm_helper::SharedTmPool; -use satrs::{ChannelId, TargetId}; -use satrs_example::config::{hk_err, tmtc_err, TcReceiverId, TmSenderId, PUS_APID}; -use std::sync::mpsc::{self}; +use satrs::ComponentId; +use satrs_example::config::components::PUS_HK_SERVICE; +use satrs_example::config::{hk_err, tmtc_err}; +use std::sync::mpsc; +use std::time::Duration; +use crate::pus::{create_verification_reporter, generic_pus_request_timeout_handler}; use crate::requests::GenericRequestRouter; -use super::GenericRoutingErrorHandler; +use super::PusTargetedRequestService; + +#[derive(Clone, PartialEq, Debug, new)] +pub struct HkReply { + pub unique_id: UniqueId, + pub variant: HkReplyVariant, +} + +#[derive(Clone, PartialEq, Debug)] +pub enum HkReplyVariant { + Ack, +} #[derive(Default)] -pub struct ExampleHkRequestConverter {} +pub struct HkReplyHandler {} -impl PusHkToRequestConverter for ExampleHkRequestConverter { - type Error = PusPacketHandlingError; +impl PusReplyHandler for HkReplyHandler { + type Error = EcssTmtcError; + + fn handle_unrequested_reply( + &mut self, + _caller_id: ComponentId, + reply: &GenericMessage, + _tm_sender: &impl EcssTmSenderCore, + ) -> Result<(), Self::Error> { + log::warn!("received unexpected reply for service 3: {reply:?}"); + Ok(()) + } + + fn handle_reply( + &mut self, + caller_id: ComponentId, + reply: &GenericMessage, + active_request: &ActivePusRequestStd, + tm_sender: &impl EcssTmSenderCore, + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result { + let started_token: VerificationToken = active_request + .token() + .try_into() + .expect("invalid token state"); + match reply.message.variant { + HkReplyVariant::Ack => { + verification_handler + .completion_success(caller_id, tm_sender, started_token, time_stamp) + .expect("sending completion success verification failed"); + } + }; + Ok(true) + } + + fn handle_request_timeout( + &mut self, + caller_id: ComponentId, + active_request: &ActivePusRequestStd, + tm_sender: &impl EcssTmSenderCore, + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result<(), Self::Error> { + generic_pus_request_timeout_handler( + caller_id, + tm_sender, + active_request, + verification_handler, + time_stamp, + "HK", + )?; + Ok(()) + } +} + +pub struct HkRequestConverter { + timeout: Duration, +} + +impl Default for HkRequestConverter { + fn default() -> Self { + Self { + timeout: Duration::from_secs(60), + } + } +} + +impl PusTcToRequestConverter for HkRequestConverter { + type Error = GenericConversionError; fn convert( &mut self, + caller_id: ComponentId, token: VerificationToken, tc: &PusTcReader, - time_stamp: &[u8], + tm_sender: &(impl EcssTmSenderCore + ?Sized), verif_reporter: &impl VerificationReportingProvider, - ) -> Result<(TargetId, HkRequest), Self::Error> { + time_stamp: &[u8], + ) -> Result<(ActivePusRequestStd, HkRequest), Self::Error> { let user_data = tc.user_data(); if user_data.is_empty() { let user_data_len = user_data.len() as u32; let user_data_len_raw = user_data_len.to_be_bytes(); verif_reporter .start_failure( + caller_id, + tm_sender, token, FailParams::new( time_stamp, @@ -53,7 +137,7 @@ impl PusHkToRequestConverter for ExampleHkRequestConverter { ), ) 
.expect("Sending start failure TM failed"); - return Err(PusPacketHandlingError::NotEnoughAppData { + return Err(GenericConversionError::NotEnoughAppData { expected: 4, found: 0, }); @@ -67,178 +151,168 @@ impl PusHkToRequestConverter for ExampleHkRequestConverter { let user_data_len = user_data.len() as u32; let user_data_len_raw = user_data_len.to_be_bytes(); verif_reporter - .start_failure(token, FailParams::new(time_stamp, err, &user_data_len_raw)) + .start_failure( + caller_id, + tm_sender, + token, + FailParams::new(time_stamp, err, &user_data_len_raw), + ) .expect("Sending start failure TM failed"); - return Err(PusPacketHandlingError::NotEnoughAppData { + return Err(GenericConversionError::NotEnoughAppData { expected: 8, found: 4, }); } let subservice = tc.subservice(); - let target_id = TargetAndApidId::from_pus_tc(tc).expect("invalid tc format"); + let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).expect("invalid tc format"); let unique_id = u32::from_be_bytes(tc.user_data()[4..8].try_into().unwrap()); let standard_subservice = hk::Subservice::try_from(subservice); if standard_subservice.is_err() { verif_reporter .start_failure( + caller_id, + tm_sender, token, FailParams::new(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE, &[subservice]), ) .expect("Sending start failure TM failed"); - return Err(PusPacketHandlingError::InvalidSubservice(subservice)); + return Err(GenericConversionError::InvalidSubservice(subservice)); } - Ok(( - target_id.into(), - match standard_subservice.unwrap() { - hk::Subservice::TcEnableHkGeneration | hk::Subservice::TcEnableDiagGeneration => { - HkRequest::Enable(unique_id) - } - hk::Subservice::TcDisableHkGeneration | hk::Subservice::TcDisableDiagGeneration => { - HkRequest::Disable(unique_id) - } - hk::Subservice::TcReportHkReportStructures => todo!(), - hk::Subservice::TmHkPacket => todo!(), - hk::Subservice::TcGenerateOneShotHk | hk::Subservice::TcGenerateOneShotDiag => { - HkRequest::OneShot(unique_id) - } - hk::Subservice::TcModifyDiagCollectionInterval - | hk::Subservice::TcModifyHkCollectionInterval => { - if user_data.len() < 12 { - verif_reporter - .start_failure( - token, - FailParams::new_no_fail_data( - time_stamp, - &tmtc_err::NOT_ENOUGH_APP_DATA, - ), - ) - .expect("Sending start failure TM failed"); - return Err(PusPacketHandlingError::NotEnoughAppData { - expected: 12, - found: user_data.len(), - }); - } - HkRequest::ModifyCollectionInterval( - unique_id, - CollectionIntervalFactor::from_be_bytes( - user_data[8..12].try_into().unwrap(), - ), - ) - } - _ => { + let request = match standard_subservice.unwrap() { + hk::Subservice::TcEnableHkGeneration | hk::Subservice::TcEnableDiagGeneration => { + HkRequest::new(unique_id, HkRequestVariant::EnablePeriodic) + } + hk::Subservice::TcDisableHkGeneration | hk::Subservice::TcDisableDiagGeneration => { + HkRequest::new(unique_id, HkRequestVariant::DisablePeriodic) + } + hk::Subservice::TcReportHkReportStructures => todo!(), + hk::Subservice::TmHkPacket => todo!(), + hk::Subservice::TcGenerateOneShotHk | hk::Subservice::TcGenerateOneShotDiag => { + HkRequest::new(unique_id, HkRequestVariant::OneShot) + } + hk::Subservice::TcModifyDiagCollectionInterval + | hk::Subservice::TcModifyHkCollectionInterval => { + if user_data.len() < 12 { verif_reporter .start_failure( + caller_id, + tm_sender, token, - FailParams::new( + FailParams::new_no_fail_data( time_stamp, - &tmtc_err::PUS_SUBSERVICE_NOT_IMPLEMENTED, - &[subservice], + &tmtc_err::NOT_ENOUGH_APP_DATA, ), ) .expect("Sending start failure 
TM failed"); - return Err(PusPacketHandlingError::InvalidSubservice(subservice)); + return Err(GenericConversionError::NotEnoughAppData { + expected: 12, + found: user_data.len(), + }); } - }, + HkRequest::new( + unique_id, + HkRequestVariant::ModifyCollectionInterval( + CollectionIntervalFactor::from_be_bytes( + user_data[8..12].try_into().unwrap(), + ), + ), + ) + } + _ => { + verif_reporter + .start_failure( + caller_id, + tm_sender, + token, + FailParams::new( + time_stamp, + &tmtc_err::PUS_SUBSERVICE_NOT_IMPLEMENTED, + &[subservice], + ), + ) + .expect("Sending start failure TM failed"); + return Err(GenericConversionError::InvalidSubservice(subservice)); + } + }; + Ok(( + ActivePusRequestStd::new(target_id_and_apid.into(), token, self.timeout), + request, )) } } pub fn create_hk_service_static( - shared_tm_store: SharedTmPool, - tm_funnel_tx: mpsc::SyncSender, - verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender, + tm_sender: TmInSharedPoolSender>, tc_pool: SharedStaticMemoryPool, pus_hk_rx: mpsc::Receiver, request_router: GenericRequestRouter, -) -> Pus3Wrapper< - MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, - EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, -> { - let hk_srv_tm_sender = TmInSharedPoolSenderWithId::new( - TmSenderId::PusHk as ChannelId, - "PUS_3_TM_SENDER", - shared_tm_store.clone(), - tm_funnel_tx.clone(), - ); - let hk_srv_receiver = - MpscTcReceiver::new(TcReceiverId::PusHk as ChannelId, "PUS_8_TC_RECV", pus_hk_rx); - let pus_3_handler = PusService3HkHandler::new( + reply_receiver: mpsc::Receiver>, +) -> HkServiceWrapper { + let pus_3_handler = PusTargetedRequestService::new( PusServiceHelper::new( - hk_srv_receiver, - hk_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_HK_SERVICE.raw(), + pus_hk_rx, + tm_sender, + create_verification_reporter(PUS_HK_SERVICE.apid), EcssTcInSharedStoreConverter::new(tc_pool, 2048), ), - ExampleHkRequestConverter::default(), + HkRequestConverter::default(), + DefaultActiveRequestMap::default(), + HkReplyHandler::default(), request_router, - GenericRoutingErrorHandler::default(), + reply_receiver, ); - Pus3Wrapper { pus_3_handler } + HkServiceWrapper { + service: pus_3_handler, + } } pub fn create_hk_service_dynamic( - tm_funnel_tx: mpsc::Sender>, - verif_reporter: VerificationReporterWithVecMpscSender, + tm_funnel_tx: mpsc::Sender, pus_hk_rx: mpsc::Receiver, request_router: GenericRequestRouter, -) -> Pus3Wrapper< - MpscTcReceiver, - TmAsVecSenderWithMpsc, - EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, -> { - let hk_srv_tm_sender = TmAsVecSenderWithId::new( - TmSenderId::PusHk as ChannelId, - "PUS_3_TM_SENDER", - tm_funnel_tx.clone(), - ); - let hk_srv_receiver = - MpscTcReceiver::new(TcReceiverId::PusHk as ChannelId, "PUS_8_TC_RECV", pus_hk_rx); - let pus_3_handler = PusService3HkHandler::new( + reply_receiver: mpsc::Receiver>, +) -> HkServiceWrapper { + let pus_3_handler = PusTargetedRequestService::new( PusServiceHelper::new( - hk_srv_receiver, - hk_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_HK_SERVICE.raw(), + pus_hk_rx, + tm_funnel_tx, + create_verification_reporter(PUS_HK_SERVICE.apid), EcssTcInVecConverter::default(), ), - ExampleHkRequestConverter::default(), + HkRequestConverter::default(), + DefaultActiveRequestMap::default(), + HkReplyHandler::default(), request_router, - GenericRoutingErrorHandler::default(), + reply_receiver, ); - Pus3Wrapper { pus_3_handler } + HkServiceWrapper { + service: pus_3_handler, + } } 
-pub struct Pus3Wrapper< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, -> { - pub(crate) pus_3_handler: PusService3HkHandler< - TcReceiver, +pub struct HkServiceWrapper { + pub(crate) service: PusTargetedRequestService< + MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter, - ExampleHkRequestConverter, - GenericRequestRouter, - GenericRoutingErrorHandler<3>, + HkRequestConverter, + HkReplyHandler, + DefaultActiveRequestMap, + ActivePusRequestStd, + HkRequest, + HkReply, >, } -impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - > Pus3Wrapper +impl + HkServiceWrapper { - pub fn handle_next_packet(&mut self) -> bool { - match self.pus_3_handler.handle_one_tc() { + pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool { + match self.service.poll_and_handle_next_tc(time_stamp) { Ok(result) => match result { PusPacketHandlerResult::RequestHandled => {} PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { @@ -260,4 +334,232 @@ impl< } false } + + pub fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> bool { + self.service + .poll_and_check_next_reply(time_stamp) + .unwrap_or_else(|e| { + warn!("PUS 3: Handling reply failed with error {e:?}"); + false + }) + } + + pub fn check_for_request_timeouts(&mut self) { + self.service.check_for_request_timeouts(); + } +} + +#[cfg(test)] +mod tests { + use satrs::pus::test_util::{TEST_COMPONENT_ID, TEST_UNIQUE_ID}; + use satrs::request::MessageMetadata; + use satrs::{ + hk::HkRequestVariant, + pus::test_util::TEST_APID, + request::GenericMessage, + spacepackets::{ + ecss::{hk::Subservice, tc::PusTcCreator}, + SpHeader, + }, + }; + use satrs_example::config::tmtc_err; + + use crate::pus::{ + hk::HkReplyVariant, + tests::{PusConverterTestbench, ReplyHandlerTestbench}, + }; + + use super::{HkReply, HkReplyHandler, HkRequestConverter}; + + #[test] + fn hk_converter_one_shot_req() { + let mut hk_bench = PusConverterTestbench::new(HkRequestConverter::default()); + let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); + let target_id = TEST_UNIQUE_ID; + let unique_id = 5_u32; + let mut app_data: [u8; 8] = [0; 8]; + app_data[0..4].copy_from_slice(&target_id.to_be_bytes()); + app_data[4..8].copy_from_slice(&unique_id.to_be_bytes()); + + let hk_req = PusTcCreator::new_simple( + &mut sp_header, + 3, + Subservice::TcGenerateOneShotHk as u8, + Some(&app_data), + true, + ); + let accepted_token = hk_bench.add_tc(&hk_req); + let (_active_req, req) = hk_bench + .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID) + .expect("conversion failed"); + + assert_eq!(req.unique_id, unique_id); + if let HkRequestVariant::OneShot = req.variant { + } else { + panic!("unexpected HK request") + } + } + + #[test] + fn hk_converter_enable_periodic_generation() { + let mut hk_bench = PusConverterTestbench::new(HkRequestConverter::default()); + let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); + let target_id = TEST_UNIQUE_ID; + let unique_id = 5_u32; + let mut app_data: [u8; 8] = [0; 8]; + app_data[0..4].copy_from_slice(&target_id.to_be_bytes()); + app_data[4..8].copy_from_slice(&unique_id.to_be_bytes()); + let mut generic_check = |tc: &PusTcCreator| { + let accepted_token = hk_bench.add_tc(tc); + let (_active_req, req) = hk_bench + .convert(accepted_token, &[], TEST_APID, 
TEST_UNIQUE_ID) + .expect("conversion failed"); + assert_eq!(req.unique_id, unique_id); + if let HkRequestVariant::EnablePeriodic = req.variant { + } else { + panic!("unexpected HK request") + } + }; + let tc0 = PusTcCreator::new_simple( + &mut sp_header, + 3, + Subservice::TcEnableHkGeneration as u8, + Some(&app_data), + true, + ); + generic_check(&tc0); + let tc1 = PusTcCreator::new_simple( + &mut sp_header, + 3, + Subservice::TcEnableDiagGeneration as u8, + Some(&app_data), + true, + ); + generic_check(&tc1); + } + + #[test] + fn hk_conversion_disable_periodic_generation() { + let mut hk_bench = PusConverterTestbench::new(HkRequestConverter::default()); + let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); + let target_id = TEST_UNIQUE_ID; + let unique_id = 5_u32; + let mut app_data: [u8; 8] = [0; 8]; + app_data[0..4].copy_from_slice(&target_id.to_be_bytes()); + app_data[4..8].copy_from_slice(&unique_id.to_be_bytes()); + let mut generic_check = |tc: &PusTcCreator| { + let accepted_token = hk_bench.add_tc(tc); + let (_active_req, req) = hk_bench + .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID) + .expect("conversion failed"); + assert_eq!(req.unique_id, unique_id); + if let HkRequestVariant::DisablePeriodic = req.variant { + } else { + panic!("unexpected HK request") + } + }; + let tc0 = PusTcCreator::new_simple( + &mut sp_header, + 3, + Subservice::TcDisableHkGeneration as u8, + Some(&app_data), + true, + ); + generic_check(&tc0); + let tc1 = PusTcCreator::new_simple( + &mut sp_header, + 3, + Subservice::TcDisableDiagGeneration as u8, + Some(&app_data), + true, + ); + generic_check(&tc1); + } + + #[test] + fn hk_conversion_modify_interval() { + let mut hk_bench = PusConverterTestbench::new(HkRequestConverter::default()); + let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); + let target_id = TEST_UNIQUE_ID; + let unique_id = 5_u32; + let mut app_data: [u8; 12] = [0; 12]; + let collection_interval_factor = 5_u32; + app_data[0..4].copy_from_slice(&target_id.to_be_bytes()); + app_data[4..8].copy_from_slice(&unique_id.to_be_bytes()); + app_data[8..12].copy_from_slice(&collection_interval_factor.to_be_bytes()); + + let mut generic_check = |tc: &PusTcCreator| { + let accepted_token = hk_bench.add_tc(tc); + let (_active_req, req) = hk_bench + .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID) + .expect("conversion failed"); + assert_eq!(req.unique_id, unique_id); + if let HkRequestVariant::ModifyCollectionInterval(interval_factor) = req.variant { + assert_eq!(interval_factor, collection_interval_factor); + } else { + panic!("unexpected HK request") + } + }; + let tc0 = PusTcCreator::new_simple( + &mut sp_header, + 3, + Subservice::TcModifyHkCollectionInterval as u8, + Some(&app_data), + true, + ); + generic_check(&tc0); + let tc1 = PusTcCreator::new_simple( + &mut sp_header, + 3, + Subservice::TcModifyDiagCollectionInterval as u8, + Some(&app_data), + true, + ); + generic_check(&tc1); + } + + #[test] + fn hk_reply_handler() { + let mut reply_testbench = ReplyHandlerTestbench::new(HkReplyHandler::default()); + let sender_id = 2_u64; + let apid_target_id = 3_u32; + let unique_id = 5_u32; + let (req_id, active_req) = reply_testbench.add_tc(TEST_APID, apid_target_id, &[]); + let reply = GenericMessage::new( + MessageMetadata::new(req_id.into(), sender_id), + HkReply::new(unique_id, HkReplyVariant::Ack), + ); + let result = reply_testbench.handle_reply(&reply, &active_req, &[]); + assert!(result.is_ok()); + assert!(result.unwrap()); + reply_testbench + 
.verif_reporter + .assert_full_completion_success(TEST_COMPONENT_ID.raw(), req_id, None); + } + + #[test] + fn reply_handling_unrequested_reply() { + let mut testbench = ReplyHandlerTestbench::new(HkReplyHandler::default()); + let action_reply = HkReply::new(5_u32, HkReplyVariant::Ack); + let unrequested_reply = + GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply); + // Right now this function does not do a lot. We simply check that it does not panic or do + // weird stuff. + let result = testbench.handle_unrequested_reply(&unrequested_reply); + assert!(result.is_ok()); + } + + #[test] + fn reply_handling_reply_timeout() { + let mut testbench = ReplyHandlerTestbench::new(HkReplyHandler::default()); + let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID, &[]); + let result = testbench.handle_request_timeout(&active_request, &[]); + assert!(result.is_ok()); + testbench.verif_reporter.assert_completion_failure( + TEST_COMPONENT_ID.raw(), + req_id, + None, + tmtc_err::REQUEST_TIMEOUT.raw() as u64, + ); + } } diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index 70102b4..bf805e6 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -1,75 +1,71 @@ +use crate::requests::GenericRequestRouter; use crate::tmtc::MpscStoreAndSendError; use log::warn; -use satrs::pus::verification::{FailParams, VerificationReportingProvider}; -use satrs::pus::{ - EcssTcAndToken, GenericRoutingError, PusPacketHandlerResult, PusRoutingErrorHandler, TcInMemory, +use satrs::pus::verification::{ + self, FailParams, TcStateAccepted, TcStateStarted, VerificationReporter, + VerificationReporterCfg, VerificationReportingProvider, VerificationToken, }; +use satrs::pus::{ + ActiveRequestMapProvider, ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter, + EcssTcReceiverCore, EcssTmSenderCore, EcssTmtcError, GenericConversionError, + GenericRoutingError, PusPacketHandlerResult, PusPacketHandlingError, PusReplyHandler, + PusRequestRouter, PusServiceHelper, PusTcToRequestConverter, TcInMemory, +}; +use satrs::queue::GenericReceiveError; +use satrs::request::{Apid, GenericMessage, MessageMetadata}; use satrs::spacepackets::ecss::tc::PusTcReader; use satrs::spacepackets::ecss::PusServiceId; -use satrs::spacepackets::time::cds::CdsTime; -use satrs::spacepackets::time::TimeWriter; +use satrs::ComponentId; +use satrs_example::config::components::PUS_ROUTING_SERVICE; use satrs_example::config::{tmtc_err, CustomPusServiceId}; -use std::sync::mpsc::Sender; +use satrs_example::TimeStampHelper; +use std::fmt::Debug; +use std::sync::mpsc::{self, Sender}; pub mod action; pub mod event; pub mod hk; +pub mod mode; pub mod scheduler; pub mod stack; pub mod test; -pub struct PusTcMpscRouter { - pub test_service_receiver: Sender, - pub event_service_receiver: Sender, - pub sched_service_receiver: Sender, - pub hk_service_receiver: Sender, - pub action_service_receiver: Sender, +pub fn create_verification_reporter(apid: Apid) -> VerificationReporter { + let verif_cfg = VerificationReporterCfg::new(apid, 1, 2, 8).unwrap(); + // Every software component which needs to generate verification telemetry, gets a cloned + // verification reporter. + VerificationReporter::new(&verif_cfg) } -pub struct PusReceiver { +/// Simple router structure which forwards PUS telecommands to dedicated handlers. 
+pub struct PusTcMpscRouter { + pub test_tc_sender: Sender, + pub event_tc_sender: Sender, + pub sched_tc_sender: Sender, + pub hk_tc_sender: Sender, + pub action_tc_sender: Sender, + pub mode_tc_sender: Sender, +} + +pub struct PusReceiver { + pub id: ComponentId, + pub tm_sender: TmSender, pub verif_reporter: VerificationReporter, pub pus_router: PusTcMpscRouter, stamp_helper: TimeStampHelper, } -struct TimeStampHelper { - stamper: CdsTime, - time_stamp: [u8; 7], -} - -impl TimeStampHelper { - pub fn new() -> Self { +impl PusReceiver { + pub fn new(tm_sender: TmSender, pus_router: PusTcMpscRouter) -> Self { Self { - stamper: CdsTime::new_with_u16_days(0, 0), - time_stamp: [0; 7], - } - } - - pub fn stamp(&self) -> &[u8] { - &self.time_stamp - } - - pub fn update_from_now(&mut self) { - self.stamper - .update_from_now() - .expect("Updating timestamp failed"); - self.stamper - .write_to_bytes(&mut self.time_stamp) - .expect("Writing timestamp failed"); - } -} - -impl PusReceiver { - pub fn new(verif_reporter: VerificationReporter, pus_router: PusTcMpscRouter) -> Self { - Self { - verif_reporter, + id: PUS_ROUTING_SERVICE.raw(), + tm_sender, + verif_reporter: create_verification_reporter(PUS_ROUTING_SERVICE.apid), pus_router, - stamp_helper: TimeStampHelper::new(), + stamp_helper: TimeStampHelper::default(), } } -} -impl PusReceiver { pub fn handle_tc_packet( &mut self, tc_in_memory: TcInMemory, @@ -80,41 +76,40 @@ impl PusReceiver match standard_service { - PusServiceId::Test => { - self.pus_router.test_service_receiver.send(EcssTcAndToken { - tc_in_memory, - token: Some(accepted_token.into()), - })? - } + PusServiceId::Test => self.pus_router.test_tc_sender.send(EcssTcAndToken { + tc_in_memory, + token: Some(accepted_token.into()), + })?, PusServiceId::Housekeeping => { - self.pus_router.hk_service_receiver.send(EcssTcAndToken { + self.pus_router.hk_tc_sender.send(EcssTcAndToken { tc_in_memory, token: Some(accepted_token.into()), })? } - PusServiceId::Event => { - self.pus_router - .event_service_receiver - .send(EcssTcAndToken { - tc_in_memory, - token: Some(accepted_token.into()), - })? - } + PusServiceId::Event => self.pus_router.event_tc_sender.send(EcssTcAndToken { + tc_in_memory, + token: Some(accepted_token.into()), + })?, PusServiceId::Scheduling => { - self.pus_router - .sched_service_receiver - .send(EcssTcAndToken { - tc_in_memory, - token: Some(accepted_token.into()), - })? + self.pus_router.sched_tc_sender.send(EcssTcAndToken { + tc_in_memory, + token: Some(accepted_token.into()), + })? } _ => { let result = self.verif_reporter.start_failure( + self.id, + &self.tm_sender, accepted_token, FailParams::new( self.stamp_helper.stamp(), @@ -139,6 +134,8 @@ impl PusReceiver PusReceiver {} +pub trait TargetedPusService { + /// Returns [true] if the packet handling is finished. + fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool; + fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> bool; + fn check_for_request_timeouts(&mut self); +} -impl PusRoutingErrorHandler for GenericRoutingErrorHandler { - type Error = satrs::pus::GenericRoutingError; +/// This is a generic handler class for all PUS services where a PUS telecommand is converted +/// to a targeted request. +/// +/// The generic steps for this process are the following +/// +/// 1. Poll for TC packets +/// 2. Convert the raw packets to a [PusTcReader]. +/// 3. Convert the PUS TC to a typed request using the [PusTcToRequestConverter]. +/// 4. Route the requests using the [GenericRequestRouter]. +/// 5. 
Add the request to the active request map using the [ActiveRequestMapProvider] abstraction. +/// 6. Check for replies which complete the forwarded request. The handler takes care of +/// the verification process. +/// 7. Check for timeouts of active requests. Generally, the timeout on the service level should +/// be highest expected timeout for the given target. +/// +/// The handler exposes the following API: +/// +/// 1. [Self::handle_one_tc] which tries to poll and handle one TC packet, covering steps 1-5. +/// 2. [Self::check_one_reply] which tries to poll and handle one reply, covering step 6. +/// 3. [Self::check_for_request_timeouts] which checks for request timeouts, covering step 7. +pub struct PusTargetedRequestService< + TcReceiver: EcssTcReceiverCore, + TmSender: EcssTmSenderCore, + TcInMemConverter: EcssTcInMemConverter, + VerificationReporter: VerificationReportingProvider, + RequestConverter: PusTcToRequestConverter, + ReplyHandler: PusReplyHandler, + ActiveRequestMap: ActiveRequestMapProvider, + ActiveRequestInfo: ActiveRequestProvider, + RequestType, + ReplyType, +> { + pub service_helper: + PusServiceHelper, + pub request_router: GenericRequestRouter, + pub request_converter: RequestConverter, + pub active_request_map: ActiveRequestMap, + pub reply_handler: ReplyHandler, + pub reply_receiver: mpsc::Receiver>, + phantom: std::marker::PhantomData<(RequestType, ActiveRequestInfo, ReplyType)>, +} - fn handle_error( - &self, - target_id: satrs::TargetId, - token: satrs::pus::verification::VerificationToken< - satrs::pus::verification::TcStateAccepted, +impl< + TcReceiver: EcssTcReceiverCore, + TmSender: EcssTmSenderCore, + TcInMemConverter: EcssTcInMemConverter, + VerificationReporter: VerificationReportingProvider, + RequestConverter: PusTcToRequestConverter, + ReplyHandler: PusReplyHandler, + ActiveRequestMap: ActiveRequestMapProvider, + ActiveRequestInfo: ActiveRequestProvider, + RequestType, + ReplyType, + > + PusTargetedRequestService< + TcReceiver, + TmSender, + TcInMemConverter, + VerificationReporter, + RequestConverter, + ReplyHandler, + ActiveRequestMap, + ActiveRequestInfo, + RequestType, + ReplyType, + > +where + GenericRequestRouter: PusRequestRouter, +{ + pub fn new( + service_helper: PusServiceHelper< + TcReceiver, + TmSender, + TcInMemConverter, + VerificationReporter, >, - _tc: &PusTcReader, - error: Self::Error, + request_converter: RequestConverter, + active_request_map: ActiveRequestMap, + reply_hook: ReplyHandler, + request_router: GenericRequestRouter, + reply_receiver: mpsc::Receiver>, + ) -> Self { + Self { + service_helper, + request_converter, + active_request_map, + reply_handler: reply_hook, + request_router, + reply_receiver, + phantom: std::marker::PhantomData, + } + } + + pub fn poll_and_handle_next_tc( + &mut self, + time_stamp: &[u8], + ) -> Result { + let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?; + if possible_packet.is_none() { + return Ok(PusPacketHandlerResult::Empty); + } + let ecss_tc_and_token = possible_packet.unwrap(); + self.service_helper + .tc_in_mem_converter_mut() + .cache(&ecss_tc_and_token.tc_in_memory)?; + let tc = self.service_helper.tc_in_mem_converter().convert()?; + let (mut request_info, request) = match self.request_converter.convert( + self.service_helper.id(), + ecss_tc_and_token.token, + &tc, + self.service_helper.tm_sender(), + &self.service_helper.common.verif_reporter, + time_stamp, + ) { + Ok((info, req)) => (info, req), + Err(e) => { + self.handle_conversion_to_request_error(&e, 
ecss_tc_and_token.token, time_stamp); + return Err(e.into()); + } + }; + let accepted_token: VerificationToken = request_info + .token() + .try_into() + .expect("token not in expected accepted state"); + let verif_request_id = verification::RequestId::new(&tc).raw(); + match self.request_router.route( + MessageMetadata::new(verif_request_id, self.service_helper.id()), + request_info.target_id(), + request, + ) { + Ok(()) => { + let started_token = self + .service_helper + .verif_reporter() + .start_success( + self.service_helper.id(), + &self.service_helper.common.tm_sender, + accepted_token, + time_stamp, + ) + .expect("Start success failure"); + request_info.set_token(started_token.into()); + self.active_request_map + .insert(&verif_request_id, request_info); + } + Err(e) => { + self.request_router.handle_error_generic( + &request_info, + &tc, + e.clone(), + self.service_helper.tm_sender(), + self.service_helper.verif_reporter(), + time_stamp, + ); + return Err(e.into()); + } + } + Ok(PusPacketHandlerResult::RequestHandled) + } + + fn handle_conversion_to_request_error( + &mut self, + error: &GenericConversionError, + token: VerificationToken, time_stamp: &[u8], - verif_reporter: &impl VerificationReportingProvider, ) { - warn!("Routing request for service {SERVICE_ID} failed: {error:?}"); match error { - GenericRoutingError::UnknownTargetId(id) => { - let mut fail_data: [u8; 8] = [0; 8]; - fail_data.copy_from_slice(&id.to_be_bytes()); - verif_reporter - .start_failure( + GenericConversionError::WrongService(service) => { + let service_slice: [u8; 1] = [*service]; + self.service_helper + .verif_reporter() + .completion_failure( + self.service_helper.id(), + self.service_helper.tm_sender(), token, - FailParams::new(time_stamp, &tmtc_err::UNKNOWN_TARGET_ID, &fail_data), + FailParams::new(time_stamp, &tmtc_err::INVALID_PUS_SERVICE, &service_slice), ) - .expect("Sending start failure failed"); + .expect("Sending completion failure failed"); } - GenericRoutingError::SendError(_) => { - let mut fail_data: [u8; 8] = [0; 8]; - fail_data.copy_from_slice(&target_id.to_be_bytes()); - verif_reporter - .start_failure( + GenericConversionError::InvalidSubservice(subservice) => { + let subservice_slice: [u8; 1] = [*subservice]; + self.service_helper + .verif_reporter() + .completion_failure( + self.service_helper.id(), + self.service_helper.tm_sender(), token, - FailParams::new(time_stamp, &tmtc_err::ROUTING_ERROR, &fail_data), + FailParams::new( + time_stamp, + &tmtc_err::INVALID_PUS_SUBSERVICE, + &subservice_slice, + ), ) - .expect("Sending start failure failed"); + .expect("Sending completion failure failed"); } - GenericRoutingError::NotEnoughAppData { expected, found } => { - let mut context_info = (found as u32).to_be_bytes().to_vec(); - context_info.extend_from_slice(&(expected as u32).to_be_bytes()); - verif_reporter - .start_failure( + GenericConversionError::NotEnoughAppData { expected, found } => { + let mut context_info = (*found as u32).to_be_bytes().to_vec(); + context_info.extend_from_slice(&(*expected as u32).to_be_bytes()); + self.service_helper + .verif_reporter() + .completion_failure( + self.service_helper.id(), + self.service_helper.tm_sender(), token, FailParams::new(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA, &context_info), ) - .expect("Sending start failure failed"); + .expect("Sending completion failure failed"); + } + // Do nothing.. this is service-level and can not be handled generically here. 
+ GenericConversionError::InvalidAppData(_) => (), + } + } + + pub fn poll_and_check_next_reply(&mut self, time_stamp: &[u8]) -> Result { + match self.reply_receiver.try_recv() { + Ok(reply) => { + self.handle_reply(&reply, time_stamp)?; + Ok(false) + } + Err(e) => match e { + mpsc::TryRecvError::Empty => Ok(true), + mpsc::TryRecvError::Disconnected => Err(EcssTmtcError::Receive( + GenericReceiveError::TxDisconnected(None), + )), + }, + } + } + + pub fn handle_reply( + &mut self, + reply: &GenericMessage, + time_stamp: &[u8], + ) -> Result<(), EcssTmtcError> { + let active_req_opt = self.active_request_map.get(reply.request_id()); + if active_req_opt.is_none() { + self.reply_handler.handle_unrequested_reply( + self.service_helper.id(), + reply, + &self.service_helper.common.tm_sender, + )?; + return Ok(()); + } + let active_request = active_req_opt.unwrap(); + let request_finished = self + .reply_handler + .handle_reply( + self.service_helper.id(), + reply, + active_request, + &self.service_helper.common.tm_sender, + &self.service_helper.common.verif_reporter, + time_stamp, + ) + .unwrap_or(false); + if request_finished { + self.active_request_map.remove(reply.request_id()); + } + Ok(()) + } + + pub fn check_for_request_timeouts(&mut self) { + let mut requests_to_delete = Vec::new(); + self.active_request_map + .for_each(|request_id, request_info| { + if request_info.has_timed_out() { + requests_to_delete.push(*request_id); + } + }); + if !requests_to_delete.is_empty() { + for request_id in requests_to_delete { + self.active_request_map.remove(request_id); } } } } + +/// Generic timeout handling: Handle the verification failure with a dedicated return code +/// and also log the error. +pub fn generic_pus_request_timeout_handler( + sender_id: ComponentId, + sender: &(impl EcssTmSenderCore + ?Sized), + active_request: &(impl ActiveRequestProvider + Debug), + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + service_str: &'static str, +) -> Result<(), EcssTmtcError> { + log::warn!("timeout for active request {active_request:?} on {service_str} service"); + let started_token: VerificationToken = active_request + .token() + .try_into() + .expect("token not in expected started state"); + verification_handler.completion_failure( + sender_id, + sender, + started_token, + FailParams::new(time_stamp, &tmtc_err::REQUEST_TIMEOUT, &[]), + )?; + Ok(()) +} + +#[cfg(test)] +pub(crate) mod tests { + use std::time::Duration; + + use satrs::pus::test_util::TEST_COMPONENT_ID; + use satrs::pus::{MpscTmAsVecSender, PusTmAsVec, PusTmVariant}; + use satrs::request::RequestId; + use satrs::{ + pus::{ + verification::test_util::TestVerificationReporter, ActivePusRequestStd, + ActiveRequestMapProvider, EcssTcInVecConverter, MpscTcReceiver, + }, + request::UniqueApidTargetId, + spacepackets::{ + ecss::{ + tc::{PusTcCreator, PusTcSecondaryHeader}, + WritablePusPacket, + }, + SpHeader, + }, + }; + + use crate::requests::CompositeRequest; + + use super::*; + + // Testbench dedicated to the testing of [PusReplyHandler]s + pub struct ReplyHandlerTestbench< + ReplyHandler: PusReplyHandler, + ActiveRequestInfo: ActiveRequestProvider, + Reply, + > { + pub id: ComponentId, + pub verif_reporter: TestVerificationReporter, + pub reply_handler: ReplyHandler, + pub tm_receiver: mpsc::Receiver, + pub default_timeout: Duration, + tm_sender: MpscTmAsVecSender, + phantom: std::marker::PhantomData<(ActiveRequestInfo, Reply)>, + } + + impl< + ReplyHandler: PusReplyHandler, + ActiveRequestInfo: 
ActiveRequestProvider, + Reply, + > ReplyHandlerTestbench + { + pub fn new(reply_handler: ReplyHandler) -> Self { + let test_verif_reporter = TestVerificationReporter::default(); + let (tm_sender, tm_receiver) = mpsc::channel(); + Self { + id: TEST_COMPONENT_ID.raw(), + verif_reporter: test_verif_reporter, + reply_handler, + default_timeout: Duration::from_secs(30), + tm_sender, + tm_receiver, + phantom: std::marker::PhantomData, + } + } + + pub fn add_tc( + &mut self, + apid: u16, + apid_target: u32, + time_stamp: &[u8], + ) -> (verification::RequestId, ActivePusRequestStd) { + let mut sp_header = SpHeader::tc_unseg(apid, 0, 0).unwrap(); + let sec_header_dummy = PusTcSecondaryHeader::new_simple(0, 0); + let init = self.verif_reporter.add_tc(&PusTcCreator::new( + &mut sp_header, + sec_header_dummy, + &[], + true, + )); + let accepted = self + .verif_reporter + .acceptance_success(self.id, &self.tm_sender, init, time_stamp) + .expect("acceptance failed"); + let started = self + .verif_reporter + .start_success(self.id, &self.tm_sender, accepted, time_stamp) + .expect("start failed"); + ( + started.request_id(), + ActivePusRequestStd::new( + UniqueApidTargetId::new(apid, apid_target).raw(), + started, + self.default_timeout, + ), + ) + } + + pub fn handle_reply( + &mut self, + reply: &GenericMessage, + active_request: &ActiveRequestInfo, + time_stamp: &[u8], + ) -> Result { + self.reply_handler.handle_reply( + self.id, + reply, + active_request, + &self.tm_sender, + &self.verif_reporter, + time_stamp, + ) + } + + pub fn handle_unrequested_reply( + &mut self, + reply: &GenericMessage, + ) -> Result<(), ReplyHandler::Error> { + self.reply_handler + .handle_unrequested_reply(self.id, reply, &self.tm_sender) + } + pub fn handle_request_timeout( + &mut self, + active_request_info: &ActiveRequestInfo, + time_stamp: &[u8], + ) -> Result<(), ReplyHandler::Error> { + self.reply_handler.handle_request_timeout( + self.id, + active_request_info, + &self.tm_sender, + &self.verif_reporter, + time_stamp, + ) + } + } + + #[derive(Default)] + pub struct DummySender {} + + /// Dummy sender component which does nothing on the [Self::send_tm] call. + /// + /// Useful for unit tests. 
+ impl EcssTmSenderCore for DummySender { + fn send_tm(&self, _source_id: ComponentId, _tm: PusTmVariant) -> Result<(), EcssTmtcError> { + Ok(()) + } + } + + // Testbench dedicated to the testing of [PusTcToRequestConverter]s + pub struct PusConverterTestbench< + Converter: PusTcToRequestConverter, + ActiveRequestInfo: ActiveRequestProvider, + Request, + > { + pub id: ComponentId, + pub verif_reporter: TestVerificationReporter, + pub converter: Converter, + dummy_sender: DummySender, + current_request_id: Option, + current_packet: Option>, + phantom: std::marker::PhantomData<(ActiveRequestInfo, Request)>, + } + + impl< + Converter: PusTcToRequestConverter, + ActiveRequestInfo: ActiveRequestProvider, + Request, + > PusConverterTestbench + { + pub fn new(converter: Converter) -> Self { + let test_verif_reporter = TestVerificationReporter::default(); + Self { + id: TEST_COMPONENT_ID.raw(), + verif_reporter: test_verif_reporter, + converter, + dummy_sender: DummySender::default(), + current_request_id: None, + current_packet: None, + phantom: std::marker::PhantomData, + } + } + + pub fn add_tc(&mut self, tc: &PusTcCreator) -> VerificationToken { + let token = self.verif_reporter.add_tc(tc); + self.current_request_id = Some(verification::RequestId::new(tc)); + self.current_packet = Some(tc.to_vec().unwrap()); + self.verif_reporter + .acceptance_success(self.id, &self.dummy_sender, token, &[]) + .expect("acceptance failed") + } + + pub fn request_id(&self) -> Option { + self.current_request_id + } + + pub fn convert( + &mut self, + token: VerificationToken, + time_stamp: &[u8], + expected_apid: u16, + expected_apid_target: u32, + ) -> Result<(ActiveRequestInfo, Request), Converter::Error> { + if self.current_packet.is_none() { + return Err(GenericConversionError::InvalidAppData( + "call add_tc first".to_string(), + )); + } + let current_packet = self.current_packet.take().unwrap(); + let tc_reader = PusTcReader::new(¤t_packet).unwrap(); + let (active_info, request) = self.converter.convert( + self.id, + token, + &tc_reader.0, + &self.dummy_sender, + &self.verif_reporter, + time_stamp, + )?; + assert_eq!( + active_info.token().request_id(), + self.request_id().expect("no request id is set") + ); + assert_eq!( + active_info.target_id(), + UniqueApidTargetId::new(expected_apid, expected_apid_target).raw() + ); + Ok((active_info, request)) + } + } + + pub struct TargetedPusRequestTestbench< + RequestConverter: PusTcToRequestConverter, + ReplyHandler: PusReplyHandler, + ActiveRequestMap: ActiveRequestMapProvider, + ActiveRequestInfo: ActiveRequestProvider, + RequestType, + ReplyType, + > { + pub service: PusTargetedRequestService< + MpscTcReceiver, + MpscTmAsVecSender, + EcssTcInVecConverter, + TestVerificationReporter, + RequestConverter, + ReplyHandler, + ActiveRequestMap, + ActiveRequestInfo, + RequestType, + ReplyType, + >, + pub request_id: Option, + pub tm_funnel_rx: mpsc::Receiver, + pub pus_packet_tx: mpsc::Sender, + pub reply_tx: mpsc::Sender>, + pub request_rx: mpsc::Receiver>, + } +} diff --git a/satrs-example/src/pus/mode.rs b/satrs-example/src/pus/mode.rs new file mode 100644 index 0000000..6d1c1b1 --- /dev/null +++ b/satrs-example/src/pus/mode.rs @@ -0,0 +1,442 @@ +use log::{error, warn}; +use std::sync::mpsc; +use std::time::Duration; + +use crate::requests::GenericRequestRouter; +use satrs::pool::SharedStaticMemoryPool; +use satrs::pus::verification::VerificationReporter; +use satrs::pus::{ + DefaultActiveRequestMap, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, 
+ EcssTcInVecConverter, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, + PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender, +}; +use satrs::request::GenericMessage; +use satrs::{ + mode::{ModeAndSubmode, ModeReply, ModeRequest}, + pus::{ + mode::Subservice, + verification::{ + self, FailParams, TcStateAccepted, TcStateStarted, VerificationReportingProvider, + VerificationToken, + }, + ActivePusRequestStd, ActiveRequestProvider, EcssTmSenderCore, EcssTmtcError, + GenericConversionError, PusReplyHandler, PusTcToRequestConverter, PusTmVariant, + }, + request::UniqueApidTargetId, + spacepackets::{ + ecss::{ + tc::PusTcReader, + tm::{PusTmCreator, PusTmSecondaryHeader}, + PusPacket, + }, + SpHeader, + }, + ComponentId, +}; +use satrs_example::config::components::PUS_MODE_SERVICE; +use satrs_example::config::{mode_err, tmtc_err}; + +use super::{ + create_verification_reporter, generic_pus_request_timeout_handler, PusTargetedRequestService, + TargetedPusService, +}; + +#[derive(Default)] +pub struct ModeReplyHandler {} + +impl PusReplyHandler for ModeReplyHandler { + type Error = EcssTmtcError; + + fn handle_unrequested_reply( + &mut self, + _caller_id: ComponentId, + reply: &GenericMessage, + _tm_sender: &impl EcssTmSenderCore, + ) -> Result<(), Self::Error> { + log::warn!("received unexpected reply for mode service 5: {reply:?}"); + Ok(()) + } + + fn handle_reply( + &mut self, + caller_id: ComponentId, + reply: &GenericMessage, + active_request: &ActivePusRequestStd, + tm_sender: &impl EcssTmSenderCore, + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result { + let started_token: VerificationToken = active_request + .token() + .try_into() + .expect("invalid token state"); + match reply.message { + ModeReply::ModeReply(mode_reply) => { + let mut source_data: [u8; 12] = [0; 12]; + mode_reply + .write_to_be_bytes(&mut source_data) + .expect("writing mode reply failed"); + let req_id = verification::RequestId::from(reply.request_id()); + let mut sp_header = SpHeader::tm_unseg(req_id.packet_id().apid(), 0, 0) + .expect("generating SP header failed"); + let sec_header = PusTmSecondaryHeader::new( + 200, + Subservice::TmModeReply as u8, + 0, + 0, + Some(time_stamp), + ); + let pus_tm = PusTmCreator::new(&mut sp_header, sec_header, &source_data, true); + tm_sender.send_tm(caller_id, PusTmVariant::Direct(pus_tm))?; + verification_handler.completion_success( + caller_id, + tm_sender, + started_token, + time_stamp, + )?; + } + ModeReply::CantReachMode(error_code) => { + verification_handler.completion_failure( + caller_id, + tm_sender, + started_token, + FailParams::new(time_stamp, &error_code, &[]), + )?; + } + ModeReply::WrongMode { expected, reached } => { + let mut error_info: [u8; 24] = [0; 24]; + let mut written_len = expected + .write_to_be_bytes(&mut error_info[0..ModeAndSubmode::RAW_LEN]) + .expect("writing expected mode failed"); + written_len += reached + .write_to_be_bytes(&mut error_info[ModeAndSubmode::RAW_LEN..]) + .expect("writing reached mode failed"); + verification_handler.completion_failure( + caller_id, + tm_sender, + started_token, + FailParams::new( + time_stamp, + &mode_err::WRONG_MODE, + &error_info[..written_len], + ), + )?; + } + }; + Ok(true) + } + + fn handle_request_timeout( + &mut self, + caller_id: ComponentId, + active_request: &ActivePusRequestStd, + tm_sender: &impl EcssTmSenderCore, + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> 
Result<(), Self::Error> { + generic_pus_request_timeout_handler( + caller_id, + tm_sender, + active_request, + verification_handler, + time_stamp, + "HK", + )?; + Ok(()) + } +} + +#[derive(Default)] +pub struct ModeRequestConverter {} + +impl PusTcToRequestConverter for ModeRequestConverter { + type Error = GenericConversionError; + + fn convert( + &mut self, + + caller_id: ComponentId, + token: VerificationToken, + tc: &PusTcReader, + tm_sender: &(impl EcssTmSenderCore + ?Sized), + verif_reporter: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result<(ActivePusRequestStd, ModeRequest), Self::Error> { + let subservice = tc.subservice(); + let user_data = tc.user_data(); + let not_enough_app_data = |expected: usize| { + verif_reporter + .start_failure( + caller_id, + tm_sender, + token, + FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA), + ) + .expect("Sending start failure failed"); + Err(GenericConversionError::NotEnoughAppData { + expected, + found: user_data.len(), + }) + }; + if user_data.len() < core::mem::size_of::() { + return not_enough_app_data(4); + } + let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).unwrap(); + let active_request = + ActivePusRequestStd::new(target_id_and_apid.into(), token, Duration::from_secs(30)); + let subservice_typed = Subservice::try_from(subservice); + let invalid_subservice = || { + // Invalid subservice + verif_reporter + .start_failure( + caller_id, + tm_sender, + token, + FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE), + ) + .expect("Sending start failure failed"); + Err(GenericConversionError::InvalidSubservice(subservice)) + }; + if subservice_typed.is_err() { + return invalid_subservice(); + } + let subservice_typed = subservice_typed.unwrap(); + match subservice_typed { + Subservice::TcSetMode => { + if user_data.len() < core::mem::size_of::() + ModeAndSubmode::RAW_LEN { + return not_enough_app_data(4 + ModeAndSubmode::RAW_LEN); + } + let mode_and_submode = ModeAndSubmode::from_be_bytes(&tc.user_data()[4..]) + .expect("mode and submode extraction failed"); + Ok((active_request, ModeRequest::SetMode(mode_and_submode))) + } + Subservice::TcReadMode => Ok((active_request, ModeRequest::ReadMode)), + Subservice::TcAnnounceMode => Ok((active_request, ModeRequest::AnnounceMode)), + Subservice::TcAnnounceModeRecursive => { + Ok((active_request, ModeRequest::AnnounceModeRecursive)) + } + _ => invalid_subservice(), + } + } +} + +pub fn create_mode_service_static( + tm_sender: TmInSharedPoolSender>, + tc_pool: SharedStaticMemoryPool, + pus_action_rx: mpsc::Receiver, + mode_router: GenericRequestRouter, + reply_receiver: mpsc::Receiver>, +) -> ModeServiceWrapper { + let mode_request_handler = PusTargetedRequestService::new( + PusServiceHelper::new( + PUS_MODE_SERVICE.raw(), + pus_action_rx, + tm_sender, + create_verification_reporter(PUS_MODE_SERVICE.apid), + EcssTcInSharedStoreConverter::new(tc_pool, 2048), + ), + ModeRequestConverter::default(), + DefaultActiveRequestMap::default(), + ModeReplyHandler::default(), + mode_router, + reply_receiver, + ); + ModeServiceWrapper { + service: mode_request_handler, + } +} + +pub fn create_mode_service_dynamic( + tm_funnel_tx: mpsc::Sender, + pus_action_rx: mpsc::Receiver, + mode_router: GenericRequestRouter, + reply_receiver: mpsc::Receiver>, +) -> ModeServiceWrapper { + let mode_request_handler = PusTargetedRequestService::new( + PusServiceHelper::new( + PUS_MODE_SERVICE.raw(), + pus_action_rx, + tm_funnel_tx, + 
create_verification_reporter(PUS_MODE_SERVICE.apid), + EcssTcInVecConverter::default(), + ), + ModeRequestConverter::default(), + DefaultActiveRequestMap::default(), + ModeReplyHandler::default(), + mode_router, + reply_receiver, + ); + ModeServiceWrapper { + service: mode_request_handler, + } +} + +pub struct ModeServiceWrapper { + pub(crate) service: PusTargetedRequestService< + MpscTcReceiver, + TmSender, + TcInMemConverter, + VerificationReporter, + ModeRequestConverter, + ModeReplyHandler, + DefaultActiveRequestMap, + ActivePusRequestStd, + ModeRequest, + ModeReply, + >, +} + +impl TargetedPusService + for ModeServiceWrapper +{ + /// Returns [true] if the packet handling is finished. + fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool { + match self.service.poll_and_handle_next_tc(time_stamp) { + Ok(result) => match result { + PusPacketHandlerResult::RequestHandled => {} + PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { + warn!("PUS mode service: partial packet handling success: {e:?}") + } + PusPacketHandlerResult::CustomSubservice(invalid, _) => { + warn!("PUS mode service: invalid subservice {invalid}"); + } + PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => { + warn!("PUS mode service: {subservice} not implemented"); + } + PusPacketHandlerResult::Empty => { + return true; + } + }, + Err(error) => { + error!("PUS mode service: packet handling error: {error:?}") + } + } + false + } + + fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> bool { + self.service + .poll_and_check_next_reply(time_stamp) + .unwrap_or_else(|e| { + warn!("PUS action service: Handling reply failed with error {e:?}"); + false + }) + } + + fn check_for_request_timeouts(&mut self) { + self.service.check_for_request_timeouts(); + } +} +#[cfg(test)] +mod tests { + use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID, TEST_UNIQUE_ID}; + use satrs::request::MessageMetadata; + use satrs::{ + mode::{ModeAndSubmode, ModeReply, ModeRequest}, + pus::mode::Subservice, + request::GenericMessage, + spacepackets::{ + ecss::tc::{PusTcCreator, PusTcSecondaryHeader}, + SpHeader, + }, + }; + use satrs_example::config::tmtc_err; + + use crate::pus::{ + mode::ModeReplyHandler, + tests::{PusConverterTestbench, ReplyHandlerTestbench}, + }; + + use super::ModeRequestConverter; + + #[test] + fn mode_converter_read_mode_request() { + let mut testbench = PusConverterTestbench::new(ModeRequestConverter::default()); + let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); + let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcReadMode as u8); + let mut app_data: [u8; 4] = [0; 4]; + app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID.to_be_bytes()); + let tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true); + let token = testbench.add_tc(&tc); + let (_active_req, req) = testbench + .convert(token, &[], TEST_APID, TEST_UNIQUE_ID) + .expect("conversion has failed"); + assert_eq!(req, ModeRequest::ReadMode); + } + + #[test] + fn mode_converter_set_mode_request() { + let mut testbench = PusConverterTestbench::new(ModeRequestConverter::default()); + let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); + let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcSetMode as u8); + let mut app_data: [u8; 4 + ModeAndSubmode::RAW_LEN] = [0; 4 + ModeAndSubmode::RAW_LEN]; + let mode_and_submode = ModeAndSubmode::new(2, 1); + app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID.to_be_bytes()); + mode_and_submode + .write_to_be_bytes(&mut 
app_data[4..]) + .unwrap(); + let tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true); + let token = testbench.add_tc(&tc); + let (_active_req, req) = testbench + .convert(token, &[], TEST_APID, TEST_UNIQUE_ID) + .expect("conversion has failed"); + assert_eq!(req, ModeRequest::SetMode(mode_and_submode)); + } + + #[test] + fn mode_converter_announce_mode() { + let mut testbench = PusConverterTestbench::new(ModeRequestConverter::default()); + let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); + let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceMode as u8); + let mut app_data: [u8; 4] = [0; 4]; + app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID.to_be_bytes()); + let tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true); + let token = testbench.add_tc(&tc); + let (_active_req, req) = testbench + .convert(token, &[], TEST_APID, TEST_UNIQUE_ID) + .expect("conversion has failed"); + assert_eq!(req, ModeRequest::AnnounceMode); + } + + #[test] + fn mode_converter_announce_mode_recursively() { + let mut testbench = PusConverterTestbench::new(ModeRequestConverter::default()); + let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); + let sec_header = + PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceModeRecursive as u8); + let mut app_data: [u8; 4] = [0; 4]; + app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID.to_be_bytes()); + let tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true); + let token = testbench.add_tc(&tc); + let (_active_req, req) = testbench + .convert(token, &[], TEST_APID, TEST_UNIQUE_ID) + .expect("conversion has failed"); + assert_eq!(req, ModeRequest::AnnounceModeRecursive); + } + + #[test] + fn reply_handling_unrequested_reply() { + let mut testbench = ReplyHandlerTestbench::new(ModeReplyHandler::default()); + let mode_reply = ModeReply::ModeReply(ModeAndSubmode::new(5, 1)); + let unrequested_reply = + GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), mode_reply); + // Right now this function does not do a lot. We simply check that it does not panic or do + // weird stuff. 
+ let result = testbench.handle_unrequested_reply(&unrequested_reply); + assert!(result.is_ok()); + } + + #[test] + fn reply_handling_reply_timeout() { + let mut testbench = ReplyHandlerTestbench::new(ModeReplyHandler::default()); + let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID, &[]); + let result = testbench.handle_request_timeout(&active_request, &[]); + assert!(result.is_ok()); + testbench.verif_reporter.assert_completion_failure( + TEST_COMPONENT_ID.raw(), + req_id, + None, + tmtc_err::REQUEST_TIMEOUT.raw() as u64, + ); + } +} diff --git a/satrs-example/src/pus/scheduler.rs b/satrs-example/src/pus/scheduler.rs index c5d2c06..c80574e 100644 --- a/satrs-example/src/pus/scheduler.rs +++ b/satrs-example/src/pus/scheduler.rs @@ -1,23 +1,18 @@ use std::sync::mpsc; use std::time::Duration; +use crate::pus::create_verification_reporter; use log::{error, info, warn}; -use satrs::pool::{PoolProvider, StaticMemoryPool, StoreAddr}; +use satrs::pool::{PoolProvider, StaticMemoryPool}; use satrs::pus::scheduler::{PusScheduler, TcInfo}; -use satrs::pus::scheduler_srv::PusService11SchedHandler; -use satrs::pus::verification::std_mod::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, -}; -use satrs::pus::verification::VerificationReportingProvider; +use satrs::pus::scheduler_srv::PusSchedServiceHandler; +use satrs::pus::verification::VerificationReporter; use satrs::pus::{ EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, - EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, PusServiceHelper, - TmAsVecSenderWithId, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc, - TmInSharedPoolSenderWithId, + EcssTmSenderCore, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, + PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender, }; -use satrs::tmtc::tm_helper::SharedTmPool; -use satrs::ChannelId; -use satrs_example::config::{TcReceiverId, TmSenderId, PUS_APID}; +use satrs_example::config::components::PUS_SCHED_SERVICE; use crate::tmtc::PusTcSourceProviderSharedPool; @@ -55,14 +50,12 @@ impl TcReleaser for mpsc::Sender> { } } -pub struct Pus11Wrapper< - TcReceiver: EcssTcReceiverCore, +pub struct SchedulingServiceWrapper< TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, > { - pub pus_11_handler: PusService11SchedHandler< - TcReceiver, + pub pus_11_handler: PusSchedServiceHandler< + MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter, @@ -73,12 +66,8 @@ pub struct Pus11Wrapper< pub tc_releaser: Box, } -impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - > Pus11Wrapper +impl + SchedulingServiceWrapper { pub fn release_tcs(&mut self) { let releaser = |enabled: bool, info: &TcInfo, tc: &[u8]| -> bool { @@ -103,8 +92,11 @@ impl< } } - pub fn handle_next_packet(&mut self) -> bool { - match self.pus_11_handler.handle_one_tc(&mut self.sched_tc_pool) { + pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool { + match self + .pus_11_handler + .poll_and_handle_next_tc(time_stamp, &mut self.sched_tc_pool) + { Ok(result) => match result { PusPacketHandlerResult::RequestHandled => {} PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { @@ -129,42 +121,24 @@ impl< } pub fn create_scheduler_service_static( - 
shared_tm_store: SharedTmPool, - tm_funnel_tx: mpsc::SyncSender, - verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender, + tm_sender: TmInSharedPoolSender>, tc_releaser: PusTcSourceProviderSharedPool, pus_sched_rx: mpsc::Receiver, sched_tc_pool: StaticMemoryPool, -) -> Pus11Wrapper< - MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, - EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, -> { - let sched_srv_tm_sender = TmInSharedPoolSenderWithId::new( - TmSenderId::PusSched as ChannelId, - "PUS_11_TM_SENDER", - shared_tm_store.clone(), - tm_funnel_tx.clone(), - ); - let sched_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusSched as ChannelId, - "PUS_11_TC_RECV", - pus_sched_rx, - ); +) -> SchedulingServiceWrapper { let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5)) .expect("Creating PUS Scheduler failed"); - let pus_11_handler = PusService11SchedHandler::new( + let pus_11_handler = PusSchedServiceHandler::new( PusServiceHelper::new( - sched_srv_receiver, - sched_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_SCHED_SERVICE.raw(), + pus_sched_rx, + tm_sender, + create_verification_reporter(PUS_SCHED_SERVICE.apid), EcssTcInSharedStoreConverter::new(tc_releaser.clone_backing_pool(), 2048), ), scheduler, ); - Pus11Wrapper { + SchedulingServiceWrapper { pus_11_handler, sched_tc_pool, releaser_buf: [0; 4096], @@ -173,40 +147,26 @@ pub fn create_scheduler_service_static( } pub fn create_scheduler_service_dynamic( - tm_funnel_tx: mpsc::Sender>, - verif_reporter: VerificationReporterWithVecMpscSender, + tm_funnel_tx: mpsc::Sender, tc_source_sender: mpsc::Sender>, pus_sched_rx: mpsc::Receiver, sched_tc_pool: StaticMemoryPool, -) -> Pus11Wrapper< - MpscTcReceiver, - TmAsVecSenderWithMpsc, - EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, -> { - let sched_srv_tm_sender = TmAsVecSenderWithId::new( - TmSenderId::PusSched as ChannelId, - "PUS_11_TM_SENDER", - tm_funnel_tx, - ); - let sched_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusSched as ChannelId, - "PUS_11_TC_RECV", - pus_sched_rx, - ); +) -> SchedulingServiceWrapper { + //let sched_srv_receiver = + //MpscTcReceiver::new(PUS_SCHED_SERVICE.raw(), "PUS_11_TC_RECV", pus_sched_rx); let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5)) .expect("Creating PUS Scheduler failed"); - let pus_11_handler = PusService11SchedHandler::new( + let pus_11_handler = PusSchedServiceHandler::new( PusServiceHelper::new( - sched_srv_receiver, - sched_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_SCHED_SERVICE.raw(), + pus_sched_rx, + tm_funnel_tx, + create_verification_reporter(PUS_SCHED_SERVICE.apid), EcssTcInVecConverter::default(), ), scheduler, ); - Pus11Wrapper { + SchedulingServiceWrapper { pus_11_handler, sched_tc_pool, releaser_buf: [0; 4096], diff --git a/satrs-example/src/pus/stack.rs b/satrs-example/src/pus/stack.rs index ed06e08..96ca438 100644 --- a/satrs-example/src/pus/stack.rs +++ b/satrs-example/src/pus/stack.rs @@ -1,69 +1,69 @@ -use satrs::pus::{ - verification::VerificationReportingProvider, EcssTcInMemConverter, EcssTcReceiverCore, - EcssTmSenderCore, +use crate::pus::mode::ModeServiceWrapper; +use derive_new::new; +use satrs::{ + pus::{EcssTcInMemConverter, EcssTmSenderCore}, + spacepackets::time::{cds, TimeWriter}, }; use super::{ - action::Pus8Wrapper, event::Pus5Wrapper, hk::Pus3Wrapper, scheduler::Pus11Wrapper, - test::Service17CustomWrapper, + action::ActionServiceWrapper, 
event::EventServiceWrapper, hk::HkServiceWrapper, + scheduler::SchedulingServiceWrapper, test::TestCustomServiceWrapper, TargetedPusService, }; -pub struct PusStack< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, -> { - event_srv: Pus5Wrapper, - hk_srv: Pus3Wrapper, - action_srv: Pus8Wrapper, - schedule_srv: Pus11Wrapper, - test_srv: Service17CustomWrapper, +#[derive(new)] +pub struct PusStack { + test_srv: TestCustomServiceWrapper, + hk_srv_wrapper: HkServiceWrapper, + event_srv: EventServiceWrapper, + action_srv_wrapper: ActionServiceWrapper, + schedule_srv: SchedulingServiceWrapper, + mode_srv: ModeServiceWrapper, } -impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - > PusStack +impl + PusStack { - pub fn new( - hk_srv: Pus3Wrapper, - event_srv: Pus5Wrapper, - action_srv: Pus8Wrapper, - schedule_srv: Pus11Wrapper, - test_srv: Service17CustomWrapper< - TcReceiver, - TmSender, - TcInMemConverter, - VerificationReporter, - >, - ) -> Self { - Self { - event_srv, - action_srv, - schedule_srv, - test_srv, - hk_srv, - } - } - pub fn periodic_operation(&mut self) { + // Release all telecommands which reached their release time before calling the service + // handlers. self.schedule_srv.release_tcs(); + let time_stamp = cds::CdsTime::now_with_u16_days() + .expect("time stamp generation error") + .to_vec() + .unwrap(); loop { - let mut all_queues_empty = true; - let mut is_srv_finished = |srv_handler_finished: bool| { - if !srv_handler_finished { - all_queues_empty = false; - } - }; - is_srv_finished(self.test_srv.handle_next_packet()); - is_srv_finished(self.schedule_srv.handle_next_packet()); - is_srv_finished(self.event_srv.handle_next_packet()); - is_srv_finished(self.action_srv.handle_next_packet()); - is_srv_finished(self.hk_srv.handle_next_packet()); - if all_queues_empty { + let mut nothing_to_do = true; + let mut is_srv_finished = + |tc_handling_done: bool, reply_handling_done: Option| { + if !tc_handling_done + || (reply_handling_done.is_some() && !reply_handling_done.unwrap()) + { + nothing_to_do = false; + } + }; + is_srv_finished(self.test_srv.poll_and_handle_next_packet(&time_stamp), None); + is_srv_finished(self.schedule_srv.poll_and_handle_next_tc(&time_stamp), None); + is_srv_finished(self.event_srv.poll_and_handle_next_tc(&time_stamp), None); + is_srv_finished( + self.action_srv_wrapper.poll_and_handle_next_tc(&time_stamp), + Some( + self.action_srv_wrapper + .poll_and_handle_next_reply(&time_stamp), + ), + ); + is_srv_finished( + self.hk_srv_wrapper.poll_and_handle_next_tc(&time_stamp), + Some(self.hk_srv_wrapper.poll_and_handle_next_reply(&time_stamp)), + ); + is_srv_finished( + self.mode_srv.poll_and_handle_next_tc(&time_stamp), + Some(self.mode_srv.poll_and_handle_next_reply(&time_stamp)), + ); + if nothing_to_do { + // Timeout checking is only done once. 
+ self.action_srv_wrapper.check_for_request_timeouts(); + self.hk_srv_wrapper.check_for_request_timeouts(); + self.mode_srv.check_for_request_timeouts(); break; } } diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index 4b0164b..a42a250 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -1,118 +1,74 @@ +use crate::pus::create_verification_reporter; use log::{info, warn}; use satrs::params::Params; -use satrs::pool::{SharedStaticMemoryPool, StoreAddr}; +use satrs::pool::SharedStaticMemoryPool; use satrs::pus::test::PusService17TestHandler; -use satrs::pus::verification::{FailParams, VerificationReportingProvider}; -use satrs::pus::verification::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, -}; +use satrs::pus::verification::{FailParams, VerificationReporter, VerificationReportingProvider}; use satrs::pus::{ - EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter, EcssTcReceiverCore, - EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, PusServiceHelper, - TmAsVecSenderWithId, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc, - TmInSharedPoolSenderWithId, + EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter, EcssTmSenderCore, MpscTcReceiver, + MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, PusServiceHelper, + PusTmAsVec, PusTmInPool, TmInSharedPoolSender, }; use satrs::spacepackets::ecss::tc::PusTcReader; use satrs::spacepackets::ecss::PusPacket; use satrs::spacepackets::time::cds::CdsTime; use satrs::spacepackets::time::TimeWriter; -use satrs::tmtc::tm_helper::SharedTmPool; -use satrs::ChannelId; use satrs::{events::EventU32, pus::EcssTcInSharedStoreConverter}; -use satrs_example::config::{tmtc_err, TcReceiverId, TmSenderId, PUS_APID, TEST_EVENT}; -use std::sync::mpsc::{self, Sender}; +use satrs_example::config::components::PUS_TEST_SERVICE; +use satrs_example::config::{tmtc_err, TEST_EVENT}; +use std::sync::mpsc; pub fn create_test_service_static( - shared_tm_store: SharedTmPool, - tm_funnel_tx: mpsc::SyncSender, - verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender, + tm_sender: TmInSharedPoolSender>, tc_pool: SharedStaticMemoryPool, event_sender: mpsc::Sender<(EventU32, Option)>, pus_test_rx: mpsc::Receiver, -) -> Service17CustomWrapper< - MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, - EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, -> { - let test_srv_tm_sender = TmInSharedPoolSenderWithId::new( - TmSenderId::PusTest as ChannelId, - "PUS_17_TM_SENDER", - shared_tm_store.clone(), - tm_funnel_tx.clone(), - ); - let test_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusTest as ChannelId, - "PUS_17_TC_RECV", - pus_test_rx, - ); +) -> TestCustomServiceWrapper { let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new( - test_srv_receiver, - test_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_TEST_SERVICE.raw(), + pus_test_rx, + tm_sender, + create_verification_reporter(PUS_TEST_SERVICE.apid), EcssTcInSharedStoreConverter::new(tc_pool, 2048), )); - Service17CustomWrapper { - pus17_handler, + TestCustomServiceWrapper { + handler: pus17_handler, test_srv_event_sender: event_sender, } } pub fn create_test_service_dynamic( - tm_funnel_tx: mpsc::Sender>, - verif_reporter: VerificationReporterWithVecMpscSender, + tm_funnel_tx: mpsc::Sender, event_sender: mpsc::Sender<(EventU32, Option)>, pus_test_rx: mpsc::Receiver, -) -> 
Service17CustomWrapper< - MpscTcReceiver, - TmAsVecSenderWithMpsc, - EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, -> { - let test_srv_tm_sender = TmAsVecSenderWithId::new( - TmSenderId::PusTest as ChannelId, - "PUS_17_TM_SENDER", - tm_funnel_tx.clone(), - ); - let test_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusTest as ChannelId, - "PUS_17_TC_RECV", - pus_test_rx, - ); +) -> TestCustomServiceWrapper { let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new( - test_srv_receiver, - test_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_TEST_SERVICE.raw(), + pus_test_rx, + tm_funnel_tx, + create_verification_reporter(PUS_TEST_SERVICE.apid), EcssTcInVecConverter::default(), )); - Service17CustomWrapper { - pus17_handler, + TestCustomServiceWrapper { + handler: pus17_handler, test_srv_event_sender: event_sender, } } -pub struct Service17CustomWrapper< - TcReceiver: EcssTcReceiverCore, +pub struct TestCustomServiceWrapper< TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, > { - pub pus17_handler: - PusService17TestHandler, - pub test_srv_event_sender: Sender<(EventU32, Option)>, + pub handler: + PusService17TestHandler, + pub test_srv_event_sender: mpsc::Sender<(EventU32, Option)>, } -impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - > Service17CustomWrapper +impl + TestCustomServiceWrapper { - pub fn handle_next_packet(&mut self) -> bool { - let res = self.pus17_handler.handle_one_tc(); + pub fn poll_and_handle_next_packet(&mut self, time_stamp: &[u8]) -> bool { + let res = self.handler.poll_and_handle_next_tc(time_stamp); if res.is_err() { warn!("PUS17 handler failed with error {:?}", res.unwrap_err()); return true; @@ -133,7 +89,7 @@ impl< } PusPacketHandlerResult::CustomSubservice(subservice, token) => { let (tc, _) = PusTcReader::new( - self.pus17_handler + self.handler .service_helper .tc_in_mem_converter .tc_slice_raw(), @@ -148,25 +104,34 @@ impl< .send((TEST_EVENT.into(), None)) .expect("Sending test event failed"); let start_token = self - .pus17_handler + .handler .service_helper - .common - .verification_handler - .start_success(token, &stamp_buf) + .verif_reporter() + .start_success( + self.handler.service_helper.common.id, + self.handler.service_helper.tm_sender(), + token, + &stamp_buf, + ) .expect("Error sending start success"); - self.pus17_handler + self.handler .service_helper - .common - .verification_handler - .completion_success(start_token, &stamp_buf) + .verif_reporter() + .completion_success( + self.handler.service_helper.id(), + self.handler.service_helper.tm_sender(), + start_token, + &stamp_buf, + ) .expect("Error sending completion success"); } else { let fail_data = [tc.subservice()]; - self.pus17_handler + self.handler .service_helper - .common - .verification_handler + .verif_reporter() .start_failure( + self.handler.service_helper.id(), + self.handler.service_helper.tm_sender(), token, FailParams::new( &stamp_buf, diff --git a/satrs-example/src/requests.rs b/satrs-example/src/requests.rs index 6703d93..1fa6f5d 100644 --- a/satrs-example/src/requests.rs +++ b/satrs-example/src/requests.rs @@ -1,94 +1,151 @@ use std::collections::HashMap; use std::sync::mpsc; -use derive_new::new; +use log::warn; use satrs::action::ActionRequest; use satrs::hk::HkRequest; use satrs::mode::ModeRequest; -use 
satrs::pus::action::PusActionRequestRouter; -use satrs::pus::hk::PusHkRequestRouter; -use satrs::pus::verification::{TcStateAccepted, VerificationToken}; -use satrs::pus::GenericRoutingError; +use satrs::pus::verification::{ + FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken, +}; +use satrs::pus::{ActiveRequestProvider, EcssTmSenderCore, GenericRoutingError, PusRequestRouter}; use satrs::queue::GenericSendError; -use satrs::TargetId; +use satrs::request::{GenericMessage, MessageMetadata}; +use satrs::spacepackets::ecss::tc::PusTcReader; +use satrs::spacepackets::ecss::PusPacket; +use satrs::ComponentId; +use satrs_example::config::components::PUS_ROUTING_SERVICE; +use satrs_example::config::tmtc_err; -#[allow(dead_code)] -#[derive(Clone, Eq, PartialEq, Debug)] +#[derive(Clone, Debug)] #[non_exhaustive] -pub enum Request { +pub enum CompositeRequest { Hk(HkRequest), - Mode(ModeRequest), Action(ActionRequest), } -#[derive(Clone, Eq, PartialEq, Debug, new)] -pub struct TargetedRequest { - pub(crate) target_id: TargetId, - pub(crate) request: Request, +#[derive(Clone)] +pub struct GenericRequestRouter { + pub id: ComponentId, + // All messages which do not have a dedicated queue. + pub composite_router_map: HashMap>>, + pub mode_router_map: HashMap>>, } -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct RequestWithToken { - pub(crate) targeted_request: TargetedRequest, - pub(crate) token: VerificationToken, -} - -impl RequestWithToken { - pub fn new( - target_id: TargetId, - request: Request, - token: VerificationToken, - ) -> Self { +impl Default for GenericRequestRouter { + fn default() -> Self { Self { - targeted_request: TargetedRequest::new(target_id, request), - token, + id: PUS_ROUTING_SERVICE.raw(), + composite_router_map: Default::default(), + mode_router_map: Default::default(), } } } - -#[derive(Default, Clone)] -pub struct GenericRequestRouter(pub HashMap>); - -impl PusHkRequestRouter for GenericRequestRouter { +impl GenericRequestRouter { + pub(crate) fn handle_error_generic( + &self, + active_request: &impl ActiveRequestProvider, + tc: &PusTcReader, + error: GenericRoutingError, + tm_sender: &(impl EcssTmSenderCore + ?Sized), + verif_reporter: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) { + warn!( + "Routing request for service {} failed: {error:?}", + tc.service() + ); + let accepted_token: VerificationToken = active_request + .token() + .try_into() + .expect("token is not in accepted state"); + match error { + GenericRoutingError::UnknownTargetId(id) => { + let mut fail_data: [u8; 8] = [0; 8]; + fail_data.copy_from_slice(&id.to_be_bytes()); + verif_reporter + .completion_failure( + self.id, + tm_sender, + accepted_token, + FailParams::new(time_stamp, &tmtc_err::UNKNOWN_TARGET_ID, &fail_data), + ) + .expect("Sending start failure failed"); + } + GenericRoutingError::Send(_) => { + let mut fail_data: [u8; 8] = [0; 8]; + fail_data.copy_from_slice(&active_request.target_id().to_be_bytes()); + verif_reporter + .completion_failure( + self.id, + tm_sender, + accepted_token, + FailParams::new(time_stamp, &tmtc_err::ROUTING_ERROR, &fail_data), + ) + .expect("Sending start failure failed"); + } + } + } +} +impl PusRequestRouter for GenericRequestRouter { type Error = GenericRoutingError; fn route( &self, - target_id: TargetId, + requestor_info: MessageMetadata, + target_id: ComponentId, hk_request: HkRequest, - token: VerificationToken, ) -> Result<(), Self::Error> { - if let Some(sender) = self.0.get(&target_id) { + if let Some(sender) = 
self.composite_router_map.get(&target_id) { sender - .send(RequestWithToken::new( - target_id, - Request::Hk(hk_request), - token, + .send(GenericMessage::new( + requestor_info, + CompositeRequest::Hk(hk_request), )) - .map_err(|_| GenericRoutingError::SendError(GenericSendError::RxDisconnected))?; + .map_err(|_| GenericRoutingError::Send(GenericSendError::RxDisconnected))?; + return Ok(()); } - Ok(()) + Err(GenericRoutingError::UnknownTargetId(target_id)) } } -impl PusActionRequestRouter for GenericRequestRouter { +impl PusRequestRouter for GenericRequestRouter { type Error = GenericRoutingError; fn route( &self, - target_id: TargetId, + requestor_info: MessageMetadata, + target_id: ComponentId, action_request: ActionRequest, - token: VerificationToken, ) -> Result<(), Self::Error> { - if let Some(sender) = self.0.get(&target_id) { + if let Some(sender) = self.composite_router_map.get(&target_id) { sender - .send(RequestWithToken::new( - target_id, - Request::Action(action_request), - token, + .send(GenericMessage::new( + requestor_info, + CompositeRequest::Action(action_request), )) - .map_err(|_| GenericRoutingError::SendError(GenericSendError::RxDisconnected))?; + .map_err(|_| GenericRoutingError::Send(GenericSendError::RxDisconnected))?; + return Ok(()); } - Ok(()) + Err(GenericRoutingError::UnknownTargetId(target_id)) + } +} + +impl PusRequestRouter for GenericRequestRouter { + type Error = GenericRoutingError; + + fn route( + &self, + requestor_info: MessageMetadata, + target_id: ComponentId, + request: ModeRequest, + ) -> Result<(), Self::Error> { + if let Some(sender) = self.mode_router_map.get(&target_id) { + sender + .send(GenericMessage::new(requestor_info, request)) + .map_err(|_| GenericRoutingError::Send(GenericSendError::RxDisconnected))?; + return Ok(()); + } + Err(GenericRoutingError::UnknownTargetId(target_id)) } } diff --git a/satrs-example/src/tcp.rs b/satrs-example/src/tcp.rs index 014f300..561a030 100644 --- a/satrs-example/src/tcp.rs +++ b/satrs-example/src/tcp.rs @@ -10,11 +10,16 @@ use satrs::{ spacepackets::PacketId, tmtc::{CcsdsDistributor, CcsdsError, ReceivesCcsdsTc, TmPacketSourceCore}, }; -use satrs_example::config::PUS_APID; +use satrs_example::config::components; use crate::ccsds::CcsdsReceiver; -pub const PACKET_ID_LOOKUP: &[PacketId] = &[PacketId::const_tc(true, PUS_APID)]; +pub const PACKET_ID_LOOKUP: &[PacketId] = &[ + PacketId::const_tc(true, components::Apid::GenericPus as u16), + PacketId::const_tc(true, components::Apid::EventTm as u16), + PacketId::const_tc(true, components::Apid::Acs as u16), + PacketId::const_tc(true, components::Apid::Sched as u16), +]; #[derive(Default, Clone)] pub struct SyncTcpTmSource { diff --git a/satrs-example/src/tm_funnel.rs b/satrs-example/src/tm_funnel.rs index 8b6285f..61cddd1 100644 --- a/satrs-example/src/tm_funnel.rs +++ b/satrs-example/src/tm_funnel.rs @@ -4,8 +4,9 @@ use std::{ }; use log::info; +use satrs::pus::{PusTmAsVec, PusTmInPool}; use satrs::{ - pool::{PoolProvider, StoreAddr}, + pool::PoolProvider, seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore}, spacepackets::{ ecss::{tm::PusTmZeroCopyWriter, PusPacket}, @@ -77,16 +78,16 @@ impl TmFunnelCommon { pub struct TmFunnelStatic { common: TmFunnelCommon, shared_tm_store: SharedTmPool, - tm_funnel_rx: mpsc::Receiver, - tm_server_tx: mpsc::SyncSender, + tm_funnel_rx: mpsc::Receiver, + tm_server_tx: mpsc::SyncSender, } impl TmFunnelStatic { pub fn new( shared_tm_store: SharedTmPool, sync_tm_tcp_source: SyncTcpTmSource, - tm_funnel_rx: 
mpsc::Receiver, - tm_server_tx: mpsc::SyncSender, + tm_funnel_rx: mpsc::Receiver, + tm_server_tx: mpsc::SyncSender, ) -> Self { Self { common: TmFunnelCommon::new(sync_tm_tcp_source), @@ -97,14 +98,14 @@ impl TmFunnelStatic { } pub fn operation(&mut self) { - if let Ok(addr) = self.tm_funnel_rx.recv() { + if let Ok(pus_tm_in_pool) = self.tm_funnel_rx.recv() { // Read the TM, set sequence counter and message counter, and finally update // the CRC. let shared_pool = self.shared_tm_store.clone_backing_pool(); let mut pool_guard = shared_pool.write().expect("Locking TM pool failed"); let mut tm_copy = Vec::new(); pool_guard - .modify(&addr, |buf| { + .modify(&pus_tm_in_pool.store_addr, |buf| { let zero_copy_writer = PusTmZeroCopyWriter::new(buf, MIN_CDS_FIELD_LEN) .expect("Creating TM zero copy writer failed"); self.common.apply_packet_processing(zero_copy_writer); @@ -112,7 +113,7 @@ impl TmFunnelStatic { }) .expect("Reading TM from pool failed"); self.tm_server_tx - .send(addr) + .send(pus_tm_in_pool) .expect("Sending TM to server failed"); // We could also do this step in the update closure, but I'd rather avoid this, could // lead to nested locking. @@ -123,15 +124,15 @@ impl TmFunnelStatic { pub struct TmFunnelDynamic { common: TmFunnelCommon, - tm_funnel_rx: mpsc::Receiver>, - tm_server_tx: mpsc::Sender>, + tm_funnel_rx: mpsc::Receiver, + tm_server_tx: mpsc::Sender, } impl TmFunnelDynamic { pub fn new( sync_tm_tcp_source: SyncTcpTmSource, - tm_funnel_rx: mpsc::Receiver>, - tm_server_tx: mpsc::Sender>, + tm_funnel_rx: mpsc::Receiver, + tm_server_tx: mpsc::Sender, ) -> Self { Self { common: TmFunnelCommon::new(sync_tm_tcp_source), @@ -144,13 +145,13 @@ impl TmFunnelDynamic { if let Ok(mut tm) = self.tm_funnel_rx.recv() { // Read the TM, set sequence counter and message counter, and finally update // the CRC. - let zero_copy_writer = PusTmZeroCopyWriter::new(&mut tm, MIN_CDS_FIELD_LEN) + let zero_copy_writer = PusTmZeroCopyWriter::new(&mut tm.packet, MIN_CDS_FIELD_LEN) .expect("Creating TM zero copy writer failed"); self.common.apply_packet_processing(zero_copy_writer); + self.common.sync_tm_tcp_source.add_tm(&tm.packet); self.tm_server_tx - .send(tm.clone()) + .send(tm) .expect("Sending TM to server failed"); - self.common.sync_tm_tcp_source.add_tm(&tm); } } } diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index 0a43504..43d5889 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -1,8 +1,7 @@ use log::warn; -use satrs::pus::verification::std_mod::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, +use satrs::pus::{ + EcssTcAndToken, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, ReceivesEcssPusTc, }; -use satrs::pus::{EcssTcAndToken, ReceivesEcssPusTc}; use satrs::spacepackets::SpHeader; use std::sync::mpsc::{self, Receiver, SendError, Sender, SyncSender, TryRecvError}; use thiserror::Error; @@ -100,14 +99,14 @@ pub struct TcSourceTaskStatic { shared_tc_pool: SharedTcPool, tc_receiver: Receiver, tc_buf: [u8; 4096], - pus_receiver: PusReceiver, + pus_receiver: PusReceiver, } impl TcSourceTaskStatic { pub fn new( shared_tc_pool: SharedTcPool, tc_receiver: Receiver, - pus_receiver: PusReceiver, + pus_receiver: PusReceiver, ) -> Self { Self { shared_tc_pool, @@ -164,13 +163,13 @@ impl TcSourceTaskStatic { // TC source components where the heap is the backing memory of the received telecommands. 
pub struct TcSourceTaskDynamic { pub tc_receiver: Receiver>, - pus_receiver: PusReceiver, + pus_receiver: PusReceiver, } impl TcSourceTaskDynamic { pub fn new( tc_receiver: Receiver>, - pus_receiver: PusReceiver, + pus_receiver: PusReceiver, ) -> Self { Self { tc_receiver, diff --git a/satrs-example/src/udp.rs b/satrs-example/src/udp.rs index b6d1f6b..c2f26fa 100644 --- a/satrs-example/src/udp.rs +++ b/satrs-example/src/udp.rs @@ -1,12 +1,11 @@ -use std::{ - net::{SocketAddr, UdpSocket}, - sync::mpsc::Receiver, -}; +use std::net::{SocketAddr, UdpSocket}; +use std::sync::mpsc; use log::{info, warn}; +use satrs::pus::{PusTmAsVec, PusTmInPool}; use satrs::{ hal::std::udp_server::{ReceiveResult, UdpTcServer}, - pool::{PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr}, + pool::{PoolProviderWithGuards, SharedStaticMemoryPool}, tmtc::CcsdsError, }; @@ -15,20 +14,20 @@ pub trait UdpTmHandler { } pub struct StaticUdpTmHandler { - pub tm_rx: Receiver, + pub tm_rx: mpsc::Receiver, pub tm_store: SharedStaticMemoryPool, } impl UdpTmHandler for StaticUdpTmHandler { fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, &recv_addr: &SocketAddr) { - while let Ok(addr) = self.tm_rx.try_recv() { + while let Ok(pus_tm_in_pool) = self.tm_rx.try_recv() { let store_lock = self.tm_store.write(); if store_lock.is_err() { warn!("Locking TM store failed"); continue; } let mut store_lock = store_lock.unwrap(); - let pg = store_lock.read_with_guard(addr); + let pg = store_lock.read_with_guard(pus_tm_in_pool.store_addr); let read_res = pg.read_as_vec(); if read_res.is_err() { warn!("Error reading TM pool data"); @@ -44,20 +43,20 @@ impl UdpTmHandler for StaticUdpTmHandler { } pub struct DynamicUdpTmHandler { - pub tm_rx: Receiver>, + pub tm_rx: mpsc::Receiver, } impl UdpTmHandler for DynamicUdpTmHandler { fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, recv_addr: &SocketAddr) { while let Ok(tm) = self.tm_rx.try_recv() { - if tm.len() > 9 { - let service = tm[7]; - let subservice = tm[8]; + if tm.packet.len() > 9 { + let service = tm.packet[7]; + let subservice = tm.packet[8]; info!("Sending PUS TM[{service},{subservice}]") } else { info!("Sending PUS TM"); } - let result = socket.send_to(&tm, recv_addr); + let result = socket.send_to(&tm.packet, recv_addr); if let Err(e) = result { warn!("Sending TM with UDP socket failed: {e}") } @@ -120,7 +119,7 @@ mod tests { }, tmtc::ReceivesTcCore, }; - use satrs_example::config::{OBSW_SERVER_ADDR, PUS_APID}; + use satrs_example::config::{components, OBSW_SERVER_ADDR}; use super::*; @@ -178,7 +177,7 @@ mod tests { udp_tc_server, tm_handler, }; - let mut sph = SpHeader::tc_unseg(PUS_APID, 0, 0).unwrap(); + let mut sph = SpHeader::tc_unseg(components::Apid::GenericPus as u16, 0, 0).unwrap(); let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true) .to_vec() .unwrap(); diff --git a/satrs-mib/Cargo.toml b/satrs-mib/Cargo.toml index 9024b86..e97971d 100644 --- a/satrs-mib/Cargo.toml +++ b/satrs-mib/Cargo.toml @@ -24,7 +24,7 @@ optional = true [dependencies.satrs-shared] path = "../satrs-shared" -version = "0.1.2" +version = "0.1.3" features = ["serde"] [dependencies.satrs-mib-codegen] diff --git a/satrs-mib/codegen/Cargo.toml b/satrs-mib/codegen/Cargo.toml index a25358d..43ba785 100644 --- a/satrs-mib/codegen/Cargo.toml +++ b/satrs-mib/codegen/Cargo.toml @@ -28,7 +28,7 @@ features = ["full"] trybuild = { version = "1", features = ["diff"] } [dev-dependencies.satrs-shared] -version = "0.1.2" +version = "0.1.3" path = "../../satrs-shared" 
[dev-dependencies.satrs-mib] diff --git a/satrs-shared/Cargo.toml b/satrs-shared/Cargo.toml index e706d6a..7cb4caf 100644 --- a/satrs-shared/Cargo.toml +++ b/satrs-shared/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "satrs-shared" description = "Components shared by multiple sat-rs crates" -version = "0.1.2" +version = "0.1.3" edition = "2021" authors = ["Robin Mueller "] homepage = "https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/" diff --git a/satrs/CHANGELOG.md b/satrs/CHANGELOG.md index b0bc493..5319992 100644 --- a/satrs/CHANGELOG.md +++ b/satrs/CHANGELOG.md @@ -18,11 +18,16 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Refactored ECSS TM sender abstractions to be generic over different message queue backends. - Refactored Verification Reporter abstractions and implementation to be generic over the sender instead of using trait objects. +- Renamed `WritableToBeBytes::raw_len` to `WritableToBeBytes::written_len` for consistency. - `PusServiceProvider` renamed to `PusServiceDistributor` to make the purpose of the object more clear - `PusServiceProvider::handle_pus_tc_packet` renamed to `PusServiceDistributor::distribute_packet`. - `PusServiceDistibutor` and `CcsdsDistributor` now use generics instead of trait objects. This makes accessing the concrete trait implementations more easy as well. +- Major overhaul of the PUS handling module. +- Replace `TargetId` by `ComponentId`. +- Replace most usages of `ChannelId` by `ComponentId`. A dedicated channel ID has limited usage + due to the nature of typed channels in Rust. ## Fixed diff --git a/satrs/Cargo.toml b/satrs/Cargo.toml index ad11272..4237bbb 100644 --- a/satrs/Cargo.toml +++ b/satrs/Cargo.toml @@ -19,7 +19,7 @@ smallvec = "1" crc = "3" [dependencies.satrs-shared] -version = "0.1.2" +version = "0.1.3" path = "../satrs-shared" [dependencies.num_enum] @@ -117,6 +117,7 @@ alloc = [ serde = ["dep:serde", "spacepackets/serde", "satrs-shared/serde"] crossbeam = ["crossbeam-channel"] heapless = ["dep:heapless"] +test_util = [] doc-images = [] [package.metadata.docs.rs] diff --git a/satrs/src/action.rs b/satrs/src/action.rs index 7caeaa6..d2b15d8 100644 --- a/satrs/src/action.rs +++ b/satrs/src/action.rs @@ -1,63 +1,77 @@ -use crate::{pool::StoreAddr, TargetId}; +use crate::{params::Params, pool::StoreAddr}; + +#[cfg(feature = "alloc")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +pub use alloc_mod::*; pub type ActionId = u32; +#[derive(Debug, Eq, PartialEq, Clone)] +pub struct ActionRequest { + pub action_id: ActionId, + pub variant: ActionRequestVariant, +} + +impl ActionRequest { + pub fn new(action_id: ActionId, variant: ActionRequestVariant) -> Self { + Self { action_id, variant } + } +} + #[non_exhaustive] #[derive(Clone, Eq, PartialEq, Debug)] -pub enum ActionRequest { - UnsignedIdAndStoreData { - action_id: ActionId, - data_addr: StoreAddr, - }, +pub enum ActionRequestVariant { + NoData, + StoreData(StoreAddr), #[cfg(feature = "alloc")] - UnsignedIdAndVecData { - action_id: ActionId, - data: alloc::vec::Vec, - }, - #[cfg(feature = "alloc")] - StringIdAndVecData { - action_id: alloc::string::String, - data: alloc::vec::Vec, - }, - #[cfg(feature = "alloc")] - StringIdAndStoreData { - action_id: alloc::string::String, - data: StoreAddr, - }, + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + VecData(alloc::vec::Vec), } -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct TargetedActionRequest { - target: TargetId, - action_request: ActionRequest, -} - -impl TargetedActionRequest { - pub fn 
new(target: TargetId, action_request: ActionRequest) -> Self { - Self { - target, - action_request, - } - } +#[derive(Debug, PartialEq, Clone)] +pub struct ActionReply { + pub action_id: ActionId, + pub variant: ActionReplyVariant, } /// A reply to an action request. #[non_exhaustive] -#[derive(Clone, Eq, PartialEq, Debug)] -pub enum ActionReply { - CompletionFailed(ActionId), - StepFailed { - id: ActionId, - step: u32, - }, - Completed(ActionId), - #[cfg(feature = "alloc")] - CompletedStringId(alloc::string::String), - #[cfg(feature = "alloc")] - CompletionFailedStringId(alloc::string::String), - #[cfg(feature = "alloc")] - StepFailedStringId { - id: alloc::string::String, - step: u32, - }, +#[derive(Clone, Debug, PartialEq)] +pub enum ActionReplyVariant { + CompletionFailed(Params), + StepFailed { step: u32, reason: Params }, + Completed, } + +#[cfg(feature = "alloc")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +pub mod alloc_mod { + use super::*; + + #[cfg(feature = "alloc")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + #[derive(Debug, Eq, PartialEq, Clone)] + pub struct ActionRequestStringId { + pub action_id: alloc::string::String, + pub variant: ActionRequestVariant, + } + + #[cfg(feature = "alloc")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + impl ActionRequestStringId { + pub fn new(action_id: alloc::string::String, variant: ActionRequestVariant) -> Self { + Self { action_id, variant } + } + } + + #[cfg(feature = "alloc")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + #[derive(Debug, PartialEq, Clone)] + pub struct ActionReplyStringId { + pub action_id: alloc::string::String, + pub variant: ActionReplyVariant, + } +} + +#[cfg(test)] +mod tests {} diff --git a/satrs/src/cfdp/dest.rs b/satrs/src/cfdp/dest.rs index b42df3a..4a87ce6 100644 --- a/satrs/src/cfdp/dest.rs +++ b/satrs/src/cfdp/dest.rs @@ -5,7 +5,7 @@ use std::path::{Path, PathBuf}; use super::{ filestore::{FilestoreError, VirtualFilestore}, user::{CfdpUser, FileSegmentRecvdParams, MetadataReceivedParams}, - CheckTimer, CheckTimerCreator, EntityType, LocalEntityConfig, PacketInfo, PacketTarget, + CheckTimerCreator, CountdownProvider, EntityType, LocalEntityConfig, PacketInfo, PacketTarget, RemoteEntityConfig, RemoteEntityConfigProvider, State, TimerContext, TransactionId, TransactionStep, }; @@ -54,7 +54,7 @@ struct TransferState { completion_disposition: CompletionDisposition, checksum: u32, current_check_count: u32, - current_check_timer: Option>, + current_check_timer: Option>, } impl Default for TransferState { @@ -799,9 +799,9 @@ mod tests { }; use crate::cfdp::{ - filestore::NativeFilestore, user::OwnedMetadataRecvdParams, CheckTimer, CheckTimerCreator, - DefaultFaultHandler, IndicationConfig, RemoteEntityConfig, StdRemoteEntityConfigProvider, - UserFaultHandler, CRC_32, + filestore::NativeFilestore, user::OwnedMetadataRecvdParams, CheckTimerCreator, + CountdownProvider, DefaultFaultHandler, IndicationConfig, RemoteEntityConfig, + StdRemoteEntityConfigProvider, UserFaultHandler, CRC_32, }; use super::*; @@ -1057,7 +1057,7 @@ mod tests { expired: Arc, } - impl CheckTimer for TestCheckTimer { + impl CountdownProvider for TestCheckTimer { fn has_expired(&self) -> bool { self.expired.load(core::sync::atomic::Ordering::Relaxed) } @@ -1088,7 +1088,10 @@ mod tests { } impl CheckTimerCreator for TestCheckTimerCreator { - fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box { + fn get_check_timer_provider( + &self, + timer_context: TimerContext, + ) -> Box { match 
timer_context { TimerContext::CheckLimit { .. } => { Box::new(TestCheckTimer::new(self.check_limit_expired_flag.clone())) diff --git a/satrs/src/cfdp/mod.rs b/satrs/src/cfdp/mod.rs index 8c88fda..c2f6d01 100644 --- a/satrs/src/cfdp/mod.rs +++ b/satrs/src/cfdp/mod.rs @@ -17,6 +17,8 @@ use alloc::boxed::Box; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +use crate::time::CountdownProvider; + #[cfg(feature = "std")] pub mod dest; #[cfg(feature = "alloc")] @@ -45,7 +47,15 @@ pub enum TimerContext { }, } -/// Generic abstraction for a check timer which is used by 3 mechanisms of the CFDP protocol. +/// A generic trait which allows CFDP entities to create check timers which are required to +/// implement special procedures in unacknowledged transmission mode, as specified in 4.6.3.2 +/// and 4.6.3.3. +/// +/// This trait also allows the creation of different check timers depending on context and purpose +/// of the timer, the runtime environment (e.g. standard clock timer vs. timer using a RTC) or +/// other factors. +/// +/// The countdown timer is used by 3 mechanisms of the CFDP protocol. /// /// ## 1. Check limit handling /// @@ -74,22 +84,9 @@ pub enum TimerContext { /// The timer will be used to perform the Positive Acknowledgement Procedures as specified in /// 4.7. 1of the CFDP standard. The expiration period will be provided by the Positive ACK timer /// interval of the remote entity configuration. -pub trait CheckTimer: Debug { - fn has_expired(&self) -> bool; - fn reset(&mut self); -} - -/// A generic trait which allows CFDP entities to create check timers which are required to -/// implement special procedures in unacknowledged transmission mode, as specified in 4.6.3.2 -/// and 4.6.3.3. The [CheckTimer] documentation provides more information about the purpose of the -/// check timer in the context of CFDP. -/// -/// This trait also allows the creation of different check timers depending on context and purpose -/// of the timer, the runtime environment (e.g. standard clock timer vs. timer using a RTC) or -/// other factors. #[cfg(feature = "alloc")] pub trait CheckTimerCreator { - fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box; + fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box; } /// Simple implementation of the [CheckTimerCreator] trait assuming a standard runtime. @@ -112,7 +109,7 @@ impl StdCheckTimer { } #[cfg(feature = "std")] -impl CheckTimer for StdCheckTimer { +impl CountdownProvider for StdCheckTimer { fn has_expired(&self) -> bool { let elapsed_time = self.start_time.elapsed(); if elapsed_time.as_secs() > self.expiry_time_seconds { diff --git a/satrs/src/event_man.rs b/satrs/src/event_man.rs index 304f9a1..2b5a37b 100644 --- a/satrs/src/event_man.rs +++ b/satrs/src/event_man.rs @@ -11,7 +11,7 @@ //! about events first: //! //! The event manager has a listener table abstracted by the [ListenerMapProvider], which maps -//! listener groups identified by [ListenerKey]s to a [sender ID][ChannelId]. +//! listener groups identified by [ListenerKey]s to a [sender ID][ComponentId]. //! It also contains a sender table abstracted by the [SenderMapProvider] which maps these sender //! IDs to concrete [EventSendProvider]s. A simple approach would be to use one send event provider //! 
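Note on the event manager hunks above: the module documentation describes two routing tables, a listener map (ListenerKey to sender ComponentId) and a sender map (ComponentId to EventSendProvider), and this patch switches the sender identification from ChannelId to ComponentId. The following is a minimal sketch of that flow using the mpsc-backed sender touched later in this diff; the `satrs::event_man` import paths and the elided event-manager construction are assumptions, only the calls visible in these hunks are taken as given.

```rust
use std::sync::mpsc;

use satrs::event_man::{EventSendProvider, EventSenderMpsc};
use satrs::events::{EventU32, Severity};
use satrs::params::Params;

fn main() {
    // Channel transporting the event plus optional auxiliary data, mirroring the unit
    // tests further down in this diff.
    let (event_tx, event_rx) = mpsc::channel::<(EventU32, Option<Params>)>();
    // After this patch the sender is identified by a ComponentId instead of a ChannelId.
    let listener = EventSenderMpsc::new(0, event_tx);
    assert_eq!(listener.target_id(), 0);
    // With a constructed event manager (construction is not part of these hunks), the
    // sender would be registered roughly like this:
    //   event_man.subscribe_single(&event, listener.target_id());
    //   event_man.add_sender(listener);
    let event = EventU32::new(Severity::INFO, 0, 0).unwrap();
    listener.send(event, None).unwrap();
    let (received, aux) = event_rx.recv().unwrap();
    assert_eq!(received, event);
    assert!(aux.is_none());
}
```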
for each OBSW thread and then subscribe for all interesting events for a particular thread @@ -50,7 +50,7 @@ use crate::queue::GenericSendError; use core::marker::PhantomData; use core::slice::Iter; -use crate::ChannelId; +use crate::ComponentId; #[cfg(feature = "alloc")] pub use alloc_mod::*; @@ -74,7 +74,7 @@ pub type EventU32WithAuxData = EventWithAuxData; pub type EventU16WithAuxData = EventWithAuxData; pub trait EventSendProvider { - fn channel_id(&self) -> ChannelId; + fn target_id(&self) -> ComponentId; fn send_no_data(&self, event: EV) -> Result<(), GenericSendError> { self.send(event, None) @@ -95,46 +95,46 @@ pub trait ListenerMapProvider { #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] fn get_listeners(&self) -> alloc::vec::Vec; fn contains_listener(&self, key: &ListenerKey) -> bool; - fn get_listener_ids(&self, key: &ListenerKey) -> Option>; - fn add_listener(&mut self, key: ListenerKey, sender_id: ChannelId) -> bool; + fn get_listener_ids(&self, key: &ListenerKey) -> Option>; + fn add_listener(&mut self, key: ListenerKey, sender_id: ComponentId) -> bool; fn remove_duplicates(&mut self, key: &ListenerKey); } pub trait SenderMapProvider< - SP: EventSendProvider, - EV: GenericEvent = EventU32, - AUX = Params, + EventSender: EventSendProvider, + Ev: GenericEvent = EventU32, + Data = Params, > { - fn contains_send_event_provider(&self, id: &ChannelId) -> bool; + fn contains_send_event_provider(&self, target_id: &ComponentId) -> bool; - fn get_send_event_provider(&self, id: &ChannelId) -> Option<&SP>; - fn add_send_event_provider(&mut self, send_provider: SP) -> bool; + fn get_send_event_provider(&self, target_id: &ComponentId) -> Option<&EventSender>; + fn add_send_event_provider(&mut self, send_provider: EventSender) -> bool; } /// Generic event manager implementation. /// /// # Generics /// -/// * `ERP`: [EventReceiveProvider] used to receive all events. -/// * `SMP`: [SenderMapProvider] which maps channel IDs to send providers. -/// * `LTR`: [ListenerMapProvider] which maps listener keys to channel IDs. -/// * `SP`: [EventSendProvider] contained within the sender map which sends the events. -/// * `EV`: The event type. This type must implement the [GenericEvent]. Currently only [EventU32] +/// * `EventReceiver`: [EventReceiveProvider] used to receive all events. +/// * `SenderMap`: [SenderMapProvider] which maps channel IDs to send providers. +/// * `ListenerMap`: [ListenerMapProvider] which maps listener keys to channel IDs. +/// * `EventSender`: [EventSendProvider] contained within the sender map which sends the events. +/// * `Ev`: The event type. This type must implement the [GenericEvent]. Currently only [EventU32] /// and [EventU16] are supported. 
-/// * `AUX`: Auxiliary data which is sent with the event to provide optional context information +/// * `Data`: Auxiliary data which is sent with the event to provide optional context information pub struct EventManager< - ERP: EventReceiveProvider, - SMP: SenderMapProvider, - LTR: ListenerMapProvider, - SP: EventSendProvider, - EV: GenericEvent = EventU32, - AUX = Params, + EventReceiver: EventReceiveProvider, + SenderMap: SenderMapProvider, + ListenerMap: ListenerMapProvider, + EventSender: EventSendProvider, + Ev: GenericEvent = EventU32, + Data = Params, > { - event_receiver: ERP, - sender_map: SMP, - listener_map: LTR, - phantom: core::marker::PhantomData<(SP, EV, AUX)>, + event_receiver: EventReceiver, + sender_map: SenderMap, + listener_map: ListenerMap, + phantom: core::marker::PhantomData<(EventSender, Ev, Data)>, } #[derive(Debug)] @@ -153,35 +153,35 @@ pub enum EventRoutingResult { pub enum EventRoutingError { Send(GenericSendError), NoSendersForKey(ListenerKey), - NoSenderForId(ChannelId), + NoSenderForId(ComponentId), } #[derive(Debug)] -pub struct EventRoutingErrorsWithResult { - pub result: EventRoutingResult, +pub struct EventRoutingErrorsWithResult { + pub result: EventRoutingResult, pub errors: [Option; 3], } impl< - ER: EventReceiveProvider, - S: SenderMapProvider, - L: ListenerMapProvider, - SP: EventSendProvider, - EV: GenericEvent + Copy, - AUX: Clone, - > EventManager + EventReceiver: EventReceiveProvider, + SenderMap: SenderMapProvider, + ListenerMap: ListenerMapProvider, + EventSender: EventSendProvider, + Ev: GenericEvent + Copy, + Data: Clone, + > EventManager { pub fn remove_duplicates(&mut self, key: &ListenerKey) { self.listener_map.remove_duplicates(key) } /// Subscribe for a unique event. - pub fn subscribe_single(&mut self, event: &EV, sender_id: ChannelId) { + pub fn subscribe_single(&mut self, event: &Ev, sender_id: ComponentId) { self.update_listeners(ListenerKey::Single(event.raw_as_largest_type()), sender_id); } /// Subscribe for an event group. - pub fn subscribe_group(&mut self, group_id: LargestGroupIdRaw, sender_id: ChannelId) { + pub fn subscribe_group(&mut self, group_id: LargestGroupIdRaw, sender_id: ComponentId) { self.update_listeners(ListenerKey::Group(group_id), sender_id); } @@ -189,7 +189,7 @@ impl< /// /// For example, this can be useful for a handler component which sends every event as /// a telemetry packet. - pub fn subscribe_all(&mut self, sender_id: ChannelId) { + pub fn subscribe_all(&mut self, sender_id: ComponentId) { self.update_listeners(ListenerKey::All, sender_id); } } @@ -216,14 +216,14 @@ impl< pub fn add_sender(&mut self, send_provider: SP) { if !self .sender_map - .contains_send_event_provider(&send_provider.channel_id()) + .contains_send_event_provider(&send_provider.target_id()) { self.sender_map.add_send_event_provider(send_provider); } } /// Generic function to update the event subscribers. - fn update_listeners(&mut self, key: ListenerKey, sender_id: ChannelId) { + fn update_listeners(&mut self, key: ListenerKey, sender_id: ComponentId) { self.listener_map.add_listener(key, sender_id); } @@ -342,7 +342,7 @@ pub mod alloc_mod { /// Simple implementation which uses a [HashMap] and a [Vec] internally. 
#[derive(Default)] pub struct DefaultListenerMap { - listeners: HashMap>, + listeners: HashMap>, } impl ListenerMapProvider for DefaultListenerMap { @@ -358,11 +358,11 @@ pub mod alloc_mod { self.listeners.contains_key(key) } - fn get_listener_ids(&self, key: &ListenerKey) -> Option> { + fn get_listener_ids(&self, key: &ListenerKey) -> Option> { self.listeners.get(key).map(|vec| vec.iter()) } - fn add_listener(&mut self, key: ListenerKey, sender_id: ChannelId) -> bool { + fn add_listener(&mut self, key: ListenerKey, sender_id: ComponentId) -> bool { if let Some(existing_list) = self.listeners.get_mut(&key) { existing_list.push(sender_id); } else { @@ -388,7 +388,7 @@ pub mod alloc_mod { EV: GenericEvent = EventU32, AUX = Params, > { - senders: HashMap, + senders: HashMap, phantom: PhantomData<(EV, AUX)>, } @@ -406,18 +406,18 @@ pub mod alloc_mod { impl, EV: GenericEvent, AUX> SenderMapProvider for DefaultSenderMap { - fn contains_send_event_provider(&self, id: &ChannelId) -> bool { + fn contains_send_event_provider(&self, id: &ComponentId) -> bool { self.senders.contains_key(id) } - fn get_send_event_provider(&self, id: &ChannelId) -> Option<&SP> { + fn get_send_event_provider(&self, id: &ComponentId) -> Option<&SP> { self.senders .get(id) - .filter(|sender| sender.channel_id() == *id) + .filter(|sender| sender.target_id() == *id) } fn add_send_event_provider(&mut self, send_provider: SP) -> bool { - let id = send_provider.channel_id(); + let id = send_provider.target_id(); if self.senders.contains_key(&id) { return false; } @@ -458,19 +458,19 @@ pub mod std_mod { /// send events. #[derive(Clone)] pub struct EventSenderMpsc { - id: u32, + target_id: ComponentId, sender: mpsc::Sender<(Event, Option)>, } impl EventSenderMpsc { - pub fn new(id: u32, sender: mpsc::Sender<(Event, Option)>) -> Self { - Self { id, sender } + pub fn new(target_id: ComponentId, sender: mpsc::Sender<(Event, Option)>) -> Self { + Self { target_id, sender } } } impl EventSendProvider for EventSenderMpsc { - fn channel_id(&self) -> u32 { - self.id + fn target_id(&self) -> ComponentId { + self.target_id } fn send(&self, event: Event, aux_data: Option) -> Result<(), GenericSendError> { self.sender @@ -483,19 +483,19 @@ pub mod std_mod { /// events. This has the advantage that the channel is bounded and thus more deterministic. 
#[derive(Clone)] pub struct EventSenderMpscBounded { - channel_id: u32, + target_id: ComponentId, sender: mpsc::SyncSender<(Event, Option)>, capacity: usize, } impl EventSenderMpscBounded { pub fn new( - channel_id: u32, + target_id: ComponentId, sender: mpsc::SyncSender<(Event, Option)>, capacity: usize, ) -> Self { Self { - channel_id, + target_id, sender, capacity, } @@ -503,8 +503,8 @@ pub mod std_mod { } impl EventSendProvider for EventSenderMpscBounded { - fn channel_id(&self) -> u32 { - self.channel_id + fn target_id(&self) -> ComponentId { + self.target_id } fn send(&self, event: Event, aux_data: Option) -> Result<(), GenericSendError> { if let Err(e) = self.sender.try_send((event, aux_data)) { @@ -577,11 +577,11 @@ mod tests { let event_grp_1_0 = EventU32::new(Severity::HIGH, 1, 0).unwrap(); let (single_event_sender, single_event_receiver) = channel(); let single_event_listener = EventSenderMpsc::new(0, single_event_sender); - event_man.subscribe_single(&event_grp_0, single_event_listener.channel_id()); + event_man.subscribe_single(&event_grp_0, single_event_listener.target_id()); event_man.add_sender(single_event_listener); let (group_event_sender_0, group_event_receiver_0) = channel(); let group_event_listener = EventU32SenderMpsc::new(1, group_event_sender_0); - event_man.subscribe_group(event_grp_1_0.group_id(), group_event_listener.channel_id()); + event_man.subscribe_group(event_grp_1_0.group_id(), group_event_listener.target_id()); event_man.add_sender(group_event_listener); // Test event with one listener @@ -609,7 +609,7 @@ mod tests { let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap(); let (single_event_sender, single_event_receiver) = channel(); let single_event_listener = EventSenderMpsc::new(0, single_event_sender); - event_man.subscribe_single(&event_grp_0, single_event_listener.channel_id()); + event_man.subscribe_single(&event_grp_0, single_event_listener.target_id()); event_man.add_sender(single_event_listener); event_sender .send((event_grp_0, Some(Params::Heapless((2_u32, 3_u32).into())))) @@ -643,11 +643,11 @@ mod tests { let event_grp_0_and_1_listener = EventU32SenderMpsc::new(0, event_grp_0_sender); event_man.subscribe_group( event_grp_0.group_id(), - event_grp_0_and_1_listener.channel_id(), + event_grp_0_and_1_listener.target_id(), ); event_man.subscribe_group( event_grp_1_0.group_id(), - event_grp_0_and_1_listener.channel_id(), + event_grp_0_and_1_listener.target_id(), ); event_man.add_sender(event_grp_0_and_1_listener); @@ -679,10 +679,10 @@ mod tests { let (event_0_tx_1, event_0_rx_1) = channel(); let event_listener_0 = EventU32SenderMpsc::new(0, event_0_tx_0); let event_listener_1 = EventU32SenderMpsc::new(1, event_0_tx_1); - let event_listener_0_sender_id = event_listener_0.channel_id(); + let event_listener_0_sender_id = event_listener_0.target_id(); event_man.subscribe_single(&event_0, event_listener_0_sender_id); event_man.add_sender(event_listener_0); - let event_listener_1_sender_id = event_listener_1.channel_id(); + let event_listener_1_sender_id = event_listener_1.target_id(); event_man.subscribe_single(&event_0, event_listener_1_sender_id); event_man.add_sender(event_listener_1); event_sender @@ -732,7 +732,7 @@ mod tests { let event_1 = EventU32::new(Severity::HIGH, 1, 0).unwrap(); let (event_0_tx_0, all_events_rx) = channel(); let all_events_listener = EventU32SenderMpsc::new(0, event_0_tx_0); - event_man.subscribe_all(all_events_listener.channel_id()); + event_man.subscribe_all(all_events_listener.target_id()); 
event_man.add_sender(all_events_listener); event_sender .send((event_0, None)) diff --git a/satrs/src/hk.rs b/satrs/src/hk.rs index 8033e15..50edfda 100644 --- a/satrs/src/hk.rs +++ b/satrs/src/hk.rs @@ -1,40 +1,40 @@ -use crate::{ - pus::verification::{TcStateAccepted, VerificationToken}, - TargetId, -}; +use crate::ComponentId; pub type CollectionIntervalFactor = u32; +/// Unique Identifier for a certain housekeeping dataset. pub type UniqueId = u32; #[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum HkRequest { - OneShot(UniqueId), - Enable(UniqueId), - Disable(UniqueId), - ModifyCollectionInterval(UniqueId, CollectionIntervalFactor), +pub struct HkRequest { + pub unique_id: UniqueId, + pub variant: HkRequestVariant, +} + +impl HkRequest { + pub fn new(unique_id: UniqueId, variant: HkRequestVariant) -> Self { + Self { unique_id, variant } + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum HkRequestVariant { + OneShot, + EnablePeriodic, + DisablePeriodic, + ModifyCollectionInterval(CollectionIntervalFactor), } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct TargetedHkRequest { - pub target_id: TargetId, - pub hk_request: HkRequest, + pub target_id: ComponentId, + pub hk_request: HkRequestVariant, } impl TargetedHkRequest { - pub fn new(target_id: TargetId, hk_request: HkRequest) -> Self { + pub fn new(target_id: ComponentId, hk_request: HkRequestVariant) -> Self { Self { target_id, hk_request, } } } - -pub trait PusHkRequestRouter { - type Error; - fn route( - &self, - target_id: TargetId, - hk_request: HkRequest, - token: VerificationToken, - ) -> Result<(), Self::Error>; -} diff --git a/satrs/src/lib.rs b/satrs/src/lib.rs index 5040d58..ca5106b 100644 --- a/satrs/src/lib.rs +++ b/satrs/src/lib.rs @@ -32,7 +32,9 @@ pub mod events; #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] pub mod executable; pub mod hal; -pub mod objects; +#[cfg(feature = "std")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] +pub mod mode_tree; pub mod pool; pub mod power; pub mod pus; @@ -40,6 +42,7 @@ pub mod queue; pub mod request; pub mod res_code; pub mod seq_count; +pub mod time; pub mod tmtc; pub mod action; @@ -49,8 +52,7 @@ pub mod params; pub use spacepackets; -/// Generic channel ID type. -pub type ChannelId = u32; +pub use queue::ChannelId; -/// Generic target ID type. -pub type TargetId = u64; +/// Generic component ID type. 
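The reworked `hk.rs` above turns `HkRequest` from an enum that repeated the unique dataset ID in every variant into a struct holding the ID once next to an `HkRequestVariant`. A small standalone sketch of how such a request might be consumed; the `handle_hk_request` function and its printouts are invented for illustration and not part of the crate:

```rust
type UniqueId = u32;
type CollectionIntervalFactor = u32;

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum HkRequestVariant {
    OneShot,
    EnablePeriodic,
    DisablePeriodic,
    ModifyCollectionInterval(CollectionIntervalFactor),
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct HkRequest {
    unique_id: UniqueId,
    variant: HkRequestVariant,
}

/// Hypothetical consumer: the dataset ID is available once, independent of the variant.
fn handle_hk_request(req: &HkRequest) {
    match req.variant {
        HkRequestVariant::OneShot => println!("one-shot HK for set {}", req.unique_id),
        HkRequestVariant::EnablePeriodic => println!("enable periodic HK for set {}", req.unique_id),
        HkRequestVariant::DisablePeriodic => println!("disable periodic HK for set {}", req.unique_id),
        HkRequestVariant::ModifyCollectionInterval(factor) => {
            println!("set interval factor {} for set {}", factor, req.unique_id)
        }
    }
}

fn main() {
    handle_hk_request(&HkRequest { unique_id: 1, variant: HkRequestVariant::OneShot });
    handle_hk_request(&HkRequest {
        unique_id: 1,
        variant: HkRequestVariant::ModifyCollectionInterval(4),
    });
}
```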
+pub type ComponentId = u64; diff --git a/satrs/src/mode.rs b/satrs/src/mode.rs index c5968b4..2f28d07 100644 --- a/satrs/src/mode.rs +++ b/satrs/src/mode.rs @@ -1,67 +1,95 @@ use core::mem::size_of; +use satrs_shared::res_code::ResultU16; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use spacepackets::ByteConversionError; -use crate::TargetId; +#[cfg(feature = "alloc")] +pub use alloc_mod::*; + +#[cfg(feature = "std")] +pub use std_mod::*; + +use crate::{ + queue::GenericTargetedMessagingError, + request::{GenericMessage, MessageMetadata, MessageReceiver, MessageReceiverWithId, RequestId}, + ComponentId, +}; + +pub type Mode = u32; +pub type Submode = u16; #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct ModeAndSubmode { - mode: u32, - submode: u16, + mode: Mode, + submode: Submode, } impl ModeAndSubmode { - pub const fn new_mode_only(mode: u32) -> Self { + pub const RAW_LEN: usize = size_of::() + size_of::(); + + pub const fn new_mode_only(mode: Mode) -> Self { Self { mode, submode: 0 } } - pub const fn new(mode: u32, submode: u16) -> Self { + pub const fn new(mode: Mode, submode: Submode) -> Self { Self { mode, submode } } - pub fn raw_len() -> usize { - size_of::() + size_of::() - } - pub fn from_be_bytes(buf: &[u8]) -> Result { if buf.len() < 6 { return Err(ByteConversionError::FromSliceTooSmall { - expected: 6, + expected: Self::RAW_LEN, found: buf.len(), }); } Ok(Self { - mode: u32::from_be_bytes(buf[0..4].try_into().unwrap()), - submode: u16::from_be_bytes(buf[4..6].try_into().unwrap()), + mode: Mode::from_be_bytes(buf[0..size_of::()].try_into().unwrap()), + submode: Submode::from_be_bytes( + buf[size_of::()..size_of::() + size_of::()] + .try_into() + .unwrap(), + ), }) } - pub fn mode(&self) -> u32 { + pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result { + if buf.len() < Self::RAW_LEN { + return Err(ByteConversionError::ToSliceTooSmall { + expected: Self::RAW_LEN, + found: buf.len(), + }); + } + buf[0..size_of::()].copy_from_slice(&self.mode.to_be_bytes()); + buf[size_of::()..Self::RAW_LEN].copy_from_slice(&self.submode.to_be_bytes()); + Ok(Self::RAW_LEN) + } + + pub fn mode(&self) -> Mode { self.mode } - pub fn submode(&self) -> u16 { + pub fn submode(&self) -> Submode { self.submode } } #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct TargetedModeCommand { - pub address: TargetId, + pub address: ComponentId, pub mode_submode: ModeAndSubmode, } impl TargetedModeCommand { - pub const fn new(address: TargetId, mode_submode: ModeAndSubmode) -> Self { + pub const fn new(address: ComponentId, mode_submode: ModeAndSubmode) -> Self { Self { address, mode_submode, } } - pub fn address(&self) -> TargetId { + pub fn address(&self) -> ComponentId { self.address } @@ -81,6 +109,8 @@ impl TargetedModeCommand { #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum ModeRequest { + /// Mode information. Can be used to notify other components of changed modes. 
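The `ModeAndSubmode` changes in this hunk introduce a `RAW_LEN` constant and a `write_to_be_bytes` counterpart to `from_be_bytes`. The following is a self-contained re-implementation of that round trip with the same field layout shown above (a big-endian `u32` mode followed by a `u16` submode, six bytes total); error handling is simplified to a plain `&'static str` instead of `ByteConversionError`:

```rust
use core::mem::size_of;

type Mode = u32;
type Submode = u16;

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct ModeAndSubmode {
    mode: Mode,
    submode: Submode,
}

impl ModeAndSubmode {
    const RAW_LEN: usize = size_of::<Mode>() + size_of::<Submode>();

    fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, &'static str> {
        if buf.len() < Self::RAW_LEN {
            return Err("target buffer too small");
        }
        buf[0..size_of::<Mode>()].copy_from_slice(&self.mode.to_be_bytes());
        buf[size_of::<Mode>()..Self::RAW_LEN].copy_from_slice(&self.submode.to_be_bytes());
        Ok(Self::RAW_LEN)
    }

    fn from_be_bytes(buf: &[u8]) -> Result<Self, &'static str> {
        if buf.len() < Self::RAW_LEN {
            return Err("source buffer too small");
        }
        Ok(Self {
            mode: Mode::from_be_bytes(buf[0..4].try_into().unwrap()),
            submode: Submode::from_be_bytes(buf[4..6].try_into().unwrap()),
        })
    }
}

fn main() {
    let ms = ModeAndSubmode { mode: 3, submode: 1 };
    let mut buf = [0u8; ModeAndSubmode::RAW_LEN];
    assert_eq!(ms.write_to_be_bytes(&mut buf).unwrap(), 6);
    assert_eq!(buf, [0, 0, 0, 3, 0, 1]);
    assert_eq!(ModeAndSubmode::from_be_bytes(&buf).unwrap(), ms);
}
```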
+ ModeInfo(ModeAndSubmode), SetMode(ModeAndSubmode), ReadMode, AnnounceMode, @@ -90,6 +120,473 @@ pub enum ModeRequest { #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct TargetedModeRequest { - target_id: TargetId, + target_id: ComponentId, mode_request: ModeRequest, } + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum ModeReply { + /// Reply to a mode request to confirm the commanded mode was reached. + ModeReply(ModeAndSubmode), + // Can not reach the commanded mode. Contains a reason as a [ResultU16]. + CantReachMode(ResultU16), + /// We are in the wrong mode for unknown reasons. Contains the expected and reached mode. + WrongMode { + expected: ModeAndSubmode, + reached: ModeAndSubmode, + }, +} + +pub type GenericModeReply = GenericMessage; + +pub trait ModeRequestSender { + fn local_channel_id(&self) -> ComponentId; + fn send_mode_request( + &self, + request_id: RequestId, + target_id: ComponentId, + request: ModeRequest, + ) -> Result<(), GenericTargetedMessagingError>; +} + +pub trait ModeRequestReceiver { + fn try_recv_mode_request( + &self, + ) -> Result>, GenericTargetedMessagingError>; +} + +impl> ModeRequestReceiver + for MessageReceiverWithId +{ + fn try_recv_mode_request( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.try_recv_message() + } +} + +#[derive(Debug, Clone)] +pub enum ModeError { + Messaging(GenericTargetedMessagingError), +} + +impl From for ModeError { + fn from(value: GenericTargetedMessagingError) -> Self { + Self::Messaging(value) + } +} + +pub trait ModeProvider { + fn mode_and_submode(&self) -> ModeAndSubmode; +} + +pub trait ModeRequestHandler: ModeProvider { + type Error; + + fn start_transition( + &mut self, + requestor: MessageMetadata, + mode_and_submode: ModeAndSubmode, + ) -> Result<(), Self::Error>; + + fn announce_mode(&self, requestor_info: MessageMetadata, recursive: bool); + + fn handle_mode_reached( + &mut self, + requestor_info: Option, + ) -> Result<(), Self::Error>; + + fn handle_mode_info( + &mut self, + requestor_info: MessageMetadata, + info: ModeAndSubmode, + ) -> Result<(), Self::Error>; + + fn send_mode_reply( + &self, + requestor_info: MessageMetadata, + reply: ModeReply, + ) -> Result<(), Self::Error>; + + fn handle_mode_request( + &mut self, + request: GenericMessage, + ) -> Result<(), Self::Error> { + match request.message { + ModeRequest::SetMode(mode_and_submode) => { + self.start_transition(request.requestor_info, mode_and_submode) + } + ModeRequest::ReadMode => self.send_mode_reply( + request.requestor_info, + ModeReply::ModeReply(self.mode_and_submode()), + ), + ModeRequest::AnnounceMode => { + self.announce_mode(request.requestor_info, false); + Ok(()) + } + ModeRequest::AnnounceModeRecursive => { + self.announce_mode(request.requestor_info, true); + Ok(()) + } + ModeRequest::ModeInfo(info) => self.handle_mode_info(request.requestor_info, info), + } + } +} + +pub trait ModeReplyReceiver { + fn try_recv_mode_reply( + &self, + ) -> Result>, GenericTargetedMessagingError>; +} + +impl> ModeReplyReceiver for MessageReceiverWithId { + fn try_recv_mode_reply( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.try_recv_message() + } +} + +pub trait ModeReplySender { + fn local_channel_id(&self) -> ComponentId; + + /// The requestor is assumed to be the target of the reply. 
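The new `ModeRequestHandler` trait above provides a default `handle_mode_request` that fans out to `start_transition`, `send_mode_reply`, `announce_mode` and `handle_mode_info`. Below is a deliberately simplified standalone sketch of that dispatch idea; the message metadata and error plumbing of the real trait are omitted, and the `Toy*` types and `PrintingHandler` are invented:

```rust
type Mode = u32;

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum ToyModeRequest {
    SetMode(Mode),
    ReadMode,
    AnnounceMode,
    ModeInfo(Mode),
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum ToyModeReply {
    ModeReply(Mode),
}

trait ToyModeRequestHandler {
    fn mode(&self) -> Mode;
    fn start_transition(&mut self, mode: Mode);
    fn announce_mode(&self);
    fn handle_mode_info(&mut self, info: Mode);
    fn send_mode_reply(&self, reply: ToyModeReply);

    /// Default dispatch, mirroring the structure of the trait in the diff.
    fn handle_mode_request(&mut self, request: ToyModeRequest) {
        match request {
            ToyModeRequest::SetMode(mode) => self.start_transition(mode),
            ToyModeRequest::ReadMode => self.send_mode_reply(ToyModeReply::ModeReply(self.mode())),
            ToyModeRequest::AnnounceMode => self.announce_mode(),
            ToyModeRequest::ModeInfo(info) => self.handle_mode_info(info),
        }
    }
}

struct PrintingHandler {
    mode: Mode,
}

impl ToyModeRequestHandler for PrintingHandler {
    fn mode(&self) -> Mode {
        self.mode
    }
    fn start_transition(&mut self, mode: Mode) {
        println!("transitioning to mode {}", mode);
        self.mode = mode;
    }
    fn announce_mode(&self) {
        println!("current mode: {}", self.mode);
    }
    fn handle_mode_info(&mut self, info: Mode) {
        println!("peer reported mode {}", info);
    }
    fn send_mode_reply(&self, reply: ToyModeReply) {
        println!("replying with {:?}", reply);
    }
}

fn main() {
    let mut handler = PrintingHandler { mode: 0 };
    handler.handle_mode_request(ToyModeRequest::SetMode(2));
    handler.handle_mode_request(ToyModeRequest::ReadMode);
}
```

An implementor only supplies the component-specific pieces; the dispatch over the request variants is shared through the default method.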
+ fn send_mode_reply( + &self, + requestor_info: MessageMetadata, + reply: ModeReply, + ) -> Result<(), GenericTargetedMessagingError>; +} + +#[cfg(feature = "alloc")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +pub mod alloc_mod { + use crate::{ + mode::ModeRequest, + queue::GenericTargetedMessagingError, + request::{ + MessageMetadata, MessageSender, MessageSenderAndReceiver, MessageSenderMap, + RequestAndReplySenderAndReceiver, RequestId, + }, + ComponentId, + }; + + use super::*; + + impl> MessageSenderMap { + pub fn send_mode_reply( + &self, + requestor_info: MessageMetadata, + target_id: ComponentId, + request: ModeReply, + ) -> Result<(), GenericTargetedMessagingError> { + self.send_message(requestor_info, target_id, request) + } + + pub fn add_reply_target(&mut self, target_id: ComponentId, request_sender: S) { + self.add_message_target(target_id, request_sender) + } + } + + impl, R: MessageReceiver> ModeReplySender + for MessageSenderAndReceiver + { + fn local_channel_id(&self) -> ComponentId { + self.local_channel_id_generic() + } + + fn send_mode_reply( + &self, + requestor_info: MessageMetadata, + request: ModeReply, + ) -> Result<(), GenericTargetedMessagingError> { + self.message_sender_map.send_mode_reply( + MessageMetadata::new(requestor_info.request_id(), self.local_channel_id()), + requestor_info.sender_id(), + request, + ) + } + } + + impl, R: MessageReceiver> ModeReplyReceiver + for MessageSenderAndReceiver + { + fn try_recv_mode_reply( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.message_receiver.try_recv_message() + } + } + + impl< + REQUEST, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > RequestAndReplySenderAndReceiver + { + pub fn add_reply_target(&mut self, target_id: ComponentId, reply_sender: S1) { + self.reply_sender_map + .add_message_target(target_id, reply_sender) + } + } + + impl< + REQUEST, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > ModeReplySender for RequestAndReplySenderAndReceiver + { + fn local_channel_id(&self) -> ComponentId { + self.local_channel_id_generic() + } + + fn send_mode_reply( + &self, + requestor_info: MessageMetadata, + request: ModeReply, + ) -> Result<(), GenericTargetedMessagingError> { + self.reply_sender_map.send_mode_reply( + MessageMetadata::new(requestor_info.request_id(), self.local_channel_id()), + requestor_info.sender_id(), + request, + ) + } + } + + impl< + REQUEST, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > ModeReplyReceiver + for RequestAndReplySenderAndReceiver + { + fn try_recv_mode_reply( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.reply_receiver.try_recv_message() + } + } + + /// Helper type definition for a mode handler which can handle mode requests. + pub type ModeRequestHandlerInterface = + MessageSenderAndReceiver; + + impl, R: MessageReceiver> + ModeRequestHandlerInterface + { + pub fn try_recv_mode_request( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.try_recv_message() + } + + pub fn send_mode_reply( + &self, + requestor_info: MessageMetadata, + reply: ModeReply, + ) -> Result<(), GenericTargetedMessagingError> { + self.send_message( + requestor_info.request_id(), + requestor_info.sender_id(), + reply, + ) + } + } + + /// Helper type defintion for a mode handler object which can send mode requests and receive + /// mode replies. 
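In the `ModeReplySender` implementations above, a reply is addressed by swapping roles in the metadata: the reply keeps the original request ID but carries the local channel ID as sender, while the requestor's sender ID becomes the target. A standalone sketch of that addressing rule; the `ToyMetadata` type and `route_reply` helper are invented for illustration:

```rust
type ComponentId = u64;
type RequestId = u32;

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct ToyMetadata {
    request_id: RequestId,
    sender_id: ComponentId,
}

/// Build the metadata and target for a reply to `requestor_info`, following the pattern of
/// the send_mode_reply implementations above: keep the request ID, flip the sender.
fn route_reply(requestor_info: ToyMetadata, local_id: ComponentId) -> (ToyMetadata, ComponentId) {
    let reply_metadata = ToyMetadata {
        request_id: requestor_info.request_id,
        sender_id: local_id,
    };
    let reply_target = requestor_info.sender_id;
    (reply_metadata, reply_target)
}

fn main() {
    // Component 7 sent request 42 to us (component 3).
    let incoming = ToyMetadata { request_id: 42, sender_id: 7 };
    let (metadata, target) = route_reply(incoming, 3);
    assert_eq!(metadata.request_id, 42);
    assert_eq!(metadata.sender_id, 3);
    assert_eq!(target, 7);
}
```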
+ pub type ModeRequestorInterface = MessageSenderAndReceiver; + + impl, R: MessageReceiver> ModeRequestorInterface { + pub fn try_recv_mode_reply( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.try_recv_message() + } + + pub fn send_mode_request( + &self, + request_id: RequestId, + target_id: ComponentId, + reply: ModeRequest, + ) -> Result<(), GenericTargetedMessagingError> { + self.send_message(request_id, target_id, reply) + } + } + + /// Helper type defintion for a mode handler object which can both send mode requests and + /// process mode requests. + pub type ModeInterface = + RequestAndReplySenderAndReceiver; + + impl> MessageSenderMap { + pub fn send_mode_request( + &self, + requestor_info: MessageMetadata, + target_id: ComponentId, + request: ModeRequest, + ) -> Result<(), GenericTargetedMessagingError> { + self.send_message(requestor_info, target_id, request) + } + + pub fn add_request_target(&mut self, target_id: ComponentId, request_sender: S) { + self.add_message_target(target_id, request_sender) + } + } + + /* + impl> ModeRequestSender for MessageSenderMapWithId { + fn local_channel_id(&self) -> ComponentId { + self.local_channel_id + } + + fn send_mode_request( + &self, + request_id: RequestId, + target_id: ComponentId, + request: ModeRequest, + ) -> Result<(), GenericTargetedMessagingError> { + self.send_message(request_id, target_id, request) + } + } + */ + + impl, R: MessageReceiver> ModeRequestReceiver + for MessageSenderAndReceiver + { + fn try_recv_mode_request( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.message_receiver.try_recv_message() + } + } + + impl, R: MessageReceiver> ModeRequestSender + for MessageSenderAndReceiver + { + fn local_channel_id(&self) -> ComponentId { + self.local_channel_id_generic() + } + + fn send_mode_request( + &self, + request_id: RequestId, + target_id: ComponentId, + request: ModeRequest, + ) -> Result<(), GenericTargetedMessagingError> { + self.message_sender_map.send_mode_request( + MessageMetadata::new(request_id, self.local_channel_id()), + target_id, + request, + ) + } + } + + impl< + REPLY, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > RequestAndReplySenderAndReceiver + { + pub fn add_request_target(&mut self, target_id: ComponentId, request_sender: S0) { + self.request_sender_map + .add_message_target(target_id, request_sender) + } + } + + impl< + REPLY, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > ModeRequestSender + for RequestAndReplySenderAndReceiver + { + fn local_channel_id(&self) -> ComponentId { + self.local_channel_id_generic() + } + + fn send_mode_request( + &self, + request_id: RequestId, + target_id: ComponentId, + request: ModeRequest, + ) -> Result<(), GenericTargetedMessagingError> { + self.request_sender_map.send_mode_request( + MessageMetadata::new(request_id, self.local_channel_id()), + target_id, + request, + ) + } + } + + impl< + REPLY, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > ModeRequestReceiver + for RequestAndReplySenderAndReceiver + { + fn try_recv_mode_request( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.request_receiver.try_recv_message() + } + } +} + +#[cfg(feature = "std")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] +pub mod std_mod { + use std::sync::mpsc; + + use crate::request::GenericMessage; + + use super::*; + + pub type ModeRequestHandlerMpsc = ModeRequestHandlerInterface< + mpsc::Sender>, 
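The `std_mod` type aliases in this hunk specialize the generic requestor/handler interfaces to `std::sync::mpsc` channels. The sketch below wires a simplified requestor and handler together over plain mpsc channels to show the data flow; it does not use the satrs message types, and the `ToyRequest`/`ToyReply` structs are invented:

```rust
use std::sync::mpsc;
use std::thread;

type ComponentId = u64;
type RequestId = u32;

#[derive(Debug)]
struct ToyRequest {
    request_id: RequestId,
    sender_id: ComponentId,
    commanded_mode: u32,
}

#[derive(Debug)]
struct ToyReply {
    request_id: RequestId,
    reached_mode: u32,
}

fn main() {
    // Requestor -> handler channel and handler -> requestor channel.
    let (request_tx, request_rx) = mpsc::channel::<ToyRequest>();
    let (reply_tx, reply_rx) = mpsc::channel::<ToyReply>();

    // Handler side: receives a request, performs the transition, replies.
    let handler = thread::spawn(move || {
        let request = request_rx.recv().expect("requestor hung up");
        println!(
            "handling request {} from component {}",
            request.request_id, request.sender_id
        );
        reply_tx
            .send(ToyReply {
                request_id: request.request_id,
                reached_mode: request.commanded_mode,
            })
            .expect("reply receiver hung up");
    });

    // Requestor side: sends a mode request and waits for the confirmation.
    request_tx
        .send(ToyRequest { request_id: 1, sender_id: 7, commanded_mode: 2 })
        .expect("handler hung up");
    let reply = reply_rx.recv().expect("handler hung up");
    assert_eq!(reply.request_id, 1);
    assert_eq!(reply.reached_mode, 2);
    handler.join().unwrap();
}
```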
+ mpsc::Receiver>, + >; + pub type ModeRequestHandlerMpscBounded = ModeRequestHandlerInterface< + mpsc::SyncSender>, + mpsc::Receiver>, + >; + + pub type ModeRequestorMpsc = ModeRequestorInterface< + mpsc::Sender>, + mpsc::Receiver>, + >; + pub type ModeRequestorBoundedMpsc = ModeRequestorInterface< + mpsc::SyncSender>, + mpsc::Receiver>, + >; + + pub type ModeRequestorAndHandlerMpsc = ModeInterface< + mpsc::Sender>, + mpsc::Receiver>, + mpsc::Sender>, + mpsc::Receiver>, + >; + pub type ModeRequestorAndHandlerMpscBounded = ModeInterface< + mpsc::SyncSender>, + mpsc::Receiver>, + mpsc::SyncSender>, + mpsc::Receiver>, + >; +} + +#[cfg(test)] +mod tests {} diff --git a/satrs/src/mode_tree.rs b/satrs/src/mode_tree.rs new file mode 100644 index 0000000..1cddd32 --- /dev/null +++ b/satrs/src/mode_tree.rs @@ -0,0 +1,37 @@ +use alloc::vec::Vec; +use hashbrown::HashMap; + +use crate::{ + mode::{Mode, ModeAndSubmode, Submode}, + ComponentId, +}; + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum TableEntryType { + /// Target table containing information of the expected children modes for given mode. + Target, + /// Sequence table which contains information about how to reach a target table, including + /// the order of the sequences. + Sequence, +} + +pub struct ModeTableEntry { + /// Name of respective table entry. + pub name: &'static str, + /// Target channel ID. + pub channel_id: ComponentId, + pub mode_submode: ModeAndSubmode, + pub allowed_submode_mask: Option, + pub check_success: bool, +} + +pub struct ModeTableMapValue { + /// Name for a given mode table entry. + pub name: &'static str, + pub entries: Vec, +} + +pub type ModeTable = HashMap; + +#[cfg(test)] +mod tests {} diff --git a/satrs/src/objects.rs b/satrs/src/objects.rs deleted file mode 100644 index a9b6881..0000000 --- a/satrs/src/objects.rs +++ /dev/null @@ -1,308 +0,0 @@ -//! # Module providing addressable object support and a manager for them -//! -//! Each addressable object can be identified using an [object ID][ObjectId]. -//! The [system object][ManagedSystemObject] trait also allows storing these objects into the -//! [object manager][ObjectManager]. They can then be retrieved and casted back to a known type -//! using the object ID. -//! -//! # Examples -//! -//! ```rust -//! use std::any::Any; -//! use std::error::Error; -//! use satrs::objects::{ManagedSystemObject, ObjectId, ObjectManager, SystemObject}; -//! -//! struct ExampleSysObj { -//! id: ObjectId, -//! dummy: u32, -//! was_initialized: bool, -//! } -//! -//! impl ExampleSysObj { -//! fn new(id: ObjectId, dummy: u32) -> ExampleSysObj { -//! ExampleSysObj { -//! id, -//! dummy, -//! was_initialized: false, -//! } -//! } -//! } -//! -//! impl SystemObject for ExampleSysObj { -//! type Error = (); -//! fn get_object_id(&self) -> &ObjectId { -//! &self.id -//! } -//! -//! fn initialize(&mut self) -> Result<(), Self::Error> { -//! self.was_initialized = true; -//! Ok(()) -//! } -//! } -//! -//! impl ManagedSystemObject for ExampleSysObj {} -//! -//! let mut obj_manager = ObjectManager::default(); -//! let obj_id = ObjectId { id: 0, name: "Example 0"}; -//! let example_obj = ExampleSysObj::new(obj_id, 42); -//! obj_manager.insert(Box::new(example_obj)); -//! let obj_back_casted: Option<&ExampleSysObj> = obj_manager.get_ref(&obj_id); -//! let example_obj = obj_back_casted.unwrap(); -//! assert_eq!(example_obj.id, obj_id); -//! assert_eq!(example_obj.dummy, 42); -//! 
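The new `mode_tree.rs` module above models mode tables as a map keyed by the parent mode, where each entry lists the modes its child components are expected to be in. A standalone sketch of building and querying such a table, using `std::collections::HashMap` in place of `hashbrown`; the concrete mode numbers and names are invented, and the `allowed_submode_mask` field is omitted for brevity:

```rust
use std::collections::HashMap;

type ComponentId = u64;
type Mode = u32;
type Submode = u16;

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct ModeAndSubmode {
    mode: Mode,
    submode: Submode,
}

/// One child entry of a target table: which mode a given child component is expected to reach.
#[derive(Debug)]
struct ModeTableEntry {
    name: &'static str,
    channel_id: ComponentId,
    mode_submode: ModeAndSubmode,
    check_success: bool,
}

#[derive(Debug)]
struct ModeTableMapValue {
    name: &'static str,
    entries: Vec<ModeTableEntry>,
}

type ModeTable = HashMap<Mode, ModeTableMapValue>;

fn main() {
    const MODE_SAFE: Mode = 1;
    let mut table = ModeTable::new();
    table.insert(
        MODE_SAFE,
        ModeTableMapValue {
            name: "SAFE target table",
            entries: vec![
                ModeTableEntry {
                    name: "MGM assembly",
                    channel_id: 10,
                    mode_submode: ModeAndSubmode { mode: MODE_SAFE, submode: 0 },
                    check_success: true,
                },
                ModeTableEntry {
                    name: "Reaction wheel assembly",
                    channel_id: 11,
                    mode_submode: ModeAndSubmode { mode: MODE_SAFE, submode: 0 },
                    check_success: true,
                },
            ],
        },
    );
    let safe_table = &table[&MODE_SAFE];
    println!("{} has {} children", safe_table.name, safe_table.entries.len());
}
```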
``` -#[cfg(feature = "alloc")] -use alloc::boxed::Box; -#[cfg(feature = "alloc")] -pub use alloc_mod::*; -#[cfg(feature = "alloc")] -use downcast_rs::Downcast; -#[cfg(feature = "alloc")] -use hashbrown::HashMap; -#[cfg(feature = "std")] -use std::error::Error; - -use crate::TargetId; - -#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)] -pub struct ObjectId { - pub id: TargetId, - pub name: &'static str, -} - -#[cfg(feature = "alloc")] -pub mod alloc_mod { - use super::*; - - /// Each object which is stored inside the [object manager][ObjectManager] needs to implemented - /// this trait - pub trait SystemObject: Downcast { - type Error; - fn get_object_id(&self) -> &ObjectId; - fn initialize(&mut self) -> Result<(), Self::Error>; - } - downcast_rs::impl_downcast!(SystemObject assoc Error); - - pub trait ManagedSystemObject: SystemObject + Send {} - downcast_rs::impl_downcast!(ManagedSystemObject assoc Error); - - /// Helper module to manage multiple [ManagedSystemObjects][ManagedSystemObject] by mapping them - /// using an [object ID][ObjectId] - #[cfg(feature = "alloc")] - pub struct ObjectManager { - obj_map: HashMap>>, - } - - #[cfg(feature = "alloc")] - impl Default for ObjectManager { - fn default() -> Self { - Self::new() - } - } - - #[cfg(feature = "alloc")] - impl ObjectManager { - pub fn new() -> Self { - ObjectManager { - obj_map: HashMap::new(), - } - } - pub fn insert(&mut self, sys_obj: Box>) -> bool { - let obj_id = sys_obj.get_object_id(); - if self.obj_map.contains_key(obj_id) { - return false; - } - self.obj_map.insert(*obj_id, sys_obj).is_none() - } - - /// Initializes all System Objects in the hash map and returns the number of successful - /// initializations - pub fn initialize(&mut self) -> Result> { - let mut init_success = 0; - for val in self.obj_map.values_mut() { - if val.initialize().is_ok() { - init_success += 1 - } - } - Ok(init_success) - } - - /// Retrieve a reference to an object stored inside the manager. The type to retrieve needs to - /// be explicitly passed as a generic parameter or specified on the left hand side of the - /// expression. - pub fn get_ref>(&self, key: &ObjectId) -> Option<&T> { - self.obj_map.get(key).and_then(|o| o.downcast_ref::()) - } - - /// Retrieve a mutable reference to an object stored inside the manager. The type to retrieve - /// needs to be explicitly passed as a generic parameter or specified on the left hand side - /// of the expression. 
- pub fn get_mut>( - &mut self, - key: &ObjectId, - ) -> Option<&mut T> { - self.obj_map - .get_mut(key) - .and_then(|o| o.downcast_mut::()) - } - } -} - -#[cfg(test)] -mod tests { - use crate::objects::{ManagedSystemObject, ObjectId, ObjectManager, SystemObject}; - use std::boxed::Box; - use std::string::String; - use std::sync::{Arc, Mutex}; - use std::thread; - - struct ExampleSysObj { - id: ObjectId, - dummy: u32, - was_initialized: bool, - } - - impl ExampleSysObj { - fn new(id: ObjectId, dummy: u32) -> ExampleSysObj { - ExampleSysObj { - id, - dummy, - was_initialized: false, - } - } - } - - impl SystemObject for ExampleSysObj { - type Error = (); - fn get_object_id(&self) -> &ObjectId { - &self.id - } - - fn initialize(&mut self) -> Result<(), Self::Error> { - self.was_initialized = true; - Ok(()) - } - } - - impl ManagedSystemObject for ExampleSysObj {} - - struct OtherExampleObject { - id: ObjectId, - string: String, - was_initialized: bool, - } - - impl SystemObject for OtherExampleObject { - type Error = (); - fn get_object_id(&self) -> &ObjectId { - &self.id - } - - fn initialize(&mut self) -> Result<(), Self::Error> { - self.was_initialized = true; - Ok(()) - } - } - - impl ManagedSystemObject for OtherExampleObject {} - - #[test] - fn test_obj_manager_simple() { - let mut obj_manager = ObjectManager::default(); - let expl_obj_id = ObjectId { - id: 0, - name: "Example 0", - }; - let example_obj = ExampleSysObj::new(expl_obj_id, 42); - assert!(obj_manager.insert(Box::new(example_obj))); - let res = obj_manager.initialize(); - assert!(res.is_ok()); - assert_eq!(res.unwrap(), 1); - let obj_back_casted: Option<&ExampleSysObj> = obj_manager.get_ref(&expl_obj_id); - assert!(obj_back_casted.is_some()); - let expl_obj_back_casted = obj_back_casted.unwrap(); - assert_eq!(expl_obj_back_casted.dummy, 42); - assert!(expl_obj_back_casted.was_initialized); - - let second_obj_id = ObjectId { - id: 12, - name: "Example 1", - }; - let second_example_obj = OtherExampleObject { - id: second_obj_id, - string: String::from("Hello Test"), - was_initialized: false, - }; - - assert!(obj_manager.insert(Box::new(second_example_obj))); - let res = obj_manager.initialize(); - assert!(res.is_ok()); - assert_eq!(res.unwrap(), 2); - let obj_back_casted: Option<&OtherExampleObject> = obj_manager.get_ref(&second_obj_id); - assert!(obj_back_casted.is_some()); - let expl_obj_back_casted = obj_back_casted.unwrap(); - assert_eq!(expl_obj_back_casted.string, String::from("Hello Test")); - assert!(expl_obj_back_casted.was_initialized); - - let existing_obj_id = ObjectId { - id: 12, - name: "Example 1", - }; - let invalid_obj = OtherExampleObject { - id: existing_obj_id, - string: String::from("Hello Test"), - was_initialized: false, - }; - - assert!(!obj_manager.insert(Box::new(invalid_obj))); - } - - #[test] - fn object_man_threaded() { - let obj_manager = Arc::new(Mutex::new(ObjectManager::new())); - let expl_obj_id = ObjectId { - id: 0, - name: "Example 0", - }; - let example_obj = ExampleSysObj::new(expl_obj_id, 42); - let second_obj_id = ObjectId { - id: 12, - name: "Example 1", - }; - let second_example_obj = OtherExampleObject { - id: second_obj_id, - string: String::from("Hello Test"), - was_initialized: false, - }; - - let mut obj_man_handle = obj_manager.lock().expect("Mutex lock failed"); - assert!(obj_man_handle.insert(Box::new(example_obj))); - assert!(obj_man_handle.insert(Box::new(second_example_obj))); - let res = obj_man_handle.initialize(); - std::mem::drop(obj_man_handle); - assert!(res.is_ok()); 
- assert_eq!(res.unwrap(), 2); - let obj_man_0 = obj_manager.clone(); - let jh0 = thread::spawn(move || { - let locked_man = obj_man_0.lock().expect("Mutex lock failed"); - let obj_back_casted: Option<&ExampleSysObj> = locked_man.get_ref(&expl_obj_id); - assert!(obj_back_casted.is_some()); - let expl_obj_back_casted = obj_back_casted.unwrap(); - assert_eq!(expl_obj_back_casted.dummy, 42); - assert!(expl_obj_back_casted.was_initialized); - std::mem::drop(locked_man) - }); - - let jh1 = thread::spawn(move || { - let locked_man = obj_manager.lock().expect("Mutex lock failed"); - let obj_back_casted: Option<&OtherExampleObject> = locked_man.get_ref(&second_obj_id); - assert!(obj_back_casted.is_some()); - let expl_obj_back_casted = obj_back_casted.unwrap(); - assert_eq!(expl_obj_back_casted.string, String::from("Hello Test")); - assert!(expl_obj_back_casted.was_initialized); - std::mem::drop(locked_man) - }); - jh0.join().expect("Joining thread 0 failed"); - jh1.join().expect("Joining thread 1 failed"); - } -} diff --git a/satrs/src/params.rs b/satrs/src/params.rs index 1279015..35781c1 100644 --- a/satrs/src/params.rs +++ b/satrs/src/params.rs @@ -60,21 +60,28 @@ use alloc::vec::Vec; /// Generic trait which is used for objects which can be converted into a raw network (big) endian /// byte format. pub trait WritableToBeBytes { - fn raw_len(&self) -> usize; + fn written_len(&self) -> usize; /// Writes the object to a raw buffer in network endianness (big) fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result; + + #[cfg(feature = "alloc")] + fn to_vec(&self) -> Result, ByteConversionError> { + let mut vec = alloc::vec![0; self.written_len()]; + self.write_to_be_bytes(&mut vec)?; + Ok(vec) + } } macro_rules! param_to_be_bytes_impl { ($Newtype: ident) => { impl WritableToBeBytes for $Newtype { #[inline] - fn raw_len(&self) -> usize { + fn written_len(&self) -> usize { size_of::<::ByteArray>() } fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result { - let raw_len = self.raw_len(); + let raw_len = WritableToBeBytes::written_len(self); if buf.len() < raw_len { return Err(ByteConversionError::ToSliceTooSmall { found: buf.len(), @@ -382,32 +389,32 @@ pub enum ParamsRaw { } impl WritableToBeBytes for ParamsRaw { - fn raw_len(&self) -> usize { + fn written_len(&self) -> usize { match self { - ParamsRaw::U8(v) => v.raw_len(), - ParamsRaw::U8Pair(v) => v.raw_len(), - ParamsRaw::U8Triplet(v) => v.raw_len(), - ParamsRaw::I8(v) => v.raw_len(), - ParamsRaw::I8Pair(v) => v.raw_len(), - ParamsRaw::I8Triplet(v) => v.raw_len(), - ParamsRaw::U16(v) => v.raw_len(), - ParamsRaw::U16Pair(v) => v.raw_len(), - ParamsRaw::U16Triplet(v) => v.raw_len(), - ParamsRaw::I16(v) => v.raw_len(), - ParamsRaw::I16Pair(v) => v.raw_len(), - ParamsRaw::I16Triplet(v) => v.raw_len(), - ParamsRaw::U32(v) => v.raw_len(), - ParamsRaw::U32Pair(v) => v.raw_len(), - ParamsRaw::U32Triplet(v) => v.raw_len(), - ParamsRaw::I32(v) => v.raw_len(), - ParamsRaw::I32Pair(v) => v.raw_len(), - ParamsRaw::I32Triplet(v) => v.raw_len(), - ParamsRaw::F32(v) => v.raw_len(), - ParamsRaw::F32Pair(v) => v.raw_len(), - ParamsRaw::F32Triplet(v) => v.raw_len(), - ParamsRaw::U64(v) => v.raw_len(), - ParamsRaw::I64(v) => v.raw_len(), - ParamsRaw::F64(v) => v.raw_len(), + ParamsRaw::U8(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U8Pair(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U8Triplet(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I8(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I8Pair(v) => WritableToBeBytes::written_len(v), + 
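The rename of `raw_len` to `written_len` in `params.rs` above comes with a provided `to_vec` that allocates a buffer of exactly `written_len()` bytes and writes into it. A standalone sketch of that trait shape, with a simplified error type and a local toy `U16Pair` instead of the satrs newtype:

```rust
/// Simplified version of the trait from the diff: types report how many bytes they will
/// write and get a provided helper that serializes into a freshly allocated Vec.
trait WritableToBeBytes {
    fn written_len(&self) -> usize;
    fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, &'static str>;

    fn to_vec(&self) -> Result<Vec<u8>, &'static str> {
        let mut vec = vec![0; self.written_len()];
        self.write_to_be_bytes(&mut vec)?;
        Ok(vec)
    }
}

struct U16Pair(u16, u16);

impl WritableToBeBytes for U16Pair {
    fn written_len(&self) -> usize {
        4
    }
    fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, &'static str> {
        if buf.len() < self.written_len() {
            return Err("buffer too small");
        }
        buf[0..2].copy_from_slice(&self.0.to_be_bytes());
        buf[2..4].copy_from_slice(&self.1.to_be_bytes());
        Ok(self.written_len())
    }
}

fn main() {
    let pair = U16Pair(1, 0x203);
    assert_eq!(pair.to_vec().unwrap(), vec![0, 1, 2, 3]);
}
```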
ParamsRaw::I8Triplet(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U16(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U16Pair(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U16Triplet(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I16(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I16Pair(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I16Triplet(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U32(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U32Pair(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U32Triplet(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I32(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I32Pair(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I32Triplet(v) => WritableToBeBytes::written_len(v), + ParamsRaw::F32(v) => WritableToBeBytes::written_len(v), + ParamsRaw::F32Pair(v) => WritableToBeBytes::written_len(v), + ParamsRaw::F32Triplet(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U64(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I64(v) => WritableToBeBytes::written_len(v), + ParamsRaw::F64(v) => WritableToBeBytes::written_len(v), } } @@ -460,7 +467,7 @@ params_raw_from_newtype!( ); #[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum EcssEnumParams { +pub enum ParamsEcssEnum { U8(EcssEnumU8), U16(EcssEnumU16), U32(EcssEnumU32), @@ -468,40 +475,46 @@ pub enum EcssEnumParams { } macro_rules! writable_as_be_bytes_ecss_enum_impl { - ($EnumIdent: ident) => { + ($EnumIdent: ident, $Ty: ident) => { + impl From<$EnumIdent> for ParamsEcssEnum { + fn from(e: $EnumIdent) -> Self { + Self::$Ty(e) + } + } + impl WritableToBeBytes for $EnumIdent { - fn raw_len(&self) -> usize { + fn written_len(&self) -> usize { self.size() } fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result { - ::write_to_be_bytes(self, buf).map(|_| self.raw_len()) + ::write_to_be_bytes(self, buf).map(|_| self.written_len()) } } }; } -writable_as_be_bytes_ecss_enum_impl!(EcssEnumU8); -writable_as_be_bytes_ecss_enum_impl!(EcssEnumU16); -writable_as_be_bytes_ecss_enum_impl!(EcssEnumU32); -writable_as_be_bytes_ecss_enum_impl!(EcssEnumU64); +writable_as_be_bytes_ecss_enum_impl!(EcssEnumU8, U8); +writable_as_be_bytes_ecss_enum_impl!(EcssEnumU16, U16); +writable_as_be_bytes_ecss_enum_impl!(EcssEnumU32, U32); +writable_as_be_bytes_ecss_enum_impl!(EcssEnumU64, U64); -impl WritableToBeBytes for EcssEnumParams { - fn raw_len(&self) -> usize { +impl WritableToBeBytes for ParamsEcssEnum { + fn written_len(&self) -> usize { match self { - EcssEnumParams::U8(e) => e.raw_len(), - EcssEnumParams::U16(e) => e.raw_len(), - EcssEnumParams::U32(e) => e.raw_len(), - EcssEnumParams::U64(e) => e.raw_len(), + ParamsEcssEnum::U8(e) => e.written_len(), + ParamsEcssEnum::U16(e) => e.written_len(), + ParamsEcssEnum::U32(e) => e.written_len(), + ParamsEcssEnum::U64(e) => e.written_len(), } } fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result { match self { - EcssEnumParams::U8(e) => WritableToBeBytes::write_to_be_bytes(e, buf), - EcssEnumParams::U16(e) => WritableToBeBytes::write_to_be_bytes(e, buf), - EcssEnumParams::U32(e) => WritableToBeBytes::write_to_be_bytes(e, buf), - EcssEnumParams::U64(e) => WritableToBeBytes::write_to_be_bytes(e, buf), + ParamsEcssEnum::U8(e) => WritableToBeBytes::write_to_be_bytes(e, buf), + ParamsEcssEnum::U16(e) => WritableToBeBytes::write_to_be_bytes(e, buf), + ParamsEcssEnum::U32(e) => WritableToBeBytes::write_to_be_bytes(e, buf), + ParamsEcssEnum::U64(e) => WritableToBeBytes::write_to_be_bytes(e, buf), } } } 
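The updated `writable_as_be_bytes_ecss_enum_impl` macro above now also emits a `From` conversion so each ECSS enum newtype can be lifted into `ParamsEcssEnum` directly. A standalone sketch of that macro pattern with toy types; `ToyEnumU8`, `ToyEnumU16` and `ToyParamsEnum` are invented stand-ins:

```rust
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct ToyEnumU8(u8);
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct ToyEnumU16(u16);

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum ToyParamsEnum {
    U8(ToyEnumU8),
    U16(ToyEnumU16),
}

/// Same trick as in the diff: one macro invocation per newtype emits the From conversion
/// into the wrapping enum (the real macro additionally emits the byte-writing impl).
macro_rules! params_enum_from_impl {
    ($Newtype: ident, $Variant: ident) => {
        impl From<$Newtype> for ToyParamsEnum {
            fn from(value: $Newtype) -> Self {
                Self::$Variant(value)
            }
        }
    };
}

params_enum_from_impl!(ToyEnumU8, U8);
params_enum_from_impl!(ToyEnumU16, U16);

fn main() {
    let wrapped: ToyParamsEnum = ToyEnumU8(200).into();
    assert_eq!(wrapped, ToyParamsEnum::U8(ToyEnumU8(200)));
    let wrapped: ToyParamsEnum = ToyEnumU16(60000).into();
    assert_eq!(wrapped, ToyParamsEnum::U16(ToyEnumU16(60000)));
}
```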
@@ -510,7 +523,19 @@ impl WritableToBeBytes for EcssEnumParams { #[derive(Debug, Copy, Clone, PartialEq)] pub enum ParamsHeapless { Raw(ParamsRaw), - EcssEnum(EcssEnumParams), + EcssEnum(ParamsEcssEnum), +} + +impl From for ParamsHeapless { + fn from(v: ParamsRaw) -> Self { + Self::Raw(v) + } +} + +impl From for ParamsHeapless { + fn from(v: ParamsEcssEnum) -> Self { + Self::EcssEnum(v) + } } macro_rules! from_conversions_for_raw { @@ -559,7 +584,7 @@ from_conversions_for_raw!( /// Generic enumeration for additional parameters, including parameters which rely on heap /// allocations. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] #[non_exhaustive] pub enum Params { Heapless(ParamsHeapless), @@ -584,6 +609,12 @@ impl From for Params { } } +impl From for Params { + fn from(x: ParamsRaw) -> Self { + Self::Heapless(ParamsHeapless::Raw(x)) + } +} + #[cfg(feature = "alloc")] #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] impl From> for Params { @@ -618,10 +649,90 @@ impl From<&str> for Params { } } +/// Please note while [WritableToBeBytes] is implemented for [Params], the default implementation +/// will not be able to process the [Params::Store] parameter variant. +impl WritableToBeBytes for Params { + fn written_len(&self) -> usize { + match self { + Params::Heapless(p) => match p { + ParamsHeapless::Raw(raw) => raw.written_len(), + ParamsHeapless::EcssEnum(enumeration) => enumeration.written_len(), + }, + Params::Store(_) => 0, + #[cfg(feature = "alloc")] + Params::Vec(vec) => vec.len(), + #[cfg(feature = "alloc")] + Params::String(string) => string.len(), + } + } + + fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result { + match self { + Params::Heapless(p) => match p { + ParamsHeapless::Raw(raw) => raw.write_to_be_bytes(buf), + ParamsHeapless::EcssEnum(enumeration) => enumeration.write_to_be_bytes(buf), + }, + Params::Store(_) => Ok(0), + #[cfg(feature = "alloc")] + Params::Vec(vec) => { + if buf.len() < vec.len() { + return Err(ByteConversionError::ToSliceTooSmall { + found: buf.len(), + expected: vec.len(), + }); + } + buf[0..vec.len()].copy_from_slice(vec); + Ok(vec.len()) + } + #[cfg(feature = "alloc")] + Params::String(string) => { + if buf.len() < string.len() { + return Err(ByteConversionError::ToSliceTooSmall { + found: buf.len(), + expected: string.len(), + }); + } + buf[0..string.len()].copy_from_slice(string.as_bytes()); + Ok(string.len()) + } + } + } +} + #[cfg(test)] mod tests { use super::*; + fn test_cloning_works(param_raw: &impl WritableToBeBytes) { + let _new_param = param_raw; + } + + fn test_writing_fails(param_raw: &(impl WritableToBeBytes + ToBeBytes)) { + let pair_size = WritableToBeBytes::written_len(param_raw); + assert_eq!(pair_size, ToBeBytes::written_len(param_raw)); + let mut vec = alloc::vec![0; pair_size - 1]; + let result = param_raw.write_to_be_bytes(&mut vec); + if let Err(ByteConversionError::ToSliceTooSmall { found, expected }) = result { + assert_eq!(found, pair_size - 1); + assert_eq!(expected, pair_size); + } else { + panic!("Expected ByteConversionError::ToSliceTooSmall"); + } + } + + fn test_writing(params_raw: &ParamsRaw, writeable: &impl WritableToBeBytes) { + assert_eq!(params_raw.written_len(), writeable.written_len()); + let mut vec = alloc::vec![0; writeable.written_len()]; + writeable + .write_to_be_bytes(&mut vec) + .expect("writing parameter to buffer failed"); + let mut other_vec = alloc::vec![0; writeable.written_len()]; + params_raw + .write_to_be_bytes(&mut other_vec) + .expect("writing parameter to buffer failed"); + 
assert_eq!(vec, other_vec); + } + #[test] fn test_basic_u32_pair() { let u32_pair = U32Pair(4, 8); @@ -632,10 +743,32 @@ mod tests { assert_eq!(u32_conv_back, 4); u32_conv_back = u32::from_be_bytes(raw[4..8].try_into().unwrap()); assert_eq!(u32_conv_back, 8); + test_writing_fails(&u32_pair); + test_cloning_works(&u32_pair); + let u32_praw = ParamsRaw::from(u32_pair); + test_writing(&u32_praw, &u32_pair); } #[test] - fn basic_signed_test_pair() { + fn test_u16_pair_writing_fails() { + let u16_pair = U16Pair(4, 8); + test_writing_fails(&u16_pair); + test_cloning_works(&u16_pair); + let u16_praw = ParamsRaw::from(u16_pair); + test_writing(&u16_praw, &u16_pair); + } + + #[test] + fn test_u8_pair_writing_fails() { + let u8_pair = U8Pair(4, 8); + test_writing_fails(&u8_pair); + test_cloning_works(&u8_pair); + let u8_praw = ParamsRaw::from(u8_pair); + test_writing(&u8_praw, &u8_pair); + } + + #[test] + fn basic_i8_test() { let i8_pair = I8Pair(-3, -16); assert_eq!(i8_pair.0, -3); assert_eq!(i8_pair.1, -16); @@ -644,10 +777,31 @@ mod tests { assert_eq!(i8_conv_back, -3); i8_conv_back = i8::from_be_bytes(raw[1..2].try_into().unwrap()); assert_eq!(i8_conv_back, -16); + test_writing_fails(&i8_pair); + test_cloning_works(&i8_pair); + let i8_praw = ParamsRaw::from(i8_pair); + test_writing(&i8_praw, &i8_pair); } #[test] - fn basic_signed_test_triplet() { + fn test_from_u32_triplet() { + let raw_params = U32Triplet::from((1, 2, 3)); + assert_eq!(raw_params.0, 1); + assert_eq!(raw_params.1, 2); + assert_eq!(raw_params.2, 3); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 12); + assert_eq!( + raw_params.to_be_bytes(), + [0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3] + ); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let u32_triplet = ParamsRaw::from(raw_params); + test_writing(&u32_triplet, &raw_params); + } + + #[test] + fn test_i8_triplet() { let i8_triplet = I8Triplet(-3, -16, -126); assert_eq!(i8_triplet.0, -3); assert_eq!(i8_triplet.1, -16); @@ -659,6 +813,10 @@ mod tests { assert_eq!(i8_conv_back, -16); i8_conv_back = i8::from_be_bytes(raw[2..3].try_into().unwrap()); assert_eq!(i8_conv_back, -126); + test_writing_fails(&i8_triplet); + test_cloning_works(&i8_triplet); + let i8_praw = ParamsRaw::from(i8_triplet); + test_writing(&i8_praw, &i8_triplet); } #[test] @@ -681,4 +839,352 @@ mod tests { panic!("Params type is not a vector") } } + + #[test] + fn test_params_written_len_raw() { + let param_raw = ParamsRaw::from((500_u32, 1000_u32)); + let param: Params = Params::Heapless(param_raw.into()); + assert_eq!(param.written_len(), 8); + let mut buf: [u8; 8] = [0; 8]; + param + .write_to_be_bytes(&mut buf) + .expect("writing to buffer failed"); + assert_eq!(u32::from_be_bytes(buf[0..4].try_into().unwrap()), 500); + assert_eq!(u32::from_be_bytes(buf[4..8].try_into().unwrap()), 1000); + } + + #[test] + fn test_params_written_string() { + let string = "Test String".to_string(); + let param = Params::String(string.clone()); + assert_eq!(param.written_len(), string.len()); + let vec = param.to_vec().unwrap(); + let string_conv_back = String::from_utf8(vec).expect("conversion to string failed"); + assert_eq!(string_conv_back, string); + } + + #[test] + fn test_params_written_vec() { + let vec: Vec = alloc::vec![1, 2, 3, 4, 5]; + let param = Params::Vec(vec.clone()); + assert_eq!(param.written_len(), vec.len()); + assert_eq!(param.to_vec().expect("writing vec params failed"), vec); + } + + #[test] + fn test_u32_single() { + let raw_params = U32::from(20); + assert_eq!(raw_params.0, 
20); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 4); + assert_eq!(raw_params.to_be_bytes(), [0, 0, 0, 20]); + let other = U32::from(20); + assert_eq!(raw_params, other); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let u32_praw = ParamsRaw::from(raw_params); + test_writing(&u32_praw, &raw_params); + } + + #[test] + fn test_i8_single() { + let neg_number: i8 = -5_i8; + let raw_params = I8::from(neg_number); + assert_eq!(raw_params.0, neg_number); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 1); + assert_eq!(raw_params.to_be_bytes(), neg_number.to_be_bytes()); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let u8_praw = ParamsRaw::from(raw_params); + test_writing(&u8_praw, &raw_params); + } + + #[test] + fn test_u8_single() { + let raw_params = U8::from(20); + assert_eq!(raw_params.0, 20); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 1); + assert_eq!(raw_params.to_be_bytes(), [20]); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let u32_praw = ParamsRaw::from(raw_params); + test_writing(&u32_praw, &raw_params); + } + + #[test] + fn test_u16_single() { + let raw_params = U16::from(0x123); + assert_eq!(raw_params.0, 0x123); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 2); + assert_eq!(raw_params.to_be_bytes(), [0x01, 0x23]); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let u16_praw = ParamsRaw::from(raw_params); + test_writing(&u16_praw, &raw_params); + } + + #[test] + fn test_u16_triplet() { + let raw_params = U16Triplet::from((1, 2, 3)); + assert_eq!(raw_params.0, 1); + assert_eq!(raw_params.1, 2); + assert_eq!(raw_params.2, 3); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 6); + assert_eq!(raw_params.to_be_bytes(), [0, 1, 0, 2, 0, 3]); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let u16_praw = ParamsRaw::from(raw_params); + test_writing(&u16_praw, &raw_params); + } + + #[test] + fn test_u8_triplet() { + let raw_params = U8Triplet::from((1, 2, 3)); + assert_eq!(raw_params.0, 1); + assert_eq!(raw_params.1, 2); + assert_eq!(raw_params.2, 3); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 3); + assert_eq!(raw_params.to_be_bytes(), [1, 2, 3]); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let u8_praw = ParamsRaw::from(raw_params); + test_writing(&u8_praw, &raw_params); + } + + #[test] + fn test_i16_single() { + let value = -300_i16; + let raw_params = I16::from(value); + assert_eq!(raw_params.0, value); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 2); + assert_eq!(raw_params.to_be_bytes(), value.to_be_bytes()); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let i16_praw = ParamsRaw::from(raw_params); + test_writing(&i16_praw, &raw_params); + } + + #[test] + fn test_i16_pair() { + let raw_params = I16Pair::from((-300, -400)); + assert_eq!(raw_params.0, -300); + assert_eq!(raw_params.1, -400); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 4); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let i16_praw = ParamsRaw::from(raw_params); + test_writing(&i16_praw, &raw_params); + } + + #[test] + fn test_i16_triplet() { + let raw_params = I16Triplet::from((-300, -400, -350)); + assert_eq!(raw_params.0, -300); + assert_eq!(raw_params.1, -400); + assert_eq!(raw_params.2, -350); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 6); + test_writing_fails(&raw_params); + 
test_cloning_works(&raw_params); + let i16_praw = ParamsRaw::from(raw_params); + test_writing(&i16_praw, &raw_params); + } + + #[test] + fn test_i32_single() { + let raw_params = I32::from(-80000); + assert_eq!(raw_params.0, -80000); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 4); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let i32_praw = ParamsRaw::from(raw_params); + test_writing(&i32_praw, &raw_params); + } + + #[test] + fn test_i32_pair() { + let raw_params = I32Pair::from((-80000, -200)); + assert_eq!(raw_params.0, -80000); + assert_eq!(raw_params.1, -200); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 8); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let i32_praw = ParamsRaw::from(raw_params); + test_writing(&i32_praw, &raw_params); + } + + #[test] + fn test_i32_triplet() { + let raw_params = I32Triplet::from((-80000, -5, -200)); + assert_eq!(raw_params.0, -80000); + assert_eq!(raw_params.1, -5); + assert_eq!(raw_params.2, -200); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 12); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let i32_praw = ParamsRaw::from(raw_params); + test_writing(&i32_praw, &raw_params); + } + + #[test] + fn test_f32_single() { + let param = F32::from(0.1); + assert_eq!(param.0, 0.1); + assert_eq!(WritableToBeBytes::written_len(¶m), 4); + let f32_pair_raw = param.to_be_bytes(); + let f32_0 = f32::from_be_bytes(f32_pair_raw[0..4].try_into().unwrap()); + assert_eq!(f32_0, 0.1); + test_writing_fails(¶m); + test_cloning_works(¶m); + let praw = ParamsRaw::from(param); + test_writing(&praw, ¶m); + let p_try_from = F32::try_from(param.to_be_bytes().as_ref()).expect("try_from failed"); + assert_eq!(p_try_from, param); + } + + #[test] + fn test_f32_pair() { + let param = F32Pair::from((0.1, 0.2)); + assert_eq!(param.0, 0.1); + assert_eq!(param.1, 0.2); + assert_eq!(WritableToBeBytes::written_len(¶m), 8); + let f32_pair_raw = param.to_be_bytes(); + let f32_0 = f32::from_be_bytes(f32_pair_raw[0..4].try_into().unwrap()); + assert_eq!(f32_0, 0.1); + let f32_1 = f32::from_be_bytes(f32_pair_raw[4..8].try_into().unwrap()); + assert_eq!(f32_1, 0.2); + let other_pair = F32Pair::from((0.1, 0.2)); + assert_eq!(param, other_pair); + test_writing_fails(¶m); + test_cloning_works(¶m); + let praw = ParamsRaw::from(param); + test_writing(&praw, ¶m); + let p_try_from = F32Pair::try_from(param.to_be_bytes().as_ref()).expect("try_from failed"); + assert_eq!(p_try_from, param); + } + + #[test] + fn test_f32_triplet() { + let f32 = F32Triplet::from((0.1, -0.1, -5.2)); + assert_eq!(f32.0, 0.1); + assert_eq!(f32.1, -0.1); + assert_eq!(f32.2, -5.2); + assert_eq!(WritableToBeBytes::written_len(&f32), 12); + let f32_pair_raw = f32.to_be_bytes(); + let f32_0 = f32::from_be_bytes(f32_pair_raw[0..4].try_into().unwrap()); + assert_eq!(f32_0, 0.1); + let f32_1 = f32::from_be_bytes(f32_pair_raw[4..8].try_into().unwrap()); + assert_eq!(f32_1, -0.1); + let f32_2 = f32::from_be_bytes(f32_pair_raw[8..12].try_into().unwrap()); + assert_eq!(f32_2, -5.2); + test_writing_fails(&f32); + test_cloning_works(&f32); + let f32_praw = ParamsRaw::from(f32); + test_writing(&f32_praw, &f32); + let f32_try_from = + F32Triplet::try_from(f32.to_be_bytes().as_ref()).expect("try_from failed"); + assert_eq!(f32_try_from, f32); + } + + #[test] + fn test_u64_single() { + let u64 = U64::from(0x1010101010); + assert_eq!(u64.0, 0x1010101010); + assert_eq!(WritableToBeBytes::written_len(&u64), 8); + 
test_writing_fails(&u64); + test_cloning_works(&u64); + let praw = ParamsRaw::from(u64); + test_writing(&praw, &u64); + } + + #[test] + fn test_i64_single() { + let i64 = I64::from(-0xfffffffff); + assert_eq!(i64.0, -0xfffffffff); + assert_eq!(WritableToBeBytes::written_len(&i64), 8); + test_writing_fails(&i64); + test_cloning_works(&i64); + let praw = ParamsRaw::from(i64); + test_writing(&praw, &i64); + } + + #[test] + fn test_f64_single() { + let value = 823_823_812_832.232_3; + let f64 = F64::from(value); + assert_eq!(f64.0, value); + assert_eq!(WritableToBeBytes::written_len(&f64), 8); + test_writing_fails(&f64); + test_cloning_works(&f64); + let praw = ParamsRaw::from(f64); + test_writing(&praw, &f64); + } + + #[test] + fn test_f64_triplet() { + let f64_triplet = F64Triplet::from((0.1, 0.2, 0.3)); + assert_eq!(f64_triplet.0, 0.1); + assert_eq!(f64_triplet.1, 0.2); + assert_eq!(f64_triplet.2, 0.3); + assert_eq!(WritableToBeBytes::written_len(&f64_triplet), 24); + let f64_triplet_raw = f64_triplet.to_be_bytes(); + let f64_0 = f64::from_be_bytes(f64_triplet_raw[0..8].try_into().unwrap()); + assert_eq!(f64_0, 0.1); + let f64_1 = f64::from_be_bytes(f64_triplet_raw[8..16].try_into().unwrap()); + assert_eq!(f64_1, 0.2); + let f64_2 = f64::from_be_bytes(f64_triplet_raw[16..24].try_into().unwrap()); + assert_eq!(f64_2, 0.3); + test_writing_fails(&f64_triplet); + test_cloning_works(&f64_triplet); + } + + #[test] + fn test_u8_ecss_enum() { + let value = 200; + let u8p = EcssEnumU8::new(value); + test_cloning_works(&u8p); + let praw = ParamsEcssEnum::from(u8p); + assert_eq!(praw.written_len(), 1); + let mut buf = [0; 1]; + praw.write_to_be_bytes(&mut buf) + .expect("writing to buffer failed"); + buf[0] = 200; + } + + #[test] + fn test_u16_ecss_enum() { + let value = 60000; + let u16p = EcssEnumU16::new(value); + test_cloning_works(&u16p); + let praw = ParamsEcssEnum::from(u16p); + assert_eq!(praw.written_len(), 2); + let mut buf = [0; 2]; + praw.write_to_be_bytes(&mut buf) + .expect("writing to buffer failed"); + assert_eq!(u16::from_be_bytes(buf), value); + } + + #[test] + fn test_u32_ecss_enum() { + let value = 70000; + let u32p = EcssEnumU32::new(value); + test_cloning_works(&u32p); + let praw = ParamsEcssEnum::from(u32p); + assert_eq!(praw.written_len(), 4); + let mut buf = [0; 4]; + praw.write_to_be_bytes(&mut buf) + .expect("writing to buffer failed"); + assert_eq!(u32::from_be_bytes(buf), value); + } + + #[test] + fn test_u64_ecss_enum() { + let value = 0xffffffffff; + let u64p = EcssEnumU64::new(value); + test_cloning_works(&u64p); + let praw = ParamsEcssEnum::from(u64p); + assert_eq!(praw.written_len(), 8); + let mut buf = [0; 8]; + praw.write_to_be_bytes(&mut buf) + .expect("writing to buffer failed"); + assert_eq!(u64::from_be_bytes(buf), value); + } } diff --git a/satrs/src/pus/action.rs b/satrs/src/pus/action.rs index 2ee4815..875621f 100644 --- a/satrs/src/pus/action.rs +++ b/satrs/src/pus/action.rs @@ -1,6 +1,10 @@ -use crate::{action::ActionRequest, TargetId}; +use crate::{ + action::{ActionId, ActionRequest}, + params::Params, + request::{GenericMessage, MessageMetadata, RequestId}, +}; -use super::verification::{TcStateAccepted, VerificationToken}; +use satrs_shared::res_code::ResultU16; #[cfg(feature = "std")] #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] @@ -8,219 +12,278 @@ pub use std_mod::*; #[cfg(feature = "alloc")] #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +#[allow(unused_imports)] pub use alloc_mod::*; -/// This trait is an abstraction for the routing of PUS 
service 8 action requests to a dedicated -/// recipient using the generic [TargetId]. -pub trait PusActionRequestRouter { - type Error; - fn route( - &self, - target_id: TargetId, - hk_request: ActionRequest, - token: VerificationToken, - ) -> Result<(), Self::Error>; +#[derive(Clone, Debug)] +pub struct ActionRequestWithId { + pub request_id: RequestId, + pub request: ActionRequest, +} + +/// A reply to an action request, but tailored to the PUS standard verification process. +#[non_exhaustive] +#[derive(Clone, PartialEq, Debug)] +pub enum ActionReplyVariant { + Completed, + StepSuccess { + step: u16, + }, + CompletionFailed { + error_code: ResultU16, + params: Option, + }, + StepFailed { + error_code: ResultU16, + step: u16, + params: Option, + }, +} + +#[derive(Debug, PartialEq, Clone)] +pub struct PusActionReply { + pub action_id: ActionId, + pub variant: ActionReplyVariant, +} + +impl PusActionReply { + pub fn new(action_id: ActionId, variant: ActionReplyVariant) -> Self { + Self { action_id, variant } + } +} + +pub type GenericActionReplyPus = GenericMessage; + +impl GenericActionReplyPus { + pub fn new_action_reply( + requestor_info: MessageMetadata, + action_id: ActionId, + reply: ActionReplyVariant, + ) -> Self { + Self::new(requestor_info, PusActionReply::new(action_id, reply)) + } } #[cfg(feature = "alloc")] #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] pub mod alloc_mod { - use spacepackets::ecss::tc::PusTcReader; + use crate::{ + action::ActionRequest, + queue::GenericTargetedMessagingError, + request::{ + GenericMessage, MessageReceiver, MessageSender, MessageSenderAndReceiver, RequestId, + }, + ComponentId, + }; - use crate::pus::verification::VerificationReportingProvider; + use super::PusActionReply; - use super::*; + /// Helper type definition for a mode handler which can handle mode requests. + pub type ActionRequestHandlerInterface = + MessageSenderAndReceiver; - /// This trait is an abstraction for the conversion of a PUS service 8 action telecommand into - /// an [ActionRequest]. - /// - /// Having a dedicated trait for this allows maximum flexiblity and tailoring of the standard. - /// The only requirement is that a valid [TargetId] and an [ActionRequest] are returned by the - /// core conversion function. - /// - /// The user should take care of performing the error handling as well. Some of the following - /// aspects might be relevant: - /// - /// - Checking the validity of the APID, service ID, subservice ID. - /// - Checking the validity of the user data. - /// - /// A [VerificationReportingProvider] instance is passed to the user to also allow handling - /// of the verification process as part of the PUS standard requirements. - pub trait PusActionToRequestConverter { - type Error; - fn convert( - &mut self, - token: VerificationToken, - tc: &PusTcReader, - time_stamp: &[u8], - verif_reporter: &impl VerificationReportingProvider, - ) -> Result<(TargetId, ActionRequest), Self::Error>; + impl, R: MessageReceiver> + ActionRequestHandlerInterface + { + pub fn try_recv_action_request( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.try_recv_message() + } + + pub fn send_action_reply( + &self, + request_id: RequestId, + target_id: ComponentId, + reply: PusActionReply, + ) -> Result<(), GenericTargetedMessagingError> { + self.send_message(request_id, target_id, reply) + } + } + + /// Helper type defintion for a mode handler object which can send mode requests and receive + /// mode replies. 
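The new reply types above replace the old request-router traits: an action reply carries the action ID plus a variant tailored to the PUS verification steps (completion, step success, failure with an error code and optional parameters). A standalone sketch of consuming such a reply to decide on a verification outcome; the `Verification` enum and `to_verification` function are invented, and error codes are plain `u16`s here instead of `ResultU16`:

```rust
type ActionId = u32;

#[derive(Debug, Clone, PartialEq)]
enum ToyActionReplyVariant {
    Completed,
    StepSuccess { step: u16 },
    CompletionFailed { error_code: u16 },
    StepFailed { error_code: u16, step: u16 },
}

#[derive(Debug, Clone, PartialEq)]
struct ToyActionReply {
    action_id: ActionId,
    variant: ToyActionReplyVariant,
}

#[derive(Debug, PartialEq)]
enum Verification {
    CompletionSuccess,
    StepSuccess(u16),
    CompletionFailure(u16),
    StepFailure { step: u16, code: u16 },
}

/// Map an action reply onto the PUS verification report it should trigger.
fn to_verification(reply: &ToyActionReply) -> Verification {
    match reply.variant {
        ToyActionReplyVariant::Completed => Verification::CompletionSuccess,
        ToyActionReplyVariant::StepSuccess { step } => Verification::StepSuccess(step),
        ToyActionReplyVariant::CompletionFailed { error_code } => {
            Verification::CompletionFailure(error_code)
        }
        ToyActionReplyVariant::StepFailed { error_code, step } => {
            Verification::StepFailure { step, code: error_code }
        }
    }
}

fn main() {
    let reply = ToyActionReply {
        action_id: 5,
        variant: ToyActionReplyVariant::StepFailed { error_code: 0x0201, step: 2 },
    };
    assert_eq!(
        to_verification(&reply),
        Verification::StepFailure { step: 2, code: 0x0201 }
    );
}
```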
+ pub type ActionRequestorInterface = + MessageSenderAndReceiver; + + impl, R: MessageReceiver> + ActionRequestorInterface + { + pub fn try_recv_action_reply( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.try_recv_message() + } + + pub fn send_action_request( + &self, + request_id: RequestId, + target_id: ComponentId, + request: ActionRequest, + ) -> Result<(), GenericTargetedMessagingError> { + self.send_message(request_id, target_id, request) + } } } #[cfg(feature = "std")] #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] pub mod std_mod { - use crate::pus::{ - get_current_cds_short_timestamp, verification::VerificationReportingProvider, - EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore, GenericRoutingError, - PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler, PusServiceHelper, + use std::sync::mpsc; + + use crate::{ + pus::{ + verification::{self, TcStateToken}, + ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap, + }, + ComponentId, }; use super::*; - /// This is a high-level handler for the PUS service 8 action service. - /// - /// It performs the following handling steps: - /// - /// 1. Retrieve the next TC packet from the [PusServiceHelper]. The [EcssTcInMemConverter] - /// allows to configure the used telecommand memory backend. - /// 2. Convert the TC to a targeted action request using the provided - /// [PusActionToRequestConverter]. The generic error type is constrained to the - /// [PusPacketHandlingError] for the concrete implementation which offers a packet handler. - /// 3. Route the action request using the provided [PusActionRequestRouter]. - /// 4. Handle all routing errors using the provided [PusRoutingErrorHandler]. - pub struct PusService8ActionHandler< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - RequestConverter: PusActionToRequestConverter, - RequestRouter: PusActionRequestRouter, - RoutingErrorHandler: PusRoutingErrorHandler, - RoutingError = GenericRoutingError, - > { - service_helper: - PusServiceHelper, - pub request_converter: RequestConverter, - pub request_router: RequestRouter, - pub routing_error_handler: RoutingErrorHandler, + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct ActivePusActionRequestStd { + pub action_id: ActionId, + common: ActivePusRequestStd, } - impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - RequestConverter: PusActionToRequestConverter, - RequestRouter: PusActionRequestRouter, - RoutingErrorHandler: PusRoutingErrorHandler, - RoutingError: Clone, - > - PusService8ActionHandler< - TcReceiver, - TmSender, - TcInMemConverter, - VerificationReporter, - RequestConverter, - RequestRouter, - RoutingErrorHandler, - RoutingError, - > - where - PusPacketHandlingError: From, - { + impl ActiveRequestProvider for ActivePusActionRequestStd { + delegate::delegate! 
{ + to self.common { + fn target_id(&self) -> ComponentId; + fn token(&self) -> verification::TcStateToken; + fn set_token(&mut self, token: verification::TcStateToken); + fn has_timed_out(&self) -> bool; + fn timeout(&self) -> core::time::Duration; + } + } + } + + impl ActivePusActionRequestStd { + pub fn new_from_common_req(action_id: ActionId, common: ActivePusRequestStd) -> Self { + Self { action_id, common } + } + pub fn new( - service_helper: PusServiceHelper< - TcReceiver, - TmSender, - TcInMemConverter, - VerificationReporter, - >, - request_converter: RequestConverter, - request_router: RequestRouter, - routing_error_handler: RoutingErrorHandler, + action_id: ActionId, + target_id: ComponentId, + token: TcStateToken, + timeout: core::time::Duration, ) -> Self { Self { - service_helper, - request_converter, - request_router, - routing_error_handler, + action_id, + common: ActivePusRequestStd::new(target_id, token, timeout), } } - - /// Core function to poll the next TC packet and try to handle it. - pub fn handle_one_tc(&mut self) -> Result { - let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?; - if possible_packet.is_none() { - return Ok(PusPacketHandlerResult::Empty); - } - let ecss_tc_and_token = possible_packet.unwrap(); - let tc = self - .service_helper - .tc_in_mem_converter - .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?; - let mut partial_error = None; - let time_stamp = get_current_cds_short_timestamp(&mut partial_error); - let (target_id, action_request) = self.request_converter.convert( - ecss_tc_and_token.token, - &tc, - &time_stamp, - &self.service_helper.common.verification_handler, - )?; - if let Err(e) = - self.request_router - .route(target_id, action_request, ecss_tc_and_token.token) - { - self.routing_error_handler.handle_error( - target_id, - ecss_tc_and_token.token, - &tc, - e.clone(), - &time_stamp, - &self.service_helper.common.verification_handler, - ); - return Err(e.into()); - } - Ok(PusPacketHandlerResult::RequestHandled) - } } + pub type DefaultActiveActionRequestMap = DefaultActiveRequestMap; + + pub type ActionRequestHandlerMpsc = ActionRequestHandlerInterface< + mpsc::Sender>, + mpsc::Receiver>, + >; + pub type ActionRequestHandlerMpscBounded = ActionRequestHandlerInterface< + mpsc::SyncSender>, + mpsc::Receiver>, + >; + + pub type ActionRequestorMpsc = ActionRequestorInterface< + mpsc::Sender>, + mpsc::Receiver>, + >; + pub type ActionRequestorBoundedMpsc = ActionRequestorInterface< + mpsc::SyncSender>, + mpsc::Receiver>, + >; + + /* + pub type ModeRequestorAndHandlerMpsc = ModeInterface< + mpsc::Sender>, + mpsc::Receiver>, + mpsc::Sender>, + mpsc::Receiver>, + >; + pub type ModeRequestorAndHandlerMpscBounded = ModeInterface< + mpsc::SyncSender>, + mpsc::Receiver>, + mpsc::SyncSender>, + mpsc::Receiver>, + >; + */ } #[cfg(test)] mod tests { + /* + use core::{cell::RefCell, time::Duration}; + use std::{sync::mpsc, time::SystemTimeError}; + + use alloc::{collections::VecDeque, vec::Vec}; use delegate::delegate; use spacepackets::{ ecss::{ - tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader}, + tc::{PusTcCreator, PusTcReader}, tm::PusTmReader, PusPacket, }, - CcsdsPacket, SequenceFlags, SpHeader, + time::{cds, TimeWriter}, + CcsdsPacket, }; - use crate::pus::{ - tests::{ - PusServiceHandlerWithVecCommon, PusTestHarness, SimplePusPacketHandler, TestConverter, - TestRouter, TestRoutingErrorHandler, APP_DATA_TOO_SHORT, TEST_APID, + use crate::{ + action::ActionRequestVariant, + params::{self, 
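`ActivePusActionRequestStd` above delegates its bookkeeping (target ID, verification token, timeout check) to a shared `ActivePusRequestStd`, and the request map aliases key such records by request ID. The following standalone sketch shows the timeout-tracking part of an active-request record using `std::time::Instant`; the field names mirror the diff, but `ToyActiveActionRequest` itself is an invented simplification:

```rust
use std::collections::HashMap;
use std::thread;
use std::time::{Duration, Instant};

type ComponentId = u64;
type RequestId = u32;
type ActionId = u32;

/// Minimal active-request record: who it targets, when it started, how long it may take.
#[derive(Debug, Clone)]
struct ToyActiveActionRequest {
    action_id: ActionId,
    target_id: ComponentId,
    start: Instant,
    timeout: Duration,
}

impl ToyActiveActionRequest {
    fn has_timed_out(&self) -> bool {
        self.start.elapsed() > self.timeout
    }
}

fn main() {
    // Active request map keyed by the request ID, analogous to the request map aliases above.
    let mut active: HashMap<RequestId, ToyActiveActionRequest> = HashMap::new();
    active.insert(
        1,
        ToyActiveActionRequest {
            action_id: 10,
            target_id: 42,
            start: Instant::now(),
            timeout: Duration::from_millis(5),
        },
    );
    thread::sleep(Duration::from_millis(10));
    // Requests which exceeded their deadline would trigger a timeout verification failure.
    for (req_id, req) in &active {
        if req.has_timed_out() {
            println!(
                "request {} (action {} on component {}) timed out",
                req_id, req.action_id, req.target_id
            );
        }
    }
}
```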
ParamsRaw, WritableToBeBytes}, + pus::{ + tests::{ + PusServiceHandlerWithVecCommon, PusTestHarness, SimplePusPacketHandler, + TestConverter, TestRouter, APP_DATA_TOO_SHORT, + }, + verification::{ + self, + tests::{SharedVerificationMap, TestVerificationReporter, VerificationStatus}, + FailParams, TcStateAccepted, TcStateNone, TcStateStarted, + VerificationReportingProvider, + }, + EcssTcInMemConverter, EcssTcInVecConverter, EcssTmtcError, GenericRoutingError, + MpscTcReceiver, PusPacketHandlerResult, PusPacketHandlingError, PusRequestRouter, + PusServiceHelper, PusTcToRequestConverter, TmAsVecSenderWithMpsc, }, - verification::{ - tests::TestVerificationReporter, FailParams, RequestId, VerificationReportingProvider, - }, - EcssTcInVecConverter, GenericRoutingError, MpscTcReceiver, PusPacketHandlerResult, - PusPacketHandlingError, TmAsVecSenderWithMpsc, }; use super::*; - impl PusActionRequestRouter for TestRouter { + impl PusRequestRouter for TestRouter { type Error = GenericRoutingError; fn route( &self, target_id: TargetId, - hk_request: ActionRequest, + request: Request, _token: VerificationToken, ) -> Result<(), Self::Error> { self.routing_requests .borrow_mut() - .push_back((target_id, hk_request)); + .push_back((target_id, request)); self.check_for_injected_error() } + + fn handle_error( + &self, + target_id: TargetId, + token: VerificationToken, + tc: &PusTcReader, + error: Self::Error, + time_stamp: &[u8], + verif_reporter: &impl VerificationReportingProvider, + ) { + self.routing_errors + .borrow_mut() + .push_back((target_id, error)); + } } - impl PusActionToRequestConverter for TestConverter<8> { + impl PusTcToRequestConverter for TestConverter<8> { type Error = PusPacketHandlingError; fn convert( &mut self, @@ -254,9 +317,9 @@ mod tests { .expect("start success failure"); return Ok(( target_id.into(), - ActionRequest::UnsignedIdAndVecData { + ActionRequest { action_id: u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap()), - data: tc.user_data()[4..].to_vec(), + variant: ActionRequestVariant::VecData(tc.user_data()[4..].to_vec()), }, )); } @@ -266,31 +329,32 @@ mod tests { } } - struct Pus8HandlerWithVecTester { - common: PusServiceHandlerWithVecCommon, - handler: PusService8ActionHandler< + pub struct PusDynRequestHandler { + srv_helper: PusServiceHelper< MpscTcReceiver, TmAsVecSenderWithMpsc, EcssTcInVecConverter, TestVerificationReporter, - TestConverter<8>, - TestRouter, - TestRoutingErrorHandler, >, + request_converter: TestConverter, + request_router: TestRouter, } - impl Pus8HandlerWithVecTester { + struct Pus8RequestTestbenchWithVec { + common: PusServiceHandlerWithVecCommon, + handler: PusDynRequestHandler<8, ActionRequest>, + } + + impl Pus8RequestTestbenchWithVec { pub fn new() -> Self { - let (common, srv_handler) = - PusServiceHandlerWithVecCommon::new_with_test_verif_sender(); + let (common, srv_helper) = PusServiceHandlerWithVecCommon::new_with_test_verif_sender(); Self { common, - handler: PusService8ActionHandler::new( - srv_handler, - TestConverter::default(), - TestRouter::default(), - TestRoutingErrorHandler::default(), - ), + handler: PusDynRequestHandler { + srv_helper, + request_converter: TestConverter::default(), + request_router: TestRouter::default(), + }, } } @@ -305,13 +369,13 @@ mod tests { } } delegate! 
{ - to self.handler.routing_error_handler { - pub fn retrieve_next_error(&mut self) -> (TargetId, GenericRoutingError); + to self.handler.request_router { + pub fn retrieve_next_routing_error(&mut self) -> (TargetId, GenericRoutingError); } } } - impl PusTestHarness for Pus8HandlerWithVecTester { + impl PusTestHarness for Pus8RequestTestbenchWithVec { delegate! { to self.common { fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken; @@ -320,78 +384,421 @@ mod tests { fn check_next_verification_tm( &self, subservice: u8, - expected_request_id: RequestId, + expected_request_id: verification::RequestId, ); } } } - impl SimplePusPacketHandler for Pus8HandlerWithVecTester { + impl SimplePusPacketHandler for Pus8RequestTestbenchWithVec { + fn handle_one_tc(&mut self) -> Result { + let possible_packet = self.handler.srv_helper.retrieve_and_accept_next_packet()?; + if possible_packet.is_none() { + return Ok(PusPacketHandlerResult::Empty); + } + let ecss_tc_and_token = possible_packet.unwrap(); + let tc = self + .handler + .srv_helper + .tc_in_mem_converter + .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?; + let time_stamp = cds::TimeProvider::from_now_with_u16_days() + .expect("timestamp generation failed") + .to_vec() + .unwrap(); + let (target_id, action_request) = self.handler.request_converter.convert( + ecss_tc_and_token.token, + &tc, + &time_stamp, + &self.handler.srv_helper.common.verification_handler, + )?; + if let Err(e) = self.handler.request_router.route( + target_id, + action_request, + ecss_tc_and_token.token, + ) { + self.handler.request_router.handle_error( + target_id, + ecss_tc_and_token.token, + &tc, + e.clone(), + &time_stamp, + &self.handler.srv_helper.common.verification_handler, + ); + return Err(e.into()); + } + Ok(PusPacketHandlerResult::RequestHandled) + } + } + + const TIMEOUT_ERROR_CODE: ResultU16 = ResultU16::new(1, 2); + const COMPLETION_ERROR_CODE: ResultU16 = ResultU16::new(2, 0); + const COMPLETION_ERROR_CODE_STEP: ResultU16 = ResultU16::new(2, 1); + + #[derive(Default)] + pub struct TestReplyHandlerHook { + pub unexpected_replies: VecDeque, + pub timeouts: RefCell>, + } + + impl ReplyHandlerHook for TestReplyHandlerHook { + fn handle_unexpected_reply(&mut self, reply: &GenericActionReplyPus) { + self.unexpected_replies.push_back(reply.clone()); + } + + fn timeout_callback(&self, active_request: &ActivePusActionRequest) { + self.timeouts.borrow_mut().push_back(active_request.clone()); + } + + fn timeout_error_code(&self) -> ResultU16 { + TIMEOUT_ERROR_CODE + } + } + + pub struct Pus8ReplyTestbench { + verif_reporter: TestVerificationReporter, + #[allow(dead_code)] + ecss_tm_receiver: mpsc::Receiver>, + handler: PusService8ReplyHandler< + TestVerificationReporter, + DefaultActiveActionRequestMap, + TestReplyHandlerHook, + mpsc::Sender>, + >, + } + + impl Pus8ReplyTestbench { + pub fn new(normal_ctor: bool) -> Self { + let reply_handler_hook = TestReplyHandlerHook::default(); + let shared_verif_map = SharedVerificationMap::default(); + let test_verif_reporter = TestVerificationReporter::new(shared_verif_map.clone()); + let (ecss_tm_sender, ecss_tm_receiver) = mpsc::channel(); + let reply_handler = if normal_ctor { + PusService8ReplyHandler::new_from_now_with_default_map( + test_verif_reporter.clone(), + 128, + reply_handler_hook, + ecss_tm_sender, + ) + .expect("creating reply handler failed") + } else { + PusService8ReplyHandler::new_from_now( + test_verif_reporter.clone(), + DefaultActiveActionRequestMap::default(), + 128, + 
reply_handler_hook, + ecss_tm_sender, + ) + .expect("creating reply handler failed") + }; + Self { + verif_reporter: test_verif_reporter, + ecss_tm_receiver, + handler: reply_handler, + } + } + + pub fn init_handling_for_request( + &mut self, + request_id: RequestId, + _action_id: ActionId, + ) -> VerificationToken { + assert!(!self.handler.request_active(request_id)); + // let action_req = ActionRequest::new(action_id, ActionRequestVariant::NoData); + let token = self.add_tc_with_req_id(request_id.into()); + let token = self + .verif_reporter + .acceptance_success(token, &[]) + .expect("acceptance success failure"); + let token = self + .verif_reporter + .start_success(token, &[]) + .expect("start success failure"); + let verif_info = self + .verif_reporter + .verification_info(&verification::RequestId::from(request_id)) + .expect("no verification info found"); + assert!(verif_info.started.expect("request was not started")); + assert!(verif_info.accepted.expect("request was not accepted")); + token + } + + pub fn next_unrequested_reply(&self) -> Option { + self.handler.user_hook.unexpected_replies.front().cloned() + } + + pub fn assert_request_completion_success(&self, step: Option, request_id: RequestId) { + let verif_info = self + .verif_reporter + .verification_info(&verification::RequestId::from(request_id)) + .expect("no verification info found"); + self.assert_request_completion_common(request_id, &verif_info, step, true); + } + + pub fn assert_request_completion_failure( + &self, + step: Option, + request_id: RequestId, + fail_enum: ResultU16, + fail_data: &[u8], + ) { + let verif_info = self + .verif_reporter + .verification_info(&verification::RequestId::from(request_id)) + .expect("no verification info found"); + self.assert_request_completion_common(request_id, &verif_info, step, false); + assert_eq!(verif_info.fail_enum.unwrap(), fail_enum.raw() as u64); + assert_eq!(verif_info.failure_data.unwrap(), fail_data); + } + + pub fn assert_request_completion_common( + &self, + request_id: RequestId, + verif_info: &VerificationStatus, + step: Option, + completion_success: bool, + ) { + if let Some(step) = step { + assert!(verif_info.step_status.is_some()); + assert!(verif_info.step_status.unwrap()); + assert_eq!(step, verif_info.step); + } + assert_eq!( + verif_info.completed.expect("request is not completed"), + completion_success + ); + assert!(!self.handler.request_active(request_id)); + } + + pub fn assert_request_step_failure(&self, step: u16, request_id: RequestId) { + let verif_info = self + .verif_reporter + .verification_info(&verification::RequestId::from(request_id)) + .expect("no verification info found"); + assert!(verif_info.step_status.is_some()); + assert!(!verif_info.step_status.unwrap()); + assert_eq!(step, verif_info.step); + } + pub fn add_routed_request( + &mut self, + request_id: verification::RequestId, + target_id: TargetId, + action_id: ActionId, + token: VerificationToken, + timeout: Duration, + ) { + if self.handler.request_active(request_id.into()) { + panic!("request already present"); + } + self.handler + .add_routed_action_request(request_id, target_id, action_id, token, timeout); + if !self.handler.request_active(request_id.into()) { + panic!("request should be active now"); + } + } + delegate! 
{ to self.handler { - fn handle_one_tc(&mut self) -> Result; + pub fn request_active(&self, request_id: RequestId) -> bool; + + pub fn handle_action_reply( + &mut self, + action_reply_with_ids: GenericMessage, + time_stamp: &[u8] + ) -> Result<(), EcssTmtcError>; + + pub fn update_time_from_now(&mut self) -> Result<(), SystemTimeError>; + + pub fn check_for_timeouts(&mut self, time_stamp: &[u8]) -> Result<(), EcssTmtcError>; + } + to self.verif_reporter { + fn add_tc_with_req_id(&mut self, req_id: verification::RequestId) -> VerificationToken; } } } #[test] - fn basic_test() { - let mut action_handler = Pus8HandlerWithVecTester::new(); - let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); - let sec_header = PusTcSecondaryHeader::new_simple(8, 1); - let action_id: u32 = 1; - let action_id_raw = action_id.to_be_bytes(); - let tc = PusTcCreator::new(&mut sp_header, sec_header, action_id_raw.as_ref(), true); - action_handler.send_tc(&tc); - let result = action_handler.handle_one_tc(); - assert!(result.is_ok()); - action_handler.check_next_conversion(&tc); - let (target_id, action_req) = action_handler.retrieve_next_request(); - assert_eq!(target_id, TEST_APID.into()); - if let ActionRequest::UnsignedIdAndVecData { action_id, data } = action_req { - assert_eq!(action_id, 1); - assert_eq!(data, &[]); - } + fn test_reply_handler_completion_success() { + let mut reply_testbench = Pus8ReplyTestbench::new(true); + let sender_id = 0x06; + let request_id = 0x02; + let target_id = 0x05; + let action_id = 0x03; + let token = reply_testbench.init_handling_for_request(request_id, action_id); + reply_testbench.add_routed_request( + request_id.into(), + target_id, + action_id, + token, + Duration::from_millis(1), + ); + assert!(reply_testbench.request_active(request_id)); + let action_reply = GenericMessage::new( + request_id, + sender_id, + ActionReplyPusWithActionId { + action_id, + variant: ActionReplyPus::Completed, + }, + ); + reply_testbench + .handle_action_reply(action_reply, &[]) + .expect("reply handling failure"); + reply_testbench.assert_request_completion_success(None, request_id); } #[test] - fn test_routing_error() { - let mut action_handler = Pus8HandlerWithVecTester::new(); - let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); - let sec_header = PusTcSecondaryHeader::new_simple(8, 1); - let action_id: u32 = 1; - let action_id_raw = action_id.to_be_bytes(); - let tc = PusTcCreator::new(&mut sp_header, sec_header, action_id_raw.as_ref(), true); - let error = GenericRoutingError::UnknownTargetId(25); - action_handler - .handler - .request_router - .inject_routing_error(error); - action_handler.send_tc(&tc); - let result = action_handler.handle_one_tc(); - assert!(result.is_err()); - let check_error = |routing_error: GenericRoutingError| { - if let GenericRoutingError::UnknownTargetId(id) = routing_error { - assert_eq!(id, 25); - } else { - panic!("unexpected error type"); - } - }; - if let PusPacketHandlingError::RequestRoutingError(routing_error) = result.unwrap_err() { - check_error(routing_error); - } else { - panic!("unexpected error type"); - } - - action_handler.check_next_conversion(&tc); - let (target_id, action_req) = action_handler.retrieve_next_request(); - assert_eq!(target_id, TEST_APID.into()); - if let ActionRequest::UnsignedIdAndVecData { action_id, data } = action_req { - assert_eq!(action_id, 1); - assert_eq!(data, &[]); - } - - let (target_id, found_error) = action_handler.retrieve_next_error(); - 
assert_eq!(target_id, TEST_APID.into()); - check_error(found_error); + fn test_reply_handler_step_success() { + let mut reply_testbench = Pus8ReplyTestbench::new(false); + let request_id = 0x02; + let target_id = 0x05; + let action_id = 0x03; + let token = reply_testbench.init_handling_for_request(request_id, action_id); + reply_testbench.add_routed_request( + request_id.into(), + target_id, + action_id, + token, + Duration::from_millis(1), + ); + let action_reply = GenericActionReplyPus::new_action_reply( + request_id, + action_id, + action_id, + ActionReplyPus::StepSuccess { step: 1 }, + ); + reply_testbench + .handle_action_reply(action_reply, &[]) + .expect("reply handling failure"); + let action_reply = GenericActionReplyPus::new_action_reply( + request_id, + action_id, + action_id, + ActionReplyPus::Completed, + ); + reply_testbench + .handle_action_reply(action_reply, &[]) + .expect("reply handling failure"); + reply_testbench.assert_request_completion_success(Some(1), request_id); } + + #[test] + fn test_reply_handler_completion_failure() { + let mut reply_testbench = Pus8ReplyTestbench::new(true); + let sender_id = 0x01; + let request_id = 0x02; + let target_id = 0x05; + let action_id = 0x03; + let token = reply_testbench.init_handling_for_request(request_id, action_id); + reply_testbench.add_routed_request( + request_id.into(), + target_id, + action_id, + token, + Duration::from_millis(1), + ); + let params_raw = ParamsRaw::U32(params::U32(5)); + let action_reply = GenericActionReplyPus::new_action_reply( + request_id, + sender_id, + action_id, + ActionReplyPus::CompletionFailed { + error_code: COMPLETION_ERROR_CODE, + params: params_raw.into(), + }, + ); + reply_testbench + .handle_action_reply(action_reply, &[]) + .expect("reply handling failure"); + reply_testbench.assert_request_completion_failure( + None, + request_id, + COMPLETION_ERROR_CODE, + ¶ms_raw.to_vec().unwrap(), + ); + } + + #[test] + fn test_reply_handler_step_failure() { + let mut reply_testbench = Pus8ReplyTestbench::new(false); + let sender_id = 0x01; + let request_id = 0x02; + let target_id = 0x05; + let action_id = 0x03; + let token = reply_testbench.init_handling_for_request(request_id, action_id); + reply_testbench.add_routed_request( + request_id.into(), + target_id, + action_id, + token, + Duration::from_millis(1), + ); + let action_reply = GenericActionReplyPus::new_action_reply( + request_id, + sender_id, + action_id, + ActionReplyPus::StepFailed { + error_code: COMPLETION_ERROR_CODE_STEP, + step: 2, + params: ParamsRaw::U32(crate::params::U32(5)).into(), + }, + ); + reply_testbench + .handle_action_reply(action_reply, &[]) + .expect("reply handling failure"); + reply_testbench.assert_request_step_failure(2, request_id); + } + + #[test] + fn test_reply_handler_timeout_handling() { + let mut reply_testbench = Pus8ReplyTestbench::new(true); + let request_id = 0x02; + let target_id = 0x06; + let action_id = 0x03; + let token = reply_testbench.init_handling_for_request(request_id, action_id); + reply_testbench.add_routed_request( + request_id.into(), + target_id, + action_id, + token, + Duration::from_millis(1), + ); + let timeout_param = Duration::from_millis(1).as_millis() as u64; + let timeout_param_raw = timeout_param.to_be_bytes(); + std::thread::sleep(Duration::from_millis(2)); + reply_testbench + .update_time_from_now() + .expect("time update failure"); + reply_testbench.check_for_timeouts(&[]).unwrap(); + reply_testbench.assert_request_completion_failure( + None, + request_id, + TIMEOUT_ERROR_CODE, 
+ &timeout_param_raw, + ); + } + + #[test] + fn test_unrequested_reply() { + let mut reply_testbench = Pus8ReplyTestbench::new(true); + let sender_id = 0x01; + let request_id = 0x02; + let action_id = 0x03; + + let action_reply = GenericActionReplyPus::new_action_reply( + request_id, + sender_id, + action_id, + ActionReplyPus::Completed, + ); + reply_testbench + .handle_action_reply(action_reply, &[]) + .expect("reply handling failure"); + let reply = reply_testbench.next_unrequested_reply(); + assert!(reply.is_some()); + let reply = reply.unwrap(); + assert_eq!(reply.message.action_id, action_id); + assert_eq!(reply.request_id, request_id); + assert_eq!(reply.message.variant, ActionReplyPus::Completed); + } + */ } diff --git a/satrs/src/pus/event.rs b/satrs/src/pus/event.rs index 4165601..6343f3b 100644 --- a/satrs/src/pus/event.rs +++ b/satrs/src/pus/event.rs @@ -28,7 +28,7 @@ impl EventReportCreator { } pub fn event_info<'time, 'src_data>( - &mut self, + &self, src_data_buf: &'src_data mut [u8], time_stamp: &'time [u8], event_id: impl EcssEnumeration, @@ -44,7 +44,7 @@ impl EventReportCreator { } pub fn event_low_severity<'time, 'src_data>( - &mut self, + &self, src_data_buf: &'src_data mut [u8], time_stamp: &'time [u8], event_id: impl EcssEnumeration, @@ -60,7 +60,7 @@ impl EventReportCreator { } pub fn event_medium_severity<'time, 'src_data>( - &mut self, + &self, buf: &'src_data mut [u8], time_stamp: &'time [u8], event_id: impl EcssEnumeration, @@ -76,7 +76,7 @@ impl EventReportCreator { } pub fn event_high_severity<'time, 'src_data>( - &mut self, + &self, src_data_buf: &'src_data mut [u8], time_stamp: &'time [u8], event_id: impl EcssEnumeration, @@ -92,7 +92,7 @@ impl EventReportCreator { } fn generate_and_send_generic_tm<'time, 'src_data>( - &mut self, + &self, src_data_buf: &'src_data mut [u8], subservice: Subservice, time_stamp: &'time [u8], @@ -137,99 +137,94 @@ impl EventReportCreator { #[cfg(feature = "alloc")] mod alloc_mod { use super::*; + use crate::ComponentId; use alloc::vec; use alloc::vec::Vec; + use core::cell::RefCell; pub struct EventReporter { - source_data_buf: Vec, - pub reporter: EventReportCreator, + id: ComponentId, + // Use interior mutability pattern here. This is just an intermediate buffer to the PUS event packet + // generation. 
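// Because of this buffer, all reporting methods below can take &self. A minimal usage sketch of
// that API, with argument values mirroring the tests further down (component ID, APID and buffer
// size are illustrative, and this patch makes an mpsc::Sender<PusTmAsVec> usable as the TM sender):
//
//     let (tm_tx, _tm_rx) = std::sync::mpsc::channel::<PusTmAsVec>();
//     let reporter = EventReporter::new(0x01, 0x02, 128).expect("creating event reporter failed");
//     reporter
//         .event_info(&tm_tx, &[0; 7], EventU32::const_new(Severity::INFO, 0, 0), None)
//         .expect("sending event info TM failed");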
+ source_data_buf: RefCell>, + pub report_creator: EventReportCreator, } impl EventReporter { - pub fn new(apid: u16, max_event_id_and_aux_data_size: usize) -> Option { + pub fn new( + id: ComponentId, + apid: u16, + max_event_id_and_aux_data_size: usize, + ) -> Option { let reporter = EventReportCreator::new(apid)?; Some(Self { - source_data_buf: vec![0; max_event_id_and_aux_data_size], - reporter, + id, + source_data_buf: RefCell::new(vec![0; max_event_id_and_aux_data_size]), + report_creator: reporter, }) } + pub fn event_info( - &mut self, - sender: &mut (impl EcssTmSenderCore + ?Sized), + &self, + sender: &(impl EcssTmSenderCore + ?Sized), time_stamp: &[u8], event_id: impl EcssEnumeration, aux_data: Option<&[u8]>, ) -> Result<(), EcssTmtcError> { + let mut mut_buf = self.source_data_buf.borrow_mut(); let tm_creator = self - .reporter - .event_info( - self.source_data_buf.as_mut_slice(), - time_stamp, - event_id, - aux_data, - ) + .report_creator + .event_info(mut_buf.as_mut_slice(), time_stamp, event_id, aux_data) .map_err(PusError::ByteConversion)?; - sender.send_tm(tm_creator.into())?; + sender.send_tm(self.id, tm_creator.into())?; Ok(()) } pub fn event_low_severity( - &mut self, - sender: &mut (impl EcssTmSenderCore + ?Sized), + &self, + sender: &(impl EcssTmSenderCore + ?Sized), time_stamp: &[u8], event_id: impl EcssEnumeration, aux_data: Option<&[u8]>, ) -> Result<(), EcssTmtcError> { + let mut mut_buf = self.source_data_buf.borrow_mut(); let tm_creator = self - .reporter - .event_low_severity( - self.source_data_buf.as_mut_slice(), - time_stamp, - event_id, - aux_data, - ) + .report_creator + .event_low_severity(mut_buf.as_mut_slice(), time_stamp, event_id, aux_data) .map_err(PusError::ByteConversion)?; - sender.send_tm(tm_creator.into())?; + sender.send_tm(self.id, tm_creator.into())?; Ok(()) } pub fn event_medium_severity( - &mut self, - sender: &mut (impl EcssTmSenderCore + ?Sized), + &self, + sender: &(impl EcssTmSenderCore + ?Sized), time_stamp: &[u8], event_id: impl EcssEnumeration, aux_data: Option<&[u8]>, ) -> Result<(), EcssTmtcError> { + let mut mut_buf = self.source_data_buf.borrow_mut(); let tm_creator = self - .reporter - .event_medium_severity( - self.source_data_buf.as_mut_slice(), - time_stamp, - event_id, - aux_data, - ) + .report_creator + .event_medium_severity(mut_buf.as_mut_slice(), time_stamp, event_id, aux_data) .map_err(PusError::ByteConversion)?; - sender.send_tm(tm_creator.into())?; + sender.send_tm(self.id, tm_creator.into())?; Ok(()) } pub fn event_high_severity( - &mut self, - sender: &mut (impl EcssTmSenderCore + ?Sized), + &self, + sender: &(impl EcssTmSenderCore + ?Sized), time_stamp: &[u8], event_id: impl EcssEnumeration, aux_data: Option<&[u8]>, ) -> Result<(), EcssTmtcError> { + let mut mut_buf = self.source_data_buf.borrow_mut(); let tm_creator = self - .reporter - .event_high_severity( - self.source_data_buf.as_mut_slice(), - time_stamp, - event_id, - aux_data, - ) + .report_creator + .event_high_severity(mut_buf.as_mut_slice(), time_stamp, event_id, aux_data) .map_err(PusError::ByteConversion)?; - sender.send_tm(tm_creator.into())?; + sender.send_tm(self.id, tm_creator.into())?; Ok(()) } } @@ -239,9 +234,10 @@ mod alloc_mod { mod tests { use super::*; use crate::events::{EventU32, Severity}; + use crate::pus::test_util::TEST_COMPONENT_ID; use crate::pus::tests::CommonTmInfo; - use crate::pus::{EcssChannel, PusTmWrapper}; - use crate::ChannelId; + use crate::pus::{ChannelWithId, PusTmVariant}; + use crate::ComponentId; use 
spacepackets::ByteConversionError; use std::cell::RefCell; use std::collections::VecDeque; @@ -255,6 +251,7 @@ mod tests { #[derive(Debug, Eq, PartialEq, Clone)] struct TmInfo { + pub sender_id: ComponentId, pub common: CommonTmInfo, pub event: EventU32, pub aux_data: Vec, @@ -265,19 +262,19 @@ mod tests { pub service_queue: RefCell>, } - impl EcssChannel for TestSender { - fn channel_id(&self) -> ChannelId { + impl ChannelWithId for TestSender { + fn id(&self) -> ComponentId { 0 } } impl EcssTmSenderCore for TestSender { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + fn send_tm(&self, sender_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(_) => { + PusTmVariant::InStore(_) => { panic!("TestSender: unexpected call with address"); } - PusTmWrapper::Direct(tm) => { + PusTmVariant::Direct(tm) => { assert!(!tm.source_data().is_empty()); let src_data = tm.source_data(); assert!(src_data.len() >= 4); @@ -288,6 +285,7 @@ mod tests { aux_data.extend_from_slice(&src_data[4..]); } self.service_queue.borrow_mut().push_back(TmInfo { + sender_id, common: CommonTmInfo::new_from_tm(&tm), event, aux_data, @@ -345,7 +343,8 @@ mod tests { error_data: Option<&[u8]>, ) { let mut sender = TestSender::default(); - let reporter = EventReporter::new(EXAMPLE_APID, max_event_aux_data_buf); + let reporter = + EventReporter::new(TEST_COMPONENT_ID.id(), EXAMPLE_APID, max_event_aux_data_buf); assert!(reporter.is_some()); let mut reporter = reporter.unwrap(); let time_stamp_empty: [u8; 7] = [0; 7]; @@ -375,6 +374,7 @@ mod tests { assert_eq!(tm_info.common.msg_counter, 0); assert_eq!(tm_info.common.apid, EXAMPLE_APID); assert_eq!(tm_info.event, event); + assert_eq!(tm_info.sender_id, TEST_COMPONENT_ID.id()); assert_eq!(tm_info.aux_data, error_copy); } @@ -437,7 +437,7 @@ mod tests { fn insufficient_buffer() { let mut sender = TestSender::default(); for i in 0..3 { - let reporter = EventReporter::new(EXAMPLE_APID, i); + let reporter = EventReporter::new(0, EXAMPLE_APID, i); assert!(reporter.is_some()); let mut reporter = reporter.unwrap(); check_buf_too_small(&mut reporter, &mut sender, i); diff --git a/satrs/src/pus/event_man.rs b/satrs/src/pus/event_man.rs index e6e18c9..c907f94 100644 --- a/satrs/src/pus/event_man.rs +++ b/satrs/src/pus/event_man.rs @@ -177,8 +177,8 @@ pub mod alloc_mod { } pub fn generate_pus_event_tm_generic( - &mut self, - sender: &mut (impl EcssTmSenderCore + ?Sized), + &self, + sender: &(impl EcssTmSenderCore + ?Sized), time_stamp: &[u8], event: EV, aux_data: Option<&[u8]>, @@ -239,8 +239,8 @@ pub mod alloc_mod { } pub fn generate_pus_event_tm( - &mut self, - sender: &mut (impl EcssTmSenderCore + ?Sized), + &self, + sender: &(impl EcssTmSenderCore + ?Sized), time_stamp: &[u8], event: EventU32TypedSev, aux_data: Option<&[u8]>, @@ -257,31 +257,36 @@ pub mod alloc_mod { #[cfg(test)] mod tests { use super::*; - use crate::{events::SeverityInfo, pus::TmAsVecSenderWithMpsc}; + use crate::events::SeverityInfo; + use crate::pus::PusTmAsVec; + use crate::request::UniqueApidTargetId; use std::sync::mpsc::{self, TryRecvError}; const INFO_EVENT: EventU32TypedSev = EventU32TypedSev::::const_new(1, 0); const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5); const EMPTY_STAMP: [u8; 7] = [0; 7]; + const TEST_APID: u16 = 0x02; + const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, 0x05); fn create_basic_man_1() -> DefaultPusEventU32Dispatcher<()> { - let reporter = EventReporter::new(0x02, 
128).expect("Creating event repoter failed"); + let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 128) + .expect("Creating event repoter failed"); PusEventDispatcher::new_with_default_backend(reporter) } fn create_basic_man_2() -> DefaultPusEventU32Dispatcher<()> { - let reporter = EventReporter::new(0x02, 128).expect("Creating event repoter failed"); + let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 128) + .expect("Creating event repoter failed"); let backend = DefaultPusEventMgmtBackend::default(); PusEventDispatcher::new(reporter, backend) } #[test] fn test_basic() { - let mut event_man = create_basic_man_1(); - let (event_tx, event_rx) = mpsc::channel(); - let mut sender = TmAsVecSenderWithMpsc::new(0, "test_sender", event_tx); + let event_man = create_basic_man_1(); + let (event_tx, event_rx) = mpsc::channel::(); let event_sent = event_man - .generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None) + .generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None) .expect("Sending info event failed"); assert!(event_sent); @@ -292,13 +297,13 @@ mod tests { #[test] fn test_disable_event() { let mut event_man = create_basic_man_2(); - let (event_tx, event_rx) = mpsc::channel(); - let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx); + let (event_tx, event_rx) = mpsc::channel::(); + // let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx); let res = event_man.disable_tm_for_event(&LOW_SEV_EVENT); assert!(res.is_ok()); assert!(res.unwrap()); let mut event_sent = event_man - .generate_pus_event_tm_generic(&mut sender, &EMPTY_STAMP, LOW_SEV_EVENT, None) + .generate_pus_event_tm_generic(&event_tx, &EMPTY_STAMP, LOW_SEV_EVENT, None) .expect("Sending low severity event failed"); assert!(!event_sent); let res = event_rx.try_recv(); @@ -306,7 +311,7 @@ mod tests { assert!(matches!(res.unwrap_err(), TryRecvError::Empty)); // Check that only the low severity event was disabled event_sent = event_man - .generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None) + .generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None) .expect("Sending info event failed"); assert!(event_sent); event_rx.try_recv().expect("No info event received"); @@ -315,8 +320,7 @@ mod tests { #[test] fn test_reenable_event() { let mut event_man = create_basic_man_1(); - let (event_tx, event_rx) = mpsc::channel(); - let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx); + let (event_tx, event_rx) = mpsc::channel::(); let mut res = event_man.disable_tm_for_event_with_sev(&INFO_EVENT); assert!(res.is_ok()); assert!(res.unwrap()); @@ -324,7 +328,7 @@ mod tests { assert!(res.is_ok()); assert!(res.unwrap()); let event_sent = event_man - .generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None) + .generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None) .expect("Sending info event failed"); assert!(event_sent); event_rx.try_recv().expect("No info event received"); diff --git a/satrs/src/pus/event_srv.rs b/satrs/src/pus/event_srv.rs index 64c1ba0..cca55ca 100644 --- a/satrs/src/pus/event_srv.rs +++ b/satrs/src/pus/event_srv.rs @@ -2,17 +2,18 @@ use crate::events::EventU32; use crate::pus::event_man::{EventRequest, EventRequestWithToken}; use crate::pus::verification::TcStateToken; use crate::pus::{PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError}; +use crate::queue::GenericSendError; use spacepackets::ecss::event::Subservice; use spacepackets::ecss::PusPacket; use std::sync::mpsc::Sender; use 
super::verification::VerificationReportingProvider; use super::{ - get_current_cds_short_timestamp, EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore, - PusServiceHelper, + EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore, GenericConversionError, + GenericRoutingError, PusServiceHelper, }; -pub struct PusService5EventHandler< +pub struct PusEventServiceHandler< TcReceiver: EcssTcReceiverCore, TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter, @@ -28,7 +29,7 @@ impl< TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter, VerificationReporter: VerificationReportingProvider, - > PusService5EventHandler + > PusEventServiceHandler { pub fn new( service_helper: PusServiceHelper< @@ -45,16 +46,19 @@ impl< } } - pub fn handle_one_tc(&mut self) -> Result { + pub fn poll_and_handle_next_tc( + &mut self, + time_stamp: &[u8], + ) -> Result { let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?; if possible_packet.is_none() { return Ok(PusPacketHandlerResult::Empty); } let ecss_tc_and_token = possible_packet.unwrap(); - let tc = self - .service_helper - .tc_in_mem_converter - .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?; + self.service_helper + .tc_in_mem_converter_mut() + .cache(&ecss_tc_and_token.tc_in_memory)?; + let tc = self.service_helper.tc_in_mem_converter().convert()?; let subservice = tc.subservice(); let srv = Subservice::try_from(subservice); if srv.is_err() { @@ -63,63 +67,74 @@ impl< ecss_tc_and_token.token, )); } - let handle_enable_disable_request = |enable: bool, stamp: [u8; 7]| { - if tc.user_data().len() < 4 { - return Err(PusPacketHandlingError::NotEnoughAppData { - expected: 4, - found: tc.user_data().len(), - }); - } - let user_data = tc.user_data(); - let event_u32 = EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap())); - let start_token = self - .service_helper - .common - .verification_handler - .start_success(ecss_tc_and_token.token, &stamp) - .map_err(|_| PartialPusHandlingError::Verification); - let partial_error = start_token.clone().err(); - let mut token: TcStateToken = ecss_tc_and_token.token.into(); - if let Ok(start_token) = start_token { - token = start_token.into(); - } - let event_req_with_token = if enable { - EventRequestWithToken { - request: EventRequest::Enable(event_u32), - token, + let handle_enable_disable_request = + |enable: bool| -> Result { + if tc.user_data().len() < 4 { + return Err(GenericConversionError::NotEnoughAppData { + expected: 4, + found: tc.user_data().len(), + } + .into()); } - } else { - EventRequestWithToken { - request: EventRequest::Disable(event_u32), - token, + let user_data = tc.user_data(); + let event_u32 = + EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap())); + let start_token = self + .service_helper + .common + .verif_reporter + .start_success( + self.service_helper.common.id, + &self.service_helper.common.tm_sender, + ecss_tc_and_token.token, + time_stamp, + ) + .map_err(|_| PartialPusHandlingError::Verification); + let partial_error = start_token.clone().err(); + let mut token: TcStateToken = ecss_tc_and_token.token.into(); + if let Ok(start_token) = start_token { + token = start_token.into(); } + let event_req_with_token = if enable { + EventRequestWithToken { + request: EventRequest::Enable(event_u32), + token, + } + } else { + EventRequestWithToken { + request: EventRequest::Disable(event_u32), + token, + } + }; + self.event_request_tx + .send(event_req_with_token) + .map_err(|_| { + 
PusPacketHandlingError::RequestRouting(GenericRoutingError::Send( + GenericSendError::RxDisconnected, + )) + })?; + if let Some(partial_error) = partial_error { + return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess( + partial_error, + )); + } + Ok(PusPacketHandlerResult::RequestHandled) }; - self.event_request_tx - .send(event_req_with_token) - .map_err(|_| { - PusPacketHandlingError::Other("Forwarding event request failed".into()) - })?; - if let Some(partial_error) = partial_error { - return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess( - partial_error, - )); - } - Ok(PusPacketHandlerResult::RequestHandled) - }; - let mut partial_error = None; - let time_stamp = get_current_cds_short_timestamp(&mut partial_error); + match srv.unwrap() { Subservice::TmInfoReport | Subservice::TmLowSeverityReport | Subservice::TmMediumSeverityReport | Subservice::TmHighSeverityReport => { - return Err(PusPacketHandlingError::InvalidSubservice(tc.subservice())) + return Err(PusPacketHandlingError::RequestConversion( + GenericConversionError::WrongService(tc.subservice()), + )) } Subservice::TcEnableEventGeneration => { - handle_enable_disable_request(true, time_stamp)?; + handle_enable_disable_request(true)?; } Subservice::TcDisableEventGeneration => { - handle_enable_disable_request(false, time_stamp)?; + handle_enable_disable_request(false)?; } Subservice::TcReportDisabledList | Subservice::TmDisabledEventsReport => { return Ok(PusPacketHandlerResult::SubserviceNotImplemented( @@ -137,6 +152,7 @@ impl< mod tests { use delegate::delegate; use spacepackets::ecss::event::Subservice; + use spacepackets::time::{cds, TimeWriter}; use spacepackets::util::UnsignedEnum; use spacepackets::{ ecss::{ @@ -148,49 +164,63 @@ mod tests { use std::sync::mpsc::{self, Sender}; use crate::pus::event_man::EventRequest; - use crate::pus::tests::SimplePusPacketHandler; + use crate::pus::test_util::{PusTestHarness, SimplePusPacketHandler, TEST_APID}; use crate::pus::verification::{ - RequestId, VerificationReporterWithSharedPoolMpscBoundedSender, + RequestId, VerificationReporter, VerificationReportingProvider, }; - use crate::pus::{MpscTcReceiver, TmInSharedPoolSenderWithBoundedMpsc}; + use crate::pus::{GenericConversionError, MpscTcReceiver, MpscTmInSharedPoolSenderBounded}; use crate::{ events::EventU32, pus::{ event_man::EventRequestWithToken, - tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness, TEST_APID}, + tests::PusServiceHandlerWithSharedStoreCommon, verification::{TcStateAccepted, VerificationToken}, EcssTcInSharedStoreConverter, PusPacketHandlerResult, PusPacketHandlingError, }, }; - use super::PusService5EventHandler; + use super::PusEventServiceHandler; const TEST_EVENT_0: EventU32 = EventU32::const_new(crate::events::Severity::INFO, 5, 25); struct Pus5HandlerWithStoreTester { common: PusServiceHandlerWithSharedStoreCommon, - handler: PusService5EventHandler< + handler: PusEventServiceHandler< MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, + MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, + VerificationReporter, >, } impl Pus5HandlerWithStoreTester { pub fn new(event_request_tx: Sender) -> Self { - let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(); + let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(0); Self { common, - handler: PusService5EventHandler::new(srv_handler, event_request_tx), + handler: PusEventServiceHandler::new(srv_handler, event_request_tx), } 
} } impl PusTestHarness for Pus5HandlerWithStoreTester { + fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken { + let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc); + self.handler + .service_helper + .verif_reporter() + .acceptance_success( + self.handler.service_helper.id(), + self.handler.service_helper.tm_sender(), + init_token, + &[0; 7], + ) + .expect("acceptance success failure") + } + delegate! { to self.common { - fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken; + fn send_tc(&self, token: &VerificationToken, tc: &PusTcCreator); fn read_next_tm(&mut self) -> PusTmReader<'_>; fn check_no_tm_available(&self) -> bool; fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId); @@ -200,10 +230,9 @@ mod tests { } impl SimplePusPacketHandler for Pus5HandlerWithStoreTester { - delegate! { - to self.handler { - fn handle_one_tc(&mut self) -> Result; - } + fn handle_one_tc(&mut self) -> Result { + let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap(); + self.handler.poll_and_handle_next_tc(&time_stamp) } } @@ -220,8 +249,9 @@ mod tests { .write_to_be_bytes(&mut app_data) .expect("writing test event failed"); let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true); - let token = test_harness.send_tc(&ping_tc); - let request_id = token.req_id(); + let token = test_harness.init_verification(&ping_tc); + test_harness.send_tc(&token, &ping_tc); + let request_id = token.request_id(); test_harness.handle_one_tc().unwrap(); test_harness.check_next_verification_tm(1, request_id); test_harness.check_next_verification_tm(3, request_id); @@ -277,7 +307,8 @@ mod tests { let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); let sec_header = PusTcSecondaryHeader::new_simple(5, 200); let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true); - test_harness.send_tc(&ping_tc); + let token = test_harness.init_verification(&ping_tc); + test_harness.send_tc(&token, &ping_tc); let result = test_harness.handle_one_tc(); assert!(result.is_ok()); let result = result.unwrap(); @@ -296,11 +327,15 @@ mod tests { let sec_header = PusTcSecondaryHeader::new_simple(5, Subservice::TcEnableEventGeneration as u8); let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &[0, 1, 2], true); - test_harness.send_tc(&ping_tc); + let token = test_harness.init_verification(&ping_tc); + test_harness.send_tc(&token, &ping_tc); let result = test_harness.handle_one_tc(); assert!(result.is_err()); let result = result.unwrap_err(); - if let PusPacketHandlingError::NotEnoughAppData { expected, found } = result { + if let PusPacketHandlingError::RequestConversion( + GenericConversionError::NotEnoughAppData { expected, found }, + ) = result + { assert_eq!(expected, 4); assert_eq!(found, 3); } else { diff --git a/satrs/src/pus/hk.rs b/satrs/src/pus/hk.rs deleted file mode 100644 index 852e8f7..0000000 --- a/satrs/src/pus/hk.rs +++ /dev/null @@ -1,406 +0,0 @@ -pub use spacepackets::ecss::hk::*; - -#[cfg(feature = "std")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] -pub use std_mod::*; - -#[cfg(feature = "alloc")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] -pub use alloc_mod::*; - -use crate::{hk::HkRequest, TargetId}; - -use super::verification::{TcStateAccepted, VerificationToken}; - -/// This trait is an abstraction for the routing of PUS service 3 housekeeping requests to a -/// dedicated recipient using the generic [TargetId]. 
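// A calling sketch for the renamed event service handler above: the handler no longer stamps
// packets itself, the caller passes the timestamp into poll_and_handle_next_tc. The zero CDS time
// mirrors the test code, a real application would stamp the current time, and the handler
// variable name is illustrative:
//
//     use spacepackets::time::{cds, TimeWriter};
//
//     let time_stamp = cds::CdsTime::new_with_u16_days(0, 0)
//         .to_vec()
//         .expect("writing CDS timestamp failed");
//     match pus_5_handler.poll_and_handle_next_tc(&time_stamp) {
//         Ok(PusPacketHandlerResult::Empty) => { /* no TC pending */ }
//         Ok(_) => { /* TC handled, possibly with partial success information */ }
//         Err(e) => { /* log or count the handler error */ }
//     }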
-pub trait PusHkRequestRouter { - type Error; - fn route( - &self, - target_id: TargetId, - hk_request: HkRequest, - token: VerificationToken, - ) -> Result<(), Self::Error>; -} - -#[cfg(feature = "alloc")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] -pub mod alloc_mod { - use spacepackets::ecss::tc::PusTcReader; - - use crate::pus::verification::VerificationReportingProvider; - - use super::*; - - /// This trait is an abstraction for the conversion of a PUS service 8 action telecommand into - /// a [HkRequest]. - /// - /// Having a dedicated trait for this allows maximum flexiblity and tailoring of the standard. - /// The only requirement is that a valid [TargetId] and a [HkRequest] are returned by the - /// core conversion function. - /// - /// The user should take care of performing the error handling as well. Some of the following - /// aspects might be relevant: - /// - /// - Checking the validity of the APID, service ID, subservice ID. - /// - Checking the validity of the user data. - /// - /// A [VerificationReportingProvider] is passed to the user to also allow handling - /// of the verification process as part of the PUS standard requirements. - pub trait PusHkToRequestConverter { - type Error; - fn convert( - &mut self, - token: VerificationToken, - tc: &PusTcReader, - time_stamp: &[u8], - verif_reporter: &impl VerificationReportingProvider, - ) -> Result<(TargetId, HkRequest), Self::Error>; - } -} - -#[cfg(feature = "std")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] -pub mod std_mod { - use crate::pus::{ - get_current_cds_short_timestamp, verification::VerificationReportingProvider, - EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore, GenericRoutingError, - PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler, PusServiceHelper, - }; - - use super::*; - - /// This is a generic high-level handler for the PUS service 3 housekeeping service. - /// - /// It performs the following handling steps: - /// - /// 1. Retrieve the next TC packet from the [PusServiceHelper]. The [EcssTcInMemConverter] - /// allows to configure the used telecommand memory backend. - /// 2. Convert the TC to a targeted action request using the provided - /// [PusHkToRequestConverter]. The generic error type is constrained to the - /// [PusPacketHandlerResult] for the concrete implementation which offers a packet handler. - /// 3. Route the action request using the provided [PusHkRequestRouter]. The generic error - /// type is constrained to the [GenericRoutingError] for the concrete implementation. - /// 4. Handle all routing errors using the provided [PusRoutingErrorHandler]. The generic error - /// type is constrained to the [GenericRoutingError] for the concrete implementation. 
- pub struct PusService3HkHandler< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - RequestConverter: PusHkToRequestConverter, - RequestRouter: PusHkRequestRouter, - RoutingErrorHandler: PusRoutingErrorHandler, - RoutingError = GenericRoutingError, - > { - service_helper: - PusServiceHelper, - pub request_converter: RequestConverter, - pub request_router: RequestRouter, - pub routing_error_handler: RoutingErrorHandler, - } - - impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - RequestConverter: PusHkToRequestConverter, - RequestRouter: PusHkRequestRouter, - RoutingErrorHandler: PusRoutingErrorHandler, - RoutingError: Clone, - > - PusService3HkHandler< - TcReceiver, - TmSender, - TcInMemConverter, - VerificationReporter, - RequestConverter, - RequestRouter, - RoutingErrorHandler, - RoutingError, - > - where - PusPacketHandlingError: From, - { - pub fn new( - service_helper: PusServiceHelper< - TcReceiver, - TmSender, - TcInMemConverter, - VerificationReporter, - >, - request_converter: RequestConverter, - request_router: RequestRouter, - routing_error_handler: RoutingErrorHandler, - ) -> Self { - Self { - service_helper, - request_converter, - request_router, - routing_error_handler, - } - } - - pub fn handle_one_tc(&mut self) -> Result { - let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?; - if possible_packet.is_none() { - return Ok(PusPacketHandlerResult::Empty); - } - let ecss_tc_and_token = possible_packet.unwrap(); - let tc = self - .service_helper - .tc_in_mem_converter - .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?; - let mut partial_error = None; - let time_stamp = get_current_cds_short_timestamp(&mut partial_error); - let (target_id, hk_request) = self.request_converter.convert( - ecss_tc_and_token.token, - &tc, - &time_stamp, - &self.service_helper.common.verification_handler, - )?; - if let Err(e) = - self.request_router - .route(target_id, hk_request, ecss_tc_and_token.token) - { - self.routing_error_handler.handle_error( - target_id, - ecss_tc_and_token.token, - &tc, - e.clone(), - &time_stamp, - &self.service_helper.common.verification_handler, - ); - return Err(e.into()); - } - Ok(PusPacketHandlerResult::RequestHandled) - } - } -} - -#[cfg(test)] -mod tests { - use delegate::delegate; - use spacepackets::ecss::hk::Subservice; - - use spacepackets::{ - ecss::{ - tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader}, - tm::PusTmReader, - PusPacket, - }, - CcsdsPacket, SequenceFlags, SpHeader, - }; - - use crate::pus::{MpscTcReceiver, TmAsVecSenderWithMpsc}; - use crate::{ - hk::HkRequest, - pus::{ - tests::{ - PusServiceHandlerWithVecCommon, PusTestHarness, SimplePusPacketHandler, - TestConverter, TestRouter, TestRoutingErrorHandler, APP_DATA_TOO_SHORT, TEST_APID, - }, - verification::{ - tests::TestVerificationReporter, FailParams, RequestId, TcStateAccepted, - VerificationReportingProvider, VerificationToken, - }, - EcssTcInVecConverter, GenericRoutingError, PusPacketHandlerResult, - PusPacketHandlingError, - }, - TargetId, - }; - - use super::{PusHkRequestRouter, PusHkToRequestConverter, PusService3HkHandler}; - - impl PusHkRequestRouter for TestRouter { - type Error = GenericRoutingError; - - fn route( - &self, - target_id: TargetId, - hk_request: HkRequest, - _token: VerificationToken, - ) -> 
Result<(), Self::Error> { - self.routing_requests - .borrow_mut() - .push_back((target_id, hk_request)); - self.check_for_injected_error() - } - } - - impl PusHkToRequestConverter for TestConverter<3> { - type Error = PusPacketHandlingError; - fn convert( - &mut self, - token: VerificationToken, - tc: &PusTcReader, - time_stamp: &[u8], - verif_reporter: &impl VerificationReportingProvider, - ) -> Result<(TargetId, HkRequest), Self::Error> { - self.conversion_request.push_back(tc.raw_data().to_vec()); - self.check_service(tc)?; - let target_id = tc.apid(); - if tc.user_data().len() < 4 { - verif_reporter - .start_failure( - token, - FailParams::new( - time_stamp, - &APP_DATA_TOO_SHORT, - (tc.user_data().len() as u32).to_be_bytes().as_ref(), - ), - ) - .expect("start success failure"); - return Err(PusPacketHandlingError::NotEnoughAppData { - expected: 4, - found: tc.user_data().len(), - }); - } - if tc.subservice() == Subservice::TcGenerateOneShotHk as u8 { - verif_reporter - .start_success(token, time_stamp) - .expect("start success failure"); - return Ok(( - target_id.into(), - HkRequest::OneShot(u32::from_be_bytes( - tc.user_data()[0..4].try_into().unwrap(), - )), - )); - } - Err(PusPacketHandlingError::InvalidAppData( - "unexpected app data".into(), - )) - } - } - - struct Pus3HandlerWithVecTester { - common: PusServiceHandlerWithVecCommon, - handler: PusService3HkHandler< - MpscTcReceiver, - TmAsVecSenderWithMpsc, - EcssTcInVecConverter, - TestVerificationReporter, - TestConverter<3>, - TestRouter, - TestRoutingErrorHandler, - >, - } - - impl Pus3HandlerWithVecTester { - pub fn new() -> Self { - let (common, srv_handler) = - PusServiceHandlerWithVecCommon::new_with_test_verif_sender(); - Self { - common, - handler: PusService3HkHandler::new( - srv_handler, - TestConverter::default(), - TestRouter::default(), - TestRoutingErrorHandler::default(), - ), - } - } - - delegate! { - to self.handler.request_converter { - pub fn check_next_conversion(&mut self, tc: &PusTcCreator); - } - } - delegate! { - to self.handler.request_router { - pub fn retrieve_next_request(&mut self) -> (TargetId, HkRequest); - } - } - delegate! { - to self.handler.routing_error_handler { - pub fn retrieve_next_error(&mut self) -> (TargetId, GenericRoutingError); - } - } - } - - impl PusTestHarness for Pus3HandlerWithVecTester { - delegate! { - to self.common { - fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken; - fn read_next_tm(&mut self) -> PusTmReader<'_>; - fn check_no_tm_available(&self) -> bool; - fn check_next_verification_tm( - &self, - subservice: u8, - expected_request_id: RequestId, - ); - } - } - } - impl SimplePusPacketHandler for Pus3HandlerWithVecTester { - delegate! 
{ - to self.handler { - fn handle_one_tc(&mut self) -> Result; - } - } - } - - #[test] - fn basic_test() { - let mut hk_handler = Pus3HandlerWithVecTester::new(); - let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); - let sec_header = PusTcSecondaryHeader::new_simple(3, Subservice::TcGenerateOneShotHk as u8); - let unique_id: u32 = 1; - let unique_id_raw = unique_id.to_be_bytes(); - let tc = PusTcCreator::new(&mut sp_header, sec_header, unique_id_raw.as_ref(), true); - hk_handler.send_tc(&tc); - let result = hk_handler.handle_one_tc(); - assert!(result.is_ok()); - hk_handler.check_next_conversion(&tc); - let (target_id, hk_request) = hk_handler.retrieve_next_request(); - assert_eq!(target_id, TEST_APID.into()); - if let HkRequest::OneShot(id) = hk_request { - assert_eq!(id, unique_id); - } else { - panic!("unexpected request"); - } - } - - #[test] - fn test_routing_error() { - let mut hk_handler = Pus3HandlerWithVecTester::new(); - let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); - let sec_header = PusTcSecondaryHeader::new_simple(3, Subservice::TcGenerateOneShotHk as u8); - let unique_id: u32 = 1; - let unique_id_raw = unique_id.to_be_bytes(); - let tc = PusTcCreator::new(&mut sp_header, sec_header, unique_id_raw.as_ref(), true); - let error = GenericRoutingError::UnknownTargetId(25); - hk_handler - .handler - .request_router - .inject_routing_error(error); - hk_handler.send_tc(&tc); - let result = hk_handler.handle_one_tc(); - assert!(result.is_err()); - let check_error = |routing_error: GenericRoutingError| { - if let GenericRoutingError::UnknownTargetId(id) = routing_error { - assert_eq!(id, 25); - } else { - panic!("unexpected error type"); - } - }; - if let PusPacketHandlingError::RequestRoutingError(routing_error) = result.unwrap_err() { - check_error(routing_error); - } else { - panic!("unexpected error type"); - } - - hk_handler.check_next_conversion(&tc); - let (target_id, hk_req) = hk_handler.retrieve_next_request(); - assert_eq!(target_id, TEST_APID.into()); - if let HkRequest::OneShot(unique_id) = hk_req { - assert_eq!(unique_id, 1); - } - - let (target_id, found_error) = hk_handler.retrieve_next_error(); - assert_eq!(target_id, TEST_APID.into()); - check_error(found_error); - } -} diff --git a/satrs/src/pus/mod.rs b/satrs/src/pus/mod.rs index 37d69ef..f989cbb 100644 --- a/satrs/src/pus/mod.rs +++ b/satrs/src/pus/mod.rs @@ -4,9 +4,11 @@ //! The satrs-example application contains various usage examples of these components. 
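// The TM sender abstraction reworked in this module now receives the ID of the sending component
// together with each telemetry packet. A minimal custom sender following that signature could
// look like the sketch below (illustrative only; the EcssTmDummySender further down is the
// in-tree no-op variant):

#[derive(Default)]
pub struct TmSenderSketch {
    pub tm_count: core::cell::Cell<u32>,
    pub last_sender: core::cell::Cell<Option<ComponentId>>,
}

impl EcssTmSenderCore for TmSenderSketch {
    fn send_tm(&self, source_id: ComponentId, _tm: PusTmVariant) -> Result<(), EcssTmtcError> {
        // Remember which component produced the TM. A real sender would forward the packet to a
        // TM sink, a shared pool or an mpsc channel instead of just counting it.
        self.tm_count.set(self.tm_count.get() + 1);
        self.last_sender.set(Some(source_id));
        Ok(())
    }
}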
use crate::pool::{StoreAddr, StoreError}; use crate::pus::verification::{TcStateAccepted, TcStateToken, VerificationToken}; -use crate::queue::{GenericRecvError, GenericSendError}; -use crate::ChannelId; +use crate::queue::{GenericReceiveError, GenericSendError}; +use crate::request::{GenericMessage, MessageMetadata, RequestId}; +use crate::ComponentId; use core::fmt::{Display, Formatter}; +use core::time::Duration; #[cfg(feature = "alloc")] use downcast_rs::{impl_downcast, Downcast}; #[cfg(feature = "alloc")] @@ -24,7 +26,6 @@ pub mod event; pub mod event_man; #[cfg(feature = "std")] pub mod event_srv; -pub mod hk; pub mod mode; pub mod scheduler; #[cfg(feature = "std")] @@ -39,46 +40,48 @@ pub use alloc_mod::*; #[cfg(feature = "std")] pub use std_mod::*; +use self::verification::VerificationReportingProvider; + #[derive(Debug, PartialEq, Eq, Clone)] -pub enum PusTmWrapper<'time, 'src_data> { +pub enum PusTmVariant<'time, 'src_data> { InStore(StoreAddr), Direct(PusTmCreator<'time, 'src_data>), } -impl From for PusTmWrapper<'_, '_> { +impl From for PusTmVariant<'_, '_> { fn from(value: StoreAddr) -> Self { Self::InStore(value) } } -impl<'time, 'src_data> From> for PusTmWrapper<'time, 'src_data> { +impl<'time, 'src_data> From> for PusTmVariant<'time, 'src_data> { fn from(value: PusTmCreator<'time, 'src_data>) -> Self { Self::Direct(value) } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum EcssTmtcError { - StoreLock, Store(StoreError), + ByteConversion(ByteConversionError), Pus(PusError), CantSendAddr(StoreAddr), CantSendDirectTm, Send(GenericSendError), - Recv(GenericRecvError), + Receive(GenericReceiveError), } impl Display for EcssTmtcError { fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { match self { - EcssTmtcError::StoreLock => { - write!(f, "store lock error") - } EcssTmtcError::Store(store) => { - write!(f, "store error: {store}") + write!(f, "ecss tmtc error: {store}") } - EcssTmtcError::Pus(pus_e) => { - write!(f, "PUS error: {pus_e}") + EcssTmtcError::ByteConversion(e) => { + write!(f, "ecss tmtc error: {e}") + } + EcssTmtcError::Pus(e) => { + write!(f, "ecss tmtc error: {e}") } EcssTmtcError::CantSendAddr(addr) => { write!(f, "can not send address {addr}") @@ -86,11 +89,11 @@ impl Display for EcssTmtcError { EcssTmtcError::CantSendDirectTm => { write!(f, "can not send TM directly") } - EcssTmtcError::Send(send_e) => { - write!(f, "send error {send_e}") + EcssTmtcError::Send(e) => { + write!(f, "ecss tmtc error: {e}") } - EcssTmtcError::Recv(recv_e) => { - write!(f, "recv error {recv_e}") + EcssTmtcError::Receive(e) => { + write!(f, "ecss tmtc error {e}") } } } @@ -114,9 +117,15 @@ impl From for EcssTmtcError { } } -impl From for EcssTmtcError { - fn from(value: GenericRecvError) -> Self { - Self::Recv(value) +impl From for EcssTmtcError { + fn from(value: ByteConversionError) -> Self { + Self::ByteConversion(value) + } +} + +impl From for EcssTmtcError { + fn from(value: GenericReceiveError) -> Self { + Self::Receive(value) } } @@ -125,16 +134,17 @@ impl Error for EcssTmtcError { fn source(&self) -> Option<&(dyn Error + 'static)> { match self { EcssTmtcError::Store(e) => Some(e), + EcssTmtcError::ByteConversion(e) => Some(e), EcssTmtcError::Pus(e) => Some(e), EcssTmtcError::Send(e) => Some(e), - EcssTmtcError::Recv(e) => Some(e), + EcssTmtcError::Receive(e) => Some(e), _ => None, } } } -pub trait EcssChannel: Send { +pub trait ChannelWithId: Send { /// Each sender can have an ID associated with it - fn channel_id(&self) -> ChannelId; + fn 
id(&self) -> ComponentId; fn name(&self) -> &'static str { "unset" } @@ -144,7 +154,7 @@ pub trait EcssChannel: Send { /// /// This sender object is responsible for sending PUS telemetry to a TM sink. pub trait EcssTmSenderCore: Send { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError>; + fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError>; } /// Generic trait for a user supplied sender object. @@ -155,6 +165,16 @@ pub trait EcssTcSenderCore { fn send_tc(&self, tc: PusTcCreator, token: Option) -> Result<(), EcssTmtcError>; } +/// Dummy object which can be useful for tests. +#[derive(Default)] +pub struct EcssTmDummySender {} + +impl EcssTmSenderCore for EcssTmDummySender { + fn send_tm(&self, _source_id: ComponentId, _tm: PusTmVariant) -> Result<(), EcssTmtcError> { + Ok(()) + } +} + /// A PUS telecommand packet can be stored in memory using different methods. Right now, /// storage inside a pool structure like [crate::pool::StaticMemoryPool], and storage inside a /// `Vec` are supported. @@ -249,7 +269,7 @@ impl From for TryRecvTmtcError { } /// Generic trait for a user supplied receiver object. -pub trait EcssTcReceiverCore: EcssChannel { +pub trait EcssTcReceiverCore { fn recv_tc(&self) -> Result; } @@ -263,9 +283,76 @@ pub trait ReceivesEcssPusTc { fn pass_pus_tc(&mut self, header: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error>; } +pub trait ActiveRequestMapProvider: Sized { + fn insert(&mut self, request_id: &RequestId, request_info: V); + fn get(&self, request_id: RequestId) -> Option<&V>; + fn get_mut(&mut self, request_id: RequestId) -> Option<&mut V>; + fn remove(&mut self, request_id: RequestId) -> bool; + + /// Call a user-supplied closure for each active request. + fn for_each(&self, f: F); + + /// Call a user-supplied closure for each active request. Mutable variant. + fn for_each_mut(&mut self, f: F); +} + +pub trait ActiveRequestProvider { + fn target_id(&self) -> ComponentId; + fn token(&self) -> TcStateToken; + fn set_token(&mut self, token: TcStateToken); + fn has_timed_out(&self) -> bool; + fn timeout(&self) -> Duration; +} + +/// This trait is an abstraction for the routing of PUS request to a dedicated +/// recipient using the generic [TargetId]. +pub trait PusRequestRouter { + type Error; + + fn route( + &self, + requestor_info: MessageMetadata, + target_id: ComponentId, + request: Request, + ) -> Result<(), Self::Error>; +} + +pub trait PusReplyHandler { + type Error; + + /// This function handles a reply for a given PUS request and returns whether that request + /// is finished. A finished PUS request will be removed from the active request map. + fn handle_reply( + &mut self, + caller_id: ComponentId, + reply: &GenericMessage, + active_request: &ActiveRequestInfo, + tm_sender: &impl EcssTmSenderCore, + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result; + + fn handle_unrequested_reply( + &mut self, + caller_id: ComponentId, + reply: &GenericMessage, + tm_sender: &impl EcssTmSenderCore, + ) -> Result<(), Self::Error>; + + /// Handle the timeout of an active request. 
+ fn handle_request_timeout( + &mut self, + caller_id: ComponentId, + active_request: &ActiveRequestInfo, + tm_sender: &impl EcssTmSenderCore, + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result<(), Self::Error>; +} + #[cfg(feature = "alloc")] -mod alloc_mod { - use crate::TargetId; +pub mod alloc_mod { + use hashbrown::HashMap; use super::*; @@ -351,38 +438,241 @@ mod alloc_mod { impl_downcast!(EcssTcReceiver); - pub trait PusRoutingErrorHandler { + /// This trait is an abstraction for the conversion of a PUS telecommand into a generic request + /// type. + /// + /// Having a dedicated trait for this allows maximum flexiblity and tailoring of the standard. + /// The only requirement is that a valid [TargetId] and a request instance are returned by the + /// core conversion function. + /// + /// The user should take care of performing the error handling as well. Some of the following + /// aspects might be relevant: + /// + /// - Checking the validity of the APID, service ID, subservice ID. + /// - Checking the validity of the user data. + /// + /// A [VerificationReportingProvider] instance is passed to the user to also allow handling + /// of the verification process as part of the PUS standard requirements. + pub trait PusTcToRequestConverter { type Error; - fn handle_error( - &self, - target_id: TargetId, + fn convert( + &mut self, + caller_id: ComponentId, token: VerificationToken, tc: &PusTcReader, - error: Self::Error, - time_stamp: &[u8], + tm_sender: &(impl EcssTmSenderCore + ?Sized), verif_reporter: &impl VerificationReportingProvider, - ); + time_stamp: &[u8], + ) -> Result<(ActiveRequestInfo, Request), Self::Error>; } + + #[derive(Clone, Debug)] + pub struct DefaultActiveRequestMap(pub HashMap); + + impl Default for DefaultActiveRequestMap { + fn default() -> Self { + Self(HashMap::new()) + } + } + + impl ActiveRequestMapProvider for DefaultActiveRequestMap { + fn insert(&mut self, request_id: &RequestId, request: V) { + self.0.insert(*request_id, request); + } + + fn get(&self, request_id: RequestId) -> Option<&V> { + self.0.get(&request_id) + } + + fn get_mut(&mut self, request_id: RequestId) -> Option<&mut V> { + self.0.get_mut(&request_id) + } + + fn remove(&mut self, request_id: RequestId) -> bool { + self.0.remove(&request_id).is_some() + } + + fn for_each(&self, mut f: F) { + for (req_id, active_req) in &self.0 { + f(req_id, active_req); + } + } + + fn for_each_mut(&mut self, mut f: F) { + for (req_id, active_req) in &mut self.0 { + f(req_id, active_req); + } + } + } + + /* + /// Generic reply handler structure which can be used to handle replies for a specific PUS + /// service. + /// + /// This is done by keeping track of active requests using an internal map structure. An API + /// to register new active requests is exposed as well. + /// The reply handler performs boilerplate tasks like performing the verification handling and + /// timeout handling. + /// + /// This object is not useful by itself but serves as a common building block for high-level + /// PUS reply handlers. Concrete PUS handlers should constrain the [ActiveRequestProvider] and + /// the `ReplyType` generics to specific types tailored towards PUS services in addition to + /// providing an API which can process received replies and convert them into verification + /// completions or other operation like user hook calls. The framework also provides some + /// concrete PUS handlers for common PUS services like the mode, action and housekeeping + /// service. 
+ /// + /// This object does not automatically update its internal time information used to check for + /// timeouts. The user should call the [Self::update_time] and [Self::update_time_from_now] + /// methods to do this. + pub struct PusServiceReplyHandler< + ActiveRequestMap: ActiveRequestMapProvider, + ReplyHook: ReplyHandlerHook, + ActiveRequestType: ActiveRequestProvider, + ReplyType, + > { + pub active_request_map: ActiveRequestMap, + pub tm_buf: alloc::vec::Vec, + pub current_time: UnixTimestamp, + pub user_hook: ReplyHook, + phantom: PhantomData<(ActiveRequestType, ReplyType)>, + } + + impl< + ActiveRequestMap: ActiveRequestMapProvider, + ReplyHook: ReplyHandlerHook, + ActiveRequestType: ActiveRequestProvider, + ReplyType, + > + PusServiceReplyHandler< + ActiveRequestMap, + ReplyHook, + ActiveRequestType, + ReplyType, + > + { + #[cfg(feature = "std")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] + pub fn new_from_now( + active_request_map: ActiveRequestMap, + fail_data_buf_size: usize, + user_hook: ReplyHook, + ) -> Result { + let current_time = UnixTimestamp::from_now()?; + Ok(Self::new( + active_request_map, + fail_data_buf_size, + user_hook, + tm_sender, + current_time, + )) + } + + pub fn new( + active_request_map: ActiveRequestMap, + fail_data_buf_size: usize, + user_hook: ReplyHook, + tm_sender: TmSender, + init_time: UnixTimestamp, + ) -> Self { + Self { + active_request_map, + tm_buf: alloc::vec![0; fail_data_buf_size], + current_time: init_time, + user_hook, + tm_sender, + phantom: PhantomData, + } + } + + pub fn add_routed_request( + &mut self, + request_id: verification::RequestId, + active_request_type: ActiveRequestType, + ) { + self.active_request_map + .insert(&request_id.into(), active_request_type); + } + + pub fn request_active(&self, request_id: RequestId) -> bool { + self.active_request_map.get(request_id).is_some() + } + + /// Check for timeouts across all active requests. + /// + /// It will call [Self::handle_timeout] for all active requests which have timed out. + pub fn check_for_timeouts(&mut self, time_stamp: &[u8]) -> Result<(), EcssTmtcError> { + let mut timed_out_commands = alloc::vec::Vec::new(); + self.active_request_map.for_each(|request_id, active_req| { + let diff = self.current_time - active_req.start_time(); + if diff.duration_absolute > active_req.timeout() { + self.handle_timeout(active_req, time_stamp); + } + timed_out_commands.push(*request_id); + }); + for timed_out_command in timed_out_commands { + self.active_request_map.remove(timed_out_command); + } + Ok(()) + } + + /// Handle the timeout for a given active request. + /// + /// This implementation will report a verification completion failure with a user-provided + /// error code. It supplies the configured request timeout in milliseconds as a [u64] + /// serialized in big-endian format as the failure data. + pub fn handle_timeout(&self, active_request: &ActiveRequestType, time_stamp: &[u8]) { + let timeout = active_request.timeout().as_millis() as u64; + let timeout_raw = timeout.to_be_bytes(); + self.verification_reporter + .completion_failure( + active_request.token(), + FailParams::new( + time_stamp, + &self.user_hook.timeout_error_code(), + &timeout_raw, + ), + ) + .unwrap(); + self.user_hook.timeout_callback(active_request); + } + + /// Update the current time used for timeout checks based on the current OS time. 
+ #[cfg(feature = "std")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] + pub fn update_time_from_now(&mut self) -> Result<(), std::time::SystemTimeError> { + self.current_time = UnixTimestamp::from_now()?; + Ok(()) + } + + /// Update the current time used for timeout checks. + pub fn update_time(&mut self, time: UnixTimestamp) { + self.current_time = time; + } + } + */ } #[cfg(feature = "std")] #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] pub mod std_mod { - use crate::pool::{PoolProvider, PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr}; + use crate::pool::{ + PoolProvider, PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr, StoreError, + }; use crate::pus::verification::{TcStateAccepted, VerificationToken}; use crate::pus::{ - EcssChannel, EcssTcAndToken, EcssTcReceiverCore, EcssTmSenderCore, EcssTmtcError, - GenericRecvError, GenericSendError, PusTmWrapper, TryRecvTmtcError, + EcssTcAndToken, EcssTcReceiverCore, EcssTmSenderCore, EcssTmtcError, GenericReceiveError, + GenericSendError, PusTmVariant, TryRecvTmtcError, }; use crate::tmtc::tm_helper::SharedTmPool; - use crate::{ChannelId, TargetId}; + use crate::ComponentId; use alloc::vec::Vec; + use core::time::Duration; use spacepackets::ecss::tc::PusTcReader; use spacepackets::ecss::tm::PusTmCreator; - use spacepackets::ecss::{PusError, WritablePusPacket}; - use spacepackets::time::cds::CdsTime; + use spacepackets::ecss::WritablePusPacket; use spacepackets::time::StdTimestampError; - use spacepackets::time::TimeWriter; + use spacepackets::ByteConversionError; use std::string::String; use std::sync::mpsc; use std::sync::mpsc::TryRecvError; @@ -391,8 +681,14 @@ pub mod std_mod { #[cfg(feature = "crossbeam")] pub use cb_mod::*; - use super::verification::VerificationReportingProvider; - use super::{AcceptedEcssTcAndToken, TcInMemory}; + use super::verification::{TcStateToken, VerificationReportingProvider}; + use super::{AcceptedEcssTcAndToken, ActiveRequestProvider, TcInMemory}; + + #[derive(Debug)] + pub struct PusTmInPool { + pub source_id: ComponentId, + pub store_addr: StoreAddr, + } impl From> for EcssTmtcError { fn from(_: mpsc::SendError) -> Self { @@ -400,48 +696,70 @@ pub mod std_mod { } } - impl EcssTmSenderCore for mpsc::Sender { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + impl EcssTmSenderCore for mpsc::Sender { + fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(addr) => self - .send(addr) + PusTmVariant::InStore(store_addr) => self + .send(PusTmInPool { + source_id, + store_addr, + }) .map_err(|_| GenericSendError::RxDisconnected)?, - PusTmWrapper::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm), + PusTmVariant::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm), }; Ok(()) } } - impl EcssTmSenderCore for mpsc::SyncSender { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + impl EcssTmSenderCore for mpsc::SyncSender { + fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(addr) => self - .try_send(addr) + PusTmVariant::InStore(store_addr) => self + .try_send(PusTmInPool { + source_id, + store_addr, + }) .map_err(|e| EcssTmtcError::Send(e.into()))?, - PusTmWrapper::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm), + PusTmVariant::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm), }; Ok(()) } } - impl EcssTmSenderCore for mpsc::Sender> { - fn send_tm(&self, tm: PusTmWrapper) -> 
Result<(), EcssTmtcError> { + #[derive(Debug)] + pub struct PusTmAsVec { + pub source_id: ComponentId, + pub packet: Vec, + } + + pub type MpscTmAsVecSender = mpsc::Sender; + + impl EcssTmSenderCore for MpscTmAsVecSender { + fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), - PusTmWrapper::Direct(tm) => self - .send(tm.to_vec()?) + PusTmVariant::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), + PusTmVariant::Direct(tm) => self + .send(PusTmAsVec { + source_id, + packet: tm.to_vec()?, + }) .map_err(|e| EcssTmtcError::Send(e.into()))?, }; Ok(()) } } - impl EcssTmSenderCore for mpsc::SyncSender> { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + pub type MpscTmAsVecSenderBounded = mpsc::SyncSender; + + impl EcssTmSenderCore for MpscTmAsVecSenderBounded { + fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), - PusTmWrapper::Direct(tm) => self - .send(tm.to_vec()?) + PusTmVariant::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), + PusTmVariant::Direct(tm) => self + .send(PusTmAsVec { + source_id, + packet: tm.to_vec()?, + }) .map_err(|e| EcssTmtcError::Send(e.into()))?, }; Ok(()) @@ -449,67 +767,52 @@ pub mod std_mod { } #[derive(Clone)] - pub struct TmInSharedPoolSenderWithId { - channel_id: ChannelId, - name: &'static str, + pub struct TmInSharedPoolSender { shared_tm_store: SharedTmPool, sender: Sender, } - impl EcssChannel for TmInSharedPoolSenderWithId { - fn channel_id(&self) -> ChannelId { - self.channel_id - } - - fn name(&self) -> &'static str { - self.name - } - } - - impl TmInSharedPoolSenderWithId { - pub fn send_direct_tm(&self, tm: PusTmCreator) -> Result<(), EcssTmtcError> { + impl TmInSharedPoolSender { + pub fn send_direct_tm( + &self, + source_id: ComponentId, + tm: PusTmCreator, + ) -> Result<(), EcssTmtcError> { let addr = self.shared_tm_store.add_pus_tm(&tm)?; - self.sender.send_tm(PusTmWrapper::InStore(addr)) + self.sender.send_tm(source_id, PusTmVariant::InStore(addr)) } } - impl EcssTmSenderCore for TmInSharedPoolSenderWithId { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { - if let PusTmWrapper::Direct(tm) = tm { - return self.send_direct_tm(tm); + impl EcssTmSenderCore for TmInSharedPoolSender { + fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { + if let PusTmVariant::Direct(tm) = tm { + return self.send_direct_tm(source_id, tm); } - self.sender.send_tm(tm) + self.sender.send_tm(source_id, tm) } } - impl TmInSharedPoolSenderWithId { - pub fn new( - id: ChannelId, - name: &'static str, - shared_tm_store: SharedTmPool, - sender: Sender, - ) -> Self { + impl TmInSharedPoolSender { + pub fn new(shared_tm_store: SharedTmPool, sender: Sender) -> Self { Self { - channel_id: id, - name, shared_tm_store, sender, } } } - pub type TmInSharedPoolSenderWithMpsc = TmInSharedPoolSenderWithId>; - pub type TmInSharedPoolSenderWithBoundedMpsc = - TmInSharedPoolSenderWithId>; + pub type MpscTmInSharedPoolSender = TmInSharedPoolSender>; + pub type MpscTmInSharedPoolSenderBounded = TmInSharedPoolSender>; + /* /// This class can be used if frequent heap allocations during run-time are not an issue. /// PUS TM packets will be sent around as [Vec]s. 
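A short usage sketch for the reworked Vec-backed telemetry path; the PusTmCreator is assumed to be built elsewhere, and only the channel plumbing introduced here is shown:

use std::sync::mpsc;

// Any component holding an MpscTmAsVecSender can forward a directly created TM packet.
fn forward_direct_tm(
    tm_sender: &MpscTmAsVecSender,
    source_id: ComponentId,
    tm: PusTmCreator,
) -> Result<(), EcssTmtcError> {
    tm_sender.send_tm(source_id, PusTmVariant::Direct(tm))
}

// The receiving end sees the serialized packet together with the ID of its producer.
fn drain_tm_queue(tm_receiver: &mpsc::Receiver<PusTmAsVec>) {
    while let Ok(tm) = tm_receiver.try_recv() {
        println!("TM from component {}: {} bytes", tm.source_id, tm.packet.len());
    }
}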
Please note that the current implementation /// of this class can not deal with store addresses, so it is assumed that is is always /// going to be called with direct packets. #[derive(Clone)] pub struct TmAsVecSenderWithId { - id: ChannelId, - name: &'static str, + // id: ComponentId, + //name: &'static str, sender: Sender, } @@ -520,13 +823,13 @@ pub mod std_mod { } impl TmAsVecSenderWithId { - pub fn new(id: u32, name: &'static str, sender: Sender) -> Self { + pub fn new(id: ComponentId, name: &'static str, sender: Sender) -> Self { Self { id, sender, name } } } - impl EcssChannel for TmAsVecSenderWithId { - fn channel_id(&self) -> ChannelId { + impl ChannelWithId for TmAsVecSenderWithId { + fn id(&self) -> ComponentId { self.id } fn name(&self) -> &'static str { @@ -535,58 +838,34 @@ pub mod std_mod { } impl EcssTmSenderCore for TmAsVecSenderWithId { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + fn send_tm(&self, tm: PusTmVariant) -> Result<(), EcssTmtcError> { self.sender.send_tm(tm) } } pub type TmAsVecSenderWithMpsc = TmAsVecSenderWithId>>; pub type TmAsVecSenderWithBoundedMpsc = TmAsVecSenderWithId>>; + */ - pub struct MpscTcReceiver { - id: ChannelId, - name: &'static str, - receiver: mpsc::Receiver, - } - - impl EcssChannel for MpscTcReceiver { - fn channel_id(&self) -> ChannelId { - self.id - } - - fn name(&self) -> &'static str { - self.name - } - } + pub type MpscTcReceiver = mpsc::Receiver; impl EcssTcReceiverCore for MpscTcReceiver { fn recv_tc(&self) -> Result { - self.receiver.try_recv().map_err(|e| match e { + self.try_recv().map_err(|e| match e { TryRecvError::Empty => TryRecvTmtcError::Empty, - TryRecvError::Disconnected => { - TryRecvTmtcError::Tmtc(EcssTmtcError::from(GenericRecvError::TxDisconnected)) - } + TryRecvError::Disconnected => TryRecvTmtcError::Tmtc(EcssTmtcError::from( + GenericReceiveError::TxDisconnected(None), + )), }) } } - impl MpscTcReceiver { - pub fn new( - id: ChannelId, - name: &'static str, - receiver: mpsc::Receiver, - ) -> Self { - Self { id, name, receiver } - } - } - #[cfg(feature = "crossbeam")] pub mod cb_mod { use super::*; use crossbeam_channel as cb; - pub type TmInSharedPoolSenderWithCrossbeam = - TmInSharedPoolSenderWithId>; + pub type TmInSharedPoolSenderWithCrossbeam = TmInSharedPoolSender>; impl From> for EcssTmtcError { fn from(_: cb::SendError) -> Self { @@ -605,64 +884,198 @@ pub mod std_mod { } } - impl EcssTmSenderCore for cb::Sender { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + impl EcssTmSenderCore for cb::Sender { + fn send_tm( + &self, + source_id: ComponentId, + tm: PusTmVariant, + ) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(addr) => self - .try_send(addr) + PusTmVariant::InStore(addr) => self + .try_send(PusTmInPool { + source_id, + store_addr: addr, + }) .map_err(|e| EcssTmtcError::Send(e.into()))?, - PusTmWrapper::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm), + PusTmVariant::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm), }; Ok(()) } } - impl EcssTmSenderCore for cb::Sender> { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + impl EcssTmSenderCore for cb::Sender { + fn send_tm( + &self, + source_id: ComponentId, + tm: PusTmVariant, + ) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), - PusTmWrapper::Direct(tm) => self - .send(tm.to_vec()?) 
+ PusTmVariant::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), + PusTmVariant::Direct(tm) => self + .send(PusTmAsVec { + source_id, + packet: tm.to_vec()?, + }) .map_err(|e| EcssTmtcError::Send(e.into()))?, }; Ok(()) } } - pub struct CrossbeamTcReceiver { - id: ChannelId, - name: &'static str, - receiver: cb::Receiver, - } + pub type CrossbeamTcReceiver = cb::Receiver; + } - impl CrossbeamTcReceiver { - pub fn new( - id: ChannelId, - name: &'static str, - receiver: cb::Receiver, - ) -> Self { - Self { id, name, receiver } + /// This is a high-level handler for the generic PUS services which need to convert PUS + /// commands into a request/reply pattern. + /// + /// It performs the following handling steps: + /// + /// 1. Retrieve the next TC packet from the [PusServiceHelper]. The [EcssTcInMemConverter] + /// allows to configure the used telecommand memory backend. + /// 2. Convert the TC to a targeted action request using the provided + /// [PusTcToRequestConverter]. The generic error type is constrained to the + /// [PusPacketHandlingError] for the concrete implementation which offers a packet handler. + /// 3. Route the action request using the provided [PusRequestRouter]. + /* + pub struct PusTargetedRequestHandler< + TcReceiver: EcssTcReceiverCore, + TmSender: EcssTmSenderCore, + TcInMemConverter: EcssTcInMemConverter, + VerificationReporter: VerificationReportingProvider, + RequestConverter: PusTcToRequestConverter, + RequestRouter: PusRequestRouter, + Request, + RoutingError = GenericRoutingError, + > { + service_helper: + PusServiceHelper, + pub request_converter: RequestConverter, + pub request_router: RequestRouter, + phantom: PhantomData, + } + + // pub trait PusReplyHandlerProvider { + // fn add_routed_request(&mut self, request_id: RequestId, active_request: ActiveRequest); + // } + impl< + TcReceiver: EcssTcReceiverCore, + TmSender: EcssTmSenderCore, + TcInMemConverter: EcssTcInMemConverter, + VerificationReporter: VerificationReportingProvider, + RequestConverter: PusTcToRequestConverter, + RequestRouter: PusRequestRouter, + Request, + RoutingError: Clone, + > + PusTargetedRequestHandler< + TcReceiver, + TmSender, + TcInMemConverter, + VerificationReporter, + RequestConverter, + RequestRouter, + Request, + RoutingError, + > + where + PusPacketHandlingError: From, + { + pub fn new( + service_helper: PusServiceHelper< + TcReceiver, + TmSender, + TcInMemConverter, + VerificationReporter, + >, + request_converter: RequestConverter, + request_router: RequestRouter, + ) -> Self { + Self { + service_helper, + request_converter, + request_router, + phantom: PhantomData, } } - impl EcssChannel for CrossbeamTcReceiver { - fn channel_id(&self) -> ChannelId { - self.id + /// Core function to poll the next TC packet and try to handle it. 
+ pub fn handle_one_tc(&mut self) -> Result { + let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?; + if possible_packet.is_none() { + return Ok(PusPacketHandlerResult::Empty); } + let ecss_tc_and_token = possible_packet.unwrap(); + let tc = self + .service_helper + .tc_in_mem_converter + .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?; + let mut partial_error = None; + let time_stamp = get_current_cds_short_timestamp(&mut partial_error); + let (target_id, action_request) = self.request_converter.convert( + ecss_tc_and_token.token, + &tc, + &time_stamp, + &self.service_helper.common.verification_handler, + )?; + if let Err(e) = + self.request_router + .route(target_id, action_request, ecss_tc_and_token.token) + { + self.request_router.handle_error( + target_id, + ecss_tc_and_token.token, + &tc, + e.clone(), + &time_stamp, + &self.service_helper.common.verification_handler, + ); + return Err(e.into()); + } + Ok(PusPacketHandlerResult::RequestHandled) + } + } + */ - fn name(&self) -> &'static str { - self.name + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct ActivePusRequestStd { + target_id: ComponentId, + token: TcStateToken, + start_time: std::time::Instant, + timeout: Duration, + } + + impl ActivePusRequestStd { + pub fn new( + target_id: ComponentId, + token: impl Into, + timeout: Duration, + ) -> Self { + Self { + target_id, + token: token.into(), + start_time: std::time::Instant::now(), + timeout, } } + } - impl EcssTcReceiverCore for CrossbeamTcReceiver { - fn recv_tc(&self) -> Result { - self.receiver.try_recv().map_err(|e| match e { - cb::TryRecvError::Empty => TryRecvTmtcError::Empty, - cb::TryRecvError::Disconnected => TryRecvTmtcError::Tmtc(EcssTmtcError::from( - GenericRecvError::TxDisconnected, - )), - }) - } + impl ActiveRequestProvider for ActivePusRequestStd { + fn target_id(&self) -> ComponentId { + self.target_id + } + + fn token(&self) -> TcStateToken { + self.token + } + + fn timeout(&self) -> Duration { + self.timeout + } + fn set_token(&mut self, token: TcStateToken) { + self.token = token; + } + + fn has_timed_out(&self) -> bool { + std::time::Instant::now() - self.start_time > self.timeout } } @@ -671,37 +1084,52 @@ pub mod std_mod { // will be no_std soon, see https://github.com/rust-lang/rust/issues/103765 . #[derive(Debug, Clone, Error)] - pub enum GenericRoutingError { - #[error("not enough application data, expected at least {expected}, found {found}")] - NotEnoughAppData { expected: usize, found: usize }, - #[error("Unknown target ID {0}")] - UnknownTargetId(TargetId), - #[error("Sending action request failed: {0}")] - SendError(GenericSendError), + pub enum PusTcFromMemError { + #[error("generic PUS error: {0}")] + EcssTmtc(#[from] EcssTmtcError), + #[error("invalid format of TC in memory: {0:?}")] + InvalidFormat(TcInMemory), } #[derive(Debug, Clone, Error)] - pub enum PusPacketHandlingError { - #[error("generic PUS error: {0}")] - Pus(#[from] PusError), + pub enum GenericRoutingError { + // #[error("not enough application data, expected at least {expected}, found {found}")] + // NotEnoughAppData { expected: usize, found: usize }, + #[error("Unknown target ID {0}")] + UnknownTargetId(ComponentId), + #[error("Sending action request failed: {0}")] + Send(GenericSendError), + } + + /// This error can be used for generic conversions from PUS Telecommands to request types. 
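A sketch of how the ActivePusRequestStd type defined above slots into an active request map, assuming the HashMap-backed DefaultActiveRequestMap keyed by RequestId; the 10 second timeout is an arbitrary illustration value:

use core::time::Duration;

// Register a freshly routed request so that later replies and timeout sweeps can find it.
fn register_routed_request(
    active_requests: &mut DefaultActiveRequestMap<ActivePusRequestStd>,
    request_id: RequestId,
    target_id: ComponentId,
    token: TcStateToken,
) {
    active_requests.insert(
        &request_id,
        ActivePusRequestStd::new(target_id, token, Duration::from_secs(10)),
    );
}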
+ /// + /// Please note that this error can also be used if no request is generated and the PUS + /// service, subservice and application data is used directly to perform some request. + #[derive(Debug, Clone, Error)] + pub enum GenericConversionError { #[error("wrong service number {0} for packet handler")] WrongService(u8), #[error("invalid subservice {0}")] InvalidSubservice(u8), #[error("not enough application data, expected at least {expected}, found {found}")] NotEnoughAppData { expected: usize, found: usize }, - #[error("PUS packet too large, does not fit in buffer: {0}")] - PusPacketTooLarge(usize), #[error("invalid application data")] InvalidAppData(String), - #[error("invalid format of TC in memory: {0:?}")] - InvalidTcInMemoryFormat(TcInMemory), - #[error("generic ECSS tmtc error: {0}")] - EcssTmtc(#[from] EcssTmtcError), + } + + /// Wrapper type which tries to encapsulate all possible errors when handling PUS packets. + #[derive(Debug, Clone, Error)] + pub enum PusPacketHandlingError { + #[error("error polling PUS TC packet: {0}")] + TcPolling(#[from] EcssTmtcError), + #[error("error generating PUS reader from memory: {0}")] + TcFromMem(#[from] PusTcFromMemError), + #[error("generic request conversion error: {0}")] + RequestConversion(#[from] GenericConversionError), + #[error("request routing error: {0}")] + RequestRouting(#[from] GenericRoutingError), #[error("invalid verification token")] InvalidVerificationToken, - #[error("request routing error: {0}")] - RequestRoutingError(#[from] GenericRoutingError), #[error("other error {0}")] Other(String), } @@ -735,19 +1163,24 @@ pub mod std_mod { } pub trait EcssTcInMemConverter { - fn cache_ecss_tc_in_memory( - &mut self, - possible_packet: &TcInMemory, - ) -> Result<(), PusPacketHandlingError>; + fn cache(&mut self, possible_packet: &TcInMemory) -> Result<(), PusTcFromMemError>; fn tc_slice_raw(&self) -> &[u8]; - fn convert_ecss_tc_in_memory_to_reader( + fn cache_and_convert( &mut self, possible_packet: &TcInMemory, - ) -> Result, PusPacketHandlingError> { - self.cache_ecss_tc_in_memory(possible_packet)?; - Ok(PusTcReader::new(self.tc_slice_raw())?.0) + ) -> Result, PusTcFromMemError> { + self.cache(possible_packet)?; + Ok(PusTcReader::new(self.tc_slice_raw()) + .map_err(EcssTmtcError::Pus)? + .0) + } + + fn convert(&self) -> Result, PusTcFromMemError> { + Ok(PusTcReader::new(self.tc_slice_raw()) + .map_err(EcssTmtcError::Pus)? + .0) } } @@ -760,16 +1193,11 @@ pub mod std_mod { } impl EcssTcInMemConverter for EcssTcInVecConverter { - fn cache_ecss_tc_in_memory( - &mut self, - tc_in_memory: &TcInMemory, - ) -> Result<(), PusPacketHandlingError> { + fn cache(&mut self, tc_in_memory: &TcInMemory) -> Result<(), PusTcFromMemError> { self.pus_tc_raw = None; match tc_in_memory { super::TcInMemory::StoreAddr(_) => { - return Err(PusPacketHandlingError::InvalidTcInMemoryFormat( - tc_in_memory.clone(), - )); + return Err(PusTcFromMemError::InvalidFormat(tc_in_memory.clone())); } super::TcInMemory::Vec(vec) => { self.pus_tc_raw = Some(vec.clone()); @@ -803,17 +1231,20 @@ pub mod std_mod { } } - pub fn copy_tc_to_buf(&mut self, addr: StoreAddr) -> Result<(), PusPacketHandlingError> { + pub fn copy_tc_to_buf(&mut self, addr: StoreAddr) -> Result<(), PusTcFromMemError> { // Keep locked section as short as possible. 
- let mut tc_pool = self - .shared_tc_store - .write() - .map_err(|_| PusPacketHandlingError::EcssTmtc(EcssTmtcError::StoreLock))?; - let tc_size = tc_pool - .len_of_data(&addr) - .map_err(|e| PusPacketHandlingError::EcssTmtc(EcssTmtcError::Store(e)))?; + let mut tc_pool = self.shared_tc_store.write().map_err(|_| { + PusTcFromMemError::EcssTmtc(EcssTmtcError::Store(StoreError::LockError)) + })?; + let tc_size = tc_pool.len_of_data(&addr).map_err(EcssTmtcError::Store)?; if tc_size > self.pus_buf.len() { - return Err(PusPacketHandlingError::PusPacketTooLarge(tc_size)); + return Err( + EcssTmtcError::ByteConversion(ByteConversionError::ToSliceTooSmall { + found: self.pus_buf.len(), + expected: tc_size, + }) + .into(), + ); } let tc_guard = tc_pool.read_with_guard(addr); // TODO: Proper error handling. @@ -823,18 +1254,13 @@ pub mod std_mod { } impl EcssTcInMemConverter for EcssTcInSharedStoreConverter { - fn cache_ecss_tc_in_memory( - &mut self, - tc_in_memory: &TcInMemory, - ) -> Result<(), PusPacketHandlingError> { + fn cache(&mut self, tc_in_memory: &TcInMemory) -> Result<(), PusTcFromMemError> { match tc_in_memory { super::TcInMemory::StoreAddr(addr) => { self.copy_tc_to_buf(*addr)?; } super::TcInMemory::Vec(_) => { - return Err(PusPacketHandlingError::InvalidTcInMemoryFormat( - tc_in_memory.clone(), - )); + return Err(PusTcFromMemError::InvalidFormat(tc_in_memory.clone())); } }; Ok(()) @@ -850,29 +1276,10 @@ pub mod std_mod { TmSender: EcssTmSenderCore, VerificationReporter: VerificationReportingProvider, > { + pub id: ComponentId, pub tc_receiver: TcReceiver, pub tm_sender: TmSender, - pub tm_apid: u16, - pub verification_handler: VerificationReporter, - } - #[cfg(feature = "std")] - pub fn get_current_cds_short_timestamp( - partial_error: &mut Option, - ) -> [u8; 7] { - let mut time_stamp: [u8; 7] = [0; 7]; - let time_provider = CdsTime::now_with_u16_days().map_err(PartialPusHandlingError::Time); - if let Ok(time_provider) = time_provider { - // Can't fail, we have a buffer with the exact required size. - time_provider.write_to_bytes(&mut time_stamp).unwrap(); - } else { - *partial_error = Some(time_provider.unwrap_err()); - } - time_stamp - } - #[cfg(feature = "std")] - pub fn get_current_timestamp_ignore_error() -> [u8; 7] { - let mut dummy = None; - get_current_cds_short_timestamp(&mut dummy) + pub verif_reporter: VerificationReporter, } /// This is a high-level PUS packet handler helper. @@ -902,23 +1309,31 @@ pub mod std_mod { > PusServiceHelper { pub fn new( + id: ComponentId, tc_receiver: TcReceiver, tm_sender: TmSender, - tm_apid: u16, verification_handler: VerificationReporter, tc_in_mem_converter: TcInMemConverter, ) -> Self { Self { common: PusServiceBase { + id, tc_receiver, tm_sender, - tm_apid, - verification_handler, + verif_reporter: verification_handler, }, tc_in_mem_converter, } } + pub fn id(&self) -> ComponentId { + self.common.id + } + + pub fn tm_sender(&self) -> &TmSender { + &self.common.tm_sender + } + /// This function can be used to poll the internal [EcssTcReceiverCore] object for the next /// telecommand packet. It will return `Ok(None)` if there are not packets available. 
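The converter API is now split into a cache step and a convert step. A sketch of the resulting call pattern, close to what the service handlers further below do; the service 17 check is only an illustration of mapping into the new error types:

use spacepackets::ecss::{tc::PusTcReader, PusPacket};

// Cache the raw TC bytes first, then borrow them again as a PusTcReader.
fn read_test_service_tc<'a>(
    converter: &'a mut impl EcssTcInMemConverter,
    tc_in_memory: &TcInMemory,
) -> Result<PusTcReader<'a>, PusPacketHandlingError> {
    converter.cache(tc_in_memory)?;
    let tc = converter.convert()?;
    if tc.service() != 17 {
        return Err(GenericConversionError::WrongService(tc.service()).into());
    }
    Ok(tc)
}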
/// In any other case, it will perform the acceptance of the ECSS TC packet using the @@ -944,37 +1359,48 @@ pub mod std_mod { })) } Err(e) => match e { - TryRecvTmtcError::Tmtc(e) => Err(PusPacketHandlingError::EcssTmtc(e)), + TryRecvTmtcError::Tmtc(e) => Err(PusPacketHandlingError::TcPolling(e)), TryRecvTmtcError::Empty => Ok(None), }, } } + + pub fn verif_reporter(&self) -> &VerificationReporter { + &self.common.verif_reporter + } + pub fn verif_reporter_mut(&mut self) -> &mut VerificationReporter { + &mut self.common.verif_reporter + } + + pub fn tc_in_mem_converter(&self) -> &TcInMemConverter { + &self.tc_in_mem_converter + } + + pub fn tc_in_mem_converter_mut(&mut self) -> &mut TcInMemConverter { + &mut self.tc_in_mem_converter + } } - pub type PusServiceHelperDynWithMpsc = PusServiceHelper< - MpscTcReceiver, - TmAsVecSenderWithMpsc, - TcInMemConverter, - VerificationReporter, - >; + pub type PusServiceHelperDynWithMpsc = + PusServiceHelper; pub type PusServiceHelperDynWithBoundedMpsc = PusServiceHelper< MpscTcReceiver, - TmAsVecSenderWithBoundedMpsc, + MpscTmAsVecSenderBounded, TcInMemConverter, VerificationReporter, >; pub type PusServiceHelperStaticWithMpsc = PusServiceHelper< MpscTcReceiver, - TmInSharedPoolSenderWithMpsc, + MpscTmInSharedPoolSender, TcInMemConverter, VerificationReporter, >; pub type PusServiceHelperStaticWithBoundedMpsc = PusServiceHelper< MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, + MpscTmInSharedPoolSenderBounded, TcInMemConverter, VerificationReporter, >; @@ -988,12 +1414,43 @@ pub(crate) fn source_buffer_large_enough( return Err(ByteConversionError::ToSliceTooSmall { found: cap, expected: len, - } - .into()); + }); } Ok(()) } +#[cfg(any(feature = "test_util", test))] +pub mod test_util { + use crate::request::UniqueApidTargetId; + use spacepackets::ecss::{tc::PusTcCreator, tm::PusTmReader}; + + use super::{ + verification::{self, TcStateAccepted, VerificationToken}, + PusPacketHandlerResult, PusPacketHandlingError, + }; + + pub const TEST_APID: u16 = 0x101; + pub const TEST_UNIQUE_ID: u32 = 0x05; + pub const TEST_COMPONENT_ID: UniqueApidTargetId = + UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID); + + pub trait PusTestHarness { + fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken; + fn send_tc(&self, token: &VerificationToken, tc: &PusTcCreator); + fn read_next_tm(&mut self) -> PusTmReader<'_>; + fn check_no_tm_available(&self) -> bool; + fn check_next_verification_tm( + &self, + subservice: u8, + expected_request_id: verification::RequestId, + ); + } + + pub trait SimplePusPacketHandler { + fn handle_one_tc(&mut self) -> Result; + } +} + #[cfg(test)] pub mod tests { use core::cell::RefCell; @@ -1008,29 +1465,18 @@ pub mod tests { use spacepackets::ecss::{PusPacket, WritablePusPacket}; use spacepackets::CcsdsPacket; - use crate::pool::{ - PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig, StoreAddr, - }; - use crate::pus::verification::RequestId; + use crate::pool::{PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig}; + use crate::pus::verification::{RequestId, VerificationReporter}; use crate::tmtc::tm_helper::SharedTmPool; - use crate::TargetId; + use crate::ComponentId; - use super::verification::std_mod::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, - }; - use super::verification::tests::{SharedVerificationMap, TestVerificationReporter}; + use super::test_util::{TEST_APID, TEST_COMPONENT_ID}; + + use 
super::verification::test_util::TestVerificationReporter; use super::verification::{ - TcStateAccepted, VerificationReporterCfg, VerificationReporterWithSender, - VerificationReportingProvider, VerificationToken, + TcStateAccepted, VerificationReporterCfg, VerificationReportingProvider, VerificationToken, }; - use super::{ - EcssTcAndToken, EcssTcInSharedStoreConverter, EcssTcInVecConverter, GenericRoutingError, - MpscTcReceiver, PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler, - PusServiceHelper, TcInMemory, TmAsVecSenderWithId, TmAsVecSenderWithMpsc, - TmInSharedPoolSenderWithBoundedMpsc, TmInSharedPoolSenderWithId, - }; - - pub const TEST_APID: u16 = 0x101; + use super::*; #[derive(Debug, Eq, PartialEq, Clone)] pub(crate) struct CommonTmInfo { @@ -1041,17 +1487,6 @@ pub mod tests { pub time_stamp: [u8; 7], } - pub trait PusTestHarness { - fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken; - fn read_next_tm(&mut self) -> PusTmReader<'_>; - fn check_no_tm_available(&self) -> bool; - fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId); - } - - pub trait SimplePusPacketHandler { - fn handle_one_tc(&mut self) -> Result; - } - impl CommonTmInfo { pub fn new_from_tm(tm: &PusTmCreator) -> Self { let mut time_stamp = [0; 7]; @@ -1068,20 +1503,19 @@ pub mod tests { /// Common fields for a PUS service test harness. pub struct PusServiceHandlerWithSharedStoreCommon { - pus_buf: [u8; 2048], + pus_buf: RefCell<[u8; 2048]>, tm_buf: [u8; 2048], tc_pool: SharedStaticMemoryPool, tm_pool: SharedTmPool, tc_sender: mpsc::SyncSender, - tm_receiver: mpsc::Receiver, - verification_handler: VerificationReporterWithSharedPoolMpscBoundedSender, + tm_receiver: mpsc::Receiver, } pub type PusServiceHelperStatic = PusServiceHelper< MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, + MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, + VerificationReporter, >; impl PusServiceHandlerWithSharedStoreCommon { @@ -1089,7 +1523,7 @@ pub mod tests { /// [PusServiceHandler] which might be required for a specific PUS service handler. /// /// The PUS service handler is instantiated with a [EcssTcInStoreConverter]. 
- pub fn new() -> (Self, PusServiceHelperStatic) { + pub fn new(id: ComponentId) -> (Self, PusServiceHelperStatic) { let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)], false); let tc_pool = StaticMemoryPool::new(pool_cfg.clone()); let tm_pool = StaticMemoryPool::new(pool_cfg); @@ -1098,62 +1532,47 @@ pub mod tests { let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::sync_channel(10); let (tm_tx, tm_rx) = mpsc::sync_channel(10); - let verif_sender = TmInSharedPoolSenderWithBoundedMpsc::new( - 0, - "verif_sender", - shared_tm_pool.clone(), - tm_tx.clone(), - ); let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); - let verification_handler = - VerificationReporterWithSharedPoolMpscBoundedSender::new(&verif_cfg, verif_sender); - let test_srv_tm_sender = - TmInSharedPoolSenderWithId::new(0, "TEST_SENDER", shared_tm_pool.clone(), tm_tx); - let test_srv_tc_receiver = MpscTcReceiver::new(0, "TEST_RECEIVER", test_srv_tc_rx); + let verification_handler = VerificationReporter::new(&verif_cfg); + let test_srv_tm_sender = TmInSharedPoolSender::new(shared_tm_pool.clone(), tm_tx); let in_store_converter = EcssTcInSharedStoreConverter::new(shared_tc_pool.clone(), 2048); ( Self { - pus_buf: [0; 2048], + pus_buf: RefCell::new([0; 2048]), tm_buf: [0; 2048], tc_pool: shared_tc_pool, tm_pool: shared_tm_pool, tc_sender: test_srv_tc_tx, tm_receiver: tm_rx, - verification_handler: verification_handler.clone(), }, PusServiceHelper::new( - test_srv_tc_receiver, + id, + test_srv_tc_rx, test_srv_tm_sender, - TEST_APID, verification_handler, in_store_converter, ), ) } - pub fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken { - let token = self.verification_handler.add_tc(tc); - let token = self - .verification_handler - .acceptance_success(token, &[0; 7]) - .unwrap(); - let tc_size = tc.write_to_bytes(&mut self.pus_buf).unwrap(); + pub fn send_tc(&self, token: &VerificationToken, tc: &PusTcCreator) { + let mut mut_buf = self.pus_buf.borrow_mut(); + let tc_size = tc.write_to_bytes(mut_buf.as_mut_slice()).unwrap(); let mut tc_pool = self.tc_pool.write().unwrap(); - let addr = tc_pool.add(&self.pus_buf[..tc_size]).unwrap(); + let addr = tc_pool.add(&mut_buf[..tc_size]).unwrap(); drop(tc_pool); // Send accepted TC to test service handler. 
self.tc_sender - .send(EcssTcAndToken::new(addr, token)) + .send(EcssTcAndToken::new(addr, *token)) .expect("sending tc failed"); - token } pub fn read_next_tm(&mut self) -> PusTmReader<'_> { let next_msg = self.tm_receiver.try_recv(); assert!(next_msg.is_ok()); - let tm_addr = next_msg.unwrap(); + let tm_in_pool = next_msg.unwrap(); let tm_pool = self.tm_pool.0.read().unwrap(); - let tm_raw = tm_pool.read_as_vec(&tm_addr).unwrap(); + let tm_raw = tm_pool.read_as_vec(&tm_in_pool.store_addr).unwrap(); self.tm_buf[0..tm_raw.len()].copy_from_slice(&tm_raw); PusTmReader::new(&self.tm_buf, 7).unwrap().0 } @@ -1169,9 +1588,9 @@ pub mod tests { pub fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId) { let next_msg = self.tm_receiver.try_recv(); assert!(next_msg.is_ok()); - let tm_addr = next_msg.unwrap(); + let tm_in_pool = next_msg.unwrap(); let tm_pool = self.tm_pool.0.read().unwrap(); - let tm_raw = tm_pool.read_as_vec(&tm_addr).unwrap(); + let tm_raw = tm_pool.read_as_vec(&tm_in_pool.store_addr).unwrap(); let tm = PusTmReader::new(&tm_raw, 7).unwrap().0; assert_eq!(PusPacket::service(&tm), 1); assert_eq!(PusPacket::subservice(&tm), subservice); @@ -1182,43 +1601,38 @@ pub mod tests { } } - pub struct PusServiceHandlerWithVecCommon { - current_tm: Option>, + pub struct PusServiceHandlerWithVecCommon { + current_tm: Option>, tc_sender: mpsc::Sender, - tm_receiver: mpsc::Receiver>, - pub verification_handler: VerificationReporter, + tm_receiver: mpsc::Receiver, } pub type PusServiceHelperDynamic = PusServiceHelper< MpscTcReceiver, - TmAsVecSenderWithMpsc, + MpscTmAsVecSender, EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, + VerificationReporter, >; - impl PusServiceHandlerWithVecCommon { - pub fn new_with_standard_verif_reporter() -> (Self, PusServiceHelperDynamic) { + impl PusServiceHandlerWithVecCommon { + pub fn new_with_standard_verif_reporter( + id: ComponentId, + ) -> (Self, PusServiceHelperDynamic) { let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::channel(); let (tm_tx, tm_rx) = mpsc::channel(); - let verif_sender = TmAsVecSenderWithId::new(0, "verififcatio-sender", tm_tx.clone()); let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); - let verification_handler = - VerificationReporterWithSender::new(&verif_cfg, verif_sender); - - let test_srv_tm_sender = TmAsVecSenderWithId::new(0, "test-sender", tm_tx); - let test_srv_tc_receiver = MpscTcReceiver::new(0, "test-receiver", test_srv_tc_rx); + let verification_handler = VerificationReporter::new(&verif_cfg); let in_store_converter = EcssTcInVecConverter::default(); ( Self { current_tm: None, tc_sender: test_srv_tc_tx, tm_receiver: tm_rx, - verification_handler: verification_handler.clone(), }, PusServiceHelper::new( - test_srv_tc_receiver, - test_srv_tm_sender, - TEST_APID, + id, + test_srv_tc_rx, + tm_tx, verification_handler, in_store_converter, ), @@ -1226,12 +1640,12 @@ pub mod tests { } } - impl PusServiceHandlerWithVecCommon { + impl PusServiceHandlerWithVecCommon { pub fn new_with_test_verif_sender() -> ( Self, PusServiceHelper< MpscTcReceiver, - TmAsVecSenderWithMpsc, + MpscTmAsVecSender, EcssTcInVecConverter, TestVerificationReporter, >, @@ -1239,22 +1653,19 @@ pub mod tests { let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::channel(); let (tm_tx, tm_rx) = mpsc::channel(); - let test_srv_tm_sender = TmAsVecSenderWithId::new(0, "test-sender", tm_tx); - let test_srv_tc_receiver = MpscTcReceiver::new(0, "test-receiver", test_srv_tc_rx); let in_store_converter = 
EcssTcInVecConverter::default(); - let shared_verif_map = SharedVerificationMap::default(); - let verification_handler = TestVerificationReporter::new(shared_verif_map); + let verification_handler = TestVerificationReporter::default(); ( Self { current_tm: None, tc_sender: test_srv_tc_tx, tm_receiver: tm_rx, - verification_handler: verification_handler.clone(), + //verification_handler: verification_handler.clone(), }, PusServiceHelper::new( - test_srv_tc_receiver, - test_srv_tm_sender, - TEST_APID, + TEST_COMPONENT_ID.raw(), + test_srv_tc_rx, + tm_tx, verification_handler, in_store_converter, ), @@ -1262,29 +1673,21 @@ pub mod tests { } } - impl - PusServiceHandlerWithVecCommon - { - pub fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken { - let token = self.verification_handler.add_tc(tc); - let token = self - .verification_handler - .acceptance_success(token, &[0; 7]) - .unwrap(); + impl PusServiceHandlerWithVecCommon { + pub fn send_tc(&self, token: &VerificationToken, tc: &PusTcCreator) { // Send accepted TC to test service handler. self.tc_sender .send(EcssTcAndToken::new( TcInMemory::Vec(tc.to_vec().expect("pus tc conversion to vec failed")), - token, + *token, )) .expect("sending tc failed"); - token } pub fn read_next_tm(&mut self) -> PusTmReader<'_> { let next_msg = self.tm_receiver.try_recv(); assert!(next_msg.is_ok()); - self.current_tm = Some(next_msg.unwrap()); + self.current_tm = Some(next_msg.unwrap().packet); PusTmReader::new(self.current_tm.as_ref().unwrap(), 7) .unwrap() .0 @@ -1302,7 +1705,7 @@ pub mod tests { let next_msg = self.tm_receiver.try_recv(); assert!(next_msg.is_ok()); let next_msg = next_msg.unwrap(); - let tm = PusTmReader::new(next_msg.as_slice(), 7).unwrap().0; + let tm = PusTmReader::new(next_msg.packet.as_slice(), 7).unwrap().0; assert_eq!(PusPacket::service(&tm), 1); assert_eq!(PusPacket::subservice(&tm), subservice); assert_eq!(tm.apid(), TEST_APID); @@ -1322,7 +1725,9 @@ pub mod tests { impl TestConverter { pub fn check_service(&self, tc: &PusTcReader) -> Result<(), PusPacketHandlingError> { if tc.service() != SERVICE { - return Err(PusPacketHandlingError::WrongService(tc.service())); + return Err(PusPacketHandlingError::RequestConversion( + GenericConversionError::WrongService(tc.service()), + )); } Ok(()) } @@ -1340,44 +1745,9 @@ pub mod tests { } } - #[derive(Default)] - pub struct TestRoutingErrorHandler { - pub routing_errors: RefCell>, - } - - impl PusRoutingErrorHandler for TestRoutingErrorHandler { - type Error = GenericRoutingError; - - fn handle_error( - &self, - target_id: TargetId, - _token: VerificationToken, - _tc: &PusTcReader, - error: Self::Error, - _time_stamp: &[u8], - _verif_reporter: &impl VerificationReportingProvider, - ) { - self.routing_errors - .borrow_mut() - .push_back((target_id, error)); - } - } - - impl TestRoutingErrorHandler { - pub fn is_empty(&self) -> bool { - self.routing_errors.borrow().is_empty() - } - - pub fn retrieve_next_error(&mut self) -> (TargetId, GenericRoutingError) { - if self.routing_errors.borrow().is_empty() { - panic!("no routing request available"); - } - self.routing_errors.borrow_mut().pop_front().unwrap() - } - } - pub struct TestRouter { - pub routing_requests: RefCell>, + pub routing_requests: RefCell>, + pub routing_errors: RefCell>, pub injected_routing_failure: RefCell>, } @@ -1385,6 +1755,7 @@ pub mod tests { fn default() -> Self { Self { routing_requests: Default::default(), + routing_errors: Default::default(), injected_routing_failure: Default::default(), } } @@ -1398,6 
+1769,31 @@ pub mod tests { Ok(()) } + pub fn handle_error( + &self, + target_id: ComponentId, + _token: VerificationToken, + _tc: &PusTcReader, + error: GenericRoutingError, + _time_stamp: &[u8], + _verif_reporter: &impl VerificationReportingProvider, + ) { + self.routing_errors + .borrow_mut() + .push_back((target_id, error)); + } + + pub fn no_routing_errors(&self) -> bool { + self.routing_errors.borrow().is_empty() + } + + pub fn retrieve_next_routing_error(&mut self) -> (ComponentId, GenericRoutingError) { + if self.routing_errors.borrow().is_empty() { + panic!("no routing request available"); + } + self.routing_errors.borrow_mut().pop_front().unwrap() + } + pub fn inject_routing_error(&mut self, error: GenericRoutingError) { *self.injected_routing_failure.borrow_mut() = Some(error); } @@ -1406,7 +1802,7 @@ pub mod tests { self.routing_requests.borrow().is_empty() } - pub fn retrieve_next_request(&mut self) -> (TargetId, REQUEST) { + pub fn retrieve_next_request(&mut self) -> (ComponentId, REQUEST) { if self.routing_requests.borrow().is_empty() { panic!("no routing request available"); } diff --git a/satrs/src/pus/mode.rs b/satrs/src/pus/mode.rs index 1ab46ef..abb6b99 100644 --- a/satrs/src/pus/mode.rs +++ b/satrs/src/pus/mode.rs @@ -2,6 +2,16 @@ use num_enum::{IntoPrimitive, TryFromPrimitive}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +#[cfg(feature = "alloc")] +#[allow(unused_imports)] +pub use alloc_mod::*; + +#[cfg(feature = "std")] +#[allow(unused_imports)] +pub use std_mod::*; + +pub const MODE_SERVICE_ID: u8 = 200; + #[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[repr(u8)] @@ -14,3 +24,134 @@ pub enum Subservice { TmCantReachMode = 7, TmWrongModeReply = 8, } + +#[cfg(feature = "alloc")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +pub mod alloc_mod {} + +#[cfg(feature = "alloc")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +pub mod std_mod {} + +#[cfg(test)] +mod tests { + + use std::sync::mpsc; + + use crate::{ + mode::{ + ModeAndSubmode, ModeReply, ModeReplySender, ModeRequest, ModeRequestSender, + ModeRequestorAndHandlerMpsc, ModeRequestorMpsc, + }, + request::{GenericMessage, MessageMetadata}, + }; + + const TEST_COMPONENT_ID_0: u64 = 5; + const TEST_COMPONENT_ID_1: u64 = 6; + const TEST_COMPONENT_ID_2: u64 = 7; + + #[test] + fn test_simple_mode_requestor() { + let (reply_sender, reply_receiver) = mpsc::channel(); + let (request_sender, request_receiver) = mpsc::channel(); + let mut mode_requestor = ModeRequestorMpsc::new(TEST_COMPONENT_ID_0, reply_receiver); + mode_requestor.add_message_target(TEST_COMPONENT_ID_1, request_sender); + + // Send a request and verify it arrives at the receiver. + let request_id = 2; + let sent_request = ModeRequest::ReadMode; + mode_requestor + .send_mode_request(request_id, TEST_COMPONENT_ID_1, sent_request) + .expect("send failed"); + let request = request_receiver.recv().expect("recv failed"); + assert_eq!(request.request_id(), 2); + assert_eq!(request.sender_id(), TEST_COMPONENT_ID_0); + assert_eq!(request.message, sent_request); + + // Send a reply and verify it arrives at the requestor. 
+ let mode_reply = ModeReply::ModeReply(ModeAndSubmode::new(1, 5)); + reply_sender + .send(GenericMessage::new( + MessageMetadata::new(request_id, TEST_COMPONENT_ID_1), + mode_reply, + )) + .expect("send failed"); + let reply = mode_requestor.try_recv_mode_reply().expect("recv failed"); + assert!(reply.is_some()); + let reply = reply.unwrap(); + assert_eq!(reply.sender_id(), TEST_COMPONENT_ID_1); + assert_eq!(reply.request_id(), 2); + assert_eq!(reply.message, mode_reply); + } + + #[test] + fn test_mode_requestor_and_request_handler_request_sending() { + let (_reply_sender_to_connector, reply_receiver_of_connector) = mpsc::channel(); + let (_request_sender_to_connector, request_receiver_of_connector) = mpsc::channel(); + + let (request_sender_to_channel_1, request_receiver_channel_1) = mpsc::channel(); + //let (reply_sender_to_channel_2, reply_receiver_channel_2) = mpsc::channel(); + let mut mode_connector = ModeRequestorAndHandlerMpsc::new( + TEST_COMPONENT_ID_0, + request_receiver_of_connector, + reply_receiver_of_connector, + ); + assert_eq!( + ModeRequestSender::local_channel_id(&mode_connector), + TEST_COMPONENT_ID_0 + ); + assert_eq!( + ModeReplySender::local_channel_id(&mode_connector), + TEST_COMPONENT_ID_0 + ); + assert_eq!( + mode_connector.local_channel_id_generic(), + TEST_COMPONENT_ID_0 + ); + + mode_connector.add_request_target(TEST_COMPONENT_ID_1, request_sender_to_channel_1); + + // Send a request and verify it arrives at the receiver. + let request_id = 2; + let sent_request = ModeRequest::ReadMode; + mode_connector + .send_mode_request(request_id, TEST_COMPONENT_ID_1, sent_request) + .expect("send failed"); + + let request = request_receiver_channel_1.recv().expect("recv failed"); + assert_eq!(request.request_id(), 2); + assert_eq!(request.sender_id(), TEST_COMPONENT_ID_0); + assert_eq!(request.message, ModeRequest::ReadMode); + } + + #[test] + fn test_mode_requestor_and_request_handler_reply_sending() { + let (_reply_sender_to_connector, reply_receiver_of_connector) = mpsc::channel(); + let (_request_sender_to_connector, request_receiver_of_connector) = mpsc::channel(); + + let (reply_sender_to_channel_2, reply_receiver_channel_2) = mpsc::channel(); + let mut mode_connector = ModeRequestorAndHandlerMpsc::new( + TEST_COMPONENT_ID_0, + request_receiver_of_connector, + reply_receiver_of_connector, + ); + mode_connector.add_reply_target(TEST_COMPONENT_ID_2, reply_sender_to_channel_2); + + // Send a reply and verify it arrives at the receiver. 
+ let request_id = 2; + let sent_reply = ModeReply::ModeReply(ModeAndSubmode::new(3, 5)); + mode_connector + .send_mode_reply( + MessageMetadata::new(request_id, TEST_COMPONENT_ID_2), + sent_reply, + ) + .expect("send failed"); + let reply = reply_receiver_channel_2.recv().expect("recv failed"); + assert_eq!(reply.request_id(), 2); + assert_eq!(reply.sender_id(), TEST_COMPONENT_ID_0); + assert_eq!(reply.message, sent_reply); + } + + #[test] + fn test_mode_reply_handler() {} +} diff --git a/satrs/src/pus/scheduler_srv.rs b/satrs/src/pus/scheduler_srv.rs index cc75fe0..6296798 100644 --- a/satrs/src/pus/scheduler_srv.rs +++ b/satrs/src/pus/scheduler_srv.rs @@ -1,20 +1,16 @@ use super::scheduler::PusSchedulerProvider; -use super::verification::{ - VerificationReporterWithSharedPoolMpscBoundedSender, - VerificationReporterWithSharedPoolMpscSender, VerificationReporterWithVecMpscBoundedSender, - VerificationReporterWithVecMpscSender, VerificationReportingProvider, -}; +use super::verification::{VerificationReporter, VerificationReportingProvider}; use super::{ - get_current_cds_short_timestamp, EcssTcInMemConverter, EcssTcInSharedStoreConverter, - EcssTcInVecConverter, EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusServiceHelper, - TmAsVecSenderWithBoundedMpsc, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc, - TmInSharedPoolSenderWithMpsc, + EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTcReceiverCore, + EcssTmSenderCore, MpscTcReceiver, MpscTmInSharedPoolSender, MpscTmInSharedPoolSenderBounded, + PusServiceHelper, PusTmAsVec, }; use crate::pool::PoolProvider; use crate::pus::{PusPacketHandlerResult, PusPacketHandlingError}; use alloc::string::ToString; use spacepackets::ecss::{scheduling, PusPacket}; use spacepackets::time::cds::CdsTime; +use std::sync::mpsc; /// This is a helper class for [std] environments to handle generic PUS 11 (scheduling service) /// packets. This handler is able to handle the most important PUS requests for a scheduling @@ -24,7 +20,7 @@ use spacepackets::time::cds::CdsTime; /// telecommands inside the scheduler. The user can retrieve the wrapped scheduler via the /// [Self::scheduler] and [Self::scheduler_mut] function and then use the scheduler API to release /// telecommands when applicable. 
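The reworked scheduler handler shown below no longer generates its own timestamp; the caller passes it in. A sketch of one service cycle, using the dynamic mpsc helper type and a thread-local StaticMemoryPool as an illustration:

use crate::pool::StaticMemoryPool;
use spacepackets::time::{cds, TimeWriter};

// One iteration of a scheduler service thread: build the CDS short timestamp once, then
// let the handler consume the next queued TC, if any.
fn sched_service_cycle(
    handler: &mut PusService11SchedHandlerDynWithMpsc,
    sched_tc_pool: &mut StaticMemoryPool,
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
    let time_stamp = cds::CdsTime::now_with_u16_days()
        .expect("creating CDS time provider failed")
        .to_vec()
        .expect("writing timestamp failed");
    handler.poll_and_handle_next_tc(&time_stamp, sched_tc_pool)
}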
-pub struct PusService11SchedHandler< +pub struct PusSchedServiceHandler< TcReceiver: EcssTcReceiverCore, TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter, @@ -43,13 +39,7 @@ impl< VerificationReporter: VerificationReportingProvider, Scheduler: PusSchedulerProvider, > - PusService11SchedHandler< - TcReceiver, - TmSender, - TcInMemConverter, - VerificationReporter, - Scheduler, - > + PusSchedServiceHandler { pub fn new( service_helper: PusServiceHelper< @@ -74,8 +64,9 @@ impl< &self.scheduler } - pub fn handle_one_tc( + pub fn poll_and_handle_next_tc( &mut self, + time_stamp: &[u8], sched_tc_pool: &mut (impl PoolProvider + ?Sized), ) -> Result { let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?; @@ -83,10 +74,10 @@ impl< return Ok(PusPacketHandlerResult::Empty); } let ecss_tc_and_token = possible_packet.unwrap(); - let tc = self - .service_helper - .tc_in_mem_converter - .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?; + self.service_helper + .tc_in_mem_converter_mut() + .cache(&ecss_tc_and_token.tc_in_memory)?; + let tc = self.service_helper.tc_in_mem_converter().convert()?; let subservice = PusPacket::subservice(&tc); let standard_subservice = scheduling::Subservice::try_from(subservice); if standard_subservice.is_err() { @@ -95,23 +86,30 @@ impl< ecss_tc_and_token.token, )); } - let mut partial_error = None; - let time_stamp = get_current_cds_short_timestamp(&mut partial_error); + let partial_error = None; match standard_subservice.unwrap() { scheduling::Subservice::TcEnableScheduling => { let start_token = self .service_helper - .common - .verification_handler - .start_success(ecss_tc_and_token.token, &time_stamp) + .verif_reporter() + .start_success( + self.service_helper.common.id, + &self.service_helper.common.tm_sender, + ecss_tc_and_token.token, + time_stamp, + ) .expect("Error sending start success"); self.scheduler.enable(); if self.scheduler.is_enabled() { self.service_helper - .common - .verification_handler - .completion_success(start_token, &time_stamp) + .verif_reporter() + .completion_success( + self.service_helper.common.id, + &self.service_helper.common.tm_sender, + start_token, + time_stamp, + ) .expect("Error sending completion success"); } else { return Err(PusPacketHandlingError::Other( @@ -122,17 +120,25 @@ impl< scheduling::Subservice::TcDisableScheduling => { let start_token = self .service_helper - .common - .verification_handler - .start_success(ecss_tc_and_token.token, &time_stamp) + .verif_reporter() + .start_success( + self.service_helper.common.id, + &self.service_helper.common.tm_sender, + ecss_tc_and_token.token, + time_stamp, + ) .expect("Error sending start success"); self.scheduler.disable(); if !self.scheduler.is_enabled() { self.service_helper - .common - .verification_handler - .completion_success(start_token, &time_stamp) + .verif_reporter() + .completion_success( + self.service_helper.common.id, + &self.service_helper.common.tm_sender, + start_token, + time_stamp, + ) .expect("Error sending completion success"); } else { return Err(PusPacketHandlingError::Other( @@ -143,9 +149,13 @@ impl< scheduling::Subservice::TcResetScheduling => { let start_token = self .service_helper - .common - .verification_handler - .start_success(ecss_tc_and_token.token, &time_stamp) + .verif_reporter() + .start_success( + self.service_helper.common.id, + &self.service_helper.common.tm_sender, + ecss_tc_and_token.token, + time_stamp, + ) .expect("Error sending start success"); self.scheduler @@ -153,17 
+163,26 @@ impl< .expect("Error resetting TC Pool"); self.service_helper - .common - .verification_handler - .completion_success(start_token, &time_stamp) + .verif_reporter() + .completion_success( + self.service_helper.common.id, + &self.service_helper.common.tm_sender, + start_token, + time_stamp, + ) .expect("Error sending completion success"); } scheduling::Subservice::TcInsertActivity => { let start_token = self .service_helper .common - .verification_handler - .start_success(ecss_tc_and_token.token, &time_stamp) + .verif_reporter + .start_success( + self.service_helper.common.id, + &self.service_helper.common.tm_sender, + ecss_tc_and_token.token, + time_stamp, + ) .expect("error sending start success"); // let mut pool = self.sched_tc_pool.write().expect("locking pool failed"); @@ -172,9 +191,13 @@ impl< .expect("insertion of activity into pool failed"); self.service_helper - .common - .verification_handler - .completion_success(start_token, &time_stamp) + .verif_reporter() + .completion_success( + self.service_helper.common.id, + &self.service_helper.common.tm_sender, + start_token, + time_stamp, + ) .expect("sending completion success failed"); } _ => { @@ -195,53 +218,57 @@ impl< } /// Helper type definition for a PUS 11 handler with a dynamic TMTC memory backend and regular /// mpsc queues. -pub type PusService11SchedHandlerDynWithMpsc = PusService11SchedHandler< +pub type PusService11SchedHandlerDynWithMpsc = PusSchedServiceHandler< MpscTcReceiver, - TmAsVecSenderWithMpsc, + mpsc::Sender, EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, + VerificationReporter, PusScheduler, >; /// Helper type definition for a PUS 11 handler with a dynamic TMTC memory backend and bounded MPSC /// queues. -pub type PusService11SchedHandlerDynWithBoundedMpsc = PusService11SchedHandler< +pub type PusService11SchedHandlerDynWithBoundedMpsc = PusSchedServiceHandler< MpscTcReceiver, - TmAsVecSenderWithBoundedMpsc, + mpsc::SyncSender, EcssTcInVecConverter, - VerificationReporterWithVecMpscBoundedSender, + VerificationReporter, PusScheduler, >; /// Helper type definition for a PUS 11 handler with a shared store TMTC memory backend and regular /// mpsc queues. -pub type PusService11SchedHandlerStaticWithMpsc = PusService11SchedHandler< +pub type PusService11SchedHandlerStaticWithMpsc = PusSchedServiceHandler< MpscTcReceiver, - TmInSharedPoolSenderWithMpsc, + MpscTmInSharedPoolSender, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscSender, + VerificationReporter, PusScheduler, >; /// Helper type definition for a PUS 11 handler with a shared store TMTC memory backend and bounded /// mpsc queues. 
-pub type PusService11SchedHandlerStaticWithBoundedMpsc = PusService11SchedHandler< +pub type PusService11SchedHandlerStaticWithBoundedMpsc = PusSchedServiceHandler< MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, + MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, + VerificationReporter, PusScheduler, >; #[cfg(test)] mod tests { use crate::pool::{StaticMemoryPool, StaticPoolConfig}; - use crate::pus::tests::TEST_APID; - use crate::pus::verification::VerificationReporterWithSharedPoolMpscBoundedSender; + use crate::pus::test_util::{PusTestHarness, TEST_APID}; + use crate::pus::verification::{VerificationReporter, VerificationReportingProvider}; + use crate::pus::{ scheduler::{self, PusSchedulerProvider, TcInfo}, - tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness}, + tests::PusServiceHandlerWithSharedStoreCommon, verification::{RequestId, TcStateAccepted, VerificationToken}, EcssTcInSharedStoreConverter, }; - use crate::pus::{MpscTcReceiver, TmInSharedPoolSenderWithBoundedMpsc}; + use crate::pus::{ + MpscTcReceiver, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, + PusPacketHandlingError, + }; use alloc::collections::VecDeque; use delegate::delegate; use spacepackets::ecss::scheduling::Subservice; @@ -254,15 +281,15 @@ mod tests { time::cds, }; - use super::PusService11SchedHandler; + use super::PusSchedServiceHandler; struct Pus11HandlerWithStoreTester { common: PusServiceHandlerWithSharedStoreCommon, - handler: PusService11SchedHandler< + handler: PusSchedServiceHandler< MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, + MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, + VerificationReporter, TestScheduler, >, sched_tc_pool: StaticMemoryPool, @@ -273,19 +300,39 @@ mod tests { let test_scheduler = TestScheduler::default(); let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)], false); let sched_tc_pool = StaticMemoryPool::new(pool_cfg.clone()); - let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(); + let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(0); Self { common, - handler: PusService11SchedHandler::new(srv_handler, test_scheduler), + handler: PusSchedServiceHandler::new(srv_handler, test_scheduler), sched_tc_pool, } } + + pub fn handle_one_tc(&mut self) -> Result { + let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap(); + self.handler + .poll_and_handle_next_tc(&time_stamp, &mut self.sched_tc_pool) + } } impl PusTestHarness for Pus11HandlerWithStoreTester { + fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken { + let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc); + self.handler + .service_helper + .verif_reporter() + .acceptance_success( + self.handler.service_helper.id(), + self.handler.service_helper.tm_sender(), + init_token, + &[0; 7], + ) + .expect("acceptance success failure") + } + delegate! 
{ to self.common { - fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken; + fn send_tc(&self, token: &VerificationToken, tc: &PusTcCreator); fn read_next_tm(&mut self) -> PusTmReader<'_>; fn check_no_tm_available(&self) -> bool; fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId); @@ -344,12 +391,14 @@ mod tests { let mut reply_header = SpHeader::tm_unseg(TEST_APID, 0, 0).unwrap(); let tc_header = PusTcSecondaryHeader::new_simple(11, subservice as u8); let enable_scheduling = PusTcCreator::new(&mut reply_header, tc_header, &[0; 7], true); - let token = test_harness.send_tc(&enable_scheduling); + let token = test_harness.init_verification(&enable_scheduling); + test_harness.send_tc(&token, &enable_scheduling); - let request_id = token.req_id(); + let request_id = token.request_id(); + let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap(); test_harness .handler - .handle_one_tc(&mut test_harness.sched_tc_pool) + .poll_and_handle_next_tc(&time_stamp, &mut test_harness.sched_tc_pool) .unwrap(); test_harness.check_next_verification_tm(1, request_id); test_harness.check_next_verification_tm(3, request_id); @@ -404,13 +453,11 @@ mod tests { &sched_app_data[..written_len], true, ); - let token = test_harness.send_tc(&enable_scheduling); + let token = test_harness.init_verification(&enable_scheduling); + test_harness.send_tc(&token, &enable_scheduling); - let request_id = token.req_id(); - test_harness - .handler - .handle_one_tc(&mut test_harness.sched_tc_pool) - .unwrap(); + let request_id = token.request_id(); + test_harness.handle_one_tc().unwrap(); test_harness.check_next_verification_tm(1, request_id); test_harness.check_next_verification_tm(3, request_id); test_harness.check_next_verification_tm(7, request_id); diff --git a/satrs/src/pus/test.rs b/satrs/src/pus/test.rs index ea5a720..3a990b6 100644 --- a/satrs/src/pus/test.rs +++ b/satrs/src/pus/test.rs @@ -1,20 +1,17 @@ use crate::pus::{ - PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, PusTmWrapper, + PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, PusTmAsVec, + PusTmInPool, PusTmVariant, }; use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader}; use spacepackets::ecss::PusPacket; use spacepackets::SpHeader; +use std::sync::mpsc; -use super::verification::{ - VerificationReporterWithSharedPoolMpscBoundedSender, - VerificationReporterWithSharedPoolMpscSender, VerificationReporterWithVecMpscBoundedSender, - VerificationReporterWithVecMpscSender, VerificationReportingProvider, -}; +use super::verification::{VerificationReporter, VerificationReportingProvider}; use super::{ - get_current_cds_short_timestamp, EcssTcInMemConverter, EcssTcInSharedStoreConverter, - EcssTcInVecConverter, EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusServiceHelper, - TmAsVecSenderWithBoundedMpsc, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc, - TmInSharedPoolSenderWithMpsc, + EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTcReceiverCore, + EcssTmSenderCore, GenericConversionError, MpscTcReceiver, MpscTmInSharedPoolSender, + MpscTmInSharedPoolSenderBounded, PusServiceHelper, }; /// This is a helper class for [std] environments to handle generic PUS 17 (test service) packets. 
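The PUS 17 handler described above now reports service mismatches through the nested conversion error introduced in this patch, as the tests further down exercise. A small hedged sketch of matching on that error path; `pus_17_handler` is an assumed handler instance and the timestamp is a placeholder:

// Sketch only, not part of this patch.
let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
if let Err(PusPacketHandlingError::RequestConversion(GenericConversionError::WrongService(svc))) =
    pus_17_handler.poll_and_handle_next_tc(&time_stamp)
{
    // A telecommand for another service ended up in the PUS 17 queue.
    log::warn!("unexpected service {svc} in PUS 17 handler");
}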
@@ -47,27 +44,33 @@ impl< Self { service_helper } } - pub fn handle_one_tc(&mut self) -> Result { + pub fn poll_and_handle_next_tc( + &mut self, + time_stamp: &[u8], + ) -> Result { let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?; if possible_packet.is_none() { return Ok(PusPacketHandlerResult::Empty); } let ecss_tc_and_token = possible_packet.unwrap(); - let tc = self - .service_helper - .tc_in_mem_converter - .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?; + self.service_helper + .tc_in_mem_converter_mut() + .cache(&ecss_tc_and_token.tc_in_memory)?; + let tc = self.service_helper.tc_in_mem_converter().convert()?; if tc.service() != 17 { - return Err(PusPacketHandlingError::WrongService(tc.service())); + return Err(GenericConversionError::WrongService(tc.service()).into()); } if tc.subservice() == 1 { let mut partial_error = None; - let time_stamp = get_current_cds_short_timestamp(&mut partial_error); let result = self .service_helper - .common - .verification_handler - .start_success(ecss_tc_and_token.token, &time_stamp) + .verif_reporter() + .start_success( + self.service_helper.common.id, + &self.service_helper.common.tm_sender, + ecss_tc_and_token.token, + time_stamp, + ) .map_err(|_| PartialPusHandlingError::Verification); let start_token = if let Ok(result) = result { Some(result) @@ -77,14 +80,14 @@ impl< }; // Sequence count will be handled centrally in TM funnel. let mut reply_header = - SpHeader::tm_unseg(self.service_helper.common.tm_apid, 0, 0).unwrap(); - let tc_header = PusTmSecondaryHeader::new_simple(17, 2, &time_stamp); + SpHeader::tm_unseg(self.service_helper.verif_reporter().apid(), 0, 0).unwrap(); + let tc_header = PusTmSecondaryHeader::new_simple(17, 2, time_stamp); let ping_reply = PusTmCreator::new(&mut reply_header, tc_header, &[], true); let result = self .service_helper .common .tm_sender - .send_tm(PusTmWrapper::Direct(ping_reply)) + .send_tm(self.service_helper.id(), PusTmVariant::Direct(ping_reply)) .map_err(PartialPusHandlingError::TmSend); if let Err(err) = result { partial_error = Some(err); @@ -93,9 +96,13 @@ impl< if let Some(start_token) = start_token { if self .service_helper - .common - .verification_handler - .completion_success(start_token, &time_stamp) + .verif_reporter() + .completion_success( + self.service_helper.common.id, + &self.service_helper.common.tm_sender, + start_token, + time_stamp, + ) .is_err() { partial_error = Some(PartialPusHandlingError::Verification) @@ -120,54 +127,56 @@ impl< /// mpsc queues. pub type PusService17TestHandlerDynWithMpsc = PusService17TestHandler< MpscTcReceiver, - TmAsVecSenderWithMpsc, + mpsc::Sender, EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, + VerificationReporter, >; /// Helper type definition for a PUS 17 handler with a dynamic TMTC memory backend and bounded MPSC /// queues. pub type PusService17TestHandlerDynWithBoundedMpsc = PusService17TestHandler< MpscTcReceiver, - TmAsVecSenderWithBoundedMpsc, + mpsc::SyncSender, EcssTcInVecConverter, - VerificationReporterWithVecMpscBoundedSender, + VerificationReporter, >; /// Helper type definition for a PUS 17 handler with a shared store TMTC memory backend and regular /// mpsc queues. 
pub type PusService17TestHandlerStaticWithMpsc = PusService17TestHandler< MpscTcReceiver, - TmInSharedPoolSenderWithMpsc, + MpscTmInSharedPoolSender, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscSender, + VerificationReporter, >; /// Helper type definition for a PUS 17 handler with a shared store TMTC memory backend and bounded /// mpsc queues. pub type PusService17TestHandlerStaticWithBoundedMpsc = PusService17TestHandler< MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, + MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, + VerificationReporter, >; #[cfg(test)] mod tests { + use crate::pus::test_util::{PusTestHarness, SimplePusPacketHandler, TEST_APID}; use crate::pus::tests::{ - PusServiceHandlerWithSharedStoreCommon, PusServiceHandlerWithVecCommon, PusTestHarness, - SimplePusPacketHandler, TEST_APID, + PusServiceHandlerWithSharedStoreCommon, PusServiceHandlerWithVecCommon, }; - use crate::pus::verification::std_mod::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, + use crate::pus::verification::{ + RequestId, VerificationReporter, VerificationReportingProvider, }; - use crate::pus::verification::RequestId; use crate::pus::verification::{TcStateAccepted, VerificationToken}; use crate::pus::{ - EcssTcInSharedStoreConverter, EcssTcInVecConverter, MpscTcReceiver, PusPacketHandlerResult, - PusPacketHandlingError, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc, + EcssTcInSharedStoreConverter, EcssTcInVecConverter, GenericConversionError, MpscTcReceiver, + MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, + PusPacketHandlingError, }; + use crate::ComponentId; use delegate::delegate; use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader}; use spacepackets::ecss::tm::PusTmReader; use spacepackets::ecss::PusPacket; + use spacepackets::time::{cds, TimeWriter}; use spacepackets::{SequenceFlags, SpHeader}; use super::PusService17TestHandler; @@ -176,15 +185,15 @@ mod tests { common: PusServiceHandlerWithSharedStoreCommon, handler: PusService17TestHandler< MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, + MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, + VerificationReporter, >, } impl Pus17HandlerWithStoreTester { - pub fn new() -> Self { - let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(); + pub fn new(id: ComponentId) -> Self { + let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(id); let pus_17_handler = PusService17TestHandler::new(srv_handler); Self { common, @@ -194,10 +203,24 @@ mod tests { } impl PusTestHarness for Pus17HandlerWithStoreTester { + fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken { + let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc); + self.handler + .service_helper + .verif_reporter() + .acceptance_success( + self.handler.service_helper.id(), + self.handler.service_helper.tm_sender(), + init_token, + &[0; 7], + ) + .expect("acceptance success failure") + } + delegate! 
{ to self.common { - fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken; fn read_next_tm(&mut self) -> PusTmReader<'_>; + fn send_tc(&self, token: &VerificationToken, tc: &PusTcCreator); fn check_no_tm_available(&self) -> bool; fn check_next_verification_tm( &self, @@ -208,27 +231,26 @@ mod tests { } } impl SimplePusPacketHandler for Pus17HandlerWithStoreTester { - delegate! { - to self.handler { - fn handle_one_tc(&mut self) -> Result; - } + fn handle_one_tc(&mut self) -> Result { + let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap(); + self.handler.poll_and_handle_next_tc(&time_stamp) } } struct Pus17HandlerWithVecTester { - common: PusServiceHandlerWithVecCommon, + common: PusServiceHandlerWithVecCommon, handler: PusService17TestHandler< MpscTcReceiver, - TmAsVecSenderWithMpsc, + MpscTmAsVecSender, EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, + VerificationReporter, >, } impl Pus17HandlerWithVecTester { - pub fn new() -> Self { + pub fn new(id: ComponentId) -> Self { let (common, srv_handler) = - PusServiceHandlerWithVecCommon::new_with_standard_verif_reporter(); + PusServiceHandlerWithVecCommon::new_with_standard_verif_reporter(id); Self { common, handler: PusService17TestHandler::new(srv_handler), @@ -237,9 +259,23 @@ mod tests { } impl PusTestHarness for Pus17HandlerWithVecTester { + fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken { + let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc); + self.handler + .service_helper + .verif_reporter() + .acceptance_success( + self.handler.service_helper.id(), + self.handler.service_helper.tm_sender(), + init_token, + &[0; 7], + ) + .expect("acceptance success failure") + } + delegate! { to self.common { - fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken; + fn send_tc(&self, token: &VerificationToken, tc: &PusTcCreator); fn read_next_tm(&mut self) -> PusTmReader<'_>; fn check_no_tm_available(&self) -> bool; fn check_next_verification_tm( @@ -251,10 +287,9 @@ mod tests { } } impl SimplePusPacketHandler for Pus17HandlerWithVecTester { - delegate! 
{ - to self.handler { - fn handle_one_tc(&mut self) -> Result; - } + fn handle_one_tc(&mut self) -> Result { + let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap(); + self.handler.poll_and_handle_next_tc(&time_stamp) } } @@ -263,8 +298,9 @@ mod tests { let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); let sec_header = PusTcSecondaryHeader::new_simple(17, 1); let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true); - let token = test_harness.send_tc(&ping_tc); - let request_id = token.req_id(); + let token = test_harness.init_verification(&ping_tc); + test_harness.send_tc(&token, &ping_tc); + let request_id = token.request_id(); let result = test_harness.handle_one_tc(); assert!(result.is_ok()); // We should see 4 replies in the TM queue now: Acceptance TM, Start TM, ping reply and @@ -288,19 +324,19 @@ mod tests { #[test] fn test_basic_ping_processing_using_store() { - let mut test_harness = Pus17HandlerWithStoreTester::new(); + let mut test_harness = Pus17HandlerWithStoreTester::new(0); ping_test(&mut test_harness); } #[test] fn test_basic_ping_processing_using_vec() { - let mut test_harness = Pus17HandlerWithVecTester::new(); + let mut test_harness = Pus17HandlerWithVecTester::new(0); ping_test(&mut test_harness); } #[test] fn test_empty_tc_queue() { - let mut test_harness = Pus17HandlerWithStoreTester::new(); + let mut test_harness = Pus17HandlerWithStoreTester::new(0); let result = test_harness.handle_one_tc(); assert!(result.is_ok()); let result = result.unwrap(); @@ -312,15 +348,19 @@ mod tests { #[test] fn test_sending_unsupported_service() { - let mut test_harness = Pus17HandlerWithStoreTester::new(); + let mut test_harness = Pus17HandlerWithStoreTester::new(0); let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); let sec_header = PusTcSecondaryHeader::new_simple(3, 1); let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true); - test_harness.send_tc(&ping_tc); + let token = test_harness.init_verification(&ping_tc); + test_harness.send_tc(&token, &ping_tc); let result = test_harness.handle_one_tc(); assert!(result.is_err()); let error = result.unwrap_err(); - if let PusPacketHandlingError::WrongService(num) = error { + if let PusPacketHandlingError::RequestConversion(GenericConversionError::WrongService( + num, + )) = error + { assert_eq!(num, 3); } else { panic!("unexpected error type {error}") @@ -329,11 +369,12 @@ mod tests { #[test] fn test_sending_custom_subservice() { - let mut test_harness = Pus17HandlerWithStoreTester::new(); + let mut test_harness = Pus17HandlerWithStoreTester::new(0); let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); let sec_header = PusTcSecondaryHeader::new_simple(17, 200); let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true); - test_harness.send_tc(&ping_tc); + let token = test_harness.init_verification(&ping_tc); + test_harness.send_tc(&token, &ping_tc); let result = test_harness.handle_one_tc(); assert!(result.is_ok()); let result = result.unwrap(); diff --git a/satrs/src/pus/verification.rs b/satrs/src/pus/verification.rs index e44bc73..a890b2e 100644 --- a/satrs/src/pus/verification.rs +++ b/satrs/src/pus/verification.rs @@ -17,10 +17,10 @@ //! use std::time::Duration; //! use satrs::pool::{PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig}; //! use satrs::pus::verification::{ -//! 
VerificationReportingProvider, VerificationReporterCfg, VerificationReporterWithSender +//! VerificationReportingProvider, VerificationReporterCfg //! }; //! use satrs::seq_count::SeqCountProviderSimple; -//! use satrs::pus::TmInSharedPoolSenderWithMpsc; +//! use satrs::pus::MpscTmInSharedPoolSender; //! use satrs::tmtc::tm_helper::SharedTmPool; //! use spacepackets::ecss::PusPacket; //! use spacepackets::SpHeader; @@ -35,7 +35,7 @@ //! let shared_tm_store = SharedTmPool::new(tm_pool); //! let tm_store = shared_tm_store.clone_backing_pool(); //! let (verif_tx, verif_rx) = mpsc::channel(); -//! let sender = TmInSharedPoolSenderWithMpsc::new(0, "Test Sender", shared_tm_store, verif_tx); +//! let sender = MpscTmInSharedPoolSender::new(0, "Test Sender", shared_tm_store, verif_tx); //! let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); //! let mut reporter = VerificationReporterWithSender::new(&cfg , sender); //! @@ -98,9 +98,14 @@ pub use spacepackets::ecss::verification::*; #[cfg_attr(feature = "doc_cfg", doc(cfg(feature = "alloc")))] pub use alloc_mod::*; +use crate::request::Apid; +use crate::ComponentId; + +/* #[cfg(feature = "std")] #[cfg_attr(feature = "doc_cfg", doc(cfg(feature = "std")))] pub use std_mod::*; + */ /// This is a request identifier as specified in 5.4.11.2 c. of the PUS standard. /// @@ -139,12 +144,34 @@ impl PartialEq for RequestId { impl RequestId { pub const SIZE_AS_BYTES: usize = size_of::(); + /// This allows extracting the request ID from a given PUS telecommand. + pub fn new(tc: &(impl CcsdsPacket + IsPusTelecommand)) -> Self { + Self::new_from_ccsds_tc(tc) + } + + /// Extract the request ID from a CCSDS TC packet. + pub fn new_from_ccsds_tc(tc: &impl CcsdsPacket) -> Self { + RequestId { + version_number: tc.ccsds_version(), + packet_id: tc.packet_id(), + psc: tc.psc(), + } + } + pub fn raw(&self) -> u32 { ((self.version_number as u32) << 29) | ((self.packet_id.raw() as u32) << 16) | self.psc.raw() as u32 } + pub fn packet_id(&self) -> PacketId { + self.packet_id + } + + pub fn packet_seq_ctrl(&self) -> PacketSequenceCtrl { + self.psc + } + pub fn to_bytes(&self, buf: &mut [u8]) { let raw = self.raw(); buf.copy_from_slice(raw.to_be_bytes().as_slice()); @@ -162,17 +189,23 @@ impl RequestId { }) } } -impl RequestId { - /// This allows extracting the request ID from a given PUS telecommand. - pub fn new(tc: &(impl CcsdsPacket + IsPusTelecommand)) -> Self { - RequestId { - version_number: tc.ccsds_version(), - packet_id: tc.packet_id(), - psc: tc.psc(), + +impl From for RequestId { + fn from(value: u32) -> Self { + Self { + version_number: ((value >> 29) & 0b111) as u8, + packet_id: PacketId::from(((value >> 16) & 0xffff) as u16), + psc: PacketSequenceCtrl::from((value & 0xffff) as u16), } } } +impl From for u32 { + fn from(value: RequestId) -> Self { + value.raw() + } +} + /// If a verification operation fails, the passed token will be returned as well. This allows /// re-trying the operation at a later point. #[derive(Debug, Clone)] @@ -186,12 +219,48 @@ impl From> for VerificationOrSendErrorWithToken VerificationOrSendErrorWithToken(value.0, value.1) } } + /// Support token to allow type-state programming. This prevents calling the verification /// steps in an invalid order. 
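The token states described here make the reporting order explicit at the type level, and with the reworked trait every reporting call now also takes the reporting component's ID and a TM sender. A minimal sketch of the success path under these new signatures; `reporter` is any mutable VerificationReportingProvider implementation, while `COMPONENT_ID`, `tm_sender`, `pus_tc` and `time_stamp` are assumed to exist at the call site:

// Sketch only, not part of this patch.
// TcStateNone -> TcStateAccepted -> TcStateStarted, enforced at compile time.
let token = reporter.add_tc(&pus_tc);
let accepted_token = reporter
    .acceptance_success(COMPONENT_ID, &tm_sender, token, &time_stamp)
    .expect("sending acceptance report failed");
let started_token = reporter
    .start_success(COMPONENT_ID, &tm_sender, accepted_token, &time_stamp)
    .expect("sending start report failed");
reporter
    .completion_success(COMPONENT_ID, &tm_sender, started_token, &time_stamp)
    .expect("sending completion report failed");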
#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct VerificationToken { state: PhantomData, - req_id: RequestId, + request_id: RequestId, +} + +impl VerificationToken { + fn new(req_id: RequestId) -> VerificationToken { + VerificationToken { + state: PhantomData, + request_id: req_id, + } + } + + pub fn request_id(&self) -> RequestId { + self.request_id + } +} + +impl VerificationToken { + /// Create a verification token with the accepted state. This can be useful for test purposes. + /// For general purposes, it is recommended to use the API exposed by verification handlers. + pub fn new_accepted_state(req_id: RequestId) -> VerificationToken { + VerificationToken { + state: PhantomData, + request_id: req_id, + } + } +} + +impl VerificationToken { + /// Create a verification token with the started state. This can be useful for test purposes. + /// For general purposes, it is recommended to use the API exposed by verification handlers. + pub fn new_started_state(req_id: RequestId) -> VerificationToken { + VerificationToken { + state: PhantomData, + request_id: req_id, + } + } } pub trait WasAtLeastAccepted {} @@ -219,6 +288,17 @@ pub enum TcStateToken { Completed(VerificationToken), } +impl TcStateToken { + pub fn request_id(&self) -> RequestId { + match self { + TcStateToken::None(token) => token.request_id(), + TcStateToken::Accepted(token) => token.request_id(), + TcStateToken::Started(token) => token.request_id(), + TcStateToken::Completed(token) => token.request_id(), + } + } +} + impl From> for TcStateToken { fn from(t: VerificationToken) -> Self { TcStateToken::None(t) @@ -267,24 +347,11 @@ impl From> for TcStateToken { } } -impl VerificationToken { - fn new(req_id: RequestId) -> VerificationToken { - VerificationToken { - state: PhantomData, - req_id, - } - } - - pub fn req_id(&self) -> RequestId { - self.req_id - } -} - /// Composite helper struct to pass failure parameters to the [VerificationReporter] pub struct FailParams<'stamp, 'fargs> { - time_stamp: &'stamp [u8], - failure_code: &'fargs dyn EcssEnumeration, - failure_data: &'fargs [u8], + pub time_stamp: &'stamp [u8], + pub failure_code: &'fargs dyn EcssEnumeration, + pub failure_data: &'fargs [u8], } impl<'stamp, 'fargs> FailParams<'stamp, 'fargs> { @@ -310,8 +377,8 @@ impl<'stamp, 'fargs> FailParams<'stamp, 'fargs> { /// Composite helper struct to pass step failure parameters to the [VerificationReporter] pub struct FailParamsWithStep<'stamp, 'fargs> { - bp: FailParams<'stamp, 'fargs>, - step: &'fargs dyn EcssEnumeration, + pub common: FailParams<'stamp, 'fargs>, + pub step: &'fargs dyn EcssEnumeration, } impl<'stamp, 'fargs> FailParamsWithStep<'stamp, 'fargs> { @@ -322,13 +389,16 @@ impl<'stamp, 'fargs> FailParamsWithStep<'stamp, 'fargs> { failure_data: &'fargs [u8], ) -> Self { Self { - bp: FailParams::new(time_stamp, failure_code, failure_data), + common: FailParams::new(time_stamp, failure_code, failure_data), step, } } } pub trait VerificationReportingProvider { + fn set_apid(&mut self, apid: Apid); + fn apid(&self) -> Apid; + fn add_tc( &mut self, pus_tc: &(impl CcsdsPacket + IsPusTelecommand), @@ -340,30 +410,40 @@ pub trait VerificationReportingProvider { fn acceptance_success( &self, + sender_id: ComponentId, + sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, time_stamp: &[u8], ) -> Result, EcssTmtcError>; fn acceptance_failure( &self, + sender_id: ComponentId, + sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError>; fn 
start_success( &self, + sender_id: ComponentId, + sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, time_stamp: &[u8], ) -> Result, EcssTmtcError>; fn start_failure( &self, + sender_id: ComponentId, + sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError>; fn step_success( &self, + sender_id: ComponentId, + sender: &(impl EcssTmSenderCore + ?Sized), token: &VerificationToken, time_stamp: &[u8], step: impl EcssEnumeration, @@ -371,18 +451,24 @@ pub trait VerificationReportingProvider { fn step_failure( &self, + sender_id: ComponentId, + sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, params: FailParamsWithStep, ) -> Result<(), EcssTmtcError>; fn completion_success( &self, + sender_id: ComponentId, + sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, time_stamp: &[u8], ) -> Result<(), EcssTmtcError>; fn completion_failure( &self, + sender_id: ComponentId, + sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError>; @@ -458,7 +544,7 @@ impl VerificationReportCreator { subservice, seq_count, msg_count, - &token.req_id, + &token.request_id(), time_stamp, None::<&dyn EcssEnumeration>, )?; @@ -482,7 +568,7 @@ impl VerificationReportCreator { subservice, seq_count, msg_count, - &token.req_id, + &token.request_id(), step, params, )?; @@ -516,7 +602,7 @@ impl VerificationReportCreator { tm_creator, VerificationToken { state: PhantomData, - req_id: token.req_id, + request_id: token.request_id(), }, )) } @@ -570,7 +656,7 @@ impl VerificationReportCreator { tm_creator, VerificationToken { state: PhantomData, - req_id: token.req_id, + request_id: token.request_id(), }, )) } @@ -615,7 +701,7 @@ impl VerificationReportCreator { Subservice::TmStepSuccess.into(), seq_count, msg_count, - &token.req_id, + &token.request_id(), time_stamp, Some(&step), ) @@ -638,9 +724,9 @@ impl VerificationReportCreator { Subservice::TmStepFailure.into(), seq_count, msg_count, - &token.req_id, + &token.request_id(), Some(params.step), - ¶ms.bp, + ¶ms.common, ) } @@ -791,10 +877,7 @@ impl VerificationReportCreator { #[cfg(feature = "alloc")] pub mod alloc_mod { use super::*; - use crate::{ - pus::{PusTmWrapper, TmAsVecSenderWithId, TmInSharedPoolSenderWithId}, - seq_count::SequenceCountProvider, - }; + use crate::{pus::PusTmVariant, ComponentId}; use core::cell::RefCell; #[derive(Clone)] @@ -830,10 +913,10 @@ pub mod alloc_mod { /// TM funnel. This helper will always set those fields to 0. #[derive(Clone)] pub struct VerificationReporter { + // TODO: We could add a hook object which allows users to manipulate the verification + // report TM before it is sent.. 
source_data_buf: RefCell>, - pub seq_count_provider: Option + Send>>, - pub msg_count_provider: Option + Send>>, - pub reporter: VerificationReportCreator, + pub reporter_creator: VerificationReportCreator, } impl VerificationReporter { @@ -847,14 +930,14 @@ pub mod alloc_mod { + cfg.fail_code_field_width + cfg.max_fail_data_len ]), - seq_count_provider: None, - msg_count_provider: None, - reporter, + // seq_count_provider: None, + // msg_count_provider: None, + reporter_creator: reporter, } } delegate!( - to self.reporter { + to self.reporter_creator { pub fn set_apid(&mut self, apid: u16) -> bool; pub fn apid(&self) -> u16; pub fn add_tc(&mut self, pus_tc: &(impl CcsdsPacket + IsPusTelecommand)) -> VerificationToken; @@ -867,146 +950,108 @@ pub mod alloc_mod { pub fn allowed_source_data_len(&self) -> usize { self.source_data_buf.borrow().capacity() } + } + + impl VerificationReportingProvider for VerificationReporter { + delegate!( + to self.reporter_creator { + fn set_apid(&mut self, apid: Apid); + fn apid(&self) -> Apid; + fn add_tc(&mut self, pus_tc: &(impl CcsdsPacket + IsPusTelecommand)) -> VerificationToken; + fn add_tc_with_req_id(&mut self, req_id: RequestId) -> VerificationToken; + } + ); /// Package and send a PUS TM\[1, 1\] packet, see 8.1.2.1 of the PUS standard - pub fn acceptance_success( + fn acceptance_success( &self, - token: VerificationToken, + sender_id: ComponentId, sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, time_stamp: &[u8], ) -> Result, EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut source_data_buf = self.source_data_buf.borrow_mut(); let (tm_creator, token) = self - .reporter - .acceptance_success( - source_data_buf.as_mut_slice(), - token, - seq_count, - msg_count, - time_stamp, - ) + .reporter_creator + .acceptance_success(source_data_buf.as_mut_slice(), token, 0, 0, time_stamp) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(tm_creator))?; + sender.send_tm(sender_id, PusTmVariant::Direct(tm_creator))?; Ok(token) } /// Package and send a PUS TM\[1, 2\] packet, see 8.1.2.2 of the PUS standard - pub fn acceptance_failure( + fn acceptance_failure( &self, - token: VerificationToken, + sender_id: ComponentId, sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut buf = self.source_data_buf.borrow_mut(); let sendable = self - .reporter - .acceptance_failure(buf.as_mut_slice(), token, seq_count, msg_count, params) + .reporter_creator + .acceptance_failure(buf.as_mut_slice(), token, 0, 0, params) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(sendable))?; + sender.send_tm(sender_id, PusTmVariant::Direct(sendable))?; Ok(()) } /// Package and send a PUS TM\[1, 3\] packet, see 8.1.2.3 of the PUS standard. /// /// Requires a token previously acquired by calling [Self::acceptance_success]. 
- pub fn start_success( + fn start_success( &self, - token: VerificationToken, + sender_id: ComponentId, sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, time_stamp: &[u8], ) -> Result, EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut buf = self.source_data_buf.borrow_mut(); let (tm_creator, started_token) = self - .reporter - .start_success(buf.as_mut_slice(), token, seq_count, msg_count, time_stamp) + .reporter_creator + .start_success(buf.as_mut_slice(), token, 0, 0, time_stamp) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(tm_creator))?; + sender.send_tm(sender_id, PusTmVariant::Direct(tm_creator))?; Ok(started_token) - //self.reporter.send_start_success(sendable, sender) } /// Package and send a PUS TM\[1, 4\] packet, see 8.1.2.4 of the PUS standard. /// /// Requires a token previously acquired by calling [Self::acceptance_success]. It consumes /// the token because verification handling is done. - pub fn start_failure( + fn start_failure( &self, - token: VerificationToken, + sender_id: ComponentId, sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut buf = self.source_data_buf.borrow_mut(); let sendable = self - .reporter - .start_failure(buf.as_mut_slice(), token, seq_count, msg_count, params) + .reporter_creator + .start_failure(buf.as_mut_slice(), token, 0, 0, params) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(sendable))?; + sender.send_tm(sender_id, PusTmVariant::Direct(sendable))?; Ok(()) } /// Package and send a PUS TM\[1, 5\] packet, see 8.1.2.5 of the PUS standard. /// /// Requires a token previously acquired by calling [Self::start_success]. - pub fn step_success( + fn step_success( &self, - token: &VerificationToken, + sender_id: ComponentId, sender: &(impl EcssTmSenderCore + ?Sized), + token: &VerificationToken, time_stamp: &[u8], step: impl EcssEnumeration, ) -> Result<(), EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut buf = self.source_data_buf.borrow_mut(); let sendable = self - .reporter - .step_success( - buf.as_mut_slice(), - token, - seq_count, - msg_count, - time_stamp, - step, - ) + .reporter_creator + .step_success(buf.as_mut_slice(), token, 0, 0, time_stamp, step) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(sendable))?; + sender.send_tm(sender_id, PusTmVariant::Direct(sendable))?; Ok(()) } @@ -1014,26 +1059,19 @@ pub mod alloc_mod { /// /// Requires a token previously acquired by calling [Self::start_success]. It consumes the /// token because verification handling is done. 
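Since this patch makes the fields of FailParams and FailParamsWithStep public, a step failure report can be assembled directly from its parts before being handed to the reporter. A hedged sketch; the enum values, failure data, `started_token`, `COMPONENT_ID` and `tm_sender` are placeholders:

// Sketch only, not part of this patch.
let failure_code = EcssEnumU8::new(1);
let failed_step = EcssEnumU8::new(2);
let params = FailParamsWithStep {
    common: FailParams::new(&time_stamp, &failure_code, b"step timed out"),
    step: &failed_step,
};
reporter
    .step_failure(COMPONENT_ID, &tm_sender, started_token, params)
    .expect("sending step failure report failed");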
- pub fn step_failure( + fn step_failure( &self, - token: VerificationToken, + sender_id: ComponentId, sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, params: FailParamsWithStep, ) -> Result<(), EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut buf = self.source_data_buf.borrow_mut(); let sendable = self - .reporter - .step_failure(buf.as_mut_slice(), token, seq_count, msg_count, params) + .reporter_creator + .step_failure(buf.as_mut_slice(), token, 0, 0, params) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(sendable))?; + sender.send_tm(sender_id, PusTmVariant::Direct(sendable))?; Ok(()) } @@ -1041,26 +1079,19 @@ pub mod alloc_mod { /// /// Requires a token previously acquired by calling [Self::start_success]. It consumes the /// token because verification handling is done. - pub fn completion_success( + fn completion_success( &self, - token: VerificationToken, + sender_id: ComponentId, sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, time_stamp: &[u8], ) -> Result<(), EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut buf = self.source_data_buf.borrow_mut(); let sendable = self - .reporter - .completion_success(buf.as_mut_slice(), token, seq_count, msg_count, time_stamp) + .reporter_creator + .completion_success(buf.as_mut_slice(), token, 0, 0, time_stamp) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(sendable))?; + sender.send_tm(sender_id, PusTmVariant::Direct(sendable))?; Ok(()) } @@ -1068,395 +1099,514 @@ pub mod alloc_mod { /// /// Requires a token previously acquired by calling [Self::start_success]. It consumes the /// token because verification handling is done. - pub fn completion_failure( + fn completion_failure( &self, - token: VerificationToken, + sender_id: ComponentId, sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut buf = self.source_data_buf.borrow_mut(); let sendable = self - .reporter - .completion_failure(buf.as_mut_slice(), token, seq_count, msg_count, params) + .reporter_creator + .completion_failure(buf.as_mut_slice(), token, 0, 00, params) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(sendable))?; + sender.send_tm(sender_id, PusTmVariant::Direct(sendable))?; Ok(()) } } - - /// Helper object which caches the sender passed as a trait object. Provides the same - /// API as [VerificationReporter] but without the explicit sender arguments. - #[derive(Clone)] - pub struct VerificationReporterWithSender { - pub reporter: VerificationReporter, - pub sender: Sender, - } - - impl VerificationReporterWithSender { - pub fn new(cfg: &VerificationReporterCfg, sender: Sender) -> Self { - let reporter = VerificationReporter::new(cfg); - Self::new_from_reporter(reporter, sender) - } - - pub fn new_from_reporter(reporter: VerificationReporter, sender: Sender) -> Self { - Self { reporter, sender } - } - - delegate! 
{ - to self.reporter { - pub fn set_apid(&mut self, apid: u16) -> bool; - pub fn apid(&self) -> u16; - pub fn dest_id(&self) -> u16; - pub fn set_dest_id(&mut self, dest_id: u16); - } - } - } - - impl VerificationReportingProvider - for VerificationReporterWithSender - { - delegate! { - to self.reporter { - fn add_tc( - &mut self, - pus_tc: &(impl CcsdsPacket + IsPusTelecommand), - ) -> VerificationToken; - fn add_tc_with_req_id(&mut self, req_id: RequestId) -> VerificationToken; - } - } - - fn acceptance_success( - &self, - token: VerificationToken, - time_stamp: &[u8], - ) -> Result, EcssTmtcError> { - self.reporter - .acceptance_success(token, &self.sender, time_stamp) - } - - fn acceptance_failure( - &self, - token: VerificationToken, - params: FailParams, - ) -> Result<(), EcssTmtcError> { - self.reporter - .acceptance_failure(token, &self.sender, params) - } - - fn start_success( - &self, - token: VerificationToken, - time_stamp: &[u8], - ) -> Result, EcssTmtcError> { - self.reporter.start_success(token, &self.sender, time_stamp) - } - - fn start_failure( - &self, - token: VerificationToken, - params: FailParams, - ) -> Result<(), EcssTmtcError> { - self.reporter.start_failure(token, &self.sender, params) - } - - fn step_success( - &self, - token: &VerificationToken, - time_stamp: &[u8], - step: impl EcssEnumeration, - ) -> Result<(), EcssTmtcError> { - self.reporter - .step_success(token, &self.sender, time_stamp, step) - } - - fn step_failure( - &self, - token: VerificationToken, - params: FailParamsWithStep, - ) -> Result<(), EcssTmtcError> { - self.reporter.step_failure(token, &self.sender, params) - } - - fn completion_success( - &self, - token: VerificationToken, - time_stamp: &[u8], - ) -> Result<(), EcssTmtcError> { - self.reporter - .completion_success(token, &self.sender, time_stamp) - } - - fn completion_failure( - &self, - token: VerificationToken, - params: FailParams, - ) -> Result<(), EcssTmtcError> { - self.reporter - .completion_failure(token, &self.sender, params) - } - } - - pub type VerificationReporterWithSharedPoolSender = - VerificationReporterWithSender>; - pub type VerificationReporterWithVecSender = - VerificationReporterWithSender>; } +/* #[cfg(feature = "std")] pub mod std_mod { use std::sync::mpsc; use crate::pool::StoreAddr; + use crate::pus::verification::VerificationReporterWithSender; - use super::alloc_mod::{ - VerificationReporterWithSharedPoolSender, VerificationReporterWithVecSender, - }; + use super::alloc_mod::VerificationReporterWithSharedPoolSender; pub type VerificationReporterWithSharedPoolMpscSender = VerificationReporterWithSharedPoolSender>; pub type VerificationReporterWithSharedPoolMpscBoundedSender = VerificationReporterWithSharedPoolSender>; pub type VerificationReporterWithVecMpscSender = - VerificationReporterWithVecSender>>; + VerificationReporterWithSender>>; pub type VerificationReporterWithVecMpscBoundedSender = - VerificationReporterWithVecSender>>; + VerificationReporterWithSender>>; } + */ -#[cfg(test)] -pub mod tests { - use crate::pool::{PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig}; - use crate::pus::tests::CommonTmInfo; - use crate::pus::verification::{ - EcssTmSenderCore, EcssTmtcError, FailParams, FailParamsWithStep, RequestId, TcStateNone, - VerificationReporter, VerificationReporterCfg, VerificationReporterWithSender, - VerificationToken, - }; - use crate::pus::{ - EcssChannel, PusTmWrapper, TmInSharedPoolSenderWithId, TmInSharedPoolSenderWithMpsc, - }; - use crate::tmtc::tm_helper::SharedTmPool; - use 
crate::ChannelId; - use alloc::format; - use alloc::sync::Arc; - use hashbrown::HashMap; - use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader}; - use spacepackets::ecss::tm::PusTmReader; - use spacepackets::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU8, PusError, PusPacket}; - use spacepackets::util::UnsignedEnum; - use spacepackets::{ByteConversionError, CcsdsPacket, SpHeader}; - use std::cell::RefCell; +#[cfg(any(feature = "test_util", test))] +pub mod test_util { + use alloc::vec::Vec; + use core::cell::RefCell; use std::collections::VecDeque; - use std::sync::{mpsc, Mutex}; - use std::time::Duration; - use std::vec; - use std::vec::Vec; - use super::VerificationReportingProvider; + use super::*; - fn is_send(_: &T) {} - #[allow(dead_code)] - fn is_sync(_: &T) {} - - pub struct VerificationStatus { - pub accepted: Option, - pub started: Option, - pub step: u64, - pub step_status: Option, - pub completed: Option, - pub failure_data: Option>, - pub fail_enum: Option, - } - - pub type SharedVerificationMap = Arc>>>; - - #[derive(Clone)] + #[derive(Default)] pub struct TestVerificationReporter { - pub verification_map: SharedVerificationMap, + pub report_queue: RefCell>, } - impl TestVerificationReporter { - pub fn new(verification_map: SharedVerificationMap) -> Self { - Self { verification_map } - } + #[derive(Debug, PartialEq)] + pub struct SuccessData { + pub sender: ComponentId, + pub time_stamp: Vec, + } + + #[derive(Debug, PartialEq)] + pub struct FailureData { + pub sender: ComponentId, + pub error_enum: u64, + pub fail_data: Vec, + pub time_stamp: Vec, + } + + #[derive(Debug, PartialEq)] + pub enum VerificationReportInfo { + Added, + AcceptanceSuccess(SuccessData), + AcceptanceFailure(FailureData), + StartedSuccess(SuccessData), + StartedFailure(FailureData), + StepSuccess { data: SuccessData, step: u16 }, + StepFailure(FailureData), + CompletionSuccess(SuccessData), + CompletionFailure(FailureData), } impl VerificationReportingProvider for TestVerificationReporter { + fn set_apid(&mut self, _apid: Apid) {} + + fn apid(&self) -> Apid { + 0 + } + fn add_tc_with_req_id(&mut self, req_id: RequestId) -> VerificationToken { - let verif_map = self.verification_map.lock().unwrap(); - verif_map.borrow_mut().insert( - req_id, - VerificationStatus { - accepted: None, - started: None, - step: 0, - step_status: None, - completed: None, - failure_data: None, - fail_enum: None, - }, - ); + self.report_queue + .borrow_mut() + .push_back((req_id, VerificationReportInfo::Added)); VerificationToken { - state: core::marker::PhantomData, - req_id, + state: PhantomData, + request_id: req_id, } } fn acceptance_success( &self, + sender_id: ComponentId, + _sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, - _time_stamp: &[u8], - ) -> Result, EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => entry.accepted = Some(true), - None => panic!( - "unexpected acceptance success for request ID {}", - token.req_id() - ), - }; + time_stamp: &[u8], + ) -> Result, EcssTmtcError> { + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::AcceptanceSuccess(SuccessData { + sender: sender_id, + time_stamp: time_stamp.to_vec(), + }), + )); Ok(VerificationToken { - state: core::marker::PhantomData, - req_id: token.req_id, + state: PhantomData, + request_id: token.request_id, }) } fn acceptance_failure( &self, + sender_id: ComponentId, + _sender: &(impl 
EcssTmSenderCore + ?Sized), token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => { - entry.accepted = Some(false); - entry.failure_data = Some(params.failure_data.to_vec()); - entry.fail_enum = Some(params.failure_code.value()); - } - None => panic!( - "unexpected acceptance failure for request ID {}", - token.req_id() - ), - }; + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::AcceptanceFailure(FailureData { + sender: sender_id, + error_enum: params.failure_code.value(), + fail_data: params.failure_data.to_vec(), + time_stamp: params.time_stamp.to_vec(), + }), + )); Ok(()) } fn start_success( &self, - token: VerificationToken, - _time_stamp: &[u8], - ) -> Result, EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => entry.started = Some(true), - None => panic!("unexpected start success for request ID {}", token.req_id()), - }; + sender_id: ComponentId, + _sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, + time_stamp: &[u8], + ) -> Result, EcssTmtcError> { + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::StartedSuccess(SuccessData { + sender: sender_id, + time_stamp: time_stamp.to_vec(), + }), + )); Ok(VerificationToken { - state: core::marker::PhantomData, - req_id: token.req_id, + state: PhantomData, + request_id: token.request_id, }) } fn start_failure( &self, + sender_id: ComponentId, + _sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => { - entry.started = Some(false); - entry.failure_data = Some(params.failure_data.to_vec()); - entry.fail_enum = Some(params.failure_code.value()); - } - None => panic!("unexpected start failure for request ID {}", token.req_id()), - }; + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::StartedFailure(FailureData { + sender: sender_id, + error_enum: params.failure_code.value(), + fail_data: params.failure_data.to_vec(), + time_stamp: params.time_stamp.to_vec(), + }), + )); Ok(()) } fn step_success( &self, - token: &VerificationToken, - _time_stamp: &[u8], - step: impl spacepackets::ecss::EcssEnumeration, + sender_id: ComponentId, + _sender: &(impl EcssTmSenderCore + ?Sized), + token: &VerificationToken, + time_stamp: &[u8], + step: impl EcssEnumeration, ) -> Result<(), EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => { - entry.step = step.value(); - entry.step_status = Some(true); - } - None => panic!("unexpected start success for request ID {}", token.req_id()), - }; + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::StepSuccess { + data: SuccessData { + sender: sender_id, + time_stamp: time_stamp.to_vec(), + }, + step: step.value() as u16, + }, + )); Ok(()) } fn step_failure( &self, - token: VerificationToken, - _params: FailParamsWithStep, + sender_id: ComponentId, + _sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, + params: FailParamsWithStep, ) -> Result<(), EcssTmtcError> { - let verif_map = 
self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => { - entry.step_status = Some(false); - } - None => panic!("unexpected start success for request ID {}", token.req_id()), - }; + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::StepFailure(FailureData { + sender: sender_id, + error_enum: params.common.failure_code.value(), + fail_data: params.common.failure_data.to_vec(), + time_stamp: params.common.time_stamp.to_vec(), + }), + )); Ok(()) } fn completion_success( &self, + sender_id: ComponentId, + _sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, - _time_stamp: &[u8], + time_stamp: &[u8], ) -> Result<(), EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => entry.completed = Some(true), - None => panic!( - "unexpected acceptance success for request ID {}", - token.req_id() - ), - }; + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::CompletionSuccess(SuccessData { + sender: sender_id, + time_stamp: time_stamp.to_vec(), + }), + )); Ok(()) } - fn completion_failure( + fn completion_failure( &self, + sender_id: ComponentId, + _sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => { - entry.completed = Some(false); - entry.failure_data = Some(params.failure_data.to_vec()); - entry.fail_enum = Some(params.failure_code.value()); - } - None => panic!( - "unexpected acceptance success for request ID {}", - token.req_id() - ), - }; + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::CompletionFailure(FailureData { + sender: sender_id, + error_enum: params.failure_code.value(), + fail_data: params.failure_data.to_vec(), + time_stamp: params.time_stamp.to_vec(), + }), + )); Ok(()) } } - const TEST_APID: u16 = 0x02; + impl TestVerificationReporter { + pub fn check_next_was_added(&self, request_id: RequestId) { + let (last_report_req_id, info) = self + .report_queue + .borrow_mut() + .pop_front() + .expect("report queue is empty"); + assert_eq!(request_id, last_report_req_id); + assert_eq!(info, VerificationReportInfo::Added); + } + pub fn check_next_is_acceptance_success(&self, sender_id: ComponentId, req_id: RequestId) { + let (last_report_req_id, info) = self + .report_queue + .borrow_mut() + .pop_front() + .expect("report queue is empty"); + assert_eq!(req_id, last_report_req_id); + if let VerificationReportInfo::AcceptanceSuccess(data) = info { + assert_eq!(data.sender, sender_id); + return; + } + panic!("next message is not acceptance success message") + } + + pub fn check_next_is_started_success(&self, sender_id: ComponentId, req_id: RequestId) { + let (last_report_req_id, info) = self + .report_queue + .borrow_mut() + .pop_front() + .expect("report queue is empty"); + assert_eq!(req_id, last_report_req_id); + if let VerificationReportInfo::StartedSuccess(data) = info { + assert_eq!(data.sender, sender_id); + return; + } + panic!("next message is not start success message") + } + + pub fn check_next_is_step_success( + &self, + sender_id: ComponentId, + request_id: RequestId, + expected_step: u16, + ) { + let (last_report_req_id, info) = self + .report_queue + .borrow_mut() + .pop_front() + .expect("report 
queue is empty"); + assert_eq!(request_id, last_report_req_id); + if let VerificationReportInfo::StepSuccess { data, step } = info { + assert_eq!(data.sender, sender_id); + assert_eq!(expected_step, step); + return; + } + panic!("next message is not step success message: {info:?}") + } + + pub fn check_next_is_step_failure( + &self, + sender_id: ComponentId, + request_id: RequestId, + error_code: u64, + ) { + let (last_report_req_id, info) = self + .report_queue + .borrow_mut() + .pop_front() + .expect("report queue is empty"); + assert_eq!(request_id, last_report_req_id); + if let VerificationReportInfo::StepFailure(data) = info { + assert_eq!(data.sender, sender_id); + assert_eq!(data.error_enum, error_code); + return; + } + panic!("next message is not step failure message") + } + + pub fn check_next_is_completion_success( + &self, + sender_id: ComponentId, + request_id: RequestId, + ) { + let (last_report_req_id, info) = self + .report_queue + .borrow_mut() + .pop_front() + .expect("report queue is empty"); + assert_eq!(request_id, last_report_req_id); + if let VerificationReportInfo::CompletionSuccess(data) = info { + assert_eq!(data.sender, sender_id); + return; + } + panic!("next message is not completion success message: {info:?}") + } + + pub fn check_next_is_completion_failure( + &mut self, + sender_id: ComponentId, + request_id: RequestId, + error_code: u64, + ) { + let (last_report_req_id, info) = self + .report_queue + .get_mut() + .pop_front() + .expect("report queue is empty"); + assert_eq!(request_id, last_report_req_id); + if let VerificationReportInfo::CompletionFailure(data) = info { + assert_eq!(data.sender, sender_id); + assert_eq!(data.error_enum, error_code); + return; + } + panic!("next message is not completion failure message: {info:?}") + } + + pub fn assert_full_completion_success( + &mut self, + sender_id: ComponentId, + request_id: RequestId, + expected_steps: Option, + ) { + self.check_next_was_added(request_id); + self.check_next_is_acceptance_success(sender_id, request_id); + self.check_next_is_started_success(sender_id, request_id); + if let Some(highest_num) = expected_steps { + for i in 0..highest_num { + self.check_next_is_step_success(sender_id, request_id, i); + } + } + self.check_next_is_completion_success(sender_id, request_id); + } + + pub fn assert_completion_failure( + &mut self, + sender_id: ComponentId, + request_id: RequestId, + expected_steps: Option, + error_code: u64, + ) { + self.check_next_was_added(request_id); + self.check_next_is_acceptance_success(sender_id, request_id); + self.check_next_is_started_success(sender_id, request_id); + if let Some(highest_num) = expected_steps { + for i in 0..highest_num { + self.check_next_is_step_success(sender_id, request_id, i); + } + } + self.check_next_is_completion_failure(sender_id, request_id, error_code); + } + + pub fn get_next_verification_message(&mut self) -> (RequestId, VerificationReportInfo) { + self.report_queue + .get_mut() + .pop_front() + .expect("report queue is empty") + } + /* + pub fn verification_info(&self, req_id: &RequestId) -> Option { + let verif_map = self.verification_map.lock().unwrap(); + let value = verif_map.borrow().get(req_id).cloned(); + value + } + + + pub fn check_started(&self, req_id: &RequestId) -> bool { + let verif_map = self.verification_map.lock().unwrap(); + if let Some(entry) = verif_map.borrow().get(req_id) { + return entry.started.unwrap_or(false); + } + false + } + + fn generic_completion_checks( + entry: &VerificationStatus, + step: Option, + 
completion_success: bool, + ) { + assert!(entry.accepted.unwrap()); + assert!(entry.started.unwrap()); + if let Some(step) = step { + assert!(entry.step_status.unwrap()); + assert_eq!(entry.step, step); + } else { + assert!(entry.step_status.is_none()); + } + assert_eq!(entry.completed.unwrap(), completion_success); + } + + + pub fn assert_completion_failure( + &self, + req_id: &RequestId, + step: Option, + error_code: u64, + ) { + let verif_map = self.verification_map.lock().unwrap(); + if let Some(entry) = verif_map.borrow().get(req_id) { + Self::generic_completion_checks(entry, step, false); + assert_eq!(entry.fail_enum.unwrap(), error_code); + return; + } + panic!("request not in verification map"); + } + + pub fn completion_status(&self, req_id: &RequestId) -> Option { + let verif_map = self.verification_map.lock().unwrap(); + if let Some(entry) = verif_map.borrow().get(req_id) { + return entry.completed; + } + panic!("request not in verification map"); + } + */ + } +} + +#[cfg(test)] +pub mod tests { + use crate::pool::{StaticMemoryPool, StaticPoolConfig}; + use crate::pus::test_util::{TEST_APID, TEST_COMPONENT_ID}; + use crate::pus::tests::CommonTmInfo; + use crate::pus::verification::{ + EcssTmSenderCore, EcssTmtcError, FailParams, FailParamsWithStep, RequestId, TcStateNone, + VerificationReporter, VerificationReporterCfg, VerificationToken, + }; + use crate::pus::{ChannelWithId, MpscTmInSharedPoolSender, PusTmVariant}; + use crate::request::MessageMetadata; + use crate::tmtc::tm_helper::SharedTmPool; + use crate::ComponentId; + use alloc::format; + use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader}; + use spacepackets::ecss::{ + EcssEnumU16, EcssEnumU32, EcssEnumU8, EcssEnumeration, PusError, PusPacket, + WritablePusPacket, + }; + use spacepackets::util::UnsignedEnum; + use spacepackets::{ByteConversionError, SpHeader}; + use std::cell::RefCell; + use std::collections::VecDeque; + use std::sync::mpsc; + use std::vec; + use std::vec::Vec; + + use super::{ + TcStateAccepted, TcStateStarted, VerificationReportingProvider, WasAtLeastAccepted, + }; + + fn is_send(_: &T) {} + #[allow(dead_code)] + fn is_sync(_: &T) {} + const EMPTY_STAMP: [u8; 7] = [0; 7]; #[derive(Debug, Eq, PartialEq, Clone)] struct TmInfo { + pub requestor: MessageMetadata, pub common: CommonTmInfo, - pub req_id: RequestId, pub additional_data: Option>, } @@ -1465,8 +1615,8 @@ pub mod tests { pub service_queue: RefCell>, } - impl EcssChannel for TestSender { - fn channel_id(&self) -> ChannelId { + impl ChannelWithId for TestSender { + fn id(&self) -> ComponentId { 0 } fn name(&self) -> &'static str { @@ -1475,12 +1625,12 @@ pub mod tests { } impl EcssTmSenderCore for TestSender { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + fn send_tm(&self, sender_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(_) => { + PusTmVariant::InStore(_) => { panic!("TestSender: Can not deal with addresses"); } - PusTmWrapper::Direct(tm) => { + PusTmVariant::Direct(tm) => { assert_eq!(PusPacket::service(&tm), 1); assert!(!tm.source_data().is_empty()); let mut time_stamp = [0; 7]; @@ -1496,8 +1646,8 @@ pub mod tests { vec = Some(new_vec); } self.service_queue.borrow_mut().push_back(TmInfo { + requestor: MessageMetadata::new(req_id.into(), sender_id), common: CommonTmInfo::new_from_tm(&tm), - req_id, additional_data: vec, }); Ok(()) @@ -1506,27 +1656,12 @@ pub mod tests { } } - struct TestBase<'a> { - vr: VerificationReporter, - 
#[allow(dead_code)] - tc: PusTcCreator<'a>, - } - - impl<'a> TestBase<'a> { - fn rep(&mut self) -> &mut VerificationReporter { - &mut self.vr - } - } - struct TestBaseWithHelper<'a, Sender: EcssTmSenderCore + Clone + 'static> { - helper: VerificationReporterWithSender, - #[allow(dead_code)] - tc: PusTcCreator<'a>, - } - - impl<'a, Sender: EcssTmSenderCore + Clone + 'static> TestBaseWithHelper<'a, Sender> { - fn rep(&mut self) -> &mut VerificationReporter { - &mut self.helper.reporter - } + struct VerificationReporterTestbench { + pub id: ComponentId, + sender: TestSender, + reporter: VerificationReporter, + pub request_id: RequestId, + tc: Vec, } fn base_reporter() -> VerificationReporter { @@ -1534,180 +1669,487 @@ pub mod tests { VerificationReporter::new(&cfg) } - fn base_tc_init(app_data: Option<&[u8]>) -> (PusTcCreator, RequestId) { + impl VerificationReporterTestbench { + fn new(id: ComponentId, tc: PusTcCreator) -> Self { + let reporter = base_reporter(); + Self { + id, + sender: TestSender::default(), + reporter, + request_id: RequestId::new(&tc), + tc: tc.to_vec().unwrap(), + } + } + + #[allow(dead_code)] + fn set_dest_id(&mut self, dest_id: u16) { + self.reporter.set_dest_id(dest_id); + } + + fn init(&mut self) -> VerificationToken { + self.reporter.add_tc(&PusTcReader::new(&self.tc).unwrap().0) + } + + fn acceptance_success( + &self, + token: VerificationToken, + time_stamp: &[u8], + ) -> Result, EcssTmtcError> { + self.reporter + .acceptance_success(self.id, &self.sender, token, time_stamp) + } + + fn acceptance_failure( + &self, + token: VerificationToken, + params: FailParams, + ) -> Result<(), EcssTmtcError> { + self.reporter + .acceptance_failure(self.id, &self.sender, token, params) + } + + fn start_success( + &self, + token: VerificationToken, + time_stamp: &[u8], + ) -> Result, EcssTmtcError> { + self.reporter + .start_success(self.id, &self.sender, token, time_stamp) + } + + fn start_failure( + &self, + token: VerificationToken, + params: FailParams, + ) -> Result<(), EcssTmtcError> { + self.reporter + .start_failure(self.id, &self.sender, token, params) + } + + fn step_success( + &self, + token: &VerificationToken, + time_stamp: &[u8], + step: impl EcssEnumeration, + ) -> Result<(), EcssTmtcError> { + self.reporter + .step_success(self.id, &self.sender, token, time_stamp, step) + } + + fn step_failure( + &self, + token: VerificationToken, + params: FailParamsWithStep, + ) -> Result<(), EcssTmtcError> { + self.reporter + .step_failure(self.id, &self.sender, token, params) + } + + fn completion_success( + &self, + token: VerificationToken, + time_stamp: &[u8], + ) -> Result<(), EcssTmtcError> { + self.reporter + .completion_success(self.id, &self.sender, token, time_stamp) + } + + fn completion_failure( + &self, + token: VerificationToken, + params: FailParams, + ) -> Result<(), EcssTmtcError> { + self.reporter + .completion_failure(self.id, &self.sender, token, params) + } + + fn acceptance_check(&self, time_stamp: &[u8; 7]) { + let cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 1, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: *time_stamp, + }, + additional_data: None, + }; + let mut service_queue = self.sender.service_queue.borrow_mut(); + assert_eq!(service_queue.len(), 1); + let info = service_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + } + + fn acceptance_fail_check(&mut self, stamp_buf: [u8; 7]) { + let cmp_info = TmInfo { + 
requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 2, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: stamp_buf, + }, + additional_data: Some([0, 2].to_vec()), + }; + let service_queue = self.sender.service_queue.get_mut(); + assert_eq!(service_queue.len(), 1); + let info = service_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + } + + fn start_fail_check(&mut self, fail_data_raw: [u8; 4]) { + let mut srv_queue = self.sender.service_queue.borrow_mut(); + assert_eq!(srv_queue.len(), 2); + let mut cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 1, + apid: TEST_APID, + msg_counter: 0, + dest_id: 0, + time_stamp: EMPTY_STAMP, + }, + additional_data: None, + }; + let mut info = srv_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 4, + apid: TEST_APID, + msg_counter: 0, + dest_id: 0, + time_stamp: EMPTY_STAMP, + }, + additional_data: Some([&[22], fail_data_raw.as_slice()].concat().to_vec()), + }; + info = srv_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + } + + fn step_success_check(&mut self, time_stamp: &[u8; 7]) { + let mut cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 1, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: *time_stamp, + }, + additional_data: None, + }; + let mut srv_queue = self.sender.service_queue.borrow_mut(); + let mut info = srv_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 3, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: *time_stamp, + }, + additional_data: None, + }; + info = srv_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 5, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: *time_stamp, + }, + additional_data: Some([0].to_vec()), + }; + info = srv_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 5, + apid: TEST_APID, + msg_counter: 0, + dest_id: 0, + time_stamp: *time_stamp, + }, + additional_data: Some([1].to_vec()), + }; + info = srv_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + } + + fn check_step_failure(&mut self, fail_data_raw: [u8; 4]) { + assert_eq!(self.sender.service_queue.borrow().len(), 4); + let mut cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 1, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: EMPTY_STAMP, + }, + additional_data: None, + }; + let mut info = self.sender.service_queue.borrow_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 3, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: [0, 1, 0, 1, 0, 1, 0], + 
}, + additional_data: None, + }; + info = self.sender.service_queue.borrow_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 5, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: EMPTY_STAMP, + }, + additional_data: Some([0].to_vec()), + }; + info = self.sender.service_queue.get_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 6, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: EMPTY_STAMP, + }, + additional_data: Some( + [ + [1].as_slice(), + &[0, 0, 0x10, 0x20], + fail_data_raw.as_slice(), + ] + .concat() + .to_vec(), + ), + }; + info = self.sender.service_queue.get_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + } + + fn completion_fail_check(&mut self) { + assert_eq!(self.sender.service_queue.borrow().len(), 3); + + let mut cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 1, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: EMPTY_STAMP, + }, + additional_data: None, + }; + let mut info = self.sender.service_queue.get_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 3, + apid: TEST_APID, + msg_counter: 0, + dest_id: 0, + time_stamp: [0, 1, 0, 1, 0, 1, 0], + }, + additional_data: None, + }; + info = self.sender.service_queue.get_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 8, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: EMPTY_STAMP, + }, + additional_data: Some([0, 0, 0x10, 0x20].to_vec()), + }; + info = self.sender.service_queue.get_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + } + + fn completion_success_check(&mut self) { + assert_eq!(self.sender.service_queue.borrow().len(), 3); + let cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 1, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: EMPTY_STAMP, + }, + additional_data: None, + }; + let mut info = self.sender.service_queue.borrow_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + + let cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 3, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: [0, 1, 0, 1, 0, 1, 0], + }, + additional_data: None, + }; + info = self.sender.service_queue.borrow_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + let cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 7, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: EMPTY_STAMP, + }, + additional_data: None, + }; + info = self.sender.service_queue.borrow_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + } + } + + fn create_generic_ping() -> PusTcCreator<'static> { let mut sph = 
SpHeader::tc_unseg(TEST_APID, 0x34, 0).unwrap(); let tc_header = PusTcSecondaryHeader::new_simple(17, 1); - let app_data = app_data.unwrap_or(&[]); - let pus_tc = PusTcCreator::new(&mut sph, tc_header, app_data, true); - let req_id = RequestId::new(&pus_tc); - (pus_tc, req_id) - } - - fn base_init(api_sel: bool) -> (TestBase<'static>, VerificationToken) { - let mut reporter = base_reporter(); - let (tc, req_id) = base_tc_init(None); - let init_tok = if api_sel { - reporter.add_tc_with_req_id(req_id) - } else { - reporter.add_tc(&tc) - }; - (TestBase { vr: reporter, tc }, init_tok) - } - - fn base_with_helper_init() -> ( - TestBaseWithHelper<'static, TestSender>, - VerificationToken, - ) { - let mut reporter = base_reporter(); - let (tc, _) = base_tc_init(None); - let init_tok = reporter.add_tc(&tc); - let sender = TestSender::default(); - let helper = VerificationReporterWithSender::new_from_reporter(reporter, sender); - (TestBaseWithHelper { helper, tc }, init_tok) - } - - fn acceptance_check(sender: &mut TestSender, req_id: &RequestId) { - let cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 1, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: None, - req_id: *req_id, - }; - let mut service_queue = sender.service_queue.borrow_mut(); - assert_eq!(service_queue.len(), 1); - let info = service_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); + PusTcCreator::new(&mut sph, tc_header, &[], true) } #[test] - fn test_mpsc_verif_send_sync() { + fn test_mpsc_verif_send() { let pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(8, 8)], false)); let shared_tm_store = SharedTmPool::new(pool); let (tx, _) = mpsc::channel(); - let mpsc_verif_sender = - TmInSharedPoolSenderWithMpsc::new(0, "verif_sender", shared_tm_store, tx); + let mpsc_verif_sender = MpscTmInSharedPoolSender::new(shared_tm_store, tx); is_send(&mpsc_verif_sender); } #[test] fn test_state() { - let (mut b, _) = base_init(false); - assert_eq!(b.vr.apid(), TEST_APID); - b.vr.set_apid(TEST_APID + 1); - assert_eq!(b.vr.apid(), TEST_APID + 1); + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + assert_eq!(testbench.reporter.apid(), TEST_APID); + testbench.reporter.set_apid(TEST_APID + 1); + assert_eq!(testbench.reporter.apid(), TEST_APID + 1); } #[test] fn test_basic_acceptance_success() { - let (b, tok) = base_init(false); - let mut sender = TestSender::default(); - b.vr.acceptance_success(tok, &sender, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - acceptance_check(&mut sender, &tok.req_id); - } - - #[test] - fn test_basic_acceptance_success_with_helper() { - let (mut b, tok) = base_with_helper_init(); - b.helper - .acceptance_success(tok, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - acceptance_check(&mut b.helper.sender, &tok.req_id); - } - - fn acceptance_fail_check(sender: &mut TestSender, req_id: RequestId, stamp_buf: [u8; 7]) { - let cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 2, - apid: TEST_APID, - msg_counter: 0, - dest_id: 5, - time_stamp: stamp_buf, - }, - additional_data: Some([0, 2].to_vec()), - req_id, - }; - let mut service_queue = sender.service_queue.borrow_mut(); - assert_eq!(service_queue.len(), 1); - let info = service_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let token = testbench.init(); + testbench + .acceptance_success(token, &EMPTY_STAMP) + .expect("sending 
acceptance success failed"); + testbench.acceptance_check(&EMPTY_STAMP); } #[test] fn test_basic_acceptance_failure() { - let (mut b, tok) = base_init(true); - b.rep().reporter.dest_id = 5; + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let init_token = testbench.init(); let stamp_buf = [1, 2, 3, 4, 5, 6, 7]; - let mut sender = TestSender::default(); let fail_code = EcssEnumU16::new(2); let fail_params = FailParams::new_no_fail_data(stamp_buf.as_slice(), &fail_code); - b.vr.acceptance_failure(tok, &sender, fail_params) - .expect("Sending acceptance success failed"); - acceptance_fail_check(&mut sender, tok.req_id, stamp_buf); + testbench + .acceptance_failure(init_token, fail_params) + .expect("sending acceptance failure failed"); + testbench.acceptance_fail_check(stamp_buf); } #[test] fn test_basic_acceptance_failure_with_helper() { - let (mut b, tok) = base_with_helper_init(); - b.rep().reporter.dest_id = 5; + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let init_token = testbench.init(); let stamp_buf = [1, 2, 3, 4, 5, 6, 7]; let fail_code = EcssEnumU16::new(2); let fail_params = FailParams::new_no_fail_data(stamp_buf.as_slice(), &fail_code); - b.helper - .acceptance_failure(tok, fail_params) - .expect("Sending acceptance success failed"); - acceptance_fail_check(&mut b.helper.sender, tok.req_id, stamp_buf); + testbench + .acceptance_failure(init_token, fail_params) + .expect("sending acceptance failure failed"); + testbench.acceptance_fail_check(stamp_buf); } #[test] fn test_acceptance_fail_data_too_large() { - let (mut b, tok) = base_with_helper_init(); - b.rep().reporter.dest_id = 5; + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let init_token = testbench.init(); let stamp_buf = [1, 2, 3, 4, 5, 6, 7]; let fail_code = EcssEnumU16::new(2); let fail_data: [u8; 16] = [0; 16]; // 4 req ID + 1 byte step + 2 byte error code + 8 byte fail data - assert_eq!(b.rep().allowed_source_data_len(), 15); + assert_eq!(testbench.reporter.allowed_source_data_len(), 15); let fail_params = FailParams::new(stamp_buf.as_slice(), &fail_code, fail_data.as_slice()); - let res = b.helper.acceptance_failure(tok, fail_params); - assert!(res.is_err()); - let err_with_token = res.unwrap_err(); - match err_with_token { + let result = testbench.acceptance_failure(init_token, fail_params); + assert!(result.is_err()); + let error = result.unwrap_err(); + match error { EcssTmtcError::Pus(PusError::ByteConversion(e)) => match e { ByteConversionError::ToSliceTooSmall { found, expected } => { assert_eq!( expected, fail_data.len() + RequestId::SIZE_AS_BYTES + fail_code.size() ); - assert_eq!(found, b.rep().allowed_source_data_len()); + assert_eq!(found, testbench.reporter.allowed_source_data_len()); } _ => { panic!("{}", format!("Unexpected error {:?}", e)) } }, _ => { - panic!("{}", format!("Unexpected error {:?}", err_with_token)) + panic!("{}", format!("Unexpected error {:?}", error)) } } } #[test] fn test_basic_acceptance_failure_with_fail_data() { - let (b, tok) = base_init(false); - let sender = TestSender::default(); + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); let fail_code = EcssEnumU8::new(10); let fail_data = EcssEnumU32::new(12); let mut fail_data_raw = [0; 4]; fail_data.write_to_be_bytes(&mut fail_data_raw).unwrap(); let fail_params = FailParams::new(&EMPTY_STAMP, &fail_code, fail_data_raw.as_slice()); - b.vr.acceptance_failure(tok, &sender, fail_params) - 
.expect("Sending acceptance success failed"); + let init_token = testbench.init(); + testbench + .acceptance_failure(init_token, fail_params) + .expect("sending acceptance failure failed"); let cmp_info = TmInfo { + requestor: MessageMetadata::new(testbench.request_id.into(), testbench.id), common: CommonTmInfo { subservice: 2, apid: TEST_APID, @@ -1716,253 +2158,75 @@ pub mod tests { time_stamp: EMPTY_STAMP, }, additional_data: Some([10, 0, 0, 0, 12].to_vec()), - req_id: tok.req_id, }; - let mut service_queue = sender.service_queue.borrow_mut(); + let mut service_queue = testbench.sender.service_queue.borrow_mut(); assert_eq!(service_queue.len(), 1); let info = service_queue.pop_front().unwrap(); assert_eq!(info, cmp_info); } - fn start_fail_check(sender: &mut TestSender, req_id: RequestId, fail_data_raw: [u8; 4]) { - let mut srv_queue = sender.service_queue.borrow_mut(); - assert_eq!(srv_queue.len(), 2); - let mut cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 1, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: None, - req_id, - }; - let mut info = srv_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); - - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 4, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: Some([&[22], fail_data_raw.as_slice()].concat().to_vec()), - req_id, - }; - info = srv_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); - } - #[test] fn test_start_failure() { - let (b, tok) = base_init(false); - let mut sender = TestSender::default(); + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let init_token = testbench.init(); let fail_code = EcssEnumU8::new(22); let fail_data: i32 = -12; let mut fail_data_raw = [0; 4]; fail_data_raw.copy_from_slice(fail_data.to_be_bytes().as_slice()); let fail_params = FailParams::new(&EMPTY_STAMP, &fail_code, fail_data_raw.as_slice()); - let accepted_token = - b.vr.acceptance_success(tok, &sender, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - b.vr.start_failure(accepted_token, &sender, fail_params) + let accepted_token = testbench + .acceptance_success(init_token, &EMPTY_STAMP) + .expect("Sending acceptance success failed"); + testbench + .start_failure(accepted_token, fail_params) .expect("Start failure failure"); - start_fail_check(&mut sender, tok.req_id, fail_data_raw); + testbench.start_fail_check(fail_data_raw); } #[test] fn test_start_failure_with_helper() { - let (mut b, tok) = base_with_helper_init(); + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let token = testbench.init(); let fail_code = EcssEnumU8::new(22); let fail_data: i32 = -12; let mut fail_data_raw = [0; 4]; fail_data_raw.copy_from_slice(fail_data.to_be_bytes().as_slice()); let fail_params = FailParams::new(&EMPTY_STAMP, &fail_code, fail_data_raw.as_slice()); - let accepted_token = b - .helper - .acceptance_success(tok, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - b.helper + let accepted_token = testbench + .acceptance_success(token, &EMPTY_STAMP) + .expect("acceptance failed"); + testbench .start_failure(accepted_token, fail_params) - .expect("Start failure failure"); - start_fail_check(&mut b.helper.sender, tok.req_id, fail_data_raw); - } - - fn step_success_check(sender: &mut TestSender, req_id: RequestId) { - let mut cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 1, - apid: TEST_APID, - msg_counter: 0, - 
dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: None, - req_id, - }; - let mut srv_queue = sender.service_queue.borrow_mut(); - let mut info = srv_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 3, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: [0, 1, 0, 1, 0, 1, 0], - }, - additional_data: None, - req_id, - }; - info = srv_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 5, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: Some([0].to_vec()), - req_id, - }; - info = srv_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 5, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: Some([1].to_vec()), - req_id, - }; - info = srv_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); + .expect("start failure failed"); + testbench.start_fail_check(fail_data_raw); } #[test] fn test_steps_success() { - let (mut b, tok) = base_init(false); - let mut sender = TestSender::default(); - let accepted_token = b - .rep() - .acceptance_success(tok, &sender, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - let started_token = b - .rep() - .start_success(accepted_token, &sender, &[0, 1, 0, 1, 0, 1, 0]) - .expect("Sending start success failed"); - b.rep() - .step_success(&started_token, &sender, &EMPTY_STAMP, EcssEnumU8::new(0)) - .expect("Sending step 0 success failed"); - b.vr.step_success(&started_token, &sender, &EMPTY_STAMP, EcssEnumU8::new(1)) - .expect("Sending step 1 success failed"); - assert_eq!(sender.service_queue.borrow().len(), 4); - step_success_check(&mut sender, tok.req_id); - } - - #[test] - fn test_steps_success_with_helper() { - let (mut b, tok) = base_with_helper_init(); - let accepted_token = b - .helper - .acceptance_success(tok, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - let started_token = b - .helper - .start_success(accepted_token, &[0, 1, 0, 1, 0, 1, 0]) - .expect("Sending start success failed"); - b.helper + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let token = testbench.init(); + let accepted_token = testbench + .acceptance_success(token, &EMPTY_STAMP) + .expect("acceptance failed"); + let started_token = testbench + .start_success(accepted_token, &EMPTY_STAMP) + .expect("acceptance failed"); + testbench .step_success(&started_token, &EMPTY_STAMP, EcssEnumU8::new(0)) - .expect("Sending step 0 success failed"); - b.helper + .expect("step 0 failed"); + testbench .step_success(&started_token, &EMPTY_STAMP, EcssEnumU8::new(1)) - .expect("Sending step 1 success failed"); - assert_eq!(b.helper.sender.service_queue.borrow().len(), 4); - step_success_check(&mut b.helper.sender, tok.req_id); - } - - fn check_step_failure(sender: &mut TestSender, req_id: RequestId, fail_data_raw: [u8; 4]) { - assert_eq!(sender.service_queue.borrow().len(), 4); - let mut cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 1, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: None, - req_id, - }; - let mut info = sender.service_queue.borrow_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); - - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 3, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: [0, 1, 0, 
1, 0, 1, 0], - }, - additional_data: None, - req_id, - }; - info = sender.service_queue.borrow_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); - - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 5, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: Some([0].to_vec()), - req_id, - }; - info = sender.service_queue.get_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); - - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 6, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: Some( - [ - [1].as_slice(), - &[0, 0, 0x10, 0x20], - fail_data_raw.as_slice(), - ] - .concat() - .to_vec(), - ), - req_id, - }; - info = sender.service_queue.get_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); + .expect("step 1 failed"); + assert_eq!(testbench.sender.service_queue.borrow().len(), 4); + testbench.step_success_check(&EMPTY_STAMP); } #[test] fn test_step_failure() { - let (b, tok) = base_init(false); - let mut sender = TestSender::default(); - let req_id = tok.req_id; + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let token = testbench.init(); let fail_code = EcssEnumU32::new(0x1020); let fail_data: f32 = -22.3232; let mut fail_data_raw = [0; 4]; @@ -1975,269 +2239,54 @@ pub mod tests { fail_data_raw.as_slice(), ); - let accepted_token = - b.vr.acceptance_success(tok, &sender, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - let started_token = - b.vr.start_success(accepted_token, &sender, &[0, 1, 0, 1, 0, 1, 0]) - .expect("Sending start success failed"); - b.vr.step_success(&started_token, &sender, &EMPTY_STAMP, EcssEnumU8::new(0)) - .expect("Sending completion success failed"); - b.vr.step_failure(started_token, &sender, fail_params) - .expect("Step failure failed"); - check_step_failure(&mut sender, req_id, fail_data_raw); - } - - #[test] - fn test_steps_failure_with_helper() { - let (mut b, tok) = base_with_helper_init(); - let req_id = tok.req_id; - let fail_code = EcssEnumU32::new(0x1020); - let fail_data: f32 = -22.3232; - let mut fail_data_raw = [0; 4]; - fail_data_raw.copy_from_slice(fail_data.to_be_bytes().as_slice()); - let fail_step = EcssEnumU8::new(1); - let fail_params = FailParamsWithStep::new( - &EMPTY_STAMP, - &fail_step, - &fail_code, - fail_data_raw.as_slice(), - ); - - let accepted_token = b - .helper - .acceptance_success(tok, &EMPTY_STAMP) + let accepted_token = testbench + .acceptance_success(token, &EMPTY_STAMP) .expect("Sending acceptance success failed"); - let started_token = b - .helper + let started_token = testbench .start_success(accepted_token, &[0, 1, 0, 1, 0, 1, 0]) .expect("Sending start success failed"); - b.helper + testbench .step_success(&started_token, &EMPTY_STAMP, EcssEnumU8::new(0)) .expect("Sending completion success failed"); - b.helper + testbench .step_failure(started_token, fail_params) .expect("Step failure failed"); - check_step_failure(&mut b.helper.sender, req_id, fail_data_raw); - } - - fn completion_fail_check(sender: &mut TestSender, req_id: RequestId) { - assert_eq!(sender.service_queue.borrow().len(), 3); - - let mut cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 1, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: None, - req_id, - }; - let mut info = sender.service_queue.get_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); - - cmp_info = TmInfo { - common: CommonTmInfo { - 
subservice: 3, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: [0, 1, 0, 1, 0, 1, 0], - }, - additional_data: None, - req_id, - }; - info = sender.service_queue.get_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); - - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 8, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: Some([0, 0, 0x10, 0x20].to_vec()), - req_id, - }; - info = sender.service_queue.get_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); + testbench.check_step_failure(fail_data_raw); } #[test] fn test_completion_failure() { - let (b, tok) = base_init(false); - let mut sender = TestSender::default(); - let req_id = tok.req_id; + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let token = testbench.init(); let fail_code = EcssEnumU32::new(0x1020); let fail_params = FailParams::new_no_fail_data(&EMPTY_STAMP, &fail_code); - let accepted_token = - b.vr.acceptance_success(tok, &sender, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - let started_token = - b.vr.start_success(accepted_token, &sender, &[0, 1, 0, 1, 0, 1, 0]) - .expect("Sending start success failed"); - b.vr.completion_failure(started_token, &sender, fail_params) - .expect("Completion failure"); - completion_fail_check(&mut sender, req_id); - } - - #[test] - fn test_completion_failure_with_helper() { - let (mut b, tok) = base_with_helper_init(); - let req_id = tok.req_id; - let fail_code = EcssEnumU32::new(0x1020); - let fail_params = FailParams::new_no_fail_data(&EMPTY_STAMP, &fail_code); - - let accepted_token = b - .helper - .acceptance_success(tok, &EMPTY_STAMP) + let accepted_token = testbench + .acceptance_success(token, &EMPTY_STAMP) .expect("Sending acceptance success failed"); - let started_token = b - .helper + let started_token = testbench .start_success(accepted_token, &[0, 1, 0, 1, 0, 1, 0]) .expect("Sending start success failed"); - b.helper + testbench .completion_failure(started_token, fail_params) .expect("Completion failure"); - completion_fail_check(&mut b.helper.sender, req_id); - } - - fn completion_success_check(sender: &mut TestSender, req_id: RequestId) { - assert_eq!(sender.service_queue.borrow().len(), 3); - let cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 1, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: None, - req_id, - }; - let mut info = sender.service_queue.borrow_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); - - let cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 3, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: [0, 1, 0, 1, 0, 1, 0], - }, - additional_data: None, - req_id, - }; - info = sender.service_queue.borrow_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); - let cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 7, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: None, - req_id, - }; - info = sender.service_queue.borrow_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); + testbench.completion_fail_check(); } #[test] fn test_complete_success_sequence() { - let (b, tok) = base_init(false); - let mut sender = TestSender::default(); - let accepted_token = - b.vr.acceptance_success(tok, &sender, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - let started_token = - b.vr.start_success(accepted_token, &sender, &[0, 1, 0, 1, 0, 1, 0]) - 
.expect("Sending start success failed"); - b.vr.completion_success(started_token, &sender, &EMPTY_STAMP) - .expect("Sending completion success failed"); - completion_success_check(&mut sender, tok.req_id); - } - - #[test] - fn test_complete_success_sequence_with_helper() { - let (mut b, tok) = base_with_helper_init(); - let accepted_token = b - .helper - .acceptance_success(tok, &EMPTY_STAMP) + let mut testbench = + VerificationReporterTestbench::new(TEST_COMPONENT_ID.id(), create_generic_ping()); + let token = testbench.init(); + let accepted_token = testbench + .acceptance_success(token, &EMPTY_STAMP) .expect("Sending acceptance success failed"); - let started_token = b - .helper + let started_token = testbench .start_success(accepted_token, &[0, 1, 0, 1, 0, 1, 0]) .expect("Sending start success failed"); - b.helper + testbench .completion_success(started_token, &EMPTY_STAMP) .expect("Sending completion success failed"); - completion_success_check(&mut b.helper.sender, tok.req_id); - } - - #[test] - fn test_seq_count_increment() { - let pool_cfg = - StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false); - let tm_pool = StaticMemoryPool::new(pool_cfg.clone()); - let shared_tm_store = SharedTmPool::new(tm_pool); - let shared_tm_pool = shared_tm_store.clone_backing_pool(); - let (verif_tx, verif_rx) = mpsc::channel(); - let sender = - TmInSharedPoolSenderWithId::new(0, "Verification Sender", shared_tm_store, verif_tx); - let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); - let mut reporter = VerificationReporterWithSender::new(&cfg, sender); - - let mut sph = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); - let tc_header = PusTcSecondaryHeader::new_simple(17, 1); - let pus_tc_0 = PusTcCreator::new_no_app_data(&mut sph, tc_header, true); - let init_token = reporter.add_tc(&pus_tc_0); - - // Complete success sequence for a telecommand - let accepted_token = reporter - .acceptance_success(init_token, &EMPTY_STAMP) - .unwrap(); - let started_token = reporter - .start_success(accepted_token, &EMPTY_STAMP) - .unwrap(); - reporter - .completion_success(started_token, &EMPTY_STAMP) - .unwrap(); - - // Verify it arrives correctly on receiver end - let mut tm_buf: [u8; 1024] = [0; 1024]; - let mut packet_idx = 0; - while packet_idx < 3 { - let addr = verif_rx.recv_timeout(Duration::from_millis(10)).unwrap(); - let tm_len; - { - let mut rg = shared_tm_pool.write().expect("Error locking shared pool"); - let store_guard = rg.read_with_guard(addr); - tm_len = store_guard - .read(&mut tm_buf) - .expect("Error reading TM slice"); - } - let (pus_tm, _) = - PusTmReader::new(&tm_buf[0..tm_len], 7).expect("Error reading verification TM"); - if packet_idx == 0 { - assert_eq!(pus_tm.subservice(), 1); - assert_eq!(pus_tm.sp_header.seq_count(), 0); - } else if packet_idx == 1 { - assert_eq!(pus_tm.subservice(), 3); - assert_eq!(pus_tm.sp_header.seq_count(), 0); - } else if packet_idx == 2 { - assert_eq!(pus_tm.subservice(), 7); - assert_eq!(pus_tm.sp_header.seq_count(), 0); - } - packet_idx += 1; - } + testbench.completion_success_check(); } } diff --git a/satrs/src/queue.rs b/satrs/src/queue.rs index 5ba4bdc..93c8ec8 100644 --- a/satrs/src/queue.rs +++ b/satrs/src/queue.rs @@ -4,11 +4,17 @@ use std::error::Error; #[cfg(feature = "std")] use std::sync::mpsc; +use crate::ComponentId; + +/// Generic channel ID type. +pub type ChannelId = u32; + /// Generic error type for sending something via a message queue. 
-#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum GenericSendError { RxDisconnected, QueueFull(Option), + TargetDoesNotExist(ComponentId), } impl Display for GenericSendError { @@ -20,6 +26,9 @@ impl Display for GenericSendError { GenericSendError::QueueFull(max_cap) => { write!(f, "queue with max capacity of {max_cap:?} is full") } + GenericSendError::TargetDoesNotExist(target) => { + write!(f, "target queue with ID {target} does not exist") + } } } } @@ -28,17 +37,17 @@ impl Display for GenericSendError { impl Error for GenericSendError {} /// Generic error type for sending something via a message queue. -#[derive(Debug, Copy, Clone)] -pub enum GenericRecvError { +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum GenericReceiveError { Empty, - TxDisconnected, + TxDisconnected(Option), } -impl Display for GenericRecvError { +impl Display for GenericReceiveError { fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { match self { - Self::TxDisconnected => { - write!(f, "tx side has disconnected") + Self::TxDisconnected(channel_id) => { + write!(f, "tx side with id {channel_id:?} has disconnected") } Self::Empty => { write!(f, "nothing to receive") @@ -48,7 +57,43 @@ impl Display for GenericRecvError { } #[cfg(feature = "std")] -impl Error for GenericRecvError {} +impl Error for GenericReceiveError {} + +#[derive(Debug, Clone)] +pub enum GenericTargetedMessagingError { + Send(GenericSendError), + Receive(GenericReceiveError), +} +impl From for GenericTargetedMessagingError { + fn from(value: GenericSendError) -> Self { + Self::Send(value) + } +} + +impl From for GenericTargetedMessagingError { + fn from(value: GenericReceiveError) -> Self { + Self::Receive(value) + } +} + +impl Display for GenericTargetedMessagingError { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + match self { + Self::Send(err) => write!(f, "generic targeted messaging error: {}", err), + Self::Receive(err) => write!(f, "generic targeted messaging error: {}", err), + } + } +} + +#[cfg(feature = "std")] +impl Error for GenericTargetedMessagingError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + match self { + GenericTargetedMessagingError::Send(send) => Some(send), + GenericTargetedMessagingError::Receive(receive) => Some(receive), + } + } +} #[cfg(feature = "std")] impl From> for GenericSendError { diff --git a/satrs/src/request.rs b/satrs/src/request.rs index 24ca497..66ce5d3 100644 --- a/satrs/src/request.rs +++ b/satrs/src/request.rs @@ -1,110 +1,587 @@ -use core::fmt; +use core::{fmt, marker::PhantomData}; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "alloc")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +pub use alloc_mod::*; + #[cfg(feature = "std")] -use std::error::Error; +#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] +pub use std_mod::*; use spacepackets::{ ecss::{tc::IsPusTelecommand, PusPacket}, ByteConversionError, CcsdsPacket, }; -use crate::TargetId; +use crate::{queue::GenericTargetedMessagingError, ComponentId}; +/// Generic request ID type. Requests can be associated with an ID to have a unique identifier +/// for them. This can be useful for tasks like tracking their progress. +pub type RequestId = u32; + +/// CCSDS APID type definition. Please note that the APID is a 14 bit value. 
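The new GenericTargetedMessagingError groups the send and receive error types behind one enum, with From conversions so that the question mark operator works on either low-level result. Below is a minimal std-only stand-in for that layering; the type names are shortened and are not the satrs::queue definitions.

    // Simplified stand-in for the error layering introduced in this hunk.
    #![allow(dead_code)] // some variants exist only for illustration

    #[derive(Debug)]
    enum SendError {
        RxDisconnected,
        TargetDoesNotExist(u64),
    }

    #[derive(Debug)]
    enum ReceiveError {
        Empty,
        TxDisconnected(Option<u64>),
    }

    #[derive(Debug)]
    enum MessagingError {
        Send(SendError),
        Receive(ReceiveError),
    }

    impl From<SendError> for MessagingError {
        fn from(value: SendError) -> Self {
            Self::Send(value)
        }
    }

    impl From<ReceiveError> for MessagingError {
        fn from(value: ReceiveError) -> Self {
            Self::Receive(value)
        }
    }

    // The From impls let `?` convert both low-level errors into the combined
    // error type without explicit mapping at each call site.
    fn route_to(target: u64) -> Result<(), MessagingError> {
        let send_result: Result<(), SendError> = Err(SendError::TargetDoesNotExist(target));
        send_result?;
        Ok(())
    }

    fn main() {
        match route_to(42) {
            Err(MessagingError::Send(SendError::TargetDoesNotExist(id))) => {
                println!("no route to component {id}")
            }
            other => println!("unexpected result: {other:?}"),
        }
    }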
pub type Apid = u16; -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum TargetIdCreationError { - ByteConversion(ByteConversionError), - NotEnoughAppData(usize), +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub struct UniqueApidTargetId { + pub apid: Apid, + pub unique_id: u32, } -impl From for TargetIdCreationError { - fn from(e: ByteConversionError) -> Self { - Self::ByteConversion(e) +impl UniqueApidTargetId { + pub const fn new(apid: Apid, target: u32) -> Self { + Self { + apid, + unique_id: target, + } + } + + pub fn raw(&self) -> ComponentId { + ((self.apid as u64) << 32) | (self.unique_id as u64) + } + + pub fn id(&self) -> ComponentId { + self.raw() + } + + /// This function attempts to build the ID from a PUS telecommand by extracting the APID + /// and the first four bytes of the application data field as the target field. + pub fn from_pus_tc( + tc: &(impl CcsdsPacket + PusPacket + IsPusTelecommand), + ) -> Result { + if tc.user_data().len() < 4 { + return Err(ByteConversionError::FromSliceTooSmall { + found: tc.user_data().len(), + expected: 4, + }); + } + Ok(Self::new( + tc.apid(), + u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap()), + )) } } -impl fmt::Display for TargetIdCreationError { +impl From for UniqueApidTargetId { + fn from(raw: u64) -> Self { + Self { + apid: (raw >> 32) as u16, + unique_id: raw as u32, + } + } +} + +impl From for u64 { + fn from(target_and_apid_id: UniqueApidTargetId) -> Self { + target_and_apid_id.raw() + } +} + +impl fmt::Display for UniqueApidTargetId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::ByteConversion(e) => write!(f, "target ID creation: {}", e), - Self::NotEnoughAppData(len) => { - write!(f, "not enough app data to generate target ID: {}", len) + write!( + f, + "Target and APID ID with APID {:#03x} and target {}", + self.apid, self.unique_id + ) + } +} + +#[derive(Debug, Copy, PartialEq, Eq, Clone)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct MessageMetadata { + request_id: RequestId, + sender_id: ComponentId, +} + +impl MessageMetadata { + pub const fn new(request_id: RequestId, sender_id: ComponentId) -> Self { + Self { + request_id, + sender_id, + } + } + + pub fn request_id(&self) -> RequestId { + self.request_id + } + + pub fn sender_id(&self) -> ComponentId { + self.sender_id + } +} + +/// Generic message type which is associated with a sender using a [ChannelId] and associated +/// with a request using a [RequestId]. +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct GenericMessage { + pub requestor_info: MessageMetadata, + pub message: MSG, +} + +impl GenericMessage { + pub fn new(requestor_info: MessageMetadata, message: MSG) -> Self { + Self { + requestor_info, + message, + } + } + + delegate::delegate! { + to self.requestor_info { + pub fn request_id(&self) -> RequestId; + pub fn sender_id(&self) -> ComponentId; + } + } +} + +/// Generic trait for objects which can send targeted messages. +pub trait MessageSender: Send { + fn send(&self, message: GenericMessage) -> Result<(), GenericTargetedMessagingError>; +} + +// Generic trait for objects which can receive targeted messages. 
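The UniqueApidTargetId added here packs the 16-bit APID into the upper 32 bits of the 64-bit component ID and the unique ID into the lower 32 bits, so the ID round-trips losslessly through u64. A short usage sketch, assuming the satrs::request path this diff introduces; the concrete values are arbitrary.

    use satrs::request::UniqueApidTargetId;

    fn main() {
        let id = UniqueApidTargetId::new(0x111, 0x01);
        // APID in the upper half, unique ID in the lower half of the u64.
        let raw: u64 = id.raw();
        assert_eq!(raw, (0x111_u64 << 32) | 0x01);
        // The conversion is lossless in both directions.
        assert_eq!(UniqueApidTargetId::from(raw), id);
        // The Display implementation prints both the APID and the unique ID.
        println!("{}", id);
    }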
+pub trait MessageReceiver { + fn try_recv(&self) -> Result>, GenericTargetedMessagingError>; +} + +pub struct MessageWithSenderIdReceiver>(pub R, PhantomData); + +impl> From for MessageWithSenderIdReceiver { + fn from(receiver: R) -> Self { + MessageWithSenderIdReceiver(receiver, PhantomData) + } +} + +impl> MessageWithSenderIdReceiver { + pub fn try_recv_message( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.0.try_recv() + } +} + +pub struct MessageReceiverWithId> { + local_channel_id: ComponentId, + reply_receiver: MessageWithSenderIdReceiver, +} + +impl> MessageReceiverWithId { + pub fn new(local_channel_id: ComponentId, reply_receiver: R) -> Self { + Self { + local_channel_id, + reply_receiver: MessageWithSenderIdReceiver::from(reply_receiver), + } + } + + pub fn local_channel_id(&self) -> ComponentId { + self.local_channel_id + } +} + +impl> MessageReceiverWithId { + pub fn try_recv_message( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.reply_receiver.0.try_recv() + } +} + +#[cfg(feature = "alloc")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +pub mod alloc_mod { + use core::marker::PhantomData; + + use crate::queue::GenericSendError; + + use super::*; + use hashbrown::HashMap; + + pub struct MessageSenderMap>( + pub HashMap, + pub(crate) PhantomData, + ); + + impl> Default for MessageSenderMap { + fn default() -> Self { + Self(Default::default(), PhantomData) + } + } + + impl> MessageSenderMap { + pub fn add_message_target(&mut self, target_id: ComponentId, message_sender: S) { + self.0.insert(target_id, message_sender); + } + + pub fn send_message( + &self, + requestor_info: MessageMetadata, + target_channel_id: ComponentId, + message: MSG, + ) -> Result<(), GenericTargetedMessagingError> { + if self.0.contains_key(&target_channel_id) { + return self + .0 + .get(&target_channel_id) + .unwrap() + .send(GenericMessage::new(requestor_info, message)); } + Err(GenericSendError::TargetDoesNotExist(target_channel_id).into()) + } + } + + pub struct MessageSenderAndReceiver, R: MessageReceiver> { + pub local_channel_id: ComponentId, + pub message_sender_map: MessageSenderMap, + pub message_receiver: MessageWithSenderIdReceiver, + } + + impl, R: MessageReceiver> + MessageSenderAndReceiver + { + pub fn new(local_channel_id: ComponentId, message_receiver: R) -> Self { + Self { + local_channel_id, + message_sender_map: Default::default(), + message_receiver: MessageWithSenderIdReceiver::from(message_receiver), + } + } + + pub fn add_message_target(&mut self, target_id: ComponentId, message_sender: S) { + self.message_sender_map + .add_message_target(target_id, message_sender) + } + + pub fn local_channel_id_generic(&self) -> ComponentId { + self.local_channel_id + } + + /// Try to send a message, which can be a reply or a request, depending on the generics. + pub fn send_message( + &self, + request_id: RequestId, + target_id: ComponentId, + message: TO, + ) -> Result<(), GenericTargetedMessagingError> { + self.message_sender_map.send_message( + MessageMetadata::new(request_id, self.local_channel_id_generic()), + target_id, + message, + ) + } + + /// Try to receive a message, which can be a reply or a request, depending on the generics. 
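The MessageSenderMap introduced in this module keeps one sender handle per target ComponentId and rejects sends to unknown targets with GenericSendError::TargetDoesNotExist. A usage sketch with plain mpsc channels, mirroring the unit tests further down in this diff and assuming the satrs::request paths it adds; the IDs and payload values are arbitrary.

    use std::sync::mpsc;

    use satrs::request::{GenericMessage, MessageMetadata, MessageSenderMap};

    const SENDER_ID: u64 = 1;
    const TARGET_ID: u64 = 2;

    fn main() {
        let (tx, rx) = mpsc::channel::<GenericMessage<u32>>();
        let mut sender_map = MessageSenderMap::default();
        // Register the target; sends to unregistered IDs fail with TargetDoesNotExist.
        sender_map.add_message_target(TARGET_ID, tx);
        sender_map
            .send_message(MessageMetadata::new(10, SENDER_ID), TARGET_ID, 42)
            .expect("sending failed");
        // The receiving end sees the payload together with the requestor metadata.
        let msg = rx.recv().expect("receiving failed");
        assert_eq!(msg.request_id(), 10);
        assert_eq!(msg.sender_id(), SENDER_ID);
        assert_eq!(msg.message, 42);
    }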
+ pub fn try_recv_message( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.message_receiver.try_recv_message() + } + } + + pub struct RequestAndReplySenderAndReceiver< + REQUEST, + REPLY, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > { + pub local_channel_id: ComponentId, + // These 2 are a functional group. + pub request_sender_map: MessageSenderMap, + pub reply_receiver: MessageWithSenderIdReceiver, + // These 2 are a functional group. + pub request_receiver: MessageWithSenderIdReceiver, + pub reply_sender_map: MessageSenderMap, + } + + impl< + REQUEST, + REPLY, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > RequestAndReplySenderAndReceiver + { + pub fn new( + local_channel_id: ComponentId, + request_receiver: R1, + reply_receiver: R0, + ) -> Self { + Self { + local_channel_id, + request_receiver: request_receiver.into(), + reply_receiver: reply_receiver.into(), + request_sender_map: Default::default(), + reply_sender_map: Default::default(), + } + } + + pub fn local_channel_id_generic(&self) -> ComponentId { + self.local_channel_id } } } #[cfg(feature = "std")] -impl Error for TargetIdCreationError { - fn source(&self) -> Option<&(dyn Error + 'static)> { - if let Self::ByteConversion(e) = self { - return Some(e); +#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] +pub mod std_mod { + + use super::*; + use std::sync::mpsc; + + use crate::queue::{GenericReceiveError, GenericSendError, GenericTargetedMessagingError}; + + impl MessageSender for mpsc::Sender> { + fn send(&self, message: GenericMessage) -> Result<(), GenericTargetedMessagingError> { + self.send(message) + .map_err(|_| GenericSendError::RxDisconnected)?; + Ok(()) } - None } -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub struct TargetAndApidId { - pub apid: Apid, - pub target: u32, -} - -impl TargetAndApidId { - pub fn new(apid: Apid, target: u32) -> Self { - Self { apid, target } - } - - pub fn apid(&self) -> Apid { - self.apid - } - - pub fn target(&self) -> u32 { - self.target - } - - pub fn raw(&self) -> TargetId { - ((self.apid as u64) << 32) | (self.target as u64) - } - - pub fn target_id(&self) -> TargetId { - self.raw() - } - - pub fn from_pus_tc( - tc: &(impl CcsdsPacket + PusPacket + IsPusTelecommand), - ) -> Result { - if tc.user_data().len() < 4 { - return Err(ByteConversionError::FromSliceTooSmall { - found: tc.user_data().len(), - expected: 8, + impl MessageSender for mpsc::SyncSender> { + fn send(&self, message: GenericMessage) -> Result<(), GenericTargetedMessagingError> { + if let Err(e) = self.try_send(message) { + return match e { + mpsc::TrySendError::Full(_) => Err(GenericSendError::QueueFull(None).into()), + mpsc::TrySendError::Disconnected(_) => { + Err(GenericSendError::RxDisconnected.into()) + } + }; } - .into()); + Ok(()) } - Ok(Self { - apid: tc.apid(), - target: u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap()), - }) } + + pub type MessageSenderMapMpsc = MessageReceiverWithId>; + pub type MessageSenderMapBoundedMpsc = MessageReceiverWithId>; + + impl MessageReceiver for mpsc::Receiver> { + fn try_recv(&self) -> Result>, GenericTargetedMessagingError> { + match self.try_recv() { + Ok(msg) => Ok(Some(msg)), + Err(e) => match e { + mpsc::TryRecvError::Empty => Ok(None), + mpsc::TryRecvError::Disconnected => { + Err(GenericReceiveError::TxDisconnected(None).into()) + } + }, + } + } + } + + pub type MessageReceiverWithIdMpsc = MessageReceiverWithId>; } -impl From for 
TargetAndApidId { - fn from(raw: u64) -> Self { - Self { - apid: (raw >> 32) as u16, - target: raw as u32, +#[cfg(test)] +mod tests { + use std::sync::mpsc; + + use alloc::string::ToString; + use spacepackets::{ + ecss::tc::{PusTcCreator, PusTcSecondaryHeader}, + ByteConversionError, SpHeader, + }; + + use crate::{ + queue::{GenericReceiveError, GenericSendError, GenericTargetedMessagingError}, + request::{MessageMetadata, MessageSenderMap}, + }; + + use super::{GenericMessage, MessageReceiverWithId, UniqueApidTargetId}; + + const TEST_CHANNEL_ID_0: u64 = 1; + const TEST_CHANNEL_ID_1: u64 = 2; + const TEST_CHANNEL_ID_2: u64 = 3; + + #[test] + fn test_basic_target_id_with_apid() { + let id = UniqueApidTargetId::new(0x111, 0x01); + assert_eq!(id.apid, 0x111); + assert_eq!(id.unique_id, 0x01); + assert_eq!(id.id(), id.raw()); + assert_eq!(u64::from(id), id.raw()); + let id_raw = id.raw(); + let id_from_raw = UniqueApidTargetId::from(id_raw); + assert_eq!(id_from_raw, id); + assert_eq!(id.id(), (0x111 << 32) | 0x01); + let string = id.to_string(); + assert_eq!( + string, + "Target and APID ID with APID 0x111 and target 1".to_string() + ); + } + + #[test] + fn test_basic_target_id_with_apid_from_pus_tc() { + let mut sp_header = SpHeader::tc_unseg(0x111, 5, 0).unwrap(); + let app_data = 1_u32.to_be_bytes(); + let pus_tc = PusTcCreator::new_simple(&mut sp_header, 17, 1, Some(&app_data), true); + let id = UniqueApidTargetId::from_pus_tc(&pus_tc).unwrap(); + assert_eq!(id.apid, 0x111); + assert_eq!(id.unique_id, 1); + } + + #[test] + fn test_basic_target_id_with_apid_from_pus_tc_invalid_app_data() { + let mut sp_header = SpHeader::tc_unseg(0x111, 5, 0).unwrap(); + let sec_header = PusTcSecondaryHeader::new_simple(17, 1); + let pus_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true); + let error = UniqueApidTargetId::from_pus_tc(&pus_tc); + assert!(error.is_err()); + let error = error.unwrap_err(); + if let ByteConversionError::FromSliceTooSmall { found, expected } = error { + assert_eq!(found, 0); + assert_eq!(expected, 4); + } else { + panic!("Unexpected error type"); + } + } + + #[test] + fn test_receiver_only() { + let (sender, receiver) = mpsc::channel(); + // Test structure with only a receiver which has a channel ID. + let receiver = MessageReceiverWithId::new(TEST_CHANNEL_ID_0, receiver); + let request_id = 5; + sender + .send(GenericMessage::new( + MessageMetadata::new(request_id, TEST_CHANNEL_ID_1), + 5, + )) + .unwrap(); + let reply = receiver.try_recv_message().unwrap(); + assert!(reply.is_some()); + assert_eq!(receiver.local_channel_id(), TEST_CHANNEL_ID_0); + let reply = reply.unwrap(); + assert_eq!(reply.requestor_info.request_id, request_id); + assert_eq!(reply.requestor_info.sender_id, TEST_CHANNEL_ID_1); + assert_eq!(reply.message, 5); + } + + #[test] + fn test_receiver_empty() { + let (_sender, receiver) = mpsc::sync_channel::>(2); + // Test structure with only a receiver which has a channel ID. + let receiver = MessageReceiverWithId::new(TEST_CHANNEL_ID_0, receiver); + let reply = receiver.try_recv_message().unwrap(); + assert!(reply.is_none()); + } + + #[test] + fn test_all_tx_disconnected() { + let (sender, receiver) = mpsc::sync_channel::>(2); + // Test structure with only a receiver which has a channel ID. 
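The tests around this point exercise MessageReceiverWithId: a component owns the receiving end of a channel together with its own ComponentId and polls it without blocking, getting Ok(None) on an empty queue and a receive error once every sender has been dropped. A condensed round-trip sketch, assuming the satrs::request paths from this diff; the IDs and payload values are arbitrary.

    use std::sync::mpsc;

    use satrs::request::{GenericMessage, MessageMetadata, MessageReceiverWithId};

    const OWN_ID: u64 = 1;
    const PEER_ID: u64 = 2;

    fn main() {
        let (tx, rx) = mpsc::channel::<GenericMessage<u32>>();
        let receiver = MessageReceiverWithId::new(OWN_ID, rx);
        tx.send(GenericMessage::new(MessageMetadata::new(7, PEER_ID), 123))
            .unwrap();
        // try_recv_message returns Ok(None) on an empty queue instead of an error.
        let msg = receiver
            .try_recv_message()
            .expect("receive error")
            .expect("queue was empty");
        assert_eq!(receiver.local_channel_id(), OWN_ID);
        assert_eq!(msg.request_id(), 7);
        assert_eq!(msg.sender_id(), PEER_ID);
        assert_eq!(msg.message, 123);
    }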
+ let receiver = MessageReceiverWithId::new(TEST_CHANNEL_ID_0, receiver); + drop(sender); + let reply = receiver.try_recv_message(); + assert!(reply.is_err()); + let error = reply.unwrap_err(); + if let GenericTargetedMessagingError::Receive(GenericReceiveError::TxDisconnected(None)) = + error + { + } else { + panic!("unexpected error type"); + } + } + + #[test] + fn test_sender_map() { + let (sender0, receiver0) = mpsc::channel(); + let (sender1, receiver1) = mpsc::channel(); + let mut sender_map = MessageSenderMap::default(); + sender_map.add_message_target(TEST_CHANNEL_ID_1, sender0); + sender_map.add_message_target(TEST_CHANNEL_ID_2, sender1); + sender_map + .send_message( + MessageMetadata::new(1, TEST_CHANNEL_ID_0), + TEST_CHANNEL_ID_1, + 5, + ) + .expect("sending message failed"); + let mut reply = receiver0.recv().expect("receiving message failed"); + assert_eq!(reply.request_id(), 1); + assert_eq!(reply.sender_id(), TEST_CHANNEL_ID_0); + assert_eq!(reply.message, 5); + sender_map + .send_message( + MessageMetadata::new(2, TEST_CHANNEL_ID_0), + TEST_CHANNEL_ID_2, + 10, + ) + .expect("sending message failed"); + reply = receiver1.recv().expect("receiving message failed"); + assert_eq!(reply.request_id(), 2); + assert_eq!(reply.sender_id(), TEST_CHANNEL_ID_0); + assert_eq!(reply.message, 10); + } + + #[test] + fn test_sender_map_target_does_not_exist() { + let (sender0, _) = mpsc::channel(); + let mut sender_map_with_id = MessageSenderMap::default(); + sender_map_with_id.add_message_target(TEST_CHANNEL_ID_1, sender0); + let result = sender_map_with_id.send_message( + MessageMetadata::new(1, TEST_CHANNEL_ID_0), + TEST_CHANNEL_ID_2, + 5, + ); + assert!(result.is_err()); + let error = result.unwrap_err(); + if let GenericTargetedMessagingError::Send(GenericSendError::TargetDoesNotExist(target)) = + error + { + assert_eq!(target, TEST_CHANNEL_ID_2); + } else { + panic!("Unexpected error type"); + } + } + #[test] + fn test_sender_map_queue_full() { + let (sender0, _receiver0) = mpsc::sync_channel(1); + let mut sender_map_with_id = MessageSenderMap::default(); + sender_map_with_id.add_message_target(TEST_CHANNEL_ID_1, sender0); + sender_map_with_id + .send_message( + MessageMetadata::new(1, TEST_CHANNEL_ID_0), + TEST_CHANNEL_ID_1, + 5, + ) + .expect("sending message failed"); + let result = sender_map_with_id.send_message( + MessageMetadata::new(1, TEST_CHANNEL_ID_0), + TEST_CHANNEL_ID_1, + 5, + ); + assert!(result.is_err()); + let error = result.unwrap_err(); + if let GenericTargetedMessagingError::Send(GenericSendError::QueueFull(capacity)) = error { + assert!(capacity.is_none()); + } else { + panic!("Unexpected error type {}", error); + } + } + + #[test] + fn test_sender_map_queue_receiver_disconnected() { + let (sender0, receiver0) = mpsc::sync_channel(1); + let mut sender_map_with_id = MessageSenderMap::default(); + sender_map_with_id.add_message_target(TEST_CHANNEL_ID_1, sender0); + drop(receiver0); + let result = sender_map_with_id.send_message( + MessageMetadata::new(1, TEST_CHANNEL_ID_0), + TEST_CHANNEL_ID_1, + 5, + ); + assert!(result.is_err()); + let error = result.unwrap_err(); + if let GenericTargetedMessagingError::Send(GenericSendError::RxDisconnected) = error { + } else { + panic!("Unexpected error type {}", error); } } } - -impl From for u64 { - fn from(target_and_apid_id: TargetAndApidId) -> Self { - target_and_apid_id.raw() - } -} - -impl fmt::Display for TargetAndApidId { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}, {}", self.apid, 
self.target) - } -} diff --git a/satrs/src/time.rs b/satrs/src/time.rs new file mode 100644 index 0000000..abd3fac --- /dev/null +++ b/satrs/src/time.rs @@ -0,0 +1,7 @@ +use core::fmt::Debug; + +/// Generic abstraction for a check/countdown timer. +pub trait CountdownProvider: Debug { + fn has_expired(&self) -> bool; + fn reset(&mut self); +} diff --git a/satrs/src/tmtc/ccsds_distrib.rs b/satrs/src/tmtc/ccsds_distrib.rs index 10ee80e..7b1ac34 100644 --- a/satrs/src/tmtc/ccsds_distrib.rs +++ b/satrs/src/tmtc/ccsds_distrib.rs @@ -96,6 +96,7 @@ use std::error::Error; pub trait CcsdsPacketHandler { type Error; + // TODO: Rework this to return a boolean based on u16 input.. fn valid_apids(&self) -> &'static [u16]; fn handle_known_apid(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error>; diff --git a/satrs/src/tmtc/tm_helper.rs b/satrs/src/tmtc/tm_helper.rs index 005625a..630a338 100644 --- a/satrs/src/tmtc/tm_helper.rs +++ b/satrs/src/tmtc/tm_helper.rs @@ -8,7 +8,9 @@ pub use std_mod::*; #[cfg(feature = "std")] pub mod std_mod { - use crate::pool::{PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StoreAddr}; + use crate::pool::{ + PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StoreAddr, StoreError, + }; use crate::pus::EcssTmtcError; use spacepackets::ecss::tm::PusTmCreator; use spacepackets::ecss::WritablePusPacket; @@ -34,7 +36,7 @@ pub mod std_mod { } pub fn add_pus_tm(&self, pus_tm: &PusTmCreator) -> Result { - let mut pg = self.0.write().map_err(|_| EcssTmtcError::StoreLock)?; + let mut pg = self.0.write().map_err(|_| StoreError::LockError)?; let addr = pg.free_element(pus_tm.len_written(), |buf| { pus_tm .write_to_bytes(buf) diff --git a/satrs/tests/mode_tree.rs b/satrs/tests/mode_tree.rs new file mode 100644 index 0000000..3603e48 --- /dev/null +++ b/satrs/tests/mode_tree.rs @@ -0,0 +1,358 @@ +use core::cell::Cell; +use std::{println, sync::mpsc}; + +use satrs::mode::{ + ModeError, ModeProvider, ModeReplyReceiver, ModeReplySender, ModeRequestHandler, + ModeRequestHandlerMpscBounded, ModeRequestReceiver, ModeRequestorAndHandlerMpscBounded, + ModeRequestorBoundedMpsc, +}; +use satrs::request::MessageMetadata; +use satrs::{ + mode::{ModeAndSubmode, ModeReply, ModeRequest}, + queue::GenericTargetedMessagingError, + request::GenericMessage, + ComponentId, +}; +use std::string::{String, ToString}; + +pub enum TestComponentId { + Device1 = 1, + Device2 = 2, + Assembly = 3, + PusModeService = 4, +} + +struct PusModeService { + pub request_id_counter: Cell, + pub mode_node: ModeRequestorBoundedMpsc, +} + +impl PusModeService { + pub fn send_announce_mode_cmd_to_assy(&self) { + self.mode_node + .send_mode_request( + self.request_id_counter.get(), + TestComponentId::Assembly as ComponentId, + ModeRequest::AnnounceModeRecursive, + ) + .unwrap(); + self.request_id_counter + .replace(self.request_id_counter.get() + 1); + } +} + +struct TestDevice { + pub name: String, + pub mode_node: ModeRequestHandlerMpscBounded, + pub mode_and_submode: ModeAndSubmode, +} + +impl TestDevice { + pub fn run(&mut self) { + self.check_mode_requests().expect("mode messaging error"); + } + + pub fn check_mode_requests(&mut self) -> Result<(), ModeError> { + if let Some(request) = self.mode_node.try_recv_mode_request()? { + self.handle_mode_request(request)? 
+ } + Ok(()) + } +} + +impl ModeProvider for TestDevice { + fn mode_and_submode(&self) -> ModeAndSubmode { + self.mode_and_submode + } +} + +impl ModeRequestHandler for TestDevice { + type Error = ModeError; + + fn start_transition( + &mut self, + requestor: MessageMetadata, + mode_and_submode: ModeAndSubmode, + ) -> Result<(), ModeError> { + self.mode_and_submode = mode_and_submode; + self.handle_mode_reached(Some(requestor))?; + Ok(()) + } + + fn announce_mode(&self, _requestor_info: MessageMetadata, _recursive: bool) { + println!( + "{}: announcing mode: {:?}", + self.name, self.mode_and_submode + ); + } + + fn handle_mode_reached(&mut self, requestor: Option<MessageMetadata>) -> Result<(), ModeError> { + if let Some(requestor) = requestor { + self.send_mode_reply(requestor, ModeReply::ModeReply(self.mode_and_submode))?; + } + Ok(()) + } + fn send_mode_reply( + &self, + requestor_info: MessageMetadata, + reply: ModeReply, + ) -> Result<(), ModeError> { + self.mode_node.send_mode_reply(requestor_info, reply)?; + Ok(()) + } + + fn handle_mode_info( + &mut self, + requestor_info: MessageMetadata, + info: ModeAndSubmode, + ) -> Result<(), ModeError> { + // A device is a leaf in the tree.. so this really should not happen + println!( + "{}: unexpected mode info from {:?} with mode: {:?}", + self.name, + requestor_info.sender_id(), + info + ); + Ok(()) + } +} + +struct TestAssembly { + pub mode_node: ModeRequestorAndHandlerMpscBounded, + pub mode_requestor_info: Option<MessageMetadata>, + pub mode_and_submode: ModeAndSubmode, + pub target_mode_and_submode: Option<ModeAndSubmode>, +} + +impl ModeProvider for TestAssembly { + fn mode_and_submode(&self) -> ModeAndSubmode { + self.mode_and_submode + } +} + +impl TestAssembly { + pub fn run(&mut self) { + self.check_mode_requests().expect("mode messaging error"); + self.check_mode_replies().expect("mode messaging error"); + } + + pub fn check_mode_requests(&mut self) -> Result<(), GenericTargetedMessagingError> { + if let Some(request) = self.mode_node.try_recv_mode_request()? { + match request.message { + ModeRequest::SetMode(mode_and_submode) => { + self.start_transition(request.requestor_info, mode_and_submode) + .unwrap(); + } + ModeRequest::ReadMode => self + .mode_node + .send_mode_reply( + request.requestor_info, + ModeReply::ModeReply(self.mode_and_submode), + ) + .unwrap(), + ModeRequest::AnnounceMode => self.announce_mode(request.requestor_info, false), + ModeRequest::AnnounceModeRecursive => { + self.announce_mode(request.requestor_info, true) + } + ModeRequest::ModeInfo(_) => todo!(), + } + } + Ok(()) + } + + pub fn check_mode_replies(&mut self) -> Result<(), GenericTargetedMessagingError> { + if let Some(reply_and_id) = self.mode_node.try_recv_mode_reply()?
{ + match reply_and_id.message { + ModeReply::ModeReply(reply) => { + println!( + "TestAssembly: Received mode reply from {:?}, reached: {:?}", + reply_and_id.sender_id(), + reply + ); + } + ModeReply::CantReachMode(_) => todo!(), + ModeReply::WrongMode { expected, reached } => { + println!( + "TestAssembly: Wrong mode reply from {:?}, reached {:?}, expected {:?}", + reply_and_id.sender_id(), + reached, + expected + ); + } + } + } + Ok(()) + } +} + +impl ModeRequestHandler for TestAssembly { + type Error = ModeError; + fn start_transition( + &mut self, + requestor: MessageMetadata, + mode_and_submode: ModeAndSubmode, + ) -> Result<(), Self::Error> { + self.mode_requestor_info = Some(requestor); + self.target_mode_and_submode = Some(mode_and_submode); + Ok(()) + } + + fn announce_mode(&self, requestor_info: MessageMetadata, recursive: bool) { + println!( + "TestAssembly: Announcing mode (recursively: {}): {:?}", + recursive, self.mode_and_submode + ); + // self.mode_requestor_info = Some((request_id, sender_id)); + let mut mode_request = ModeRequest::AnnounceMode; + if recursive { + mode_request = ModeRequest::AnnounceModeRecursive; + } + self.mode_node + .request_sender_map + .0 + .iter() + .for_each(|(_, sender)| { + sender + .send(GenericMessage::new( + MessageMetadata::new( + requestor_info.request_id(), + self.mode_node.local_channel_id_generic(), + ), + mode_request, + )) + .expect("sending mode request failed"); + }); + } + + fn handle_mode_reached( + &mut self, + mode_requestor: Option<MessageMetadata>, + ) -> Result<(), Self::Error> { + if let Some(requestor) = mode_requestor { + self.send_mode_reply(requestor, ModeReply::ModeReply(self.mode_and_submode))?; + } + Ok(()) + } + + fn send_mode_reply( + &self, + requestor: MessageMetadata, + reply: ModeReply, + ) -> Result<(), Self::Error> { + self.mode_node.send_mode_reply(requestor, reply)?; + Ok(()) + } + + fn handle_mode_info( + &mut self, + _requestor_info: MessageMetadata, + _info: ModeAndSubmode, + ) -> Result<(), Self::Error> { + // TODO: A proper assembly must react to mode changes of its children. + Ok(()) + } +} + +fn main() { + // All request channel handles. + let (request_sender_to_dev1, request_receiver_dev1) = mpsc::sync_channel(10); + let (request_sender_to_dev2, request_receiver_dev2) = mpsc::sync_channel(10); + let (request_sender_to_assy, request_receiver_assy) = mpsc::sync_channel(10); + + // All reply channel handles. + let (reply_sender_to_assy, reply_receiver_assy) = mpsc::sync_channel(10); + let (reply_sender_to_pus, reply_receiver_pus) = mpsc::sync_channel(10); + + // Mode requestors and handlers. + let mut mode_node_assy = ModeRequestorAndHandlerMpscBounded::new( + TestComponentId::Assembly as ComponentId, + request_receiver_assy, + reply_receiver_assy, + ); + // Mode requestors only. + let mut mode_node_pus = ModeRequestorBoundedMpsc::new( + TestComponentId::PusModeService as ComponentId, + reply_receiver_pus, + ); + + // Request handlers only. + let mut mode_node_dev1 = ModeRequestHandlerMpscBounded::new( + TestComponentId::Device1 as ComponentId, + request_receiver_dev1, + ); + let mut mode_node_dev2 = ModeRequestHandlerMpscBounded::new( + TestComponentId::Device2 as ComponentId, + request_receiver_dev2, + ); + + // Set up mode request senders first.
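+ // Overview of the wiring below: the PUS mode service can send requests to the assembly
+ // and to both devices, the assembly forwards requests to its two child devices, and
+ // mode replies flow back the other way (devices to assembly and PUS service, assembly
+ // to PUS service).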
+ mode_node_pus.add_message_target( + TestComponentId::Assembly as ComponentId, + request_sender_to_assy, + ); + mode_node_pus.add_message_target( + TestComponentId::Device1 as ComponentId, + request_sender_to_dev1.clone(), + ); + mode_node_pus.add_message_target( + TestComponentId::Device2 as ComponentId, + request_sender_to_dev2.clone(), + ); + mode_node_assy.add_request_target( + TestComponentId::Device1 as ComponentId, + request_sender_to_dev1, + ); + mode_node_assy.add_request_target( + TestComponentId::Device2 as ComponentId, + request_sender_to_dev2, + ); + + // Set up mode reply senders. + mode_node_dev1.add_message_target( + TestComponentId::Assembly as ComponentId, + reply_sender_to_assy.clone(), + ); + mode_node_dev1.add_message_target( + TestComponentId::PusModeService as ComponentId, + reply_sender_to_pus.clone(), + ); + mode_node_dev2.add_message_target( + TestComponentId::Assembly as ComponentId, + reply_sender_to_assy, + ); + mode_node_dev2.add_message_target( + TestComponentId::PusModeService as ComponentId, + reply_sender_to_pus.clone(), + ); + mode_node_assy.add_reply_target( + TestComponentId::PusModeService as ComponentId, + reply_sender_to_pus, + ); + + let mut device1 = TestDevice { + name: "Test Device 1".to_string(), + mode_node: mode_node_dev1, + mode_and_submode: ModeAndSubmode::new(0, 0), + }; + let mut device2 = TestDevice { + name: "Test Device 2".to_string(), + mode_node: mode_node_dev2, + mode_and_submode: ModeAndSubmode::new(0, 0), + }; + let mut assy = TestAssembly { + mode_node: mode_node_assy, + mode_requestor_info: None, + mode_and_submode: ModeAndSubmode::new(0, 0), + target_mode_and_submode: None, + }; + let pus_service = PusModeService { + request_id_counter: Cell::new(0), + mode_node: mode_node_pus, + }; + + pus_service.send_announce_mode_cmd_to_assy(); + assy.run(); + device1.run(); + device2.run(); + assy.run(); +} diff --git a/satrs/tests/pus_events.rs b/satrs/tests/pus_events.rs index ca6d71e..d9c87fe 100644 --- a/satrs/tests/pus_events.rs +++ b/satrs/tests/pus_events.rs @@ -5,7 +5,8 @@ use satrs::events::{EventU32, EventU32TypedSev, Severity, SeverityInfo}; use satrs::params::U32Pair; use satrs::params::{Params, ParamsHeapless, WritableToBeBytes}; use satrs::pus::event_man::{DefaultPusEventMgmtBackend, EventReporter, PusEventDispatcher}; -use satrs::pus::TmAsVecSenderWithMpsc; +use satrs::pus::PusTmAsVec; +use satrs::request::UniqueApidTargetId; use spacepackets::ecss::tm::PusTmReader; use spacepackets::ecss::{PusError, PusPacket}; use std::sync::mpsc::{self, SendError, TryRecvError}; @@ -15,6 +16,8 @@ const INFO_EVENT: EventU32TypedSev = EventU32TypedSev::::const_new(1, 0); const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5); const EMPTY_STAMP: [u8; 7] = [0; 7]; +const TEST_APID: u16 = 0x02; +const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, 0x05); #[derive(Debug, Clone)] pub enum CustomTmSenderError { @@ -30,15 +33,14 @@ fn test_threaded_usage() { let (pus_event_man_tx, pus_event_man_rx) = mpsc::channel(); let pus_event_man_send_provider = EventU32SenderMpsc::new(1, pus_event_man_tx); - event_man.subscribe_all(pus_event_man_send_provider.channel_id()); + event_man.subscribe_all(pus_event_man_send_provider.target_id()); event_man.add_sender(pus_event_man_send_provider); - let (event_tx, event_rx) = mpsc::channel(); - let reporter = EventReporter::new(0x02, 128).expect("Creating event reporter failed"); - let mut pus_event_man = - PusEventDispatcher::new(reporter, DefaultPusEventMgmtBackend::default()); 
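Returning to the mode tree test that closes above: the same send_mode_request call used for the announce command could drive an actual mode transition. A sketch only, assuming the API exactly as it appears in that test and no additional satrs calls:

// Sketch: command the assembly into mode 1/0 via the PUS service node and let one
// scheduling round process the request and the resulting reply.
fn command_mode_one(
    pus_service: &PusModeService,
    assy: &mut TestAssembly,
    device1: &mut TestDevice,
    device2: &mut TestDevice,
) {
    pus_service
        .mode_node
        .send_mode_request(
            pus_service.request_id_counter.get(),
            TestComponentId::Assembly as ComponentId,
            ModeRequest::SetMode(ModeAndSubmode::new(1, 0)),
        )
        .expect("sending SetMode request failed");
    // The test assembly only stores the target mode; a full assembly implementation would
    // now command its child devices and reply once both have reached the target.
    assy.run();
    device1.run();
    device2.run();
    assy.run();
}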
+ let (event_tx, event_rx) = mpsc::channel::<PusTmAsVec>(); + let reporter = + EventReporter::new(TEST_ID.raw(), 0x02, 128).expect("Creating event reporter failed"); + let pus_event_man = PusEventDispatcher::new(reporter, DefaultPusEventMgmtBackend::default()); // PUS + Generic event manager thread let jh0 = thread::spawn(move || { - let mut sender = TmAsVecSenderWithMpsc::new(0, "event_sender", event_tx); let mut event_cnt = 0; let mut params_array: [u8; 128] = [0; 128]; loop { @@ -46,9 +48,9 @@ fn test_threaded_usage() { assert!(res.is_ok()); match pus_event_man_rx.try_recv() { Ok((event, aux_data)) => { - let mut gen_event = |aux_data| { + let gen_event = |aux_data| { pus_event_man.generate_pus_event_tm_generic( - &mut sender, + &event_tx, &EMPTY_STAMP, event, aux_data, @@ -60,12 +62,12 @@ fn test_threaded_usage() { ParamsHeapless::Raw(raw) => { raw.write_to_be_bytes(&mut params_array) .expect("Writing raw parameter failed"); - gen_event(Some(&params_array[0..raw.raw_len()])) + gen_event(Some(&params_array[0..raw.written_len()])) } ParamsHeapless::EcssEnum(e) => { e.write_to_be_bytes(&mut params_array) .expect("Writing ECSS enum failed"); - gen_event(Some(&params_array[0..e.raw_len()])) + gen_event(Some(&params_array[0..e.written_len()])) } }, Params::Vec(vec) => gen_event(Some(vec.as_slice())), @@ -101,8 +103,8 @@ fn test_threaded_usage() { match event_rx.try_recv() { // Event TM received successfully Ok(event_tm) => { - let tm = - PusTmReader::new(event_tm.as_slice(), 7).expect("Deserializing TM failed"); + let tm = PusTmReader::new(event_tm.packet.as_slice(), 7) + .expect("Deserializing TM failed"); assert_eq!(tm.0.service(), 5); assert_eq!(tm.0.subservice(), 1); let src_data = tm.0.source_data(); @@ -127,8 +129,8 @@ fn test_threaded_usage() { match event_rx.try_recv() { // Event TM received successfully Ok(event_tm) => { - let tm = - PusTmReader::new(event_tm.as_slice(), 7).expect("Deserializing TM failed"); + let tm = PusTmReader::new(event_tm.packet.as_slice(), 7) + .expect("Deserializing TM failed"); assert_eq!(tm.0.service(), 5); assert_eq!(tm.0.subservice(), 2); let src_data = tm.0.source_data(); diff --git a/satrs/tests/pus_verification.rs b/satrs/tests/pus_verification.rs index 386fea6..46dda69 100644 --- a/satrs/tests/pus_verification.rs +++ b/satrs/tests/pus_verification.rs @@ -3,10 +3,11 @@ pub mod crossbeam_test { use hashbrown::HashMap; use satrs::pool::{PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig}; use satrs::pus::verification::{ - FailParams, RequestId, VerificationReporterCfg, VerificationReporterWithSender, + FailParams, RequestId, VerificationReporter, VerificationReporterCfg, VerificationReportingProvider, }; use satrs::pus::TmInSharedPoolSenderWithCrossbeam; + use satrs::request::UniqueApidTargetId; use satrs::tmtc::tm_helper::SharedTmPool; use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader}; use spacepackets::ecss::tm::PusTmReader; @@ -17,6 +18,8 @@ pub mod crossbeam_test { use std::time::Duration; const TEST_APID: u16 = 0x03; + const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, 0x05); + const FIXED_STAMP: [u8; 7] = [0; 7]; const PACKETS_SENT: u8 = 8; @@ -40,13 +43,9 @@ pub mod crossbeam_test { let shared_tc_pool_0 = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg))); let shared_tc_pool_1 = shared_tc_pool_0.clone(); let (tx, rx) = crossbeam_channel::bounded(10); - let sender = TmInSharedPoolSenderWithCrossbeam::new( - 0, - "verif_sender", - shared_tm_pool.clone(), - tx.clone(), - ); - let mut reporter_with_sender_0 =
VerificationReporterWithSender::new(&cfg, sender); + let sender_0 = TmInSharedPoolSenderWithCrossbeam::new(shared_tm_pool.clone(), tx.clone()); + let sender_1 = sender_0.clone(); + let mut reporter_with_sender_0 = VerificationReporter::new(&cfg); let mut reporter_with_sender_1 = reporter_with_sender_0.clone(); // For test purposes, we retrieve the request ID from the TCs and pass them to the receiver // tread. @@ -93,24 +92,36 @@ pub mod crossbeam_test { let token = reporter_with_sender_0.add_tc_with_req_id(req_id_0); let accepted_token = reporter_with_sender_0 - .acceptance_success(token, &FIXED_STAMP) + .acceptance_success(TEST_ID.raw(), &sender_0, token, &FIXED_STAMP) .expect("Acceptance success failed"); // Do some start handling here let started_token = reporter_with_sender_0 - .start_success(accepted_token, &FIXED_STAMP) + .start_success(TEST_ID.raw(), &sender_0, accepted_token, &FIXED_STAMP) .expect("Start success failed"); // Do some step handling here reporter_with_sender_0 - .step_success(&started_token, &FIXED_STAMP, EcssEnumU8::new(0)) + .step_success( + TEST_ID.raw(), + &sender_0, + &started_token, + &FIXED_STAMP, + EcssEnumU8::new(0), + ) .expect("Start success failed"); // Finish up reporter_with_sender_0 - .step_success(&started_token, &FIXED_STAMP, EcssEnumU8::new(1)) + .step_success( + TEST_ID.raw(), + &sender_0, + &started_token, + &FIXED_STAMP, + EcssEnumU8::new(1), + ) .expect("Start success failed"); reporter_with_sender_0 - .completion_success(started_token, &FIXED_STAMP) + .completion_success(TEST_ID.raw(), &sender_0, started_token, &FIXED_STAMP) .expect("Completion success failed"); }); @@ -128,15 +139,15 @@ pub mod crossbeam_test { let (tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap(); let token = reporter_with_sender_1.add_tc(&tc); let accepted_token = reporter_with_sender_1 - .acceptance_success(token, &FIXED_STAMP) + .acceptance_success(TEST_ID.raw(), &sender_1, token, &FIXED_STAMP) .expect("Acceptance success failed"); let started_token = reporter_with_sender_1 - .start_success(accepted_token, &FIXED_STAMP) + .start_success(TEST_ID.raw(), &sender_1, accepted_token, &FIXED_STAMP) .expect("Start success failed"); let fail_code = EcssEnumU16::new(2); let params = FailParams::new_no_fail_data(&FIXED_STAMP, &fail_code); reporter_with_sender_1 - .completion_failure(started_token, params) + .completion_failure(TEST_ID.raw(), &sender_1, started_token, params) .expect("Completion success failed"); }); @@ -145,14 +156,14 @@ pub mod crossbeam_test { let mut tm_buf: [u8; 1024] = [0; 1024]; let mut verif_map = HashMap::new(); while packet_counter < PACKETS_SENT { - let verif_addr = rx + let tm_in_pool = rx .recv_timeout(Duration::from_millis(50)) .expect("Packet reception timeout"); let tm_len; let shared_tm_store = shared_tm_pool.clone_backing_pool(); { let mut rg = shared_tm_store.write().expect("Error locking shared pool"); - let store_guard = rg.read_with_guard(verif_addr); + let store_guard = rg.read_with_guard(tm_in_pool.store_addr); tm_len = store_guard .read(&mut tm_buf) .expect("Error reading TM slice");
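The change exercised in this last test: the verification reporter no longer owns a TM sender, so every reporting call now takes the reporting component's ID and a sender explicitly. A condensed sketch of the happy path, using only the names visible in this diff and the test's TEST_ID and FIXED_STAMP constants:

// Sketch of the reworked verification flow (argument order as used in the test above).
fn report_verification_happy_path(
    reporter: &mut VerificationReporter,
    sender: &TmInSharedPoolSenderWithCrossbeam,
    pus_tc: &PusTcReader,
) {
    let token = reporter.add_tc(pus_tc);
    let accepted = reporter
        .acceptance_success(TEST_ID.raw(), sender, token, &FIXED_STAMP)
        .expect("acceptance success failed");
    let started = reporter
        .start_success(TEST_ID.raw(), sender, accepted, &FIXED_STAMP)
        .expect("start success failed");
    reporter
        .completion_success(TEST_ID.raw(), sender, started, &FIXED_STAMP)
        .expect("completion success failed");
}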