From 5fa955365667b186ef16ff15733e151257155fd5 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Mon, 3 Jul 2023 01:33:13 +0200 Subject: [PATCH 01/39] this is complex.. --- satrs-core/Cargo.toml | 2 +- satrs-core/src/pus/mod.rs | 38 +- satrs-core/src/pus/verification.rs | 2 +- satrs-core/src/tmtc/pus_distrib.rs | 1 + satrs-example/src/pus.rs | 1117 +++++++++++++++------------- 5 files changed, 608 insertions(+), 552 deletions(-) diff --git a/satrs-core/Cargo.toml b/satrs-core/Cargo.toml index bf95cef..a6d8da1 100644 --- a/satrs-core/Cargo.toml +++ b/satrs-core/Cargo.toml @@ -60,7 +60,7 @@ optional = true # version = "0.5.4" # path = "../spacepackets" git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git" -rev = "ef4244c8cb5c" +rev = "28cd8c02ac0" default-features = false [dev-dependencies] diff --git a/satrs-core/src/pus/mod.rs b/satrs-core/src/pus/mod.rs index 9396e03..c74e587 100644 --- a/satrs-core/src/pus/mod.rs +++ b/satrs-core/src/pus/mod.rs @@ -186,13 +186,14 @@ pub mod std_mod { } } - impl EcssTmSenderCore for MpscTmtcInStoreSender { - type Error = MpscPusInStoreSendError; - - fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error> { + impl MpscTmtcInStoreSender { + pub fn send_tmtc( + &mut self, + tmtc: impl SerializablePusPacket, + ) -> Result<(), MpscPusInStoreSendError> { let operation = |mut store: RwLockWriteGuard| { - let (addr, slice) = store.free_element(tm.len_packed())?; - tm.write_to_bytes(slice)?; + let (addr, slice) = store.free_element(tmtc.len_packed())?; + tmtc.write_to_bytes(slice)?; self.sender.send(addr)?; Ok(()) }; @@ -209,26 +210,19 @@ pub mod std_mod { } } + impl EcssTmSenderCore for MpscTmtcInStoreSender { + type Error = MpscPusInStoreSendError; + + fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error> { + self.send_tmtc(tm) + } + } + impl EcssTcSenderCore for MpscTmtcInStoreSender { type Error = MpscPusInStoreSendError; fn send_tc(&mut self, tc: PusTc) -> Result<(), Self::Error> { - let operation = |mut store: RwLockWriteGuard| { - let (addr, slice) = store.free_element(tc.len_packed())?; - tc.write_to_bytes(slice)?; - self.sender.send(addr)?; - Ok(()) - }; - match self.store_helper.write() { - Ok(pool) => operation(pool), - Err(e) => { - if self.ignore_poison_errors { - operation(e.into_inner()) - } else { - Err(MpscPusInStoreSendError::LockError) - } - } - } + self.send_tmtc(tc) } } diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs index c42e758..630afde 100644 --- a/satrs-core/src/pus/verification.rs +++ b/satrs-core/src/pus/verification.rs @@ -84,7 +84,7 @@ use core::mem::size_of; use delegate::delegate; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use spacepackets::ecss::{scheduling, EcssEnumeration, PusPacket}; +use spacepackets::ecss::{scheduling, EcssEnumeration, PusPacket, SerializablePusPacket}; use spacepackets::tc::PusTc; use spacepackets::tm::{PusTm, PusTmSecondaryHeader}; use spacepackets::{CcsdsPacket, PacketId, PacketSequenceCtrl}; diff --git a/satrs-core/src/tmtc/pus_distrib.rs b/satrs-core/src/tmtc/pus_distrib.rs index 9d2290e..bf591da 100644 --- a/satrs-core/src/tmtc/pus_distrib.rs +++ b/satrs-core/src/tmtc/pus_distrib.rs @@ -17,6 +17,7 @@ //! # Example //! //! ```rust +//! use spacepackets::ecss::SerializablePusPacket; //! use satrs_core::tmtc::pus_distrib::{PusDistributor, PusServiceProvider}; //! use satrs_core::tmtc::{ReceivesTc, ReceivesTcCore}; //! 
use spacepackets::SpHeader; diff --git a/satrs-example/src/pus.rs b/satrs-example/src/pus.rs index 64664a5..efa04fd 100644 --- a/satrs-example/src/pus.rs +++ b/satrs-example/src/pus.rs @@ -1,21 +1,22 @@ use crate::requests::{Request, RequestWithToken}; -use crate::tmtc::{PusTcSource, TmStore}; +use crate::tmtc::{PusTcSource, TmStore, PUS_APID}; use log::{info, warn}; use satrs_core::events::EventU32; use satrs_core::hk::{CollectionIntervalFactor, HkRequest}; use satrs_core::mode::{ModeAndSubmode, ModeRequest}; +use satrs_core::objects::ObjectId; use satrs_core::params::Params; use satrs_core::pool::{PoolProvider, StoreAddr}; use satrs_core::pus::event_man::{EventRequest, EventRequestWithToken}; -use satrs_core::pus::mode; +use satrs_core::pus::hk; use satrs_core::pus::mode::Subservice; use satrs_core::pus::scheduling::PusScheduler; use satrs_core::pus::verification::{ pus_11_generic_tc_check, FailParams, StdVerifReporterWithSender, TcStateAccepted, VerificationToken, }; -use satrs_core::pus::{event, GenericTcCheckError}; -use satrs_core::pus::{hk, EcssTmSender, EcssTmSenderCore}; +use satrs_core::pus::{event, EcssTcSenderCore, GenericTcCheckError, MpscTmtcInStoreSender}; +use satrs_core::pus::{mode, EcssTcSender}; use satrs_core::res_code::ResultU16; use satrs_core::seq_count::{SeqCountProviderSyncClonable, SequenceCountProviderCore}; use satrs_core::spacepackets::ecss::{scheduling, PusServiceId}; @@ -33,9 +34,52 @@ use std::convert::TryFrom; use std::rc::Rc; use std::sync::mpsc::{Receiver, Sender}; -pub trait PusTcMultiplexer { - fn route_pus_tc(tc: &PusTc, apid: u16, service: u8, subservice: u8); +// pub trait PusTcRouter { +// type Error; +// fn route_pus_tc( +// &mut self, +// apid: u16, +// service: u8, +// subservice: u8, +// tc: &PusTc, +// ); +// } + +pub type AcceptedTc = (StoreAddr, VerificationToken); + +pub struct PusTcMpscRouter { + test_service_receiver: MpscTmtcInStoreSender, + event_service_receiver: Sender, + sched_service_receiver: Sender, + hk_service_receiver: Sender, + action_service_receiver: Sender, } + +// impl PusTcRouter for PusTcMpscRouter { +// type Error = (); +// +// fn route_pus_tc(&mut self, apid: u16, service: u8, subservice: u8, tc: &PusTc) { +// if apid == PUS_APID { +// if service == PusServiceId::Event as u8 { +// self.event_service_receiver.send_tc(*tc).unwrap(); +// } +// if service == PusServiceId::Action as u8 { +// // TODO: Look up object ID and then route the action request to that object. +// self.action_service_receiver.send_tc(*tc).unwrap(); +// } +// if service == PusServiceId::Housekeeping as u8 { +// // TODO: Look up object ID and then route the HK request to that object. 
+// } +// if service == PusServiceId::Scheduling as u8 { +// self.sched_service_receiver.send_tc(*tc).unwrap(); +// } +// if service == PusServiceId::Test as u8 { +// self.test_service_receiver.send_tc(*tc).unwrap(); +// } +// } +// todo!() +// } +// } pub struct PusReceiver { pub tm_helper: PusTmWithCdsShortHelper, pub tm_args: PusTmArgs, @@ -60,55 +104,58 @@ impl PusTmArgs { } } -#[allow(dead_code)] -pub struct PusTcHandlerBase { - pub tc_store: Box, - pub receiver: Receiver<(StoreAddr, VerificationToken)>, - pub verif_reporter: StdVerifReporterWithSender, - pub time_provider: Box, -} - -pub trait TestHandlerNoPing { - fn handle_no_ping_tc(&mut self, tc: PusTc); -} - -#[allow(dead_code)] -pub struct PusTestTcHandler { - pub base: PusTcHandlerBase, - handler: Option>, -} - -#[allow(dead_code)] -pub struct PusScheduleTcHandler { - pub base: PusTestTcHandler, -} - -impl PusTestTcHandler { - #[allow(dead_code)] - pub fn operation(&mut self) { - let (addr, token) = self.base.receiver.recv().unwrap(); - let data = self.base.tc_store.read(&addr).unwrap(); - let (pus_tc, _len) = PusTc::from_bytes(data).unwrap(); - let stamp: [u8; 7] = [0; 7]; - if pus_tc.subservice() == 1 { - self.base - .verif_reporter - .completion_success(token, Some(&stamp)) - .unwrap(); - } else if let Some(handler) = &mut self.handler { - handler.handle_no_ping_tc(pus_tc); - } - } -} +// #[allow(dead_code)] +// pub struct PusTcHandlerBase { +// pub tc_store: Box, +// pub receiver: Receiver<(StoreAddr, VerificationToken)>, +// pub verif_reporter: StdVerifReporterWithSender, +// pub time_provider: Box, +// } +// +// pub trait TestHandlerNoPing { +// fn handle_no_ping_tc(&mut self, tc: PusTc); +// } +// +// #[allow(dead_code)] +// pub struct PusTestTcHandler { +// pub base: PusTcHandlerBase, +// handler: Option>, +// } +// +// #[allow(dead_code)] +// pub struct PusScheduleTcHandler { +// pub base: PusTestTcHandler, +// } +// +// impl PusTestTcHandler { +// #[allow(dead_code)] +// pub fn operation(&mut self) { +// let (addr, token) = self.base.receiver.recv().unwrap(); +// let data = self.base.tc_store.read(&addr).unwrap(); +// let (pus_tc, _len) = PusTc::from_bytes(data).unwrap(); +// let stamp: [u8; 7] = [0; 7]; +// if pus_tc.subservice() == 1 { +// self.base +// .verif_reporter +// .completion_success(token, Some(&stamp)) +// .unwrap(); +// } else if let Some(handler) = &mut self.handler { +// handler.handle_no_ping_tc(pus_tc); +// } +// } +// } pub struct PusTcArgs { pub event_request_tx: Sender, + /// This routes all telecommands to their respective recipients + pub pus_router: PusTcMpscRouter, /// Request routing helper. Maps targeted requests to their recipient. - pub request_map: HashMap>, + //pub request_map: HashMap>, /// Required for scheduling of telecommands. 
- pub tc_source: PusTcSource, + //pub tc_source: PusTcSource, + /// Used to send events from within the TC router pub event_sender: Sender<(EventU32, Option)>, - pub scheduler: Rc>, + //pub scheduler: Rc>, } struct TimeStampHelper { @@ -168,10 +215,24 @@ impl PusServiceProvider for PusReceiver { let service = PusServiceId::try_from(service); match service { Ok(standard_service) => match standard_service { - PusServiceId::Test => self.handle_test_service(pus_tc, accepted_token), - PusServiceId::Housekeeping => self.handle_hk_request(pus_tc, accepted_token), - PusServiceId::Event => self.handle_event_request(pus_tc, accepted_token), - PusServiceId::Scheduling => self.handle_scheduled_tc(pus_tc, accepted_token), + PusServiceId::Test => self + .tc_args + .pus_router + .test_service_receiver + .send_tc(*pus_tc), + PusServiceId::Housekeeping => { + self.tc_args.pus_router.hk_service_receiver.send_tc(*pus_tc) + } //self.handle_hk_request(pus_tc, accepted_token), + PusServiceId::Event => self + .tc_args + .pus_router + .event_service_receiver + .send_tc(*pus_tc), //self.handle_event_request(pus_tc, accepted_token), + PusServiceId::Scheduling => self + .tc_args + .pus_router + .sched_service_receiver + .send_tc(*pus_tc), //self.handle_scheduled_tc(pus_tc, accepted_token), _ => self .tm_args .verif_reporter @@ -212,478 +273,478 @@ impl PusServiceProvider for PusReceiver { } } -impl PusReceiver { - fn handle_test_service(&mut self, pus_tc: &PusTc, token: VerificationToken) { - match PusPacket::subservice(pus_tc) { - 1 => { - info!("Received PUS ping command TC[17,1]"); - info!("Sending ping reply PUS TM[17,2]"); - let start_token = self - .tm_args - .verif_reporter - .start_success(token, Some(self.stamp_helper.stamp())) - .expect("Error sending start success"); - let ping_reply = self.tm_helper.create_pus_tm_timestamp_now( - 17, - 2, - None, - self.tm_args.seq_count_provider.get(), - ); - let addr = self.tm_args.tm_store.add_pus_tm(&ping_reply); - self.tm_args - .tm_tx - .send(addr) - .expect("Sending TM to TM funnel failed"); - self.tm_args.seq_count_provider.increment(); - self.tm_args - .verif_reporter - .completion_success(start_token, Some(self.stamp_helper.stamp())) - .expect("Error sending completion success"); - } - 128 => { - info!("Generating test event"); - self.tc_args - .event_sender - .send((TEST_EVENT.into(), None)) - .expect("Sending test event failed"); - let start_token = self - .tm_args - .verif_reporter - .start_success(token, Some(self.stamp_helper.stamp())) - .expect("Error sending start success"); - self.tm_args - .verif_reporter - .completion_success(start_token, Some(self.stamp_helper.stamp())) - .expect("Error sending completion success"); - } - _ => { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &tmtc_err::INVALID_PUS_SUBSERVICE, - None, - ), - ) - .expect("Sending start failure TM failed"); - } - } - } - - fn handle_hk_request(&mut self, pus_tc: &PusTc, token: VerificationToken) { - if pus_tc.user_data().is_none() { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &tmtc_err::NOT_ENOUGH_APP_DATA, - None, - ), - ) - .expect("Sending start failure TM failed"); - return; - } - let user_data = pus_tc.user_data().unwrap(); - if user_data.len() < 8 { - let err = if user_data.len() < 4 { - &hk_err::TARGET_ID_MISSING - } else { - &hk_err::UNIQUE_ID_MISSING - }; - self.tm_args - .verif_reporter - .start_failure( - token, - 
FailParams::new(Some(self.stamp_helper.stamp()), err, None), - ) - .expect("Sending start failure TM failed"); - return; - } - let addressable_id = AddressableId::from_raw_be(user_data).unwrap(); - if !self - .tc_args - .request_map - .contains_key(&addressable_id.target_id) - { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &hk_err::UNKNOWN_TARGET_ID, - None, - ), - ) - .expect("Sending start failure TM failed"); - return; - } - let send_request = |target: TargetId, request: HkRequest| { - let sender = self - .tc_args - .request_map - .get(&addressable_id.target_id) - .unwrap(); - sender - .send(RequestWithToken::new( - target, - Request::HkRequest(request), - token, - )) - .unwrap_or_else(|_| panic!("Sending HK request {request:?} failed")); - }; - if PusPacket::subservice(pus_tc) == hk::Subservice::TcEnableHkGeneration as u8 { - send_request( - addressable_id.target_id, - HkRequest::Enable(addressable_id.unique_id), - ); - } else if PusPacket::subservice(pus_tc) == hk::Subservice::TcDisableHkGeneration as u8 { - send_request( - addressable_id.target_id, - HkRequest::Disable(addressable_id.unique_id), - ); - } else if PusPacket::subservice(pus_tc) == hk::Subservice::TcGenerateOneShotHk as u8 { - send_request( - addressable_id.target_id, - HkRequest::OneShot(addressable_id.unique_id), - ); - } else if PusPacket::subservice(pus_tc) - == hk::Subservice::TcModifyHkCollectionInterval as u8 - { - if user_data.len() < 12 { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &hk_err::COLLECTION_INTERVAL_MISSING, - None, - ), - ) - .expect("Sending start failure TM failed"); - return; - } - send_request( - addressable_id.target_id, - HkRequest::ModifyCollectionInterval( - addressable_id.unique_id, - CollectionIntervalFactor::from_be_bytes(user_data[8..12].try_into().unwrap()), - ), - ); - } - } - - fn handle_event_request(&mut self, pus_tc: &PusTc, token: VerificationToken) { - let send_start_failure = |vr: &mut StdVerifReporterWithSender, - timestamp: &[u8], - failure_code: &ResultU16, - failure_data: Option<&[u8]>| { - vr.start_failure( - token, - FailParams::new(Some(timestamp), failure_code, failure_data), - ) - .expect("Sending start failure TM failed"); - }; - let send_start_acceptance = |vr: &mut StdVerifReporterWithSender, timestamp: &[u8]| { - vr.start_success(token, Some(timestamp)) - .expect("Sending start success TM failed") - }; - if pus_tc.user_data().is_none() { - send_start_failure( - &mut self.tm_args.verif_reporter, - self.stamp_helper.stamp(), - &tmtc_err::NOT_ENOUGH_APP_DATA, - None, - ); - return; - } - let app_data = pus_tc.user_data().unwrap(); - if app_data.len() < 4 { - send_start_failure( - &mut self.tm_args.verif_reporter, - self.stamp_helper.stamp(), - &tmtc_err::NOT_ENOUGH_APP_DATA, - None, - ); - return; - } - let event_id = EventU32::from(u32::from_be_bytes(app_data.try_into().unwrap())); - match PusPacket::subservice(pus_tc).try_into() { - Ok(event::Subservice::TcEnableEventGeneration) => { - let start_token = send_start_acceptance( - &mut self.tm_args.verif_reporter, - self.stamp_helper.stamp(), - ); - self.tc_args - .event_request_tx - .send(EventRequestWithToken { - request: EventRequest::Enable(event_id), - token: start_token, - }) - .expect("Sending event request failed"); - } - Ok(event::Subservice::TcDisableEventGeneration) => { - let start_token = send_start_acceptance( - &mut self.tm_args.verif_reporter, - 
self.stamp_helper.stamp(), - ); - self.tc_args - .event_request_tx - .send(EventRequestWithToken { - request: EventRequest::Disable(event_id), - token: start_token, - }) - .expect("Sending event request failed"); - } - _ => { - send_start_failure( - &mut self.tm_args.verif_reporter, - self.stamp_helper.stamp(), - &tmtc_err::INVALID_PUS_SUBSERVICE, - None, - ); - } - } - } - - fn handle_scheduled_tc(&mut self, pus_tc: &PusTc, token: VerificationToken) { - let subservice = match pus_11_generic_tc_check(pus_tc) { - Ok(subservice) => subservice, - Err(e) => match e { - GenericTcCheckError::NotEnoughAppData => { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &tmtc_err::NOT_ENOUGH_APP_DATA, - None, - ), - ) - .expect("could not sent verification error"); - return; - } - GenericTcCheckError::InvalidSubservice => { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &tmtc_err::INVALID_PUS_SUBSERVICE, - None, - ), - ) - .expect("could not sent verification error"); - return; - } - }, - }; - match subservice { - scheduling::Subservice::TcEnableScheduling => { - let start_token = self - .tm_args - .verif_reporter - .start_success(token, Some(self.stamp_helper.stamp())) - .expect("Error sending start success"); - - let mut scheduler = self.tc_args.scheduler.borrow_mut(); - scheduler.enable(); - if scheduler.is_enabled() { - self.tm_args - .verif_reporter - .completion_success(start_token, Some(self.stamp_helper.stamp())) - .expect("Error sending completion success"); - } else { - panic!("Failed to enable scheduler"); - } - } - scheduling::Subservice::TcDisableScheduling => { - let start_token = self - .tm_args - .verif_reporter - .start_success(token, Some(self.stamp_helper.stamp())) - .expect("Error sending start success"); - - let mut scheduler = self.tc_args.scheduler.borrow_mut(); - scheduler.disable(); - if !scheduler.is_enabled() { - self.tm_args - .verif_reporter - .completion_success(start_token, Some(self.stamp_helper.stamp())) - .expect("Error sending completion success"); - } else { - panic!("Failed to disable scheduler"); - } - } - scheduling::Subservice::TcResetScheduling => { - let start_token = self - .tm_args - .verif_reporter - .start_success(token, Some(self.stamp_helper.stamp())) - .expect("Error sending start success"); - - let mut pool = self - .tc_args - .tc_source - .tc_store - .pool - .write() - .expect("Locking pool failed"); - - let mut scheduler = self.tc_args.scheduler.borrow_mut(); - scheduler - .reset(pool.as_mut()) - .expect("Error resetting TC Pool"); - drop(scheduler); - - self.tm_args - .verif_reporter - .completion_success(start_token, Some(self.stamp_helper.stamp())) - .expect("Error sending completion success"); - } - scheduling::Subservice::TcInsertActivity => { - let start_token = self - .tm_args - .verif_reporter - .start_success(token, Some(self.stamp_helper.stamp())) - .expect("error sending start success"); - - let mut pool = self - .tc_args - .tc_source - .tc_store - .pool - .write() - .expect("locking pool failed"); - let mut scheduler = self.tc_args.scheduler.borrow_mut(); - scheduler - .insert_wrapped_tc::(pus_tc, pool.as_mut()) - .expect("insertion of activity into pool failed"); - drop(scheduler); - - self.tm_args - .verif_reporter - .completion_success(start_token, Some(self.stamp_helper.stamp())) - .expect("sending completion success failed"); - } - _ => {} - } - } - - fn handle_mode_service(&mut self, pus_tc: &PusTc, 
token: VerificationToken) { - let mut app_data_len = 0; - let app_data = pus_tc.user_data(); - if app_data.is_some() { - app_data_len = pus_tc.user_data().unwrap().len(); - } - if app_data_len < 4 { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &tmtc_err::NOT_ENOUGH_APP_DATA, - Some(format!("expected {} bytes, found {}", 4, app_data_len).as_bytes()), - ), - ) - .expect("Sending start failure TM failed"); - } - let app_data = app_data.unwrap(); - let mut invalid_subservice_handler = || { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &tmtc_err::INVALID_PUS_SUBSERVICE, - Some(&[PusPacket::subservice(pus_tc)]), - ), - ) - .expect("Sending start failure TM failed"); - }; - let subservice = mode::Subservice::try_from(PusPacket::subservice(pus_tc)); - if let Ok(subservice) = subservice { - let forward_mode_request = |target_id, mode_request: ModeRequest| match self - .tc_args - .request_map - .get(&target_id) - { - None => warn!("not mode request recipient for target ID {target_id} found"), - Some(sender_to_recipient) => { - sender_to_recipient - .send(RequestWithToken::new( - target_id, - Request::ModeRequest(mode_request), - token, - )) - .expect("sending mode request failed"); - } - }; - let mut valid_subservice = true; - match subservice { - Subservice::TcSetMode => { - let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); - let min_len = ModeAndSubmode::raw_len() + 4; - if app_data_len < min_len { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &tmtc_err::NOT_ENOUGH_APP_DATA, - Some( - format!("expected {min_len} bytes, found {app_data_len}") - .as_bytes(), - ), - ), - ) - .expect("Sending start failure TM failed"); - } - // Should never fail after size check - let mode_submode = ModeAndSubmode::from_be_bytes( - app_data[4..4 + ModeAndSubmode::raw_len()] - .try_into() - .unwrap(), - ) - .unwrap(); - forward_mode_request(target_id, ModeRequest::SetMode(mode_submode)); - } - Subservice::TcReadMode => { - let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); - forward_mode_request(target_id, ModeRequest::ReadMode); - } - Subservice::TcAnnounceMode => { - let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); - forward_mode_request(target_id, ModeRequest::AnnounceMode); - } - Subservice::TcAnnounceModeRecursive => { - let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); - forward_mode_request(target_id, ModeRequest::AnnounceModeRecursive); - } - _ => { - warn!("Can not process mode request with subservice {subservice:?}"); - invalid_subservice_handler(); - valid_subservice = false; - } - } - if valid_subservice { - self.tm_args - .verif_reporter - .start_success(token, Some(self.stamp_helper.stamp())) - .expect("sending start success TM failed"); - } - } else { - invalid_subservice_handler(); - } - } -} +// impl PusReceiver { +// fn handle_test_service(&mut self, pus_tc: &PusTc, token: VerificationToken) { +// match PusPacket::subservice(pus_tc) { +// 1 => { +// info!("Received PUS ping command TC[17,1]"); +// info!("Sending ping reply PUS TM[17,2]"); +// let start_token = self +// .tm_args +// .verif_reporter +// .start_success(token, Some(self.stamp_helper.stamp())) +// .expect("Error sending start success"); +// let ping_reply = self.tm_helper.create_pus_tm_timestamp_now( +// 17, +// 2, +// None, +// 
self.tm_args.seq_count_provider.get(), +// ); +// let addr = self.tm_args.tm_store.add_pus_tm(&ping_reply); +// self.tm_args +// .tm_tx +// .send(addr) +// .expect("Sending TM to TM funnel failed"); +// self.tm_args.seq_count_provider.increment(); +// self.tm_args +// .verif_reporter +// .completion_success(start_token, Some(self.stamp_helper.stamp())) +// .expect("Error sending completion success"); +// } +// 128 => { +// info!("Generating test event"); +// self.tc_args +// .event_sender +// .send((TEST_EVENT.into(), None)) +// .expect("Sending test event failed"); +// let start_token = self +// .tm_args +// .verif_reporter +// .start_success(token, Some(self.stamp_helper.stamp())) +// .expect("Error sending start success"); +// self.tm_args +// .verif_reporter +// .completion_success(start_token, Some(self.stamp_helper.stamp())) +// .expect("Error sending completion success"); +// } +// _ => { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::INVALID_PUS_SUBSERVICE, +// None, +// ), +// ) +// .expect("Sending start failure TM failed"); +// } +// } +// } +// +// fn handle_hk_request(&mut self, pus_tc: &PusTc, token: VerificationToken) { +// if pus_tc.user_data().is_none() { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::NOT_ENOUGH_APP_DATA, +// None, +// ), +// ) +// .expect("Sending start failure TM failed"); +// return; +// } +// let user_data = pus_tc.user_data().unwrap(); +// if user_data.len() < 8 { +// let err = if user_data.len() < 4 { +// &hk_err::TARGET_ID_MISSING +// } else { +// &hk_err::UNIQUE_ID_MISSING +// }; +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new(Some(self.stamp_helper.stamp()), err, None), +// ) +// .expect("Sending start failure TM failed"); +// return; +// } +// let addressable_id = AddressableId::from_raw_be(user_data).unwrap(); +// if !self +// .tc_args +// .request_map +// .contains_key(&addressable_id.target_id) +// { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &hk_err::UNKNOWN_TARGET_ID, +// None, +// ), +// ) +// .expect("Sending start failure TM failed"); +// return; +// } +// let send_request = |target: TargetId, request: HkRequest| { +// let sender = self +// .tc_args +// .request_map +// .get(&addressable_id.target_id) +// .unwrap(); +// sender +// .send(RequestWithToken::new( +// target, +// Request::HkRequest(request), +// token, +// )) +// .unwrap_or_else(|_| panic!("Sending HK request {request:?} failed")); +// }; +// if PusPacket::subservice(pus_tc) == hk::Subservice::TcEnableHkGeneration as u8 { +// send_request( +// addressable_id.target_id, +// HkRequest::Enable(addressable_id.unique_id), +// ); +// } else if PusPacket::subservice(pus_tc) == hk::Subservice::TcDisableHkGeneration as u8 { +// send_request( +// addressable_id.target_id, +// HkRequest::Disable(addressable_id.unique_id), +// ); +// } else if PusPacket::subservice(pus_tc) == hk::Subservice::TcGenerateOneShotHk as u8 { +// send_request( +// addressable_id.target_id, +// HkRequest::OneShot(addressable_id.unique_id), +// ); +// } else if PusPacket::subservice(pus_tc) +// == hk::Subservice::TcModifyHkCollectionInterval as u8 +// { +// if user_data.len() < 12 { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// 
Some(self.stamp_helper.stamp()), +// &hk_err::COLLECTION_INTERVAL_MISSING, +// None, +// ), +// ) +// .expect("Sending start failure TM failed"); +// return; +// } +// send_request( +// addressable_id.target_id, +// HkRequest::ModifyCollectionInterval( +// addressable_id.unique_id, +// CollectionIntervalFactor::from_be_bytes(user_data[8..12].try_into().unwrap()), +// ), +// ); +// } +// } +// +// fn handle_event_request(&mut self, pus_tc: &PusTc, token: VerificationToken) { +// let send_start_failure = |vr: &mut StdVerifReporterWithSender, +// timestamp: &[u8], +// failure_code: &ResultU16, +// failure_data: Option<&[u8]>| { +// vr.start_failure( +// token, +// FailParams::new(Some(timestamp), failure_code, failure_data), +// ) +// .expect("Sending start failure TM failed"); +// }; +// let send_start_acceptance = |vr: &mut StdVerifReporterWithSender, timestamp: &[u8]| { +// vr.start_success(token, Some(timestamp)) +// .expect("Sending start success TM failed") +// }; +// if pus_tc.user_data().is_none() { +// send_start_failure( +// &mut self.tm_args.verif_reporter, +// self.stamp_helper.stamp(), +// &tmtc_err::NOT_ENOUGH_APP_DATA, +// None, +// ); +// return; +// } +// let app_data = pus_tc.user_data().unwrap(); +// if app_data.len() < 4 { +// send_start_failure( +// &mut self.tm_args.verif_reporter, +// self.stamp_helper.stamp(), +// &tmtc_err::NOT_ENOUGH_APP_DATA, +// None, +// ); +// return; +// } +// let event_id = EventU32::from(u32::from_be_bytes(app_data.try_into().unwrap())); +// match PusPacket::subservice(pus_tc).try_into() { +// Ok(event::Subservice::TcEnableEventGeneration) => { +// let start_token = send_start_acceptance( +// &mut self.tm_args.verif_reporter, +// self.stamp_helper.stamp(), +// ); +// self.tc_args +// .event_request_tx +// .send(EventRequestWithToken { +// request: EventRequest::Enable(event_id), +// token: start_token, +// }) +// .expect("Sending event request failed"); +// } +// Ok(event::Subservice::TcDisableEventGeneration) => { +// let start_token = send_start_acceptance( +// &mut self.tm_args.verif_reporter, +// self.stamp_helper.stamp(), +// ); +// self.tc_args +// .event_request_tx +// .send(EventRequestWithToken { +// request: EventRequest::Disable(event_id), +// token: start_token, +// }) +// .expect("Sending event request failed"); +// } +// _ => { +// send_start_failure( +// &mut self.tm_args.verif_reporter, +// self.stamp_helper.stamp(), +// &tmtc_err::INVALID_PUS_SUBSERVICE, +// None, +// ); +// } +// } +// } +// +// fn handle_scheduled_tc(&mut self, pus_tc: &PusTc, token: VerificationToken) { +// let subservice = match pus_11_generic_tc_check(pus_tc) { +// Ok(subservice) => subservice, +// Err(e) => match e { +// GenericTcCheckError::NotEnoughAppData => { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::NOT_ENOUGH_APP_DATA, +// None, +// ), +// ) +// .expect("could not sent verification error"); +// return; +// } +// GenericTcCheckError::InvalidSubservice => { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::INVALID_PUS_SUBSERVICE, +// None, +// ), +// ) +// .expect("could not sent verification error"); +// return; +// } +// }, +// }; +// match subservice { +// scheduling::Subservice::TcEnableScheduling => { +// let start_token = self +// .tm_args +// .verif_reporter +// .start_success(token, Some(self.stamp_helper.stamp())) +// .expect("Error sending start 
success"); +// +// let mut scheduler = self.tc_args.scheduler.borrow_mut(); +// scheduler.enable(); +// if scheduler.is_enabled() { +// self.tm_args +// .verif_reporter +// .completion_success(start_token, Some(self.stamp_helper.stamp())) +// .expect("Error sending completion success"); +// } else { +// panic!("Failed to enable scheduler"); +// } +// } +// scheduling::Subservice::TcDisableScheduling => { +// let start_token = self +// .tm_args +// .verif_reporter +// .start_success(token, Some(self.stamp_helper.stamp())) +// .expect("Error sending start success"); +// +// let mut scheduler = self.tc_args.scheduler.borrow_mut(); +// scheduler.disable(); +// if !scheduler.is_enabled() { +// self.tm_args +// .verif_reporter +// .completion_success(start_token, Some(self.stamp_helper.stamp())) +// .expect("Error sending completion success"); +// } else { +// panic!("Failed to disable scheduler"); +// } +// } +// scheduling::Subservice::TcResetScheduling => { +// let start_token = self +// .tm_args +// .verif_reporter +// .start_success(token, Some(self.stamp_helper.stamp())) +// .expect("Error sending start success"); +// +// let mut pool = self +// .tc_args +// .tc_source +// .tc_store +// .pool +// .write() +// .expect("Locking pool failed"); +// +// let mut scheduler = self.tc_args.scheduler.borrow_mut(); +// scheduler +// .reset(pool.as_mut()) +// .expect("Error resetting TC Pool"); +// drop(scheduler); +// +// self.tm_args +// .verif_reporter +// .completion_success(start_token, Some(self.stamp_helper.stamp())) +// .expect("Error sending completion success"); +// } +// scheduling::Subservice::TcInsertActivity => { +// let start_token = self +// .tm_args +// .verif_reporter +// .start_success(token, Some(self.stamp_helper.stamp())) +// .expect("error sending start success"); +// +// let mut pool = self +// .tc_args +// .tc_source +// .tc_store +// .pool +// .write() +// .expect("locking pool failed"); +// let mut scheduler = self.tc_args.scheduler.borrow_mut(); +// scheduler +// .insert_wrapped_tc::(pus_tc, pool.as_mut()) +// .expect("insertion of activity into pool failed"); +// drop(scheduler); +// +// self.tm_args +// .verif_reporter +// .completion_success(start_token, Some(self.stamp_helper.stamp())) +// .expect("sending completion success failed"); +// } +// _ => {} +// } +// } +// +// fn handle_mode_service(&mut self, pus_tc: &PusTc, token: VerificationToken) { +// let mut app_data_len = 0; +// let app_data = pus_tc.user_data(); +// if app_data.is_some() { +// app_data_len = pus_tc.user_data().unwrap().len(); +// } +// if app_data_len < 4 { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::NOT_ENOUGH_APP_DATA, +// Some(format!("expected {} bytes, found {}", 4, app_data_len).as_bytes()), +// ), +// ) +// .expect("Sending start failure TM failed"); +// } +// let app_data = app_data.unwrap(); +// let mut invalid_subservice_handler = || { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::INVALID_PUS_SUBSERVICE, +// Some(&[PusPacket::subservice(pus_tc)]), +// ), +// ) +// .expect("Sending start failure TM failed"); +// }; +// let subservice = mode::Subservice::try_from(PusPacket::subservice(pus_tc)); +// if let Ok(subservice) = subservice { +// let forward_mode_request = |target_id, mode_request: ModeRequest| match self +// .tc_args +// .request_map +// .get(&target_id) +// { +// None => warn!("not mode 
request recipient for target ID {target_id} found"), +// Some(sender_to_recipient) => { +// sender_to_recipient +// .send(RequestWithToken::new( +// target_id, +// Request::ModeRequest(mode_request), +// token, +// )) +// .expect("sending mode request failed"); +// } +// }; +// let mut valid_subservice = true; +// match subservice { +// Subservice::TcSetMode => { +// let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); +// let min_len = ModeAndSubmode::raw_len() + 4; +// if app_data_len < min_len { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::NOT_ENOUGH_APP_DATA, +// Some( +// format!("expected {min_len} bytes, found {app_data_len}") +// .as_bytes(), +// ), +// ), +// ) +// .expect("Sending start failure TM failed"); +// } +// // Should never fail after size check +// let mode_submode = ModeAndSubmode::from_be_bytes( +// app_data[4..4 + ModeAndSubmode::raw_len()] +// .try_into() +// .unwrap(), +// ) +// .unwrap(); +// forward_mode_request(target_id, ModeRequest::SetMode(mode_submode)); +// } +// Subservice::TcReadMode => { +// let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); +// forward_mode_request(target_id, ModeRequest::ReadMode); +// } +// Subservice::TcAnnounceMode => { +// let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); +// forward_mode_request(target_id, ModeRequest::AnnounceMode); +// } +// Subservice::TcAnnounceModeRecursive => { +// let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); +// forward_mode_request(target_id, ModeRequest::AnnounceModeRecursive); +// } +// _ => { +// warn!("Can not process mode request with subservice {subservice:?}"); +// invalid_subservice_handler(); +// valid_subservice = false; +// } +// } +// if valid_subservice { +// self.tm_args +// .verif_reporter +// .start_success(token, Some(self.stamp_helper.stamp())) +// .expect("sending start success TM failed"); +// } +// } else { +// invalid_subservice_handler(); +// } +// } +// } -- 2.43.0 From e786773a65588ed2b1453225cf70b2618a9ebd9b Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Mon, 3 Jul 2023 18:22:08 +0200 Subject: [PATCH 02/39] at least it compiles again --- satrs-core/src/hal/host/udp_server.rs | 1 + satrs-core/src/pus/event.rs | 12 ++- satrs-core/src/pus/mod.rs | 1 + satrs-core/src/pus/scheduling.rs | 1 + satrs-core/src/pus/verification.rs | 69 +++++++----- satrs-core/src/tmtc/ccsds_distrib.rs | 1 + satrs-core/tests/pus_verification.rs | 2 +- satrs-example/src/{pus.rs => pus/mod.rs} | 127 +++++++++++++++++++---- satrs-example/src/pus/test.rs | 12 +++ satrs-example/src/tmtc.rs | 22 ++-- 10 files changed, 191 insertions(+), 57 deletions(-) rename satrs-example/src/{pus.rs => pus/mod.rs} (86%) create mode 100644 satrs-example/src/pus/test.rs diff --git a/satrs-core/src/hal/host/udp_server.rs b/satrs-core/src/hal/host/udp_server.rs index ca0a26e..a83f4f8 100644 --- a/satrs-core/src/hal/host/udp_server.rs +++ b/satrs-core/src/hal/host/udp_server.rs @@ -141,6 +141,7 @@ impl UdpTcServer { mod tests { use crate::hal::host::udp_server::{ReceiveResult, UdpTcServer}; use crate::tmtc::ReceivesTcCore; + use spacepackets::ecss::SerializablePusPacket; use spacepackets::tc::PusTc; use spacepackets::SpHeader; use std::boxed::Box; diff --git a/satrs-core/src/pus/event.rs b/satrs-core/src/pus/event.rs index 146986c..9bea69c 100644 --- a/satrs-core/src/pus/event.rs +++ b/satrs-core/src/pus/event.rs @@ -243,6 +243,7 @@ mod tests { use 
super::*; use crate::events::{EventU32, Severity}; use crate::pus::tests::CommonTmInfo; + use crate::pus::EcssSender; use crate::SenderId; use spacepackets::ByteConversionError; use std::collections::VecDeque; @@ -266,12 +267,15 @@ mod tests { pub service_queue: VecDeque, } - impl EcssTmSenderCore for TestSender { - type Error = (); - + impl EcssSender for TestSender { fn id(&self) -> SenderId { 0 } + } + + impl EcssTmSenderCore for TestSender { + type Error = (); + fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error> { assert!(tm.source_data().is_some()); let src_data = tm.source_data().unwrap(); @@ -413,7 +417,7 @@ mod tests { let err = reporter.event_info(sender, &time_stamp_empty, event, None); assert!(err.is_err()); let err = err.unwrap_err(); - if let EcssTmErrorWithSend::EcssTmError(EcssTmtcError::ByteConversionError( + if let EcssTmtcErrorWithSend::EcssTmtcError(EcssTmtcError::ByteConversionError( ByteConversionError::ToSliceTooSmall(missmatch), )) = err { diff --git a/satrs-core/src/pus/mod.rs b/satrs-core/src/pus/mod.rs index c74e587..1fd7a23 100644 --- a/satrs-core/src/pus/mod.rs +++ b/satrs-core/src/pus/mod.rs @@ -20,6 +20,7 @@ pub mod verification; #[cfg(feature = "alloc")] pub use alloc_mod::*; +use crate::pus::verification::TcStateToken; use crate::SenderId; #[cfg(feature = "std")] pub use std_mod::*; diff --git a/satrs-core/src/pus/scheduling.rs b/satrs-core/src/pus/scheduling.rs index 63d827f..cf3d8e0 100644 --- a/satrs-core/src/pus/scheduling.rs +++ b/satrs-core/src/pus/scheduling.rs @@ -620,6 +620,7 @@ impl PusScheduler { mod tests { use super::*; use crate::pool::{LocalPool, PoolCfg, PoolProvider, StoreAddr, StoreError}; + use spacepackets::ecss::SerializablePusPacket; use spacepackets::tc::{PusTc, PusTcSecondaryHeader}; use spacepackets::time::{cds, TimeWriter, UnixTimestamp}; use spacepackets::SpHeader; diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs index 630afde..f9efd16 100644 --- a/satrs-core/src/pus/verification.rs +++ b/satrs-core/src/pus/verification.rs @@ -207,15 +207,19 @@ pub struct TcStateNone; pub struct TcStateAccepted; #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct TcStateStarted; +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct TcStateCompleted; impl WasAtLeastAccepted for TcStateAccepted {} impl WasAtLeastAccepted for TcStateStarted {} +impl WasAtLeastAccepted for TcStateCompleted {} #[derive(Debug, Eq, PartialEq)] pub enum TcStateToken { None(VerificationToken), Accepted(VerificationToken), Started(VerificationToken), + Completed(VerificationToken), } impl From> for TcStateToken { @@ -236,6 +240,12 @@ impl From> for TcStateToken { } } +impl From> for TcStateToken { + fn from(t: VerificationToken) -> Self { + TcStateToken::Completed(t) + } +} + impl VerificationToken { fn new(req_id: RequestId) -> VerificationToken { VerificationToken { @@ -1529,11 +1539,11 @@ mod tests { use crate::pool::{LocalPool, PoolCfg, SharedPool}; use crate::pus::tests::CommonTmInfo; use crate::pus::verification::{ - EcssTmError, EcssTmSenderCore, FailParams, FailParamsWithStep, MpscVerifSender, RequestId, - TcStateNone, VerificationReporter, VerificationReporterCfg, VerificationReporterWithSender, - VerificationToken, + EcssTmSenderCore, EcssTmtcError, FailParams, FailParamsWithStep, MpscVerifSender, + RequestId, TcStateNone, VerificationReporter, VerificationReporterCfg, + VerificationReporterWithSender, VerificationToken, }; - use crate::pus::EcssTmErrorWithSend; + use crate::pus::{EcssSender, 
EcssTmtcErrorWithSend}; use crate::seq_count::SeqCountProviderSimple; use crate::SenderId; use alloc::boxed::Box; @@ -1541,6 +1551,7 @@ mod tests { use spacepackets::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU8, EcssEnumeration, PusPacket}; use spacepackets::tc::{PusTc, PusTcSecondaryHeader}; use spacepackets::tm::PusTm; + use spacepackets::util::UnsignedEnum; use spacepackets::{ByteConversionError, CcsdsPacket, SpHeader}; use std::collections::VecDeque; use std::sync::{mpsc, Arc, RwLock}; @@ -1567,12 +1578,17 @@ mod tests { pub service_queue: VecDeque, } - impl EcssTmSenderCore for TestSender { - type Error = (); - + impl EcssSender for TestSender { fn id(&self) -> SenderId { 0 } + fn name(&self) -> &'static str { + "test_sender" + } + } + + impl EcssTmSenderCore for TestSender { + type Error = (); fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error> { assert_eq!(PusPacket::service(&tm), 1); @@ -1595,10 +1611,6 @@ mod tests { }); Ok(()) } - - fn name(&self) -> &'static str { - "test_sender" - } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] @@ -1606,11 +1618,13 @@ mod tests { #[derive(Default, Clone)] struct FallibleSender {} - impl EcssTmSenderCore for FallibleSender { - type Error = DummyError; + impl EcssSender for FallibleSender { fn id(&self) -> SenderId { 0 } + } + impl EcssTmSenderCore for FallibleSender { + type Error = DummyError; fn send_tm(&mut self, _: PusTm) -> Result<(), Self::Error> { Err(DummyError {}) } @@ -1747,7 +1761,7 @@ mod tests { let err = res.unwrap_err(); assert_eq!(err.1, tok); match err.0 { - EcssTmErrorWithSend::SendError(e) => { + EcssTmtcErrorWithSend::SendError(e) => { assert_eq!(e, DummyError {}) } _ => panic!("{}", format!("Unexpected error {:?}", err.0)), @@ -1817,18 +1831,20 @@ mod tests { let err_with_token = res.unwrap_err(); assert_eq!(err_with_token.1, tok); match err_with_token.0 { - EcssTmErrorWithSend::EcssTmError(EcssTmtcError::ByteConversionError(e)) => match e { - ByteConversionError::ToSliceTooSmall(missmatch) => { - assert_eq!( - missmatch.expected, - fail_data.len() + RequestId::SIZE_AS_BYTES + fail_code.byte_width() - ); - assert_eq!(missmatch.found, b.rep().allowed_source_data_len()); + EcssTmtcErrorWithSend::EcssTmtcError(EcssTmtcError::ByteConversionError(e)) => { + match e { + ByteConversionError::ToSliceTooSmall(missmatch) => { + assert_eq!( + missmatch.expected, + fail_data.len() + RequestId::SIZE_AS_BYTES + fail_code.size() + ); + assert_eq!(missmatch.found, b.rep().allowed_source_data_len()); + } + _ => { + panic!("{}", format!("Unexpected error {:?}", e)) + } } - _ => { - panic!("{}", format!("Unexpected error {:?}", e)) - } - }, + } _ => { panic!("{}", format!("Unexpected error {:?}", err_with_token.0)) } @@ -2386,7 +2402,8 @@ mod tests { let shared_tm_pool: SharedPool = Arc::new(RwLock::new(Box::new(LocalPool::new(pool_cfg.clone())))); let (verif_tx, verif_rx) = mpsc::channel(); - let sender = MpscVerifSender::new(shared_tm_pool.clone(), verif_tx); + let sender = + MpscVerifSender::new(0, "Verification Sender", shared_tm_pool.clone(), verif_tx); let cfg = VerificationReporterCfg::new( TEST_APID, Box::new(SeqCountProviderSimple::default()), diff --git a/satrs-core/src/tmtc/ccsds_distrib.rs b/satrs-core/src/tmtc/ccsds_distrib.rs index 1e45680..6cb4987 100644 --- a/satrs-core/src/tmtc/ccsds_distrib.rs +++ b/satrs-core/src/tmtc/ccsds_distrib.rs @@ -224,6 +224,7 @@ impl CcsdsDistributor { pub(crate) mod tests { use super::*; use crate::tmtc::ccsds_distrib::{CcsdsDistributor, CcsdsPacketHandler}; + use 
spacepackets::ecss::SerializablePusPacket; use spacepackets::tc::PusTc; use spacepackets::CcsdsPacket; use std::collections::VecDeque; diff --git a/satrs-core/tests/pus_verification.rs b/satrs-core/tests/pus_verification.rs index 5a8fce2..035904b 100644 --- a/satrs-core/tests/pus_verification.rs +++ b/satrs-core/tests/pus_verification.rs @@ -8,7 +8,7 @@ pub mod crossbeam_test { VerificationReporterWithSender, }; use satrs_core::seq_count::SeqCountProviderSyncClonable; - use spacepackets::ecss::{EcssEnumU16, EcssEnumU8, PusPacket}; + use spacepackets::ecss::{EcssEnumU16, EcssEnumU8, PusPacket, SerializablePusPacket}; use spacepackets::tc::{PusTc, PusTcSecondaryHeader}; use spacepackets::tm::PusTm; use spacepackets::SpHeader; diff --git a/satrs-example/src/pus.rs b/satrs-example/src/pus/mod.rs similarity index 86% rename from satrs-example/src/pus.rs rename to satrs-example/src/pus/mod.rs index efa04fd..96389e4 100644 --- a/satrs-example/src/pus.rs +++ b/satrs-example/src/pus/mod.rs @@ -1,5 +1,5 @@ use crate::requests::{Request, RequestWithToken}; -use crate::tmtc::{PusTcSource, TmStore, PUS_APID}; +use crate::tmtc::{MpscStoreAndSendError, PusTcSource, TmStore, PUS_APID}; use log::{info, warn}; use satrs_core::events::EventU32; use satrs_core::hk::{CollectionIntervalFactor, HkRequest}; @@ -34,6 +34,8 @@ use std::convert::TryFrom; use std::rc::Rc; use std::sync::mpsc::{Receiver, Sender}; +pub mod test; + // pub trait PusTcRouter { // type Error; // fn route_pus_tc( @@ -45,14 +47,19 @@ use std::sync::mpsc::{Receiver, Sender}; // ); // } +pub enum PusTcWrapper<'tc> { + PusTc(&'tc PusTc<'tc>), + StoreAddr(StoreAddr), +} + pub type AcceptedTc = (StoreAddr, VerificationToken); pub struct PusTcMpscRouter { - test_service_receiver: MpscTmtcInStoreSender, - event_service_receiver: Sender, - sched_service_receiver: Sender, - hk_service_receiver: Sender, - action_service_receiver: Sender, + pub test_service_receiver: Sender, + pub event_service_receiver: Sender, + pub sched_service_receiver: Sender, + pub hk_service_receiver: Sender, + pub action_service_receiver: Sender, } // impl PusTcRouter for PusTcMpscRouter { @@ -146,7 +153,7 @@ impl PusTmArgs { // } pub struct PusTcArgs { - pub event_request_tx: Sender, + //pub event_request_tx: Sender, /// This routes all telecommands to their respective recipients pub pus_router: PusTcMpscRouter, /// Request routing helper. Maps targeted requests to their recipient. 
@@ -196,15 +203,13 @@ impl PusReceiver { } } -impl PusServiceProvider for PusReceiver { - type Error = (); - - fn handle_pus_tc_packet( +impl PusReceiver { + pub fn handle_tc_packet( &mut self, + store_addr: StoreAddr, service: u8, - _header: &SpHeader, pus_tc: &PusTc, - ) -> Result<(), Self::Error> { + ) -> Result<(), MpscStoreAndSendError> { let init_token = self.tm_args.verif_reporter.add_tc(pus_tc); self.stamp_helper.update_from_now(); let accepted_token = self @@ -219,20 +224,26 @@ impl PusServiceProvider for PusReceiver { .tc_args .pus_router .test_service_receiver - .send_tc(*pus_tc), - PusServiceId::Housekeeping => { - self.tc_args.pus_router.hk_service_receiver.send_tc(*pus_tc) - } //self.handle_hk_request(pus_tc, accepted_token), + .send((store_addr, accepted_token)) + .unwrap(), + PusServiceId::Housekeeping => self + .tc_args + .pus_router + .hk_service_receiver + .send((store_addr, accepted_token)) + .unwrap(), PusServiceId::Event => self .tc_args .pus_router .event_service_receiver - .send_tc(*pus_tc), //self.handle_event_request(pus_tc, accepted_token), + .send((store_addr, accepted_token)) + .unwrap(), PusServiceId::Scheduling => self .tc_args .pus_router .sched_service_receiver - .send_tc(*pus_tc), //self.handle_scheduled_tc(pus_tc, accepted_token), + .send((store_addr, accepted_token)) + .unwrap(), _ => self .tm_args .verif_reporter @@ -250,7 +261,7 @@ impl PusServiceProvider for PusReceiver { if let Ok(custom_service) = CustomPusServiceId::try_from(e.number) { match custom_service { CustomPusServiceId::Mode => { - self.handle_mode_service(pus_tc, accepted_token) + //self.handle_mode_service(pus_tc, accepted_token) } CustomPusServiceId::Health => {} } @@ -272,6 +283,82 @@ impl PusServiceProvider for PusReceiver { Ok(()) } } +// impl PusServiceProvider for PusReceiver { +// type Error = (); +// +// fn handle_pus_tc_packet( +// &mut self, +// service: u8, +// _header: &SpHeader, +// pus_tc: &PusTc, +// ) -> Result<(), Self::Error> { +// let init_token = self.tm_args.verif_reporter.add_tc(pus_tc); +// self.stamp_helper.update_from_now(); +// let accepted_token = self +// .tm_args +// .vr() +// .acceptance_success(init_token, Some(self.stamp_helper.stamp())) +// .expect("Acceptance success failure"); +// let service = PusServiceId::try_from(service); +// match service { +// Ok(standard_service) => match standard_service { +// PusServiceId::Test => self +// .tc_args +// .pus_router +// .test_service_receiver +// .send_tc(*pus_tc), +// PusServiceId::Housekeeping => { +// self.tc_args.pus_router.hk_service_receiver.send_tc(*pus_tc) +// } //self.handle_hk_request(pus_tc, accepted_token), +// PusServiceId::Event => self +// .tc_args +// .pus_router +// .event_service_receiver +// .send_tc(*pus_tc), //self.handle_event_request(pus_tc, accepted_token), +// PusServiceId::Scheduling => self +// .tc_args +// .pus_router +// .sched_service_receiver +// .send_tc(*pus_tc), //self.handle_scheduled_tc(pus_tc, accepted_token), +// _ => self +// .tm_args +// .verif_reporter +// .start_failure( +// accepted_token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::PUS_SERVICE_NOT_IMPLEMENTED, +// Some(&[standard_service as u8]), +// ), +// ) +// .expect("Start failure verification failed"), +// }, +// Err(e) => { +// if let Ok(custom_service) = CustomPusServiceId::try_from(e.number) { +// match custom_service { +// CustomPusServiceId::Mode => { +// self.handle_mode_service(pus_tc, accepted_token) +// } +// CustomPusServiceId::Health => {} +// } +// } else { +// 
self.tm_args
+//                     .verif_reporter
+//                     .start_failure(
+//                         accepted_token,
+//                         FailParams::new(
+//                             Some(self.stamp_helper.stamp()),
+//                             &tmtc_err::INVALID_PUS_SUBSERVICE,
+//                             Some(&[e.number]),
+//                         ),
+//                     )
+//                     .expect("Start failure verification failed")
+//             }
+//         }
+//     }
+//     Ok(())
+// }
+// }
diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs
new file mode 100644
index 0000000..4fcf8dc
--- /dev/null
+++ b/satrs-example/src/pus/test.rs
@@ -0,0 +1,12 @@
+use crate::pus::AcceptedTc;
+use satrs_core::pus::verification::StdVerifReporterWithSender;
+use std::sync::mpsc::Receiver;
+
+struct PusService17Handler {
+    receiver: Receiver,
+    verification_handler: StdVerifReporterWithSender,
+}
+
+impl PusService17Handler {
+    pub fn periodic_operation(&mut self) {}
+}
diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs
index 0124cc2..0e05799 100644
--- a/satrs-example/src/tmtc.rs
+++ b/satrs-example/src/tmtc.rs
@@ -8,12 +8,13 @@ use std::error::Error;
 use std::fmt::{Display, Formatter};
 use std::net::SocketAddr;
 use std::rc::Rc;
+use std::sync::mpsc;
 use std::sync::mpsc::{Receiver, SendError, Sender, TryRecvError};
 use std::thread;
 use std::time::Duration;
 
 use crate::ccsds::CcsdsReceiver;
-use crate::pus::{PusReceiver, PusTcArgs, PusTmArgs};
+use crate::pus::{PusReceiver, PusTcArgs, PusTcMpscRouter, PusTmArgs};
 use crate::requests::RequestWithToken;
 use satrs_core::pool::{SharedPool, StoreAddr, StoreError};
 use satrs_core::pus::event_man::EventRequestWithToken;
@@ -178,12 +179,21 @@ pub fn core_tmtc_task(args: OtherArgs, mut tc_args: TcArgs, tm_args: TmArgs) {
         verif_reporter: args.verif_reporter,
         seq_count_provider: args.seq_count_provider.clone(),
     };
+    let (pus_test_tx, pus_test_rx) = mpsc::channel();
+    let (pus_event_tx, pus_event_rx) = mpsc::channel();
+    let (pus_sched_tx, pus_sched_rx) = mpsc::channel();
+    let (pus_hk_tx, pus_hk_rx) = mpsc::channel();
+    let (pus_action_tx, pus_action_rx) = mpsc::channel();
+    let pus_router = PusTcMpscRouter {
+        test_service_receiver: pus_test_tx,
+        event_service_receiver: pus_event_tx,
+        sched_service_receiver: pus_sched_tx,
+        hk_service_receiver: pus_hk_tx,
+        action_service_receiver: pus_action_tx,
+    };
     let pus_tc_args = PusTcArgs {
-        event_request_tx: args.event_request_tx,
-        request_map: args.request_map,
-        tc_source: tc_args.tc_source.clone(),
+        pus_router,
         event_sender: args.event_sender,
-        scheduler: sched_clone,
     };
     let mut pus_receiver = PusReceiver::new(PUS_APID, pus_tm_args, pus_tc_args);
@@ -266,7 +276,7 @@ fn core_tmtc_loop(
             match PusTc::from_bytes(tc_buf) {
                 Ok((pus_tc, _)) => {
                     pus_receiver
-                        .handle_pus_tc_packet(pus_tc.service(), pus_tc.sp_header(), &pus_tc)
+                        .handle_tc_packet(addr, pus_tc.service(), &pus_tc)
                         .ok();
                 }
                 Err(e) => {
-- 
2.43.0

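The net effect of the two patches above: PusReceiver no longer interprets service payloads itself. It only runs acceptance verification and then hands the stored packet address plus the accepted verification token to a dedicated handler over a per-service mpsc channel. A minimal, self-contained sketch of that dispatch pattern follows; the plain integer type aliases are illustrative stand-ins for the real StoreAddr and accepted-state VerificationToken types in satrs-core:

use std::sync::mpsc;

// Simplified stand-ins for the pool address and verification token types.
type StoreAddr = u32;
type AcceptedToken = u32;
type AcceptedTc = (StoreAddr, AcceptedToken);

struct PusTcMpscRouter {
    test_tx: mpsc::Sender<AcceptedTc>,
    sched_tx: mpsc::Sender<AcceptedTc>,
}

impl PusTcMpscRouter {
    // Forward an accepted TC to the handler owning the matching service queue.
    fn route(&self, service: u8, tc: AcceptedTc) -> Result<(), mpsc::SendError<AcceptedTc>> {
        match service {
            17 => self.test_tx.send(tc),
            11 => self.sched_tx.send(tc),
            // Unknown services are reported via a verification failure instead.
            _ => Ok(()),
        }
    }
}

fn main() {
    let (test_tx, test_rx) = mpsc::channel();
    let (sched_tx, sched_rx) = mpsc::channel();
    let router = PusTcMpscRouter { test_tx, sched_tx };
    router.route(17, (0, 0)).unwrap();
    assert!(test_rx.try_recv().is_ok());
    assert!(sched_rx.try_recv().is_err());
}

The handler side owns the receiving end of its channel and drains it at its own pace, which is exactly what the PusService17Handler introduced in the next patch starts to implement.
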
From 3a1c5832a9481def49b1b3afc53005529bdde71e Mon Sep 17 00:00:00 2001
From: Robin Mueller
Date: Tue, 4 Jul 2023 15:17:43 +0200
Subject: [PATCH 03/39] continue

---
 satrs-core/src/pus/mod.rs        |  1 -
 satrs-core/src/tmtc/tm_helper.rs | 35 ++++++++++
 satrs-example/src/main.rs        | 27 +++++++---
 satrs-example/src/pus/mod.rs     |  8 ++-
 satrs-example/src/pus/test.rs    | 93 ++++++++++++++++++++++++++---
 satrs-example/src/tmtc.rs        | 21 ++------
 6 files changed, 149 insertions(+), 36 deletions(-)

diff --git a/satrs-core/src/pus/mod.rs b/satrs-core/src/pus/mod.rs
index 1fd7a23..c74e587 100644
--- a/satrs-core/src/pus/mod.rs
+++ b/satrs-core/src/pus/mod.rs
@@ -20,7 +20,6 @@ pub mod verification;
 
 #[cfg(feature = "alloc")]
 pub use alloc_mod::*;
-use crate::pus::verification::TcStateToken;
 use crate::SenderId;
 #[cfg(feature = "std")]
 pub use std_mod::*;
diff --git a/satrs-core/src/tmtc/tm_helper.rs b/satrs-core/src/tmtc/tm_helper.rs
index c1e5720..6bc0edd 100644
--- a/satrs-core/src/tmtc/tm_helper.rs
+++ b/satrs-core/src/tmtc/tm_helper.rs
@@ -1,8 +1,43 @@
+use spacepackets::ecss::SerializablePusPacket;
 use spacepackets::time::cds::TimeProvider;
 use spacepackets::time::TimeWriter;
 use spacepackets::tm::{PusTm, PusTmSecondaryHeader};
 use spacepackets::SpHeader;
 
+#[cfg(feature = "std")]
+pub use std_mod::*;
+
+#[cfg(feature = "std")]
+pub mod std_mod {
+    use crate::pool::{SharedPool, StoreAddr};
+    use spacepackets::ecss::SerializablePusPacket;
+    use spacepackets::tm::PusTm;
+
+    #[derive(Clone)]
+    pub struct SharedTmStore {
+        pool: SharedPool,
+    }
+
+    impl SharedTmStore {
+        pub fn new(backing_pool: SharedPool) -> Self {
+            Self { pool: backing_pool }
+        }
+
+        pub fn backing_pool(&self) -> SharedPool {
+            self.pool.clone()
+        }
+
+        pub fn add_pus_tm(&mut self, pus_tm: &PusTm) -> StoreAddr {
+            let mut pg = self.pool.write().expect("error locking TM store");
+            let (addr, buf) = pg.free_element(pus_tm.len_packed()).expect("Store error");
+            pus_tm
+                .write_to_bytes(buf)
+                .expect("writing PUS TM to store failed");
+            addr
+        }
+    }
+}
+
 pub struct PusTmWithCdsShortHelper {
     apid: u16,
     cds_short_buf: [u8; 7],
diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs
index 450c694..916db1b 100644
--- a/satrs-example/src/main.rs
+++ b/satrs-example/src/main.rs
@@ -11,7 +11,7 @@ use crate::hk::AcsHkIds;
 use crate::logging::setup_logger;
 use crate::requests::{Request, RequestWithToken};
 use crate::tmtc::{
-    core_tmtc_task, OtherArgs, PusTcSource, TcArgs, TcStore, TmArgs, TmFunnel, TmStore, PUS_APID,
+    core_tmtc_task, OtherArgs, PusTcSource, TcArgs, TcStore, TmArgs, TmFunnel, PUS_APID,
 };
 use satrs_core::event_man::{
     EventManagerWithMpscQueue, MpscEventReceiver, MpscEventU32SendProvider, SendEventProvider,
@@ -35,6 +35,7 @@ use satrs_core::spacepackets::{
     tm::{PusTm, PusTmSecondaryHeader},
     SequenceFlags, SpHeader,
 };
+use satrs_core::tmtc::tm_helper::SharedTmStore;
 use satrs_core::tmtc::AddressableId;
 use satrs_example::{RequestTargetId, OBSW_SERVER_ADDR, SERVER_PORT};
 use std::collections::HashMap;
@@ -43,6 +44,7 @@ use std::sync::mpsc::{channel, TryRecvError};
 use std::sync::{Arc, RwLock};
 use std::thread;
 use std::time::Duration;
+use crate::pus::test::PusService17Handler;
 
 fn main() {
     setup_logger().expect("setting up logging with fern failed");
@@ -55,9 +57,7 @@ fn main() {
         (15, 1024),
         (15, 2048),
     ]));
-    let tm_store = TmStore {
-        pool: Arc::new(RwLock::new(Box::new(tm_pool))),
-    };
+    let tm_store = SharedTmStore::new(Arc::new(RwLock::new(Box::new(tm_pool))));
     let tc_pool = LocalPool::new(PoolCfg::new(vec![
         (30, 32),
         (15, 64),
@@ -80,7 +80,7 @@ fn main() {
     let verif_sender = MpscVerifSender::new(
         0,
         "verif_sender",
-        tm_store.pool.clone(),
+        tm_store.backing_pool(),
        tm_funnel_tx.clone(),
     );
     let verif_cfg = VerificationReporterCfg::new(
@@ -152,6 +152,8 @@ fn main() {
     let aocs_to_funnel = tm_funnel_tx.clone();
     let mut aocs_tm_store = tm_store.clone();
 
+    let pus17_handler = PusService17Handler::new()
+
     info!("Starting TMTC task");
     let jh0 = thread::Builder::new()
         .name("TMTC".to_string())
         .spawn(move || {
@@ -184,8 +186,12 @@ fn main() {
         .name("Event".to_string())
         .spawn(move || {
             let mut timestamp: [u8; 7] = [0; 7];
-            let mut sender =
-                MpscTmtcInStoreSender::new(1, "event_sender", tm_store.pool, tm_funnel_tx);
tm_funnel_tx); + let mut sender = MpscTmtcInStoreSender::new( + 1, + "event_sender", + tm_store.backing_pool(), + tm_funnel_tx, + ); let mut time_provider = TimeProvider::new_with_u16_days(0, 0); let mut report_completion = |event_req: EventRequestWithToken, timestamp: &[u8]| { reporter_event_handler @@ -307,10 +313,17 @@ fn main() { }) .unwrap(); + info!("Starting PUS handler thread"); + let jh4 = thread::Builder::new() + .name("AOCS".to_string()) + .spawn(move || { + + }); jh0.join().expect("Joining UDP TMTC server thread failed"); jh1.join().expect("Joining TM Funnel thread failed"); jh2.join().expect("Joining Event Manager thread failed"); jh3.join().expect("Joining AOCS thread failed"); + jh4.join().expect("Joining PUS handler thread failed"); } pub fn update_time(time_provider: &mut TimeProvider, timestamp: &mut [u8]) { diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index 96389e4..8146c1e 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -1,6 +1,4 @@ -use crate::requests::{Request, RequestWithToken}; -use crate::tmtc::{MpscStoreAndSendError, PusTcSource, TmStore, PUS_APID}; -use log::{info, warn}; +use crate::tmtc::MpscStoreAndSendError; use satrs_core::events::EventU32; use satrs_core::hk::{CollectionIntervalFactor, HkRequest}; use satrs_core::mode::{ModeAndSubmode, ModeRequest}; @@ -21,7 +19,7 @@ use satrs_core::res_code::ResultU16; use satrs_core::seq_count::{SeqCountProviderSyncClonable, SequenceCountProviderCore}; use satrs_core::spacepackets::ecss::{scheduling, PusServiceId}; use satrs_core::spacepackets::time::CcsdsTimeProvider; -use satrs_core::tmtc::tm_helper::PusTmWithCdsShortHelper; +use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; use satrs_core::tmtc::{AddressableId, PusServiceProvider, TargetId}; use satrs_core::{ spacepackets::ecss::PusPacket, spacepackets::tc::PusTc, spacepackets::time::cds::TimeProvider, @@ -98,7 +96,7 @@ pub struct PusTmArgs { /// All telemetry is sent with this sender handle. pub tm_tx: Sender, /// All TM to be sent is stored here - pub tm_store: TmStore, + pub tm_store: SharedTmStore, /// All verification reporting is done with this reporter. 
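     /// The embedded sender routes verification packets through the same shared
     /// TM store and funnel channel as all other telemetry.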
pub verif_reporter: StdVerifReporterWithSender, /// Sequence count provider for TMs sent from within pus demultiplexer diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index 4fcf8dc..ccc9d2a 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -1,12 +1,95 @@ use crate::pus::AcceptedTc; -use satrs_core::pus::verification::StdVerifReporterWithSender; -use std::sync::mpsc::Receiver; +use log::info; +use satrs_core::pool::{SharedPool, StoreAddr}; +use satrs_core::pus::verification::{ + StdVerifReporterWithSender, TcStateAccepted, VerificationToken, +}; +use satrs_core::seq_count::{SeqCountProviderSyncClonable, SequenceCountProviderCore}; +use satrs_core::spacepackets::ecss::PusPacket; +use satrs_core::spacepackets::tc::PusTc; +use satrs_core::spacepackets::time::cds::TimeProvider; +use satrs_core::spacepackets::time::TimeWriter; +use satrs_core::spacepackets::tm::PusTm; +use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; +use std::sync::mpsc::{Receiver, Sender, TryRecvError}; -struct PusService17Handler { - receiver: Receiver, +pub struct PusService17Handler { + tc_rx: Receiver, + tc_store: SharedPool, + tm_helper: PusTmWithCdsShortHelper, + tm_tx: Sender, + tm_store: SharedTmStore, verification_handler: StdVerifReporterWithSender, + stamp_buf: [u8; 7], + pus_buf: [u8; 2048], + handled_tcs: u32, } impl PusService17Handler { - pub fn periodic_operation(&mut self) {} + pub fn new(receiver: Receiver, tc_pool: SharedPool, tm_helper: PusTmWithCdsShortHelper, tm_tx: Sender, tm_store: SharedTmStore, verification_handler: StdVerifReporterWithSender) -> Self { + Self { + tc_rx: receiver, + tc_store: tc_pool, + tm_helper, + tm_tx, + tm_store, + verification_handler, + stamp_buf: [0; 7], + pus_buf: [0; 2048], + handled_tcs: 0 + } + } + // TODO: Return errors which occured + pub fn periodic_operation(&mut self) -> Result { + self.handled_tcs = 0; + loop { + match self.tc_rx.try_recv() { + Ok((addr, token)) => { + self.handle_one_tc(addr, token); + } + Err(e) => { + match e { + TryRecvError::Empty => return Ok(self.handled_tcs), + TryRecvError::Disconnected => { + // TODO: Replace panic by something cleaner + panic!("PusService17Handler: Sender disconnected"); + } + } + } + } + } + } + pub fn handle_one_tc(&mut self, addr: StoreAddr, token: VerificationToken) { + let time_provider = TimeProvider::from_now_with_u16_days().unwrap(); + // TODO: Better error handling + let (addr, token) = self.tc_rx.try_recv().unwrap(); + { + // Keep locked section as short as possible. + let mut tc_pool = self.tc_store.write().unwrap(); + let tc_guard = tc_pool.read_with_guard(addr); + let tc_raw = tc_guard.read().unwrap(); + self.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); + } + let tc = PusTc::from_bytes(&self.pus_buf).unwrap(); + // TODO: Robustness: Check that service is 17 + if tc.0.subservice() == 1 { + info!("Received PUS ping command TC[17,1]"); + info!("Sending ping reply PUS TM[17,2]"); + time_provider.write_to_bytes(&mut self.stamp_buf).unwrap(); + let start_token = self + .verification_handler + .start_success(token, Some(&self.stamp_buf)) + .expect("Error sending start success"); + // Sequence count will be handled centrally in TM funnel. 
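+            // The reply is created with a sequence count of 0 on purpose: the TM
+            // funnel is responsible for stamping the final CCSDS sequence count,
+            // so individual service handlers do not need a shared counter.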
+ let ping_reply = self.tm_helper.create_pus_tm_with_stamp(17, 2, None, &time_provider, 0); + let addr = self.tm_store.add_pus_tm(&ping_reply); + self.tm_tx + .send(addr) + .expect("Sending TM to TM funnel failed"); + self.verification_handler + .completion_success(start_token, Some(&self.stamp_buf)) + .expect("Error sending completion success"); + self.handled_tcs += 1; + } + } } diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index 0e05799..ded9090 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -23,6 +23,7 @@ use satrs_core::pus::verification::StdVerifReporterWithSender; use satrs_core::seq_count::SeqCountProviderSyncClonable; use satrs_core::spacepackets::ecss::SerializablePusPacket; use satrs_core::spacepackets::{ecss::PusPacket, tc::PusTc, tm::PusTm, SpHeader}; +use satrs_core::tmtc::tm_helper::SharedTmStore; use satrs_core::tmtc::{ CcsdsDistributor, CcsdsError, PusServiceProvider, ReceivesCcsdsTc, ReceivesEcssPusTc, }; @@ -39,7 +40,7 @@ pub struct OtherArgs { } pub struct TmArgs { - pub tm_store: TmStore, + pub tm_store: SharedTmStore, pub tm_sink_sender: Sender, pub tm_server_rx: Receiver, } @@ -96,27 +97,11 @@ impl From> for MpscStoreAndSendError { } } -#[derive(Clone)] -pub struct TmStore { - pub pool: SharedPool, -} - #[derive(Clone)] pub struct TcStore { pub pool: SharedPool, } -impl TmStore { - pub fn add_pus_tm(&mut self, pus_tm: &PusTm) -> StoreAddr { - let mut pg = self.pool.write().expect("error locking TM store"); - let (addr, buf) = pg.free_element(pus_tm.len_packed()).expect("Store error"); - pus_tm - .write_to_bytes(buf) - .expect("writing PUS TM to store failed"); - addr - } -} - impl TcStore { pub fn add_pus_tc(&mut self, pus_tc: &PusTc) -> Result { let mut pg = self.pool.write().expect("error locking TC store"); @@ -209,7 +194,7 @@ pub fn core_tmtc_task(args: OtherArgs, mut tc_args: TcArgs, tm_args: TmArgs) { let mut udp_tmtc_server = UdpTmtcServer { udp_tc_server, tm_rx: tm_args.tm_server_rx, - tm_store: tm_args.tm_store.pool.clone(), + tm_store: tm_args.tm_store.backing_pool(), }; let mut tc_buf: [u8; 4096] = [0; 4096]; -- 2.43.0 From 82b7717b496521a4e23e8a4337b323347bdc2480 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Tue, 4 Jul 2023 18:51:54 +0200 Subject: [PATCH 04/39] next helper class --- satrs-example/src/main.rs | 46 ++++++-- satrs-example/src/pus/mod.rs | 60 +++++++++-- satrs-example/src/pus/scheduler.rs | 166 +++++++++++++++++++++++++++++ satrs-example/src/pus/test.rs | 105 +++++++++++------- satrs-example/src/tmtc.rs | 19 ++-- 5 files changed, 327 insertions(+), 69 deletions(-) create mode 100644 satrs-example/src/pus/scheduler.rs diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index 916db1b..bc569d2 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -9,6 +9,8 @@ use log::{info, warn}; use crate::hk::AcsHkIds; use crate::logging::setup_logger; +use crate::pus::test::PusService17TestHandler; +use crate::pus::PusTcMpscRouter; use crate::requests::{Request, RequestWithToken}; use crate::tmtc::{ core_tmtc_task, OtherArgs, PusTcSource, TcArgs, TcStore, TmArgs, TmFunnel, PUS_APID, @@ -35,16 +37,15 @@ use satrs_core::spacepackets::{ tm::{PusTm, PusTmSecondaryHeader}, SequenceFlags, SpHeader, }; -use satrs_core::tmtc::tm_helper::SharedTmStore; +use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; use satrs_core::tmtc::AddressableId; use satrs_example::{RequestTargetId, OBSW_SERVER_ADDR, SERVER_PORT}; use std::collections::HashMap; use 
std::net::{IpAddr, SocketAddr}; use std::sync::mpsc::{channel, TryRecvError}; -use std::sync::{Arc, RwLock}; +use std::sync::{mpsc, Arc, RwLock}; use std::thread; use std::time::Duration; -use crate::pus::test::PusService17Handler; fn main() { setup_logger().expect("setting up logging with fern failed"); @@ -126,14 +127,14 @@ fn main() { request_map.insert(RequestTargetId::AcsSubsystem as u32, acs_thread_tx); let tc_source = PusTcSource { - tc_store, + tc_store: tc_store.clone(), tc_source: tc_source_tx, }; // Create clones here to allow moving the values let core_args = OtherArgs { sock_addr, - verif_reporter, + verif_reporter: verif_reporter.clone(), event_sender, event_request_tx, request_map, @@ -152,13 +153,32 @@ fn main() { let aocs_to_funnel = tm_funnel_tx.clone(); let mut aocs_tm_store = tm_store.clone(); - let pus17_handler = PusService17Handler::new() + let (pus_test_tx, pus_test_rx) = channel(); + let (pus_event_tx, pus_event_rx) = channel(); + let (pus_sched_tx, pus_sched_rx) = channel(); + let (pus_hk_tx, pus_hk_rx) = channel(); + let (pus_action_tx, pus_action_rx) = channel(); + let pus_router = PusTcMpscRouter { + test_service_receiver: pus_test_tx, + event_service_receiver: pus_event_tx, + sched_service_receiver: pus_sched_tx, + hk_service_receiver: pus_hk_tx, + action_service_receiver: pus_action_tx, + }; + let mut pus17_handler = PusService17TestHandler::new( + pus_test_rx, + tc_store.pool.clone(), + PusTmWithCdsShortHelper::new(PUS_APID), + tm_funnel_tx.clone(), + tm_store.clone(), + verif_reporter.clone(), + ); info!("Starting TMTC task"); let jh0 = thread::Builder::new() .name("TMTC".to_string()) .spawn(move || { - core_tmtc_task(core_args, tc_args, tm_args); + core_tmtc_task(core_args, tc_args, tm_args, pus_router); }) .unwrap(); @@ -315,10 +335,16 @@ fn main() { info!("Starting PUS handler thread"); let jh4 = thread::Builder::new() - .name("AOCS".to_string()) + .name("PUS".to_string()) .spawn(move || { - - }); + loop { + // TODO: Better error handling + let res = pus17_handler.periodic_operation(); + res.expect("some PUS17 error"); + thread::sleep(Duration::from_millis(400)); + } + }) + .unwrap(); jh0.join().expect("Joining UDP TMTC server thread failed"); jh1.join().expect("Joining TM Funnel thread failed"); jh2.join().expect("Joining Event Manager thread failed"); diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index 8146c1e..01300c2 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -1,10 +1,11 @@ +use crate::pus::test::PusService17TestHandler; use crate::tmtc::MpscStoreAndSendError; use satrs_core::events::EventU32; use satrs_core::hk::{CollectionIntervalFactor, HkRequest}; use satrs_core::mode::{ModeAndSubmode, ModeRequest}; use satrs_core::objects::ObjectId; use satrs_core::params::Params; -use satrs_core::pool::{PoolProvider, StoreAddr}; +use satrs_core::pool::{PoolProvider, SharedPool, StoreAddr}; use satrs_core::pus::event_man::{EventRequest, EventRequestWithToken}; use satrs_core::pus::hk; use satrs_core::pus::mode::Subservice; @@ -30,10 +31,46 @@ use std::cell::RefCell; use std::collections::HashMap; use std::convert::TryFrom; use std::rc::Rc; -use std::sync::mpsc::{Receiver, Sender}; +use std::sync::mpsc::{Receiver, SendError, Sender}; +pub mod scheduler; pub mod test; +pub struct PusServiceBase { + tc_rx: Receiver, + tc_store: SharedPool, + tm_helper: PusTmWithCdsShortHelper, + tm_tx: Sender, + tm_store: SharedTmStore, + verification_handler: StdVerifReporterWithSender, + stamp_buf: [u8; 7], + 
pus_buf: [u8; 2048], + handled_tcs: u32, +} + +impl PusServiceBase { + pub fn new( + receiver: Receiver, + tc_pool: SharedPool, + tm_helper: PusTmWithCdsShortHelper, + tm_tx: Sender, + tm_store: SharedTmStore, + verification_handler: StdVerifReporterWithSender, + ) -> Self { + Self { + tc_rx: receiver, + tc_store: tc_pool, + tm_helper, + tm_tx, + tm_store, + verification_handler, + stamp_buf: [0; 7], + pus_buf: [0; 2048], + handled_tcs: 0, + } + } +} + // pub trait PusTcRouter { // type Error; // fn route_pus_tc( @@ -218,12 +255,19 @@ impl PusReceiver { let service = PusServiceId::try_from(service); match service { Ok(standard_service) => match standard_service { - PusServiceId::Test => self - .tc_args - .pus_router - .test_service_receiver - .send((store_addr, accepted_token)) - .unwrap(), + PusServiceId::Test => { + let res = self + .tc_args + .pus_router + .test_service_receiver + .send((store_addr, accepted_token)); + match res { + Ok(_) => {} + Err(e) => { + println!("Error {e}") + } + } + } PusServiceId::Housekeeping => self .tc_args .pus_router diff --git a/satrs-example/src/pus/scheduler.rs b/satrs-example/src/pus/scheduler.rs new file mode 100644 index 0000000..cf35b5a --- /dev/null +++ b/satrs-example/src/pus/scheduler.rs @@ -0,0 +1,166 @@ +use crate::pus::{AcceptedTc, PusServiceBase}; +use delegate::delegate; +use satrs_core::pool::{SharedPool, StoreAddr}; +use satrs_core::pus::scheduling::PusScheduler; +use satrs_core::pus::verification::{ + pus_11_generic_tc_check, FailParams, StdVerifReporterWithSender, TcStateAccepted, + VerificationToken, +}; +use satrs_core::pus::GenericTcCheckError; +use satrs_core::spacepackets::ecss::scheduling; +use satrs_core::spacepackets::tc::PusTc; +use satrs_core::spacepackets::time::cds::TimeProvider; +use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; +use satrs_example::tmtc_err; +use std::sync::mpsc::{Receiver, Sender}; + +pub struct PusService11SchedHandler { + psb: PusServiceBase, + scheduler: PusScheduler, +} + +impl PusService11SchedHandler { + pub fn new( + receiver: Receiver, + tc_pool: SharedPool, + tm_helper: PusTmWithCdsShortHelper, + tm_tx: Sender, + tm_store: SharedTmStore, + verification_handler: StdVerifReporterWithSender, + scheduler: PusScheduler, + ) -> Self { + Self { + psb: PusServiceBase::new( + receiver, + tc_pool, + tm_helper, + tm_tx, + tm_store, + verification_handler, + ), + scheduler, + } + } + // TODO: Return errors which occured + pub fn periodic_operation(&mut self) -> Result { + Ok(self.psb.handled_tcs) + } + + pub fn handle_one_tc(&mut self, addr: StoreAddr, token: VerificationToken) { + let time_provider = TimeProvider::from_now_with_u16_days().unwrap(); + // TODO: Better error handling + { + // Keep locked section as short as possible. 
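+            // The write guard is dropped at the end of this scope, so the raw TC
+            // is copied into the local buffer first and parsed afterwards without
+            // blocking other users of the shared TC pool.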
+ let mut tc_pool = self.psb.tc_store.write().unwrap(); + let tc_guard = tc_pool.read_with_guard(addr); + let tc_raw = tc_guard.read().unwrap(); + self.psb.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); + } + let (tc, tc_size) = PusTc::from_bytes(&self.psb.pus_buf).unwrap(); + let subservice = match pus_11_generic_tc_check(&tc) { + Ok(subservice) => subservice, + Err(e) => match e { + GenericTcCheckError::NotEnoughAppData => { + self.psb + .verification_handler + .start_failure( + token, + FailParams::new( + Some(&self.psb.stamp_buf), + &tmtc_err::NOT_ENOUGH_APP_DATA, + None, + ), + ) + .expect("could not sent verification error"); + return; + } + GenericTcCheckError::InvalidSubservice => { + self.psb + .verification_handler + .start_failure( + token, + FailParams::new( + Some(&self.psb.stamp_buf), + &tmtc_err::INVALID_PUS_SUBSERVICE, + None, + ), + ) + .expect("could not sent verification error"); + return; + } + }, + }; + match subservice { + scheduling::Subservice::TcEnableScheduling => { + let start_token = self + .psb + .verification_handler + .start_success(token, Some(&self.psb.stamp_buf)) + .expect("Error sending start success"); + + self.scheduler.enable(); + if self.scheduler.is_enabled() { + self.psb + .verification_handler + .completion_success(start_token, Some(&self.psb.stamp_buf)) + .expect("Error sending completion success"); + } else { + panic!("Failed to enable scheduler"); + } + } + scheduling::Subservice::TcDisableScheduling => { + let start_token = self + .psb + .verification_handler + .start_success(token, Some(&self.psb.stamp_buf)) + .expect("Error sending start success"); + + self.scheduler.disable(); + if !self.scheduler.is_enabled() { + self.psb + .verification_handler + .completion_success(start_token, Some(&self.psb.stamp_buf)) + .expect("Error sending completion success"); + } else { + panic!("Failed to disable scheduler"); + } + } + scheduling::Subservice::TcResetScheduling => { + let start_token = self + .psb + .verification_handler + .start_success(token, Some(&self.psb.stamp_buf)) + .expect("Error sending start success"); + + let mut pool = self.psb.tc_store.write().expect("Locking pool failed"); + + self.scheduler + .reset(pool.as_mut()) + .expect("Error resetting TC Pool"); + + self.psb + .verification_handler + .completion_success(start_token, Some(&self.psb.stamp_buf)) + .expect("Error sending completion success"); + } + scheduling::Subservice::TcInsertActivity => { + let start_token = self + .psb + .verification_handler + .start_success(token, Some(&self.psb.stamp_buf)) + .expect("error sending start success"); + + let mut pool = self.psb.tc_store.write().expect("locking pool failed"); + self.scheduler + .insert_wrapped_tc::(&tc, pool.as_mut()) + .expect("insertion of activity into pool failed"); + + self.psb + .verification_handler + .completion_success(start_token, Some(&self.psb.stamp_buf)) + .expect("sending completion success failed"); + } + _ => {} + } + } +} diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index ccc9d2a..8ed6bd6 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -1,5 +1,8 @@ -use crate::pus::AcceptedTc; +use crate::pus::{AcceptedTc, PusServiceBase}; +use delegate::delegate; use log::info; +use satrs_core::events::EventU32; +use satrs_core::params::Params; use satrs_core::pool::{SharedPool, StoreAddr}; use satrs_core::pus::verification::{ StdVerifReporterWithSender, TcStateAccepted, VerificationToken, @@ -11,45 +14,49 @@ use 
satrs_core::spacepackets::time::cds::TimeProvider; use satrs_core::spacepackets::time::TimeWriter; use satrs_core::spacepackets::tm::PusTm; use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; +use satrs_example::TEST_EVENT; use std::sync::mpsc::{Receiver, Sender, TryRecvError}; -pub struct PusService17Handler { - tc_rx: Receiver, - tc_store: SharedPool, - tm_helper: PusTmWithCdsShortHelper, - tm_tx: Sender, - tm_store: SharedTmStore, - verification_handler: StdVerifReporterWithSender, - stamp_buf: [u8; 7], - pus_buf: [u8; 2048], - handled_tcs: u32, +pub struct SatrsTestServiceCustomHandler { + pub event_sender: Sender<(EventU32, Option)>, } -impl PusService17Handler { - pub fn new(receiver: Receiver, tc_pool: SharedPool, tm_helper: PusTmWithCdsShortHelper, tm_tx: Sender, tm_store: SharedTmStore, verification_handler: StdVerifReporterWithSender) -> Self { +pub struct PusService17TestHandler { + psb: PusServiceBase, +} + +impl PusService17TestHandler { + pub fn new( + receiver: Receiver, + tc_pool: SharedPool, + tm_helper: PusTmWithCdsShortHelper, + tm_tx: Sender, + tm_store: SharedTmStore, + verification_handler: StdVerifReporterWithSender, + ) -> Self { Self { - tc_rx: receiver, - tc_store: tc_pool, - tm_helper, - tm_tx, - tm_store, - verification_handler, - stamp_buf: [0; 7], - pus_buf: [0; 2048], - handled_tcs: 0 + psb: PusServiceBase::new( + receiver, + tc_pool, + tm_helper, + tm_tx, + tm_store, + verification_handler, + ), } } + // TODO: Return errors which occured pub fn periodic_operation(&mut self) -> Result { - self.handled_tcs = 0; + self.psb.handled_tcs = 0; loop { - match self.tc_rx.try_recv() { + match self.psb.tc_rx.try_recv() { Ok((addr, token)) => { self.handle_one_tc(addr, token); } Err(e) => { match e { - TryRecvError::Empty => return Ok(self.handled_tcs), + TryRecvError::Empty => return Ok(self.psb.handled_tcs), TryRecvError::Disconnected => { // TODO: Replace panic by something cleaner panic!("PusService17Handler: Sender disconnected"); @@ -62,34 +69,56 @@ impl PusService17Handler { pub fn handle_one_tc(&mut self, addr: StoreAddr, token: VerificationToken) { let time_provider = TimeProvider::from_now_with_u16_days().unwrap(); // TODO: Better error handling - let (addr, token) = self.tc_rx.try_recv().unwrap(); { // Keep locked section as short as possible. - let mut tc_pool = self.tc_store.write().unwrap(); + let mut tc_pool = self.psb.tc_store.write().unwrap(); let tc_guard = tc_pool.read_with_guard(addr); let tc_raw = tc_guard.read().unwrap(); - self.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); + self.psb.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); } - let tc = PusTc::from_bytes(&self.pus_buf).unwrap(); + let (tc, tc_size) = PusTc::from_bytes(&self.psb.pus_buf).unwrap(); // TODO: Robustness: Check that service is 17 - if tc.0.subservice() == 1 { + if tc.subservice() == 1 { info!("Received PUS ping command TC[17,1]"); info!("Sending ping reply PUS TM[17,2]"); - time_provider.write_to_bytes(&mut self.stamp_buf).unwrap(); + time_provider + .write_to_bytes(&mut self.psb.stamp_buf) + .unwrap(); let start_token = self + .psb .verification_handler - .start_success(token, Some(&self.stamp_buf)) + .start_success(token, Some(&self.psb.stamp_buf)) .expect("Error sending start success"); // Sequence count will be handled centrally in TM funnel. 
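            // Verification flow: start_success above turned the accepted token into
            // a started token; completion_success below closes out that token once
            // the ping reply has been handed to the funnel.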
- let ping_reply = self.tm_helper.create_pus_tm_with_stamp(17, 2, None, &time_provider, 0); - let addr = self.tm_store.add_pus_tm(&ping_reply); - self.tm_tx + let ping_reply = + self.psb + .tm_helper + .create_pus_tm_with_stamp(17, 2, None, &time_provider, 0); + let addr = self.psb.tm_store.add_pus_tm(&ping_reply); + self.psb + .tm_tx .send(addr) .expect("Sending TM to TM funnel failed"); - self.verification_handler - .completion_success(start_token, Some(&self.stamp_buf)) + self.psb + .verification_handler + .completion_success(start_token, Some(&self.psb.stamp_buf)) .expect("Error sending completion success"); - self.handled_tcs += 1; + self.psb.handled_tcs += 1; } + // TODO: How to handle invalid subservice? + // TODO: How do we handle custom code like this? Custom subservice handler via trait? + // if tc.subservice() == 128 { + // info!("Generating test event"); + // self.event_sender + // .send((TEST_EVENT.into(), None)) + // .expect("Sending test event failed"); + // let start_token = + // verification_handler + // .start_success(token, Some(&stamp_buf)) + // .expect("Error sending start success"); + // verification_handler + // .completion_success(start_token, Some(&stamp_buf)) + // .expect("Error sending completion success"); + // } } diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index ded9090..320cf5a 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -152,7 +152,12 @@ impl ReceivesCcsdsTc for PusTcSource { } } -pub fn core_tmtc_task(args: OtherArgs, mut tc_args: TcArgs, tm_args: TmArgs) { +pub fn core_tmtc_task( + args: OtherArgs, + mut tc_args: TcArgs, + tm_args: TmArgs, + pus_router: PusTcMpscRouter, +) { let scheduler = Rc::new(RefCell::new( PusScheduler::new_with_current_init_time(Duration::from_secs(5)).unwrap(), )); @@ -164,18 +169,6 @@ pub fn core_tmtc_task(args: OtherArgs, mut tc_args: TcArgs, tm_args: TmArgs) { verif_reporter: args.verif_reporter, seq_count_provider: args.seq_count_provider.clone(), }; - let (pus_test_tx, pus_tedt_rx) = mpsc::channel(); - let (pus_event_tx, pus_event_rx) = mpsc::channel(); - let (pus_sched_tx, pus_sched_rx) = mpsc::channel(); - let (pus_hk_tx, pus_hk_rx) = mpsc::channel(); - let (pus_action_tx, pus_action_rx) = mpsc::channel(); - let pus_router = PusTcMpscRouter { - test_service_receiver: pus_test_tx, - event_service_receiver: pus_event_tx, - sched_service_receiver: pus_sched_tx, - hk_service_receiver: pus_hk_tx, - action_service_receiver: pus_action_tx, - }; let pus_tc_args = PusTcArgs { pus_router, event_sender: args.event_sender, -- 2.43.0 From 6723fd9e5cc450b9fac79b0a690f1ad39e181269 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Tue, 4 Jul 2023 20:01:06 +0200 Subject: [PATCH 05/39] compleex --- satrs-example/src/main.rs | 35 ++++++++++++++++++++++++---- satrs-example/src/pus/mod.rs | 4 ++-- satrs-example/src/pus/test.rs | 44 ++++++++++++++++++++++++++++++++++- 3 files changed, 76 insertions(+), 7 deletions(-) diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index bc569d2..bd69787 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -9,7 +9,7 @@ use log::{info, warn}; use crate::hk::AcsHkIds; use crate::logging::setup_logger; -use crate::pus::test::PusService17TestHandler; +use crate::pus::test::{PacketHandlerResult, PusService17TestHandler}; use crate::pus::PusTcMpscRouter; use crate::requests::{Request, RequestWithToken}; use crate::tmtc::{ @@ -31,6 +31,7 @@ use satrs_core::pus::verification::{ }; use satrs_core::pus::MpscTmtcInStoreSender; use 
satrs_core::seq_count::{SeqCountProviderSimple, SeqCountProviderSyncClonable}; +use satrs_core::spacepackets::tc::{GenericPusTcSecondaryHeader, PusTc}; use satrs_core::spacepackets::{ time::cds::TimeProvider, time::TimeWriter, @@ -39,7 +40,7 @@ use satrs_core::spacepackets::{ }; use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; use satrs_core::tmtc::AddressableId; -use satrs_example::{RequestTargetId, OBSW_SERVER_ADDR, SERVER_PORT}; +use satrs_example::{RequestTargetId, OBSW_SERVER_ADDR, SERVER_PORT, TEST_EVENT}; use std::collections::HashMap; use std::net::{IpAddr, SocketAddr}; use std::sync::mpsc::{channel, TryRecvError}; @@ -338,10 +339,36 @@ fn main() { .name("PUS".to_string()) .spawn(move || { loop { + let mut handled_pings = 0; // TODO: Better error handling - let res = pus17_handler.periodic_operation(); + let res = pus17_handler.handle_next_packet().unwrap(); + match res { + PacketHandlerResult::PingRequestHandled => { + handled_pings += 1; + } + PacketHandlerResult::CustomSubservice => { + let (buf, _) = pus17_handler.pus_tc_buf(); + let (tc, size) = PusTc::from_bytes(&buf).unwrap(); + if tc.subservice() == 128 { + info!("Generating test event"); + event_sender + .send((TEST_EVENT.into(), None)) + .expect("Sending test event failed"); + let start_token = pus17_handler + .verification_handler() + .start_success(token, Some(&stamp_buf)) + .expect("Error sending start success"); + pus17_handler + .verification_handler() + .completion_success(start_token, Some(&stamp_buf)) + .expect("Error sending completion success"); + } + } + PacketHandlerResult::Empty => { + thread::sleep(Duration::from_millis(400)); + } + } res.expect("some PUS17 error"); - thread::sleep(Duration::from_millis(400)); } }) .unwrap(); diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index 01300c2..5c38663 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -45,7 +45,7 @@ pub struct PusServiceBase { verification_handler: StdVerifReporterWithSender, stamp_buf: [u8; 7], pus_buf: [u8; 2048], - handled_tcs: u32, + pus_size: usize, } impl PusServiceBase { @@ -66,7 +66,7 @@ impl PusServiceBase { verification_handler, stamp_buf: [0; 7], pus_buf: [0; 2048], - handled_tcs: 0, + pus_size: 0, } } } diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index 8ed6bd6..c3130f1 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -25,6 +25,12 @@ pub struct PusService17TestHandler { psb: PusServiceBase, } +pub enum PacketHandlerResult { + PingRequestHandled, + CustomSubservice(VerificationToken), + Empty, +} + impl PusService17TestHandler { pub fn new( receiver: Receiver, @@ -46,6 +52,14 @@ impl PusService17TestHandler { } } + pub fn verification_handler(&mut self) -> &mut StdVerifReporterWithSender { + &mut self.psb.verification_handler + } + + pub fn pus_tc_buf(&self) -> (&[u8], usize) { + (&self.psb.pus_buf, self.psb.pus_size) + } + // TODO: Return errors which occured pub fn periodic_operation(&mut self) -> Result { self.psb.handled_tcs = 0; @@ -66,7 +80,33 @@ impl PusService17TestHandler { } } } - pub fn handle_one_tc(&mut self, addr: StoreAddr, token: VerificationToken) { + + pub fn handle_next_packet(&mut self) -> Result { + match self.psb.tc_rx.try_recv() { + Ok((addr, token)) => { + if self.handle_one_tc(addr, token) { + return Ok(PacketHandlerResult::PingRequestHandled); + } else { + return Ok(PacketHandlerResult::CustomSubservice); + } + } + Err(e) => { + match e { + TryRecvError::Empty => return 
Ok(PacketHandlerResult::Empty), + TryRecvError::Disconnected => { + // TODO: Replace panic by something cleaner + panic!("PusService17Handler: Sender disconnected"); + } + } + } + } + } + + pub fn handle_one_tc( + &mut self, + addr: StoreAddr, + token: VerificationToken, + ) -> bool { let time_provider = TimeProvider::from_now_with_u16_days().unwrap(); // TODO: Better error handling { @@ -104,7 +144,9 @@ impl PusService17TestHandler { .completion_success(start_token, Some(&self.psb.stamp_buf)) .expect("Error sending completion success"); self.psb.handled_tcs += 1; + true } + false // TODO: How to handle invalid subservice? // TODO: How do we handle custom code like this? Custom subservice handler via trait? // if tc.subservice() == 128 { -- 2.43.0 From 17c5b3d93ede26c1846bf518a17ecab73a35a489 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Tue, 4 Jul 2023 21:13:26 +0200 Subject: [PATCH 06/39] continue --- satrs-example/src/main.rs | 43 ++---- satrs-example/src/pus/mod.rs | 36 +++++- satrs-example/src/pus/scheduler.rs | 27 +++- satrs-example/src/pus/test.rs | 201 ++++++++++++++++------------- 4 files changed, 178 insertions(+), 129 deletions(-) diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index bd69787..9f5eea2 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -9,7 +9,7 @@ use log::{info, warn}; use crate::hk::AcsHkIds; use crate::logging::setup_logger; -use crate::pus::test::{PacketHandlerResult, PusService17TestHandler}; +use crate::pus::test::{PusService17TestHandler, Service17CustomWrapper}; use crate::pus::PusTcMpscRouter; use crate::requests::{Request, RequestWithToken}; use crate::tmtc::{ @@ -109,6 +109,7 @@ fn main() { // The event manager will receive the RX handle to receive all the events. let (event_sender, event_man_rx) = channel(); let event_recv = MpscEventReceiver::::new(event_man_rx); + let test_srv_event_sender = event_sender.clone(); let mut event_man = EventManagerWithMpscQueue::new(Box::new(event_recv)); // All events sent to the manager are routed to the PUS event manager, which generates PUS event @@ -174,6 +175,10 @@ fn main() { tm_store.clone(), verif_reporter.clone(), ); + let mut srv_17_wrapper = Service17CustomWrapper { + pus17_handler, + test_srv_event_sender, + }; info!("Starting TMTC task"); let jh0 = thread::Builder::new() @@ -337,38 +342,10 @@ fn main() { info!("Starting PUS handler thread"); let jh4 = thread::Builder::new() .name("PUS".to_string()) - .spawn(move || { - loop { - let mut handled_pings = 0; - // TODO: Better error handling - let res = pus17_handler.handle_next_packet().unwrap(); - match res { - PacketHandlerResult::PingRequestHandled => { - handled_pings += 1; - } - PacketHandlerResult::CustomSubservice => { - let (buf, _) = pus17_handler.pus_tc_buf(); - let (tc, size) = PusTc::from_bytes(&buf).unwrap(); - if tc.subservice() == 128 { - info!("Generating test event"); - event_sender - .send((TEST_EVENT.into(), None)) - .expect("Sending test event failed"); - let start_token = pus17_handler - .verification_handler() - .start_success(token, Some(&stamp_buf)) - .expect("Error sending start success"); - pus17_handler - .verification_handler() - .completion_success(start_token, Some(&stamp_buf)) - .expect("Error sending completion success"); - } - } - PacketHandlerResult::Empty => { - thread::sleep(Duration::from_millis(400)); - } - } - res.expect("some PUS17 error"); + .spawn(move || loop { + let queue_empty = srv_17_wrapper.perform_operation(); + if queue_empty { + 
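+                // The queue was empty: back off briefly instead of busy-polling
+                // the mpsc receiver.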
thread::sleep(Duration::from_millis(400)); } }) .unwrap(); diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index 5c38663..62dd423 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -5,7 +5,7 @@ use satrs_core::hk::{CollectionIntervalFactor, HkRequest}; use satrs_core::mode::{ModeAndSubmode, ModeRequest}; use satrs_core::objects::ObjectId; use satrs_core::params::Params; -use satrs_core::pool::{PoolProvider, SharedPool, StoreAddr}; +use satrs_core::pool::{PoolProvider, SharedPool, StoreAddr, StoreError}; use satrs_core::pus::event_man::{EventRequest, EventRequestWithToken}; use satrs_core::pus::hk; use satrs_core::pus::mode::Subservice; @@ -18,8 +18,8 @@ use satrs_core::pus::{event, EcssTcSenderCore, GenericTcCheckError, MpscTmtcInSt use satrs_core::pus::{mode, EcssTcSender}; use satrs_core::res_code::ResultU16; use satrs_core::seq_count::{SeqCountProviderSyncClonable, SequenceCountProviderCore}; -use satrs_core::spacepackets::ecss::{scheduling, PusServiceId}; -use satrs_core::spacepackets::time::CcsdsTimeProvider; +use satrs_core::spacepackets::ecss::{scheduling, PusError, PusServiceId}; +use satrs_core::spacepackets::time::{CcsdsTimeProvider, StdTimestampError}; use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; use satrs_core::tmtc::{AddressableId, PusServiceProvider, TargetId}; use satrs_core::{ @@ -36,6 +36,36 @@ use std::sync::mpsc::{Receiver, SendError, Sender}; pub mod scheduler; pub mod test; +#[derive(Debug, Clone)] +pub enum PusPacketHandlingError { + PusError(PusError), + WrongService(u8), + StoreError(StoreError), + RwGuardError(String), + TimeError(StdTimestampError), + TmSendError(String), + QueueDisconnected, + OtherError(String), +} + +impl From for PusPacketHandlingError { + fn from(value: PusError) -> Self { + Self::PusError(value) + } +} + +impl From for PusPacketHandlingError { + fn from(value: StdTimestampError) -> Self { + Self::TimeError(value) + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum PusPacketHandlerResult { + RequestHandled, + CustomSubservice(VerificationToken), + Empty, +} pub struct PusServiceBase { tc_rx: Receiver, tc_store: SharedPool, diff --git a/satrs-example/src/pus/scheduler.rs b/satrs-example/src/pus/scheduler.rs index cf35b5a..5b3b418 100644 --- a/satrs-example/src/pus/scheduler.rs +++ b/satrs-example/src/pus/scheduler.rs @@ -1,4 +1,4 @@ -use crate::pus::{AcceptedTc, PusServiceBase}; +use crate::pus::{AcceptedTc, PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase}; use delegate::delegate; use satrs_core::pool::{SharedPool, StoreAddr}; use satrs_core::pus::scheduling::PusScheduler; @@ -12,7 +12,7 @@ use satrs_core::spacepackets::tc::PusTc; use satrs_core::spacepackets::time::cds::TimeProvider; use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; use satrs_example::tmtc_err; -use std::sync::mpsc::{Receiver, Sender}; +use std::sync::mpsc::{Receiver, Sender, TryRecvError}; pub struct PusService11SchedHandler { psb: PusServiceBase, @@ -41,12 +41,27 @@ impl PusService11SchedHandler { scheduler, } } - // TODO: Return errors which occured - pub fn periodic_operation(&mut self) -> Result { - Ok(self.psb.handled_tcs) + + pub fn handle_next_packet(&mut self) -> Result { + return match self.psb.tc_rx.try_recv() { + Ok((addr, token)) => { + if self.handle_one_tc(addr, token)? 
{ + return Ok(PusPacketHandlerResult::RequestHandled); + } + Ok(PusPacketHandlerResult::CustomSubservice(token)) + } + Err(e) => match e { + TryRecvError::Empty => Ok(PusPacketHandlerResult::Empty), + TryRecvError::Disconnected => Err(PusPacketHandlingError::QueueDisconnected), + }, + }; } - pub fn handle_one_tc(&mut self, addr: StoreAddr, token: VerificationToken) { + pub fn handle_one_tc( + &mut self, + addr: StoreAddr, + token: VerificationToken, + ) -> Result { let time_provider = TimeProvider::from_now_with_u16_days().unwrap(); // TODO: Better error handling { diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index c3130f1..2ec1b73 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -1,36 +1,91 @@ -use crate::pus::{AcceptedTc, PusServiceBase}; +use crate::pus::{AcceptedTc, PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase}; use delegate::delegate; -use log::info; +use log::{error, info, warn}; use satrs_core::events::EventU32; use satrs_core::params::Params; -use satrs_core::pool::{SharedPool, StoreAddr}; +use satrs_core::pool::{SharedPool, StoreAddr, StoreError}; use satrs_core::pus::verification::{ - StdVerifReporterWithSender, TcStateAccepted, VerificationToken, + FailParams, StdVerifReporterWithSender, TcStateAccepted, TcStateStarted, + VerificationOrSendErrorWithToken, VerificationToken, }; use satrs_core::seq_count::{SeqCountProviderSyncClonable, SequenceCountProviderCore}; -use satrs_core::spacepackets::ecss::PusPacket; +use satrs_core::spacepackets::ecss::{PusError, PusPacket}; use satrs_core::spacepackets::tc::PusTc; use satrs_core::spacepackets::time::cds::TimeProvider; -use satrs_core::spacepackets::time::TimeWriter; +use satrs_core::spacepackets::time::{StdTimestampError, TimeWriter}; use satrs_core::spacepackets::tm::PusTm; use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; -use satrs_example::TEST_EVENT; +use satrs_example::{tmtc_err, TEST_EVENT}; use std::sync::mpsc::{Receiver, Sender, TryRecvError}; +use std::thread; +use std::time::Duration; -pub struct SatrsTestServiceCustomHandler { - pub event_sender: Sender<(EventU32, Option)>, +pub struct Service17CustomWrapper { + pub pus17_handler: PusService17TestHandler, + pub test_srv_event_sender: Sender<(EventU32, Option)>, +} + +impl Service17CustomWrapper { + pub fn perform_operation(&mut self) -> bool { + let mut handled_pings = 0; + let res = self.pus17_handler.handle_next_packet(); + if res.is_err() { + warn!("PUS17 handler failed with error {:?}", res.unwrap_err()); + return true; + } + match res.unwrap() { + PusPacketHandlerResult::RequestHandled => { + info!("Received PUS ping command TC[17,1]"); + info!("Sent ping reply PUS TM[17,2]"); + handled_pings += 1; + } + PusPacketHandlerResult::CustomSubservice(token) => { + let (buf, _) = self.pus17_handler.pus_tc_buf(); + let (tc, size) = PusTc::from_bytes(buf).unwrap(); + let time_stamper = TimeProvider::from_now_with_u16_days().unwrap(); + let mut stamp_buf: [u8; 7] = [0; 7]; + time_stamper.write_to_bytes(&mut stamp_buf).unwrap(); + if tc.subservice() == 128 { + info!("Generating test event"); + self.test_srv_event_sender + .send((TEST_EVENT.into(), None)) + .expect("Sending test event failed"); + let start_token = self + .pus17_handler + .verification_handler() + .start_success(token, Some(&stamp_buf)) + .expect("Error sending start success"); + self.pus17_handler + .verification_handler() + .completion_success(start_token, Some(&stamp_buf)) + .expect("Error sending completion 
success"); + } else { + let fail_data = [tc.subservice()]; + self.pus17_handler + .verification_handler() + .start_failure( + token, + FailParams::new( + Some(&stamp_buf), + &tmtc_err::INVALID_PUS_SUBSERVICE, + Some(&fail_data), + ), + ) + .expect("Sending start failure verification failed"); + } + } + PusPacketHandlerResult::Empty => { + return false; + } + } + true + } } pub struct PusService17TestHandler { psb: PusServiceBase, } -pub enum PacketHandlerResult { - PingRequestHandled, - CustomSubservice(VerificationToken), - Empty, -} - impl PusService17TestHandler { pub fn new( receiver: Receiver, @@ -60,75 +115,57 @@ impl PusService17TestHandler { (&self.psb.pus_buf, self.psb.pus_size) } - // TODO: Return errors which occured - pub fn periodic_operation(&mut self) -> Result { - self.psb.handled_tcs = 0; - loop { - match self.psb.tc_rx.try_recv() { - Ok((addr, token)) => { - self.handle_one_tc(addr, token); - } - Err(e) => { - match e { - TryRecvError::Empty => return Ok(self.psb.handled_tcs), - TryRecvError::Disconnected => { - // TODO: Replace panic by something cleaner - panic!("PusService17Handler: Sender disconnected"); - } - } - } - } - } - } - - pub fn handle_next_packet(&mut self) -> Result { - match self.psb.tc_rx.try_recv() { + pub fn handle_next_packet(&mut self) -> Result { + return match self.psb.tc_rx.try_recv() { Ok((addr, token)) => { - if self.handle_one_tc(addr, token) { - return Ok(PacketHandlerResult::PingRequestHandled); - } else { - return Ok(PacketHandlerResult::CustomSubservice); + if self.handle_one_tc(addr, token)? { + return Ok(PusPacketHandlerResult::RequestHandled); } + Ok(PusPacketHandlerResult::CustomSubservice(token)) } - Err(e) => { - match e { - TryRecvError::Empty => return Ok(PacketHandlerResult::Empty), - TryRecvError::Disconnected => { - // TODO: Replace panic by something cleaner - panic!("PusService17Handler: Sender disconnected"); - } - } - } - } + Err(e) => match e { + TryRecvError::Empty => Ok(PusPacketHandlerResult::Empty), + TryRecvError::Disconnected => Err(PusPacketHandlingError::QueueDisconnected), + }, + }; } pub fn handle_one_tc( &mut self, addr: StoreAddr, token: VerificationToken, - ) -> bool { - let time_provider = TimeProvider::from_now_with_u16_days().unwrap(); - // TODO: Better error handling + ) -> Result { { // Keep locked section as short as possible. - let mut tc_pool = self.psb.tc_store.write().unwrap(); + let mut tc_pool = self + .psb + .tc_store + .write() + .map_err(|e| PusPacketHandlingError::RwGuardError(format!("{e}")))?; let tc_guard = tc_pool.read_with_guard(addr); - let tc_raw = tc_guard.read().unwrap(); + let tc_raw = tc_guard.read().expect("Reading pool guard failed"); self.psb.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); } - let (tc, tc_size) = PusTc::from_bytes(&self.psb.pus_buf).unwrap(); - // TODO: Robustness: Check that service is 17 + let (tc, tc_size) = PusTc::from_bytes(&self.psb.pus_buf)?; + if tc.service() != 17 { + return Err(PusPacketHandlingError::WrongService(tc.service())); + } if tc.subservice() == 1 { - info!("Received PUS ping command TC[17,1]"); - info!("Sending ping reply PUS TM[17,2]"); + let time_provider = TimeProvider::from_now_with_u16_days()?; + // Can not fail, buffer is large enough. 
time_provider .write_to_bytes(&mut self.psb.stamp_buf) .unwrap(); - let start_token = self + let result = self .psb .verification_handler - .start_success(token, Some(&self.psb.stamp_buf)) - .expect("Error sending start success"); + .start_success(token, Some(&self.psb.stamp_buf)); + let start_token = if result.is_err() { + error!("Could not send start success verification"); + None + } else { + Some(result.unwrap()) + }; // Sequence count will be handled centrally in TM funnel. let ping_reply = self.psb @@ -138,29 +175,19 @@ impl PusService17TestHandler { self.psb .tm_tx .send(addr) - .expect("Sending TM to TM funnel failed"); - self.psb - .verification_handler - .completion_success(start_token, Some(&self.psb.stamp_buf)) - .expect("Error sending completion success"); - self.psb.handled_tcs += 1; - true + .map_err(|e| PusPacketHandlingError::TmSendError(format!("{e}")))?; + if let Some(start_token) = start_token { + if self + .psb + .verification_handler + .completion_success(start_token, Some(&self.psb.stamp_buf)) + .is_err() + { + error!("Could not send completion success verification"); + } + } + return Ok(true); } - false - // TODO: How to handle invalid subservice? - // TODO: How do we handle custom code like this? Custom subservice handler via trait? - // if tc.subservice() == 128 { - // info!("Generating test event"); - // self.event_sender - // .send((TEST_EVENT.into(), None)) - // .expect("Sending test event failed"); - // let start_token = - // verification_handler - // .start_success(token, Some(&stamp_buf)) - // .expect("Error sending start success"); - // verification_handler - // .completion_success(start_token, Some(&stamp_buf)) - // .expect("Error sending completion success"); - // + Ok(false) } } -- 2.43.0 From 2ba93b9942df02268f5748143f8006059936c1b6 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Tue, 4 Jul 2023 22:26:41 +0200 Subject: [PATCH 07/39] some things in rust are really hard --- satrs-core/src/tmtc/tm_helper.rs | 13 +++++- satrs-example/src/main.rs | 4 +- satrs-example/src/pus/mod.rs | 73 +++++++++++++++++++++++++---- satrs-example/src/pus/scheduler.rs | 75 ++++++++++-------------------- satrs-example/src/pus/test.rs | 69 ++++++++++++++++----------- 5 files changed, 143 insertions(+), 91 deletions(-) diff --git a/satrs-core/src/tmtc/tm_helper.rs b/satrs-core/src/tmtc/tm_helper.rs index 6bc0edd..07e8c18 100644 --- a/satrs-core/src/tmtc/tm_helper.rs +++ b/satrs-core/src/tmtc/tm_helper.rs @@ -64,7 +64,7 @@ impl PusTmWithCdsShortHelper { self.create_pus_tm_common(service, subservice, source_data, seq_count) } - pub fn create_pus_tm_with_stamp<'a>( + pub fn create_pus_tm_with_stamper<'a>( &'a mut self, service: u8, subservice: u8, @@ -76,6 +76,17 @@ impl PusTmWithCdsShortHelper { self.create_pus_tm_common(service, subservice, source_data, seq_count) } + pub fn create_pus_tm_with_stamp<'a>( + &'a mut self, + service: u8, + subservice: u8, + source_data: Option<&'a [u8]>, + timestamp: &'a [u8], + seq_count: u16, + ) -> PusTm { + self.create_pus_tm_common(service, subservice, source_data, seq_count) + } + fn create_pus_tm_common<'a>( &'a self, service: u8, diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index 9f5eea2..04de260 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -170,9 +170,9 @@ fn main() { let mut pus17_handler = PusService17TestHandler::new( pus_test_rx, tc_store.pool.clone(), - PusTmWithCdsShortHelper::new(PUS_APID), tm_funnel_tx.clone(), tm_store.clone(), + PUS_APID, verif_reporter.clone(), ); let mut 
srv_17_wrapper = Service17CustomWrapper { @@ -345,7 +345,7 @@ fn main() { .spawn(move || loop { let queue_empty = srv_17_wrapper.perform_operation(); if queue_empty { - thread::sleep(Duration::from_millis(400)); + thread::sleep(Duration::from_millis(200)); } }) .unwrap(); diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index 62dd423..078ccf1 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -11,7 +11,7 @@ use satrs_core::pus::hk; use satrs_core::pus::mode::Subservice; use satrs_core::pus::scheduling::PusScheduler; use satrs_core::pus::verification::{ - pus_11_generic_tc_check, FailParams, StdVerifReporterWithSender, TcStateAccepted, + pus_11_generic_tc_check, FailParams, StdVerifReporterWithSender, TcStateAccepted, TcStateToken, VerificationToken, }; use satrs_core::pus::{event, EcssTcSenderCore, GenericTcCheckError, MpscTmtcInStoreSender}; @@ -19,7 +19,7 @@ use satrs_core::pus::{mode, EcssTcSender}; use satrs_core::res_code::ResultU16; use satrs_core::seq_count::{SeqCountProviderSyncClonable, SequenceCountProviderCore}; use satrs_core::spacepackets::ecss::{scheduling, PusError, PusServiceId}; -use satrs_core::spacepackets::time::{CcsdsTimeProvider, StdTimestampError}; +use satrs_core::spacepackets::time::{CcsdsTimeProvider, StdTimestampError, TimestampError}; use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; use satrs_core::tmtc::{AddressableId, PusServiceProvider, TargetId}; use satrs_core::{ @@ -31,7 +31,7 @@ use std::cell::RefCell; use std::collections::HashMap; use std::convert::TryFrom; use std::rc::Rc; -use std::sync::mpsc::{Receiver, SendError, Sender}; +use std::sync::mpsc::{Receiver, SendError, Sender, TryRecvError}; pub mod scheduler; pub mod test; @@ -40,10 +40,9 @@ pub mod test; pub enum PusPacketHandlingError { PusError(PusError), WrongService(u8), + NotEnoughAppData(String), StoreError(StoreError), RwGuardError(String), - TimeError(StdTimestampError), - TmSendError(String), QueueDisconnected, OtherError(String), } @@ -54,24 +53,44 @@ impl From for PusPacketHandlingError { } } -impl From for PusPacketHandlingError { +impl From for PusPacketHandlingError { + fn from(value: StoreError) -> Self { + Self::StoreError(value) + } +} + +#[derive(Debug, Clone)] +pub enum PartialPusHandlingError { + TimeError(StdTimestampError), + TmSendError(String), + VerificationError, +} +impl From for PartialPusHandlingError { fn from(value: StdTimestampError) -> Self { Self::TimeError(value) } } -#[derive(Debug, Copy, Clone, PartialEq, Eq)] +impl From for PartialPusHandlingError { + fn from(value: TimestampError) -> Self { + Self::TimeError(StdTimestampError::TimestampError(value)) + } +} + +#[derive(Debug, Clone)] pub enum PusPacketHandlerResult { RequestHandled, + RequestHandledPartialSuccess(PartialPusHandlingError), CustomSubservice(VerificationToken), Empty, } + pub struct PusServiceBase { tc_rx: Receiver, tc_store: SharedPool, - tm_helper: PusTmWithCdsShortHelper, tm_tx: Sender, tm_store: SharedTmStore, + tm_apid: u16, verification_handler: StdVerifReporterWithSender, stamp_buf: [u8; 7], pus_buf: [u8; 2048], @@ -82,15 +101,15 @@ impl PusServiceBase { pub fn new( receiver: Receiver, tc_pool: SharedPool, - tm_helper: PusTmWithCdsShortHelper, tm_tx: Sender, tm_store: SharedTmStore, + tm_apid: u16, verification_handler: StdVerifReporterWithSender, ) -> Self { Self { tc_rx: receiver, tc_store: tc_pool, - tm_helper, + tm_apid, tm_tx, tm_store, verification_handler, @@ -99,6 +118,40 @@ impl PusServiceBase { pus_size: 
0, } } + + pub fn handle_next_packet< + T: FnOnce( + StoreAddr, + VerificationToken, + ) -> Result, + >( + &mut self, + handle_one_packet: T, + ) -> Result { + return match self.tc_rx.try_recv() { + Ok((addr, token)) => handle_one_packet(addr, token), + Err(e) => match e { + TryRecvError::Empty => Ok(PusPacketHandlerResult::Empty), + TryRecvError::Disconnected => Err(PusPacketHandlingError::QueueDisconnected), + }, + }; + } + + pub fn update_stamp(&mut self) -> Result<(), PartialPusHandlingError> { + let time_provider = TimeProvider::from_now_with_u16_days() + .map_err(|e| PartialPusHandlingError::TimeError(e)); + return if time_provider.is_ok() { + // Can not fail, buffer is large enough. + time_provider + .unwrap() + .write_to_bytes(&mut self.stamp_buf) + .unwrap(); + Ok(()) + } else { + self.stamp_buf = [0; 7]; + Err(time_provider.unwrap_err()) + }; + } } // pub trait PusTcRouter { diff --git a/satrs-example/src/pus/scheduler.rs b/satrs-example/src/pus/scheduler.rs index 5b3b418..915e6e3 100644 --- a/satrs-example/src/pus/scheduler.rs +++ b/satrs-example/src/pus/scheduler.rs @@ -1,4 +1,7 @@ -use crate::pus::{AcceptedTc, PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase}; +use crate::pus::{ + AcceptedTc, PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, + PusServiceBase, +}; use delegate::delegate; use satrs_core::pool::{SharedPool, StoreAddr}; use satrs_core::pus::scheduling::PusScheduler; @@ -7,9 +10,10 @@ use satrs_core::pus::verification::{ VerificationToken, }; use satrs_core::pus::GenericTcCheckError; -use satrs_core::spacepackets::ecss::scheduling; +use satrs_core::spacepackets::ecss::{scheduling, PusPacket}; use satrs_core::spacepackets::tc::PusTc; use satrs_core::spacepackets::time::cds::TimeProvider; +use satrs_core::spacepackets::time::TimeWriter; use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; use satrs_example::tmtc_err; use std::sync::mpsc::{Receiver, Sender, TryRecvError}; @@ -23,9 +27,9 @@ impl PusService11SchedHandler { pub fn new( receiver: Receiver, tc_pool: SharedPool, - tm_helper: PusTmWithCdsShortHelper, tm_tx: Sender, tm_store: SharedTmStore, + tm_apid: u16, verification_handler: StdVerifReporterWithSender, scheduler: PusScheduler, ) -> Self { @@ -33,9 +37,9 @@ impl PusService11SchedHandler { psb: PusServiceBase::new( receiver, tc_pool, - tm_helper, tm_tx, tm_store, + tm_apid, verification_handler, ), scheduler, @@ -44,68 +48,36 @@ impl PusService11SchedHandler { pub fn handle_next_packet(&mut self) -> Result { return match self.psb.tc_rx.try_recv() { - Ok((addr, token)) => { - if self.handle_one_tc(addr, token)? { - return Ok(PusPacketHandlerResult::RequestHandled); - } - Ok(PusPacketHandlerResult::CustomSubservice(token)) - } + Ok((addr, token)) => self.handle_one_tc(addr, token), Err(e) => match e { TryRecvError::Empty => Ok(PusPacketHandlerResult::Empty), TryRecvError::Disconnected => Err(PusPacketHandlingError::QueueDisconnected), }, }; } - pub fn handle_one_tc( &mut self, addr: StoreAddr, token: VerificationToken, - ) -> Result { - let time_provider = TimeProvider::from_now_with_u16_days().unwrap(); - // TODO: Better error handling + ) -> Result { + let mut partial_result = self.psb.update_stamp().err(); { // Keep locked section as short as possible. 
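            // A failed or poisoned pool lock is now mapped into a
            // PusPacketHandlingError instead of panicking, mirroring the error
            // handling introduced for the test service handler.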
- let mut tc_pool = self.psb.tc_store.write().unwrap(); + let mut tc_pool = self + .psb + .tc_store + .write() + .map_err(|e| PusPacketHandlingError::RwGuardError(format!("{e}")))?; let tc_guard = tc_pool.read_with_guard(addr); let tc_raw = tc_guard.read().unwrap(); self.psb.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); } let (tc, tc_size) = PusTc::from_bytes(&self.psb.pus_buf).unwrap(); - let subservice = match pus_11_generic_tc_check(&tc) { - Ok(subservice) => subservice, - Err(e) => match e { - GenericTcCheckError::NotEnoughAppData => { - self.psb - .verification_handler - .start_failure( - token, - FailParams::new( - Some(&self.psb.stamp_buf), - &tmtc_err::NOT_ENOUGH_APP_DATA, - None, - ), - ) - .expect("could not sent verification error"); - return; - } - GenericTcCheckError::InvalidSubservice => { - self.psb - .verification_handler - .start_failure( - token, - FailParams::new( - Some(&self.psb.stamp_buf), - &tmtc_err::INVALID_PUS_SUBSERVICE, - None, - ), - ) - .expect("could not sent verification error"); - return; - } - }, - }; - match subservice { + let std_service = scheduling::Subservice::try_from(tc.subservice()); + if std_service.is_err() { + return Ok(PusPacketHandlerResult::CustomSubservice(token)); + } + match std_service.unwrap() { scheduling::Subservice::TcEnableScheduling => { let start_token = self .psb @@ -175,7 +147,10 @@ impl PusService11SchedHandler { .completion_success(start_token, Some(&self.psb.stamp_buf)) .expect("sending completion success failed"); } - _ => {} + _ => { + return Ok(PusPacketHandlerResult::CustomSubservice(token)); + } } + Ok(PusPacketHandlerResult::CustomSubservice(token)) } } diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index 2ec1b73..93b75fe 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -1,4 +1,7 @@ -use crate::pus::{AcceptedTc, PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase}; +use crate::pus::{ + AcceptedTc, PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, + PusServiceBase, +}; use delegate::delegate; use log::{error, info, warn}; use satrs_core::events::EventU32; @@ -13,7 +16,8 @@ use satrs_core::spacepackets::ecss::{PusError, PusPacket}; use satrs_core::spacepackets::tc::PusTc; use satrs_core::spacepackets::time::cds::TimeProvider; use satrs_core::spacepackets::time::{StdTimestampError, TimeWriter}; -use satrs_core::spacepackets::tm::PusTm; +use satrs_core::spacepackets::tm::{PusTm, PusTmSecondaryHeader}; +use satrs_core::spacepackets::SpHeader; use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; use satrs_example::{tmtc_err, TEST_EVENT}; use std::sync::mpsc::{Receiver, Sender, TryRecvError}; @@ -39,6 +43,13 @@ impl Service17CustomWrapper { info!("Sent ping reply PUS TM[17,2]"); handled_pings += 1; } + PusPacketHandlerResult::RequestHandledPartialSuccess(partial_err) => { + warn!( + "Handled PUS ping command with partial success: {:?}", + partial_err + ); + handled_pings += 1; + } PusPacketHandlerResult::CustomSubservice(token) => { let (buf, _) = self.pus17_handler.pus_tc_buf(); let (tc, size) = PusTc::from_bytes(buf).unwrap(); @@ -90,18 +101,18 @@ impl PusService17TestHandler { pub fn new( receiver: Receiver, tc_pool: SharedPool, - tm_helper: PusTmWithCdsShortHelper, tm_tx: Sender, tm_store: SharedTmStore, + tm_apid: u16, verification_handler: StdVerifReporterWithSender, ) -> Self { Self { psb: PusServiceBase::new( receiver, tc_pool, - tm_helper, tm_tx, tm_store, + tm_apid, verification_handler, ), } @@ 
-117,12 +128,7 @@ impl PusService17TestHandler { pub fn handle_next_packet(&mut self) -> Result { return match self.psb.tc_rx.try_recv() { - Ok((addr, token)) => { - if self.handle_one_tc(addr, token)? { - return Ok(PusPacketHandlerResult::RequestHandled); - } - Ok(PusPacketHandlerResult::CustomSubservice(token)) - } + Ok((addr, token)) => self.handle_one_tc(addr, token), Err(e) => match e { TryRecvError::Empty => Ok(PusPacketHandlerResult::Empty), TryRecvError::Disconnected => Err(PusPacketHandlingError::QueueDisconnected), @@ -134,7 +140,8 @@ impl PusService17TestHandler { &mut self, addr: StoreAddr, token: VerificationToken, - ) -> Result { + ) -> Result { + let mut partial_result = None; { // Keep locked section as short as possible. let mut tc_pool = self @@ -143,39 +150,40 @@ impl PusService17TestHandler { .write() .map_err(|e| PusPacketHandlingError::RwGuardError(format!("{e}")))?; let tc_guard = tc_pool.read_with_guard(addr); - let tc_raw = tc_guard.read().expect("Reading pool guard failed"); + let tc_raw = tc_guard.read()?; self.psb.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); } + let mut partial_error = None; let (tc, tc_size) = PusTc::from_bytes(&self.psb.pus_buf)?; if tc.service() != 17 { return Err(PusPacketHandlingError::WrongService(tc.service())); } if tc.subservice() == 1 { - let time_provider = TimeProvider::from_now_with_u16_days()?; - // Can not fail, buffer is large enough. - time_provider - .write_to_bytes(&mut self.psb.stamp_buf) - .unwrap(); + partial_result = self.psb.update_stamp().err(); let result = self .psb .verification_handler - .start_success(token, Some(&self.psb.stamp_buf)); + .start_success(token, Some(&self.psb.stamp_buf)) + .map_err(|e| PartialPusHandlingError::VerificationError); let start_token = if result.is_err() { - error!("Could not send start success verification"); + partial_error = Some(result.unwrap_err()); None } else { Some(result.unwrap()) }; // Sequence count will be handled centrally in TM funnel. 
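The handler distinguishes hard failures, which abort packet handling through PusPacketHandlingError, from soft failures, which are only recorded as PartialPusHandlingError and reported via the RequestHandledPartialSuccess variant. A condensed sketch of this pattern, assuming the types introduced by this patch (the function and its arguments are illustrative):

    use crate::pus::{PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError};
    use satrs_core::spacepackets::tc::PusTc;

    // Illustrative only: accumulate soft errors, abort on hard ones.
    fn handle_with_partial_errors(
        raw_tc: &[u8],
        verification_ok: bool,
    ) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
        let mut partial_error: Option<PartialPusHandlingError> = None;
        // Hard failure: a malformed TC aborts handling via the ? operator.
        let (_tc, _size) = PusTc::from_bytes(raw_tc)?;
        // Soft failure: a missed verification TM is only recorded.
        if !verification_ok {
            partial_error = Some(PartialPusHandlingError::VerificationError);
        }
        Ok(match partial_error {
            Some(e) => PusPacketHandlerResult::RequestHandledPartialSuccess(e),
            None => PusPacketHandlerResult::RequestHandled,
        })
    }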
- let ping_reply = - self.psb - .tm_helper - .create_pus_tm_with_stamp(17, 2, None, &time_provider, 0); + let mut reply_header = SpHeader::tm_unseg(self.psb.tm_apid, 0, 0).unwrap(); + let tc_header = PusTmSecondaryHeader::new_simple(17, 2, &self.psb.stamp_buf); + let ping_reply = PusTm::new(&mut reply_header, tc_header, None, true); let addr = self.psb.tm_store.add_pus_tm(&ping_reply); - self.psb + if let Err(e) = self + .psb .tm_tx .send(addr) - .map_err(|e| PusPacketHandlingError::TmSendError(format!("{e}")))?; + .map_err(|e| PartialPusHandlingError::TmSendError(format!("{e}"))) + { + partial_error = Some(e); + } if let Some(start_token) = start_token { if self .psb @@ -183,11 +191,16 @@ impl PusService17TestHandler { .completion_success(start_token, Some(&self.psb.stamp_buf)) .is_err() { - error!("Could not send completion success verification"); + partial_error = Some(PartialPusHandlingError::VerificationError) } } - return Ok(true); + if partial_error.is_some() { + return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess( + partial_error.unwrap(), + )); + } + return Ok(PusPacketHandlerResult::RequestHandled); } - Ok(false) + Ok(PusPacketHandlerResult::CustomSubservice(token)) } } -- 2.43.0 From 3f47474393f6ef53d96b074d9268df8f2f07cbe0 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 09:37:34 +0200 Subject: [PATCH 08/39] try another trait --- satrs-example/src/pus/test.rs | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index 93b75fe..e8dc0c2 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -93,6 +93,24 @@ impl Service17CustomWrapper { } } +pub trait PusServiceHandler { + fn psb(&mut self) -> &mut PusServiceBase; + fn handle_one_tc( + &mut self, + addr: StoreAddr, + token: VerificationToken, + ) -> Result; + fn handle_next_packet(&mut self) -> Result { + return match self.psb().tc_rx.try_recv() { + Ok((addr, token)) => self.handle_one_tc(addr, token), + Err(e) => match e { + TryRecvError::Empty => Ok(PusPacketHandlerResult::Empty), + TryRecvError::Disconnected => Err(PusPacketHandlingError::QueueDisconnected), + }, + }; + } + +} pub struct PusService17TestHandler { psb: PusServiceBase, } @@ -125,18 +143,14 @@ impl PusService17TestHandler { pub fn pus_tc_buf(&self) -> (&[u8], usize) { (&self.psb.pus_buf, self.psb.pus_size) } +} - pub fn handle_next_packet(&mut self) -> Result { - return match self.psb.tc_rx.try_recv() { - Ok((addr, token)) => self.handle_one_tc(addr, token), - Err(e) => match e { - TryRecvError::Empty => Ok(PusPacketHandlerResult::Empty), - TryRecvError::Disconnected => Err(PusPacketHandlingError::QueueDisconnected), - }, - }; +impl PusServiceHandler for PusService17TestHandler { + fn psb(&mut self) -> &mut PusServiceBase { + &mut self.psb } - pub fn handle_one_tc( + fn handle_one_tc( &mut self, addr: StoreAddr, token: VerificationToken, @@ -203,4 +217,4 @@ impl PusService17TestHandler { } Ok(PusPacketHandlerResult::CustomSubservice(token)) } -} +} \ No newline at end of file -- 2.43.0 From 9a40301c0fa6be3658f424b3620513cd5dbc33b0 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 09:39:35 +0200 Subject: [PATCH 09/39] this seems to work --- satrs-example/src/pus/scheduler.rs | 10 +++++++++- satrs-example/src/pus/test.rs | 3 +-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/satrs-example/src/pus/scheduler.rs b/satrs-example/src/pus/scheduler.rs index 915e6e3..7b7ffd1 
100644 --- a/satrs-example/src/pus/scheduler.rs +++ b/satrs-example/src/pus/scheduler.rs @@ -1,3 +1,4 @@ +use crate::pus::test::PusServiceHandler; use crate::pus::{ AcceptedTc, PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase, @@ -55,7 +56,14 @@ impl PusService11SchedHandler { }, }; } - pub fn handle_one_tc( +} + +impl PusServiceHandler for PusService11SchedHandler { + fn psb(&mut self) -> &mut PusServiceBase { + &mut self.psb + } + + fn handle_one_tc( &mut self, addr: StoreAddr, token: VerificationToken, diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index e8dc0c2..7765c21 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -109,7 +109,6 @@ pub trait PusServiceHandler { }, }; } - } pub struct PusService17TestHandler { psb: PusServiceBase, @@ -217,4 +216,4 @@ impl PusServiceHandler for PusService17TestHandler { } Ok(PusPacketHandlerResult::CustomSubservice(token)) } -} \ No newline at end of file +} -- 2.43.0 From d2e896fc92b4b23c066b25e7403632b885282bf5 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 11:25:23 +0200 Subject: [PATCH 10/39] more cleaning and restructuring --- satrs-core/Cargo.toml | 7 +- satrs-core/src/pus/mod.rs | 169 +++- .../src/pus/{scheduling.rs => scheduler.rs} | 761 +++++++++--------- satrs-core/src/pus/scheduler_srv.rs | 168 ++++ satrs-core/src/pus/test.rs | 114 +++ satrs-core/src/pus/verification.rs | 18 +- satrs-core/src/tmtc/tm_helper.rs | 12 - satrs-example/src/main.rs | 14 +- satrs-example/src/pus/mod.rs | 172 +--- satrs-example/src/pus/scheduler.rs | 163 ---- satrs-example/src/pus/test.rs | 163 +--- satrs-example/src/tmtc.rs | 13 +- satrs-mib/codegen/src/lib.rs | 2 +- 13 files changed, 855 insertions(+), 921 deletions(-) rename satrs-core/src/pus/{scheduling.rs => scheduler.rs} (74%) create mode 100644 satrs-core/src/pus/scheduler_srv.rs create mode 100644 satrs-core/src/pus/test.rs diff --git a/satrs-core/Cargo.toml b/satrs-core/Cargo.toml index a6d8da1..871b3c8 100644 --- a/satrs-core/Cargo.toml +++ b/satrs-core/Cargo.toml @@ -51,6 +51,10 @@ version= "0.5" default-features = false optional = true +[dependencies.thiserror] +version = "1" +optional = true + [dependencies.serde] version = "1" default-features = false @@ -82,7 +86,8 @@ std = [ "crossbeam-channel/std", "serde/std", "spacepackets/std", - "num_enum/std" + "num_enum/std", + "thiserror" ] alloc = [ "serde/alloc", diff --git a/satrs-core/src/pus/mod.rs b/satrs-core/src/pus/mod.rs index c74e587..d43f8ed 100644 --- a/satrs-core/src/pus/mod.rs +++ b/satrs-core/src/pus/mod.rs @@ -13,8 +13,10 @@ pub mod event; pub mod event_man; pub mod hk; pub mod mode; +pub mod scheduler; +pub mod scheduler_srv; #[cfg(feature = "std")] -pub mod scheduling; +pub mod test; pub mod verification; #[cfg(feature = "alloc")] @@ -133,40 +135,36 @@ mod alloc_mod { #[cfg(feature = "std")] pub mod std_mod { use crate::pool::{ShareablePoolProvider, SharedPool, StoreAddr, StoreError}; + use crate::pus::verification::{ + StdVerifReporterWithSender, TcStateAccepted, VerificationToken, + }; use crate::pus::{EcssSender, EcssTcSenderCore, EcssTmSenderCore}; + use crate::tmtc::tm_helper::SharedTmStore; use crate::SenderId; use alloc::vec::Vec; use spacepackets::ecss::{PusError, SerializablePusPacket}; use spacepackets::tc::PusTc; + use spacepackets::time::cds::TimeProvider; + use spacepackets::time::{StdTimestampError, TimeWriter}; use spacepackets::tm::PusTm; - use std::sync::mpsc::SendError; + use std::string::String; use 
std::sync::{mpsc, RwLockWriteGuard}; + use thiserror::Error; - #[derive(Debug, Clone)] + #[derive(Debug, Clone, Error)] pub enum MpscPusInStoreSendError { + #[error("RwGuard lock error")] LockError, - PusError(PusError), - StoreError(StoreError), - SendError(SendError), + #[error("Generic PUS error: {0}")] + PusError(#[from] PusError), + #[error("Generic store error: {0}")] + StoreError(#[from] StoreError), + #[error("Generic send error: {0}")] + SendError(#[from] mpsc::SendError), + #[error("RX handle has disconnected")] RxDisconnected(StoreAddr), } - impl From for MpscPusInStoreSendError { - fn from(value: PusError) -> Self { - MpscPusInStoreSendError::PusError(value) - } - } - impl From> for MpscPusInStoreSendError { - fn from(value: SendError) -> Self { - MpscPusInStoreSendError::SendError(value) - } - } - impl From for MpscPusInStoreSendError { - fn from(value: StoreError) -> Self { - MpscPusInStoreSendError::StoreError(value) - } - } - #[derive(Clone)] pub struct MpscTmtcInStoreSender { id: SenderId, @@ -246,7 +244,7 @@ pub mod std_mod { #[derive(Debug, Clone)] pub enum MpscAsVecSenderError { PusError(PusError), - SendError(SendError>), + SendError(mpsc::SendError>), } #[derive(Debug, Clone)] @@ -284,12 +282,127 @@ pub mod std_mod { Ok(()) } } -} -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum GenericTcCheckError { - NotEnoughAppData, - InvalidSubservice, + #[derive(Debug, Clone, Error)] + pub enum PusPacketHandlingError { + #[error("Generic PUS error: {0}")] + PusError(#[from] PusError), + #[error("Wrong service number {0} for packet handler")] + WrongService(u8), + #[error("Not enough application data available: {0}")] + NotEnoughAppData(String), + #[error("Generic store error: {0}")] + StoreError(#[from] StoreError), + #[error("Error with the pool RwGuard")] + RwGuardError(String), + #[error("MQ backend disconnect error")] + QueueDisconnected, + #[error("Other error {0}")] + OtherError(String), + } + + #[derive(Debug, Clone, Error)] + pub enum PartialPusHandlingError { + #[error("Generic timestamp generation error")] + TimeError(StdTimestampError), + #[error("Error sending telemetry: {0}")] + TmSendError(String), + #[error("Error sending verification message")] + VerificationError, + } + + #[derive(Debug, Clone)] + pub enum PusPacketHandlerResult { + RequestHandled, + RequestHandledPartialSuccess(PartialPusHandlingError), + CustomSubservice(VerificationToken), + Empty, + } + + impl From for PusPacketHandlerResult { + fn from(value: PartialPusHandlingError) -> Self { + Self::RequestHandledPartialSuccess(value) + } + } + + pub type AcceptedTc = (StoreAddr, VerificationToken); + + pub struct PusServiceBase { + pub(crate) tc_rx: mpsc::Receiver, + pub(crate) tc_store: SharedPool, + pub(crate) tm_tx: mpsc::Sender, + pub(crate) tm_store: SharedTmStore, + pub(crate) tm_apid: u16, + pub(crate) verification_handler: StdVerifReporterWithSender, + pub(crate) stamp_buf: [u8; 7], + pub(crate) pus_buf: [u8; 2048], + pus_size: usize, + } + + impl PusServiceBase { + pub fn new( + receiver: mpsc::Receiver, + tc_pool: SharedPool, + tm_tx: mpsc::Sender, + tm_store: SharedTmStore, + tm_apid: u16, + verification_handler: StdVerifReporterWithSender, + ) -> Self { + Self { + tc_rx: receiver, + tc_store: tc_pool, + tm_apid, + tm_tx, + tm_store, + verification_handler, + stamp_buf: [0; 7], + pus_buf: [0; 2048], + pus_size: 0, + } + } + + pub fn update_stamp(&mut self) -> Result<(), PartialPusHandlingError> { + let time_provider = + 
TimeProvider::from_now_with_u16_days().map_err(PartialPusHandlingError::TimeError); + if let Ok(time_provider) = time_provider { + time_provider.write_to_bytes(&mut self.stamp_buf).unwrap(); + Ok(()) + } else { + self.stamp_buf = [0; 7]; + Err(time_provider.unwrap_err()) + } + } + } + + pub trait PusServiceHandler { + fn psb_mut(&mut self) -> &mut PusServiceBase; + fn psb(&self) -> &PusServiceBase; + fn verification_reporter(&mut self) -> &mut StdVerifReporterWithSender { + &mut self.psb_mut().verification_handler + } + fn tc_store(&mut self) -> &mut SharedPool { + &mut self.psb_mut().tc_store + } + fn pus_tc_buf(&self) -> (&[u8], usize) { + (&self.psb().pus_buf, self.psb().pus_size) + } + fn handle_one_tc( + &mut self, + addr: StoreAddr, + token: VerificationToken, + ) -> Result; + fn handle_next_packet(&mut self) -> Result { + return match self.psb().tc_rx.try_recv() { + Ok((addr, token)) => self.handle_one_tc(addr, token), + Err(e) => match e { + mpsc::TryRecvError::Empty => Ok(PusPacketHandlerResult::Empty), + mpsc::TryRecvError::Disconnected => { + Err(PusPacketHandlingError::QueueDisconnected) + } + }, + }; + } + } } pub(crate) fn source_buffer_large_enough(cap: usize, len: usize) -> Result<(), EcssTmtcError> { diff --git a/satrs-core/src/pus/scheduling.rs b/satrs-core/src/pus/scheduler.rs similarity index 74% rename from satrs-core/src/pus/scheduling.rs rename to satrs-core/src/pus/scheduler.rs index cf3d8e0..0406f83 100644 --- a/satrs-core/src/pus/scheduling.rs +++ b/satrs-core/src/pus/scheduler.rs @@ -2,25 +2,24 @@ //! //! The core data structure of this module is the [PusScheduler]. This structure can be used //! to perform the scheduling of telecommands like specified in the ECSS standard. -use crate::pool::{PoolProvider, StoreAddr, StoreError}; -use alloc::collections::btree_map::{Entry, Range}; -use alloc::vec; -use alloc::vec::Vec; +use crate::pool::{StoreAddr, StoreError}; use core::fmt::{Debug, Display, Formatter}; use core::time::Duration; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use spacepackets::ecss::scheduling::TimeWindowType; -use spacepackets::ecss::{PusError, PusPacket}; +use spacepackets::ecss::PusError; use spacepackets::tc::{GenericPusTcSecondaryHeader, PusTc}; -use spacepackets::time::cds::DaysLen24Bits; -use spacepackets::time::{cds, CcsdsTimeProvider, TimeReader, TimestampError, UnixTimestamp}; +use spacepackets::time::{CcsdsTimeProvider, TimestampError, UnixTimestamp}; use spacepackets::CcsdsPacket; -use std::collections::BTreeMap; #[cfg(feature = "std")] use std::error::Error; -#[cfg(feature = "std")] -use std::time::SystemTimeError; + +//#[cfg(feature = "std")] +//pub use std_mod::*; + +#[cfg(feature = "alloc")] +pub use alloc_mod::*; /// This is the request ID as specified in ECSS-E-ST-70-41C 5.4.11.2 of the standard. /// @@ -171,35 +170,6 @@ impl TcInfo { } } -/// This is the core data structure for scheduling PUS telecommands with [alloc] support. -/// -/// It is assumed that the actual telecommand data is stored in a separate TC pool offering -/// a [crate::pool::PoolProvider] API. This data structure just tracks the store addresses and their -/// release times and offers a convenient API to insert and release telecommands and perform -/// other functionality specified by the ECSS standard in section 6.11. The time is tracked -/// as a [spacepackets::time::UnixTimestamp] but the only requirement to the timekeeping of -/// the user is that it is convertible to that timestamp. 
-/// -/// The standard also specifies that the PUS scheduler can be enabled and disabled. -/// A disabled scheduler should still delete commands where the execution time has been reached -/// but should not release them to be executed. -/// -/// The implementation uses an ordered map internally with the release timestamp being the key. -/// This allows efficient time based insertions and extractions which should be the primary use-case -/// for a time-based command scheduler. -/// There is no way to avoid duplicate [RequestId]s during insertion, which can occur even if the -/// user always correctly increment for sequence counter due to overflows. To avoid this issue, -/// it can make sense to split up telecommand groups by the APID to avoid overflows. -/// -/// Currently, sub-schedules and groups are not supported. -#[derive(Debug)] -pub struct PusScheduler { - tc_map: BTreeMap>, - current_time: UnixTimestamp, - time_margin: Duration, - enabled: bool, -} - enum DeletionResult { WithoutStoreDeletion(Option), WithStoreDeletion(Result), @@ -259,360 +229,410 @@ impl TimeWindow { } } -impl PusScheduler { - /// Create a new PUS scheduler. - /// - /// # Arguments - /// - /// * `init_current_time` - The time to initialize the scheduler with. - /// * `time_margin` - This time margin is used when inserting new telecommands into the - /// schedule. If the release time of a new telecommand is earlier than the time margin - /// added to the current time, it will not be inserted into the schedule. - pub fn new(init_current_time: UnixTimestamp, time_margin: Duration) -> Self { - PusScheduler { - tc_map: Default::default(), - current_time: init_current_time, - time_margin, - enabled: true, - } - } +#[cfg(feature = "alloc")] +pub mod alloc_mod { + use crate::pool::{PoolProvider, StoreAddr, StoreError}; + use crate::pus::scheduler::{DeletionResult, RequestId, ScheduleError, TcInfo, TimeWindow}; + use alloc::collections::btree_map::{Entry, Range}; + use alloc::collections::BTreeMap; + use alloc::vec; + use alloc::vec::Vec; + use core::time::Duration; + use spacepackets::ecss::scheduling::TimeWindowType; + use spacepackets::ecss::PusPacket; + use spacepackets::tc::PusTc; + use spacepackets::time::cds::DaysLen24Bits; + use spacepackets::time::{cds, CcsdsTimeProvider, TimeReader, UnixTimestamp}; - /// Like [Self::new], but sets the `init_current_time` parameter to the current system time. #[cfg(feature = "std")] - #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] - pub fn new_with_current_init_time(time_margin: Duration) -> Result { - Ok(Self::new(UnixTimestamp::from_now()?, time_margin)) - } - - pub fn num_scheduled_telecommands(&self) -> u64 { - let mut num_entries = 0; - for entries in &self.tc_map { - num_entries += entries.1.len() as u64; - } - num_entries - } - - pub fn is_enabled(&self) -> bool { - self.enabled - } - - pub fn enable(&mut self) { - self.enabled = true; - } + use std::time::SystemTimeError; + /// This is the core data structure for scheduling PUS telecommands with [alloc] support. + /// + /// It is assumed that the actual telecommand data is stored in a separate TC pool offering + /// a [crate::pool::PoolProvider] API. This data structure just tracks the store addresses and their + /// release times and offers a convenient API to insert and release telecommands and perform + /// other functionality specified by the ECSS standard in section 6.11. 
The time is tracked + /// as a [spacepackets::time::UnixTimestamp] but the only requirement to the timekeeping of + /// the user is that it is convertible to that timestamp. + /// + /// The standard also specifies that the PUS scheduler can be enabled and disabled. /// A disabled scheduler should still delete commands where the execution time has been reached /// but should not release them to be executed. - pub fn disable(&mut self) { - self.enabled = false; - } - - /// This will disable the scheduler and clear the schedule as specified in 6.11.4.4. - /// Be careful with this command as it will delete all the commands in the schedule. /// - /// The holding store for the telecommands needs to be passed so all the stored telecommands - /// can be deleted to avoid a memory leak. If at last one deletion operation fails, the error - /// will be returned but the method will still try to delete all the commands in the schedule. - pub fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), StoreError> { - self.enabled = false; - let mut deletion_ok = Ok(()); - for tc_lists in &mut self.tc_map { - for tc in tc_lists.1 { - let res = store.delete(tc.addr); - if res.is_err() { - deletion_ok = res; - } - } - } - self.tc_map.clear(); - deletion_ok - } - - pub fn update_time(&mut self, current_time: UnixTimestamp) { - self.current_time = current_time; - } - - pub fn current_time(&self) -> &UnixTimestamp { - &self.current_time - } - - /// Insert a telecommand which was already unwrapped from the outer Service 11 packet and stored - /// inside the telecommand packet pool. - pub fn insert_unwrapped_and_stored_tc( - &mut self, - time_stamp: UnixTimestamp, - info: TcInfo, - ) -> Result<(), ScheduleError> { - if time_stamp < self.current_time + self.time_margin { - return Err(ScheduleError::ReleaseTimeInTimeMargin( - self.current_time, - self.time_margin, - time_stamp, - )); - } - match self.tc_map.entry(time_stamp) { - Entry::Vacant(e) => { - e.insert(vec![info]); - } - Entry::Occupied(mut v) => { - v.get_mut().push(info); - } - } - Ok(()) - } - - /// Insert a telecommand which was already unwrapped from the outer Service 11 packet but still - /// needs to be stored inside the telecommand pool. - pub fn insert_unwrapped_tc( - &mut self, - time_stamp: UnixTimestamp, - tc: &[u8], - pool: &mut (impl PoolProvider + ?Sized), - ) -> Result { - let check_tc = PusTc::from_bytes(tc)?; - if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 { - return Err(ScheduleError::NestedScheduledTc); - } - let req_id = RequestId::from_tc(&check_tc.0); - - match pool.add(tc) { - Ok(addr) => { - let info = TcInfo::new(addr, req_id); - self.insert_unwrapped_and_stored_tc(time_stamp, info)?; - Ok(info) - } - Err(err) => Err(err.into()), - } - } - - /// Insert a telecommand based on the fully wrapped time-tagged telecommand. The timestamp - /// provider needs to be supplied via a generic. 
- pub fn insert_wrapped_tc<TimeStamp: CcsdsTimeProvider + TimeReader>( - &mut self, - pus_tc: &PusTc, - pool: &mut (impl PoolProvider + ?Sized), - ) -> Result<TcInfo, ScheduleError> { - if PusPacket::service(pus_tc) != 11 { - return Err(ScheduleError::WrongService); - } - if PusPacket::subservice(pus_tc) != 4 { - return Err(ScheduleError::WrongSubservice); - } - return if let Some(user_data) = pus_tc.user_data() { - let stamp: TimeStamp = TimeReader::from_bytes(user_data)?; - let unix_stamp = stamp.unix_stamp(); - let stamp_len = stamp.len_as_bytes(); - self.insert_unwrapped_tc(unix_stamp, &user_data[stamp_len..], pool) - } else { - Err(ScheduleError::TcDataEmpty) - }; - } - - /// Insert a telecommand based on the fully wrapped time-tagged telecommand using a CDS - /// short timestamp with 16-bit length of days field. - pub fn insert_wrapped_tc_cds_short( - &mut self, - pus_tc: &PusTc, - pool: &mut (impl PoolProvider + ?Sized), - ) -> Result<TcInfo, ScheduleError> { - self.insert_wrapped_tc::<cds::TimeProvider>(pus_tc, pool) - } - - /// Insert a telecommand based on the fully wrapped time-tagged telecommand using a CDS - /// long timestamp with a 24-bit length of days field. - pub fn insert_wrapped_tc_cds_long( - &mut self, - pus_tc: &PusTc, - pool: &mut (impl PoolProvider + ?Sized), - ) -> Result<TcInfo, ScheduleError> { - self.insert_wrapped_tc::<cds::TimeProvider<DaysLen24Bits>>(pus_tc, pool) - } - - /// This function uses [Self::retrieve_by_time_filter] to extract all scheduled commands inside - /// the time range and then deletes them from the provided store. + /// The implementation uses an ordered map internally with the release timestamp being the key. + /// This allows efficient time based insertions and extractions which should be the primary use-case + /// for a time-based command scheduler. + /// There is no way to avoid duplicate [RequestId]s during insertion, which can occur even if the + /// user always increments the sequence counter correctly, due to overflows. To avoid this issue, + /// it can make sense to split up telecommand groups by APID. /// - /// Like specified in the documentation of [Self::retrieve_by_time_filter], the range extraction - /// for deletion is always inclusive. - /// - /// This function returns the number of deleted commands on success. In case any deletion fails, - /// the last deletion will be supplied in addition to the number of deleted commands. - pub fn delete_by_time_filter<TimeProvider: CcsdsTimeProvider>( - &mut self, - time_window: TimeWindow<TimeProvider>, - pool: &mut (impl PoolProvider + ?Sized), - ) -> Result<u64, (u64, StoreError)> { - let range = self.retrieve_by_time_filter(time_window); - let mut del_packets = 0; - let mut res_if_fails = None; - let mut keys_to_delete = Vec::new(); - for time_bucket in range { - for tc in time_bucket.1 { - match pool.delete(tc.addr) { - Ok(_) => del_packets += 1, - Err(e) => res_if_fails = Some(e), - } - } - keys_to_delete.push(*time_bucket.0); - } - for key in keys_to_delete { - self.tc_map.remove(&key); - } - if let Some(err) = res_if_fails { - return Err((del_packets, err)); - } - Ok(del_packets) + /// Currently, sub-schedules and groups are not supported. + #[derive(Debug)] + pub struct PusScheduler { + tc_map: BTreeMap<UnixTimestamp, Vec<TcInfo>>, + pub(crate) current_time: UnixTimestamp, + time_margin: Duration, + enabled: bool, } - - /// Deletes all the scheduled commands. This also deletes the packets from the passed TC pool. - /// - /// This function returns the number of deleted commands on success. In case any deletion fails, - /// the last deletion will be supplied in addition to the number of deleted commands. 
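The ordered-map layout described in the documentation above can be illustrated with plain standard library types. This stand-in sketch uses u64 seconds in place of UnixTimestamp and u32 tokens in place of TcInfo:

    use std::collections::BTreeMap;

    fn main() {
        // Release time in seconds mapped to the telecommands due at that time.
        let mut tc_map: BTreeMap<u64, Vec<u32>> = BTreeMap::new();
        tc_map.entry(100).or_default().push(1);
        tc_map.entry(100).or_default().push(2); // same release time, same bucket
        tc_map.entry(250).or_default().push(3);
        // Releasing everything due at or before t = 120 is a cheap range query.
        let due: Vec<u32> = tc_map.range(..=120).flat_map(|(_, v)| v.clone()).collect();
        assert_eq!(due, vec![1, 2]);
    }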
- pub fn delete_all( - &mut self, - pool: &mut (impl PoolProvider + ?Sized), - ) -> Result { - self.delete_by_time_filter(TimeWindow::::new_select_all(), pool) - } - - /// Retrieve a range over all scheduled commands. - pub fn retrieve_all(&mut self) -> Range<'_, UnixTimestamp, Vec> { - self.tc_map.range(..) - } - - /// This retrieves scheduled telecommands which are inside the provided time window. - /// - /// It should be noted that the ranged extraction is always inclusive. For example, a range - /// from 50 to 100 unix seconds would also include command scheduled at 100 unix seconds. - pub fn retrieve_by_time_filter( - &mut self, - time_window: TimeWindow, - ) -> Range<'_, UnixTimestamp, Vec> { - match time_window.time_window_type() { - TimeWindowType::SelectAll => self.tc_map.range(..), - TimeWindowType::TimeTagToTimeTag => { - // This should be guaranteed to be valid by library API, so unwrap is okay - let start_time = time_window.start_time().unwrap().unix_stamp(); - let end_time = time_window.end_time().unwrap().unix_stamp(); - self.tc_map.range(start_time..=end_time) - } - TimeWindowType::FromTimeTag => { - // This should be guaranteed to be valid by library API, so unwrap is okay - let start_time = time_window.start_time().unwrap().unix_stamp(); - self.tc_map.range(start_time..) - } - TimeWindowType::ToTimeTag => { - // This should be guaranteed to be valid by library API, so unwrap is okay - let end_time = time_window.end_time().unwrap().unix_stamp(); - self.tc_map.range(..=end_time) + impl PusScheduler { + /// Create a new PUS scheduler. + /// + /// # Arguments + /// + /// * `init_current_time` - The time to initialize the scheduler with. + /// * `time_margin` - This time margin is used when inserting new telecommands into the + /// schedule. If the release time of a new telecommand is earlier than the time margin + /// added to the current time, it will not be inserted into the schedule. + pub fn new(init_current_time: UnixTimestamp, time_margin: Duration) -> Self { + PusScheduler { + tc_map: Default::default(), + current_time: init_current_time, + time_margin, + enabled: true, } } - } - /// Deletes a scheduled command with the given request ID. Returns the store address if a - /// scheduled command was found in the map and deleted, and None otherwise. - /// - /// Please note that this function will stop on the first telecommand with a request ID match. - /// In case of duplicate IDs (which should generally not happen), this function needs to be - /// called repeatedly. - pub fn delete_by_request_id(&mut self, req_id: &RequestId) -> Option { - if let DeletionResult::WithoutStoreDeletion(v) = - self.delete_by_request_id_internal(req_id, None::<&mut dyn PoolProvider>) - { - return v; + /// Like [Self::new], but sets the `init_current_time` parameter to the current system time. + #[cfg(feature = "std")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] + pub fn new_with_current_init_time(time_margin: Duration) -> Result { + Ok(Self::new(UnixTimestamp::from_now()?, time_margin)) } - panic!("unexpected deletion result"); - } - /// This behaves like [Self::delete_by_request_id] but deletes the packet from the pool as well. 
- pub fn delete_by_request_id_and_from_pool( - &mut self, - req_id: &RequestId, - pool: &mut (impl PoolProvider + ?Sized), - ) -> Result { - if let DeletionResult::WithStoreDeletion(v) = - self.delete_by_request_id_internal(req_id, Some(pool)) - { - return v; - } - panic!("unexpected deletion result"); - } - - fn delete_by_request_id_internal( - &mut self, - req_id: &RequestId, - pool: Option<&mut (impl PoolProvider + ?Sized)>, - ) -> DeletionResult { - let mut idx_found = None; - for time_bucket in &mut self.tc_map { - for (idx, tc_info) in time_bucket.1.iter().enumerate() { - if &tc_info.request_id == req_id { - idx_found = Some(idx); - } - } - if let Some(idx) = idx_found { - let addr = time_bucket.1.remove(idx).addr; - if let Some(pool) = pool { - return match pool.delete(addr) { - Ok(_) => DeletionResult::WithStoreDeletion(Ok(true)), - Err(e) => DeletionResult::WithStoreDeletion(Err(e)), - }; - } - return DeletionResult::WithoutStoreDeletion(Some(addr)); + pub fn num_scheduled_telecommands(&self) -> u64 { + let mut num_entries = 0; + for entries in &self.tc_map { + num_entries += entries.1.len() as u64; } + num_entries } - if pool.is_none() { - DeletionResult::WithoutStoreDeletion(None) - } else { - DeletionResult::WithStoreDeletion(Ok(false)) + + pub fn is_enabled(&self) -> bool { + self.enabled } - } - /// Retrieve all telecommands which should be release based on the current time. - pub fn telecommands_to_release(&self) -> Range<'_, UnixTimestamp, Vec> { - self.tc_map.range(..=self.current_time) - } - #[cfg(feature = "std")] - #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] - pub fn update_time_from_now(&mut self) -> Result<(), SystemTimeError> { - self.current_time = UnixTimestamp::from_now()?; - Ok(()) - } + pub fn enable(&mut self) { + self.enabled = true; + } - /// Utility method which calls [Self::telecommands_to_release] and then calls a releaser - /// closure for each telecommand which should be released. This function will also delete - /// the telecommands from the holding store after calling the release closure, if the scheduler - /// is disabled. - /// - /// # Arguments - /// - /// * `releaser` - Closure where the first argument is whether the scheduler is enabled and - /// the second argument is the telecommand information also containing the store address. - /// This closure should return whether the command should be deleted if the scheduler is - /// disabled to prevent memory leaks. - /// * `store` - The holding store of the telecommands. - pub fn release_telecommands bool>( - &mut self, - mut releaser: R, - tc_store: &mut (impl PoolProvider + ?Sized), - ) -> Result { - let tcs_to_release = self.telecommands_to_release(); - let mut released_tcs = 0; - let mut store_error = Ok(()); - for tc in tcs_to_release { - for info in tc.1 { - let should_delete = releaser(self.enabled, info); - released_tcs += 1; - if should_delete && !self.is_enabled() { - let res = tc_store.delete(info.addr); + /// A disabled scheduler should still delete commands where the execution time has been reached + /// but should not release them to be executed. + pub fn disable(&mut self) { + self.enabled = false; + } + + /// This will disable the scheduler and clear the schedule as specified in 6.11.4.4. + /// Be careful with this command as it will delete all the commands in the schedule. + /// + /// The holding store for the telecommands needs to be passed so all the stored telecommands + /// can be deleted to avoid a memory leak. 
If at least one deletion operation fails, the error + /// will be returned but the method will still try to delete all the commands in the schedule. + pub fn reset( + &mut self, + store: &mut (impl PoolProvider + ?Sized), + ) -> Result<(), StoreError> { + self.enabled = false; + let mut deletion_ok = Ok(()); + for tc_lists in &mut self.tc_map { + for tc in tc_lists.1 { + let res = store.delete(tc.addr); + if res.is_err() { + deletion_ok = res; + } + } + } + self.tc_map.clear(); + deletion_ok + } + + pub fn update_time(&mut self, current_time: UnixTimestamp) { + self.current_time = current_time; + } + + pub fn current_time(&self) -> &UnixTimestamp { + &self.current_time + } + + /// Insert a telecommand which was already unwrapped from the outer Service 11 packet and stored + /// inside the telecommand packet pool. + pub fn insert_unwrapped_and_stored_tc( + &mut self, + time_stamp: UnixTimestamp, + info: TcInfo, + ) -> Result<(), ScheduleError> { + if time_stamp < self.current_time + self.time_margin { + return Err(ScheduleError::ReleaseTimeInTimeMargin( + self.current_time, + self.time_margin, + time_stamp, + )); + } + match self.tc_map.entry(time_stamp) { + Entry::Vacant(e) => { + e.insert(vec![info]); + } + Entry::Occupied(mut v) => { + v.get_mut().push(info); + } + } + Ok(()) + } + + /// Insert a telecommand which was already unwrapped from the outer Service 11 packet but still + /// needs to be stored inside the telecommand pool. + pub fn insert_unwrapped_tc( + &mut self, + time_stamp: UnixTimestamp, + tc: &[u8], + pool: &mut (impl PoolProvider + ?Sized), + ) -> Result<TcInfo, ScheduleError> { + let check_tc = PusTc::from_bytes(tc)?; + if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 { + return Err(ScheduleError::NestedScheduledTc); + } + let req_id = RequestId::from_tc(&check_tc.0); + + match pool.add(tc) { + Ok(addr) => { + let info = TcInfo::new(addr, req_id); + self.insert_unwrapped_and_stored_tc(time_stamp, info)?; + Ok(info) + } + Err(err) => Err(err.into()), + } + } + + /// Insert a telecommand based on the fully wrapped time-tagged telecommand. The timestamp + /// provider needs to be supplied via a generic. + pub fn insert_wrapped_tc<TimeStamp: CcsdsTimeProvider + TimeReader>( + &mut self, + pus_tc: &PusTc, + pool: &mut (impl PoolProvider + ?Sized), + ) -> Result<TcInfo, ScheduleError> { + if PusPacket::service(pus_tc) != 11 { + return Err(ScheduleError::WrongService); + } + if PusPacket::subservice(pus_tc) != 4 { + return Err(ScheduleError::WrongSubservice); + } + return if let Some(user_data) = pus_tc.user_data() { + let stamp: TimeStamp = TimeReader::from_bytes(user_data)?; + let unix_stamp = stamp.unix_stamp(); + let stamp_len = stamp.len_as_bytes(); + self.insert_unwrapped_tc(unix_stamp, &user_data[stamp_len..], pool) + } else { + Err(ScheduleError::TcDataEmpty) + }; + } + + /// Insert a telecommand based on the fully wrapped time-tagged telecommand using a CDS + /// short timestamp with 16-bit length of days field. + pub fn insert_wrapped_tc_cds_short( + &mut self, + pus_tc: &PusTc, + pool: &mut (impl PoolProvider + ?Sized), + ) -> Result<TcInfo, ScheduleError> { + self.insert_wrapped_tc::<cds::TimeProvider>(pus_tc, pool) + } + + /// Insert a telecommand based on the fully wrapped time-tagged telecommand using a CDS + /// long timestamp with a 24-bit length of days field. 
+ pub fn insert_wrapped_tc_cds_long( + &mut self, + pus_tc: &PusTc, + pool: &mut (impl PoolProvider + ?Sized), + ) -> Result<TcInfo, ScheduleError> { + self.insert_wrapped_tc::<cds::TimeProvider<DaysLen24Bits>>(pus_tc, pool) + } + + /// This function uses [Self::retrieve_by_time_filter] to extract all scheduled commands inside + /// the time range and then deletes them from the provided store. + /// + /// As specified in the documentation of [Self::retrieve_by_time_filter], the range extraction + /// for deletion is always inclusive. + /// + /// This function returns the number of deleted commands on success. In case any deletion fails, + /// the last deletion will be supplied in addition to the number of deleted commands. + pub fn delete_by_time_filter<TimeProvider: CcsdsTimeProvider>( + &mut self, + time_window: TimeWindow<TimeProvider>, + pool: &mut (impl PoolProvider + ?Sized), + ) -> Result<u64, (u64, StoreError)> { + let range = self.retrieve_by_time_filter(time_window); + let mut del_packets = 0; + let mut res_if_fails = None; + let mut keys_to_delete = Vec::new(); + for time_bucket in range { + for tc in time_bucket.1 { + match pool.delete(tc.addr) { + Ok(_) => del_packets += 1, + Err(e) => res_if_fails = Some(e), + } + } + keys_to_delete.push(*time_bucket.0); + } + for key in keys_to_delete { + self.tc_map.remove(&key); + } + if let Some(err) = res_if_fails { + return Err((del_packets, err)); + } + Ok(del_packets) + } + + /// Deletes all the scheduled commands. This also deletes the packets from the passed TC pool. + /// + /// This function returns the number of deleted commands on success. In case any deletion fails, + /// the last deletion will be supplied in addition to the number of deleted commands. + pub fn delete_all( + &mut self, + pool: &mut (impl PoolProvider + ?Sized), + ) -> Result<u64, (u64, StoreError)> { + self.delete_by_time_filter(TimeWindow::<cds::TimeProvider>::new_select_all(), pool) + } + + /// Retrieve a range over all scheduled commands. + pub fn retrieve_all(&mut self) -> Range<'_, UnixTimestamp, Vec<TcInfo>> { + self.tc_map.range(..) + } + + /// This retrieves scheduled telecommands which are inside the provided time window. + /// + /// It should be noted that the ranged extraction is always inclusive. For example, a range + /// from 50 to 100 unix seconds would also include commands scheduled at 100 unix seconds. + pub fn retrieve_by_time_filter<TimeProvider: CcsdsTimeProvider>( + &mut self, + time_window: TimeWindow<TimeProvider>, + ) -> Range<'_, UnixTimestamp, Vec<TcInfo>> { + match time_window.time_window_type() { + TimeWindowType::SelectAll => self.tc_map.range(..), + TimeWindowType::TimeTagToTimeTag => { + // This should be guaranteed to be valid by library API, so unwrap is okay + let start_time = time_window.start_time().unwrap().unix_stamp(); + let end_time = time_window.end_time().unwrap().unix_stamp(); + self.tc_map.range(start_time..=end_time) + } + TimeWindowType::FromTimeTag => { + // This should be guaranteed to be valid by library API, so unwrap is okay + let start_time = time_window.start_time().unwrap().unix_stamp(); + self.tc_map.range(start_time..) + } + TimeWindowType::ToTimeTag => { + // This should be guaranteed to be valid by library API, so unwrap is okay + let end_time = time_window.end_time().unwrap().unix_stamp(); + self.tc_map.range(..=end_time) + } + } + } + + /// Deletes a scheduled command with the given request ID. Returns the store address if a + /// scheduled command was found in the map and deleted, and None otherwise. + /// + /// Please note that this function will stop on the first telecommand with a request ID match. + /// In case of duplicate IDs (which should generally not happen), this function needs to be + /// called repeatedly. 
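A short usage sketch for the time-window API above, assuming a LocalPool as the backing TC store (the pool configuration values and the use of UnixTimestamp::new_only_seconds are illustrative assumptions):

    use core::time::Duration;
    use satrs_core::pool::{LocalPool, PoolCfg};
    use satrs_core::pus::scheduler::{PusScheduler, TimeWindow};
    use satrs_core::spacepackets::time::cds::TimeProvider;
    use satrs_core::spacepackets::time::UnixTimestamp;

    fn main() {
        let mut pool = LocalPool::new(PoolCfg::new(vec![(16, 64)]));
        let mut scheduler =
            PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
        // A select-all window deletes every scheduled TC and frees its pool slot.
        let window = TimeWindow::<TimeProvider>::new_select_all();
        let deleted = scheduler
            .delete_by_time_filter(window, &mut pool)
            .expect("deletion failed");
        assert_eq!(deleted, 0); // nothing was scheduled yet
    }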
+ pub fn delete_by_request_id(&mut self, req_id: &RequestId) -> Option<StoreAddr> { + if let DeletionResult::WithoutStoreDeletion(v) = + self.delete_by_request_id_internal(req_id, None::<&mut dyn PoolProvider>) + { + return v; + } + panic!("unexpected deletion result"); + } + + /// This behaves like [Self::delete_by_request_id] but deletes the packet from the pool as well. + pub fn delete_by_request_id_and_from_pool( + &mut self, + req_id: &RequestId, + pool: &mut (impl PoolProvider + ?Sized), + ) -> Result<bool, StoreError> { + if let DeletionResult::WithStoreDeletion(v) = + self.delete_by_request_id_internal(req_id, Some(pool)) + { + return v; + } + panic!("unexpected deletion result"); + } + + fn delete_by_request_id_internal( + &mut self, + req_id: &RequestId, + pool: Option<&mut (impl PoolProvider + ?Sized)>, + ) -> DeletionResult { + let mut idx_found = None; + for time_bucket in &mut self.tc_map { + for (idx, tc_info) in time_bucket.1.iter().enumerate() { + if &tc_info.request_id == req_id { + idx_found = Some(idx); + } + } + if let Some(idx) = idx_found { + let addr = time_bucket.1.remove(idx).addr; + if let Some(pool) = pool { + return match pool.delete(addr) { + Ok(_) => DeletionResult::WithStoreDeletion(Ok(true)), + Err(e) => DeletionResult::WithStoreDeletion(Err(e)), + }; + } + return DeletionResult::WithoutStoreDeletion(Some(addr)); + } + } + if pool.is_none() { + DeletionResult::WithoutStoreDeletion(None) + } else { + DeletionResult::WithStoreDeletion(Ok(false)) + } + } + /// Retrieve all telecommands which should be released based on the current time. + pub fn telecommands_to_release(&self) -> Range<'_, UnixTimestamp, Vec<TcInfo>> { + self.tc_map.range(..=self.current_time) + } + + #[cfg(feature = "std")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] + pub fn update_time_from_now(&mut self) -> Result<(), SystemTimeError> { + self.current_time = UnixTimestamp::from_now()?; + Ok(()) + } + + /// Utility method which calls [Self::telecommands_to_release] and then calls a releaser + /// closure for each telecommand which should be released. This function will also delete + /// the telecommands from the holding store after calling the release closure, if the scheduler + /// is disabled. + /// + /// # Arguments + /// + /// * `releaser` - Closure where the first argument is whether the scheduler is enabled and + /// the second argument is the telecommand information also containing the store address. + /// This closure should return whether the command should be deleted if the scheduler is + /// disabled to prevent memory leaks. + /// * `store` - The holding store of the telecommands. 
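A usage sketch for the release_telecommands utility documented above, whose definition follows below, assuming a LocalPool backing store; a real releaser closure would forward the TC's store address to the executing component instead of just returning true:

    use core::time::Duration;
    use satrs_core::pool::{LocalPool, PoolCfg};
    use satrs_core::pus::scheduler::{PusScheduler, TcInfo};

    fn main() {
        let mut pool = LocalPool::new(PoolCfg::new(vec![(16, 64)]));
        let mut scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
            .expect("creating scheduler failed");
        scheduler.update_time_from_now().expect("time update failed");
        // First argument: whether the scheduler is enabled. Returning true
        // permits deletion from the pool if the scheduler is disabled.
        let releaser = |_enabled: bool, _info: &TcInfo| true;
        let released = scheduler
            .release_telecommands(releaser, &mut pool)
            .expect("release failed");
        assert_eq!(released, 0); // empty schedule, nothing released
    }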
+ pub fn release_telecommands bool>( + &mut self, + mut releaser: R, + tc_store: &mut (impl PoolProvider + ?Sized), + ) -> Result { + let tcs_to_release = self.telecommands_to_release(); + let mut released_tcs = 0; + let mut store_error = Ok(()); + for tc in tcs_to_release { + for info in tc.1 { + let should_delete = releaser(self.enabled, info); + released_tcs += 1; + if should_delete && !self.is_enabled() { + let res = tc_store.delete(info.addr); + if res.is_err() { + store_error = res; + } + } + } + } + self.tc_map.retain(|k, _| k > &self.current_time); + store_error + .map(|_| released_tcs) + .map_err(|e| (released_tcs, e)) } - self.tc_map.retain(|k, _| k > &self.current_time); - store_error - .map(|_| released_tcs) - .map_err(|e| (released_tcs, e)) } } @@ -620,6 +640,7 @@ impl PusScheduler { mod tests { use super::*; use crate::pool::{LocalPool, PoolCfg, PoolProvider, StoreAddr, StoreError}; + use alloc::collections::btree_map::Range; use spacepackets::ecss::SerializablePusPacket; use spacepackets::tc::{PusTc, PusTcSecondaryHeader}; use spacepackets::time::{cds, TimeWriter, UnixTimestamp}; diff --git a/satrs-core/src/pus/scheduler_srv.rs b/satrs-core/src/pus/scheduler_srv.rs new file mode 100644 index 0000000..80bd2b5 --- /dev/null +++ b/satrs-core/src/pus/scheduler_srv.rs @@ -0,0 +1,168 @@ +use crate::pool::{SharedPool, StoreAddr}; +use crate::pus::scheduler::PusScheduler; +use crate::pus::verification::{StdVerifReporterWithSender, TcStateAccepted, VerificationToken}; +use crate::pus::{ + AcceptedTc, PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, + PusServiceBase, PusServiceHandler, +}; +use crate::tmtc::tm_helper::SharedTmStore; +use spacepackets::ecss::{scheduling, PusPacket}; +use spacepackets::tc::PusTc; +use spacepackets::time::cds::TimeProvider; +use spacepackets::time::TimeWriter; +use std::format; +use std::sync::mpsc::{Receiver, Sender}; + +pub struct PusService11SchedHandler { + psb: PusServiceBase, + scheduler: PusScheduler, +} + +impl PusService11SchedHandler { + pub fn new( + receiver: Receiver, + tc_pool: SharedPool, + tm_tx: Sender, + tm_store: SharedTmStore, + tm_apid: u16, + verification_handler: StdVerifReporterWithSender, + scheduler: PusScheduler, + ) -> Self { + Self { + psb: PusServiceBase::new( + receiver, + tc_pool, + tm_tx, + tm_store, + tm_apid, + verification_handler, + ), + scheduler, + } + } +} + +impl PusServiceHandler for PusService11SchedHandler { + fn psb_mut(&mut self) -> &mut PusServiceBase { + &mut self.psb + } + fn psb(&self) -> &PusServiceBase { + &self.psb + } + + fn handle_one_tc( + &mut self, + addr: StoreAddr, + token: VerificationToken, + ) -> Result { + { + // Keep locked section as short as possible. 
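The comment above marks a pattern used by all of these handlers: the pool guard is confined to a small block so the RwLock is only held while the raw TC is copied out, and parsing happens afterwards. A generic sketch of the idea with standard library types only (the function and its names are illustrative):

    use std::sync::RwLock;

    // Copy data out under the lock, process it after the guard is dropped.
    fn read_out(pool: &RwLock<Vec<u8>>, buf: &mut [u8]) -> usize {
        let len;
        {
            let guard = pool.read().expect("lock poisoned");
            len = guard.len().min(buf.len());
            buf[..len].copy_from_slice(&guard[..len]);
        } // guard dropped here; the lock was only held for the copy
        len
    }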
+ let mut tc_pool = self + .psb + .tc_store + .write() + .map_err(|e| PusPacketHandlingError::RwGuardError(format!("{e}")))?; + let tc_guard = tc_pool.read_with_guard(addr); + let tc_raw = tc_guard.read().unwrap(); + self.psb.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); + } + let (tc, _) = PusTc::from_bytes(&self.psb.pus_buf).unwrap(); + let std_service = scheduling::Subservice::try_from(tc.subservice()); + if std_service.is_err() { + return Ok(PusPacketHandlerResult::CustomSubservice(token)); + } + //let partial_error = self.psb.update_stamp().err(); + let time_provider = + TimeProvider::from_now_with_u16_days().map_err(PartialPusHandlingError::TimeError); + let partial_error = if let Ok(time_provider) = time_provider { + time_provider + .write_to_bytes(&mut self.psb.stamp_buf) + .unwrap(); + Ok(()) + } else { + self.psb.stamp_buf = [0; 7]; + Err(time_provider.unwrap_err()) + }; + let partial_error = partial_error.err(); + match std_service.unwrap() { + scheduling::Subservice::TcEnableScheduling => { + let start_token = self + .psb + .verification_handler + .start_success(token, Some(&self.psb.stamp_buf)) + .expect("Error sending start success"); + + self.scheduler.enable(); + if self.scheduler.is_enabled() { + self.psb + .verification_handler + .completion_success(start_token, Some(&self.psb.stamp_buf)) + .expect("Error sending completion success"); + } else { + panic!("Failed to enable scheduler"); + } + } + scheduling::Subservice::TcDisableScheduling => { + let start_token = self + .psb + .verification_handler + .start_success(token, Some(&self.psb.stamp_buf)) + .expect("Error sending start success"); + + self.scheduler.disable(); + if !self.scheduler.is_enabled() { + self.psb + .verification_handler + .completion_success(start_token, Some(&self.psb.stamp_buf)) + .expect("Error sending completion success"); + } else { + panic!("Failed to disable scheduler"); + } + } + scheduling::Subservice::TcResetScheduling => { + let start_token = self + .psb + .verification_handler + .start_success(token, Some(&self.psb.stamp_buf)) + .expect("Error sending start success"); + + let mut pool = self.psb.tc_store.write().expect("Locking pool failed"); + + self.scheduler + .reset(pool.as_mut()) + .expect("Error resetting TC Pool"); + + self.psb + .verification_handler + .completion_success(start_token, Some(&self.psb.stamp_buf)) + .expect("Error sending completion success"); + } + scheduling::Subservice::TcInsertActivity => { + let start_token = self + .psb + .verification_handler + .start_success(token, Some(&self.psb.stamp_buf)) + .expect("error sending start success"); + + let mut pool = self.psb.tc_store.write().expect("locking pool failed"); + self.scheduler + .insert_wrapped_tc::(&tc, pool.as_mut()) + .expect("insertion of activity into pool failed"); + + self.psb + .verification_handler + .completion_success(start_token, Some(&self.psb.stamp_buf)) + .expect("sending completion success failed"); + } + _ => { + return Ok(PusPacketHandlerResult::CustomSubservice(token)); + } + } + if let Some(partial_error) = partial_error { + return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess( + partial_error, + )); + } + Ok(PusPacketHandlerResult::CustomSubservice(token)) + } +} diff --git a/satrs-core/src/pus/test.rs b/satrs-core/src/pus/test.rs new file mode 100644 index 0000000..397fc14 --- /dev/null +++ b/satrs-core/src/pus/test.rs @@ -0,0 +1,114 @@ +use crate::pool::{SharedPool, StoreAddr}; +use crate::pus::verification::{StdVerifReporterWithSender, TcStateAccepted, VerificationToken}; +use 
crate::pus::{ + AcceptedTc, PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, + PusServiceBase, PusServiceHandler, +}; +use crate::tmtc::tm_helper::SharedTmStore; +use spacepackets::ecss::PusPacket; +use spacepackets::tc::PusTc; +use spacepackets::tm::{PusTm, PusTmSecondaryHeader}; +use spacepackets::SpHeader; +use std::format; +use std::sync::mpsc::{Receiver, Sender}; + +pub struct PusService17TestHandler { + psb: PusServiceBase, +} + +impl PusService17TestHandler { + pub fn new( + receiver: Receiver, + tc_pool: SharedPool, + tm_tx: Sender, + tm_store: SharedTmStore, + tm_apid: u16, + verification_handler: StdVerifReporterWithSender, + ) -> Self { + Self { + psb: PusServiceBase::new( + receiver, + tc_pool, + tm_tx, + tm_store, + tm_apid, + verification_handler, + ), + } + } +} + +impl PusServiceHandler for PusService17TestHandler { + fn psb_mut(&mut self) -> &mut PusServiceBase { + &mut self.psb + } + fn psb(&self) -> &PusServiceBase { + &self.psb + } + + fn handle_one_tc( + &mut self, + addr: StoreAddr, + token: VerificationToken, + ) -> Result { + { + // Keep locked section as short as possible. + let mut tc_pool = self + .psb + .tc_store + .write() + .map_err(|e| PusPacketHandlingError::RwGuardError(format!("{e}")))?; + let tc_guard = tc_pool.read_with_guard(addr); + let tc_raw = tc_guard.read()?; + self.psb.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); + } + let (tc, _) = PusTc::from_bytes(&self.psb.pus_buf)?; + if tc.service() != 17 { + return Err(PusPacketHandlingError::WrongService(tc.service())); + } + if tc.subservice() == 1 { + let mut partial_error = self.psb.update_stamp().err(); + let result = self + .psb + .verification_handler + .start_success(token, Some(&self.psb.stamp_buf)) + .map_err(|_| PartialPusHandlingError::VerificationError); + let start_token = if let Ok(result) = result { + Some(result) + } else { + partial_error = Some(result.unwrap_err()); + None + }; + // Sequence count will be handled centrally in TM funnel. + let mut reply_header = SpHeader::tm_unseg(self.psb.tm_apid, 0, 0).unwrap(); + let tc_header = PusTmSecondaryHeader::new_simple(17, 2, &self.psb.stamp_buf); + let ping_reply = PusTm::new(&mut reply_header, tc_header, None, true); + let addr = self.psb.tm_store.add_pus_tm(&ping_reply); + if let Err(e) = self + .psb + .tm_tx + .send(addr) + .map_err(|e| PartialPusHandlingError::TmSendError(format!("{e}"))) + { + partial_error = Some(e); + } + if let Some(start_token) = start_token { + if self + .psb + .verification_handler + .completion_success(start_token, Some(&self.psb.stamp_buf)) + .is_err() + { + partial_error = Some(PartialPusHandlingError::VerificationError) + } + } + if let Some(partial_error) = partial_error { + return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess( + partial_error, + )); + }; + return Ok(PusPacketHandlerResult::RequestHandled); + } + Ok(PusPacketHandlerResult::CustomSubservice(token)) + } +} diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs index f9efd16..6597302 100644 --- a/satrs-core/src/pus/verification.rs +++ b/satrs-core/src/pus/verification.rs @@ -74,7 +74,6 @@ //! 
context involving multiple threads use crate::pus::{ source_buffer_large_enough, EcssTmSenderCore, EcssTmtcError, EcssTmtcErrorWithSend, - GenericTcCheckError, }; use core::fmt::{Debug, Display, Formatter}; use core::hash::{Hash, Hasher}; @@ -84,7 +83,7 @@ use core::mem::size_of; use delegate::delegate; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use spacepackets::ecss::{scheduling, EcssEnumeration, PusPacket, SerializablePusPacket}; +use spacepackets::ecss::{EcssEnumeration, SerializablePusPacket}; use spacepackets::tc::PusTc; use spacepackets::tm::{PusTm, PusTmSecondaryHeader}; use spacepackets::{CcsdsPacket, PacketId, PacketSequenceCtrl}; @@ -1519,21 +1518,6 @@ mod stdmod { } } -pub fn pus_11_generic_tc_check( - pus_tc: &PusTc, -) -> Result { - if pus_tc.user_data().is_none() { - return Err(GenericTcCheckError::NotEnoughAppData); - } - let subservice: scheduling::Subservice = match pus_tc.subservice().try_into() { - Ok(subservice) => subservice, - Err(_) => { - return Err(GenericTcCheckError::InvalidSubservice); - } - }; - Ok(subservice) -} - #[cfg(test)] mod tests { use crate::pool::{LocalPool, PoolCfg, SharedPool}; diff --git a/satrs-core/src/tmtc/tm_helper.rs b/satrs-core/src/tmtc/tm_helper.rs index 07e8c18..2ef099c 100644 --- a/satrs-core/src/tmtc/tm_helper.rs +++ b/satrs-core/src/tmtc/tm_helper.rs @@ -1,4 +1,3 @@ -use spacepackets::ecss::SerializablePusPacket; use spacepackets::time::cds::TimeProvider; use spacepackets::time::TimeWriter; use spacepackets::tm::{PusTm, PusTmSecondaryHeader}; @@ -76,17 +75,6 @@ impl PusTmWithCdsShortHelper { self.create_pus_tm_common(service, subservice, source_data, seq_count) } - pub fn create_pus_tm_with_stamp<'a>( - &'a mut self, - service: u8, - subservice: u8, - source_data: Option<&'a [u8]>, - timestamp: &'a [u8], - seq_count: u16, - ) -> PusTm { - self.create_pus_tm_common(service, subservice, source_data, seq_count) - } - fn create_pus_tm_common<'a>( &'a self, service: u8, diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index 04de260..a9bdc57 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -9,7 +9,7 @@ use log::{info, warn}; use crate::hk::AcsHkIds; use crate::logging::setup_logger; -use crate::pus::test::{PusService17TestHandler, Service17CustomWrapper}; +use crate::pus::test::Service17CustomWrapper; use crate::pus::PusTcMpscRouter; use crate::requests::{Request, RequestWithToken}; use crate::tmtc::{ @@ -26,25 +26,25 @@ use satrs_core::pus::event_man::{ PusEventDispatcher, }; use satrs_core::pus::hk::Subservice as HkSubservice; +use satrs_core::pus::test::PusService17TestHandler; use satrs_core::pus::verification::{ MpscVerifSender, VerificationReporterCfg, VerificationReporterWithSender, }; use satrs_core::pus::MpscTmtcInStoreSender; use satrs_core::seq_count::{SeqCountProviderSimple, SeqCountProviderSyncClonable}; -use satrs_core::spacepackets::tc::{GenericPusTcSecondaryHeader, PusTc}; use satrs_core::spacepackets::{ time::cds::TimeProvider, time::TimeWriter, tm::{PusTm, PusTmSecondaryHeader}, SequenceFlags, SpHeader, }; -use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; +use satrs_core::tmtc::tm_helper::SharedTmStore; use satrs_core::tmtc::AddressableId; -use satrs_example::{RequestTargetId, OBSW_SERVER_ADDR, SERVER_PORT, TEST_EVENT}; +use satrs_example::{RequestTargetId, OBSW_SERVER_ADDR, SERVER_PORT}; use std::collections::HashMap; use std::net::{IpAddr, SocketAddr}; use std::sync::mpsc::{channel, TryRecvError}; -use std::sync::{mpsc, Arc, RwLock}; 
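With pus_11_generic_tc_check removed, the service handlers now perform the subservice check inline via TryFrom, as the new scheduler service does above. A self-contained sketch of that pattern (the TC is constructed purely for demonstration):

    use satrs_core::spacepackets::ecss::{scheduling, PusPacket, SerializablePusPacket};
    use satrs_core::spacepackets::tc::PusTc;
    use satrs_core::spacepackets::SpHeader;

    fn main() {
        // Build a TC[11,4] (insert activity) just to have a packet to parse.
        let mut sp_header = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
        let tc = PusTc::new_simple(&mut sp_header, 11, 4, None, true);
        let mut buf = [0u8; 64];
        let size = tc.write_to_bytes(&mut buf).expect("writing TC failed");
        let (parsed, _) = PusTc::from_bytes(&buf[..size]).expect("parsing TC failed");
        // Standard subservices convert cleanly; unknown ones yield an error and
        // can be routed onwards as custom subservices instead of hard failures.
        match scheduling::Subservice::try_from(parsed.subservice()) {
            Ok(subservice) => println!("standard subservice: {:?}", subservice),
            Err(_) => println!("custom subservice: {}", parsed.subservice()),
        }
    }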
+use std::sync::{Arc, RwLock}; use std::thread; use std::time::Duration; @@ -167,13 +167,13 @@ fn main() { hk_service_receiver: pus_hk_tx, action_service_receiver: pus_action_tx, }; - let mut pus17_handler = PusService17TestHandler::new( + let pus17_handler = PusService17TestHandler::new( pus_test_rx, tc_store.pool.clone(), tm_funnel_tx.clone(), tm_store.clone(), PUS_APID, - verif_reporter.clone(), + verif_reporter, ); let mut srv_17_wrapper = Service17CustomWrapper { pus17_handler, diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index 078ccf1..4e1941b 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -1,177 +1,26 @@ -use crate::pus::test::PusService17TestHandler; use crate::tmtc::MpscStoreAndSendError; use satrs_core::events::EventU32; -use satrs_core::hk::{CollectionIntervalFactor, HkRequest}; -use satrs_core::mode::{ModeAndSubmode, ModeRequest}; -use satrs_core::objects::ObjectId; use satrs_core::params::Params; -use satrs_core::pool::{PoolProvider, SharedPool, StoreAddr, StoreError}; -use satrs_core::pus::event_man::{EventRequest, EventRequestWithToken}; -use satrs_core::pus::hk; -use satrs_core::pus::mode::Subservice; -use satrs_core::pus::scheduling::PusScheduler; -use satrs_core::pus::verification::{ - pus_11_generic_tc_check, FailParams, StdVerifReporterWithSender, TcStateAccepted, TcStateToken, - VerificationToken, -}; -use satrs_core::pus::{event, EcssTcSenderCore, GenericTcCheckError, MpscTmtcInStoreSender}; -use satrs_core::pus::{mode, EcssTcSender}; -use satrs_core::res_code::ResultU16; -use satrs_core::seq_count::{SeqCountProviderSyncClonable, SequenceCountProviderCore}; -use satrs_core::spacepackets::ecss::{scheduling, PusError, PusServiceId}; -use satrs_core::spacepackets::time::{CcsdsTimeProvider, StdTimestampError, TimestampError}; +use satrs_core::pool::StoreAddr; +use satrs_core::pus::verification::{FailParams, StdVerifReporterWithSender}; +use satrs_core::pus::AcceptedTc; +use satrs_core::seq_count::SeqCountProviderSyncClonable; +use satrs_core::spacepackets::ecss::PusServiceId; +use satrs_core::spacepackets::tc::PusTc; +use satrs_core::spacepackets::time::cds::TimeProvider; +use satrs_core::spacepackets::time::TimeWriter; use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; -use satrs_core::tmtc::{AddressableId, PusServiceProvider, TargetId}; -use satrs_core::{ - spacepackets::ecss::PusPacket, spacepackets::tc::PusTc, spacepackets::time::cds::TimeProvider, - spacepackets::time::TimeWriter, spacepackets::SpHeader, -}; -use satrs_example::{hk_err, tmtc_err, CustomPusServiceId, TEST_EVENT}; -use std::cell::RefCell; -use std::collections::HashMap; -use std::convert::TryFrom; -use std::rc::Rc; -use std::sync::mpsc::{Receiver, SendError, Sender, TryRecvError}; +use satrs_example::{tmtc_err, CustomPusServiceId}; +use std::sync::mpsc::Sender; pub mod scheduler; pub mod test; -#[derive(Debug, Clone)] -pub enum PusPacketHandlingError { - PusError(PusError), - WrongService(u8), - NotEnoughAppData(String), - StoreError(StoreError), - RwGuardError(String), - QueueDisconnected, - OtherError(String), -} - -impl From for PusPacketHandlingError { - fn from(value: PusError) -> Self { - Self::PusError(value) - } -} - -impl From for PusPacketHandlingError { - fn from(value: StoreError) -> Self { - Self::StoreError(value) - } -} - -#[derive(Debug, Clone)] -pub enum PartialPusHandlingError { - TimeError(StdTimestampError), - TmSendError(String), - VerificationError, -} -impl From for PartialPusHandlingError { - fn 
from(value: StdTimestampError) -> Self {
-        Self::TimeError(value)
-    }
-}
-
-impl From<TimestampError> for PartialPusHandlingError {
-    fn from(value: TimestampError) -> Self {
-        Self::TimeError(StdTimestampError::TimestampError(value))
-    }
-}
-
-#[derive(Debug, Clone)]
-pub enum PusPacketHandlerResult {
-    RequestHandled,
-    RequestHandledPartialSuccess(PartialPusHandlingError),
-    CustomSubservice(VerificationToken<TcStateAccepted>),
-    Empty,
-}
-
-pub struct PusServiceBase {
-    tc_rx: Receiver<AcceptedTc>,
-    tc_store: SharedPool,
-    tm_tx: Sender<StoreAddr>,
-    tm_store: SharedTmStore,
-    tm_apid: u16,
-    verification_handler: StdVerifReporterWithSender,
-    stamp_buf: [u8; 7],
-    pus_buf: [u8; 2048],
-    pus_size: usize,
-}
-
-impl PusServiceBase {
-    pub fn new(
-        receiver: Receiver<AcceptedTc>,
-        tc_pool: SharedPool,
-        tm_tx: Sender<StoreAddr>,
-        tm_store: SharedTmStore,
-        tm_apid: u16,
-        verification_handler: StdVerifReporterWithSender,
-    ) -> Self {
-        Self {
-            tc_rx: receiver,
-            tc_store: tc_pool,
-            tm_apid,
-            tm_tx,
-            tm_store,
-            verification_handler,
-            stamp_buf: [0; 7],
-            pus_buf: [0; 2048],
-            pus_size: 0,
-        }
-    }
-
-    pub fn handle_next_packet<
-        T: FnOnce(
-            StoreAddr,
-            VerificationToken<TcStateAccepted>,
-        ) -> Result<PusPacketHandlerResult, PusPacketHandlingError>,
-    >(
-        &mut self,
-        handle_one_packet: T,
-    ) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
-        return match self.tc_rx.try_recv() {
-            Ok((addr, token)) => handle_one_packet(addr, token),
-            Err(e) => match e {
-                TryRecvError::Empty => Ok(PusPacketHandlerResult::Empty),
-                TryRecvError::Disconnected => Err(PusPacketHandlingError::QueueDisconnected),
-            },
-        };
-    }
-
-    pub fn update_stamp(&mut self) -> Result<(), PartialPusHandlingError> {
-        let time_provider = TimeProvider::from_now_with_u16_days()
-            .map_err(|e| PartialPusHandlingError::TimeError(e));
-        return if time_provider.is_ok() {
-            // Can not fail, buffer is large enough.
-            time_provider
-                .unwrap()
-                .write_to_bytes(&mut self.stamp_buf)
-                .unwrap();
-            Ok(())
-        } else {
-            self.stamp_buf = [0; 7];
-            Err(time_provider.unwrap_err())
-        };
-    }
-}
-
-// pub trait PusTcRouter {
-//     type Error;
-//     fn route_pus_tc(
-//         &mut self,
-//         apid: u16,
-//         service: u8,
-//         subservice: u8,
-//         tc: &PusTc,
-//     );
-// }
-
 pub enum PusTcWrapper<'tc> {
     PusTc(&'tc PusTc<'tc>),
     StoreAddr(StoreAddr),
 }
 
-pub type AcceptedTc = (StoreAddr, VerificationToken<TcStateAccepted>);
-
 pub struct PusTcMpscRouter {
     pub test_service_receiver: Sender<AcceptedTc>,
     pub event_service_receiver: Sender<AcceptedTc>,
@@ -280,7 +129,6 @@ pub struct PusTcArgs {
     //pub tc_source: PusTcSource,
     /// Used to send events from within the TC router
     pub event_sender: Sender<(EventU32, Option<Params>)>,
-    //pub scheduler: Rc<RefCell<PusScheduler>>,
 }
 
 struct TimeStampHelper {
diff --git a/satrs-example/src/pus/scheduler.rs b/satrs-example/src/pus/scheduler.rs
index 7b7ffd1..8b13789 100644
--- a/satrs-example/src/pus/scheduler.rs
+++ b/satrs-example/src/pus/scheduler.rs
@@ -1,164 +1 @@
-use crate::pus::test::PusServiceHandler;
-use crate::pus::{
-    AcceptedTc, PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError,
-    PusServiceBase,
-};
-use delegate::delegate;
-use satrs_core::pool::{SharedPool, StoreAddr};
-use satrs_core::pus::scheduling::PusScheduler;
-use satrs_core::pus::verification::{
-    pus_11_generic_tc_check, FailParams, StdVerifReporterWithSender, TcStateAccepted,
-    VerificationToken,
-};
-use satrs_core::pus::GenericTcCheckError;
-use satrs_core::spacepackets::ecss::{scheduling, PusPacket};
-use satrs_core::spacepackets::tc::PusTc;
-use satrs_core::spacepackets::time::cds::TimeProvider;
-use satrs_core::spacepackets::time::TimeWriter;
-use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore};
-use satrs_example::tmtc_err;
-use
std::sync::mpsc::{Receiver, Sender, TryRecvError}; -pub struct PusService11SchedHandler { - psb: PusServiceBase, - scheduler: PusScheduler, -} - -impl PusService11SchedHandler { - pub fn new( - receiver: Receiver, - tc_pool: SharedPool, - tm_tx: Sender, - tm_store: SharedTmStore, - tm_apid: u16, - verification_handler: StdVerifReporterWithSender, - scheduler: PusScheduler, - ) -> Self { - Self { - psb: PusServiceBase::new( - receiver, - tc_pool, - tm_tx, - tm_store, - tm_apid, - verification_handler, - ), - scheduler, - } - } - - pub fn handle_next_packet(&mut self) -> Result { - return match self.psb.tc_rx.try_recv() { - Ok((addr, token)) => self.handle_one_tc(addr, token), - Err(e) => match e { - TryRecvError::Empty => Ok(PusPacketHandlerResult::Empty), - TryRecvError::Disconnected => Err(PusPacketHandlingError::QueueDisconnected), - }, - }; - } -} - -impl PusServiceHandler for PusService11SchedHandler { - fn psb(&mut self) -> &mut PusServiceBase { - &mut self.psb - } - - fn handle_one_tc( - &mut self, - addr: StoreAddr, - token: VerificationToken, - ) -> Result { - let mut partial_result = self.psb.update_stamp().err(); - { - // Keep locked section as short as possible. - let mut tc_pool = self - .psb - .tc_store - .write() - .map_err(|e| PusPacketHandlingError::RwGuardError(format!("{e}")))?; - let tc_guard = tc_pool.read_with_guard(addr); - let tc_raw = tc_guard.read().unwrap(); - self.psb.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); - } - let (tc, tc_size) = PusTc::from_bytes(&self.psb.pus_buf).unwrap(); - let std_service = scheduling::Subservice::try_from(tc.subservice()); - if std_service.is_err() { - return Ok(PusPacketHandlerResult::CustomSubservice(token)); - } - match std_service.unwrap() { - scheduling::Subservice::TcEnableScheduling => { - let start_token = self - .psb - .verification_handler - .start_success(token, Some(&self.psb.stamp_buf)) - .expect("Error sending start success"); - - self.scheduler.enable(); - if self.scheduler.is_enabled() { - self.psb - .verification_handler - .completion_success(start_token, Some(&self.psb.stamp_buf)) - .expect("Error sending completion success"); - } else { - panic!("Failed to enable scheduler"); - } - } - scheduling::Subservice::TcDisableScheduling => { - let start_token = self - .psb - .verification_handler - .start_success(token, Some(&self.psb.stamp_buf)) - .expect("Error sending start success"); - - self.scheduler.disable(); - if !self.scheduler.is_enabled() { - self.psb - .verification_handler - .completion_success(start_token, Some(&self.psb.stamp_buf)) - .expect("Error sending completion success"); - } else { - panic!("Failed to disable scheduler"); - } - } - scheduling::Subservice::TcResetScheduling => { - let start_token = self - .psb - .verification_handler - .start_success(token, Some(&self.psb.stamp_buf)) - .expect("Error sending start success"); - - let mut pool = self.psb.tc_store.write().expect("Locking pool failed"); - - self.scheduler - .reset(pool.as_mut()) - .expect("Error resetting TC Pool"); - - self.psb - .verification_handler - .completion_success(start_token, Some(&self.psb.stamp_buf)) - .expect("Error sending completion success"); - } - scheduling::Subservice::TcInsertActivity => { - let start_token = self - .psb - .verification_handler - .start_success(token, Some(&self.psb.stamp_buf)) - .expect("error sending start success"); - - let mut pool = self.psb.tc_store.write().expect("locking pool failed"); - self.scheduler - .insert_wrapped_tc::(&tc, pool.as_mut()) - .expect("insertion of activity into pool 
failed"); - - self.psb - .verification_handler - .completion_success(start_token, Some(&self.psb.stamp_buf)) - .expect("sending completion success failed"); - } - _ => { - return Ok(PusPacketHandlerResult::CustomSubservice(token)); - } - } - Ok(PusPacketHandlerResult::CustomSubservice(token)) - } -} diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index 7765c21..b970b61 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -1,28 +1,15 @@ -use crate::pus::{ - AcceptedTc, PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, - PusServiceBase, -}; -use delegate::delegate; -use log::{error, info, warn}; +use log::{info, warn}; use satrs_core::events::EventU32; use satrs_core::params::Params; -use satrs_core::pool::{SharedPool, StoreAddr, StoreError}; -use satrs_core::pus::verification::{ - FailParams, StdVerifReporterWithSender, TcStateAccepted, TcStateStarted, - VerificationOrSendErrorWithToken, VerificationToken, -}; -use satrs_core::seq_count::{SeqCountProviderSyncClonable, SequenceCountProviderCore}; -use satrs_core::spacepackets::ecss::{PusError, PusPacket}; +use satrs_core::pus::test::PusService17TestHandler; +use satrs_core::pus::verification::FailParams; +use satrs_core::pus::{PusPacketHandlerResult, PusServiceHandler}; +use satrs_core::spacepackets::ecss::PusPacket; use satrs_core::spacepackets::tc::PusTc; use satrs_core::spacepackets::time::cds::TimeProvider; -use satrs_core::spacepackets::time::{StdTimestampError, TimeWriter}; -use satrs_core::spacepackets::tm::{PusTm, PusTmSecondaryHeader}; -use satrs_core::spacepackets::SpHeader; -use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; +use satrs_core::spacepackets::time::TimeWriter; use satrs_example::{tmtc_err, TEST_EVENT}; -use std::sync::mpsc::{Receiver, Sender, TryRecvError}; -use std::thread; -use std::time::Duration; +use std::sync::mpsc::Sender; pub struct Service17CustomWrapper { pub pus17_handler: PusService17TestHandler, @@ -31,7 +18,6 @@ pub struct Service17CustomWrapper { impl Service17CustomWrapper { pub fn perform_operation(&mut self) -> bool { - let mut handled_pings = 0; let res = self.pus17_handler.handle_next_packet(); if res.is_err() { warn!("PUS17 handler failed with error {:?}", res.unwrap_err()); @@ -41,18 +27,16 @@ impl Service17CustomWrapper { PusPacketHandlerResult::RequestHandled => { info!("Received PUS ping command TC[17,1]"); info!("Sent ping reply PUS TM[17,2]"); - handled_pings += 1; } PusPacketHandlerResult::RequestHandledPartialSuccess(partial_err) => { warn!( "Handled PUS ping command with partial success: {:?}", partial_err ); - handled_pings += 1; } PusPacketHandlerResult::CustomSubservice(token) => { let (buf, _) = self.pus17_handler.pus_tc_buf(); - let (tc, size) = PusTc::from_bytes(buf).unwrap(); + let (tc, _) = PusTc::from_bytes(buf).unwrap(); let time_stamper = TimeProvider::from_now_with_u16_days().unwrap(); let mut stamp_buf: [u8; 7] = [0; 7]; time_stamper.write_to_bytes(&mut stamp_buf).unwrap(); @@ -63,17 +47,17 @@ impl Service17CustomWrapper { .expect("Sending test event failed"); let start_token = self .pus17_handler - .verification_handler() + .verification_reporter() .start_success(token, Some(&stamp_buf)) .expect("Error sending start success"); self.pus17_handler - .verification_handler() + .verification_reporter() .completion_success(start_token, Some(&stamp_buf)) .expect("Error sending completion success"); } else { let fail_data = [tc.subservice()]; self.pus17_handler - 
.verification_handler() + .verification_reporter() .start_failure( token, FailParams::new( @@ -92,128 +76,3 @@ impl Service17CustomWrapper { true } } - -pub trait PusServiceHandler { - fn psb(&mut self) -> &mut PusServiceBase; - fn handle_one_tc( - &mut self, - addr: StoreAddr, - token: VerificationToken, - ) -> Result; - fn handle_next_packet(&mut self) -> Result { - return match self.psb().tc_rx.try_recv() { - Ok((addr, token)) => self.handle_one_tc(addr, token), - Err(e) => match e { - TryRecvError::Empty => Ok(PusPacketHandlerResult::Empty), - TryRecvError::Disconnected => Err(PusPacketHandlingError::QueueDisconnected), - }, - }; - } -} -pub struct PusService17TestHandler { - psb: PusServiceBase, -} - -impl PusService17TestHandler { - pub fn new( - receiver: Receiver, - tc_pool: SharedPool, - tm_tx: Sender, - tm_store: SharedTmStore, - tm_apid: u16, - verification_handler: StdVerifReporterWithSender, - ) -> Self { - Self { - psb: PusServiceBase::new( - receiver, - tc_pool, - tm_tx, - tm_store, - tm_apid, - verification_handler, - ), - } - } - - pub fn verification_handler(&mut self) -> &mut StdVerifReporterWithSender { - &mut self.psb.verification_handler - } - - pub fn pus_tc_buf(&self) -> (&[u8], usize) { - (&self.psb.pus_buf, self.psb.pus_size) - } -} - -impl PusServiceHandler for PusService17TestHandler { - fn psb(&mut self) -> &mut PusServiceBase { - &mut self.psb - } - - fn handle_one_tc( - &mut self, - addr: StoreAddr, - token: VerificationToken, - ) -> Result { - let mut partial_result = None; - { - // Keep locked section as short as possible. - let mut tc_pool = self - .psb - .tc_store - .write() - .map_err(|e| PusPacketHandlingError::RwGuardError(format!("{e}")))?; - let tc_guard = tc_pool.read_with_guard(addr); - let tc_raw = tc_guard.read()?; - self.psb.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); - } - let mut partial_error = None; - let (tc, tc_size) = PusTc::from_bytes(&self.psb.pus_buf)?; - if tc.service() != 17 { - return Err(PusPacketHandlingError::WrongService(tc.service())); - } - if tc.subservice() == 1 { - partial_result = self.psb.update_stamp().err(); - let result = self - .psb - .verification_handler - .start_success(token, Some(&self.psb.stamp_buf)) - .map_err(|e| PartialPusHandlingError::VerificationError); - let start_token = if result.is_err() { - partial_error = Some(result.unwrap_err()); - None - } else { - Some(result.unwrap()) - }; - // Sequence count will be handled centrally in TM funnel. 
- let mut reply_header = SpHeader::tm_unseg(self.psb.tm_apid, 0, 0).unwrap(); - let tc_header = PusTmSecondaryHeader::new_simple(17, 2, &self.psb.stamp_buf); - let ping_reply = PusTm::new(&mut reply_header, tc_header, None, true); - let addr = self.psb.tm_store.add_pus_tm(&ping_reply); - if let Err(e) = self - .psb - .tm_tx - .send(addr) - .map_err(|e| PartialPusHandlingError::TmSendError(format!("{e}"))) - { - partial_error = Some(e); - } - if let Some(start_token) = start_token { - if self - .psb - .verification_handler - .completion_success(start_token, Some(&self.psb.stamp_buf)) - .is_err() - { - partial_error = Some(PartialPusHandlingError::VerificationError) - } - } - if partial_error.is_some() { - return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess( - partial_error.unwrap(), - )); - } - return Ok(PusPacketHandlerResult::RequestHandled); - } - Ok(PusPacketHandlerResult::CustomSubservice(token)) - } -} diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index 320cf5a..accef97 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -8,7 +8,6 @@ use std::error::Error; use std::fmt::{Display, Formatter}; use std::net::SocketAddr; use std::rc::Rc; -use std::sync::mpsc; use std::sync::mpsc::{Receiver, SendError, Sender, TryRecvError}; use std::thread; use std::time::Duration; @@ -18,15 +17,14 @@ use crate::pus::{PusReceiver, PusTcArgs, PusTcMpscRouter, PusTmArgs}; use crate::requests::RequestWithToken; use satrs_core::pool::{SharedPool, StoreAddr, StoreError}; use satrs_core::pus::event_man::EventRequestWithToken; -use satrs_core::pus::scheduling::{PusScheduler, TcInfo}; +use satrs_core::pus::scheduler::{PusScheduler, TcInfo}; use satrs_core::pus::verification::StdVerifReporterWithSender; use satrs_core::seq_count::SeqCountProviderSyncClonable; -use satrs_core::spacepackets::ecss::SerializablePusPacket; -use satrs_core::spacepackets::{ecss::PusPacket, tc::PusTc, tm::PusTm, SpHeader}; +use satrs_core::spacepackets::ecss::{PusPacket, SerializablePusPacket}; +use satrs_core::spacepackets::tc::PusTc; +use satrs_core::spacepackets::SpHeader; use satrs_core::tmtc::tm_helper::SharedTmStore; -use satrs_core::tmtc::{ - CcsdsDistributor, CcsdsError, PusServiceProvider, ReceivesCcsdsTc, ReceivesEcssPusTc, -}; +use satrs_core::tmtc::{CcsdsDistributor, CcsdsError, ReceivesCcsdsTc, ReceivesEcssPusTc}; pub const PUS_APID: u16 = 0x02; @@ -162,7 +160,6 @@ pub fn core_tmtc_task( PusScheduler::new_with_current_init_time(Duration::from_secs(5)).unwrap(), )); - let sched_clone = scheduler.clone(); let pus_tm_args = PusTmArgs { tm_tx: tm_args.tm_sink_sender, tm_store: tm_args.tm_store.clone(), diff --git a/satrs-mib/codegen/src/lib.rs b/satrs-mib/codegen/src/lib.rs index 3f144c6..906bc1a 100644 --- a/satrs-mib/codegen/src/lib.rs +++ b/satrs-mib/codegen/src/lib.rs @@ -20,7 +20,7 @@ pub fn resultcode( let item = parse_macro_input!(item as ItemConst); // Generate additional generated info struct used for introspection. 
- let result_code_name = item.ident.clone(); + let result_code_name = &item.ident; let name_as_str = result_code_name.to_string(); let gen_struct_name = format_ident!("{}_EXT", result_code_name); let info_str = info_str.map_or(String::from(""), |v| v.value()); -- 2.43.0 From 363770066d435c462fab172e40ca1cbcd0a2eb0c Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 11:58:43 +0200 Subject: [PATCH 11/39] start adding event service --- satrs-core/src/pus/mod.rs | 20 ++++++++-- satrs-core/src/pus/scheduler_srv.rs | 15 ++++++-- satrs-core/src/pus/test.rs | 5 ++- satrs-example/src/main.rs | 30 +++++++++++++-- satrs-example/src/pus/events.rs | 59 +++++++++++++++++++++++++++++ satrs-example/src/pus/mod.rs | 6 +-- satrs-example/src/pus/scheduler.rs | 29 ++++++++++++++ satrs-example/src/pus/test.rs | 16 +++----- 8 files changed, 154 insertions(+), 26 deletions(-) create mode 100644 satrs-example/src/pus/events.rs diff --git a/satrs-core/src/pus/mod.rs b/satrs-core/src/pus/mod.rs index d43f8ed..127b84e 100644 --- a/satrs-core/src/pus/mod.rs +++ b/satrs-core/src/pus/mod.rs @@ -136,13 +136,13 @@ mod alloc_mod { pub mod std_mod { use crate::pool::{ShareablePoolProvider, SharedPool, StoreAddr, StoreError}; use crate::pus::verification::{ - StdVerifReporterWithSender, TcStateAccepted, VerificationToken, + FailParams, StdVerifReporterWithSender, TcStateAccepted, VerificationToken, }; use crate::pus::{EcssSender, EcssTcSenderCore, EcssTmSenderCore}; use crate::tmtc::tm_helper::SharedTmStore; use crate::SenderId; use alloc::vec::Vec; - use spacepackets::ecss::{PusError, SerializablePusPacket}; + use spacepackets::ecss::{EcssEnumeration, PusError, SerializablePusPacket}; use spacepackets::tc::PusTc; use spacepackets::time::cds::TimeProvider; use spacepackets::time::{StdTimestampError, TimeWriter}; @@ -315,7 +315,7 @@ pub mod std_mod { pub enum PusPacketHandlerResult { RequestHandled, RequestHandledPartialSuccess(PartialPusHandlingError), - CustomSubservice(VerificationToken), + CustomSubservice(u8, VerificationToken), Empty, } @@ -372,6 +372,20 @@ pub mod std_mod { Err(time_provider.unwrap_err()) } } + + pub fn report_start_failure( + &mut self, + token: VerificationToken, + failure_code: &impl EcssEnumeration, + failure_data: Option<&[u8]>, + ) -> Result<(), VerificationToken> { + self.verification_handler + .start_failure( + token, + FailParams::new(Some(&self.stamp_buf), failure_code, failure_data), + ) + .map_err(|e| e.1) + } } pub trait PusServiceHandler { diff --git a/satrs-core/src/pus/scheduler_srv.rs b/satrs-core/src/pus/scheduler_srv.rs index 80bd2b5..98ea570 100644 --- a/satrs-core/src/pus/scheduler_srv.rs +++ b/satrs-core/src/pus/scheduler_srv.rs @@ -69,7 +69,10 @@ impl PusServiceHandler for PusService11SchedHandler { let (tc, _) = PusTc::from_bytes(&self.psb.pus_buf).unwrap(); let std_service = scheduling::Subservice::try_from(tc.subservice()); if std_service.is_err() { - return Ok(PusPacketHandlerResult::CustomSubservice(token)); + return Ok(PusPacketHandlerResult::CustomSubservice( + tc.subservice(), + token, + )); } //let partial_error = self.psb.update_stamp().err(); let time_provider = @@ -155,7 +158,10 @@ impl PusServiceHandler for PusService11SchedHandler { .expect("sending completion success failed"); } _ => { - return Ok(PusPacketHandlerResult::CustomSubservice(token)); + return Ok(PusPacketHandlerResult::CustomSubservice( + tc.subservice(), + token, + )); } } if let Some(partial_error) = partial_error { @@ -163,6 +169,9 @@ impl PusServiceHandler for 
PusService11SchedHandler { partial_error, )); } - Ok(PusPacketHandlerResult::CustomSubservice(token)) + Ok(PusPacketHandlerResult::CustomSubservice( + tc.subservice(), + token, + )) } } diff --git a/satrs-core/src/pus/test.rs b/satrs-core/src/pus/test.rs index 397fc14..09614bc 100644 --- a/satrs-core/src/pus/test.rs +++ b/satrs-core/src/pus/test.rs @@ -109,6 +109,9 @@ impl PusServiceHandler for PusService17TestHandler { }; return Ok(PusPacketHandlerResult::RequestHandled); } - Ok(PusPacketHandlerResult::CustomSubservice(token)) + Ok(PusPacketHandlerResult::CustomSubservice( + tc.subservice(), + token, + )) } } diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index a9bdc57..dff9a5b 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -9,6 +9,7 @@ use log::{info, warn}; use crate::hk::AcsHkIds; use crate::logging::setup_logger; +use crate::pus::scheduler::Pus11Wrapper; use crate::pus::test::Service17CustomWrapper; use crate::pus::PusTcMpscRouter; use crate::requests::{Request, RequestWithToken}; @@ -26,6 +27,8 @@ use satrs_core::pus::event_man::{ PusEventDispatcher, }; use satrs_core::pus::hk::Subservice as HkSubservice; +use satrs_core::pus::scheduler::PusScheduler; +use satrs_core::pus::scheduler_srv::PusService11SchedHandler; use satrs_core::pus::test::PusService17TestHandler; use satrs_core::pus::verification::{ MpscVerifSender, VerificationReporterCfg, VerificationReporterWithSender, @@ -173,12 +176,24 @@ fn main() { tm_funnel_tx.clone(), tm_store.clone(), PUS_APID, - verif_reporter, + verif_reporter.clone(), ); - let mut srv_17_wrapper = Service17CustomWrapper { + let mut pus_17_wrapper = Service17CustomWrapper { pus17_handler, test_srv_event_sender, }; + let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5)) + .expect("Creating PUS Scheduler failed"); + let pus11_handler = PusService11SchedHandler::new( + pus_sched_rx, + tc_store.pool.clone(), + tm_funnel_tx.clone(), + tm_store.clone(), + PUS_APID, + verif_reporter, + scheduler, + ); + let mut pus_11_wrapper = Pus11Wrapper { pus11_handler }; info!("Starting TMTC task"); let jh0 = thread::Builder::new() @@ -343,8 +358,15 @@ fn main() { let jh4 = thread::Builder::new() .name("PUS".to_string()) .spawn(move || loop { - let queue_empty = srv_17_wrapper.perform_operation(); - if queue_empty { + let mut all_queues_empty = true; + let mut is_srv_finished = |srv_handler_finished: bool| { + if !srv_handler_finished { + all_queues_empty = false; + } + }; + is_srv_finished(pus_17_wrapper.perform_operation()); + is_srv_finished(pus_11_wrapper.perform_operation()); + if all_queues_empty { thread::sleep(Duration::from_millis(200)); } }) diff --git a/satrs-example/src/pus/events.rs b/satrs-example/src/pus/events.rs new file mode 100644 index 0000000..554bbee --- /dev/null +++ b/satrs-example/src/pus/events.rs @@ -0,0 +1,59 @@ +use satrs_core::event_man::{EventManager, EventManagerWithMpscQueue}; +use satrs_core::events::EventU32; +use satrs_core::params::Params; +use satrs_core::pool::{SharedPool, StoreAddr}; +use satrs_core::pus::event_man::EventReporter; +use satrs_core::pus::verification::{ + StdVerifReporterWithSender, TcStateAccepted, VerificationToken, +}; +use satrs_core::pus::{ + AcceptedTc, PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase, PusServiceHandler, +}; +use satrs_core::tmtc::tm_helper::SharedTmStore; +use std::sync::mpsc::{Receiver, Sender}; + +pub struct PusService5EventHandler { + psb: PusServiceBase, + event_manager: EventManagerWithMpscQueue, +} + 
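+// Rough wiring sketch (assumes the channel, pool and reporter handles that
+// main.rs already owns; all names below are placeholders, not part of this
+// patch):
+//
+//     let mut srv_5 = PusService5EventHandler::new(
+//         pus_event_rx, tc_store.pool.clone(), tm_funnel_tx.clone(),
+//         tm_store.clone(), PUS_APID, verif_reporter.clone(), event_manager,
+//     );
+//     loop { srv_5.handle_next_packet().expect("PUS 5 handling failed"); }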
+impl PusService5EventHandler { + pub fn new( + receiver: Receiver, + tc_pool: SharedPool, + tm_tx: Sender, + tm_store: SharedTmStore, + tm_apid: u16, + verification_handler: StdVerifReporterWithSender, + event_manager: EventManagerWithMpscQueue, + ) -> Self { + Self { + psb: PusServiceBase::new( + receiver, + tc_pool, + tm_tx, + tm_store, + tm_apid, + verification_handler, + ), + event_manager, + } + } +} + +impl PusServiceHandler for PusService5EventHandler { + fn psb_mut(&mut self) -> &mut PusServiceBase { + &mut self.psb + } + fn psb(&self) -> &PusServiceBase { + &self.psb + } + + fn handle_one_tc( + &mut self, + addr: StoreAddr, + token: VerificationToken, + ) -> Result { + Ok(PusPacketHandlerResult::RequestHandled) + } +} diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index 4e1941b..9fb1e9c 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -13,14 +13,10 @@ use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; use satrs_example::{tmtc_err, CustomPusServiceId}; use std::sync::mpsc::Sender; +pub mod events; pub mod scheduler; pub mod test; -pub enum PusTcWrapper<'tc> { - PusTc(&'tc PusTc<'tc>), - StoreAddr(StoreAddr), -} - pub struct PusTcMpscRouter { pub test_service_receiver: Sender, pub event_service_receiver: Sender, diff --git a/satrs-example/src/pus/scheduler.rs b/satrs-example/src/pus/scheduler.rs index 8b13789..1b8d194 100644 --- a/satrs-example/src/pus/scheduler.rs +++ b/satrs-example/src/pus/scheduler.rs @@ -1 +1,30 @@ +use log::{error, warn}; +use satrs_core::pus::scheduler_srv::PusService11SchedHandler; +use satrs_core::pus::{PusPacketHandlerResult, PusServiceHandler}; +pub struct Pus11Wrapper { + pub pus11_handler: PusService11SchedHandler, +} + +impl Pus11Wrapper { + pub fn perform_operation(&mut self) -> bool { + match self.pus11_handler.handle_next_packet() { + Ok(result) => match result { + PusPacketHandlerResult::RequestHandled => {} + PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { + warn!("PUS11 partial packet handling success: {e:?}") + } + PusPacketHandlerResult::CustomSubservice(invalid, _) => { + warn!("PUS11 invalid subservice {invalid}"); + } + PusPacketHandlerResult::Empty => { + return true; + } + }, + Err(error) => { + error!("PUS packet handling error: {error:?}") + } + } + false + } +} diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index b970b61..7aa1229 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -2,7 +2,6 @@ use log::{info, warn}; use satrs_core::events::EventU32; use satrs_core::params::Params; use satrs_core::pus::test::PusService17TestHandler; -use satrs_core::pus::verification::FailParams; use satrs_core::pus::{PusPacketHandlerResult, PusServiceHandler}; use satrs_core::spacepackets::ecss::PusPacket; use satrs_core::spacepackets::tc::PusTc; @@ -34,13 +33,13 @@ impl Service17CustomWrapper { partial_err ); } - PusPacketHandlerResult::CustomSubservice(token) => { + PusPacketHandlerResult::CustomSubservice(subservice, token) => { let (buf, _) = self.pus17_handler.pus_tc_buf(); let (tc, _) = PusTc::from_bytes(buf).unwrap(); let time_stamper = TimeProvider::from_now_with_u16_days().unwrap(); let mut stamp_buf: [u8; 7] = [0; 7]; time_stamper.write_to_bytes(&mut stamp_buf).unwrap(); - if tc.subservice() == 128 { + if subservice == 128 { info!("Generating test event"); self.test_srv_event_sender .send((TEST_EVENT.into(), None)) @@ -57,14 +56,11 @@ impl Service17CustomWrapper { } else { let fail_data = 
[tc.subservice()]; self.pus17_handler - .verification_reporter() - .start_failure( + .psb_mut() + .report_start_failure( token, - FailParams::new( - Some(&stamp_buf), - &tmtc_err::INVALID_PUS_SUBSERVICE, - Some(&fail_data), - ), + &tmtc_err::INVALID_PUS_SUBSERVICE, + Some(&fail_data), ) .expect("Sending start failure verification failed"); } -- 2.43.0 From 52a7682a5515fd3a086643f23666b685f4a60741 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 14:25:51 +0200 Subject: [PATCH 12/39] continue --- satrs-core/src/pus/event_man.rs | 4 +- satrs-core/src/pus/event_srv.rs | 144 +++++++++++++ satrs-core/src/pus/mod.rs | 10 +- satrs-core/src/pus/verification.rs | 11 + satrs-example/src/main.rs | 23 +- satrs-example/src/pus/event.rs | 33 +++ satrs-example/src/pus/events.rs | 59 ----- satrs-example/src/pus/mod.rs | 335 +---------------------------- satrs-example/src/pus/scheduler.rs | 7 +- satrs-example/src/pus/test.rs | 3 + satrs-example/src/tmtc.rs | 4 - 11 files changed, 226 insertions(+), 407 deletions(-) create mode 100644 satrs-core/src/pus/event_srv.rs create mode 100644 satrs-example/src/pus/event.rs delete mode 100644 satrs-example/src/pus/events.rs diff --git a/satrs-core/src/pus/event_man.rs b/satrs-core/src/pus/event_man.rs index dbde7fd..0c701e6 100644 --- a/satrs-core/src/pus/event_man.rs +++ b/satrs-core/src/pus/event_man.rs @@ -10,7 +10,7 @@ use hashbrown::HashSet; #[cfg(feature = "alloc")] pub use crate::pus::event::EventReporter; -use crate::pus::verification::{TcStateStarted, VerificationToken}; +use crate::pus::verification::TcStateToken; #[cfg(feature = "alloc")] use crate::pus::EcssTmSenderCore; use crate::pus::EcssTmtcErrorWithSend; @@ -91,7 +91,7 @@ pub enum EventRequest { #[derive(Debug)] pub struct EventRequestWithToken { pub request: EventRequest, - pub token: VerificationToken, + pub token: TcStateToken, } #[derive(Debug)] diff --git a/satrs-core/src/pus/event_srv.rs b/satrs-core/src/pus/event_srv.rs new file mode 100644 index 0000000..ad098fa --- /dev/null +++ b/satrs-core/src/pus/event_srv.rs @@ -0,0 +1,144 @@ +use crate::events::EventU32; +use crate::pool::{SharedPool, StoreAddr}; +use crate::pus::event_man::{EventRequest, EventRequestWithToken}; +use crate::pus::verification::{ + StdVerifReporterWithSender, TcStateAccepted, TcStateToken, VerificationToken, +}; +use crate::pus::{ + AcceptedTc, PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, + PusServiceBase, PusServiceHandler, +}; +use crate::tmtc::tm_helper::SharedTmStore; +use spacepackets::ecss::event::Subservice; +use spacepackets::ecss::PusPacket; +use spacepackets::tc::PusTc; +use std::format; +use std::sync::mpsc::{Receiver, Sender}; + +pub struct PusService5EventHandler { + psb: PusServiceBase, + event_request_tx: Sender, +} + +impl PusService5EventHandler { + pub fn new( + receiver: Receiver, + tc_pool: SharedPool, + tm_tx: Sender, + tm_store: SharedTmStore, + tm_apid: u16, + verification_handler: StdVerifReporterWithSender, + event_request_tx: Sender, + ) -> Self { + Self { + psb: PusServiceBase::new( + receiver, + tc_pool, + tm_tx, + tm_store, + tm_apid, + verification_handler, + ), + event_request_tx, + } + } +} + +impl PusServiceHandler for PusService5EventHandler { + fn psb_mut(&mut self) -> &mut PusServiceBase { + &mut self.psb + } + fn psb(&self) -> &PusServiceBase { + &self.psb + } + + fn handle_one_tc( + &mut self, + addr: StoreAddr, + token: VerificationToken, + ) -> Result { + { + // Keep locked section as short as possible. 
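+        // The raw TC is copied into the service-local pus_buf here so that
+        // the write guard on the shared pool is dropped at the end of this
+        // block, before the packet is parsed and the request is forwarded.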
+ let mut tc_pool = self + .psb + .tc_store + .write() + .map_err(|e| PusPacketHandlingError::RwGuardError(format!("{e}")))?; + let tc_guard = tc_pool.read_with_guard(addr); + let tc_raw = tc_guard.read().unwrap(); + self.psb.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); + } + let (tc, _) = PusTc::from_bytes(&self.psb.pus_buf).unwrap(); + let srv = Subservice::try_from(tc.subservice()); + if srv.is_err() { + return Ok(PusPacketHandlerResult::CustomSubservice( + tc.subservice(), + token, + )); + } + let mut handle_enable_disable_request = |enable: bool| { + if tc.user_data().is_none() || tc.user_data().unwrap().len() < 4 { + return Err(PusPacketHandlingError::NotEnoughAppData( + "At least 4 bytes event ID expected".into(), + )); + } + let user_data = tc.user_data().unwrap(); + let event_u32 = EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap())); + + let start_token = self + .psb + .verification_handler + .start_success(token, Some(&self.psb.stamp_buf)) + .map_err(|_| PartialPusHandlingError::VerificationError); + let partial_error = start_token.clone().err(); + let mut token: TcStateToken = token.into(); + if let Ok(start_token) = start_token { + token = start_token.into(); + } + let event_req_with_token = if enable { + EventRequestWithToken { + request: EventRequest::Enable(event_u32), + token, + } + } else { + EventRequestWithToken { + request: EventRequest::Disable(event_u32), + token, + } + }; + self.event_request_tx + .send(event_req_with_token) + .map_err(|_| { + PusPacketHandlingError::SendError("Forwarding event request failed".into()) + })?; + if let Some(partial_error) = partial_error { + return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess( + partial_error, + )); + } + Ok(PusPacketHandlerResult::RequestHandled) + }; + match srv.unwrap() { + Subservice::TmInfoReport + | Subservice::TmLowSeverityReport + | Subservice::TmMediumSeverityReport + | Subservice::TmHighSeverityReport => { + return Err(PusPacketHandlingError::InvalidSubservice(tc.subservice())) + } + Subservice::TcEnableEventGeneration => { + handle_enable_disable_request(true)?; + } + Subservice::TcDisableEventGeneration => { + handle_enable_disable_request(false)?; + } + Subservice::TcReportDisabledList | Subservice::TmDisabledEventsReport => { + return Ok(PusPacketHandlerResult::SubserviceNotImplemented( + tc.subservice(), + token, + )); + } + } + + Ok(PusPacketHandlerResult::RequestHandled) + } +} diff --git a/satrs-core/src/pus/mod.rs b/satrs-core/src/pus/mod.rs index 127b84e..4c7cd18 100644 --- a/satrs-core/src/pus/mod.rs +++ b/satrs-core/src/pus/mod.rs @@ -11,6 +11,7 @@ use spacepackets::{ByteConversionError, SizeMissmatch}; pub mod event; pub mod event_man; +pub mod event_srv; pub mod hk; pub mod mode; pub mod scheduler; @@ -289,13 +290,17 @@ pub mod std_mod { PusError(#[from] PusError), #[error("Wrong service number {0} for packet handler")] WrongService(u8), + #[error("Invalid subservice {0}")] + InvalidSubservice(u8), #[error("Not enough application data available: {0}")] NotEnoughAppData(String), #[error("Generic store error: {0}")] StoreError(#[from] StoreError), - #[error("Error with the pool RwGuard")] + #[error("Error with the pool RwGuard: {0}")] RwGuardError(String), - #[error("MQ backend disconnect error")] + #[error("MQ send error: {0}")] + SendError(String), + #[error("TX message queue side has disconnected")] QueueDisconnected, #[error("Other error {0}")] OtherError(String), @@ -315,6 +320,7 @@ pub mod std_mod { pub enum PusPacketHandlerResult { RequestHandled, 
     RequestHandledPartialSuccess(PartialPusHandlingError),
+    SubserviceNotImplemented(u8, VerificationToken<TcStateAccepted>),
     CustomSubservice(u8, VerificationToken<TcStateAccepted>),
     Empty,
 }
diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs
index 6597302..95a54eb 100644
--- a/satrs-core/src/pus/verification.rs
+++ b/satrs-core/src/pus/verification.rs
@@ -227,6 +227,17 @@ impl From<VerificationToken<TcStateNone>> for TcStateToken {
     }
 }
 
+impl TryFrom<TcStateToken> for VerificationToken<TcStateAccepted> {
+    type Error = ();
+
+    fn try_from(value: TcStateToken) -> Result<Self, Self::Error> {
+        if let TcStateToken::Accepted(token) = value {
+            Ok(token)
+        } else {
+            return Err(());
+        }
+    }
+}
+
 impl From<VerificationToken<TcStateAccepted>> for TcStateToken {
     fn from(t: VerificationToken<TcStateAccepted>) -> Self {
         TcStateToken::Accepted(t)
diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs
index dff9a5b..66f805e 100644
--- a/satrs-example/src/main.rs
+++ b/satrs-example/src/main.rs
@@ -9,6 +9,7 @@ use log::{info, warn};
 
 use crate::hk::AcsHkIds;
 use crate::logging::setup_logger;
+use crate::pus::event::Pus5Wrapper;
 use crate::pus::scheduler::Pus11Wrapper;
 use crate::pus::test::Service17CustomWrapper;
 use crate::pus::PusTcMpscRouter;
@@ -26,6 +27,7 @@ use satrs_core::pus::event_man::{
     DefaultPusMgmtBackendProvider, EventReporter, EventRequest, EventRequestWithToken,
     PusEventDispatcher,
 };
+use satrs_core::pus::event_srv::PusService5EventHandler;
 use satrs_core::pus::hk::Subservice as HkSubservice;
 use satrs_core::pus::scheduler::PusScheduler;
 use satrs_core::pus::scheduler_srv::PusService11SchedHandler;
@@ -141,7 +143,7 @@ fn main() {
         sock_addr,
         verif_reporter: verif_reporter.clone(),
         event_sender,
-        event_request_tx,
+        // event_request_tx,
         request_map,
         seq_count_provider: seq_count_provider_tmtc,
     };
@@ -184,16 +186,26 @@ fn main() {
     };
     let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
         .expect("Creating PUS Scheduler failed");
-    let pus11_handler = PusService11SchedHandler::new(
+    let pus_11_handler = PusService11SchedHandler::new(
         pus_sched_rx,
         tc_store.pool.clone(),
         tm_funnel_tx.clone(),
         tm_store.clone(),
         PUS_APID,
-        verif_reporter,
+        verif_reporter.clone(),
         scheduler,
     );
-    let mut pus_11_wrapper = Pus11Wrapper { pus11_handler };
+    let mut pus_11_wrapper = Pus11Wrapper { pus_11_handler };
+    let pus_5_handler = PusService5EventHandler::new(
+        pus_event_rx,
+        tc_store.pool.clone(),
+        tm_funnel_tx.clone(),
+        tm_store.clone(),
+        PUS_APID,
+        verif_reporter,
+        event_request_tx,
+    );
+    let mut pus_5_wrapper = Pus5Wrapper { pus_5_handler };
 
     info!("Starting TMTC task");
     let jh0 = thread::Builder::new()
@@ -236,7 +248,7 @@ fn main() {
     let mut time_provider = TimeProvider::new_with_u16_days(0, 0);
     let mut report_completion = |event_req: EventRequestWithToken, timestamp: &[u8]| {
         reporter_event_handler
-            .completion_success(event_req.token, Some(timestamp))
+            .completion_success(event_req.token.try_into().unwrap(), Some(timestamp))
             .expect("Sending completion success failed");
     };
     loop {
@@ -366,6 +378,7 @@ fn main() {
         };
         is_srv_finished(pus_17_wrapper.perform_operation());
         is_srv_finished(pus_11_wrapper.perform_operation());
+        is_srv_finished(pus_5_wrapper.perform_operation());
         if all_queues_empty {
             thread::sleep(Duration::from_millis(200));
         }
diff --git a/satrs-example/src/pus/event.rs b/satrs-example/src/pus/event.rs
new file mode 100644
index 0000000..ef47c87
--- /dev/null
+++ b/satrs-example/src/pus/event.rs
@@ -0,0 +1,33 @@
+use log::{error, warn};
+use satrs_core::pus::event_srv::PusService5EventHandler;
+use satrs_core::pus::{PusPacketHandlerResult, PusServiceHandler};
+
+pub struct
Pus5Wrapper { + pub pus_5_handler: PusService5EventHandler, +} + +impl Pus5Wrapper { + pub fn perform_operation(&mut self) -> bool { + match self.pus_5_handler.handle_next_packet() { + Ok(result) => match result { + PusPacketHandlerResult::RequestHandled => {} + PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { + warn!("PUS 5 partial packet handling success: {e:?}") + } + PusPacketHandlerResult::CustomSubservice(invalid, _) => { + warn!("PUS 5 invalid subservice {invalid}"); + } + PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => { + warn!("PUS 5 subservice {subservice} not implemented"); + } + PusPacketHandlerResult::Empty => { + return true; + } + }, + Err(error) => { + error!("PUS packet handling error: {error:?}") + } + } + false + } +} diff --git a/satrs-example/src/pus/events.rs b/satrs-example/src/pus/events.rs deleted file mode 100644 index 554bbee..0000000 --- a/satrs-example/src/pus/events.rs +++ /dev/null @@ -1,59 +0,0 @@ -use satrs_core::event_man::{EventManager, EventManagerWithMpscQueue}; -use satrs_core::events::EventU32; -use satrs_core::params::Params; -use satrs_core::pool::{SharedPool, StoreAddr}; -use satrs_core::pus::event_man::EventReporter; -use satrs_core::pus::verification::{ - StdVerifReporterWithSender, TcStateAccepted, VerificationToken, -}; -use satrs_core::pus::{ - AcceptedTc, PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase, PusServiceHandler, -}; -use satrs_core::tmtc::tm_helper::SharedTmStore; -use std::sync::mpsc::{Receiver, Sender}; - -pub struct PusService5EventHandler { - psb: PusServiceBase, - event_manager: EventManagerWithMpscQueue, -} - -impl PusService5EventHandler { - pub fn new( - receiver: Receiver, - tc_pool: SharedPool, - tm_tx: Sender, - tm_store: SharedTmStore, - tm_apid: u16, - verification_handler: StdVerifReporterWithSender, - event_manager: EventManagerWithMpscQueue, - ) -> Self { - Self { - psb: PusServiceBase::new( - receiver, - tc_pool, - tm_tx, - tm_store, - tm_apid, - verification_handler, - ), - event_manager, - } - } -} - -impl PusServiceHandler for PusService5EventHandler { - fn psb_mut(&mut self) -> &mut PusServiceBase { - &mut self.psb - } - fn psb(&self) -> &PusServiceBase { - &self.psb - } - - fn handle_one_tc( - &mut self, - addr: StoreAddr, - token: VerificationToken, - ) -> Result { - Ok(PusPacketHandlerResult::RequestHandled) - } -} diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index 9fb1e9c..a3a2292 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -9,11 +9,11 @@ use satrs_core::spacepackets::ecss::PusServiceId; use satrs_core::spacepackets::tc::PusTc; use satrs_core::spacepackets::time::cds::TimeProvider; use satrs_core::spacepackets::time::TimeWriter; -use satrs_core::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmStore}; +use satrs_core::tmtc::tm_helper::PusTmWithCdsShortHelper; use satrs_example::{tmtc_err, CustomPusServiceId}; use std::sync::mpsc::Sender; -pub mod events; +pub mod event; pub mod scheduler; pub mod test; @@ -25,31 +25,6 @@ pub struct PusTcMpscRouter { pub action_service_receiver: Sender, } -// impl PusTcRouter for PusTcMpscRouter { -// type Error = (); -// -// fn route_pus_tc(&mut self, apid: u16, service: u8, subservice: u8, tc: &PusTc) { -// if apid == PUS_APID { -// if service == PusServiceId::Event as u8 { -// self.event_service_receiver.send_tc(*tc).unwrap(); -// } -// if service == PusServiceId::Action as u8 { -// // TODO: Look up object ID and then route the action request to that object. 
-// self.action_service_receiver.send_tc(*tc).unwrap(); -// } -// if service == PusServiceId::Housekeeping as u8 { -// // TODO: Look up object ID and then route the HK request to that object. -// } -// if service == PusServiceId::Scheduling as u8 { -// self.sched_service_receiver.send_tc(*tc).unwrap(); -// } -// if service == PusServiceId::Test as u8 { -// self.test_service_receiver.send_tc(*tc).unwrap(); -// } -// } -// todo!() -// } -// } pub struct PusReceiver { pub tm_helper: PusTmWithCdsShortHelper, pub tm_args: PusTmArgs, @@ -58,10 +33,6 @@ pub struct PusReceiver { } pub struct PusTmArgs { - /// All telemetry is sent with this sender handle. - pub tm_tx: Sender, - /// All TM to be sent is stored here - pub tm_store: SharedTmStore, /// All verification reporting is done with this reporter. pub verif_reporter: StdVerifReporterWithSender, /// Sequence count provider for TMs sent from within pus demultiplexer @@ -74,55 +45,9 @@ impl PusTmArgs { } } -// #[allow(dead_code)] -// pub struct PusTcHandlerBase { -// pub tc_store: Box, -// pub receiver: Receiver<(StoreAddr, VerificationToken)>, -// pub verif_reporter: StdVerifReporterWithSender, -// pub time_provider: Box, -// } -// -// pub trait TestHandlerNoPing { -// fn handle_no_ping_tc(&mut self, tc: PusTc); -// } -// -// #[allow(dead_code)] -// pub struct PusTestTcHandler { -// pub base: PusTcHandlerBase, -// handler: Option>, -// } -// -// #[allow(dead_code)] -// pub struct PusScheduleTcHandler { -// pub base: PusTestTcHandler, -// } -// -// impl PusTestTcHandler { -// #[allow(dead_code)] -// pub fn operation(&mut self) { -// let (addr, token) = self.base.receiver.recv().unwrap(); -// let data = self.base.tc_store.read(&addr).unwrap(); -// let (pus_tc, _len) = PusTc::from_bytes(data).unwrap(); -// let stamp: [u8; 7] = [0; 7]; -// if pus_tc.subservice() == 1 { -// self.base -// .verif_reporter -// .completion_success(token, Some(&stamp)) -// .unwrap(); -// } else if let Some(handler) = &mut self.handler { -// handler.handle_no_ping_tc(pus_tc); -// } -// } -// } - pub struct PusTcArgs { - //pub event_request_tx: Sender, /// This routes all telecommands to their respective recipients pub pus_router: PusTcMpscRouter, - /// Request routing helper. Maps targeted requests to their recipient. - //pub request_map: HashMap>, - /// Required for scheduling of telecommands. 
- //pub tc_source: PusTcSource, /// Used to send events from within the TC router pub event_sender: Sender<(EventU32, Option)>, } @@ -330,64 +255,6 @@ impl PusReceiver { // } // impl PusReceiver { -// fn handle_test_service(&mut self, pus_tc: &PusTc, token: VerificationToken) { -// match PusPacket::subservice(pus_tc) { -// 1 => { -// info!("Received PUS ping command TC[17,1]"); -// info!("Sending ping reply PUS TM[17,2]"); -// let start_token = self -// .tm_args -// .verif_reporter -// .start_success(token, Some(self.stamp_helper.stamp())) -// .expect("Error sending start success"); -// let ping_reply = self.tm_helper.create_pus_tm_timestamp_now( -// 17, -// 2, -// None, -// self.tm_args.seq_count_provider.get(), -// ); -// let addr = self.tm_args.tm_store.add_pus_tm(&ping_reply); -// self.tm_args -// .tm_tx -// .send(addr) -// .expect("Sending TM to TM funnel failed"); -// self.tm_args.seq_count_provider.increment(); -// self.tm_args -// .verif_reporter -// .completion_success(start_token, Some(self.stamp_helper.stamp())) -// .expect("Error sending completion success"); -// } -// 128 => { -// info!("Generating test event"); -// self.tc_args -// .event_sender -// .send((TEST_EVENT.into(), None)) -// .expect("Sending test event failed"); -// let start_token = self -// .tm_args -// .verif_reporter -// .start_success(token, Some(self.stamp_helper.stamp())) -// .expect("Error sending start success"); -// self.tm_args -// .verif_reporter -// .completion_success(start_token, Some(self.stamp_helper.stamp())) -// .expect("Error sending completion success"); -// } -// _ => { -// self.tm_args -// .verif_reporter -// .start_failure( -// token, -// FailParams::new( -// Some(self.stamp_helper.stamp()), -// &tmtc_err::INVALID_PUS_SUBSERVICE, -// None, -// ), -// ) -// .expect("Sending start failure TM failed"); -// } -// } -// } // // fn handle_hk_request(&mut self, pus_tc: &PusTc, token: VerificationToken) { // if pus_tc.user_data().is_none() { @@ -495,204 +362,6 @@ impl PusReceiver { // } // } // -// fn handle_event_request(&mut self, pus_tc: &PusTc, token: VerificationToken) { -// let send_start_failure = |vr: &mut StdVerifReporterWithSender, -// timestamp: &[u8], -// failure_code: &ResultU16, -// failure_data: Option<&[u8]>| { -// vr.start_failure( -// token, -// FailParams::new(Some(timestamp), failure_code, failure_data), -// ) -// .expect("Sending start failure TM failed"); -// }; -// let send_start_acceptance = |vr: &mut StdVerifReporterWithSender, timestamp: &[u8]| { -// vr.start_success(token, Some(timestamp)) -// .expect("Sending start success TM failed") -// }; -// if pus_tc.user_data().is_none() { -// send_start_failure( -// &mut self.tm_args.verif_reporter, -// self.stamp_helper.stamp(), -// &tmtc_err::NOT_ENOUGH_APP_DATA, -// None, -// ); -// return; -// } -// let app_data = pus_tc.user_data().unwrap(); -// if app_data.len() < 4 { -// send_start_failure( -// &mut self.tm_args.verif_reporter, -// self.stamp_helper.stamp(), -// &tmtc_err::NOT_ENOUGH_APP_DATA, -// None, -// ); -// return; -// } -// let event_id = EventU32::from(u32::from_be_bytes(app_data.try_into().unwrap())); -// match PusPacket::subservice(pus_tc).try_into() { -// Ok(event::Subservice::TcEnableEventGeneration) => { -// let start_token = send_start_acceptance( -// &mut self.tm_args.verif_reporter, -// self.stamp_helper.stamp(), -// ); -// self.tc_args -// .event_request_tx -// .send(EventRequestWithToken { -// request: EventRequest::Enable(event_id), -// token: start_token, -// }) -// .expect("Sending event request 
failed"); -// } -// Ok(event::Subservice::TcDisableEventGeneration) => { -// let start_token = send_start_acceptance( -// &mut self.tm_args.verif_reporter, -// self.stamp_helper.stamp(), -// ); -// self.tc_args -// .event_request_tx -// .send(EventRequestWithToken { -// request: EventRequest::Disable(event_id), -// token: start_token, -// }) -// .expect("Sending event request failed"); -// } -// _ => { -// send_start_failure( -// &mut self.tm_args.verif_reporter, -// self.stamp_helper.stamp(), -// &tmtc_err::INVALID_PUS_SUBSERVICE, -// None, -// ); -// } -// } -// } -// -// fn handle_scheduled_tc(&mut self, pus_tc: &PusTc, token: VerificationToken) { -// let subservice = match pus_11_generic_tc_check(pus_tc) { -// Ok(subservice) => subservice, -// Err(e) => match e { -// GenericTcCheckError::NotEnoughAppData => { -// self.tm_args -// .verif_reporter -// .start_failure( -// token, -// FailParams::new( -// Some(self.stamp_helper.stamp()), -// &tmtc_err::NOT_ENOUGH_APP_DATA, -// None, -// ), -// ) -// .expect("could not sent verification error"); -// return; -// } -// GenericTcCheckError::InvalidSubservice => { -// self.tm_args -// .verif_reporter -// .start_failure( -// token, -// FailParams::new( -// Some(self.stamp_helper.stamp()), -// &tmtc_err::INVALID_PUS_SUBSERVICE, -// None, -// ), -// ) -// .expect("could not sent verification error"); -// return; -// } -// }, -// }; -// match subservice { -// scheduling::Subservice::TcEnableScheduling => { -// let start_token = self -// .tm_args -// .verif_reporter -// .start_success(token, Some(self.stamp_helper.stamp())) -// .expect("Error sending start success"); -// -// let mut scheduler = self.tc_args.scheduler.borrow_mut(); -// scheduler.enable(); -// if scheduler.is_enabled() { -// self.tm_args -// .verif_reporter -// .completion_success(start_token, Some(self.stamp_helper.stamp())) -// .expect("Error sending completion success"); -// } else { -// panic!("Failed to enable scheduler"); -// } -// } -// scheduling::Subservice::TcDisableScheduling => { -// let start_token = self -// .tm_args -// .verif_reporter -// .start_success(token, Some(self.stamp_helper.stamp())) -// .expect("Error sending start success"); -// -// let mut scheduler = self.tc_args.scheduler.borrow_mut(); -// scheduler.disable(); -// if !scheduler.is_enabled() { -// self.tm_args -// .verif_reporter -// .completion_success(start_token, Some(self.stamp_helper.stamp())) -// .expect("Error sending completion success"); -// } else { -// panic!("Failed to disable scheduler"); -// } -// } -// scheduling::Subservice::TcResetScheduling => { -// let start_token = self -// .tm_args -// .verif_reporter -// .start_success(token, Some(self.stamp_helper.stamp())) -// .expect("Error sending start success"); -// -// let mut pool = self -// .tc_args -// .tc_source -// .tc_store -// .pool -// .write() -// .expect("Locking pool failed"); -// -// let mut scheduler = self.tc_args.scheduler.borrow_mut(); -// scheduler -// .reset(pool.as_mut()) -// .expect("Error resetting TC Pool"); -// drop(scheduler); -// -// self.tm_args -// .verif_reporter -// .completion_success(start_token, Some(self.stamp_helper.stamp())) -// .expect("Error sending completion success"); -// } -// scheduling::Subservice::TcInsertActivity => { -// let start_token = self -// .tm_args -// .verif_reporter -// .start_success(token, Some(self.stamp_helper.stamp())) -// .expect("error sending start success"); -// -// let mut pool = self -// .tc_args -// .tc_source -// .tc_store -// .pool -// .write() -// .expect("locking pool 
failed"); -// let mut scheduler = self.tc_args.scheduler.borrow_mut(); -// scheduler -// .insert_wrapped_tc::(pus_tc, pool.as_mut()) -// .expect("insertion of activity into pool failed"); -// drop(scheduler); -// -// self.tm_args -// .verif_reporter -// .completion_success(start_token, Some(self.stamp_helper.stamp())) -// .expect("sending completion success failed"); -// } -// _ => {} -// } -// } // // fn handle_mode_service(&mut self, pus_tc: &PusTc, token: VerificationToken) { // let mut app_data_len = 0; diff --git a/satrs-example/src/pus/scheduler.rs b/satrs-example/src/pus/scheduler.rs index 1b8d194..2f10636 100644 --- a/satrs-example/src/pus/scheduler.rs +++ b/satrs-example/src/pus/scheduler.rs @@ -3,12 +3,12 @@ use satrs_core::pus::scheduler_srv::PusService11SchedHandler; use satrs_core::pus::{PusPacketHandlerResult, PusServiceHandler}; pub struct Pus11Wrapper { - pub pus11_handler: PusService11SchedHandler, + pub pus_11_handler: PusService11SchedHandler, } impl Pus11Wrapper { pub fn perform_operation(&mut self) -> bool { - match self.pus11_handler.handle_next_packet() { + match self.pus_11_handler.handle_next_packet() { Ok(result) => match result { PusPacketHandlerResult::RequestHandled => {} PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { @@ -17,6 +17,9 @@ impl Pus11Wrapper { PusPacketHandlerResult::CustomSubservice(invalid, _) => { warn!("PUS11 invalid subservice {invalid}"); } + PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => { + warn!("PUS11: Subservice {subservice} not implemented"); + } PusPacketHandlerResult::Empty => { return true; } diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index 7aa1229..daf8727 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -33,6 +33,9 @@ impl Service17CustomWrapper { partial_err ); } + PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => { + warn!("PUS17: Subservice {subservice} not implemented") + } PusPacketHandlerResult::CustomSubservice(subservice, token) => { let (buf, _) = self.pus17_handler.pus_tc_buf(); let (tc, _) = PusTc::from_bytes(buf).unwrap(); diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index accef97..86b5c1c 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -16,7 +16,6 @@ use crate::ccsds::CcsdsReceiver; use crate::pus::{PusReceiver, PusTcArgs, PusTcMpscRouter, PusTmArgs}; use crate::requests::RequestWithToken; use satrs_core::pool::{SharedPool, StoreAddr, StoreError}; -use satrs_core::pus::event_man::EventRequestWithToken; use satrs_core::pus::scheduler::{PusScheduler, TcInfo}; use satrs_core::pus::verification::StdVerifReporterWithSender; use satrs_core::seq_count::SeqCountProviderSyncClonable; @@ -32,7 +31,6 @@ pub struct OtherArgs { pub sock_addr: SocketAddr, pub verif_reporter: StdVerifReporterWithSender, pub event_sender: Sender<(EventU32, Option)>, - pub event_request_tx: Sender, pub request_map: HashMap>, pub seq_count_provider: SeqCountProviderSyncClonable, } @@ -161,8 +159,6 @@ pub fn core_tmtc_task( )); let pus_tm_args = PusTmArgs { - tm_tx: tm_args.tm_sink_sender, - tm_store: tm_args.tm_store.clone(), verif_reporter: args.verif_reporter, seq_count_provider: args.seq_count_provider.clone(), }; -- 2.43.0 From 43408f3a9b1988adeaaf96b87e6d5815f19e6193 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 15:12:03 +0200 Subject: [PATCH 13/39] only actions and modes remaining --- satrs-core/src/pus/scheduler_srv.rs | 4 +++ satrs-example/src/main.rs | 34 
+++++++++++++++--------- satrs-example/src/pus/event.rs | 2 +- satrs-example/src/pus/scheduler.rs | 41 +++++++++++++++++++++++++++-- satrs-example/src/pus/test.rs | 2 +- satrs-example/src/tmtc.rs | 38 -------------------------- 6 files changed, 66 insertions(+), 55 deletions(-) diff --git a/satrs-core/src/pus/scheduler_srv.rs b/satrs-core/src/pus/scheduler_srv.rs index 98ea570..9de4981 100644 --- a/satrs-core/src/pus/scheduler_srv.rs +++ b/satrs-core/src/pus/scheduler_srv.rs @@ -40,6 +40,10 @@ impl PusService11SchedHandler { scheduler, } } + + pub fn scheduler_mut(&mut self) -> &mut PusScheduler { + &mut self.scheduler + } } impl PusServiceHandler for PusService11SchedHandler { diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index 66f805e..8621a98 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -133,7 +133,7 @@ fn main() { let (acs_thread_tx, acs_thread_rx) = channel::(); request_map.insert(RequestTargetId::AcsSubsystem as u32, acs_thread_tx); - let tc_source = PusTcSource { + let tc_source_wrapper = PusTcSource { tc_store: tc_store.clone(), tc_source: tc_source_tx, }; @@ -148,7 +148,7 @@ fn main() { seq_count_provider: seq_count_provider_tmtc, }; let tc_args = TcArgs { - tc_source, + tc_source: tc_source_wrapper.clone(), tc_receiver: tc_source_rx, }; let tm_args = TmArgs { @@ -195,7 +195,10 @@ fn main() { verif_reporter.clone(), scheduler, ); - let mut pus_11_wrapper = Pus11Wrapper { pus_11_handler }; + let mut pus_11_wrapper = Pus11Wrapper { + pus_11_handler, + tc_source_wrapper, + }; let pus_5_handler = PusService5EventHandler::new( pus_event_rx, tc_store.pool.clone(), @@ -370,18 +373,23 @@ fn main() { let jh4 = thread::Builder::new() .name("PUS".to_string()) .spawn(move || loop { - let mut all_queues_empty = true; - let mut is_srv_finished = |srv_handler_finished: bool| { - if !srv_handler_finished { - all_queues_empty = false; + pus_11_wrapper.release_tcs(); + loop { + let mut all_queues_empty = true; + let mut is_srv_finished = |srv_handler_finished: bool| { + if !srv_handler_finished { + all_queues_empty = false; + } + }; + is_srv_finished(pus_17_wrapper.handle_next_packet()); + is_srv_finished(pus_11_wrapper.handle_next_packet()); + is_srv_finished(pus_5_wrapper.handle_next_packet()); + if all_queues_empty { + break; } - }; - is_srv_finished(pus_17_wrapper.perform_operation()); - is_srv_finished(pus_11_wrapper.perform_operation()); - is_srv_finished(pus_5_wrapper.perform_operation()); - if all_queues_empty { - thread::sleep(Duration::from_millis(200)); } + + thread::sleep(Duration::from_millis(200)); }) .unwrap(); jh0.join().expect("Joining UDP TMTC server thread failed"); diff --git a/satrs-example/src/pus/event.rs b/satrs-example/src/pus/event.rs index ef47c87..0f2654e 100644 --- a/satrs-example/src/pus/event.rs +++ b/satrs-example/src/pus/event.rs @@ -7,7 +7,7 @@ pub struct Pus5Wrapper { } impl Pus5Wrapper { - pub fn perform_operation(&mut self) -> bool { + pub fn handle_next_packet(&mut self) -> bool { match self.pus_5_handler.handle_next_packet() { Ok(result) => match result { PusPacketHandlerResult::RequestHandled => {} diff --git a/satrs-example/src/pus/scheduler.rs b/satrs-example/src/pus/scheduler.rs index 2f10636..ffd8c89 100644 --- a/satrs-example/src/pus/scheduler.rs +++ b/satrs-example/src/pus/scheduler.rs @@ -1,13 +1,50 @@ -use log::{error, warn}; +use crate::tmtc::PusTcSource; +use log::{error, info, warn}; +use satrs_core::pool::{SharedPool, StoreAddr}; +use satrs_core::pus::scheduler::TcInfo; use 
satrs_core::pus::scheduler_srv::PusService11SchedHandler; use satrs_core::pus::{PusPacketHandlerResult, PusServiceHandler}; pub struct Pus11Wrapper { pub pus_11_handler: PusService11SchedHandler, + pub tc_source_wrapper: PusTcSource, } impl Pus11Wrapper { - pub fn perform_operation(&mut self) -> bool { + pub fn release_tcs(&mut self) { + let releaser = |enabled: bool, info: &TcInfo| -> bool { + if enabled { + self.tc_source_wrapper + .tc_source + .send(info.addr()) + .expect("sending TC to TC source failed"); + } + true + }; + + let mut pool = self + .tc_source_wrapper + .tc_store + .pool + .write() + .expect("error locking pool"); + + self.pus_11_handler + .scheduler_mut() + .update_time_from_now() + .unwrap(); + if let Ok(released_tcs) = self + .pus_11_handler + .scheduler_mut() + .release_telecommands(releaser, pool.as_mut()) + { + if released_tcs > 0 { + info!("{released_tcs} TC(s) released from scheduler"); + } + } + } + + pub fn handle_next_packet(&mut self) -> bool { match self.pus_11_handler.handle_next_packet() { Ok(result) => match result { PusPacketHandlerResult::RequestHandled => {} diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index daf8727..3aefa6e 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -16,7 +16,7 @@ pub struct Service17CustomWrapper { } impl Service17CustomWrapper { - pub fn perform_operation(&mut self) -> bool { + pub fn handle_next_packet(&mut self) -> bool { let res = self.pus17_handler.handle_next_packet(); if res.is_err() { warn!("PUS17 handler failed with error {:?}", res.unwrap_err()); diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index 86b5c1c..28aaab9 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -2,12 +2,10 @@ use log::info; use satrs_core::events::EventU32; use satrs_core::hal::host::udp_server::{ReceiveResult, UdpTcServer}; use satrs_core::params::Params; -use std::cell::RefCell; use std::collections::HashMap; use std::error::Error; use std::fmt::{Display, Formatter}; use std::net::SocketAddr; -use std::rc::Rc; use std::sync::mpsc::{Receiver, SendError, Sender, TryRecvError}; use std::thread; use std::time::Duration; @@ -16,7 +14,6 @@ use crate::ccsds::CcsdsReceiver; use crate::pus::{PusReceiver, PusTcArgs, PusTcMpscRouter, PusTmArgs}; use crate::requests::RequestWithToken; use satrs_core::pool::{SharedPool, StoreAddr, StoreError}; -use satrs_core::pus::scheduler::{PusScheduler, TcInfo}; use satrs_core::pus::verification::StdVerifReporterWithSender; use satrs_core::seq_count::SeqCountProviderSyncClonable; use satrs_core::spacepackets::ecss::{PusPacket, SerializablePusPacket}; @@ -154,10 +151,6 @@ pub fn core_tmtc_task( tm_args: TmArgs, pus_router: PusTcMpscRouter, ) { - let scheduler = Rc::new(RefCell::new( - PusScheduler::new_with_current_init_time(Duration::from_secs(5)).unwrap(), - )); - let pus_tm_args = PusTmArgs { verif_reporter: args.verif_reporter, seq_count_provider: args.seq_count_provider.clone(), @@ -185,13 +178,11 @@ pub fn core_tmtc_task( let mut tc_buf: [u8; 4096] = [0; 4096]; loop { - let tmtc_sched = scheduler.clone(); core_tmtc_loop( &mut udp_tmtc_server, &mut tc_args, &mut tc_buf, &mut pus_receiver, - tmtc_sched, ); thread::sleep(Duration::from_millis(400)); } @@ -202,36 +193,7 @@ fn core_tmtc_loop( tc_args: &mut TcArgs, tc_buf: &mut [u8], pus_receiver: &mut PusReceiver, - scheduler: Rc>, ) { - let releaser = |enabled: bool, info: &TcInfo| -> bool { - if enabled { - tc_args - .tc_source - .tc_source - .send(info.addr()) - 
.expect("sending TC to TC source failed"); - } - true - }; - - let mut pool = tc_args - .tc_source - .tc_store - .pool - .write() - .expect("error locking pool"); - - let mut scheduler = scheduler.borrow_mut(); - scheduler.update_time_from_now().unwrap(); - if let Ok(released_tcs) = scheduler.release_telecommands(releaser, pool.as_mut()) { - if released_tcs > 0 { - info!("{released_tcs} TC(s) released from scheduler"); - } - } - drop(pool); - drop(scheduler); - while poll_tc_server(udp_tmtc_server) {} match tc_args.tc_receiver.try_recv() { Ok(addr) => { -- 2.43.0 From 831cb46059284f6057caed58ede70297bbf880ad Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 15:15:19 +0200 Subject: [PATCH 14/39] fix to avoid burning through CPU time --- satrs-example/src/main.rs | 1 - satrs-example/src/pus/test.rs | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index 8621a98..b4042e1 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -388,7 +388,6 @@ fn main() { break; } } - thread::sleep(Duration::from_millis(200)); }) .unwrap(); diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index 3aefa6e..5248028 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -69,9 +69,9 @@ impl Service17CustomWrapper { } } PusPacketHandlerResult::Empty => { - return false; + return true; } } - true + false } } -- 2.43.0 From bc65cae3ec8648920d7a7bb2656eaca17a5c2bd8 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 17:24:53 +0200 Subject: [PATCH 15/39] simplifications for verification reporter --- satrs-core/src/pus/verification.rs | 247 +++++++++-------------------- satrs-core/src/seq_count.rs | 8 + satrs-example/src/main.rs | 45 ++++-- satrs-example/src/pus/mod.rs | 3 - satrs-example/src/tmtc.rs | 3 - 5 files changed, 113 insertions(+), 193 deletions(-) diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs index 95a54eb..91e3f9d 100644 --- a/satrs-core/src/pus/verification.rs +++ b/satrs-core/src/pus/verification.rs @@ -97,7 +97,6 @@ pub use alloc_mod::{ VerificationReporter, VerificationReporterCfg, VerificationReporterWithSender, }; -use crate::seq_count::SequenceCountProviderCore; #[cfg(all(feature = "crossbeam", feature = "std"))] pub use stdmod::CrossbeamVerifSender; #[cfg(feature = "std")] @@ -316,14 +315,6 @@ pub struct VerificationReporterCore { apid: u16, } -pub(crate) fn increment_seq_counter( - seq_counter: Option<&(impl SequenceCountProviderCore + ?Sized)>, -) { - if let Some(seq_counter) = seq_counter { - seq_counter.increment(); - } -} - pub enum VerifSuccess {} pub enum VerifFailure {} @@ -366,14 +357,7 @@ impl<'src_data, State, SuccessOrFailure> VerificationSendable<'src_data, State, } impl<'src_data, State> VerificationSendable<'src_data, State, VerifFailure> { - pub fn send_success_verif_failure( - self, - seq_counter: Option<&(impl SequenceCountProviderCore + ?Sized)>, - msg_counter: Option<&(impl SequenceCountProviderCore + ?Sized)>, - ) { - increment_seq_counter(seq_counter); - increment_seq_counter(msg_counter); - } + pub fn send_success_verif_failure(self) {} } impl<'src_data, State> VerificationSendable<'src_data, State, VerifFailure> { @@ -383,13 +367,7 @@ impl<'src_data, State> VerificationSendable<'src_data, State, VerifFailure> { } impl<'src_data> VerificationSendable<'src_data, TcStateNone, VerifSuccess> { - pub fn send_success_acceptance_success( - self, - seq_counter: Option<&(impl 
SequenceCountProviderCore + ?Sized)>, - msg_counter: Option<&(impl SequenceCountProviderCore + ?Sized)>, - ) -> VerificationToken { - increment_seq_counter(seq_counter); - increment_seq_counter(msg_counter); + pub fn send_success_acceptance_success(self) -> VerificationToken { VerificationToken { state: PhantomData, req_id: self.token.unwrap().req_id(), @@ -398,13 +376,7 @@ impl<'src_data> VerificationSendable<'src_data, TcStateNone, VerifSuccess> { } impl<'src_data> VerificationSendable<'src_data, TcStateAccepted, VerifSuccess> { - pub fn send_success_start_success( - self, - seq_counter: Option<&(impl SequenceCountProviderCore + ?Sized)>, - msg_counter: Option<&(impl SequenceCountProviderCore + ?Sized)>, - ) -> VerificationToken { - increment_seq_counter(seq_counter); - increment_seq_counter(msg_counter); + pub fn send_success_start_success(self) -> VerificationToken { VerificationToken { state: PhantomData, req_id: self.token.unwrap().req_id(), @@ -415,14 +387,7 @@ impl<'src_data> VerificationSendable<'src_data, TcStateAccepted, VerifSuccess> { impl<'src_data, TcState: WasAtLeastAccepted + Copy> VerificationSendable<'src_data, TcState, VerifSuccess> { - pub fn send_success_step_or_completion_success( - self, - seq_counter: Option<&(impl SequenceCountProviderCore + ?Sized)>, - msg_counter: Option<&(impl SequenceCountProviderCore + ?Sized)>, - ) { - increment_seq_counter(seq_counter); - increment_seq_counter(msg_counter); - } + pub fn send_success_step_or_completion_success(self) {} } /// Primary verification handler. It provides an API to send PUS 1 verification telemetry packets @@ -477,8 +442,8 @@ impl VerificationReporterCore { src_data_buf: &'src_data mut [u8], subservice: u8, token: VerificationToken, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), + seq_count: u16, + msg_count: u16, time_stamp: Option<&'src_data [u8]>, ) -> Result< VerificationSendable<'src_data, State, VerifSuccess>, @@ -488,8 +453,8 @@ impl VerificationReporterCore { self.create_pus_verif_success_tm( src_data_buf, subservice, - seq_counter.get(), - msg_counter.get(), + seq_count, + msg_count, &token.req_id, time_stamp, None::<&dyn EcssEnumeration>, @@ -506,8 +471,8 @@ impl VerificationReporterCore { src_data_buf: &'src_data mut [u8], subservice: u8, token: VerificationToken, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), + seq_count: u16, + msg_count: u16, step: Option<&(impl EcssEnumeration + ?Sized)>, params: &FailParams<'src_data, '_>, ) -> Result< @@ -518,8 +483,8 @@ impl VerificationReporterCore { self.create_pus_verif_fail_tm( src_data_buf, subservice, - seq_counter.get(), - msg_counter.get(), + seq_count, + msg_count, &token.req_id, step, params, @@ -534,8 +499,8 @@ impl VerificationReporterCore { &mut self, src_data_buf: &'src_data mut [u8], token: VerificationToken, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), + seq_count: u16, + msg_count: u16, time_stamp: Option<&'src_data [u8]>, ) -> Result< VerificationSendable<'src_data, TcStateNone, VerifSuccess>, @@ -545,8 +510,8 @@ impl VerificationReporterCore { src_data_buf, Subservice::TmAcceptanceSuccess.into(), token, - seq_counter, - msg_counter, + seq_count, + msg_count, time_stamp, ) } @@ -554,8 +519,6 @@ impl VerificationReporterCore { pub fn send_acceptance_success( &self, mut sendable: VerificationSendable<'_, TcStateNone, VerifSuccess>, - 
seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), sender: &mut (impl EcssTmSenderCore + ?Sized), ) -> Result, VerificationOrSendErrorWithToken> { @@ -567,14 +530,12 @@ impl VerificationReporterCore { sendable.token.unwrap(), ) })?; - Ok(sendable.send_success_acceptance_success(Some(seq_counter), Some(msg_counter))) + Ok(sendable.send_success_acceptance_success()) } pub fn send_acceptance_failure( &self, mut sendable: VerificationSendable<'_, TcStateNone, VerifFailure>, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), sender: &mut (impl EcssTmSenderCore + ?Sized), ) -> Result<(), VerificationOrSendErrorWithToken> { sender @@ -585,7 +546,7 @@ impl VerificationReporterCore { sendable.token.unwrap(), ) })?; - sendable.send_success_verif_failure(Some(seq_counter), Some(msg_counter)); + sendable.send_success_verif_failure(); Ok(()) } @@ -594,8 +555,8 @@ impl VerificationReporterCore { &mut self, src_data_buf: &'src_data mut [u8], token: VerificationToken, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), + seq_count: u16, + msg_count: u16, params: FailParams<'src_data, '_>, ) -> Result< VerificationSendable<'src_data, TcStateNone, VerifFailure>, @@ -605,8 +566,8 @@ impl VerificationReporterCore { src_data_buf, Subservice::TmAcceptanceFailure.into(), token, - seq_counter, - msg_counter, + seq_count, + msg_count, None::<&dyn EcssEnumeration>, ¶ms, ) @@ -619,8 +580,8 @@ impl VerificationReporterCore { &mut self, src_data_buf: &'src_data mut [u8], token: VerificationToken, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), + seq_count: u16, + msg_count: u16, time_stamp: Option<&'src_data [u8]>, ) -> Result< VerificationSendable<'src_data, TcStateAccepted, VerifSuccess>, @@ -630,8 +591,8 @@ impl VerificationReporterCore { src_data_buf, Subservice::TmStartSuccess.into(), token, - seq_counter, - msg_counter, + seq_count, + msg_count, time_stamp, ) } @@ -639,8 +600,6 @@ impl VerificationReporterCore { pub fn send_start_success( &self, mut sendable: VerificationSendable<'_, TcStateAccepted, VerifSuccess>, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), sender: &mut (impl EcssTmSenderCore + ?Sized), ) -> Result< VerificationToken, @@ -654,7 +613,7 @@ impl VerificationReporterCore { sendable.token.unwrap(), ) })?; - Ok(sendable.send_success_start_success(Some(seq_counter), Some(msg_counter))) + Ok(sendable.send_success_start_success()) } /// Package and send a PUS TM\[1, 4\] packet, see 8.1.2.4 of the PUS standard. 
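(A minimal usage sketch of the reworked core API; `reporter_core`, `buf`, `token`, `sender` and `stamp` are assumed caller state. The counters are now plain `u16` arguments instead of boxed counter providers, and passing zero is fine because the TM funnel stamps the real values centrally.)

    // Sketch only: signatures follow this patch, the surrounding setup is assumed.
    let sendable = reporter_core.acceptance_success(
        &mut buf,     // scratch buffer the PUS TM[1, 1] is serialized into
        token,        // VerificationToken<TcStateNone>
        0,            // seq_count, overwritten centrally later
        0,            // msg_count, overwritten centrally later
        Some(&stamp),
    )?;
    let accepted = reporter_core.send_acceptance_success(sendable, &mut sender)?;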
@@ -665,8 +624,8 @@ impl VerificationReporterCore { &mut self, src_data_buf: &'src_data mut [u8], token: VerificationToken, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), + seq_count: u16, + msg_count: u16, params: FailParams<'src_data, '_>, ) -> Result< VerificationSendable<'src_data, TcStateAccepted, VerifFailure>, @@ -676,8 +635,8 @@ impl VerificationReporterCore { src_data_buf, Subservice::TmStartFailure.into(), token, - seq_counter, - msg_counter, + seq_count, + msg_count, None::<&dyn EcssEnumeration>, ¶ms, ) @@ -686,8 +645,6 @@ impl VerificationReporterCore { pub fn send_start_failure( &self, mut sendable: VerificationSendable<'_, TcStateAccepted, VerifFailure>, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), sender: &mut (impl EcssTmSenderCore + ?Sized), ) -> Result<(), VerificationOrSendErrorWithToken> { sender @@ -698,7 +655,7 @@ impl VerificationReporterCore { sendable.token.unwrap(), ) })?; - sendable.send_success_verif_failure(Some(seq_counter), Some(msg_counter)); + sendable.send_success_verif_failure(); Ok(()) } @@ -709,8 +666,8 @@ impl VerificationReporterCore { &mut self, src_data_buf: &'src_data mut [u8], token: &VerificationToken, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), + seq_count: u16, + msg_count: u16, time_stamp: Option<&'src_data [u8]>, step: impl EcssEnumeration, ) -> Result, EcssTmtcError> { @@ -718,8 +675,8 @@ impl VerificationReporterCore { self.create_pus_verif_success_tm( src_data_buf, Subservice::TmStepSuccess.into(), - seq_counter.get(), - msg_counter.get(), + seq_count, + msg_count, &token.req_id, time_stamp, Some(&step), @@ -735,8 +692,8 @@ impl VerificationReporterCore { &mut self, src_data_buf: &'src_data mut [u8], token: VerificationToken, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), + seq_count: u16, + msg_count: u16, params: FailParamsWithStep<'src_data, '_>, ) -> Result< VerificationSendable<'src_data, TcStateStarted, VerifFailure>, @@ -746,8 +703,8 @@ impl VerificationReporterCore { self.create_pus_verif_fail_tm( src_data_buf, Subservice::TmStepFailure.into(), - seq_counter.get(), - msg_counter.get(), + seq_count, + msg_count, &token.req_id, Some(params.step), ¶ms.bp, @@ -765,8 +722,8 @@ impl VerificationReporterCore { &mut self, src_data_buf: &'src_data mut [u8], token: VerificationToken, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), + seq_counter: u16, + msg_counter: u16, time_stamp: Option<&'src_data [u8]>, ) -> Result< VerificationSendable<'src_data, TcState, VerifSuccess>, @@ -790,8 +747,8 @@ impl VerificationReporterCore { &mut self, src_data_buf: &'src_data mut [u8], token: VerificationToken, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), + seq_count: u16, + msg_count: u16, params: FailParams<'src_data, '_>, ) -> Result< VerificationSendable<'src_data, TcState, VerifFailure>, @@ -801,8 +758,8 @@ impl VerificationReporterCore { src_data_buf, Subservice::TmCompletionFailure.into(), token, - seq_counter, - msg_counter, + seq_count, + msg_count, None::<&dyn EcssEnumeration>, ¶ms, ) @@ -811,8 +768,6 @@ impl VerificationReporterCore { pub fn send_step_or_completion_success( &self, mut sendable: VerificationSendable<'_, 
TcState, VerifSuccess>, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), sender: &mut (impl EcssTmSenderCore + ?Sized), ) -> Result<(), VerificationOrSendErrorWithToken> { sender @@ -823,15 +778,13 @@ impl VerificationReporterCore { sendable.token.unwrap(), ) })?; - sendable.send_success_step_or_completion_success(Some(seq_counter), Some(msg_counter)); + sendable.send_success_step_or_completion_success(); Ok(()) } pub fn send_step_or_completion_failure( &self, mut sendable: VerificationSendable<'_, TcState, VerifFailure>, - seq_counter: &(impl SequenceCountProviderCore + ?Sized), - msg_counter: &(impl SequenceCountProviderCore + ?Sized), sender: &mut (impl EcssTmSenderCore + ?Sized), ) -> Result<(), VerificationOrSendErrorWithToken> { sender @@ -842,7 +795,7 @@ impl VerificationReporterCore { sendable.token.unwrap(), ) })?; - sendable.send_success_verif_failure(Some(seq_counter), Some(msg_counter)); + sendable.send_success_verif_failure(); Ok(()) } @@ -953,7 +906,6 @@ impl VerificationReporterCore { mod alloc_mod { use super::*; use crate::pus::alloc_mod::EcssTmSender; - use crate::seq_count::SequenceCountProvider; use alloc::boxed::Box; use alloc::vec; use alloc::vec::Vec; @@ -961,8 +913,6 @@ mod alloc_mod { #[derive(Clone)] pub struct VerificationReporterCfg { apid: u16, - seq_counter: Box + Send>, - msg_counter: Box + Send>, pub step_field_width: usize, pub fail_code_field_width: usize, pub max_fail_data_len: usize, @@ -971,8 +921,6 @@ mod alloc_mod { impl VerificationReporterCfg { pub fn new( apid: u16, - seq_counter: Box + Send>, - msg_counter: Box + Send>, step_field_width: usize, fail_code_field_width: usize, max_fail_data_len: usize, @@ -982,8 +930,6 @@ mod alloc_mod { } Some(Self { apid, - seq_counter, - msg_counter, step_field_width, fail_code_field_width, max_fail_data_len, @@ -993,11 +939,11 @@ mod alloc_mod { /// Primary verification handler. It provides an API to send PUS 1 verification telemetry packets /// and verify the various steps of telecommand handling as specified in the PUS standard. + /// It is assumed that the sequence counter and message counters are written in a central + /// TM funnel. This helper will always set those fields to 0. 
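(For context, a sketch of the funnel-side counterpart this doc comment relies on, using the names the example application introduces further below: `seq_count_provider` is a simple sequence count provider and `msg_counter_map` maps PUS service numbers to `u16` message counters.)

    // Central counter stamping in the TM funnel. wrapping_add is equivalent to
    // the explicit u16::MAX rollover check used in the example code.
    tm.sp_header.set_seq_count(seq_count_provider.get_and_increment());
    let entry = msg_counter_map.entry(tm.service()).or_insert(0u16);
    tm.sec_header.msg_counter = *entry;
    *entry = entry.wrapping_add(1);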
#[derive(Clone)] pub struct VerificationReporter { source_data_buf: Vec, - seq_counter: Box + Send + 'static>, - msg_counter: Box + Send + 'static>, pub reporter: VerificationReporterCore, } @@ -1012,8 +958,6 @@ mod alloc_mod { + cfg.fail_code_field_width + cfg.max_fail_data_len ], - seq_counter: cfg.seq_counter.clone(), - msg_counter: cfg.msg_counter.clone(), reporter, } } @@ -1046,16 +990,11 @@ mod alloc_mod { let sendable = self.reporter.acceptance_success( self.source_data_buf.as_mut_slice(), token, - self.seq_counter.as_ref(), - self.msg_counter.as_ref(), + 0, + 0, time_stamp, )?; - self.reporter.send_acceptance_success( - sendable, - self.seq_counter.as_ref(), - self.msg_counter.as_ref(), - sender, - ) + self.reporter.send_acceptance_success(sendable, sender) } /// Package and send a PUS TM\[1, 2\] packet, see 8.1.2.2 of the PUS standard @@ -1068,16 +1007,11 @@ mod alloc_mod { let sendable = self.reporter.acceptance_failure( self.source_data_buf.as_mut_slice(), token, - self.seq_counter.as_ref(), - self.msg_counter.as_ref(), + 0, + 0, params, )?; - self.reporter.send_acceptance_failure( - sendable, - self.seq_counter.as_ref(), - self.msg_counter.as_ref(), - sender, - ) + self.reporter.send_acceptance_failure(sendable, sender) } /// Package and send a PUS TM\[1, 3\] packet, see 8.1.2.3 of the PUS standard. @@ -1095,16 +1029,11 @@ mod alloc_mod { let sendable = self.reporter.start_success( self.source_data_buf.as_mut_slice(), token, - self.seq_counter.as_mut(), - self.msg_counter.as_mut(), + 0, + 0, time_stamp, )?; - self.reporter.send_start_success( - sendable, - self.seq_counter.as_ref(), - self.msg_counter.as_ref(), - sender, - ) + self.reporter.send_start_success(sendable, sender) } /// Package and send a PUS TM\[1, 4\] packet, see 8.1.2.4 of the PUS standard. @@ -1120,16 +1049,11 @@ mod alloc_mod { let sendable = self.reporter.start_failure( self.source_data_buf.as_mut_slice(), token, - self.seq_counter.as_mut(), - self.msg_counter.as_mut(), + 0, + 0, params, )?; - self.reporter.send_start_failure( - sendable, - self.seq_counter.as_ref(), - self.msg_counter.as_ref(), - sender, - ) + self.reporter.send_start_failure(sendable, sender) } /// Package and send a PUS TM\[1, 5\] packet, see 8.1.2.5 of the PUS standard. @@ -1145,18 +1069,13 @@ mod alloc_mod { let sendable = self.reporter.step_success( self.source_data_buf.as_mut_slice(), token, - self.seq_counter.as_mut(), - self.msg_counter.as_mut(), + 0, + 0, time_stamp, step, )?; self.reporter - .send_step_or_completion_success( - sendable, - self.seq_counter.as_ref(), - self.msg_counter.as_ref(), - sender, - ) + .send_step_or_completion_success(sendable, sender) .map_err(|e| e.0) } @@ -1173,16 +1092,12 @@ mod alloc_mod { let sendable = self.reporter.step_failure( self.source_data_buf.as_mut_slice(), token, - self.seq_counter.as_mut(), - self.msg_counter.as_mut(), + 0, + 0, params, )?; - self.reporter.send_step_or_completion_failure( - sendable, - self.seq_counter.as_ref(), - self.msg_counter.as_ref(), - sender, - ) + self.reporter + .send_step_or_completion_failure(sendable, sender) } /// Package and send a PUS TM\[1, 7\] packet, see 8.1.2.7 of the PUS standard. 
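(With the sender-owning wrapper, the counters disappear from the call sites entirely. A hedged sketch of the happy path as the service handlers drive it; `verif_reporter` is a `StdVerifReporterWithSender`, `init_token` and `stamp` are assumed:)

    let accepted = verif_reporter.acceptance_success(init_token, Some(&stamp))?;
    let started = verif_reporter.start_success(accepted, Some(&stamp))?;
    verif_reporter.completion_success(started, Some(&stamp))?;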
@@ -1198,16 +1113,12 @@ mod alloc_mod { let sendable = self.reporter.completion_success( self.source_data_buf.as_mut_slice(), token, - self.seq_counter.as_mut(), - self.msg_counter.as_mut(), + 0, + 0, time_stamp, )?; - self.reporter.send_step_or_completion_success( - sendable, - self.seq_counter.as_ref(), - self.msg_counter.as_ref(), - sender, - ) + self.reporter + .send_step_or_completion_success(sendable, sender) } /// Package and send a PUS TM\[1, 8\] packet, see 8.1.2.8 of the PUS standard. @@ -1223,16 +1134,12 @@ mod alloc_mod { let sendable = self.reporter.completion_failure( self.source_data_buf.as_mut_slice(), token, - self.seq_counter.as_mut(), - self.msg_counter.as_mut(), + 0, + 0, params, )?; - self.reporter.send_step_or_completion_failure( - sendable, - self.seq_counter.as_ref(), - self.msg_counter.as_ref(), - sender, - ) + self.reporter + .send_step_or_completion_failure(sendable, sender) } } diff --git a/satrs-core/src/seq_count.rs b/satrs-core/src/seq_count.rs index b299756..2258d94 100644 --- a/satrs-core/src/seq_count.rs +++ b/satrs-core/src/seq_count.rs @@ -46,6 +46,14 @@ pub struct SeqCountProviderSimple { seq_count: Cell, } +impl SeqCountProviderSimple { + pub fn new() -> Self { + Self { + seq_count: Cell::new(0), + } + } +} + impl SequenceCountProviderCore for SeqCountProviderSimple { fn get(&self) -> u16 { self.seq_count.get() diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index b4042e1..fc8e40e 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -6,6 +6,7 @@ mod requests; mod tmtc; use log::{info, warn}; +use std::collections::hash_map::Entry; use crate::hk::AcsHkIds; use crate::logging::setup_logger; @@ -36,7 +37,8 @@ use satrs_core::pus::verification::{ MpscVerifSender, VerificationReporterCfg, VerificationReporterWithSender, }; use satrs_core::pus::MpscTmtcInStoreSender; -use satrs_core::seq_count::{SeqCountProviderSimple, SeqCountProviderSyncClonable}; +use satrs_core::seq_count::{SeqCountProviderSimple, SequenceCountProviderCore}; +use satrs_core::spacepackets::ecss::{PusPacket, SerializablePusPacket}; use satrs_core::spacepackets::{ time::cds::TimeProvider, time::TimeWriter, @@ -65,6 +67,7 @@ fn main() { (15, 2048), ])); let tm_store = SharedTmStore::new(Arc::new(RwLock::new(Box::new(tm_pool)))); + let tm_store_event = tm_store.clone(); let tc_pool = LocalPool::new(PoolCfg::new(vec![ (30, 32), (15, 64), @@ -77,9 +80,8 @@ fn main() { pool: Arc::new(RwLock::new(Box::new(tc_pool))), }; - let seq_count_provider = SeqCountProviderSyncClonable::default(); - let seq_count_provider_verif = seq_count_provider.clone(); - let seq_count_provider_tmtc = seq_count_provider; + let seq_count_provider = SeqCountProviderSimple::new(); + let mut msg_counter_map: HashMap = HashMap::new(); let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT); let (tc_source_tx, tc_source_rx) = channel(); let (tm_funnel_tx, tm_funnel_rx) = channel(); @@ -90,16 +92,7 @@ fn main() { tm_store.backing_pool(), tm_funnel_tx.clone(), ); - let verif_cfg = VerificationReporterCfg::new( - PUS_APID, - Box::new(seq_count_provider_verif), - #[allow(clippy::box_default)] - Box::new(SeqCountProviderSimple::default()), - 1, - 2, - 8, - ) - .unwrap(); + let verif_cfg = VerificationReporterCfg::new(PUS_APID, 1, 2, 8).unwrap(); // Every software component which needs to generate verification telemetry, gets a cloned // verification reporter. 
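    // Each clone is assumed to carry its own source data buffer plus a boxed
    // clone of the TM sender, so handing clones out to different threads does
    // not introduce shared mutable state between the reporting components.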
let verif_reporter = VerificationReporterWithSender::new(&verif_cfg, Box::new(verif_sender)); @@ -143,9 +136,7 @@ fn main() { sock_addr, verif_reporter: verif_reporter.clone(), event_sender, - // event_request_tx, request_map, - seq_count_provider: seq_count_provider_tmtc, }; let tc_args = TcArgs { tc_source: tc_source_wrapper.clone(), @@ -222,12 +213,32 @@ fn main() { let jh1 = thread::Builder::new() .name("TM Funnel".to_string()) .spawn(move || { + let mut tm_buf: [u8; 2048] = [0; 2048]; let tm_funnel = TmFunnel { tm_server_tx, tm_funnel_rx, }; loop { if let Ok(addr) = tm_funnel.tm_funnel_rx.recv() { + // Read the TM, set sequence counter and message counter, and finally write + // it back with the updated CRC + let shared_pool = tm_store.backing_pool(); + let mut pool_guard = shared_pool.write().expect("Locking TM pool failed"); + let tm_raw = pool_guard + .modify(&addr) + .expect("Reading TM from pool failed"); + tm_buf[0..tm_raw.len()].copy_from_slice(&tm_raw); + let (mut tm, size) = + PusTm::from_bytes(&tm_buf, 7).expect("Creating TM from raw slice failed"); + tm.sp_header.set_apid(PUS_APID); + tm.sp_header + .set_seq_count(seq_count_provider.get_and_increment()); + let entry = msg_counter_map.entry(tm.service()).or_insert(0); + tm.sec_header.msg_counter = *entry; + *entry += 1; + tm.calc_crc_on_serialization = true; + tm.write_to_bytes(tm_raw) + .expect("Writing PUS TM back failed"); tm_funnel .tm_server_tx .send(addr) @@ -245,7 +256,7 @@ fn main() { let mut sender = MpscTmtcInStoreSender::new( 1, "event_sender", - tm_store.backing_pool(), + tm_store_event.backing_pool(), tm_funnel_tx, ); let mut time_provider = TimeProvider::new_with_u16_days(0, 0); diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index a3a2292..4186359 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -4,7 +4,6 @@ use satrs_core::params::Params; use satrs_core::pool::StoreAddr; use satrs_core::pus::verification::{FailParams, StdVerifReporterWithSender}; use satrs_core::pus::AcceptedTc; -use satrs_core::seq_count::SeqCountProviderSyncClonable; use satrs_core::spacepackets::ecss::PusServiceId; use satrs_core::spacepackets::tc::PusTc; use satrs_core::spacepackets::time::cds::TimeProvider; @@ -35,8 +34,6 @@ pub struct PusReceiver { pub struct PusTmArgs { /// All verification reporting is done with this reporter. 
pub verif_reporter: StdVerifReporterWithSender, - /// Sequence count provider for TMs sent from within pus demultiplexer - pub seq_count_provider: SeqCountProviderSyncClonable, } impl PusTmArgs { diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index 28aaab9..5299017 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -15,7 +15,6 @@ use crate::pus::{PusReceiver, PusTcArgs, PusTcMpscRouter, PusTmArgs}; use crate::requests::RequestWithToken; use satrs_core::pool::{SharedPool, StoreAddr, StoreError}; use satrs_core::pus::verification::StdVerifReporterWithSender; -use satrs_core::seq_count::SeqCountProviderSyncClonable; use satrs_core::spacepackets::ecss::{PusPacket, SerializablePusPacket}; use satrs_core::spacepackets::tc::PusTc; use satrs_core::spacepackets::SpHeader; @@ -29,7 +28,6 @@ pub struct OtherArgs { pub verif_reporter: StdVerifReporterWithSender, pub event_sender: Sender<(EventU32, Option)>, pub request_map: HashMap>, - pub seq_count_provider: SeqCountProviderSyncClonable, } pub struct TmArgs { @@ -153,7 +151,6 @@ pub fn core_tmtc_task( ) { let pus_tm_args = PusTmArgs { verif_reporter: args.verif_reporter, - seq_count_provider: args.seq_count_provider.clone(), }; let pus_tc_args = PusTcArgs { pus_router, -- 2.43.0 From 5895c3d3579de12ac1305d60eb41e735fcf85bc7 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 17:32:00 +0200 Subject: [PATCH 16/39] something broke.. --- satrs-example/src/main.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index fc8e40e..9f94db8 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -221,7 +221,9 @@ fn main() { loop { if let Ok(addr) = tm_funnel.tm_funnel_rx.recv() { // Read the TM, set sequence counter and message counter, and finally write - // it back with the updated CRC + // it back with the updated CRC. + // We could theoretically manipulate the counters and the CRC directly + // in place as an optimization, but I don't think this is necessary.. 
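                        // (The in-place variant alluded to above is what the later
                        // "zero copy TM funnel" patch of this series switches to, using
                        // PusTmZeroCopyWriter.)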
let shared_pool = tm_store.backing_pool(); let mut pool_guard = shared_pool.write().expect("Locking TM pool failed"); let tm_raw = pool_guard -- 2.43.0 From da7f929ec062123161b132f59067a704a734dfd8 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 19:11:09 +0200 Subject: [PATCH 17/39] continue --- satrs-core/Cargo.toml | 2 +- satrs-core/src/events.rs | 1 - satrs-core/src/pool.rs | 24 +++++++++++++++++++++++- satrs-core/src/pus/verification.rs | 23 +++-------------------- satrs-example/src/main.rs | 6 +++++- 5 files changed, 32 insertions(+), 24 deletions(-) diff --git a/satrs-core/Cargo.toml b/satrs-core/Cargo.toml index 871b3c8..8fc7015 100644 --- a/satrs-core/Cargo.toml +++ b/satrs-core/Cargo.toml @@ -18,7 +18,7 @@ paste = "1" embed-doc-image = "0.1" [dependencies.num_enum] -version = "0.5" +version = "0.6" default-features = false [dependencies.dyn-clone] diff --git a/satrs-core/src/events.rs b/satrs-core/src/events.rs index 2c53fcd..4b4c0c9 100644 --- a/satrs-core/src/events.rs +++ b/satrs-core/src/events.rs @@ -630,7 +630,6 @@ impl PartialEq> for EventU16 { mod tests { use super::EventU32TypedSev; use super::*; - use spacepackets::ecss::EcssEnumeration; use spacepackets::ByteConversionError; use std::mem::size_of; diff --git a/satrs-core/src/pool.rs b/satrs-core/src/pool.rs index 965bd6a..afc4f73 100644 --- a/satrs-core/src/pool.rs +++ b/satrs-core/src/pool.rs @@ -402,7 +402,8 @@ impl PoolProvider for LocalPool { fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError> { let curr_size = self.addr_check(addr)?; let raw_pos = self.raw_pos(addr).unwrap(); - let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..curr_size]; + let block = + &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + curr_size]; Ok(block) } @@ -779,4 +780,25 @@ mod tests { drop(rw_guard); assert!(!local_pool.has_element_at(&addr).expect("Invalid address")); } + + #[test] + fn modify_pool_index_above_0() { + let mut local_pool = basic_small_pool(); + let test_buf_0: [u8; 4] = [1; 4]; + let test_buf_1: [u8; 4] = [2; 4]; + let test_buf_2: [u8; 4] = [3; 4]; + let test_buf_3: [u8; 4] = [4; 4]; + let addr0 = local_pool.add(&test_buf_0).expect("Adding data failed"); + let addr1 = local_pool.add(&test_buf_1).expect("Adding data failed"); + let addr2 = local_pool.add(&test_buf_2).expect("Adding data failed"); + let addr3 = local_pool.add(&test_buf_3).expect("Adding data failed"); + let tm0_raw = local_pool.modify(&addr0).expect("Modifying data failed"); + assert_eq!(tm0_raw, test_buf_0); + let tm1_raw = local_pool.modify(&addr1).expect("Modifying data failed"); + assert_eq!(tm1_raw, test_buf_1); + let tm2_raw = local_pool.modify(&addr2).expect("Modifying data failed"); + assert_eq!(tm2_raw, test_buf_2); + let tm3_raw = local_pool.modify(&addr3).expect("Modifying data failed"); + assert_eq!(tm3_raw, test_buf_3); + } } diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs index 91e3f9d..3c522e8 100644 --- a/satrs-core/src/pus/verification.rs +++ b/satrs-core/src/pus/verification.rs @@ -1446,11 +1446,10 @@ mod tests { VerificationReporterWithSender, VerificationToken, }; use crate::pus::{EcssSender, EcssTmtcErrorWithSend}; - use crate::seq_count::SeqCountProviderSimple; use crate::SenderId; use alloc::boxed::Box; use alloc::format; - use spacepackets::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU8, EcssEnumeration, PusPacket}; + use spacepackets::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU8, PusPacket}; use 
spacepackets::tc::{PusTc, PusTcSecondaryHeader}; use spacepackets::tm::PusTm; use spacepackets::util::UnsignedEnum; @@ -1556,15 +1555,7 @@ mod tests { } fn base_reporter() -> VerificationReporter { - let cfg = VerificationReporterCfg::new( - TEST_APID, - Box::new(SeqCountProviderSimple::default()), - Box::new(SeqCountProviderSimple::default()), - 1, - 2, - 8, - ) - .unwrap(); + let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); VerificationReporter::new(&cfg) } @@ -2306,15 +2297,7 @@ mod tests { let (verif_tx, verif_rx) = mpsc::channel(); let sender = MpscVerifSender::new(0, "Verification Sender", shared_tm_pool.clone(), verif_tx); - let cfg = VerificationReporterCfg::new( - TEST_APID, - Box::new(SeqCountProviderSimple::default()), - Box::new(SeqCountProviderSimple::default()), - 1, - 2, - 8, - ) - .unwrap(); + let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); let mut reporter = VerificationReporterWithSender::new(&cfg, Box::new(sender)); let mut sph = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index 9f94db8..0f7ad9c 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -237,7 +237,11 @@ fn main() { .set_seq_count(seq_count_provider.get_and_increment()); let entry = msg_counter_map.entry(tm.service()).or_insert(0); tm.sec_header.msg_counter = *entry; - *entry += 1; + if *entry == u16::MAX { + *entry = 0; + } else { + *entry += 1; + } tm.calc_crc_on_serialization = true; tm.write_to_bytes(tm_raw) .expect("Writing PUS TM back failed"); -- 2.43.0 From f34ef841bfc9fe6b356eda02f1a3c63b5992c7e8 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 19:11:59 +0200 Subject: [PATCH 18/39] update deps --- satrs-core/Cargo.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/satrs-core/Cargo.toml b/satrs-core/Cargo.toml index 8fc7015..66fb017 100644 --- a/satrs-core/Cargo.toml +++ b/satrs-core/Cargo.toml @@ -13,7 +13,7 @@ categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-sup # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -delegate = ">=0.8, <0.10" +delegate = ">=0.8, <0.11" paste = "1" embed-doc-image = "0.1" @@ -26,7 +26,7 @@ version = "1" optional = true [dependencies.hashbrown] -version = "0.13" +version = "0.14" optional = true [dependencies.heapless] @@ -74,7 +74,7 @@ once_cell = "1.13" serde_json = "1" [dev-dependencies.postcard] -version = "1.0" +version = "1" [features] default = ["std"] -- 2.43.0 From 8210e01615322f09fb176825abedbab783299bc3 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 19:31:48 +0200 Subject: [PATCH 19/39] zero copy TM funnel --- satrs-core/Cargo.toml | 2 +- satrs-core/src/pus/verification.rs | 2 +- satrs-example/src/main.rs | 32 +++++++++++++----------------- satrs-example/src/pus/scheduler.rs | 1 - 4 files changed, 16 insertions(+), 21 deletions(-) diff --git a/satrs-core/Cargo.toml b/satrs-core/Cargo.toml index 66fb017..6424c01 100644 --- a/satrs-core/Cargo.toml +++ b/satrs-core/Cargo.toml @@ -64,7 +64,7 @@ optional = true # version = "0.5.4" # path = "../spacepackets" git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git" -rev = "28cd8c02ac0" +rev = "4485ed26699d32" default-features = false [dev-dependencies] diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs index 3c522e8..99a1c68 100644 --- a/satrs-core/src/pus/verification.rs +++ 
b/satrs-core/src/pus/verification.rs @@ -233,7 +233,7 @@ impl TryFrom for VerificationToken { if let TcStateToken::Accepted(token) = value { Ok(token) } else { - return Err(()); + Err(()) } } } diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index 0f7ad9c..2e830f0 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -6,7 +6,6 @@ mod requests; mod tmtc; use log::{info, warn}; -use std::collections::hash_map::Entry; use crate::hk::AcsHkIds; use crate::logging::setup_logger; @@ -38,7 +37,7 @@ use satrs_core::pus::verification::{ }; use satrs_core::pus::MpscTmtcInStoreSender; use satrs_core::seq_count::{SeqCountProviderSimple, SequenceCountProviderCore}; -use satrs_core::spacepackets::ecss::{PusPacket, SerializablePusPacket}; +use satrs_core::spacepackets::tm::PusTmZeroCopyWriter; use satrs_core::spacepackets::{ time::cds::TimeProvider, time::TimeWriter, @@ -213,38 +212,35 @@ fn main() { let jh1 = thread::Builder::new() .name("TM Funnel".to_string()) .spawn(move || { - let mut tm_buf: [u8; 2048] = [0; 2048]; let tm_funnel = TmFunnel { tm_server_tx, tm_funnel_rx, }; loop { if let Ok(addr) = tm_funnel.tm_funnel_rx.recv() { - // Read the TM, set sequence counter and message counter, and finally write - // it back with the updated CRC. - // We could theoretically manipulate the counters and the CRC directly - // in place as an optimization, but I don't think this is necessary.. + // Read the TM, set sequence counter and message counter, and finally update + // the CRC. let shared_pool = tm_store.backing_pool(); let mut pool_guard = shared_pool.write().expect("Locking TM pool failed"); let tm_raw = pool_guard .modify(&addr) .expect("Reading TM from pool failed"); - tm_buf[0..tm_raw.len()].copy_from_slice(&tm_raw); - let (mut tm, size) = - PusTm::from_bytes(&tm_buf, 7).expect("Creating TM from raw slice failed"); - tm.sp_header.set_apid(PUS_APID); - tm.sp_header - .set_seq_count(seq_count_provider.get_and_increment()); - let entry = msg_counter_map.entry(tm.service()).or_insert(0); - tm.sec_header.msg_counter = *entry; + let mut zero_copy_writer = PusTmZeroCopyWriter::new(tm_raw) + .expect("Creating TM zero copy writer failed"); + zero_copy_writer.set_apid(PUS_APID); + zero_copy_writer.set_seq_count(seq_count_provider.get_and_increment()); + let entry = msg_counter_map + .entry(zero_copy_writer.service()) + .or_insert(0); + zero_copy_writer.set_msg_count(*entry); if *entry == u16::MAX { *entry = 0; } else { *entry += 1; } - tm.calc_crc_on_serialization = true; - tm.write_to_bytes(tm_raw) - .expect("Writing PUS TM back failed"); + + // This operation has to come last! 
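                        // Rationale, sketched: the writer patches APID, sequence count and
                        // message count directly in the raw pool slice, so the CRC16 at the
                        // packet end is only consistent once every field is final; finish()
                        // is assumed to compute and write that checksum over the final bytes.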
+ zero_copy_writer.finish(); tm_funnel .tm_server_tx .send(addr) diff --git a/satrs-example/src/pus/scheduler.rs b/satrs-example/src/pus/scheduler.rs index ffd8c89..35c84b8 100644 --- a/satrs-example/src/pus/scheduler.rs +++ b/satrs-example/src/pus/scheduler.rs @@ -1,6 +1,5 @@ use crate::tmtc::PusTcSource; use log::{error, info, warn}; -use satrs_core::pool::{SharedPool, StoreAddr}; use satrs_core::pus::scheduler::TcInfo; use satrs_core::pus::scheduler_srv::PusService11SchedHandler; use satrs_core::pus::{PusPacketHandlerResult, PusServiceHandler}; -- 2.43.0 From 5f5b77b721b5e79ee2816c21126ab5a099ebb23e Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 21:08:04 +0200 Subject: [PATCH 20/39] almost finished completely --- satrs-core/src/objects.rs | 3 +- satrs-core/src/pus/event_srv.rs | 30 ++--- satrs-core/src/pus/mod.rs | 75 ++++++------- satrs-core/src/pus/scheduler_srv.rs | 48 ++------ satrs-core/src/pus/test.rs | 21 +--- satrs-core/src/pus/verification.rs | 8 +- satrs-example/src/lib.rs | 8 ++ satrs-example/src/main.rs | 29 +++-- satrs-example/src/pus/action.rs | 165 ++++++++++++++++++++++++++++ satrs-example/src/pus/mod.rs | 1 + satrs-example/src/pus/test.rs | 25 +++-- satrs-example/src/requests.rs | 23 ++-- satrs-example/src/tmtc.rs | 3 - 13 files changed, 297 insertions(+), 142 deletions(-) create mode 100644 satrs-example/src/pus/action.rs diff --git a/satrs-core/src/objects.rs b/satrs-core/src/objects.rs index bc17696..0ff117f 100644 --- a/satrs-core/src/objects.rs +++ b/satrs-core/src/objects.rs @@ -51,6 +51,7 @@ //! assert_eq!(example_obj.id, obj_id); //! assert_eq!(example_obj.dummy, 42); //! ``` +use crate::tmtc::TargetId; #[cfg(feature = "alloc")] use alloc::boxed::Box; #[cfg(feature = "alloc")] @@ -64,7 +65,7 @@ use std::error::Error; #[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)] pub struct ObjectId { - pub id: u32, + pub id: TargetId, pub name: &'static str, } diff --git a/satrs-core/src/pus/event_srv.rs b/satrs-core/src/pus/event_srv.rs index ad098fa..83e1d78 100644 --- a/satrs-core/src/pus/event_srv.rs +++ b/satrs-core/src/pus/event_srv.rs @@ -12,7 +12,6 @@ use crate::tmtc::tm_helper::SharedTmStore; use spacepackets::ecss::event::Subservice; use spacepackets::ecss::PusPacket; use spacepackets::tc::PusTc; -use std::format; use std::sync::mpsc::{Receiver, Sender}; pub struct PusService5EventHandler { @@ -57,26 +56,19 @@ impl PusServiceHandler for PusService5EventHandler { addr: StoreAddr, token: VerificationToken, ) -> Result { - { - // Keep locked section as short as possible. 
- let mut tc_pool = self - .psb - .tc_store - .write() - .map_err(|e| PusPacketHandlingError::RwGuardError(format!("{e}")))?; - let tc_guard = tc_pool.read_with_guard(addr); - let tc_raw = tc_guard.read().unwrap(); - self.psb.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); - } + self.copy_tc_to_buf(addr)?; let (tc, _) = PusTc::from_bytes(&self.psb.pus_buf).unwrap(); - let srv = Subservice::try_from(tc.subservice()); + let subservice = tc.subservice(); + let srv = Subservice::try_from(subservice); if srv.is_err() { return Ok(PusPacketHandlerResult::CustomSubservice( tc.subservice(), token, )); } - let mut handle_enable_disable_request = |enable: bool| { + let mut partial_error = None; + let time_stamp = self.psb().get_current_timestamp(&mut partial_error); + let mut handle_enable_disable_request = |enable: bool, stamp: [u8; 7]| { if tc.user_data().is_none() || tc.user_data().unwrap().len() < 4 { return Err(PusPacketHandlingError::NotEnoughAppData( "At least 4 bytes event ID expected".into(), @@ -84,11 +76,10 @@ impl PusServiceHandler for PusService5EventHandler { } let user_data = tc.user_data().unwrap(); let event_u32 = EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap())); - let start_token = self .psb .verification_handler - .start_success(token, Some(&self.psb.stamp_buf)) + .start_success(token, Some(&stamp)) .map_err(|_| PartialPusHandlingError::VerificationError); let partial_error = start_token.clone().err(); let mut token: TcStateToken = token.into(); @@ -126,15 +117,14 @@ impl PusServiceHandler for PusService5EventHandler { return Err(PusPacketHandlingError::InvalidSubservice(tc.subservice())) } Subservice::TcEnableEventGeneration => { - handle_enable_disable_request(true)?; + handle_enable_disable_request(true, time_stamp)?; } Subservice::TcDisableEventGeneration => { - handle_enable_disable_request(false)?; + handle_enable_disable_request(false, time_stamp)?; } Subservice::TcReportDisabledList | Subservice::TmDisabledEventsReport => { return Ok(PusPacketHandlerResult::SubserviceNotImplemented( - tc.subservice(), - token, + subservice, token, )); } } diff --git a/satrs-core/src/pus/mod.rs b/satrs-core/src/pus/mod.rs index 4c7cd18..f1dda6e 100644 --- a/satrs-core/src/pus/mod.rs +++ b/satrs-core/src/pus/mod.rs @@ -137,17 +137,18 @@ mod alloc_mod { pub mod std_mod { use crate::pool::{ShareablePoolProvider, SharedPool, StoreAddr, StoreError}; use crate::pus::verification::{ - FailParams, StdVerifReporterWithSender, TcStateAccepted, VerificationToken, + StdVerifReporterWithSender, TcStateAccepted, VerificationToken, }; use crate::pus::{EcssSender, EcssTcSenderCore, EcssTmSenderCore}; use crate::tmtc::tm_helper::SharedTmStore; use crate::SenderId; use alloc::vec::Vec; - use spacepackets::ecss::{EcssEnumeration, PusError, SerializablePusPacket}; + use spacepackets::ecss::{PusError, SerializablePusPacket}; use spacepackets::tc::PusTc; use spacepackets::time::cds::TimeProvider; use spacepackets::time::{StdTimestampError, TimeWriter}; use spacepackets::tm::PusTm; + use std::format; use std::string::String; use std::sync::{mpsc, RwLockWriteGuard}; use thiserror::Error; @@ -334,15 +335,14 @@ pub mod std_mod { pub type AcceptedTc = (StoreAddr, VerificationToken); pub struct PusServiceBase { - pub(crate) tc_rx: mpsc::Receiver, - pub(crate) tc_store: SharedPool, - pub(crate) tm_tx: mpsc::Sender, - pub(crate) tm_store: SharedTmStore, - pub(crate) tm_apid: u16, - pub(crate) verification_handler: StdVerifReporterWithSender, - pub(crate) stamp_buf: [u8; 7], - pub(crate) pus_buf: 
[u8; 2048], - pus_size: usize, + pub tc_rx: mpsc::Receiver, + pub tc_store: SharedPool, + pub tm_tx: mpsc::Sender, + pub tm_store: SharedTmStore, + pub tm_apid: u16, + pub verification_handler: StdVerifReporterWithSender, + pub pus_buf: [u8; 2048], + pub pus_size: usize, } impl PusServiceBase { @@ -361,56 +361,53 @@ pub mod std_mod { tm_tx, tm_store, verification_handler, - stamp_buf: [0; 7], pus_buf: [0; 2048], pus_size: 0, } } - pub fn update_stamp(&mut self) -> Result<(), PartialPusHandlingError> { + pub fn get_current_timestamp( + &self, + partial_error: &mut Option, + ) -> [u8; 7] { + let mut time_stamp: [u8; 7] = [0; 7]; let time_provider = TimeProvider::from_now_with_u16_days().map_err(PartialPusHandlingError::TimeError); if let Ok(time_provider) = time_provider { - time_provider.write_to_bytes(&mut self.stamp_buf).unwrap(); - Ok(()) + time_provider.write_to_bytes(&mut time_stamp).unwrap(); } else { - self.stamp_buf = [0; 7]; - Err(time_provider.unwrap_err()) + *partial_error = Some(time_provider.unwrap_err()); } + time_stamp } - - pub fn report_start_failure( - &mut self, - token: VerificationToken, - failure_code: &impl EcssEnumeration, - failure_data: Option<&[u8]>, - ) -> Result<(), VerificationToken> { - self.verification_handler - .start_failure( - token, - FailParams::new(Some(&self.stamp_buf), failure_code, failure_data), - ) - .map_err(|e| e.1) + pub fn get_current_timestamp_ignore_error(&self) -> [u8; 7] { + let mut dummy = None; + self.get_current_timestamp(&mut dummy) } } pub trait PusServiceHandler { fn psb_mut(&mut self) -> &mut PusServiceBase; fn psb(&self) -> &PusServiceBase; - fn verification_reporter(&mut self) -> &mut StdVerifReporterWithSender { - &mut self.psb_mut().verification_handler - } - fn tc_store(&mut self) -> &mut SharedPool { - &mut self.psb_mut().tc_store - } - fn pus_tc_buf(&self) -> (&[u8], usize) { - (&self.psb().pus_buf, self.psb().pus_size) - } fn handle_one_tc( &mut self, addr: StoreAddr, token: VerificationToken, ) -> Result; + + fn copy_tc_to_buf(&mut self, addr: StoreAddr) -> Result<(), PusPacketHandlingError> { + // Keep locked section as short as possible. 
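        // The write guard taken below lives only until this helper returns, so the
        // TC pool lock is released again before any packet parsing, verification
        // reporting or TM generation runs in the calling handler.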
+ let psb_mut = self.psb_mut(); + let mut tc_pool = psb_mut + .tc_store + .write() + .map_err(|e| PusPacketHandlingError::RwGuardError(format!("{e}")))?; + let tc_guard = tc_pool.read_with_guard(addr); + let tc_raw = tc_guard.read().unwrap(); + psb_mut.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); + Ok(()) + } + fn handle_next_packet(&mut self) -> Result { return match self.psb().tc_rx.try_recv() { Ok((addr, token)) => self.handle_one_tc(addr, token), diff --git a/satrs-core/src/pus/scheduler_srv.rs b/satrs-core/src/pus/scheduler_srv.rs index 9de4981..036c724 100644 --- a/satrs-core/src/pus/scheduler_srv.rs +++ b/satrs-core/src/pus/scheduler_srv.rs @@ -2,15 +2,12 @@ use crate::pool::{SharedPool, StoreAddr}; use crate::pus::scheduler::PusScheduler; use crate::pus::verification::{StdVerifReporterWithSender, TcStateAccepted, VerificationToken}; use crate::pus::{ - AcceptedTc, PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, - PusServiceBase, PusServiceHandler, + AcceptedTc, PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase, PusServiceHandler, }; use crate::tmtc::tm_helper::SharedTmStore; use spacepackets::ecss::{scheduling, PusPacket}; use spacepackets::tc::PusTc; use spacepackets::time::cds::TimeProvider; -use spacepackets::time::TimeWriter; -use std::format; use std::sync::mpsc::{Receiver, Sender}; pub struct PusService11SchedHandler { @@ -59,17 +56,7 @@ impl PusServiceHandler for PusService11SchedHandler { addr: StoreAddr, token: VerificationToken, ) -> Result { - { - // Keep locked section as short as possible. - let mut tc_pool = self - .psb - .tc_store - .write() - .map_err(|e| PusPacketHandlingError::RwGuardError(format!("{e}")))?; - let tc_guard = tc_pool.read_with_guard(addr); - let tc_raw = tc_guard.read().unwrap(); - self.psb.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); - } + self.copy_tc_to_buf(addr)?; let (tc, _) = PusTc::from_bytes(&self.psb.pus_buf).unwrap(); let std_service = scheduling::Subservice::try_from(tc.subservice()); if std_service.is_err() { @@ -78,32 +65,21 @@ impl PusServiceHandler for PusService11SchedHandler { token, )); } - //let partial_error = self.psb.update_stamp().err(); - let time_provider = - TimeProvider::from_now_with_u16_days().map_err(PartialPusHandlingError::TimeError); - let partial_error = if let Ok(time_provider) = time_provider { - time_provider - .write_to_bytes(&mut self.psb.stamp_buf) - .unwrap(); - Ok(()) - } else { - self.psb.stamp_buf = [0; 7]; - Err(time_provider.unwrap_err()) - }; - let partial_error = partial_error.err(); + let mut partial_error = None; + let time_stamp = self.psb().get_current_timestamp(&mut partial_error); match std_service.unwrap() { scheduling::Subservice::TcEnableScheduling => { let start_token = self .psb .verification_handler - .start_success(token, Some(&self.psb.stamp_buf)) + .start_success(token, Some(&time_stamp)) .expect("Error sending start success"); self.scheduler.enable(); if self.scheduler.is_enabled() { self.psb .verification_handler - .completion_success(start_token, Some(&self.psb.stamp_buf)) + .completion_success(start_token, Some(&time_stamp)) .expect("Error sending completion success"); } else { panic!("Failed to enable scheduler"); @@ -113,14 +89,14 @@ impl PusServiceHandler for PusService11SchedHandler { let start_token = self .psb .verification_handler - .start_success(token, Some(&self.psb.stamp_buf)) + .start_success(token, Some(&time_stamp)) .expect("Error sending start success"); self.scheduler.disable(); if !self.scheduler.is_enabled() { 
self.psb .verification_handler - .completion_success(start_token, Some(&self.psb.stamp_buf)) + .completion_success(start_token, Some(&time_stamp)) .expect("Error sending completion success"); } else { panic!("Failed to disable scheduler"); @@ -130,7 +106,7 @@ impl PusServiceHandler for PusService11SchedHandler { let start_token = self .psb .verification_handler - .start_success(token, Some(&self.psb.stamp_buf)) + .start_success(token, Some(&time_stamp)) .expect("Error sending start success"); let mut pool = self.psb.tc_store.write().expect("Locking pool failed"); @@ -141,14 +117,14 @@ impl PusServiceHandler for PusService11SchedHandler { self.psb .verification_handler - .completion_success(start_token, Some(&self.psb.stamp_buf)) + .completion_success(start_token, Some(&time_stamp)) .expect("Error sending completion success"); } scheduling::Subservice::TcInsertActivity => { let start_token = self .psb .verification_handler - .start_success(token, Some(&self.psb.stamp_buf)) + .start_success(token, Some(&time_stamp)) .expect("error sending start success"); let mut pool = self.psb.tc_store.write().expect("locking pool failed"); @@ -158,7 +134,7 @@ impl PusServiceHandler for PusService11SchedHandler { self.psb .verification_handler - .completion_success(start_token, Some(&self.psb.stamp_buf)) + .completion_success(start_token, Some(&time_stamp)) .expect("sending completion success failed"); } _ => { diff --git a/satrs-core/src/pus/test.rs b/satrs-core/src/pus/test.rs index 09614bc..a74f910 100644 --- a/satrs-core/src/pus/test.rs +++ b/satrs-core/src/pus/test.rs @@ -51,27 +51,18 @@ impl PusServiceHandler for PusService17TestHandler { addr: StoreAddr, token: VerificationToken, ) -> Result { - { - // Keep locked section as short as possible. - let mut tc_pool = self - .psb - .tc_store - .write() - .map_err(|e| PusPacketHandlingError::RwGuardError(format!("{e}")))?; - let tc_guard = tc_pool.read_with_guard(addr); - let tc_raw = tc_guard.read()?; - self.psb.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw); - } + self.copy_tc_to_buf(addr)?; let (tc, _) = PusTc::from_bytes(&self.psb.pus_buf)?; if tc.service() != 17 { return Err(PusPacketHandlingError::WrongService(tc.service())); } if tc.subservice() == 1 { - let mut partial_error = self.psb.update_stamp().err(); + let mut partial_error = None; + let time_stamp = self.psb().get_current_timestamp(&mut partial_error); let result = self .psb .verification_handler - .start_success(token, Some(&self.psb.stamp_buf)) + .start_success(token, Some(&time_stamp)) .map_err(|_| PartialPusHandlingError::VerificationError); let start_token = if let Ok(result) = result { Some(result) @@ -81,7 +72,7 @@ impl PusServiceHandler for PusService17TestHandler { }; // Sequence count will be handled centrally in TM funnel. 
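            // Hence the hard-coded sequence count of 0 below: the TM funnel thread
            // overwrites it (and the message counter) centrally before the packet
            // leaves the system.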
let mut reply_header = SpHeader::tm_unseg(self.psb.tm_apid, 0, 0).unwrap(); - let tc_header = PusTmSecondaryHeader::new_simple(17, 2, &self.psb.stamp_buf); + let tc_header = PusTmSecondaryHeader::new_simple(17, 2, &time_stamp); let ping_reply = PusTm::new(&mut reply_header, tc_header, None, true); let addr = self.psb.tm_store.add_pus_tm(&ping_reply); if let Err(e) = self @@ -96,7 +87,7 @@ impl PusServiceHandler for PusService17TestHandler { if self .psb .verification_handler - .completion_success(start_token, Some(&self.psb.stamp_buf)) + .completion_success(start_token, Some(&time_stamp)) .is_err() { partial_error = Some(PartialPusHandlingError::VerificationError) diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs index 99a1c68..b4db15f 100644 --- a/satrs-core/src/pus/verification.rs +++ b/satrs-core/src/pus/verification.rs @@ -467,7 +467,7 @@ impl VerificationReporterCore { // Internal helper function, too many arguments is acceptable for this case. #[allow(clippy::too_many_arguments)] fn sendable_failure_no_step<'src_data, State: Copy>( - &mut self, + &self, src_data_buf: &'src_data mut [u8], subservice: u8, token: VerificationToken, @@ -621,7 +621,7 @@ impl VerificationReporterCore { /// Requires a token previously acquired by calling [Self::acceptance_success]. It consumes /// the token because verification handling is done. pub fn start_failure<'src_data>( - &mut self, + &self, src_data_buf: &'src_data mut [u8], token: VerificationToken, seq_count: u16, @@ -838,7 +838,7 @@ impl VerificationReporterCore { // Internal helper function, too many arguments is acceptable for this case. #[allow(clippy::too_many_arguments)] fn create_pus_verif_fail_tm<'src_data>( - &mut self, + &self, src_data_buf: &'src_data mut [u8], subservice: u8, seq_count: u16, @@ -883,7 +883,7 @@ impl VerificationReporterCore { } fn create_pus_verif_tm_base<'src_data>( - &mut self, + &self, src_data_buf: &'src_data mut [u8], subservice: u8, msg_counter: u16, diff --git a/satrs-example/src/lib.rs b/satrs-example/src/lib.rs index 9429755..754be9b 100644 --- a/satrs-example/src/lib.rs +++ b/satrs-example/src/lib.rs @@ -1,5 +1,6 @@ use num_enum::{IntoPrimitive, TryFromPrimitive}; use satrs_core::events::{EventU32TypedSev, SeverityInfo}; +use satrs_core::objects::ObjectId; use std::net::Ipv4Addr; use satrs_mib::res_code::{ResultU16, ResultU16Info}; @@ -17,6 +18,11 @@ pub enum RequestTargetId { AcsSubsystem = 1, } +pub const ACS_OBJECT_ID: ObjectId = ObjectId { + id: RequestTargetId::AcsSubsystem as u32, + name: "ACS_SUBSYSTEM", +}; + #[derive(Debug)] pub enum GroupId { Tmtc = 0, @@ -38,6 +44,8 @@ pub mod tmtc_err { pub const INVALID_PUS_SUBSERVICE: ResultU16 = ResultU16::const_new(GroupId::Tmtc as u8, 1); #[resultcode] pub const PUS_SERVICE_NOT_IMPLEMENTED: ResultU16 = ResultU16::const_new(GroupId::Tmtc as u8, 2); + #[resultcode] + pub const UNKNOWN_TARGET_ID: ResultU16 = ResultU16::const_new(GroupId::Tmtc as u8, 3); #[resultcode( info = "Not enough data inside the TC application data field. 
Optionally includes: \ diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index 2e830f0..6cfe305 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -9,6 +9,7 @@ use log::{info, warn}; use crate::hk::AcsHkIds; use crate::logging::setup_logger; +use crate::pus::action::{Pus8Wrapper, PusService8ActionHandler}; use crate::pus::event::Pus5Wrapper; use crate::pus::scheduler::Pus11Wrapper; use crate::pus::test::Service17CustomWrapper; @@ -45,7 +46,7 @@ use satrs_core::spacepackets::{ SequenceFlags, SpHeader, }; use satrs_core::tmtc::tm_helper::SharedTmStore; -use satrs_core::tmtc::AddressableId; +use satrs_core::tmtc::{AddressableId, TargetId}; use satrs_example::{RequestTargetId, OBSW_SERVER_ADDR, SERVER_PORT}; use std::collections::HashMap; use std::net::{IpAddr, SocketAddr}; @@ -123,7 +124,7 @@ fn main() { // Some request are targetable. This map is used to retrieve sender handles based on a target ID. let mut request_map = HashMap::new(); let (acs_thread_tx, acs_thread_rx) = channel::(); - request_map.insert(RequestTargetId::AcsSubsystem as u32, acs_thread_tx); + request_map.insert(RequestTargetId::AcsSubsystem as TargetId, acs_thread_tx); let tc_source_wrapper = PusTcSource { tc_store: tc_store.clone(), @@ -135,7 +136,6 @@ fn main() { sock_addr, verif_reporter: verif_reporter.clone(), event_sender, - request_map, }; let tc_args = TcArgs { tc_source: tc_source_wrapper.clone(), @@ -153,7 +153,7 @@ fn main() { let (pus_test_tx, pus_test_rx) = channel(); let (pus_event_tx, pus_event_rx) = channel(); let (pus_sched_tx, pus_sched_rx) = channel(); - let (pus_hk_tx, pus_hk_rx) = channel(); + let (pus_hk_tx, _pus_hk_rx) = channel(); let (pus_action_tx, pus_action_rx) = channel(); let pus_router = PusTcMpscRouter { test_service_receiver: pus_test_tx, @@ -195,11 +195,22 @@ fn main() { tm_funnel_tx.clone(), tm_store.clone(), PUS_APID, - verif_reporter, + verif_reporter.clone(), event_request_tx, ); let mut pus_5_wrapper = Pus5Wrapper { pus_5_handler }; + let pus_8_handler = PusService8ActionHandler::new( + pus_action_rx, + tc_store.pool.clone(), + tm_funnel_tx.clone(), + tm_store.clone(), + PUS_APID, + verif_reporter, + request_map.clone(), + ); + let mut pus_8_wrapper = Pus8Wrapper { pus_8_handler }; + info!("Starting TMTC task"); let jh0 = thread::Builder::new() .name("TMTC".to_string()) @@ -320,7 +331,7 @@ fn main() { ); update_time(&mut time_provider, &mut timestamp); match request.targeted_request.request { - Request::HkRequest(hk_req) => match hk_req { + Request::Hk(hk_req) => match hk_req { HkRequest::OneShot(unique_id) => { let target = request.targeted_request.target_id; assert_eq!(target, RequestTargetId::AcsSubsystem as u32); @@ -359,9 +370,12 @@ fn main() { HkRequest::Disable(_) => {} HkRequest::ModifyCollectionInterval(_, _) => {} }, - Request::ModeRequest(_mode_req) => { + Request::Mode(_mode_req) => { warn!("mode request handling not implemented yet") } + Request::Action(_action_req) => { + warn!("action request handling not implemented yet") + } } let started_token = reporter_aocs .start_success(request.token, Some(×tamp)) @@ -397,6 +411,7 @@ fn main() { is_srv_finished(pus_17_wrapper.handle_next_packet()); is_srv_finished(pus_11_wrapper.handle_next_packet()); is_srv_finished(pus_5_wrapper.handle_next_packet()); + is_srv_finished(pus_8_wrapper.handle_next_packet()); if all_queues_empty { break; } diff --git a/satrs-example/src/pus/action.rs b/satrs-example/src/pus/action.rs new file mode 100644 index 0000000..f240648 --- /dev/null +++ 
b/satrs-example/src/pus/action.rs @@ -0,0 +1,165 @@ +use crate::requests::{ActionRequest, Request, RequestWithToken}; +use log::{error, warn}; +use satrs_core::pool::{SharedPool, StoreAddr}; +use satrs_core::pus::verification::{ + FailParams, StdVerifReporterWithSender, TcStateAccepted, VerificationToken, +}; +use satrs_core::pus::{ + AcceptedTc, PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase, PusServiceHandler, +}; +use satrs_core::spacepackets::ecss::PusPacket; +use satrs_core::spacepackets::tc::PusTc; +use satrs_core::tmtc::tm_helper::SharedTmStore; +use satrs_core::tmtc::TargetId; +use satrs_example::tmtc_err; +use std::collections::HashMap; +use std::sync::mpsc::{Receiver, Sender}; + +pub struct PusService8ActionHandler { + psb: PusServiceBase, + request_handlers: HashMap>, +} + +impl PusService8ActionHandler { + pub fn new( + receiver: Receiver, + tc_pool: SharedPool, + tm_tx: Sender, + tm_store: SharedTmStore, + tm_apid: u16, + verification_handler: StdVerifReporterWithSender, + request_handlers: HashMap>, + ) -> Self { + Self { + psb: PusServiceBase::new( + receiver, + tc_pool, + tm_tx, + tm_store, + tm_apid, + verification_handler, + ), + request_handlers, + } + } +} + +impl PusServiceHandler for PusService8ActionHandler { + fn psb_mut(&mut self) -> &mut PusServiceBase { + &mut self.psb + } + fn psb(&self) -> &PusServiceBase { + &self.psb + } + + fn handle_one_tc( + &mut self, + addr: StoreAddr, + token: VerificationToken, + ) -> Result { + self.copy_tc_to_buf(addr)?; + let (tc, _) = PusTc::from_bytes(&self.psb().pus_buf).unwrap(); + let subservice = tc.subservice(); + let mut partial_error = None; + let time_stamp = self.psb().get_current_timestamp(&mut partial_error); + match subservice { + 128 => { + let user_data = tc.user_data(); + if user_data.is_none() || user_data.unwrap().len() < 8 { + self.psb_mut() + .verification_handler + .start_failure( + token, + FailParams::new( + Some(&time_stamp), + &tmtc_err::NOT_ENOUGH_APP_DATA, + None, + ), + ) + .expect("Sending start failure failed"); + return Err(PusPacketHandlingError::NotEnoughAppData( + "Expected at least 4 bytes".into(), + )); + } + let user_data = user_data.unwrap(); + let target_id = u32::from_be_bytes(user_data[0..4].try_into().unwrap()); + let action_id = u32::from_be_bytes(user_data[4..8].try_into().unwrap()); + if let Some(sender) = self.request_handlers.get(&target_id) { + sender + .send(RequestWithToken::new( + target_id, + Request::Action(ActionRequest::CmdWithU32Id(( + action_id, + Vec::from(&user_data[8..]), + ))), + token, + )) + .expect("Forwarding action request failed"); + } else { + let mut fail_data: [u8; 4] = [0; 4]; + fail_data.copy_from_slice(&target_id.to_be_bytes()); + self.psb_mut() + .verification_handler + .start_failure( + token, + FailParams::new( + Some(&time_stamp), + &tmtc_err::UNKNOWN_TARGET_ID, + Some(&fail_data), + ), + ) + .expect("Sending start failure failed"); + return Err(PusPacketHandlingError::OtherError(format!( + "Unknown target ID {target_id}" + ))); + } + } + _ => { + let fail_data = [subservice]; + self.psb_mut() + .verification_handler + .start_failure( + token, + FailParams::new( + Some(&time_stamp), + &tmtc_err::INVALID_PUS_SUBSERVICE, + Some(&fail_data), + ), + ) + .expect("Sending start failure failed"); + return Err(PusPacketHandlingError::InvalidSubservice(subservice)); + } + } + Ok(PusPacketHandlerResult::RequestHandled) + } +} + +pub struct Pus8Wrapper { + pub(crate) pus_8_handler: PusService8ActionHandler, +} + +impl Pus8Wrapper { + pub fn 
handle_next_packet(&mut self) -> bool { + match self.pus_8_handler.handle_next_packet() { + Ok(result) => match result { + PusPacketHandlerResult::RequestHandled => {} + PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { + warn!("PUS 8 partial packet handling success: {e:?}") + } + PusPacketHandlerResult::CustomSubservice(invalid, _) => { + warn!("PUS 8 invalid subservice {invalid}"); + } + PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => { + warn!("PUS 8 subservice {subservice} not implemented"); + } + PusPacketHandlerResult::Empty => { + return true; + } + }, + Err(error) => { + error!("PUS packet handling error: {error:?}") + } + } + false + } +} diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index 4186359..e17fd78 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -12,6 +12,7 @@ use satrs_core::tmtc::tm_helper::PusTmWithCdsShortHelper; use satrs_example::{tmtc_err, CustomPusServiceId}; use std::sync::mpsc::Sender; +pub mod action; pub mod event; pub mod scheduler; pub mod test; diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index 5248028..707dae1 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -2,6 +2,7 @@ use log::{info, warn}; use satrs_core::events::EventU32; use satrs_core::params::Params; use satrs_core::pus::test::PusService17TestHandler; +use satrs_core::pus::verification::FailParams; use satrs_core::pus::{PusPacketHandlerResult, PusServiceHandler}; use satrs_core::spacepackets::ecss::PusPacket; use satrs_core::spacepackets::tc::PusTc; @@ -37,8 +38,9 @@ impl Service17CustomWrapper { warn!("PUS17: Subservice {subservice} not implemented") } PusPacketHandlerResult::CustomSubservice(subservice, token) => { - let (buf, _) = self.pus17_handler.pus_tc_buf(); - let (tc, _) = PusTc::from_bytes(buf).unwrap(); + let psb_mut = self.pus17_handler.psb_mut(); + let buf = psb_mut.pus_buf; + let (tc, _) = PusTc::from_bytes(&buf).unwrap(); let time_stamper = TimeProvider::from_now_with_u16_days().unwrap(); let mut stamp_buf: [u8; 7] = [0; 7]; time_stamper.write_to_bytes(&mut stamp_buf).unwrap(); @@ -47,23 +49,26 @@ impl Service17CustomWrapper { self.test_srv_event_sender .send((TEST_EVENT.into(), None)) .expect("Sending test event failed"); - let start_token = self - .pus17_handler - .verification_reporter() + let start_token = psb_mut + .verification_handler .start_success(token, Some(&stamp_buf)) .expect("Error sending start success"); - self.pus17_handler - .verification_reporter() + psb_mut + .verification_handler .completion_success(start_token, Some(&stamp_buf)) .expect("Error sending completion success"); } else { let fail_data = [tc.subservice()]; self.pus17_handler .psb_mut() - .report_start_failure( + .verification_handler + .start_failure( token, - &tmtc_err::INVALID_PUS_SUBSERVICE, - Some(&fail_data), + FailParams::new( + Some(&stamp_buf), + &tmtc_err::INVALID_PUS_SUBSERVICE, + Some(&fail_data), + ), ) .expect("Sending start failure verification failed"); } diff --git a/satrs-example/src/requests.rs b/satrs-example/src/requests.rs index 6f11eef..5fa2549 100644 --- a/satrs-example/src/requests.rs +++ b/satrs-example/src/requests.rs @@ -3,14 +3,23 @@ use satrs_core::mode::ModeRequest; use satrs_core::pus::verification::{TcStateAccepted, VerificationToken}; use satrs_core::tmtc::TargetId; -#[derive(Copy, Clone, Eq, PartialEq, Debug)] -#[non_exhaustive] -pub enum Request { - HkRequest(HkRequest), - ModeRequest(ModeRequest), +#[allow(dead_code)] 
+#[derive(Clone, Eq, PartialEq, Debug)] +pub enum ActionRequest { + CmdWithU32Id((u32, Vec)), + CmdWithStringId((String, Vec)), } -#[derive(Copy, Clone, Eq, PartialEq, Debug)] +#[allow(dead_code)] +#[derive(Clone, Eq, PartialEq, Debug)] +#[non_exhaustive] +pub enum Request { + Hk(HkRequest), + Mode(ModeRequest), + Action(ActionRequest), +} + +#[derive(Clone, Eq, PartialEq, Debug)] pub struct TargetedRequest { pub(crate) target_id: TargetId, pub(crate) request: Request, @@ -22,7 +31,7 @@ impl TargetedRequest { } } -#[derive(Copy, Clone, Eq, PartialEq, Debug)] +#[derive(Clone, Eq, PartialEq, Debug)] pub struct RequestWithToken { pub(crate) targeted_request: TargetedRequest, pub(crate) token: VerificationToken, diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index 5299017..b26eb42 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -2,7 +2,6 @@ use log::info; use satrs_core::events::EventU32; use satrs_core::hal::host::udp_server::{ReceiveResult, UdpTcServer}; use satrs_core::params::Params; -use std::collections::HashMap; use std::error::Error; use std::fmt::{Display, Formatter}; use std::net::SocketAddr; @@ -12,7 +11,6 @@ use std::time::Duration; use crate::ccsds::CcsdsReceiver; use crate::pus::{PusReceiver, PusTcArgs, PusTcMpscRouter, PusTmArgs}; -use crate::requests::RequestWithToken; use satrs_core::pool::{SharedPool, StoreAddr, StoreError}; use satrs_core::pus::verification::StdVerifReporterWithSender; use satrs_core::spacepackets::ecss::{PusPacket, SerializablePusPacket}; @@ -27,7 +25,6 @@ pub struct OtherArgs { pub sock_addr: SocketAddr, pub verif_reporter: StdVerifReporterWithSender, pub event_sender: Sender<(EventU32, Option)>, - pub request_map: HashMap>, } pub struct TmArgs { -- 2.43.0 From b11af185f90a66793fdfdb8c4cafff789c76103a Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 21:09:28 +0200 Subject: [PATCH 21/39] some missing stuff --- satrs-example/src/pus/action.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/satrs-example/src/pus/action.rs b/satrs-example/src/pus/action.rs index f240648..5f42ea3 100644 --- a/satrs-example/src/pus/action.rs +++ b/satrs-example/src/pus/action.rs @@ -130,6 +130,11 @@ impl PusServiceHandler for PusService8ActionHandler { return Err(PusPacketHandlingError::InvalidSubservice(subservice)); } } + if let Some(partial_error) = partial_error { + return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess( + partial_error, + )); + } Ok(PusPacketHandlerResult::RequestHandled) } } -- 2.43.0 From 90493a6dcbd27281700295e7680113a29e4ea26c Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Wed, 5 Jul 2023 21:10:45 +0200 Subject: [PATCH 22/39] add HK module --- satrs-example/src/pus/hk.rs | 1 + satrs-example/src/pus/mod.rs | 1 + 2 files changed, 2 insertions(+) create mode 100644 satrs-example/src/pus/hk.rs diff --git a/satrs-example/src/pus/hk.rs b/satrs-example/src/pus/hk.rs new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/satrs-example/src/pus/hk.rs @@ -0,0 +1 @@ + diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index e17fd78..b7dcc11 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -14,6 +14,7 @@ use std::sync::mpsc::Sender; pub mod action; pub mod event; +pub mod hk; pub mod scheduler; pub mod test; -- 2.43.0 From f7bf4a4d7bad45ddfda6b000041a3b5deb811b8b Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Thu, 6 Jul 2023 00:49:18 +0200 Subject: [PATCH 23/39] done --- satrs-core/src/pus/event_srv.rs | 7 +- 
satrs-core/src/pus/mod.rs | 7 +- satrs-core/src/pus/scheduler_srv.rs | 8 + satrs-core/src/pus/test.rs | 2 + satrs-example/src/main.rs | 34 +-- satrs-example/src/pus/action.rs | 109 +++++---- satrs-example/src/pus/hk.rs | 194 ++++++++++++++++ satrs-example/src/pus/mod.rs | 340 +--------------------------- satrs-example/src/pus/test.rs | 3 + satrs-example/src/tmtc.rs | 24 +- 10 files changed, 308 insertions(+), 420 deletions(-) diff --git a/satrs-core/src/pus/event_srv.rs b/satrs-core/src/pus/event_srv.rs index 83e1d78..37ccd3b 100644 --- a/satrs-core/src/pus/event_srv.rs +++ b/satrs-core/src/pus/event_srv.rs @@ -66,9 +66,7 @@ impl PusServiceHandler for PusService5EventHandler { token, )); } - let mut partial_error = None; - let time_stamp = self.psb().get_current_timestamp(&mut partial_error); - let mut handle_enable_disable_request = |enable: bool, stamp: [u8; 7]| { + let handle_enable_disable_request = |enable: bool, stamp: [u8; 7]| { if tc.user_data().is_none() || tc.user_data().unwrap().len() < 4 { return Err(PusPacketHandlingError::NotEnoughAppData( "At least 4 bytes event ID expected".into(), @@ -79,6 +77,7 @@ impl PusServiceHandler for PusService5EventHandler { let start_token = self .psb .verification_handler + .borrow_mut() .start_success(token, Some(&stamp)) .map_err(|_| PartialPusHandlingError::VerificationError); let partial_error = start_token.clone().err(); @@ -109,6 +108,8 @@ impl PusServiceHandler for PusService5EventHandler { } Ok(PusPacketHandlerResult::RequestHandled) }; + let mut partial_error = None; + let time_stamp = self.psb().get_current_timestamp(&mut partial_error); match srv.unwrap() { Subservice::TmInfoReport | Subservice::TmLowSeverityReport diff --git a/satrs-core/src/pus/mod.rs b/satrs-core/src/pus/mod.rs index f1dda6e..9c04181 100644 --- a/satrs-core/src/pus/mod.rs +++ b/satrs-core/src/pus/mod.rs @@ -148,6 +148,7 @@ pub mod std_mod { use spacepackets::time::cds::TimeProvider; use spacepackets::time::{StdTimestampError, TimeWriter}; use spacepackets::tm::PusTm; + use std::cell::RefCell; use std::format; use std::string::String; use std::sync::{mpsc, RwLockWriteGuard}; @@ -340,7 +341,9 @@ pub mod std_mod { pub tm_tx: mpsc::Sender, pub tm_store: SharedTmStore, pub tm_apid: u16, - pub verification_handler: StdVerifReporterWithSender, + /// The verification handler is wrapped in a [RefCell] to allow the interior mutability + /// pattern. This makes writing methods which are not mutable a lot easier. 
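
The [RefCell] mentioned in the doc comment above enables the interior mutability pattern: code that only holds &self can still obtain a mutable borrow of the wrapped verification handler, with borrow checking deferred to runtime. A minimal, self-contained illustration in plain Rust (placeholder types, not the satrs-core ones):

    use std::cell::RefCell;

    struct Reporter {
        msg_count: u32,
    }

    struct Handler {
        // RefCell defers borrow checking to runtime, so &self methods
        // can still mutate the wrapped reporter.
        verification_handler: RefCell<Reporter>,
    }

    impl Handler {
        // No &mut self needed: borrow_mut() hands out a mutable borrow
        // and panics if another borrow is still active.
        fn report(&self) {
            self.verification_handler.borrow_mut().msg_count += 1;
        }

        // With exclusive access, get_mut() skips the runtime check.
        fn report_exclusive(&mut self) {
            self.verification_handler.get_mut().msg_count += 1;
        }
    }

This mirrors the two access styles visible in the following hunks: borrow_mut() in &self contexts and get_mut() where a mutable reference is available.
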
+ pub verification_handler: RefCell, pub pus_buf: [u8; 2048], pub pus_size: usize, } @@ -360,7 +363,7 @@ pub mod std_mod { tm_apid, tm_tx, tm_store, - verification_handler, + verification_handler: RefCell::new(verification_handler), pus_buf: [0; 2048], pus_size: 0, } diff --git a/satrs-core/src/pus/scheduler_srv.rs b/satrs-core/src/pus/scheduler_srv.rs index 036c724..250ff8c 100644 --- a/satrs-core/src/pus/scheduler_srv.rs +++ b/satrs-core/src/pus/scheduler_srv.rs @@ -72,6 +72,7 @@ impl PusServiceHandler for PusService11SchedHandler { let start_token = self .psb .verification_handler + .get_mut() .start_success(token, Some(&time_stamp)) .expect("Error sending start success"); @@ -79,6 +80,7 @@ impl PusServiceHandler for PusService11SchedHandler { if self.scheduler.is_enabled() { self.psb .verification_handler + .get_mut() .completion_success(start_token, Some(&time_stamp)) .expect("Error sending completion success"); } else { @@ -89,6 +91,7 @@ impl PusServiceHandler for PusService11SchedHandler { let start_token = self .psb .verification_handler + .get_mut() .start_success(token, Some(&time_stamp)) .expect("Error sending start success"); @@ -96,6 +99,7 @@ impl PusServiceHandler for PusService11SchedHandler { if !self.scheduler.is_enabled() { self.psb .verification_handler + .get_mut() .completion_success(start_token, Some(&time_stamp)) .expect("Error sending completion success"); } else { @@ -106,6 +110,7 @@ impl PusServiceHandler for PusService11SchedHandler { let start_token = self .psb .verification_handler + .get_mut() .start_success(token, Some(&time_stamp)) .expect("Error sending start success"); @@ -117,6 +122,7 @@ impl PusServiceHandler for PusService11SchedHandler { self.psb .verification_handler + .get_mut() .completion_success(start_token, Some(&time_stamp)) .expect("Error sending completion success"); } @@ -124,6 +130,7 @@ impl PusServiceHandler for PusService11SchedHandler { let start_token = self .psb .verification_handler + .get_mut() .start_success(token, Some(&time_stamp)) .expect("error sending start success"); @@ -134,6 +141,7 @@ impl PusServiceHandler for PusService11SchedHandler { self.psb .verification_handler + .get_mut() .completion_success(start_token, Some(&time_stamp)) .expect("sending completion success failed"); } diff --git a/satrs-core/src/pus/test.rs b/satrs-core/src/pus/test.rs index a74f910..bd7c264 100644 --- a/satrs-core/src/pus/test.rs +++ b/satrs-core/src/pus/test.rs @@ -62,6 +62,7 @@ impl PusServiceHandler for PusService17TestHandler { let result = self .psb .verification_handler + .get_mut() .start_success(token, Some(&time_stamp)) .map_err(|_| PartialPusHandlingError::VerificationError); let start_token = if let Ok(result) = result { @@ -87,6 +88,7 @@ impl PusServiceHandler for PusService17TestHandler { if self .psb .verification_handler + .get_mut() .completion_success(start_token, Some(&time_stamp)) .is_err() { diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index 6cfe305..99a475c 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -11,13 +11,12 @@ use crate::hk::AcsHkIds; use crate::logging::setup_logger; use crate::pus::action::{Pus8Wrapper, PusService8ActionHandler}; use crate::pus::event::Pus5Wrapper; +use crate::pus::hk::{Pus3Wrapper, PusService3HkHandler}; use crate::pus::scheduler::Pus11Wrapper; use crate::pus::test::Service17CustomWrapper; use crate::pus::PusTcMpscRouter; use crate::requests::{Request, RequestWithToken}; -use crate::tmtc::{ - core_tmtc_task, OtherArgs, PusTcSource, TcArgs, TcStore, 
TmArgs, TmFunnel, PUS_APID, -}; +use crate::tmtc::{core_tmtc_task, PusTcSource, TcArgs, TcStore, TmArgs, TmFunnel, PUS_APID}; use satrs_core::event_man::{ EventManagerWithMpscQueue, MpscEventReceiver, MpscEventU32SendProvider, SendEventProvider, }; @@ -107,7 +106,7 @@ fn main() { // The event manager will receive the RX handle to receive all the events. let (event_sender, event_man_rx) = channel(); let event_recv = MpscEventReceiver::::new(event_man_rx); - let test_srv_event_sender = event_sender.clone(); + let test_srv_event_sender = event_sender; let mut event_man = EventManagerWithMpscQueue::new(Box::new(event_recv)); // All events sent to the manager are routed to the PUS event manager, which generates PUS event @@ -132,11 +131,6 @@ fn main() { }; // Create clones here to allow moving the values - let core_args = OtherArgs { - sock_addr, - verif_reporter: verif_reporter.clone(), - event_sender, - }; let tc_args = TcArgs { tc_source: tc_source_wrapper.clone(), tc_receiver: tc_source_rx, @@ -147,13 +141,13 @@ fn main() { tm_server_rx, }; - let aocs_to_funnel = tm_funnel_tx.clone(); + let aocs_tm_funnel = tm_funnel_tx.clone(); let mut aocs_tm_store = tm_store.clone(); let (pus_test_tx, pus_test_rx) = channel(); let (pus_event_tx, pus_event_rx) = channel(); let (pus_sched_tx, pus_sched_rx) = channel(); - let (pus_hk_tx, _pus_hk_rx) = channel(); + let (pus_hk_tx, pus_hk_rx) = channel(); let (pus_action_tx, pus_action_rx) = channel(); let pus_router = PusTcMpscRouter { test_service_receiver: pus_test_tx, @@ -206,16 +200,27 @@ fn main() { tm_funnel_tx.clone(), tm_store.clone(), PUS_APID, - verif_reporter, + verif_reporter.clone(), request_map.clone(), ); let mut pus_8_wrapper = Pus8Wrapper { pus_8_handler }; + let pus_3_handler = PusService3HkHandler::new( + pus_hk_rx, + tc_store.pool.clone(), + tm_funnel_tx.clone(), + tm_store.clone(), + PUS_APID, + verif_reporter.clone(), + request_map, + ); + let mut pus_3_wrapper = Pus3Wrapper { pus_3_handler }; + info!("Starting TMTC task"); let jh0 = thread::Builder::new() .name("TMTC".to_string()) .spawn(move || { - core_tmtc_task(core_args, tc_args, tm_args, pus_router); + core_tmtc_task(sock_addr, tc_args, tm_args, verif_reporter, pus_router); }) .unwrap(); @@ -363,7 +368,7 @@ fn main() { true, ); let addr = aocs_tm_store.add_pus_tm(&pus_tm); - aocs_to_funnel.send(addr).expect("Sending HK TM failed"); + aocs_tm_funnel.send(addr).expect("Sending HK TM failed"); } } HkRequest::Enable(_) => {} @@ -412,6 +417,7 @@ fn main() { is_srv_finished(pus_11_wrapper.handle_next_packet()); is_srv_finished(pus_5_wrapper.handle_next_packet()); is_srv_finished(pus_8_wrapper.handle_next_packet()); + is_srv_finished(pus_3_wrapper.handle_next_packet()); if all_queues_empty { break; } diff --git a/satrs-example/src/pus/action.rs b/satrs-example/src/pus/action.rs index 5f42ea3..0975c79 100644 --- a/satrs-example/src/pus/action.rs +++ b/satrs-example/src/pus/action.rs @@ -44,6 +44,64 @@ impl PusService8ActionHandler { } } +impl PusService8ActionHandler { + fn handle_action_request_with_id( + &self, + token: VerificationToken, + tc: &PusTc, + time_stamp: &[u8], + ) -> Result<(), PusPacketHandlingError> { + let user_data = tc.user_data(); + if user_data.is_none() || user_data.unwrap().len() < 8 { + self.psb() + .verification_handler + .borrow_mut() + .start_failure( + token, + FailParams::new(Some(time_stamp), &tmtc_err::NOT_ENOUGH_APP_DATA, None), + ) + .expect("Sending start failure failed"); + return Err(PusPacketHandlingError::NotEnoughAppData( + "Expected at least 4 
bytes".into(), + )); + } + let user_data = user_data.unwrap(); + let target_id = u32::from_be_bytes(user_data[0..4].try_into().unwrap()); + let action_id = u32::from_be_bytes(user_data[4..8].try_into().unwrap()); + if let Some(sender) = self.request_handlers.get(&target_id) { + sender + .send(RequestWithToken::new( + target_id, + Request::Action(ActionRequest::CmdWithU32Id(( + action_id, + Vec::from(&user_data[8..]), + ))), + token, + )) + .expect("Forwarding action request failed"); + } else { + let mut fail_data: [u8; 4] = [0; 4]; + fail_data.copy_from_slice(&target_id.to_be_bytes()); + self.psb() + .verification_handler + .borrow_mut() + .start_failure( + token, + FailParams::new( + Some(time_stamp), + &tmtc_err::UNKNOWN_TARGET_ID, + Some(&fail_data), + ), + ) + .expect("Sending start failure failed"); + return Err(PusPacketHandlingError::OtherError(format!( + "Unknown target ID {target_id}" + ))); + } + Ok(()) + } +} + impl PusServiceHandler for PusService8ActionHandler { fn psb_mut(&mut self) -> &mut PusServiceBase { &mut self.psb @@ -64,60 +122,13 @@ impl PusServiceHandler for PusService8ActionHandler { let time_stamp = self.psb().get_current_timestamp(&mut partial_error); match subservice { 128 => { - let user_data = tc.user_data(); - if user_data.is_none() || user_data.unwrap().len() < 8 { - self.psb_mut() - .verification_handler - .start_failure( - token, - FailParams::new( - Some(&time_stamp), - &tmtc_err::NOT_ENOUGH_APP_DATA, - None, - ), - ) - .expect("Sending start failure failed"); - return Err(PusPacketHandlingError::NotEnoughAppData( - "Expected at least 4 bytes".into(), - )); - } - let user_data = user_data.unwrap(); - let target_id = u32::from_be_bytes(user_data[0..4].try_into().unwrap()); - let action_id = u32::from_be_bytes(user_data[4..8].try_into().unwrap()); - if let Some(sender) = self.request_handlers.get(&target_id) { - sender - .send(RequestWithToken::new( - target_id, - Request::Action(ActionRequest::CmdWithU32Id(( - action_id, - Vec::from(&user_data[8..]), - ))), - token, - )) - .expect("Forwarding action request failed"); - } else { - let mut fail_data: [u8; 4] = [0; 4]; - fail_data.copy_from_slice(&target_id.to_be_bytes()); - self.psb_mut() - .verification_handler - .start_failure( - token, - FailParams::new( - Some(&time_stamp), - &tmtc_err::UNKNOWN_TARGET_ID, - Some(&fail_data), - ), - ) - .expect("Sending start failure failed"); - return Err(PusPacketHandlingError::OtherError(format!( - "Unknown target ID {target_id}" - ))); - } + self.handle_action_request_with_id(token, &tc, &time_stamp)?; } _ => { let fail_data = [subservice]; self.psb_mut() .verification_handler + .get_mut() .start_failure( token, FailParams::new( diff --git a/satrs-example/src/pus/hk.rs b/satrs-example/src/pus/hk.rs index 8b13789..56e0b4b 100644 --- a/satrs-example/src/pus/hk.rs +++ b/satrs-example/src/pus/hk.rs @@ -1 +1,195 @@ +use crate::requests::{Request, RequestWithToken}; +use log::{error, warn}; +use satrs_core::hk::{CollectionIntervalFactor, HkRequest}; +use satrs_core::pool::{SharedPool, StoreAddr}; +use satrs_core::pus::verification::{ + FailParams, StdVerifReporterWithSender, TcStateAccepted, VerificationToken, +}; +use satrs_core::pus::{ + AcceptedTc, PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase, PusServiceHandler, +}; +use satrs_core::spacepackets::ecss::{hk, PusPacket}; +use satrs_core::spacepackets::tc::PusTc; +use satrs_core::tmtc::tm_helper::SharedTmStore; +use satrs_core::tmtc::{AddressableId, TargetId}; +use satrs_example::{hk_err, tmtc_err}; +use 
std::collections::HashMap; +use std::sync::mpsc::{Receiver, Sender}; +pub struct PusService3HkHandler { + psb: PusServiceBase, + request_handlers: HashMap>, +} + +impl PusService3HkHandler { + pub fn new( + receiver: Receiver, + tc_pool: SharedPool, + tm_tx: Sender, + tm_store: SharedTmStore, + tm_apid: u16, + verification_handler: StdVerifReporterWithSender, + request_handlers: HashMap>, + ) -> Self { + Self { + psb: PusServiceBase::new( + receiver, + tc_pool, + tm_tx, + tm_store, + tm_apid, + verification_handler, + ), + request_handlers, + } + } +} + +impl PusServiceHandler for PusService3HkHandler { + fn psb_mut(&mut self) -> &mut PusServiceBase { + &mut self.psb + } + fn psb(&self) -> &PusServiceBase { + &self.psb + } + + fn handle_one_tc( + &mut self, + addr: StoreAddr, + token: VerificationToken, + ) -> Result { + self.copy_tc_to_buf(addr)?; + let (tc, _) = PusTc::from_bytes(&self.psb().pus_buf).unwrap(); + let subservice = tc.subservice(); + let mut partial_error = None; + let time_stamp = self.psb().get_current_timestamp(&mut partial_error); + let user_data = tc.user_data().unwrap(); + if tc.user_data().is_none() { + self.psb + .verification_handler + .borrow_mut() + .start_failure( + token, + FailParams::new(Some(&time_stamp), &tmtc_err::NOT_ENOUGH_APP_DATA, None), + ) + .expect("Sending start failure TM failed"); + return Err(PusPacketHandlingError::NotEnoughAppData( + "Expected at least 8 bytes of app data".into(), + )); + } + if user_data.len() < 8 { + let err = if user_data.len() < 4 { + &hk_err::TARGET_ID_MISSING + } else { + &hk_err::UNIQUE_ID_MISSING + }; + self.psb + .verification_handler + .borrow_mut() + .start_failure(token, FailParams::new(Some(&time_stamp), err, None)) + .expect("Sending start failure TM failed"); + return Err(PusPacketHandlingError::NotEnoughAppData( + "Expected at least 8 bytes of app data".into(), + )); + } + let addressable_id = AddressableId::from_raw_be(user_data).unwrap(); + if !self + .request_handlers + .contains_key(&addressable_id.target_id) + { + self.psb + .verification_handler + .borrow_mut() + .start_failure( + token, + FailParams::new(Some(&time_stamp), &hk_err::UNKNOWN_TARGET_ID, None), + ) + .expect("Sending start failure TM failed"); + let tgt_id = addressable_id.target_id; + return Err(PusPacketHandlingError::NotEnoughAppData(format!( + "Unknown target ID {tgt_id}" + ))); + } + let send_request = |target: TargetId, request: HkRequest| { + let sender = self + .request_handlers + .get(&addressable_id.target_id) + .unwrap(); + sender + .send(RequestWithToken::new(target, Request::Hk(request), token)) + .unwrap_or_else(|_| panic!("Sending HK request {request:?} failed")); + }; + if subservice == hk::Subservice::TcEnableHkGeneration as u8 { + send_request( + addressable_id.target_id, + HkRequest::Enable(addressable_id.unique_id), + ); + } else if subservice == hk::Subservice::TcDisableHkGeneration as u8 { + send_request( + addressable_id.target_id, + HkRequest::Disable(addressable_id.unique_id), + ); + } else if subservice == hk::Subservice::TcGenerateOneShotHk as u8 { + send_request( + addressable_id.target_id, + HkRequest::OneShot(addressable_id.unique_id), + ); + } else if subservice == hk::Subservice::TcModifyHkCollectionInterval as u8 { + if user_data.len() < 12 { + self.psb + .verification_handler + .borrow_mut() + .start_failure( + token, + FailParams::new( + Some(&time_stamp), + &hk_err::COLLECTION_INTERVAL_MISSING, + None, + ), + ) + .expect("Sending start failure TM failed"); + return 
Err(PusPacketHandlingError::NotEnoughAppData( + "Collection interval missing".into(), + )); + } + send_request( + addressable_id.target_id, + HkRequest::ModifyCollectionInterval( + addressable_id.unique_id, + CollectionIntervalFactor::from_be_bytes(user_data[8..12].try_into().unwrap()), + ), + ); + } + Ok(PusPacketHandlerResult::RequestHandled) + } +} + +pub struct Pus3Wrapper { + pub(crate) pus_3_handler: PusService3HkHandler, +} + +impl Pus3Wrapper { + pub fn handle_next_packet(&mut self) -> bool { + match self.pus_3_handler.handle_next_packet() { + Ok(result) => match result { + PusPacketHandlerResult::RequestHandled => {} + PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { + warn!("PUS 3 partial packet handling success: {e:?}") + } + PusPacketHandlerResult::CustomSubservice(invalid, _) => { + warn!("PUS 3 invalid subservice {invalid}"); + } + PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => { + warn!("PUS 3 subservice {subservice} not implemented"); + } + PusPacketHandlerResult::Empty => { + return true; + } + }, + Err(error) => { + error!("PUS packet handling error: {error:?}") + } + } + false + } +} diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index b7dcc11..fbb68ab 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -1,6 +1,4 @@ use crate::tmtc::MpscStoreAndSendError; -use satrs_core::events::EventU32; -use satrs_core::params::Params; use satrs_core::pool::StoreAddr; use satrs_core::pus::verification::{FailParams, StdVerifReporterWithSender}; use satrs_core::pus::AcceptedTc; @@ -8,7 +6,6 @@ use satrs_core::spacepackets::ecss::PusServiceId; use satrs_core::spacepackets::tc::PusTc; use satrs_core::spacepackets::time::cds::TimeProvider; use satrs_core::spacepackets::time::TimeWriter; -use satrs_core::tmtc::tm_helper::PusTmWithCdsShortHelper; use satrs_example::{tmtc_err, CustomPusServiceId}; use std::sync::mpsc::Sender; @@ -27,28 +24,9 @@ pub struct PusTcMpscRouter { } pub struct PusReceiver { - pub tm_helper: PusTmWithCdsShortHelper, - pub tm_args: PusTmArgs, - pub tc_args: PusTcArgs, - stamp_helper: TimeStampHelper, -} - -pub struct PusTmArgs { - /// All verification reporting is done with this reporter. 
pub verif_reporter: StdVerifReporterWithSender, -} - -impl PusTmArgs { - fn vr(&mut self) -> &mut StdVerifReporterWithSender { - &mut self.verif_reporter - } -} - -pub struct PusTcArgs { - /// This routes all telecommands to their respective recipients pub pus_router: PusTcMpscRouter, - /// Used to send events from within the TC router - pub event_sender: Sender<(EventU32, Option)>, + stamp_helper: TimeStampHelper, } struct TimeStampHelper { @@ -79,11 +57,10 @@ impl TimeStampHelper { } impl PusReceiver { - pub fn new(apid: u16, tm_arguments: PusTmArgs, tc_arguments: PusTcArgs) -> Self { + pub fn new(verif_reporter: StdVerifReporterWithSender, pus_router: PusTcMpscRouter) -> Self { Self { - tm_helper: PusTmWithCdsShortHelper::new(apid), - tm_args: tm_arguments, - tc_args: tc_arguments, + verif_reporter, + pus_router, stamp_helper: TimeStampHelper::new(), } } @@ -96,11 +73,10 @@ impl PusReceiver { service: u8, pus_tc: &PusTc, ) -> Result<(), MpscStoreAndSendError> { - let init_token = self.tm_args.verif_reporter.add_tc(pus_tc); + let init_token = self.verif_reporter.add_tc(pus_tc); self.stamp_helper.update_from_now(); let accepted_token = self - .tm_args - .vr() + .verif_reporter .acceptance_success(init_token, Some(self.stamp_helper.stamp())) .expect("Acceptance success failure"); let service = PusServiceId::try_from(service); @@ -108,7 +84,6 @@ impl PusReceiver { Ok(standard_service) => match standard_service { PusServiceId::Test => { let res = self - .tc_args .pus_router .test_service_receiver .send((store_addr, accepted_token)); @@ -120,25 +95,21 @@ impl PusReceiver { } } PusServiceId::Housekeeping => self - .tc_args .pus_router .hk_service_receiver .send((store_addr, accepted_token)) .unwrap(), PusServiceId::Event => self - .tc_args .pus_router .event_service_receiver .send((store_addr, accepted_token)) .unwrap(), PusServiceId::Scheduling => self - .tc_args .pus_router .sched_service_receiver .send((store_addr, accepted_token)) .unwrap(), _ => self - .tm_args .verif_reporter .start_failure( accepted_token, @@ -159,8 +130,7 @@ impl PusReceiver { CustomPusServiceId::Health => {} } } else { - self.tm_args - .verif_reporter + self.verif_reporter .start_failure( accepted_token, FailParams::new( @@ -176,299 +146,3 @@ impl PusReceiver { Ok(()) } } -// impl PusServiceProvider for PusReceiver { -// type Error = (); -// -// fn handle_pus_tc_packet( -// &mut self, -// service: u8, -// _header: &SpHeader, -// pus_tc: &PusTc, -// ) -> Result<(), Self::Error> { -// let init_token = self.tm_args.verif_reporter.add_tc(pus_tc); -// self.stamp_helper.update_from_now(); -// let accepted_token = self -// .tm_args -// .vr() -// .acceptance_success(init_token, Some(self.stamp_helper.stamp())) -// .expect("Acceptance success failure"); -// let service = PusServiceId::try_from(service); -// match service { -// Ok(standard_service) => match standard_service { -// PusServiceId::Test => self -// .tc_args -// .pus_router -// .test_service_receiver -// .send_tc(*pus_tc), -// PusServiceId::Housekeeping => { -// self.tc_args.pus_router.hk_service_receiver.send_tc(*pus_tc) -// } //self.handle_hk_request(pus_tc, accepted_token), -// PusServiceId::Event => self -// .tc_args -// .pus_router -// .event_service_receiver -// .send_tc(*pus_tc), //self.handle_event_request(pus_tc, accepted_token), -// PusServiceId::Scheduling => self -// .tc_args -// .pus_router -// .sched_service_receiver -// .send_tc(*pus_tc), //self.handle_scheduled_tc(pus_tc, accepted_token), -// _ => self -// .tm_args -// .verif_reporter -// 
.start_failure( -// accepted_token, -// FailParams::new( -// Some(self.stamp_helper.stamp()), -// &tmtc_err::PUS_SERVICE_NOT_IMPLEMENTED, -// Some(&[standard_service as u8]), -// ), -// ) -// .expect("Start failure verification failed"), -// }, -// Err(e) => { -// if let Ok(custom_service) = CustomPusServiceId::try_from(e.number) { -// match custom_service { -// CustomPusServiceId::Mode => { -// self.handle_mode_service(pus_tc, accepted_token) -// } -// CustomPusServiceId::Health => {} -// } -// } else { -// self.tm_args -// .verif_reporter -// .start_failure( -// accepted_token, -// FailParams::new( -// Some(self.stamp_helper.stamp()), -// &tmtc_err::INVALID_PUS_SUBSERVICE, -// Some(&[e.number]), -// ), -// ) -// .expect("Start failure verification failed") -// } -// } -// } -// Ok(()) -// } -// } - -// impl PusReceiver { -// -// fn handle_hk_request(&mut self, pus_tc: &PusTc, token: VerificationToken) { -// if pus_tc.user_data().is_none() { -// self.tm_args -// .verif_reporter -// .start_failure( -// token, -// FailParams::new( -// Some(self.stamp_helper.stamp()), -// &tmtc_err::NOT_ENOUGH_APP_DATA, -// None, -// ), -// ) -// .expect("Sending start failure TM failed"); -// return; -// } -// let user_data = pus_tc.user_data().unwrap(); -// if user_data.len() < 8 { -// let err = if user_data.len() < 4 { -// &hk_err::TARGET_ID_MISSING -// } else { -// &hk_err::UNIQUE_ID_MISSING -// }; -// self.tm_args -// .verif_reporter -// .start_failure( -// token, -// FailParams::new(Some(self.stamp_helper.stamp()), err, None), -// ) -// .expect("Sending start failure TM failed"); -// return; -// } -// let addressable_id = AddressableId::from_raw_be(user_data).unwrap(); -// if !self -// .tc_args -// .request_map -// .contains_key(&addressable_id.target_id) -// { -// self.tm_args -// .verif_reporter -// .start_failure( -// token, -// FailParams::new( -// Some(self.stamp_helper.stamp()), -// &hk_err::UNKNOWN_TARGET_ID, -// None, -// ), -// ) -// .expect("Sending start failure TM failed"); -// return; -// } -// let send_request = |target: TargetId, request: HkRequest| { -// let sender = self -// .tc_args -// .request_map -// .get(&addressable_id.target_id) -// .unwrap(); -// sender -// .send(RequestWithToken::new( -// target, -// Request::HkRequest(request), -// token, -// )) -// .unwrap_or_else(|_| panic!("Sending HK request {request:?} failed")); -// }; -// if PusPacket::subservice(pus_tc) == hk::Subservice::TcEnableHkGeneration as u8 { -// send_request( -// addressable_id.target_id, -// HkRequest::Enable(addressable_id.unique_id), -// ); -// } else if PusPacket::subservice(pus_tc) == hk::Subservice::TcDisableHkGeneration as u8 { -// send_request( -// addressable_id.target_id, -// HkRequest::Disable(addressable_id.unique_id), -// ); -// } else if PusPacket::subservice(pus_tc) == hk::Subservice::TcGenerateOneShotHk as u8 { -// send_request( -// addressable_id.target_id, -// HkRequest::OneShot(addressable_id.unique_id), -// ); -// } else if PusPacket::subservice(pus_tc) -// == hk::Subservice::TcModifyHkCollectionInterval as u8 -// { -// if user_data.len() < 12 { -// self.tm_args -// .verif_reporter -// .start_failure( -// token, -// FailParams::new( -// Some(self.stamp_helper.stamp()), -// &hk_err::COLLECTION_INTERVAL_MISSING, -// None, -// ), -// ) -// .expect("Sending start failure TM failed"); -// return; -// } -// send_request( -// addressable_id.target_id, -// HkRequest::ModifyCollectionInterval( -// addressable_id.unique_id, -// 
CollectionIntervalFactor::from_be_bytes(user_data[8..12].try_into().unwrap()), -// ), -// ); -// } -// } -// -// -// fn handle_mode_service(&mut self, pus_tc: &PusTc, token: VerificationToken) { -// let mut app_data_len = 0; -// let app_data = pus_tc.user_data(); -// if app_data.is_some() { -// app_data_len = pus_tc.user_data().unwrap().len(); -// } -// if app_data_len < 4 { -// self.tm_args -// .verif_reporter -// .start_failure( -// token, -// FailParams::new( -// Some(self.stamp_helper.stamp()), -// &tmtc_err::NOT_ENOUGH_APP_DATA, -// Some(format!("expected {} bytes, found {}", 4, app_data_len).as_bytes()), -// ), -// ) -// .expect("Sending start failure TM failed"); -// } -// let app_data = app_data.unwrap(); -// let mut invalid_subservice_handler = || { -// self.tm_args -// .verif_reporter -// .start_failure( -// token, -// FailParams::new( -// Some(self.stamp_helper.stamp()), -// &tmtc_err::INVALID_PUS_SUBSERVICE, -// Some(&[PusPacket::subservice(pus_tc)]), -// ), -// ) -// .expect("Sending start failure TM failed"); -// }; -// let subservice = mode::Subservice::try_from(PusPacket::subservice(pus_tc)); -// if let Ok(subservice) = subservice { -// let forward_mode_request = |target_id, mode_request: ModeRequest| match self -// .tc_args -// .request_map -// .get(&target_id) -// { -// None => warn!("not mode request recipient for target ID {target_id} found"), -// Some(sender_to_recipient) => { -// sender_to_recipient -// .send(RequestWithToken::new( -// target_id, -// Request::ModeRequest(mode_request), -// token, -// )) -// .expect("sending mode request failed"); -// } -// }; -// let mut valid_subservice = true; -// match subservice { -// Subservice::TcSetMode => { -// let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); -// let min_len = ModeAndSubmode::raw_len() + 4; -// if app_data_len < min_len { -// self.tm_args -// .verif_reporter -// .start_failure( -// token, -// FailParams::new( -// Some(self.stamp_helper.stamp()), -// &tmtc_err::NOT_ENOUGH_APP_DATA, -// Some( -// format!("expected {min_len} bytes, found {app_data_len}") -// .as_bytes(), -// ), -// ), -// ) -// .expect("Sending start failure TM failed"); -// } -// // Should never fail after size check -// let mode_submode = ModeAndSubmode::from_be_bytes( -// app_data[4..4 + ModeAndSubmode::raw_len()] -// .try_into() -// .unwrap(), -// ) -// .unwrap(); -// forward_mode_request(target_id, ModeRequest::SetMode(mode_submode)); -// } -// Subservice::TcReadMode => { -// let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); -// forward_mode_request(target_id, ModeRequest::ReadMode); -// } -// Subservice::TcAnnounceMode => { -// let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); -// forward_mode_request(target_id, ModeRequest::AnnounceMode); -// } -// Subservice::TcAnnounceModeRecursive => { -// let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); -// forward_mode_request(target_id, ModeRequest::AnnounceModeRecursive); -// } -// _ => { -// warn!("Can not process mode request with subservice {subservice:?}"); -// invalid_subservice_handler(); -// valid_subservice = false; -// } -// } -// if valid_subservice { -// self.tm_args -// .verif_reporter -// .start_success(token, Some(self.stamp_helper.stamp())) -// .expect("sending start success TM failed"); -// } -// } else { -// invalid_subservice_handler(); -// } -// } -// } diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index 707dae1..5edae74 100644 --- a/satrs-example/src/pus/test.rs 
+++ b/satrs-example/src/pus/test.rs @@ -51,10 +51,12 @@ impl Service17CustomWrapper { .expect("Sending test event failed"); let start_token = psb_mut .verification_handler + .get_mut() .start_success(token, Some(&stamp_buf)) .expect("Error sending start success"); psb_mut .verification_handler + .get_mut() .completion_success(start_token, Some(&stamp_buf)) .expect("Error sending completion success"); } else { @@ -62,6 +64,7 @@ impl Service17CustomWrapper { self.pus17_handler .psb_mut() .verification_handler + .get_mut() .start_failure( token, FailParams::new( diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index b26eb42..3adc4ea 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -1,7 +1,5 @@ use log::info; -use satrs_core::events::EventU32; use satrs_core::hal::host::udp_server::{ReceiveResult, UdpTcServer}; -use satrs_core::params::Params; use std::error::Error; use std::fmt::{Display, Formatter}; use std::net::SocketAddr; @@ -10,7 +8,7 @@ use std::thread; use std::time::Duration; use crate::ccsds::CcsdsReceiver; -use crate::pus::{PusReceiver, PusTcArgs, PusTcMpscRouter, PusTmArgs}; +use crate::pus::{PusReceiver, PusTcMpscRouter}; use satrs_core::pool::{SharedPool, StoreAddr, StoreError}; use satrs_core::pus::verification::StdVerifReporterWithSender; use satrs_core::spacepackets::ecss::{PusPacket, SerializablePusPacket}; @@ -21,12 +19,6 @@ use satrs_core::tmtc::{CcsdsDistributor, CcsdsError, ReceivesCcsdsTc, ReceivesEc pub const PUS_APID: u16 = 0x02; -pub struct OtherArgs { - pub sock_addr: SocketAddr, - pub verif_reporter: StdVerifReporterWithSender, - pub event_sender: Sender<(EventU32, Option)>, -} - pub struct TmArgs { pub tm_store: SharedTmStore, pub tm_sink_sender: Sender, @@ -141,19 +133,13 @@ impl ReceivesCcsdsTc for PusTcSource { } pub fn core_tmtc_task( - args: OtherArgs, + socket_addr: SocketAddr, mut tc_args: TcArgs, tm_args: TmArgs, + verif_reporter: StdVerifReporterWithSender, pus_router: PusTcMpscRouter, ) { - let pus_tm_args = PusTmArgs { - verif_reporter: args.verif_reporter, - }; - let pus_tc_args = PusTcArgs { - pus_router, - event_sender: args.event_sender, - }; - let mut pus_receiver = PusReceiver::new(PUS_APID, pus_tm_args, pus_tc_args); + let mut pus_receiver = PusReceiver::new(verif_reporter, pus_router); let ccsds_receiver = CcsdsReceiver { tc_source: tc_args.tc_source.clone(), @@ -161,7 +147,7 @@ pub fn core_tmtc_task( let ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver)); - let udp_tc_server = UdpTcServer::new(args.sock_addr, 2048, Box::new(ccsds_distributor)) + let udp_tc_server = UdpTcServer::new(socket_addr, 2048, Box::new(ccsds_distributor)) .expect("Creating UDP TMTC server failed"); let mut udp_tmtc_server = UdpTmtcServer { -- 2.43.0 From 28240da002ad635ad515d9120902bbce691115b8 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Thu, 6 Jul 2023 00:58:25 +0200 Subject: [PATCH 24/39] fix tests --- satrs-core/src/hal/host/udp_server.rs | 1 + satrs-core/src/pus/verification.rs | 32 +++++++++++++-------------- satrs-core/src/tmtc/ccsds_distrib.rs | 1 + satrs-core/tests/pus_verification.rs | 11 +-------- 4 files changed, 19 insertions(+), 26 deletions(-) diff --git a/satrs-core/src/hal/host/udp_server.rs b/satrs-core/src/hal/host/udp_server.rs index a83f4f8..8f80eb7 100644 --- a/satrs-core/src/hal/host/udp_server.rs +++ b/satrs-core/src/hal/host/udp_server.rs @@ -18,6 +18,7 @@ use std::vec::Vec; /// /// ``` /// use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}; +/// use 
spacepackets::ecss::SerializablePusPacket; /// use satrs_core::hal::host::udp_server::UdpTcServer; /// use satrs_core::tmtc::{ReceivesTc, ReceivesTcCore}; /// use spacepackets::SpHeader; diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs index b4db15f..c3c8522 100644 --- a/satrs-core/src/pus/verification.rs +++ b/satrs-core/src/pus/verification.rs @@ -29,8 +29,8 @@ //! let pool_cfg = PoolCfg::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]); //! let shared_tm_pool: SharedPool = Arc::new(RwLock::new(Box::new(LocalPool::new(pool_cfg.clone())))); //! let (verif_tx, verif_rx) = mpsc::channel(); -//! let sender = MpscVerifSender::new(shared_tm_pool.clone(), verif_tx); -//! let cfg = VerificationReporterCfg::new(TEST_APID, Box::new(SeqCountProviderSimple::default()), Box::new(SeqCountProviderSimple::default()), 1, 2, 8).unwrap(); +//! let sender = MpscVerifSender::new(0, "Test Sender", shared_tm_pool.clone(), verif_tx); +//! let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); //! let mut reporter = VerificationReporterWithSender::new(&cfg , Box::new(sender)); //! //! let mut sph = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); @@ -939,7 +939,7 @@ mod alloc_mod { /// Primary verification handler. It provides an API to send PUS 1 verification telemetry packets /// and verify the various steps of telecommand handling as specified in the PUS standard. - /// It is assumed that the sequence counter and message counters are written in a central + /// It is assumed that the sequence counter and message counters are updated in a central /// TM funnel. This helper will always set those fields to 0. #[derive(Clone)] pub struct VerificationReporter { @@ -1795,7 +1795,7 @@ mod tests { common: CommonTmInfo { subservice: 4, apid: TEST_APID, - msg_counter: 1, + msg_counter: 0, dest_id: 0, time_stamp: EMPTY_STAMP, }, @@ -1874,7 +1874,7 @@ mod tests { common: CommonTmInfo { subservice: 3, apid: TEST_APID, - msg_counter: 1, + msg_counter: 0, dest_id: 0, time_stamp: [0, 1, 0, 1, 0, 1, 0], }, @@ -1887,7 +1887,7 @@ mod tests { common: CommonTmInfo { subservice: 5, apid: TEST_APID, - msg_counter: 2, + msg_counter: 0, dest_id: 0, time_stamp: EMPTY_STAMP, }, @@ -1900,7 +1900,7 @@ mod tests { common: CommonTmInfo { subservice: 5, apid: TEST_APID, - msg_counter: 3, + msg_counter: 0, dest_id: 0, time_stamp: EMPTY_STAMP, }, @@ -1992,7 +1992,7 @@ mod tests { common: CommonTmInfo { subservice: 3, apid: TEST_APID, - msg_counter: 1, + msg_counter: 0, dest_id: 0, time_stamp: [0, 1, 0, 1, 0, 1, 0], }, @@ -2006,7 +2006,7 @@ mod tests { common: CommonTmInfo { subservice: 5, apid: TEST_APID, - msg_counter: 2, + msg_counter: 0, dest_id: 0, time_stamp: EMPTY_STAMP, }, @@ -2020,7 +2020,7 @@ mod tests { common: CommonTmInfo { subservice: 6, apid: TEST_APID, - msg_counter: 3, + msg_counter: 0, dest_id: 0, time_stamp: EMPTY_STAMP, }, @@ -2137,7 +2137,7 @@ mod tests { common: CommonTmInfo { subservice: 3, apid: TEST_APID, - msg_counter: 1, + msg_counter: 0, dest_id: 0, time_stamp: [0, 1, 0, 1, 0, 1, 0], }, @@ -2151,7 +2151,7 @@ mod tests { common: CommonTmInfo { subservice: 8, apid: TEST_APID, - msg_counter: 2, + msg_counter: 0, dest_id: 0, time_stamp: EMPTY_STAMP, }, @@ -2227,7 +2227,7 @@ mod tests { common: CommonTmInfo { subservice: 3, apid: TEST_APID, - msg_counter: 1, + msg_counter: 0, dest_id: 0, time_stamp: [0, 1, 0, 1, 0, 1, 0], }, @@ -2240,7 +2240,7 @@ mod tests { common: CommonTmInfo { subservice: 7, apid: TEST_APID, - msg_counter: 2, + msg_counter: 0, dest_id: 0, time_stamp: 
EMPTY_STAMP, }, @@ -2336,10 +2336,10 @@ mod tests { assert_eq!(pus_tm.sp_header.seq_count(), 0); } else if packet_idx == 1 { assert_eq!(pus_tm.subservice(), 3); - assert_eq!(pus_tm.sp_header.seq_count(), 1); + assert_eq!(pus_tm.sp_header.seq_count(), 0); } else if packet_idx == 2 { assert_eq!(pus_tm.subservice(), 7); - assert_eq!(pus_tm.sp_header.seq_count(), 2); + assert_eq!(pus_tm.sp_header.seq_count(), 0); } packet_idx += 1; } diff --git a/satrs-core/src/tmtc/ccsds_distrib.rs b/satrs-core/src/tmtc/ccsds_distrib.rs index 6cb4987..eefed54 100644 --- a/satrs-core/src/tmtc/ccsds_distrib.rs +++ b/satrs-core/src/tmtc/ccsds_distrib.rs @@ -21,6 +21,7 @@ //! use satrs_core::tmtc::ccsds_distrib::{CcsdsPacketHandler, CcsdsDistributor}; //! use satrs_core::tmtc::{ReceivesTc, ReceivesTcCore}; //! use spacepackets::{CcsdsPacket, SpHeader}; +//! use spacepackets::ecss::SerializablePusPacket; //! use spacepackets::tc::PusTc; //! //! #[derive (Default)] diff --git a/satrs-core/tests/pus_verification.rs b/satrs-core/tests/pus_verification.rs index 035904b..cd68e98 100644 --- a/satrs-core/tests/pus_verification.rs +++ b/satrs-core/tests/pus_verification.rs @@ -7,7 +7,6 @@ pub mod crossbeam_test { CrossbeamVerifSender, FailParams, RequestId, VerificationReporterCfg, VerificationReporterWithSender, }; - use satrs_core::seq_count::SeqCountProviderSyncClonable; use spacepackets::ecss::{EcssEnumU16, EcssEnumU8, PusPacket, SerializablePusPacket}; use spacepackets::tc::{PusTc, PusTcSecondaryHeader}; use spacepackets::tm::PusTm; @@ -32,15 +31,7 @@ pub mod crossbeam_test { // We use a synced sequence count provider here because both verification reporters have the // the same APID. If they had distinct APIDs, the more correct approach would be to have // each reporter have an own sequence count provider. - let cfg = VerificationReporterCfg::new( - TEST_APID, - Box::new(SeqCountProviderSyncClonable::default()), - Box::new(SeqCountProviderSyncClonable::default()), - 1, - 2, - 8, - ) - .unwrap(); + let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); // Shared pool object to store the verification PUS telemetry let pool_cfg = PoolCfg::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]); let shared_tm_pool: SharedPool = -- 2.43.0 From 0d6d85e6ffae6bf6cf8e8c48a5db48b53fc9af44 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Thu, 6 Jul 2023 01:00:26 +0200 Subject: [PATCH 25/39] fix broken link --- satrs-core/src/event_man.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/satrs-core/src/event_man.rs b/satrs-core/src/event_man.rs index ca47c39..c556e99 100644 --- a/satrs-core/src/event_man.rs +++ b/satrs-core/src/event_man.rs @@ -51,7 +51,7 @@ doc = ::embed_doc_image::embed_image!("event_man_arch", "images/event_man_arch.p //! //! # Examples //! -//! You can check [integration test](https://egit.irs.uni-stuttgart.de/rust/fsrc-launchpad/src/branch/main/fsrc-core/tests/pus_events.rs) +//! You can check [integration test](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-core/tests/pus_events.rs) //! for a concrete example using multi-threading where events are routed to //! different threads. 
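
All of the msg_counter and seq_count expectations above change to 0 because the reporters no longer stamp these fields themselves; a central TM funnel is expected to apply the final values before downlink. A hypothetical sketch of that idea (types and names invented for illustration, not the satrs-example funnel):

    // The funnel owns the only counter, so individual reporters can emit
    // packets with counter 0 without creating duplicates on the downlink.
    struct TmFunnel {
        seq_count: u16,
    }

    impl TmFunnel {
        fn stamp_and_forward(&mut self, packet: &mut [u8]) {
            // Bytes 2..4 of the CCSDS primary header hold the packet
            // sequence control: 2-bit sequence flags (0b11 = unsegmented)
            // plus a 14-bit sequence count.
            let psc: u16 = (0b11 << 14) | (self.seq_count & 0x3FFF);
            packet[2..4].copy_from_slice(&psc.to_be_bytes());
            self.seq_count = (self.seq_count + 1) & 0x3FFF;
        }
    }
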
 use crate::events::{EventU16, EventU32, GenericEvent, LargestEventRaw, LargestGroupIdRaw};
-- 
2.43.0


From 985af06d9431d6b08a3280d320b59746da54abfe Mon Sep 17 00:00:00 2001
From: Robin Mueller
Date: Thu, 6 Jul 2023 01:14:01 +0200
Subject: [PATCH 26/39] doc

---
 satrs-core/src/pus/mod.rs           | 12 ++++++++++++
 satrs-core/src/pus/scheduler_srv.rs | 12 ++++++++++++
 satrs-core/src/pus/test.rs          |  2 ++
 3 files changed, 26 insertions(+)

diff --git a/satrs-core/src/pus/mod.rs b/satrs-core/src/pus/mod.rs
index 9c04181..b8d9ec4 100644
--- a/satrs-core/src/pus/mod.rs
+++ b/satrs-core/src/pus/mod.rs
@@ -1,4 +1,7 @@
 //! # PUS support modules
+//!
+//! This module contains structures to make working with the PUS C standard easier.
+//! The satrs-example application contains various usage examples of these components.
 #[cfg(feature = "alloc")]
 use downcast_rs::{impl_downcast, Downcast};
 #[cfg(feature = "alloc")]
@@ -296,6 +299,8 @@ pub mod std_mod {
         InvalidSubservice(u8),
         #[error("Not enough application data available: {0}")]
         NotEnoughAppData(String),
+        #[error("Invalid application data")]
+        InvalidAppData(String),
         #[error("Generic store error: {0}")]
         StoreError(#[from] StoreError),
         #[error("Error with the pool RwGuard: {0}")]
@@ -318,6 +323,7 @@ pub mod std_mod {
         VerificationError,
     }
 
+    /// Generic result type for handlers which can process PUS packets.
     #[derive(Debug, Clone)]
     pub enum PusPacketHandlerResult {
         RequestHandled,
@@ -333,8 +339,13 @@ pub mod std_mod {
         }
     }
 
+    /// Generic abstraction for a telecommand being sent around after it has been accepted.
+    /// The actual telecommand is stored inside a pre-allocated pool structure.
     pub type AcceptedTc = (StoreAddr, VerificationToken<TcStateAccepted>);
 
+    /// Base class for handlers which can handle PUS TC packets. Right now, the message queue
+    /// backend is constrained to [mpsc::channel]s and the verification reporter
+    /// is constrained to the [StdVerifReporterWithSender].
     pub struct PusServiceBase {
         pub tc_rx: mpsc::Receiver<AcceptedTc>,
         pub tc_store: SharedPool,
@@ -383,6 +394,7 @@ pub mod std_mod {
             }
             time_stamp
         }
+
        pub fn get_current_timestamp_ignore_error(&self) -> [u8; 7] {
            let mut dummy = None;
            self.get_current_timestamp(&mut dummy)
diff --git a/satrs-core/src/pus/scheduler_srv.rs b/satrs-core/src/pus/scheduler_srv.rs
index 250ff8c..87f0830 100644
--- a/satrs-core/src/pus/scheduler_srv.rs
+++ b/satrs-core/src/pus/scheduler_srv.rs
@@ -10,6 +10,14 @@ use spacepackets::tc::PusTc;
 use spacepackets::time::cds::TimeProvider;
 use std::sync::mpsc::{Receiver, Sender};
 
+/// This is a helper class for [std] environments to handle generic PUS 11 (scheduling service)
+/// packets. This handler is constrained to using the [PusScheduler], but is able to process
+/// the most important PUS requests for a scheduling service.
+///
+/// Please note that this class does not do the regular periodic handling like releasing any
+/// telecommands inside the scheduler. The user can retrieve the wrapped scheduler via the
+/// [Self::scheduler] and [Self::scheduler_mut] function and then use the scheduler API to release
+/// telecommands when applicable.
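
Because the handler performs no periodic release itself, the owning thread has to drive the wrapped scheduler, roughly along these lines (a hypothetical driver loop; the release_telecommands name and its closure signature are assumptions about the PusScheduler API, which this patch does not show):

    // Hypothetical loop combining packet handling with periodic TC release.
    loop {
        pus_11_handler
            .handle_next_packet()
            .expect("PUS 11 packet handling failed");
        let mut pool = tc_pool.write().expect("locking TC pool failed");
        pus_11_handler
            .scheduler_mut()
            // Assumed API: release all due TCs, keeping none back.
            .release_telecommands(|_due, _addr| true, pool.as_mut())
            .expect("releasing TCs failed");
        drop(pool);
        std::thread::sleep(std::time::Duration::from_millis(200));
    }
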
pub struct PusService11SchedHandler { psb: PusServiceBase, scheduler: PusScheduler, @@ -41,6 +49,10 @@ impl PusService11SchedHandler { pub fn scheduler_mut(&mut self) -> &mut PusScheduler { &mut self.scheduler } + + pub fn scheduler(&self) -> &PusScheduler { + &self.scheduler + } } impl PusServiceHandler for PusService11SchedHandler { diff --git a/satrs-core/src/pus/test.rs b/satrs-core/src/pus/test.rs index bd7c264..99ccfa0 100644 --- a/satrs-core/src/pus/test.rs +++ b/satrs-core/src/pus/test.rs @@ -12,6 +12,8 @@ use spacepackets::SpHeader; use std::format; use std::sync::mpsc::{Receiver, Sender}; +/// This is a helper class for [std] environments to handle generic PUS 17 (test service) packets. +/// This handler only processes ping requests and generates a ping reply for them accordingly. pub struct PusService17TestHandler { psb: PusServiceBase, } -- 2.43.0 From 4decb1d0b7ed8a2f3f599122a30dd3b4e37d1900 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Thu, 6 Jul 2023 01:31:12 +0200 Subject: [PATCH 27/39] update spacepackets to released version v0.6.0 --- satrs-core/Cargo.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/satrs-core/Cargo.toml b/satrs-core/Cargo.toml index 6424c01..895be4f 100644 --- a/satrs-core/Cargo.toml +++ b/satrs-core/Cargo.toml @@ -61,10 +61,10 @@ default-features = false optional = true [dependencies.spacepackets] -# version = "0.5.4" +version = "0.6" # path = "../spacepackets" -git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git" -rev = "4485ed26699d32" +# git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git" +# rev = "4485ed26699d32" default-features = false [dev-dependencies] -- 2.43.0 From 42775c0bb7d22397e140b927b1978f798157e192 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Fri, 7 Jul 2023 01:25:23 +0200 Subject: [PATCH 28/39] improve and fix sequence counters --- satrs-core/src/pus/verification.rs | 2 + satrs-core/src/seq_count.rs | 142 +++++++++++++++++++++++------ satrs-example/src/main.rs | 4 +- 3 files changed, 117 insertions(+), 31 deletions(-) diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs index c3c8522..7c187f0 100644 --- a/satrs-core/src/pus/verification.rs +++ b/satrs-core/src/pus/verification.rs @@ -944,6 +944,8 @@ mod alloc_mod { #[derive(Clone)] pub struct VerificationReporter { source_data_buf: Vec, + //pub seq_count_provider: Option> + //pub msg_count_provider: Option>>, pub reporter: VerificationReporterCore, } diff --git a/satrs-core/src/seq_count.rs b/satrs-core/src/seq_count.rs index 2258d94..af03f68 100644 --- a/satrs-core/src/seq_count.rs +++ b/satrs-core/src/seq_count.rs @@ -2,6 +2,8 @@ use core::cell::Cell; use core::sync::atomic::{AtomicU16, Ordering}; #[cfg(feature = "alloc")] use dyn_clone::DynClone; +use paste::paste; +use spacepackets::MAX_SEQ_COUNT; #[cfg(feature = "std")] pub use stdmod::*; @@ -15,21 +17,11 @@ pub trait SequenceCountProviderCore { fn increment(&self); - // TODO: Maybe remove this? - fn increment_mut(&mut self) { - self.increment(); - } - fn get_and_increment(&self) -> Raw { let val = self.get(); self.increment(); val } - - // TODO: Maybe remove this? 
-    fn get_and_increment_mut(&mut self) -> Raw {
-        self.get_and_increment()
-    }
 }
 
 /// Extension trait which allows cloning a sequence count provider after it was turned into
@@ -42,36 +34,84 @@ dyn_clone::clone_trait_object!(SequenceCountProvider);
 impl<T, Raw> SequenceCountProvider<Raw> for T where T: SequenceCountProviderCore<Raw> + Clone {}
 
 #[derive(Default, Clone)]
-pub struct SeqCountProviderSimple {
-    seq_count: Cell<u16>,
+pub struct SeqCountProviderSimple<T: Copy> {
+    seq_count: Cell<T>,
+    max_val: T,
 }
 
-impl SeqCountProviderSimple {
+macro_rules! impl_for_primitives {
+    ($($ty: ident,)+) => {
+        $(
+            paste! {
+                impl SeqCountProviderSimple<$ty> {
+                    pub fn [<new_ $ty _max_val>](max_val: $ty) -> Self {
+                        Self {
+                            seq_count: Cell::new(0),
+                            max_val,
+                        }
+                    }
+
+                    pub fn [<new_ $ty>]() -> Self {
+                        Self {
+                            seq_count: Cell::new(0),
+                            max_val: $ty::MAX
+                        }
+                    }
+                }
+
+                impl SequenceCountProviderCore<$ty> for SeqCountProviderSimple<$ty> {
+                    fn get(&self) -> $ty {
+                        self.seq_count.get()
+                    }
+
+                    fn increment(&self) {
+                        self.get_and_increment();
+                    }
+
+                    fn get_and_increment(&self) -> $ty {
+                        let curr_count = self.seq_count.get();
+
+                        if curr_count == self.max_val {
+                            self.seq_count.set(0);
+                        } else {
+                            self.seq_count.set(curr_count + 1);
+                        }
+                        curr_count
+                    }
+                }
+            }
+        )+
+    }
+}
+
+impl_for_primitives!(u8, u16, u32, u64,);
+
+/// This is a sequence count provider which wraps around at [MAX_SEQ_COUNT].
+pub struct CcsdsSimpleSeqCountProvider {
+    provider: SeqCountProviderSimple<u16>,
+}
+
+impl CcsdsSimpleSeqCountProvider {
     pub fn new() -> Self {
         Self {
-            seq_count: Cell::new(0),
+            provider: SeqCountProviderSimple::new_u16_max_val(MAX_SEQ_COUNT),
         }
     }
 }
 
-impl SequenceCountProviderCore<u16> for SeqCountProviderSimple {
-    fn get(&self) -> u16 {
-        self.seq_count.get()
+impl Default for CcsdsSimpleSeqCountProvider {
+    fn default() -> Self {
+        Self::new()
     }
+}
 
-    fn increment(&self) {
-        self.get_and_increment();
-    }
-
-    fn get_and_increment(&self) -> u16 {
-        let curr_count = self.seq_count.get();
-
-        if curr_count == u16::MAX {
-            self.seq_count.set(0);
-        } else {
-            self.seq_count.set(curr_count + 1);
+impl SequenceCountProviderCore<u16> for CcsdsSimpleSeqCountProvider {
+    delegate::delegate!
{ + to self.provider { + fn get(&self) -> u16; + fn increment(&self); + fn get_and_increment(&self) -> u16; } - curr_count } } @@ -127,3 +167,47 @@ pub mod stdmod { } } } + +#[cfg(test)] +mod tests { + use crate::seq_count::{ + CcsdsSimpleSeqCountProvider, SeqCountProviderSimple, SequenceCountProviderCore, + }; + use spacepackets::MAX_SEQ_COUNT; + + #[test] + fn test_u8_counter() { + let u8_counter = SeqCountProviderSimple::new_u8(); + assert_eq!(u8_counter.get(), 0); + assert_eq!(u8_counter.get_and_increment(), 0); + assert_eq!(u8_counter.get_and_increment(), 1); + assert_eq!(u8_counter.get(), 2); + } + + #[test] + fn test_u8_counter_overflow() { + let u8_counter = SeqCountProviderSimple::new_u8(); + for _ in 0..256 { + u8_counter.increment(); + } + assert_eq!(u8_counter.get(), 0); + } + + #[test] + fn test_ccsds_counter() { + let ccsds_counter = CcsdsSimpleSeqCountProvider::default(); + assert_eq!(ccsds_counter.get(), 0); + assert_eq!(ccsds_counter.get_and_increment(), 0); + assert_eq!(ccsds_counter.get_and_increment(), 1); + assert_eq!(ccsds_counter.get(), 2); + } + + #[test] + fn test_ccsds_counter_overflow() { + let ccsds_counter = CcsdsSimpleSeqCountProvider::default(); + for _ in 0..MAX_SEQ_COUNT + 1 { + ccsds_counter.increment(); + } + assert_eq!(ccsds_counter.get(), 0); + } +} diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index 99a475c..743a148 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -36,7 +36,7 @@ use satrs_core::pus::verification::{ MpscVerifSender, VerificationReporterCfg, VerificationReporterWithSender, }; use satrs_core::pus::MpscTmtcInStoreSender; -use satrs_core::seq_count::{SeqCountProviderSimple, SequenceCountProviderCore}; +use satrs_core::seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore}; use satrs_core::spacepackets::tm::PusTmZeroCopyWriter; use satrs_core::spacepackets::{ time::cds::TimeProvider, @@ -79,7 +79,7 @@ fn main() { pool: Arc::new(RwLock::new(Box::new(tc_pool))), }; - let seq_count_provider = SeqCountProviderSimple::new(); + let seq_count_provider = CcsdsSimpleSeqCountProvider::new(); let mut msg_counter_map: HashMap = HashMap::new(); let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT); let (tc_source_tx, tc_source_rx) = channel(); -- 2.43.0 From b8c338c91c8cb936bee15a18b1d0d4f63db3e512 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Fri, 7 Jul 2023 19:49:11 +0200 Subject: [PATCH 29/39] these counter types are more useful --- satrs-core/src/seq_count.rs | 122 ++++++++++++++++++++++-------------- 1 file changed, 74 insertions(+), 48 deletions(-) diff --git a/satrs-core/src/seq_count.rs b/satrs-core/src/seq_count.rs index af03f68..5658281 100644 --- a/satrs-core/src/seq_count.rs +++ b/satrs-core/src/seq_count.rs @@ -1,5 +1,4 @@ use core::cell::Cell; -use core::sync::atomic::{AtomicU16, Ordering}; #[cfg(feature = "alloc")] use dyn_clone::DynClone; use paste::paste; @@ -115,63 +114,63 @@ impl SequenceCountProviderCore for CcsdsSimpleSeqCountProvider { } } -pub struct SeqCountProviderAtomicRef { - atomic: AtomicU16, - ordering: Ordering, -} - -impl SeqCountProviderAtomicRef { - pub const fn new(ordering: Ordering) -> Self { - Self { - atomic: AtomicU16::new(0), - ordering, - } - } -} - -impl SequenceCountProviderCore for SeqCountProviderAtomicRef { - fn get(&self) -> u16 { - self.atomic.load(self.ordering) - } - - fn increment(&self) { - self.atomic.fetch_add(1, self.ordering); - } - - fn get_and_increment(&self) -> u16 { - self.atomic.fetch_add(1, self.ordering) - 
}
-}
-
 #[cfg(feature = "std")]
 pub mod stdmod {
     use super::*;
-    use std::sync::Arc;
+    use std::sync::{Arc, Mutex};
 
-    #[derive(Clone, Default)]
-    pub struct SeqCountProviderSyncClonable {
-        seq_count: Arc<AtomicU16>,
-    }
-
-    impl SequenceCountProviderCore<u16> for SeqCountProviderSyncClonable {
-        fn get(&self) -> u16 {
-            self.seq_count.load(Ordering::SeqCst)
-        }
-
-        fn increment(&self) {
-            self.seq_count.fetch_add(1, Ordering::SeqCst);
-        }
-
-        fn get_and_increment(&self) -> u16 {
-            self.seq_count.fetch_add(1, Ordering::SeqCst)
-        }
+    macro_rules! sync_clonable_seq_counter_impl {
+        ($($ty: ident,)+) => {
+            $(paste! {
+                #[derive(Clone, Default)]
+                pub struct [<SeqCountProviderSync $ty:upper>] {
+                    seq_count: Arc<Mutex<$ty>>,
+                    max_val: $ty
+                }
+
+                impl [<SeqCountProviderSync $ty:upper>] {
+                    pub fn new() -> Self {
+                        Self::new_with_max_val($ty::MAX)
+                    }
+
+                    pub fn new_with_max_val(max_val: $ty) -> Self {
+                        Self {
+                            seq_count: Arc::default(),
+                            max_val
+                        }
+                    }
+                }
+                impl SequenceCountProviderCore<$ty> for [<SeqCountProviderSync $ty:upper>] {
+                    fn get(&self) -> $ty {
+                        *self.seq_count.lock().unwrap()
+                    }
+
+                    fn increment(&self) {
+                        self.get_and_increment();
+                    }
+
+                    fn get_and_increment(&self) -> $ty {
+                        let mut counter = self.seq_count.lock().unwrap();
+                        let current_val = *counter;
+                        if *counter == self.max_val {
+                            *counter = 0;
+                        } else {
+                            *counter += 1;
+                        }
+                        current_val
+                    }
+                }
+            })+
+        }
     }
+    sync_clonable_seq_counter_impl!(u8, u16, u32, u64,);
 }
 
 #[cfg(test)]
 mod tests {
     use crate::seq_count::{
-        CcsdsSimpleSeqCountProvider, SeqCountProviderSimple, SequenceCountProviderCore,
+        CcsdsSimpleSeqCountProvider, SeqCountProviderSimple, SeqCountProviderSyncU8,
+        SequenceCountProviderCore,
     };
     use spacepackets::MAX_SEQ_COUNT;
 
@@ -210,4 +209,31 @@ mod tests {
         }
         assert_eq!(ccsds_counter.get(), 0);
     }
+
+    #[test]
+    fn test_atomic_ref_counters() {
+        let sync_u8_counter = SeqCountProviderSyncU8::new();
+        assert_eq!(sync_u8_counter.get(), 0);
+        assert_eq!(sync_u8_counter.get_and_increment(), 0);
+        assert_eq!(sync_u8_counter.get_and_increment(), 1);
+        assert_eq!(sync_u8_counter.get(), 2);
+    }
+
+    #[test]
+    fn test_atomic_ref_counters_overflow() {
+        let sync_u8_counter = SeqCountProviderSyncU8::new();
+        for _ in 0..u8::MAX as u16 + 1 {
+            sync_u8_counter.increment();
+        }
+        assert_eq!(sync_u8_counter.get(), 0);
+    }
+
+    #[test]
+    fn test_atomic_ref_counters_overflow_custom_max_val() {
+        let sync_u8_counter = SeqCountProviderSyncU8::new_with_max_val(128);
+        for _ in 0..129 {
+            sync_u8_counter.increment();
+        }
+        assert_eq!(sync_u8_counter.get(), 0);
+    }
 }
-- 
2.43.0


From fd31adc19ecad64d64557604d31f29b2c7f5c71e Mon Sep 17 00:00:00 2001
From: Robin Mueller
Date: Fri, 7 Jul 2023 20:04:33 +0200
Subject: [PATCH 30/39] cleaner impl, tests

---
 satrs-core/src/seq_count.rs | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/satrs-core/src/seq_count.rs b/satrs-core/src/seq_count.rs
index 5658281..c60c8f7 100644
--- a/satrs-core/src/seq_count.rs
+++ b/satrs-core/src/seq_count.rs
@@ -122,6 +122,10 @@ pub mod stdmod {
     macro_rules! sync_clonable_seq_counter_impl {
         ($($ty: ident,)+) => {
             $(paste! {
+                /// These sequence counters can be shared between threads and can also be
+                /// configured to wrap around at specified maximum values. Please note that
+                /// the API provided by this class will not panic on [Mutex] lock errors,
+                /// but it will yield 0 for the getter functions.
#[derive(Clone, Default)] pub struct [] { seq_count: Arc>, @@ -142,7 +146,10 @@ pub mod stdmod { } impl SequenceCountProviderCore<$ty> for [] { fn get(&self) -> $ty { - *self.seq_count.lock().unwrap() + match self.seq_count.lock() { + Ok(counter) => *counter, + Err(_) => 0 + } } fn increment(&self) { @@ -150,14 +157,18 @@ pub mod stdmod { } fn get_and_increment(&self) -> $ty { - let mut counter = self.seq_count.lock().unwrap(); - let current_val = *counter; - if *counter == self.max_val { - *counter = 0; - } else { - *counter += 1; + match self.seq_count.lock() { + Ok(mut counter) => { + let val = *counter; + if val == self.max_val { + *counter = 0; + } else { + *counter += 1; + } + val + } + Err(_) => 0, } - current_val } } })+ -- 2.43.0 From dd91b7d815a885997f14673cffa68657d6bf7a58 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Fri, 7 Jul 2023 20:11:07 +0200 Subject: [PATCH 31/39] add back sequence count providers but optional --- satrs-core/src/pus/verification.rs | 103 ++++++++++++++++++++++++----- 1 file changed, 85 insertions(+), 18 deletions(-) diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs index 7c187f0..7d6b136 100644 --- a/satrs-core/src/pus/verification.rs +++ b/satrs-core/src/pus/verification.rs @@ -906,6 +906,7 @@ impl VerificationReporterCore { mod alloc_mod { use super::*; use crate::pus::alloc_mod::EcssTmSender; + use crate::seq_count::SequenceCountProvider; use alloc::boxed::Box; use alloc::vec; use alloc::vec::Vec; @@ -944,8 +945,8 @@ mod alloc_mod { #[derive(Clone)] pub struct VerificationReporter { source_data_buf: Vec, - //pub seq_count_provider: Option> - //pub msg_count_provider: Option>>, + pub seq_count_provider: Option + Send>>, + pub msg_count_provider: Option + Send>>, pub reporter: VerificationReporterCore, } @@ -960,6 +961,8 @@ mod alloc_mod { + cfg.fail_code_field_width + cfg.max_fail_data_len ], + seq_count_provider: None, + msg_count_provider: None, reporter, } } @@ -989,11 +992,19 @@ mod alloc_mod { VerificationToken, VerificationOrSendErrorWithToken, > { + let seq_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); + let msg_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); let sendable = self.reporter.acceptance_success( self.source_data_buf.as_mut_slice(), token, - 0, - 0, + seq_count, + msg_count, time_stamp, )?; self.reporter.send_acceptance_success(sendable, sender) @@ -1006,11 +1017,19 @@ mod alloc_mod { sender: &mut (impl EcssTmSenderCore + ?Sized), params: FailParams, ) -> Result<(), VerificationOrSendErrorWithToken> { + let seq_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); + let msg_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); let sendable = self.reporter.acceptance_failure( self.source_data_buf.as_mut_slice(), token, - 0, - 0, + seq_count, + msg_count, params, )?; self.reporter.send_acceptance_failure(sendable, sender) @@ -1028,11 +1047,19 @@ mod alloc_mod { VerificationToken, VerificationOrSendErrorWithToken, > { + let seq_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); + let msg_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); let sendable = self.reporter.start_success( self.source_data_buf.as_mut_slice(), token, - 0, - 0, + seq_count, + msg_count, time_stamp, )?; self.reporter.send_start_success(sendable, sender) @@ -1048,11 +1075,19 @@ mod alloc_mod { sender: &mut 
(impl EcssTmSenderCore + ?Sized), params: FailParams, ) -> Result<(), VerificationOrSendErrorWithToken> { + let seq_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); + let msg_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); let sendable = self.reporter.start_failure( self.source_data_buf.as_mut_slice(), token, - 0, - 0, + seq_count, + msg_count, params, )?; self.reporter.send_start_failure(sendable, sender) @@ -1068,11 +1103,19 @@ mod alloc_mod { time_stamp: Option<&[u8]>, step: impl EcssEnumeration, ) -> Result<(), EcssTmtcErrorWithSend> { + let seq_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); + let msg_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); let sendable = self.reporter.step_success( self.source_data_buf.as_mut_slice(), token, - 0, - 0, + seq_count, + msg_count, time_stamp, step, )?; @@ -1091,11 +1134,19 @@ mod alloc_mod { sender: &mut (impl EcssTmSenderCore + ?Sized), params: FailParamsWithStep, ) -> Result<(), VerificationOrSendErrorWithToken> { + let seq_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); + let msg_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); let sendable = self.reporter.step_failure( self.source_data_buf.as_mut_slice(), token, - 0, - 0, + seq_count, + msg_count, params, )?; self.reporter @@ -1112,11 +1163,19 @@ mod alloc_mod { sender: &mut (impl EcssTmSenderCore + ?Sized), time_stamp: Option<&[u8]>, ) -> Result<(), VerificationOrSendErrorWithToken> { + let seq_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); + let msg_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); let sendable = self.reporter.completion_success( self.source_data_buf.as_mut_slice(), token, - 0, - 0, + seq_count, + msg_count, time_stamp, )?; self.reporter @@ -1133,11 +1192,19 @@ mod alloc_mod { sender: &mut (impl EcssTmSenderCore + ?Sized), params: FailParams, ) -> Result<(), VerificationOrSendErrorWithToken> { + let seq_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); + let msg_count = self + .seq_count_provider + .as_ref() + .map_or(0, |v| v.get_and_increment()); let sendable = self.reporter.completion_failure( self.source_data_buf.as_mut_slice(), token, - 0, - 0, + seq_count, + msg_count, params, )?; self.reporter -- 2.43.0 From 0537956febb004c472470ee8d1adf2cf32868b0e Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Fri, 7 Jul 2023 20:31:38 +0200 Subject: [PATCH 32/39] add first unittest for PUS service handler --- satrs-core/src/pus/test.rs | 67 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/satrs-core/src/pus/test.rs b/satrs-core/src/pus/test.rs index 99ccfa0..4308791 100644 --- a/satrs-core/src/pus/test.rs +++ b/satrs-core/src/pus/test.rs @@ -110,3 +110,70 @@ impl PusServiceHandler for PusService17TestHandler { )) } } + +#[cfg(test)] +mod tests { + use crate::pool::{LocalPool, PoolCfg, SharedPool}; + use crate::pus::test::PusService17TestHandler; + use crate::pus::verification::{ + MpscVerifSender, StdVerifReporterWithSender, VerificationReporterCfg, + }; + use crate::pus::PusServiceHandler; + use crate::tmtc::tm_helper::SharedTmStore; + use spacepackets::ecss::SerializablePusPacket; + use spacepackets::tc::{PusTc, PusTcSecondaryHeader}; + use spacepackets::{SequenceFlags, SpHeader}; + use 
std::boxed::Box; + use std::sync::{mpsc, RwLock}; + use std::vec; + + const TEST_APID: u16 = 0x101; + + #[test] + fn test_basic() { + let mut pus_buf: [u8; 64] = [0; 64]; + let pool_cfg = PoolCfg::new(vec![(16, 16), (8, 32), (4, 64)]); + let tc_pool = LocalPool::new(pool_cfg.clone()); + let tm_pool = LocalPool::new(pool_cfg); + let tc_pool_shared = SharedPool::new(RwLock::new(Box::new(tc_pool))); + let tm_pool_shared = SharedPool::new(RwLock::new(Box::new(tm_pool))); + let shared_tm_store = SharedTmStore::new(tm_pool_shared); + let (test_srv_tx, test_srv_rx) = mpsc::channel(); + let (tm_tx, tm_rx) = mpsc::channel(); + let verif_sender = MpscVerifSender::new( + 0, + "verif_sender", + shared_tm_store.backing_pool(), + tm_tx.clone(), + ); + let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); + let mut verification_handler = + StdVerifReporterWithSender::new(&verif_cfg, Box::new(verif_sender)); + let mut pus_17_handler = PusService17TestHandler::new( + test_srv_rx, + tc_pool_shared.clone(), + tm_tx, + shared_tm_store, + TEST_APID, + verification_handler.clone(), + ); + // Create a ping TC, verify acceptance. + let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); + let sec_header = PusTcSecondaryHeader::new_simple(17, 1); + let ping_tc = PusTc::new(&mut sp_header, sec_header, None, true); + let token = verification_handler.add_tc(&ping_tc); + let token = verification_handler + .acceptance_success(token, None) + .unwrap(); + let tc_size = ping_tc.write_to_bytes(&mut pus_buf).unwrap(); + let mut tc_pool = tc_pool_shared.write().unwrap(); + let addr = tc_pool.add(&pus_buf[..tc_size]).unwrap(); + drop(tc_pool); + // Send accepted TC to test service handler. + test_srv_tx.send((addr, token)).unwrap(); + let result = pus_17_handler.handle_next_packet(); + assert!(result.is_ok()); + // We should see 4 replies in the TM queue now: Acceptance TM, Start TM, ping reply and + // Completion TM + } +} -- 2.43.0 From ea44093ce76adc21d2aac2d94bd9e950dd1d967d Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Fri, 7 Jul 2023 21:14:14 +0200 Subject: [PATCH 33/39] first basic test for PUS test service done --- satrs-core/src/pus/test.rs | 54 +++++++++++++++++++++++++++++++++++--- 1 file changed, 50 insertions(+), 4 deletions(-) diff --git a/satrs-core/src/pus/test.rs b/satrs-core/src/pus/test.rs index 4308791..1c8a184 100644 --- a/satrs-core/src/pus/test.rs +++ b/satrs-core/src/pus/test.rs @@ -116,12 +116,13 @@ mod tests { use crate::pool::{LocalPool, PoolCfg, SharedPool}; use crate::pus::test::PusService17TestHandler; use crate::pus::verification::{ - MpscVerifSender, StdVerifReporterWithSender, VerificationReporterCfg, + MpscVerifSender, RequestId, StdVerifReporterWithSender, VerificationReporterCfg, }; use crate::pus::PusServiceHandler; use crate::tmtc::tm_helper::SharedTmStore; - use spacepackets::ecss::SerializablePusPacket; + use spacepackets::ecss::{PusPacket, SerializablePusPacket}; use spacepackets::tc::{PusTc, PusTcSecondaryHeader}; + use spacepackets::tm::PusTm; use spacepackets::{SequenceFlags, SpHeader}; use std::boxed::Box; use std::sync::{mpsc, RwLock}; @@ -130,14 +131,14 @@ mod tests { const TEST_APID: u16 = 0x101; #[test] - fn test_basic() { + fn test_basic_ping_processing() { let mut pus_buf: [u8; 64] = [0; 64]; let pool_cfg = PoolCfg::new(vec![(16, 16), (8, 32), (4, 64)]); let tc_pool = LocalPool::new(pool_cfg.clone()); let tm_pool = LocalPool::new(pool_cfg); let tc_pool_shared = SharedPool::new(RwLock::new(Box::new(tc_pool))); let 
tm_pool_shared = SharedPool::new(RwLock::new(Box::new(tm_pool))); - let shared_tm_store = SharedTmStore::new(tm_pool_shared); + let shared_tm_store = SharedTmStore::new(tm_pool_shared.clone()); let (test_srv_tx, test_srv_rx) = mpsc::channel(); let (tm_tx, tm_rx) = mpsc::channel(); let verif_sender = MpscVerifSender::new( @@ -175,5 +176,50 @@ mod tests { assert!(result.is_ok()); // We should see 4 replies in the TM queue now: Acceptance TM, Start TM, ping reply and // Completion TM + let mut next_msg = tm_rx.try_recv(); + assert!(next_msg.is_ok()); + let mut tm_addr = next_msg.unwrap(); + let tm_pool = tm_pool_shared.read().unwrap(); + let tm_raw = tm_pool.read(&tm_addr).unwrap(); + let (tm, _) = PusTm::from_bytes(&tm_raw, 0).unwrap(); + assert_eq!(tm.service(), 1); + assert_eq!(tm.subservice(), 1); + let req_id = RequestId::from_bytes(tm.user_data().unwrap()).unwrap(); + assert_eq!(req_id, token.req_id()); + + // Acceptance TM + next_msg = tm_rx.try_recv(); + assert!(next_msg.is_ok()); + tm_addr = next_msg.unwrap(); + let tm_raw = tm_pool.read(&tm_addr).unwrap(); + // Is generated with CDS short timestamp. + let (tm, _) = PusTm::from_bytes(&tm_raw, 7).unwrap(); + assert_eq!(tm.service(), 1); + assert_eq!(tm.subservice(), 3); + let req_id = RequestId::from_bytes(tm.user_data().unwrap()).unwrap(); + assert_eq!(req_id, token.req_id()); + + // Ping reply + next_msg = tm_rx.try_recv(); + assert!(next_msg.is_ok()); + tm_addr = next_msg.unwrap(); + let tm_raw = tm_pool.read(&tm_addr).unwrap(); + // Is generated with CDS short timestamp. + let (tm, _) = PusTm::from_bytes(&tm_raw, 7).unwrap(); + assert_eq!(tm.service(), 17); + assert_eq!(tm.subservice(), 2); + assert!(tm.user_data().is_none()); + + // TM completion + next_msg = tm_rx.try_recv(); + assert!(next_msg.is_ok()); + tm_addr = next_msg.unwrap(); + let tm_raw = tm_pool.read(&tm_addr).unwrap(); + // Is generated with CDS short timestamp. 
+ let (tm, _) = PusTm::from_bytes(&tm_raw, 7).unwrap(); + assert_eq!(tm.service(), 1); + assert_eq!(tm.subservice(), 7); + let req_id = RequestId::from_bytes(tm.user_data().unwrap()).unwrap(); + assert_eq!(req_id, token.req_id()); } } -- 2.43.0 From c9989bf7aa872b181d841830798ba23aad780102 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Sat, 8 Jul 2023 13:20:08 +0200 Subject: [PATCH 34/39] improve error handling --- satrs-example/Cargo.toml | 1 + satrs-example/src/pus/mod.rs | 39 ++++++++++++----------------- satrs-example/src/tmtc.rs | 48 +++++++----------------------------- 3 files changed, 26 insertions(+), 62 deletions(-) diff --git a/satrs-example/Cargo.toml b/satrs-example/Cargo.toml index a5c559b..34d8491 100644 --- a/satrs-example/Cargo.toml +++ b/satrs-example/Cargo.toml @@ -14,6 +14,7 @@ delegate = "0.10" zerocopy = "0.6" csv = "1" num_enum = "0.6" +thiserror = "1" [dependencies.satrs-core] path = "../satrs-core" diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index fbb68ab..0a879f8 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -1,7 +1,8 @@ use crate::tmtc::MpscStoreAndSendError; +use log::warn; use satrs_core::pool::StoreAddr; use satrs_core::pus::verification::{FailParams, StdVerifReporterWithSender}; -use satrs_core::pus::AcceptedTc; +use satrs_core::pus::{AcceptedTc, PusPacketHandlerResult}; use satrs_core::spacepackets::ecss::PusServiceId; use satrs_core::spacepackets::tc::PusTc; use satrs_core::spacepackets::time::cds::TimeProvider; @@ -72,7 +73,7 @@ impl PusReceiver { store_addr: StoreAddr, service: u8, pus_tc: &PusTc, - ) -> Result<(), MpscStoreAndSendError> { + ) -> Result { let init_token = self.verif_reporter.add_tc(pus_tc); self.stamp_helper.update_from_now(); let accepted_token = self @@ -83,43 +84,35 @@ impl PusReceiver { match service { Ok(standard_service) => match standard_service { PusServiceId::Test => { - let res = self - .pus_router + self.pus_router .test_service_receiver - .send((store_addr, accepted_token)); - match res { - Ok(_) => {} - Err(e) => { - println!("Error {e}") - } - } + .send((store_addr, accepted_token))?; } PusServiceId::Housekeeping => self .pus_router .hk_service_receiver - .send((store_addr, accepted_token)) - .unwrap(), + .send((store_addr, accepted_token))?, PusServiceId::Event => self .pus_router .event_service_receiver - .send((store_addr, accepted_token)) - .unwrap(), + .send((store_addr, accepted_token))?, PusServiceId::Scheduling => self .pus_router .sched_service_receiver - .send((store_addr, accepted_token)) - .unwrap(), - _ => self - .verif_reporter - .start_failure( + .send((store_addr, accepted_token))?, + _ => { + let result = self.verif_reporter.start_failure( accepted_token, FailParams::new( Some(self.stamp_helper.stamp()), &tmtc_err::PUS_SERVICE_NOT_IMPLEMENTED, Some(&[standard_service as u8]), ), - ) - .expect("Start failure verification failed"), + ); + if result.is_err() { + warn!("Sending verification failure failed"); + } + } }, Err(e) => { if let Ok(custom_service) = CustomPusServiceId::try_from(e.number) { @@ -143,6 +136,6 @@ impl PusReceiver { } } } - Ok(()) + Ok(PusPacketHandlerResult::RequestHandled) } } diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index 3adc4ea..8400970 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -1,16 +1,16 @@ use log::info; use satrs_core::hal::host::udp_server::{ReceiveResult, UdpTcServer}; -use std::error::Error; -use std::fmt::{Display, Formatter}; use std::net::SocketAddr; use 
std::sync::mpsc::{Receiver, SendError, Sender, TryRecvError}; use std::thread; use std::time::Duration; +use thiserror::Error; use crate::ccsds::CcsdsReceiver; use crate::pus::{PusReceiver, PusTcMpscRouter}; use satrs_core::pool::{SharedPool, StoreAddr, StoreError}; use satrs_core::pus::verification::StdVerifReporterWithSender; +use satrs_core::pus::AcceptedTc; use satrs_core::spacepackets::ecss::{PusPacket, SerializablePusPacket}; use satrs_core::spacepackets::tc::PusTc; use satrs_core::spacepackets::SpHeader; @@ -37,44 +37,14 @@ impl TcArgs { } } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Error)] pub enum MpscStoreAndSendError { - StoreError(StoreError), - SendError(SendError), -} - -impl Display for MpscStoreAndSendError { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - MpscStoreAndSendError::StoreError(s) => { - write!(f, "store error {s}") - } - MpscStoreAndSendError::SendError(s) => { - write!(f, "send error {s}") - } - } - } -} - -impl Error for MpscStoreAndSendError { - fn source(&self) -> Option<&(dyn Error + 'static)> { - match self { - MpscStoreAndSendError::StoreError(s) => Some(s), - MpscStoreAndSendError::SendError(s) => Some(s), - } - } -} - -impl From for MpscStoreAndSendError { - fn from(value: StoreError) -> Self { - Self::StoreError(value) - } -} - -impl From> for MpscStoreAndSendError { - fn from(value: SendError) -> Self { - Self::SendError(value) - } + #[error("Store error: {0}")] + Store(#[from] StoreError), + #[error("TC send error: {0}")] + TcSend(#[from] SendError), + #[error("TMTC send error: {0}")] + TmTcSend(#[from] SendError), } #[derive(Clone)] -- 2.43.0 From 4613aaaf2ccbd91d1a3ac780a8413f6690bc7606 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Sat, 8 Jul 2023 13:24:43 +0200 Subject: [PATCH 35/39] use logger --- satrs-example/src/tmtc.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index 8400970..62df0ca 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -1,4 +1,4 @@ -use log::info; +use log::{info, warn}; use satrs_core::hal::host::udp_server::{ReceiveResult, UdpTcServer}; use std::net::SocketAddr; use std::sync::mpsc::{Receiver, SendError, Sender, TryRecvError}; @@ -118,7 +118,7 @@ pub fn core_tmtc_task( let ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver)); let udp_tc_server = UdpTcServer::new(socket_addr, 2048, Box::new(ccsds_distributor)) - .expect("Creating UDP TMTC server failed"); + .expect("creating UDP TMTC server failed"); let mut udp_tmtc_server = UdpTmtcServer { udp_tc_server, @@ -163,14 +163,14 @@ fn core_tmtc_loop( .ok(); } Err(e) => { - println!("error creating PUS TC from raw data: {e}"); - println!("raw data: {tc_buf:x?}"); + warn!("error creating PUS TC from raw data: {e}"); + warn!("raw data: {tc_buf:x?}"); } } } Err(e) => { if let TryRecvError::Disconnected = e { - println!("tmtc thread: sender disconnected") + warn!("tmtc thread: sender disconnected") } } } @@ -185,16 +185,16 @@ fn poll_tc_server(udp_tmtc_server: &mut UdpTmtcServer) -> bool { Err(e) => match e { ReceiveResult::ReceiverError(e) => match e { CcsdsError::ByteConversionError(e) => { - println!("Got packet error: {e:?}"); + warn!("packet error: {e:?}"); true } - CcsdsError::CustomError(_) => { - println!("Unknown receiver error"); + CcsdsError::CustomError(e) => { + warn!("mpsc store and send error {e:?}"); true } }, ReceiveResult::IoError(e) => { - println!("IO error 
{e}"); + warn!("IO error {e}"); false } ReceiveResult::NothingReceived => false, -- 2.43.0 From c485fbd21dced328b8eadfd3b51efc6a48dc38c3 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Sat, 8 Jul 2023 13:26:49 +0200 Subject: [PATCH 36/39] get rid of some more panics --- satrs-example/src/tmtc.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index 62df0ca..6a4fb2f 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -217,10 +217,9 @@ fn core_tm_handling(udp_tmtc_server: &mut UdpTmtcServer, recv_addr: &SocketAddr) } else { info!("Sending PUS TM"); } - udp_tmtc_server - .udp_tc_server - .socket - .send_to(buf, recv_addr) - .expect("sending TM failed"); + let result = udp_tmtc_server.udp_tc_server.socket.send_to(buf, recv_addr); + if let Err(e) = result { + warn!("Sending TM with UDP socket failed: {e}") + } } } -- 2.43.0 From 0b63fa6a230de95fba1df81be146e3e5333a0bd1 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Sat, 8 Jul 2023 13:28:53 +0200 Subject: [PATCH 37/39] replace some more expects --- satrs-example/src/tmtc.rs | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index 6a4fb2f..ddc7f46 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -204,12 +204,21 @@ fn poll_tc_server(udp_tmtc_server: &mut UdpTmtcServer) -> bool { fn core_tm_handling(udp_tmtc_server: &mut UdpTmtcServer, recv_addr: &SocketAddr) { while let Ok(addr) = udp_tmtc_server.tm_rx.try_recv() { - let mut store_lock = udp_tmtc_server + let store_lock = udp_tmtc_server .tm_store - .write() - .expect("locking TM store failed"); + .write(); + if store_lock.is_err() { + warn!("Locking TM store failed"); + continue; + } + let mut store_lock = store_lock.unwrap(); let pg = store_lock.read_with_guard(addr); - let buf = pg.read().expect("error reading TM pool data"); + let read_res = pg.read(); + if read_res.is_err() { + warn!("Error reading TM pool data"); + continue; + } + let buf = read_res.unwrap(); if buf.len() > 9 { let service = buf[7]; let subservice = buf[8]; -- 2.43.0 From ea6971d9b318f45afa8610ccddd65cc9c0de15ca Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Sat, 8 Jul 2023 13:37:27 +0200 Subject: [PATCH 38/39] improve error enum --- satrs-core/src/pus/event_srv.rs | 2 +- satrs-core/src/pus/mod.rs | 8 ++++---- satrs-core/src/pus/test.rs | 6 +++--- satrs-example/src/tmtc.rs | 4 +--- 4 files changed, 9 insertions(+), 11 deletions(-) diff --git a/satrs-core/src/pus/event_srv.rs b/satrs-core/src/pus/event_srv.rs index 37ccd3b..0ff81ee 100644 --- a/satrs-core/src/pus/event_srv.rs +++ b/satrs-core/src/pus/event_srv.rs @@ -79,7 +79,7 @@ impl PusServiceHandler for PusService5EventHandler { .verification_handler .borrow_mut() .start_success(token, Some(&stamp)) - .map_err(|_| PartialPusHandlingError::VerificationError); + .map_err(|_| PartialPusHandlingError::Verification); let partial_error = start_token.clone().err(); let mut token: TcStateToken = token.into(); if let Ok(start_token) = start_token { diff --git a/satrs-core/src/pus/mod.rs b/satrs-core/src/pus/mod.rs index b8d9ec4..0ec74be 100644 --- a/satrs-core/src/pus/mod.rs +++ b/satrs-core/src/pus/mod.rs @@ -316,11 +316,11 @@ pub mod std_mod { #[derive(Debug, Clone, Error)] pub enum PartialPusHandlingError { #[error("Generic timestamp generation error")] - TimeError(StdTimestampError), + Time(StdTimestampError), #[error("Error sending telemetry: {0}")] - 
TmSendError(String), + TmSend(String), #[error("Error sending verification message")] - VerificationError, + Verification, } /// Generic result type for handlers which can process PUS packets. @@ -386,7 +386,7 @@ pub mod std_mod { ) -> [u8; 7] { let mut time_stamp: [u8; 7] = [0; 7]; let time_provider = - TimeProvider::from_now_with_u16_days().map_err(PartialPusHandlingError::TimeError); + TimeProvider::from_now_with_u16_days().map_err(PartialPusHandlingError::Time); if let Ok(time_provider) = time_provider { time_provider.write_to_bytes(&mut time_stamp).unwrap(); } else { diff --git a/satrs-core/src/pus/test.rs b/satrs-core/src/pus/test.rs index 1c8a184..ef12ab4 100644 --- a/satrs-core/src/pus/test.rs +++ b/satrs-core/src/pus/test.rs @@ -66,7 +66,7 @@ impl PusServiceHandler for PusService17TestHandler { .verification_handler .get_mut() .start_success(token, Some(&time_stamp)) - .map_err(|_| PartialPusHandlingError::VerificationError); + .map_err(|_| PartialPusHandlingError::Verification); let start_token = if let Ok(result) = result { Some(result) } else { @@ -82,7 +82,7 @@ impl PusServiceHandler for PusService17TestHandler { .psb .tm_tx .send(addr) - .map_err(|e| PartialPusHandlingError::TmSendError(format!("{e}"))) + .map_err(|e| PartialPusHandlingError::TmSend(format!("{e}"))) { partial_error = Some(e); } @@ -94,7 +94,7 @@ impl PusServiceHandler for PusService17TestHandler { .completion_success(start_token, Some(&time_stamp)) .is_err() { - partial_error = Some(PartialPusHandlingError::VerificationError) + partial_error = Some(PartialPusHandlingError::Verification) } } if let Some(partial_error) = partial_error { diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index ddc7f46..864b40b 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -204,9 +204,7 @@ fn poll_tc_server(udp_tmtc_server: &mut UdpTmtcServer) -> bool { fn core_tm_handling(udp_tmtc_server: &mut UdpTmtcServer, recv_addr: &SocketAddr) { while let Ok(addr) = udp_tmtc_server.tm_rx.try_recv() { - let store_lock = udp_tmtc_server - .tm_store - .write(); + let store_lock = udp_tmtc_server.tm_store.write(); if store_lock.is_err() { warn!("Locking TM store failed"); continue; -- 2.43.0 From 1f2bd0fd54195a469841abab5e7b631e65e59447 Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Sat, 8 Jul 2023 14:57:11 +0200 Subject: [PATCH 39/39] improved and unified TM sending API --- satrs-core/src/pool.rs | 10 + satrs-core/src/pus/event.rs | 50 ++- satrs-core/src/pus/mod.rs | 141 ++++--- satrs-core/src/pus/test.rs | 6 +- satrs-core/src/pus/verification.rs | 531 ++++++++++++++------------- satrs-core/tests/pus_verification.rs | 11 +- satrs-example/src/main.rs | 10 +- 7 files changed, 419 insertions(+), 340 deletions(-) diff --git a/satrs-core/src/pool.rs b/satrs-core/src/pool.rs index afc4f73..d7253d5 100644 --- a/satrs-core/src/pool.rs +++ b/satrs-core/src/pool.rs @@ -138,6 +138,16 @@ pub struct StoreAddr { pub(crate) packet_idx: NumBlocks, } +impl Display for StoreAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!( + f, + "StoreAddr(pool index: {}, packet index: {})", + self.pool_idx, self.packet_idx + ) + } +} + impl StoreAddr { pub const INVALID_ADDR: u32 = 0xFFFFFFFF; diff --git a/satrs-core/src/pus/event.rs b/satrs-core/src/pus/event.rs index 9bea69c..57f378a 100644 --- a/satrs-core/src/pus/event.rs +++ b/satrs-core/src/pus/event.rs @@ -110,7 +110,7 @@ impl EventReporterBase { ) -> Result<(), EcssTmtcErrorWithSend> { let tm = self.generate_generic_event_tm(buf, subservice, 
time_stamp, event_id, aux_data)?; sender - .send_tm(tm) + .send_tm(tm.into()) .map_err(|e| EcssTmtcErrorWithSend::SendError(e))?; self.msg_count += 1; Ok(()) @@ -243,9 +243,10 @@ mod tests { use super::*; use crate::events::{EventU32, Severity}; use crate::pus::tests::CommonTmInfo; - use crate::pus::EcssSender; + use crate::pus::{EcssSender, PusTmWrapper}; use crate::SenderId; use spacepackets::ByteConversionError; + use std::cell::RefCell; use std::collections::VecDeque; use std::vec::Vec; @@ -264,7 +265,7 @@ mod tests { #[derive(Default, Clone)] struct TestSender { - pub service_queue: VecDeque, + pub service_queue: RefCell>, } impl EcssSender for TestSender { @@ -276,21 +277,29 @@ mod tests { impl EcssTmSenderCore for TestSender { type Error = (); - fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error> { - assert!(tm.source_data().is_some()); - let src_data = tm.source_data().unwrap(); - assert!(src_data.len() >= 4); - let event = EventU32::from(u32::from_be_bytes(src_data[0..4].try_into().unwrap())); - let mut aux_data = Vec::new(); - if src_data.len() > 4 { - aux_data.extend_from_slice(&src_data[4..]); + fn send_tm(&self, tm: PusTmWrapper) -> Result<(), Self::Error> { + match tm { + PusTmWrapper::InStore(_) => { + panic!("TestSender: unexpected call with address"); + } + PusTmWrapper::Direct(tm) => { + assert!(tm.source_data().is_some()); + let src_data = tm.source_data().unwrap(); + assert!(src_data.len() >= 4); + let event = + EventU32::from(u32::from_be_bytes(src_data[0..4].try_into().unwrap())); + let mut aux_data = Vec::new(); + if src_data.len() > 4 { + aux_data.extend_from_slice(&src_data[4..]); + } + self.service_queue.borrow_mut().push_back(TmInfo { + common: CommonTmInfo::new_from_tm(&tm), + event, + aux_data, + }); + Ok(()) + } } - self.service_queue.push_back(TmInfo { - common: CommonTmInfo::new_from_tm(&tm), - event, - aux_data, - }); - Ok(()) } } @@ -359,8 +368,9 @@ mod tests { severity, error_data, ); - assert_eq!(sender.service_queue.len(), 1); - let tm_info = sender.service_queue.pop_front().unwrap(); + let mut service_queue = sender.service_queue.borrow_mut(); + assert_eq!(service_queue.len(), 1); + let tm_info = service_queue.pop_front().unwrap(); assert_eq!( tm_info.common.subservice, severity_to_subservice(severity) as u8 @@ -417,7 +427,7 @@ mod tests { let err = reporter.event_info(sender, &time_stamp_empty, event, None); assert!(err.is_err()); let err = err.unwrap_err(); - if let EcssTmtcErrorWithSend::EcssTmtcError(EcssTmtcError::ByteConversionError( + if let EcssTmtcErrorWithSend::EcssTmtcError(EcssTmtcError::ByteConversion( ByteConversionError::ToSliceTooSmall(missmatch), )) = err { diff --git a/satrs-core/src/pus/mod.rs b/satrs-core/src/pus/mod.rs index 0ec74be..cc5f9ce 100644 --- a/satrs-core/src/pus/mod.rs +++ b/satrs-core/src/pus/mod.rs @@ -2,6 +2,8 @@ //! //! This module contains structures to make working with the PUS C standard easier. //! The satrs-example application contains various usage examples of these components. 
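// Illustrative sketch (not part of the patch): the sender error types reworked below
// all follow the same thiserror pattern, where #[error(...)] derives Display and
// #[from] derives the From impls which make `?` conversions work. `DemoError` and
// `forward_tm` are hypothetical.
use crate::pool::{StoreAddr, StoreError};
use std::sync::mpsc;
use thiserror::Error;

#[derive(Debug, Error)]
enum DemoError {
    #[error("store error: {0}")]
    Store(#[from] StoreError),
    #[error("TM send error: {0}")]
    Send(#[from] mpsc::SendError<StoreAddr>),
}

fn forward_tm(tx: &mpsc::Sender<StoreAddr>, addr: StoreAddr) -> Result<(), DemoError> {
    // SendError<StoreAddr> converts into DemoError::Send automatically via #[from].
    tx.send(addr)?;
    Ok(())
}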
+use crate::pus::verification::TcStateToken; +use crate::SenderId; #[cfg(feature = "alloc")] use downcast_rs::{impl_downcast, Downcast}; #[cfg(feature = "alloc")] @@ -26,10 +28,28 @@ pub mod verification; #[cfg(feature = "alloc")] pub use alloc_mod::*; -use crate::SenderId; +use crate::pool::StoreAddr; #[cfg(feature = "std")] pub use std_mod::*; +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum PusTmWrapper<'tm> { + InStore(StoreAddr), + Direct(PusTm<'tm>), +} + +impl From for PusTmWrapper<'_> { + fn from(value: StoreAddr) -> Self { + Self::InStore(value) + } +} + +impl<'tm> From> for PusTmWrapper<'tm> { + fn from(value: PusTm<'tm>) -> Self { + Self::Direct(value) + } +} + #[derive(Debug, Clone)] pub enum EcssTmtcErrorWithSend { /// Errors related to sending the telemetry to a TMTC recipient @@ -47,22 +67,22 @@ impl From for EcssTmtcErrorWithSend { #[derive(Debug, Clone)] pub enum EcssTmtcError { /// Errors related to the time stamp format of the telemetry - TimestampError(TimestampError), + Timestamp(TimestampError), /// Errors related to byte conversion, for example insufficient buffer size for given data - ByteConversionError(ByteConversionError), + ByteConversion(ByteConversionError), /// Errors related to PUS packet format - PusError(PusError), + Pus(PusError), } impl From for EcssTmtcError { fn from(e: PusError) -> Self { - EcssTmtcError::PusError(e) + EcssTmtcError::Pus(e) } } impl From for EcssTmtcError { fn from(e: ByteConversionError) -> Self { - EcssTmtcError::ByteConversionError(e) + EcssTmtcError::ByteConversion(e) } } @@ -79,16 +99,17 @@ pub trait EcssSender: Send { pub trait EcssTmSenderCore: EcssSender { type Error; - fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error>; + fn send_tm(&self, tm: PusTmWrapper) -> Result<(), Self::Error>; } /// Generic trait for a user supplied sender object. /// -/// This sender object is responsible for sending PUS telecommands to a TC recipient. +/// This sender object is responsible for sending PUS telecommands to a TC recipient. Each +/// telecommand can optionally have a token which contains its verification state. 
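// Illustrative sketch (not part of the patch): a custom sender against the unified
// API above. Both PusTmWrapper variants arrive through a single send_tm, which now
// takes &self, so stateful senders need interior mutability (compare the RefCell
// based TestSender in the event service tests earlier). `CountingSender` is
// hypothetical.
struct CountingSender {
    id: SenderId,
    in_store_count: core::cell::Cell<u32>,
    direct_count: core::cell::Cell<u32>,
}

impl EcssSender for CountingSender {
    fn id(&self) -> SenderId {
        self.id
    }
    fn name(&self) -> &'static str {
        "counting_sender"
    }
}

impl EcssTmSenderCore for CountingSender {
    type Error = ();

    fn send_tm(&self, tm: PusTmWrapper) -> Result<(), Self::Error> {
        match tm {
            // TM which already lives in the shared pool, passed by store address.
            PusTmWrapper::InStore(_) => self.in_store_count.set(self.in_store_count.get() + 1),
            // Freshly constructed TM, passed by value.
            PusTmWrapper::Direct(_) => self.direct_count.set(self.direct_count.get() + 1),
        }
        Ok(())
    }
}

// Thanks to the From impls, call sites stay uniform:
//     sender.send_tm(store_addr.into())?;
//     sender.send_tm(pus_tm.into())?;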
pub trait EcssTcSenderCore: EcssSender { type Error; - fn send_tc(&mut self, tc: PusTc) -> Result<(), Self::Error>; + fn send_tc(&self, tc: PusTc, token: Option) -> Result<(), Self::Error>; } #[cfg(feature = "alloc")] @@ -142,15 +163,13 @@ pub mod std_mod { use crate::pus::verification::{ StdVerifReporterWithSender, TcStateAccepted, VerificationToken, }; - use crate::pus::{EcssSender, EcssTcSenderCore, EcssTmSenderCore}; + use crate::pus::{EcssSender, EcssTmSenderCore, PusTmWrapper}; use crate::tmtc::tm_helper::SharedTmStore; use crate::SenderId; use alloc::vec::Vec; use spacepackets::ecss::{PusError, SerializablePusPacket}; - use spacepackets::tc::PusTc; use spacepackets::time::cds::TimeProvider; use spacepackets::time::{StdTimestampError, TimeWriter}; - use spacepackets::tm::PusTm; use std::cell::RefCell; use std::format; use std::string::String; @@ -158,21 +177,21 @@ pub mod std_mod { use thiserror::Error; #[derive(Debug, Clone, Error)] - pub enum MpscPusInStoreSendError { + pub enum MpscTmInStoreSenderError { #[error("RwGuard lock error")] - LockError, + StoreLock, #[error("Generic PUS error: {0}")] - PusError(#[from] PusError), + Pus(#[from] PusError), #[error("Generic store error: {0}")] - StoreError(#[from] StoreError), - #[error("Generic send error: {0}")] - SendError(#[from] mpsc::SendError), + Store(#[from] StoreError), + #[error("MPSC channel send error: {0}")] + Send(#[from] mpsc::SendError), #[error("RX handle has disconnected")] - RxDisconnected(StoreAddr), + RxDisconnected, } #[derive(Clone)] - pub struct MpscTmtcInStoreSender { + pub struct MpscTmInStoreSender { id: SenderId, name: &'static str, store_helper: SharedPool, @@ -180,7 +199,7 @@ pub mod std_mod { pub ignore_poison_errors: bool, } - impl EcssSender for MpscTmtcInStoreSender { + impl EcssSender for MpscTmInStoreSender { fn id(&self) -> SenderId { self.id } @@ -190,11 +209,11 @@ pub mod std_mod { } } - impl MpscTmtcInStoreSender { - pub fn send_tmtc( - &mut self, + impl MpscTmInStoreSender { + pub fn send_direct_tm( + &self, tmtc: impl SerializablePusPacket, - ) -> Result<(), MpscPusInStoreSendError> { + ) -> Result<(), MpscTmInStoreSenderError> { let operation = |mut store: RwLockWriteGuard| { let (addr, slice) = store.free_element(tmtc.len_packed())?; tmtc.write_to_bytes(slice)?; @@ -207,30 +226,28 @@ pub mod std_mod { if self.ignore_poison_errors { operation(e.into_inner()) } else { - Err(MpscPusInStoreSendError::LockError) + Err(MpscTmInStoreSenderError::StoreLock) } } } } } - impl EcssTmSenderCore for MpscTmtcInStoreSender { - type Error = MpscPusInStoreSendError; + impl EcssTmSenderCore for MpscTmInStoreSender { + type Error = MpscTmInStoreSenderError; - fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error> { - self.send_tmtc(tm) + fn send_tm(&self, tm: PusTmWrapper) -> Result<(), Self::Error> { + match tm { + PusTmWrapper::InStore(addr) => self + .sender + .send(addr) + .map_err(MpscTmInStoreSenderError::Send), + PusTmWrapper::Direct(tm) => self.send_direct_tm(tm), + } } } - impl EcssTcSenderCore for MpscTmtcInStoreSender { - type Error = MpscPusInStoreSendError; - - fn send_tc(&mut self, tc: PusTc) -> Result<(), Self::Error> { - self.send_tmtc(tc) - } - } - - impl MpscTmtcInStoreSender { + impl MpscTmInStoreSender { pub fn new( id: SenderId, name: &'static str, @@ -247,13 +264,23 @@ pub mod std_mod { } } - #[derive(Debug, Clone)] - pub enum MpscAsVecSenderError { - PusError(PusError), - SendError(mpsc::SendError>), + #[derive(Debug, Clone, Error)] + pub enum MpscTmAsVecSenderError { + #[error("Generic 
PUS error: {0}")]
+        Pus(#[from] PusError),
+        #[error("MPSC channel send error: {0}")]
+        Send(#[from] mpsc::SendError<Vec<u8>>),
+        #[error("can not handle store addresses")]
+        CantSendAddr(StoreAddr),
+        #[error("RX handle has disconnected")]
+        RxDisconnected,
     }
 
-    #[derive(Debug, Clone)]
+    /// This class can be used if frequent heap allocations during run-time are not an issue.
+    /// PUS TM packets will be sent around as [Vec]s. Please note that the current implementation
+    /// of this class cannot deal with store addresses, so it is assumed that it is always
+    /// going to be called with direct packets.
+    #[derive(Clone)]
     pub struct MpscTmAsVecSender {
         id: SenderId,
         sender: mpsc::Sender<Vec<u8>>,
@@ -276,16 +303,21 @@ pub mod std_mod {
     }
 
     impl EcssTmSenderCore for MpscTmAsVecSender {
-        type Error = MpscAsVecSenderError;
+        type Error = MpscTmAsVecSenderError;
 
-        fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error> {
-            let mut vec = Vec::new();
-            tm.append_to_vec(&mut vec)
-                .map_err(MpscAsVecSenderError::PusError)?;
-            self.sender
-                .send(vec)
-                .map_err(MpscAsVecSenderError::SendError)?;
-            Ok(())
+        fn send_tm(&self, tm: PusTmWrapper) -> Result<(), Self::Error> {
+            match tm {
+                PusTmWrapper::InStore(addr) => Err(MpscTmAsVecSenderError::CantSendAddr(addr)),
+                PusTmWrapper::Direct(tm) => {
+                    let mut vec = Vec::new();
+                    tm.append_to_vec(&mut vec)
+                        .map_err(MpscTmAsVecSenderError::Pus)?;
+                    self.sender
+                        .send(vec)
+                        .map_err(MpscTmAsVecSenderError::Send)?;
+                    Ok(())
+                }
+            }
         }
     }
 
@@ -388,6 +420,7 @@ pub mod std_mod {
             let time_provider =
                 TimeProvider::from_now_with_u16_days().map_err(PartialPusHandlingError::Time);
             if let Ok(time_provider) = time_provider {
+                // Can't fail, we have a buffer with the exact required size.
                 time_provider.write_to_bytes(&mut time_stamp).unwrap();
             } else {
                 *partial_error = Some(time_provider.unwrap_err());
@@ -439,7 +472,7 @@ pub mod std_mod {
 
     pub(crate) fn source_buffer_large_enough(cap: usize, len: usize) -> Result<(), EcssTmtcError> {
         if len > cap {
-            return Err(EcssTmtcError::ByteConversionError(
+            return Err(EcssTmtcError::ByteConversion(
                 ByteConversionError::ToSliceTooSmall(SizeMissmatch {
                     found: cap,
                     expected: len,
diff --git a/satrs-core/src/pus/test.rs b/satrs-core/src/pus/test.rs
index ef12ab4..0e596fa 100644
--- a/satrs-core/src/pus/test.rs
+++ b/satrs-core/src/pus/test.rs
@@ -116,9 +116,9 @@ mod tests {
     use crate::pool::{LocalPool, PoolCfg, SharedPool};
     use crate::pus::test::PusService17TestHandler;
     use crate::pus::verification::{
-        MpscVerifSender, RequestId, StdVerifReporterWithSender, VerificationReporterCfg,
+        RequestId, StdVerifReporterWithSender, VerificationReporterCfg,
     };
-    use crate::pus::PusServiceHandler;
+    use crate::pus::{MpscTmInStoreSender, PusServiceHandler};
     use crate::tmtc::tm_helper::SharedTmStore;
     use spacepackets::ecss::{PusPacket, SerializablePusPacket};
     use spacepackets::tc::{PusTc, PusTcSecondaryHeader};
@@ -141,7 +141,7 @@ mod tests {
         let shared_tm_store = SharedTmStore::new(tm_pool_shared.clone());
         let (test_srv_tx, test_srv_rx) = mpsc::channel();
         let (tm_tx, tm_rx) = mpsc::channel();
-        let verif_sender = MpscVerifSender::new(
+        let verif_sender = MpscTmInStoreSender::new(
             0,
             "verif_sender",
             shared_tm_store.backing_pool(),
diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs
index 7d6b136..445f10b 100644
--- a/satrs-core/src/pus/verification.rs
+++ b/satrs-core/src/pus/verification.rs
@@ -16,8 +16,9 @@
 //! use std::sync::{Arc, mpsc, RwLock};
 //! use std::time::Duration;
 //! 
use satrs_core::pool::{LocalPool, PoolCfg, PoolProvider, SharedPool}; -//! use satrs_core::pus::verification::{MpscVerifSender, VerificationReporterCfg, VerificationReporterWithSender}; +//! use satrs_core::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender}; //! use satrs_core::seq_count::SeqCountProviderSimple; +//! use satrs_core::pus::MpscTmInStoreSender; //! use spacepackets::ecss::PusPacket; //! use spacepackets::SpHeader; //! use spacepackets::tc::{PusTc, PusTcSecondaryHeader}; @@ -29,7 +30,7 @@ //! let pool_cfg = PoolCfg::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]); //! let shared_tm_pool: SharedPool = Arc::new(RwLock::new(Box::new(LocalPool::new(pool_cfg.clone())))); //! let (verif_tx, verif_rx) = mpsc::channel(); -//! let sender = MpscVerifSender::new(0, "Test Sender", shared_tm_pool.clone(), verif_tx); +//! let sender = MpscTmInStoreSender::new(0, "Test Sender", shared_tm_pool.clone(), verif_tx); //! let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); //! let mut reporter = VerificationReporterWithSender::new(&cfg , Box::new(sender)); //! @@ -96,11 +97,8 @@ pub use spacepackets::ecss::verification::*; pub use alloc_mod::{ VerificationReporter, VerificationReporterCfg, VerificationReporterWithSender, }; - -#[cfg(all(feature = "crossbeam", feature = "std"))] -pub use stdmod::CrossbeamVerifSender; #[cfg(feature = "std")] -pub use stdmod::{MpscVerifSender, SharedStdVerifReporterWithSender, StdVerifReporterWithSender}; +pub use std_mod::*; /// This is a request identifier as specified in 5.4.11.2 c. of the PUS standard. /// @@ -523,7 +521,7 @@ impl VerificationReporterCore { ) -> Result, VerificationOrSendErrorWithToken> { sender - .send_tm(sendable.pus_tm.take().unwrap()) + .send_tm(sendable.pus_tm.take().unwrap().into()) .map_err(|e| { VerificationOrSendErrorWithToken( EcssTmtcErrorWithSend::SendError(e), @@ -539,7 +537,7 @@ impl VerificationReporterCore { sender: &mut (impl EcssTmSenderCore + ?Sized), ) -> Result<(), VerificationOrSendErrorWithToken> { sender - .send_tm(sendable.pus_tm.take().unwrap()) + .send_tm(sendable.pus_tm.take().unwrap().into()) .map_err(|e| { VerificationOrSendErrorWithToken( EcssTmtcErrorWithSend::SendError(e), @@ -606,7 +604,7 @@ impl VerificationReporterCore { VerificationOrSendErrorWithToken, > { sender - .send_tm(sendable.pus_tm.take().unwrap()) + .send_tm(sendable.pus_tm.take().unwrap().into()) .map_err(|e| { VerificationOrSendErrorWithToken( EcssTmtcErrorWithSend::SendError(e), @@ -648,7 +646,7 @@ impl VerificationReporterCore { sender: &mut (impl EcssTmSenderCore + ?Sized), ) -> Result<(), VerificationOrSendErrorWithToken> { sender - .send_tm(sendable.pus_tm.take().unwrap()) + .send_tm(sendable.pus_tm.take().unwrap().into()) .map_err(|e| { VerificationOrSendErrorWithToken( EcssTmtcErrorWithSend::SendError(e), @@ -771,7 +769,7 @@ impl VerificationReporterCore { sender: &mut (impl EcssTmSenderCore + ?Sized), ) -> Result<(), VerificationOrSendErrorWithToken> { sender - .send_tm(sendable.pus_tm.take().unwrap()) + .send_tm(sendable.pus_tm.take().unwrap().into()) .map_err(|e| { VerificationOrSendErrorWithToken( EcssTmtcErrorWithSend::SendError(e), @@ -788,7 +786,7 @@ impl VerificationReporterCore { sender: &mut (impl EcssTmSenderCore + ?Sized), ) -> Result<(), VerificationOrSendErrorWithToken> { sender - .send_tm(sendable.pus_tm.take().unwrap()) + .send_tm(sendable.pus_tm.take().unwrap().into()) .map_err(|e| { VerificationOrSendErrorWithToken( EcssTmtcErrorWithSend::SendError(e), @@ -1329,180 +1327,197 
@@ mod alloc_mod { } #[cfg(feature = "std")] -mod stdmod { - use super::alloc_mod::VerificationReporterWithSender; - use super::*; - use crate::pool::{ShareablePoolProvider, SharedPool, StoreAddr}; - use crate::pus::{EcssSender, MpscPusInStoreSendError}; - use crate::SenderId; - use delegate::delegate; - use spacepackets::ecss::SerializablePusPacket; - use spacepackets::tm::PusTm; - use std::sync::{mpsc, Arc, Mutex, RwLockWriteGuard}; +mod std_mod { + use crate::pus::verification::VerificationReporterWithSender; + use crate::pus::MpscTmInStoreSenderError; + use std::sync::{Arc, Mutex}; - pub type StdVerifReporterWithSender = VerificationReporterWithSender; + // use super::alloc_mod::VerificationReporterWithSender; + // use super::*; + // use crate::pool::{ShareablePoolProvider, SharedPool, StoreAddr}; + // use crate::pus::{EcssSender, MpscPusInStoreSendError, PusTmWrapper}; + // use crate::SenderId; + // use delegate::delegate; + // use spacepackets::ecss::SerializablePusPacket; + // use std::sync::{mpsc, Arc, Mutex, RwLockWriteGuard}; + // + pub type StdVerifReporterWithSender = VerificationReporterWithSender; pub type SharedStdVerifReporterWithSender = Arc>; - - trait SendBackend: Send { - fn send(&self, addr: StoreAddr) -> Result<(), StoreAddr>; - } - - #[derive(Clone)] - struct StdSenderBase { - id: SenderId, - name: &'static str, - tm_store: SharedPool, - tx: S, - pub ignore_poison_error: bool, - } - - impl StdSenderBase { - pub fn new(id: SenderId, name: &'static str, tm_store: SharedPool, tx: S) -> Self { - Self { - id, - name, - tm_store, - tx, - ignore_poison_error: false, - } - } - } - - unsafe impl Sync for StdSenderBase {} - unsafe impl Send for StdSenderBase {} - - impl SendBackend for mpsc::Sender { - fn send(&self, addr: StoreAddr) -> Result<(), StoreAddr> { - self.send(addr).map_err(|_| addr) - } - } - - #[derive(Clone)] - pub struct MpscVerifSender { - base: StdSenderBase>, - } - - /// Verification sender with a [mpsc::Sender] backend. - /// It implements the [EcssTmSenderCore] trait to be used as PUS Verification TM sender. - impl MpscVerifSender { - pub fn new( - id: SenderId, - name: &'static str, - tm_store: SharedPool, - tx: mpsc::Sender, - ) -> Self { - Self { - base: StdSenderBase::new(id, name, tm_store, tx), - } - } - } - - //noinspection RsTraitImplementation - impl EcssSender for MpscVerifSender { - delegate!( - to self.base { - fn id(&self) -> SenderId; - fn name(&self) -> &'static str; - } - ); - } - - //noinspection RsTraitImplementation - impl EcssTmSenderCore for MpscVerifSender { - type Error = MpscPusInStoreSendError; - - delegate!( - to self.base { - fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error>; - } - ); - } - - impl SendBackend for crossbeam_channel::Sender { - fn send(&self, addr: StoreAddr) -> Result<(), StoreAddr> { - self.send(addr).map_err(|_| addr) - } - } - - /// Verification sender with a [crossbeam_channel::Sender] backend. 
-    /// It implements the [EcssTmSenderCore] trait to be used as PUS Verification TM sender
-    #[cfg(feature = "crossbeam")]
-    #[derive(Clone)]
-    pub struct CrossbeamVerifSender {
-        base: StdSenderBase<crossbeam_channel::Sender<StoreAddr>>,
-    }
-
-    #[cfg(feature = "crossbeam")]
-    impl CrossbeamVerifSender {
-        pub fn new(
-            id: SenderId,
-            name: &'static str,
-            tm_store: SharedPool,
-            tx: crossbeam_channel::Sender<StoreAddr>,
-        ) -> Self {
-            Self {
-                base: StdSenderBase::new(id, name, tm_store, tx),
-            }
-        }
-    }
-
-    //noinspection RsTraitImplementation
-    #[cfg(feature = "crossbeam")]
-    impl EcssSender for CrossbeamVerifSender {
-        delegate!(
-            to self.base {
-                fn id(&self) -> SenderId;
-                fn name(&self) -> &'static str;
-            }
-        );
-    }
-
-    //noinspection RsTraitImplementation
-    #[cfg(feature = "crossbeam")]
-    impl EcssTmSenderCore for CrossbeamVerifSender {
-        type Error = MpscPusInStoreSendError;
-
-        delegate!(
-            to self.base {
-                fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error>;
-            }
-        );
-    }
-
-    impl<S: SendBackend> EcssSender for StdSenderBase<S> {
-        fn id(&self) -> SenderId {
-            self.id
-        }
-        fn name(&self) -> &'static str {
-            self.name
-        }
-    }
-    impl<S: SendBackend> EcssTmSenderCore for StdSenderBase<S> {
-        type Error = MpscPusInStoreSendError;
-
-        fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error> {
-            let operation = |mut mg: RwLockWriteGuard<ShareablePoolProvider>| {
-                let (addr, buf) = mg.free_element(tm.len_packed())?;
-                tm.write_to_bytes(buf)
-                    .map_err(MpscPusInStoreSendError::PusError)?;
-                drop(mg);
-                self.tx
-                    .send(addr)
-                    .map_err(|_| MpscPusInStoreSendError::RxDisconnected(addr))?;
-                Ok(())
-            };
-            match self.tm_store.write() {
-                Ok(lock) => operation(lock),
-                Err(poison_error) => {
-                    if self.ignore_poison_error {
-                        operation(poison_error.into_inner())
-                    } else {
-                        Err(MpscPusInStoreSendError::LockError)
-                    }
-                }
-            }
-        }
-    }
+    //
+    // trait SendBackend: Send {
+    //     type SendError: Debug;
+    //
+    //     fn send(&self, addr: StoreAddr) -> Result<(), Self::SendError>;
+    // }
+    //
+    // #[derive(Clone)]
+    // struct StdSenderBase<S> {
+    //     id: SenderId,
+    //     name: &'static str,
+    //     tm_store: SharedPool,
+    //     tx: S,
+    //     pub ignore_poison_error: bool,
+    // }
+    //
+    // impl<S: SendBackend> StdSenderBase<S> {
+    //     pub fn new(id: SenderId, name: &'static str, tm_store: SharedPool, tx: S) -> Self {
+    //         Self {
+    //             id,
+    //             name,
+    //             tm_store,
+    //             tx,
+    //             ignore_poison_error: false,
+    //         }
+    //     }
+    // }
+    //
+    // unsafe impl<S: Sync> Sync for StdSenderBase<S> {}
+    // unsafe impl<S: Send> Send for StdSenderBase<S> {}
+    //
+    // impl SendBackend for mpsc::Sender<StoreAddr> {
+    //     type SendError = mpsc::SendError<StoreAddr>;
+    //
+    //     fn send(&self, addr: StoreAddr) -> Result<(), Self::SendError> {
+    //         self.send(addr)
+    //     }
+    // }
+    //
+    // #[derive(Clone)]
+    // pub struct MpscVerifSender {
+    //     base: StdSenderBase<mpsc::Sender<StoreAddr>>,
+    // }
+    //
+    // /// Verification sender with a [mpsc::Sender] backend.
+    // /// It implements the [EcssTmSenderCore] trait to be used as PUS Verification TM sender. 
+    // impl MpscVerifSender {
+    //     pub fn new(
+    //         id: SenderId,
+    //         name: &'static str,
+    //         tm_store: SharedPool,
+    //         tx: mpsc::Sender<StoreAddr>,
+    //     ) -> Self {
+    //         Self {
+    //             base: StdSenderBase::new(id, name, tm_store, tx),
+    //         }
+    //     }
+    // }
+    //
+    // //noinspection RsTraitImplementation
+    // impl EcssSender for MpscVerifSender {
+    //     delegate!(
+    //         to self.base {
+    //             fn id(&self) -> SenderId;
+    //             fn name(&self) -> &'static str;
+    //         }
+    //     );
+    // }
+    //
+    // //noinspection RsTraitImplementation
+    // impl EcssTmSenderCore for MpscVerifSender {
+    //     type Error = MpscPusInStoreSendError;
+    //
+    //     delegate!(
+    //         to self.base {
+    //             fn send_tm(&self, tm: PusTmWrapper) -> Result<(), Self::Error>;
+    //         }
+    //     );
+    // }
+    //
+    // impl SendBackend for crossbeam_channel::Sender<StoreAddr> {
+    //     type SendError = crossbeam_channel::SendError<StoreAddr>;
+    //
+    //     fn send(&self, addr: StoreAddr) -> Result<(), Self::SendError> {
+    //         self.send(addr)
+    //     }
+    // }
+    //
+    // /// Verification sender with a [crossbeam_channel::Sender] backend.
+    // /// It implements the [EcssTmSenderCore] trait to be used as PUS Verification TM sender
+    // #[cfg(feature = "crossbeam")]
+    // #[derive(Clone)]
+    // pub struct CrossbeamVerifSender {
+    //     base: StdSenderBase<crossbeam_channel::Sender<StoreAddr>>,
+    // }
+    //
+    // #[cfg(feature = "crossbeam")]
+    // impl CrossbeamVerifSender {
+    //     pub fn new(
+    //         id: SenderId,
+    //         name: &'static str,
+    //         tm_store: SharedPool,
+    //         tx: crossbeam_channel::Sender<StoreAddr>,
+    //     ) -> Self {
+    //         Self {
+    //             base: StdSenderBase::new(id, name, tm_store, tx),
+    //         }
+    //     }
+    // }
+    //
+    // //noinspection RsTraitImplementation
+    // #[cfg(feature = "crossbeam")]
+    // impl EcssSender for CrossbeamVerifSender {
+    //     delegate!(
+    //         to self.base {
+    //             fn id(&self) -> SenderId;
+    //             fn name(&self) -> &'static str;
+    //         }
+    //     );
+    // }
+    //
+    // //noinspection RsTraitImplementation
+    // #[cfg(feature = "crossbeam")]
+    // impl EcssTmSenderCore for CrossbeamVerifSender {
+    //     type Error = MpscPusInStoreSendError;
+    //
+    //     delegate!(
+    //         to self.base {
+    //             fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error>;
+    //         }
+    //     );
+    // }
+    //
+    // impl<S: SendBackend> EcssSender for StdSenderBase<S> {
+    //     fn id(&self) -> SenderId {
+    //         self.id
+    //     }
+    //     fn name(&self) -> &'static str {
+    //         self.name
+    //     }
+    // }
+    // impl<S: SendBackend> EcssTmSenderCore for StdSenderBase<S> {
+    //     type Error = MpscPusInStoreSendError;
+    //
+    //     fn send_tm(&self, tm: PusTmWrapper) -> Result<(), Self::Error> {
+    //         match tm {
+    //             PusTmWrapper::InStore(addr) => {
+    //                 self.tx.send(addr).unwrap();
+    //                 Ok(())
+    //             }
+    //             PusTmWrapper::Direct(tm) => {
+    //                 let operation = |mut mg: RwLockWriteGuard<ShareablePoolProvider>| {
+    //                     let (addr, buf) = mg.free_element(tm.len_packed())?;
+    //                     tm.write_to_bytes(buf)
+    //                         .map_err(MpscPusInStoreSendError::Pus)?;
+    //                     drop(mg);
+    //                     self.tx
+    //                         .send(addr)
+    //                         .map_err(|_| MpscPusInStoreSendError::RxDisconnected(addr))?;
+    //                     Ok(())
+    //                 };
+    //                 match self.tm_store.write() {
+    //                     Ok(lock) => operation(lock),
+    //                     Err(poison_error) => {
+    //                         if self.ignore_poison_error {
+    //                             operation(poison_error.into_inner())
+    //                         } else {
+    //                             Err(MpscPusInStoreSendError::StoreLock)
+    //                         }
+    //                     }
+    //                 }
+    //             }
+    //         }
+    //     }
+    // }
 }
 
 #[cfg(test)]
@@ -1510,11 +1525,11 @@ mod tests {
     use crate::pool::{LocalPool, PoolCfg, SharedPool};
     use crate::pus::tests::CommonTmInfo;
     use crate::pus::verification::{
-        EcssTmSenderCore, EcssTmtcError, FailParams, FailParamsWithStep, MpscVerifSender,
-        RequestId, TcStateNone, VerificationReporter, VerificationReporterCfg,
-        VerificationReporterWithSender, 
VerificationToken,
+        EcssTmSenderCore, EcssTmtcError, FailParams, FailParamsWithStep, RequestId, TcStateNone,
+        VerificationReporter, VerificationReporterCfg, VerificationReporterWithSender,
+        VerificationToken,
     };
-    use crate::pus::{EcssSender, EcssTmtcErrorWithSend};
+    use crate::pus::{EcssSender, EcssTmtcErrorWithSend, MpscTmInStoreSender, PusTmWrapper};
     use crate::SenderId;
     use alloc::boxed::Box;
     use alloc::format;
@@ -1523,6 +1538,7 @@
     use spacepackets::tm::PusTm;
     use spacepackets::util::UnsignedEnum;
     use spacepackets::{ByteConversionError, CcsdsPacket, SpHeader};
+    use std::cell::RefCell;
     use std::collections::VecDeque;
     use std::sync::{mpsc, Arc, RwLock};
     use std::time::Duration;
@@ -1545,7 +1561,7 @@
 
     #[derive(Default, Clone)]
     struct TestSender {
-        pub service_queue: VecDeque<TmInfo>,
+        pub service_queue: RefCell<VecDeque<TmInfo>>,
     }
 
     impl EcssSender for TestSender {
@@ -1560,26 +1576,34 @@
     impl EcssTmSenderCore for TestSender {
         type Error = ();
 
-        fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error> {
-            assert_eq!(PusPacket::service(&tm), 1);
-            assert!(tm.source_data().is_some());
-            let mut time_stamp = [0; 7];
-            time_stamp.clone_from_slice(&tm.timestamp().unwrap()[0..7]);
-            let src_data = tm.source_data().unwrap();
-            assert!(src_data.len() >= 4);
-            let req_id = RequestId::from_bytes(&src_data[0..RequestId::SIZE_AS_BYTES]).unwrap();
-            let mut vec = None;
-            if src_data.len() > 4 {
-                let mut new_vec = Vec::new();
-                new_vec.extend_from_slice(&src_data[RequestId::SIZE_AS_BYTES..]);
-                vec = Some(new_vec);
+        fn send_tm(&self, tm: PusTmWrapper) -> Result<(), Self::Error> {
+            match tm {
+                PusTmWrapper::InStore(_) => {
+                    panic!("TestSender: cannot deal with addresses");
+                }
+                PusTmWrapper::Direct(tm) => {
+                    assert_eq!(PusPacket::service(&tm), 1);
+                    assert!(tm.source_data().is_some());
+                    let mut time_stamp = [0; 7];
+                    time_stamp.clone_from_slice(&tm.timestamp().unwrap()[0..7]);
+                    let src_data = tm.source_data().unwrap();
+                    assert!(src_data.len() >= 4);
+                    let req_id =
+                        RequestId::from_bytes(&src_data[0..RequestId::SIZE_AS_BYTES]).unwrap();
+                    let mut vec = None;
+                    if src_data.len() > 4 {
+                        let mut new_vec = Vec::new();
+                        new_vec.extend_from_slice(&src_data[RequestId::SIZE_AS_BYTES..]);
+                        vec = Some(new_vec);
+                    }
+                    self.service_queue.borrow_mut().push_back(TmInfo {
+                        common: CommonTmInfo::new_from_tm(&tm),
+                        req_id,
+                        additional_data: vec,
+                    });
+                    Ok(())
+                }
             }
-            self.service_queue.push_back(TmInfo {
-                common: CommonTmInfo::new_from_tm(&tm),
-                req_id,
-                additional_data: vec,
-            });
-            Ok(())
         }
     }
 
@@ -1595,7 +1619,7 @@
     }
     impl EcssTmSenderCore for FallibleSender {
         type Error = DummyError;
-        fn send_tm(&mut self, _: PusTm) -> Result<(), Self::Error> {
+        fn send_tm(&self, _: PusTmWrapper) -> Result<(), Self::Error> {
             Err(DummyError {})
         }
     }
@@ -1672,8 +1696,9 @@
             additional_data: None,
             req_id: req_id.clone(),
         };
-        assert_eq!(sender.service_queue.len(), 1);
-        let info = sender.service_queue.pop_front().unwrap();
+        let mut service_queue = sender.service_queue.borrow_mut();
+        assert_eq!(service_queue.len(), 1);
+        let info = service_queue.pop_front().unwrap();
         assert_eq!(info, cmp_info);
     }
 
@@ -1682,7 +1707,7 @@
         let pool = LocalPool::new(PoolCfg::new(vec![(8, 8)]));
         let shared_pool: SharedPool = Arc::new(RwLock::new(Box::new(pool)));
         let (tx, _) = mpsc::channel();
-        let mpsc_verif_sender = MpscVerifSender::new(0, "verif_sender", shared_pool, tx);
+        let mpsc_verif_sender = MpscTmInStoreSender::new(0, "verif_sender", shared_pool, tx);
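+        // Editorial sketch, not part of the original patch: MpscTmInStoreSender is the
+        // unified replacement for MpscVerifSender. It serializes direct TM into the shared
+        // pool and forwards the resulting store address through the mpsc channel, so one
+        // sender type covers both PusTmWrapper::Direct and PusTmWrapper::InStore telemetry.
+        // Assuming the send_tm(&self, PusTmWrapper) signature used elsewhere in this patch,
+        // a hypothetical direct send would look like:
+        //
+        // let mut sp_header = SpHeader::tm_unseg(TEST_APID, 0, 0).unwrap();
+        // let sec_header = PusTmSecondaryHeader::new_simple(17, 2, &EMPTY_STAMP);
+        // let tm = PusTm::new(&mut sp_header, sec_header, None, true);
+        // mpsc_verif_sender
+        //     .send_tm(PusTmWrapper::Direct(tm))
+        //     .expect("sending direct TM failed");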
is_send(&mpsc_verif_sender); } @@ -1742,8 +1767,9 @@ mod tests { additional_data: Some([0, 2].to_vec()), req_id, }; - assert_eq!(sender.service_queue.len(), 1); - let info = sender.service_queue.pop_front().unwrap(); + let mut service_queue = sender.service_queue.borrow_mut(); + assert_eq!(service_queue.len(), 1); + let info = service_queue.pop_front().unwrap(); assert_eq!(info, cmp_info); } @@ -1793,20 +1819,18 @@ mod tests { let err_with_token = res.unwrap_err(); assert_eq!(err_with_token.1, tok); match err_with_token.0 { - EcssTmtcErrorWithSend::EcssTmtcError(EcssTmtcError::ByteConversionError(e)) => { - match e { - ByteConversionError::ToSliceTooSmall(missmatch) => { - assert_eq!( - missmatch.expected, - fail_data.len() + RequestId::SIZE_AS_BYTES + fail_code.size() - ); - assert_eq!(missmatch.found, b.rep().allowed_source_data_len()); - } - _ => { - panic!("{}", format!("Unexpected error {:?}", e)) - } + EcssTmtcErrorWithSend::EcssTmtcError(EcssTmtcError::ByteConversion(e)) => match e { + ByteConversionError::ToSliceTooSmall(missmatch) => { + assert_eq!( + missmatch.expected, + fail_data.len() + RequestId::SIZE_AS_BYTES + fail_code.size() + ); + assert_eq!(missmatch.found, b.rep().allowed_source_data_len()); } - } + _ => { + panic!("{}", format!("Unexpected error {:?}", e)) + } + }, _ => { panic!("{}", format!("Unexpected error {:?}", err_with_token.0)) } @@ -1839,13 +1863,15 @@ mod tests { additional_data: Some([10, 0, 0, 0, 12].to_vec()), req_id: tok.req_id, }; - assert_eq!(sender.service_queue.len(), 1); - let info = sender.service_queue.pop_front().unwrap(); + let mut service_queue = sender.service_queue.borrow_mut(); + assert_eq!(service_queue.len(), 1); + let info = service_queue.pop_front().unwrap(); assert_eq!(info, cmp_info); } fn start_fail_check(sender: &mut TestSender, req_id: RequestId, fail_data_raw: [u8; 4]) { - assert_eq!(sender.service_queue.len(), 2); + let mut srv_queue = sender.service_queue.borrow_mut(); + assert_eq!(srv_queue.len(), 2); let mut cmp_info = TmInfo { common: CommonTmInfo { subservice: 1, @@ -1857,7 +1883,7 @@ mod tests { additional_data: None, req_id, }; - let mut info = sender.service_queue.pop_front().unwrap(); + let mut info = srv_queue.pop_front().unwrap(); assert_eq!(info, cmp_info); cmp_info = TmInfo { @@ -1871,7 +1897,7 @@ mod tests { additional_data: Some([&[22], fail_data_raw.as_slice()].concat().to_vec()), req_id, }; - info = sender.service_queue.pop_front().unwrap(); + info = srv_queue.pop_front().unwrap(); assert_eq!(info, cmp_info); } @@ -1937,7 +1963,8 @@ mod tests { additional_data: None, req_id, }; - let mut info = sender.service_queue.pop_front().unwrap(); + let mut srv_queue = sender.service_queue.borrow_mut(); + let mut info = srv_queue.pop_front().unwrap(); assert_eq!(info, cmp_info); cmp_info = TmInfo { common: CommonTmInfo { @@ -1950,7 +1977,7 @@ mod tests { additional_data: None, req_id, }; - info = sender.service_queue.pop_front().unwrap(); + info = srv_queue.pop_front().unwrap(); assert_eq!(info, cmp_info); cmp_info = TmInfo { common: CommonTmInfo { @@ -1963,7 +1990,7 @@ mod tests { additional_data: Some([0].to_vec()), req_id, }; - info = sender.service_queue.pop_front().unwrap(); + info = srv_queue.pop_front().unwrap(); assert_eq!(info, cmp_info); cmp_info = TmInfo { common: CommonTmInfo { @@ -1976,7 +2003,7 @@ mod tests { additional_data: Some([1].to_vec()), req_id, }; - info = sender.service_queue.pop_front().unwrap(); + info = srv_queue.pop_front().unwrap(); assert_eq!(info, cmp_info); } @@ -2011,7 +2038,7 @@ mod tests { 
) .expect("Sending step 1 success failed"); assert_eq!(empty, ()); - assert_eq!(sender.service_queue.len(), 4); + assert_eq!(sender.service_queue.borrow().len(), 4); step_success_check(&mut sender, tok.req_id); } @@ -2037,12 +2064,12 @@ mod tests { .expect("Sending step 1 success failed"); assert_eq!(empty, ()); let sender: &mut TestSender = b.helper.sender.downcast_mut().unwrap(); - assert_eq!(sender.service_queue.len(), 4); + assert_eq!(sender.service_queue.borrow().len(), 4); step_success_check(sender, tok.req_id); } fn check_step_failure(sender: &mut TestSender, req_id: RequestId, fail_data_raw: [u8; 4]) { - assert_eq!(sender.service_queue.len(), 4); + assert_eq!(sender.service_queue.borrow().len(), 4); let mut cmp_info = TmInfo { common: CommonTmInfo { subservice: 1, @@ -2054,7 +2081,7 @@ mod tests { additional_data: None, req_id, }; - let mut info = sender.service_queue.pop_front().unwrap(); + let mut info = sender.service_queue.borrow_mut().pop_front().unwrap(); assert_eq!(info, cmp_info); cmp_info = TmInfo { @@ -2068,7 +2095,7 @@ mod tests { additional_data: None, req_id, }; - info = sender.service_queue.pop_front().unwrap(); + info = sender.service_queue.borrow_mut().pop_front().unwrap(); assert_eq!(info, cmp_info); cmp_info = TmInfo { @@ -2082,7 +2109,7 @@ mod tests { additional_data: Some([0].to_vec()), req_id, }; - info = sender.service_queue.pop_front().unwrap(); + info = sender.service_queue.get_mut().pop_front().unwrap(); assert_eq!(info, cmp_info); cmp_info = TmInfo { @@ -2104,7 +2131,7 @@ mod tests { ), req_id, }; - info = sender.service_queue.pop_front().unwrap(); + info = sender.service_queue.get_mut().pop_front().unwrap(); assert_eq!(info, cmp_info); } @@ -2186,7 +2213,7 @@ mod tests { } fn completion_fail_check(sender: &mut TestSender, req_id: RequestId) { - assert_eq!(sender.service_queue.len(), 3); + assert_eq!(sender.service_queue.borrow().len(), 3); let mut cmp_info = TmInfo { common: CommonTmInfo { @@ -2199,7 +2226,7 @@ mod tests { additional_data: None, req_id, }; - let mut info = sender.service_queue.pop_front().unwrap(); + let mut info = sender.service_queue.get_mut().pop_front().unwrap(); assert_eq!(info, cmp_info); cmp_info = TmInfo { @@ -2213,7 +2240,7 @@ mod tests { additional_data: None, req_id, }; - info = sender.service_queue.pop_front().unwrap(); + info = sender.service_queue.get_mut().pop_front().unwrap(); assert_eq!(info, cmp_info); cmp_info = TmInfo { @@ -2227,7 +2254,7 @@ mod tests { additional_data: Some([0, 0, 0x10, 0x20].to_vec()), req_id, }; - info = sender.service_queue.pop_front().unwrap(); + info = sender.service_queue.get_mut().pop_front().unwrap(); assert_eq!(info, cmp_info); } @@ -2277,7 +2304,7 @@ mod tests { } fn completion_success_check(sender: &mut TestSender, req_id: RequestId) { - assert_eq!(sender.service_queue.len(), 3); + assert_eq!(sender.service_queue.borrow().len(), 3); let cmp_info = TmInfo { common: CommonTmInfo { subservice: 1, @@ -2289,7 +2316,7 @@ mod tests { additional_data: None, req_id, }; - let mut info = sender.service_queue.pop_front().unwrap(); + let mut info = sender.service_queue.borrow_mut().pop_front().unwrap(); assert_eq!(info, cmp_info); let cmp_info = TmInfo { @@ -2303,7 +2330,7 @@ mod tests { additional_data: None, req_id, }; - info = sender.service_queue.pop_front().unwrap(); + info = sender.service_queue.borrow_mut().pop_front().unwrap(); assert_eq!(info, cmp_info); let cmp_info = TmInfo { common: CommonTmInfo { @@ -2316,7 +2343,7 @@ mod tests { additional_data: None, req_id, }; - info = 
sender.service_queue.pop_front().unwrap();
+        info = sender.service_queue.borrow_mut().pop_front().unwrap();
         assert_eq!(info, cmp_info);
     }
 
@@ -2365,7 +2392,7 @@
             Arc::new(RwLock::new(Box::new(LocalPool::new(pool_cfg.clone()))));
         let (verif_tx, verif_rx) = mpsc::channel();
         let sender =
-            MpscVerifSender::new(0, "Verification Sender", shared_tm_pool.clone(), verif_tx);
+            MpscTmInStoreSender::new(0, "Verification Sender", shared_tm_pool.clone(), verif_tx);
         let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
         let mut reporter = VerificationReporterWithSender::new(&cfg, Box::new(sender));
 
diff --git a/satrs-core/tests/pus_verification.rs b/satrs-core/tests/pus_verification.rs
index cd68e98..313bc84 100644
--- a/satrs-core/tests/pus_verification.rs
+++ b/satrs-core/tests/pus_verification.rs
@@ -1,17 +1,18 @@
 // TODO: Refactor this to also test the STD impl using mpsc
+// TODO: Change back to crossbeam as soon as the STD TM implementation has been added back.
 #[cfg(feature = "crossbeam")]
 pub mod crossbeam_test {
     use hashbrown::HashMap;
     use satrs_core::pool::{LocalPool, PoolCfg, PoolProvider, SharedPool};
     use satrs_core::pus::verification::{
-        CrossbeamVerifSender, FailParams, RequestId, VerificationReporterCfg,
-        VerificationReporterWithSender,
+        FailParams, RequestId, VerificationReporterCfg, VerificationReporterWithSender,
     };
+    use satrs_core::pus::MpscTmInStoreSender;
     use spacepackets::ecss::{EcssEnumU16, EcssEnumU8, PusPacket, SerializablePusPacket};
     use spacepackets::tc::{PusTc, PusTcSecondaryHeader};
     use spacepackets::tm::PusTm;
     use spacepackets::SpHeader;
-    use std::sync::{Arc, RwLock};
+    use std::sync::{mpsc, Arc, RwLock};
     use std::thread;
     use std::time::Duration;
 
@@ -38,9 +39,9 @@ pub mod crossbeam_test {
             Arc::new(RwLock::new(Box::new(LocalPool::new(pool_cfg.clone()))));
         let shared_tc_pool_0 = Arc::new(RwLock::new(LocalPool::new(pool_cfg)));
         let shared_tc_pool_1 = shared_tc_pool_0.clone();
-        let (tx, rx) = crossbeam_channel::bounded(5);
+        let (tx, rx) = mpsc::channel();
         let sender =
-            CrossbeamVerifSender::new(0, "verif_sender", shared_tm_pool.clone(), tx.clone());
+            MpscTmInStoreSender::new(0, "verif_sender", shared_tm_pool.clone(), tx.clone());
         let mut reporter_with_sender_0 =
             VerificationReporterWithSender::new(&cfg, Box::new(sender));
         let mut reporter_with_sender_1 = reporter_with_sender_0.clone();
diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs
index 743a148..85346c3 100644
--- a/satrs-example/src/main.rs
+++ b/satrs-example/src/main.rs
@@ -32,10 +32,8 @@ use satrs_core::pus::hk::Subservice as HkSubservice;
 use satrs_core::pus::scheduler::PusScheduler;
 use satrs_core::pus::scheduler_srv::PusService11SchedHandler;
 use satrs_core::pus::test::PusService17TestHandler;
-use satrs_core::pus::verification::{
-    MpscVerifSender, VerificationReporterCfg, VerificationReporterWithSender,
-};
-use satrs_core::pus::MpscTmtcInStoreSender;
+use satrs_core::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender};
+use satrs_core::pus::MpscTmInStoreSender;
 use satrs_core::seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore};
 use satrs_core::spacepackets::tm::PusTmZeroCopyWriter;
 use satrs_core::spacepackets::{
@@ -85,7 +83,7 @@ fn main() {
     let (tc_source_tx, tc_source_rx) = channel();
     let (tm_funnel_tx, tm_funnel_rx) = channel();
     let (tm_server_tx, tm_server_rx) = channel();
-    let verif_sender = MpscVerifSender::new(
+    let verif_sender = MpscTmInStoreSender::new(
         0,
         "verif_sender",
         tm_store.backing_pool(),
@@ -271,7 +269,7 @@ fn 
main() { .name("Event".to_string()) .spawn(move || { let mut timestamp: [u8; 7] = [0; 7]; - let mut sender = MpscTmtcInStoreSender::new( + let mut sender = MpscTmInStoreSender::new( 1, "event_sender", tm_store_event.backing_pool(), -- 2.43.0
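
Note on the refactoring above: MpscVerifSender, CrossbeamVerifSender and MpscTmtcInStoreSender
are all folded into the single MpscTmInStoreSender. A minimal construction sketch, assuming the
satrs-core API as it appears in this patch; the APID value and pool bucket sizes are
illustrative, mirroring the doctest in satrs-core/src/pus/verification.rs:

    use std::sync::{mpsc, Arc, RwLock};

    use satrs_core::pool::{LocalPool, PoolCfg, SharedPool};
    use satrs_core::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender};
    use satrs_core::pus::MpscTmInStoreSender;

    fn main() {
        // Shared TM pool backing the sender; bucket configuration is illustrative.
        let pool_cfg = PoolCfg::new(vec![(10, 32), (10, 64), (10, 128)]);
        let shared_tm_pool: SharedPool =
            Arc::new(RwLock::new(Box::new(LocalPool::new(pool_cfg))));
        let (verif_tx, _verif_rx) = mpsc::channel();
        // One sender type now serves verification reporting and event TM alike.
        let sender = MpscTmInStoreSender::new(0, "verif_sender", shared_tm_pool, verif_tx);
        let cfg = VerificationReporterCfg::new(0x02, 1, 2, 8).unwrap();
        let _reporter = VerificationReporterWithSender::new(&cfg, Box::new(sender));
    }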