diff --git a/satrs-core/Cargo.toml b/satrs-core/Cargo.toml
index bf95cef..a6d8da1 100644
--- a/satrs-core/Cargo.toml
+++ b/satrs-core/Cargo.toml
@@ -60,7 +60,7 @@ optional = true
 # version = "0.5.4"
 # path = "../spacepackets"
 git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git"
-rev = "ef4244c8cb5c"
+rev = "28cd8c02ac0"
 default-features = false
 
 [dev-dependencies]
diff --git a/satrs-core/src/pus/mod.rs b/satrs-core/src/pus/mod.rs
index 9396e03..c74e587 100644
--- a/satrs-core/src/pus/mod.rs
+++ b/satrs-core/src/pus/mod.rs
@@ -186,13 +186,14 @@ pub mod std_mod {
         }
     }
 
-    impl EcssTmSenderCore for MpscTmtcInStoreSender {
-        type Error = MpscPusInStoreSendError;
-
-        fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error> {
+    impl MpscTmtcInStoreSender {
+        pub fn send_tmtc(
+            &mut self,
+            tmtc: impl SerializablePusPacket,
+        ) -> Result<(), MpscPusInStoreSendError> {
             let operation = |mut store: RwLockWriteGuard| {
-                let (addr, slice) = store.free_element(tm.len_packed())?;
-                tm.write_to_bytes(slice)?;
+                let (addr, slice) = store.free_element(tmtc.len_packed())?;
+                tmtc.write_to_bytes(slice)?;
                 self.sender.send(addr)?;
                 Ok(())
             };
@@ -209,26 +210,19 @@ pub mod std_mod {
         }
     }
 
+    impl EcssTmSenderCore for MpscTmtcInStoreSender {
+        type Error = MpscPusInStoreSendError;
+
+        fn send_tm(&mut self, tm: PusTm) -> Result<(), Self::Error> {
+            self.send_tmtc(tm)
+        }
+    }
+
     impl EcssTcSenderCore for MpscTmtcInStoreSender {
         type Error = MpscPusInStoreSendError;
 
         fn send_tc(&mut self, tc: PusTc) -> Result<(), Self::Error> {
-            let operation = |mut store: RwLockWriteGuard| {
-                let (addr, slice) = store.free_element(tc.len_packed())?;
-                tc.write_to_bytes(slice)?;
-                self.sender.send(addr)?;
-                Ok(())
-            };
-            match self.store_helper.write() {
-                Ok(pool) => operation(pool),
-                Err(e) => {
-                    if self.ignore_poison_errors {
-                        operation(e.into_inner())
-                    } else {
-                        Err(MpscPusInStoreSendError::LockError)
-                    }
-                }
-            }
+            self.send_tmtc(tc)
         }
     }
 
diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs
index c42e758..630afde 100644
--- a/satrs-core/src/pus/verification.rs
+++ b/satrs-core/src/pus/verification.rs
@@ -84,7 +84,7 @@ use core::mem::size_of;
 use delegate::delegate;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
-use spacepackets::ecss::{scheduling, EcssEnumeration, PusPacket};
+use spacepackets::ecss::{scheduling, EcssEnumeration, PusPacket, SerializablePusPacket};
 use spacepackets::tc::PusTc;
 use spacepackets::tm::{PusTm, PusTmSecondaryHeader};
 use spacepackets::{CcsdsPacket, PacketId, PacketSequenceCtrl};
diff --git a/satrs-core/src/tmtc/pus_distrib.rs b/satrs-core/src/tmtc/pus_distrib.rs
index 9d2290e..bf591da 100644
--- a/satrs-core/src/tmtc/pus_distrib.rs
+++ b/satrs-core/src/tmtc/pus_distrib.rs
@@ -17,6 +17,7 @@
 //! # Example
 //!
 //! ```rust
+//! use spacepackets::ecss::SerializablePusPacket;
 //! use satrs_core::tmtc::pus_distrib::{PusDistributor, PusServiceProvider};
 //! use satrs_core::tmtc::{ReceivesTc, ReceivesTcCore};
 //! use spacepackets::SpHeader;
diff --git a/satrs-example/src/pus.rs b/satrs-example/src/pus.rs
index 64664a5..efa04fd 100644
--- a/satrs-example/src/pus.rs
+++ b/satrs-example/src/pus.rs
@@ -1,21 +1,22 @@
 use crate::requests::{Request, RequestWithToken};
-use crate::tmtc::{PusTcSource, TmStore};
+use crate::tmtc::{PusTcSource, TmStore, PUS_APID};
 use log::{info, warn};
 use satrs_core::events::EventU32;
 use satrs_core::hk::{CollectionIntervalFactor, HkRequest};
 use satrs_core::mode::{ModeAndSubmode, ModeRequest};
+use satrs_core::objects::ObjectId;
 use satrs_core::params::Params;
 use satrs_core::pool::{PoolProvider, StoreAddr};
 use satrs_core::pus::event_man::{EventRequest, EventRequestWithToken};
-use satrs_core::pus::mode;
+use satrs_core::pus::hk;
 use satrs_core::pus::mode::Subservice;
 use satrs_core::pus::scheduling::PusScheduler;
 use satrs_core::pus::verification::{
     pus_11_generic_tc_check, FailParams, StdVerifReporterWithSender, TcStateAccepted,
     VerificationToken,
 };
-use satrs_core::pus::{event, GenericTcCheckError};
-use satrs_core::pus::{hk, EcssTmSender, EcssTmSenderCore};
+use satrs_core::pus::{event, EcssTcSenderCore, GenericTcCheckError, MpscTmtcInStoreSender};
+use satrs_core::pus::{mode, EcssTcSender};
 use satrs_core::res_code::ResultU16;
 use satrs_core::seq_count::{SeqCountProviderSyncClonable, SequenceCountProviderCore};
 use satrs_core::spacepackets::ecss::{scheduling, PusServiceId};
@@ -33,9 +34,52 @@ use std::convert::TryFrom;
 use std::rc::Rc;
 use std::sync::mpsc::{Receiver, Sender};
 
-pub trait PusTcMultiplexer {
-    fn route_pus_tc(tc: &PusTc, apid: u16, service: u8, subservice: u8);
+// pub trait PusTcRouter {
+//     type Error;
+//     fn route_pus_tc(
+//         &mut self,
+//         apid: u16,
+//         service: u8,
+//         subservice: u8,
+//         tc: &PusTc,
+//     );
+// }
+
+pub type AcceptedTc = (StoreAddr, VerificationToken);
+
+pub struct PusTcMpscRouter {
+    test_service_receiver: MpscTmtcInStoreSender,
+    event_service_receiver: Sender,
+    sched_service_receiver: Sender,
+    hk_service_receiver: Sender,
+    action_service_receiver: Sender,
 }
+
+// impl PusTcRouter for PusTcMpscRouter {
+//     type Error = ();
+//
+//     fn route_pus_tc(&mut self, apid: u16, service: u8, subservice: u8, tc: &PusTc) {
+//         if apid == PUS_APID {
+//             if service == PusServiceId::Event as u8 {
+//                 self.event_service_receiver.send_tc(*tc).unwrap();
+//             }
+//             if service == PusServiceId::Action as u8 {
+//                 // TODO: Look up object ID and then route the action request to that object.
+//                 self.action_service_receiver.send_tc(*tc).unwrap();
+//             }
+//             if service == PusServiceId::Housekeeping as u8 {
+//                 // TODO: Look up object ID and then route the HK request to that object.
+// } +// if service == PusServiceId::Scheduling as u8 { +// self.sched_service_receiver.send_tc(*tc).unwrap(); +// } +// if service == PusServiceId::Test as u8 { +// self.test_service_receiver.send_tc(*tc).unwrap(); +// } +// } +// todo!() +// } +// } pub struct PusReceiver { pub tm_helper: PusTmWithCdsShortHelper, pub tm_args: PusTmArgs, @@ -60,55 +104,58 @@ impl PusTmArgs { } } -#[allow(dead_code)] -pub struct PusTcHandlerBase { - pub tc_store: Box, - pub receiver: Receiver<(StoreAddr, VerificationToken)>, - pub verif_reporter: StdVerifReporterWithSender, - pub time_provider: Box, -} - -pub trait TestHandlerNoPing { - fn handle_no_ping_tc(&mut self, tc: PusTc); -} - -#[allow(dead_code)] -pub struct PusTestTcHandler { - pub base: PusTcHandlerBase, - handler: Option>, -} - -#[allow(dead_code)] -pub struct PusScheduleTcHandler { - pub base: PusTestTcHandler, -} - -impl PusTestTcHandler { - #[allow(dead_code)] - pub fn operation(&mut self) { - let (addr, token) = self.base.receiver.recv().unwrap(); - let data = self.base.tc_store.read(&addr).unwrap(); - let (pus_tc, _len) = PusTc::from_bytes(data).unwrap(); - let stamp: [u8; 7] = [0; 7]; - if pus_tc.subservice() == 1 { - self.base - .verif_reporter - .completion_success(token, Some(&stamp)) - .unwrap(); - } else if let Some(handler) = &mut self.handler { - handler.handle_no_ping_tc(pus_tc); - } - } -} +// #[allow(dead_code)] +// pub struct PusTcHandlerBase { +// pub tc_store: Box, +// pub receiver: Receiver<(StoreAddr, VerificationToken)>, +// pub verif_reporter: StdVerifReporterWithSender, +// pub time_provider: Box, +// } +// +// pub trait TestHandlerNoPing { +// fn handle_no_ping_tc(&mut self, tc: PusTc); +// } +// +// #[allow(dead_code)] +// pub struct PusTestTcHandler { +// pub base: PusTcHandlerBase, +// handler: Option>, +// } +// +// #[allow(dead_code)] +// pub struct PusScheduleTcHandler { +// pub base: PusTestTcHandler, +// } +// +// impl PusTestTcHandler { +// #[allow(dead_code)] +// pub fn operation(&mut self) { +// let (addr, token) = self.base.receiver.recv().unwrap(); +// let data = self.base.tc_store.read(&addr).unwrap(); +// let (pus_tc, _len) = PusTc::from_bytes(data).unwrap(); +// let stamp: [u8; 7] = [0; 7]; +// if pus_tc.subservice() == 1 { +// self.base +// .verif_reporter +// .completion_success(token, Some(&stamp)) +// .unwrap(); +// } else if let Some(handler) = &mut self.handler { +// handler.handle_no_ping_tc(pus_tc); +// } +// } +// } pub struct PusTcArgs { pub event_request_tx: Sender, + /// This routes all telecommands to their respective recipients + pub pus_router: PusTcMpscRouter, /// Request routing helper. Maps targeted requests to their recipient. - pub request_map: HashMap>, + //pub request_map: HashMap>, /// Required for scheduling of telecommands. 
- pub tc_source: PusTcSource, + //pub tc_source: PusTcSource, + /// Used to send events from within the TC router pub event_sender: Sender<(EventU32, Option)>, - pub scheduler: Rc>, + //pub scheduler: Rc>, } struct TimeStampHelper { @@ -168,10 +215,24 @@ impl PusServiceProvider for PusReceiver { let service = PusServiceId::try_from(service); match service { Ok(standard_service) => match standard_service { - PusServiceId::Test => self.handle_test_service(pus_tc, accepted_token), - PusServiceId::Housekeeping => self.handle_hk_request(pus_tc, accepted_token), - PusServiceId::Event => self.handle_event_request(pus_tc, accepted_token), - PusServiceId::Scheduling => self.handle_scheduled_tc(pus_tc, accepted_token), + PusServiceId::Test => self + .tc_args + .pus_router + .test_service_receiver + .send_tc(*pus_tc), + PusServiceId::Housekeeping => { + self.tc_args.pus_router.hk_service_receiver.send_tc(*pus_tc) + } //self.handle_hk_request(pus_tc, accepted_token), + PusServiceId::Event => self + .tc_args + .pus_router + .event_service_receiver + .send_tc(*pus_tc), //self.handle_event_request(pus_tc, accepted_token), + PusServiceId::Scheduling => self + .tc_args + .pus_router + .sched_service_receiver + .send_tc(*pus_tc), //self.handle_scheduled_tc(pus_tc, accepted_token), _ => self .tm_args .verif_reporter @@ -212,478 +273,478 @@ impl PusServiceProvider for PusReceiver { } } -impl PusReceiver { - fn handle_test_service(&mut self, pus_tc: &PusTc, token: VerificationToken) { - match PusPacket::subservice(pus_tc) { - 1 => { - info!("Received PUS ping command TC[17,1]"); - info!("Sending ping reply PUS TM[17,2]"); - let start_token = self - .tm_args - .verif_reporter - .start_success(token, Some(self.stamp_helper.stamp())) - .expect("Error sending start success"); - let ping_reply = self.tm_helper.create_pus_tm_timestamp_now( - 17, - 2, - None, - self.tm_args.seq_count_provider.get(), - ); - let addr = self.tm_args.tm_store.add_pus_tm(&ping_reply); - self.tm_args - .tm_tx - .send(addr) - .expect("Sending TM to TM funnel failed"); - self.tm_args.seq_count_provider.increment(); - self.tm_args - .verif_reporter - .completion_success(start_token, Some(self.stamp_helper.stamp())) - .expect("Error sending completion success"); - } - 128 => { - info!("Generating test event"); - self.tc_args - .event_sender - .send((TEST_EVENT.into(), None)) - .expect("Sending test event failed"); - let start_token = self - .tm_args - .verif_reporter - .start_success(token, Some(self.stamp_helper.stamp())) - .expect("Error sending start success"); - self.tm_args - .verif_reporter - .completion_success(start_token, Some(self.stamp_helper.stamp())) - .expect("Error sending completion success"); - } - _ => { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &tmtc_err::INVALID_PUS_SUBSERVICE, - None, - ), - ) - .expect("Sending start failure TM failed"); - } - } - } - - fn handle_hk_request(&mut self, pus_tc: &PusTc, token: VerificationToken) { - if pus_tc.user_data().is_none() { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &tmtc_err::NOT_ENOUGH_APP_DATA, - None, - ), - ) - .expect("Sending start failure TM failed"); - return; - } - let user_data = pus_tc.user_data().unwrap(); - if user_data.len() < 8 { - let err = if user_data.len() < 4 { - &hk_err::TARGET_ID_MISSING - } else { - &hk_err::UNIQUE_ID_MISSING - }; - self.tm_args - .verif_reporter - .start_failure( - token, - 
FailParams::new(Some(self.stamp_helper.stamp()), err, None), - ) - .expect("Sending start failure TM failed"); - return; - } - let addressable_id = AddressableId::from_raw_be(user_data).unwrap(); - if !self - .tc_args - .request_map - .contains_key(&addressable_id.target_id) - { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &hk_err::UNKNOWN_TARGET_ID, - None, - ), - ) - .expect("Sending start failure TM failed"); - return; - } - let send_request = |target: TargetId, request: HkRequest| { - let sender = self - .tc_args - .request_map - .get(&addressable_id.target_id) - .unwrap(); - sender - .send(RequestWithToken::new( - target, - Request::HkRequest(request), - token, - )) - .unwrap_or_else(|_| panic!("Sending HK request {request:?} failed")); - }; - if PusPacket::subservice(pus_tc) == hk::Subservice::TcEnableHkGeneration as u8 { - send_request( - addressable_id.target_id, - HkRequest::Enable(addressable_id.unique_id), - ); - } else if PusPacket::subservice(pus_tc) == hk::Subservice::TcDisableHkGeneration as u8 { - send_request( - addressable_id.target_id, - HkRequest::Disable(addressable_id.unique_id), - ); - } else if PusPacket::subservice(pus_tc) == hk::Subservice::TcGenerateOneShotHk as u8 { - send_request( - addressable_id.target_id, - HkRequest::OneShot(addressable_id.unique_id), - ); - } else if PusPacket::subservice(pus_tc) - == hk::Subservice::TcModifyHkCollectionInterval as u8 - { - if user_data.len() < 12 { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &hk_err::COLLECTION_INTERVAL_MISSING, - None, - ), - ) - .expect("Sending start failure TM failed"); - return; - } - send_request( - addressable_id.target_id, - HkRequest::ModifyCollectionInterval( - addressable_id.unique_id, - CollectionIntervalFactor::from_be_bytes(user_data[8..12].try_into().unwrap()), - ), - ); - } - } - - fn handle_event_request(&mut self, pus_tc: &PusTc, token: VerificationToken) { - let send_start_failure = |vr: &mut StdVerifReporterWithSender, - timestamp: &[u8], - failure_code: &ResultU16, - failure_data: Option<&[u8]>| { - vr.start_failure( - token, - FailParams::new(Some(timestamp), failure_code, failure_data), - ) - .expect("Sending start failure TM failed"); - }; - let send_start_acceptance = |vr: &mut StdVerifReporterWithSender, timestamp: &[u8]| { - vr.start_success(token, Some(timestamp)) - .expect("Sending start success TM failed") - }; - if pus_tc.user_data().is_none() { - send_start_failure( - &mut self.tm_args.verif_reporter, - self.stamp_helper.stamp(), - &tmtc_err::NOT_ENOUGH_APP_DATA, - None, - ); - return; - } - let app_data = pus_tc.user_data().unwrap(); - if app_data.len() < 4 { - send_start_failure( - &mut self.tm_args.verif_reporter, - self.stamp_helper.stamp(), - &tmtc_err::NOT_ENOUGH_APP_DATA, - None, - ); - return; - } - let event_id = EventU32::from(u32::from_be_bytes(app_data.try_into().unwrap())); - match PusPacket::subservice(pus_tc).try_into() { - Ok(event::Subservice::TcEnableEventGeneration) => { - let start_token = send_start_acceptance( - &mut self.tm_args.verif_reporter, - self.stamp_helper.stamp(), - ); - self.tc_args - .event_request_tx - .send(EventRequestWithToken { - request: EventRequest::Enable(event_id), - token: start_token, - }) - .expect("Sending event request failed"); - } - Ok(event::Subservice::TcDisableEventGeneration) => { - let start_token = send_start_acceptance( - &mut self.tm_args.verif_reporter, - 
self.stamp_helper.stamp(), - ); - self.tc_args - .event_request_tx - .send(EventRequestWithToken { - request: EventRequest::Disable(event_id), - token: start_token, - }) - .expect("Sending event request failed"); - } - _ => { - send_start_failure( - &mut self.tm_args.verif_reporter, - self.stamp_helper.stamp(), - &tmtc_err::INVALID_PUS_SUBSERVICE, - None, - ); - } - } - } - - fn handle_scheduled_tc(&mut self, pus_tc: &PusTc, token: VerificationToken) { - let subservice = match pus_11_generic_tc_check(pus_tc) { - Ok(subservice) => subservice, - Err(e) => match e { - GenericTcCheckError::NotEnoughAppData => { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &tmtc_err::NOT_ENOUGH_APP_DATA, - None, - ), - ) - .expect("could not sent verification error"); - return; - } - GenericTcCheckError::InvalidSubservice => { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &tmtc_err::INVALID_PUS_SUBSERVICE, - None, - ), - ) - .expect("could not sent verification error"); - return; - } - }, - }; - match subservice { - scheduling::Subservice::TcEnableScheduling => { - let start_token = self - .tm_args - .verif_reporter - .start_success(token, Some(self.stamp_helper.stamp())) - .expect("Error sending start success"); - - let mut scheduler = self.tc_args.scheduler.borrow_mut(); - scheduler.enable(); - if scheduler.is_enabled() { - self.tm_args - .verif_reporter - .completion_success(start_token, Some(self.stamp_helper.stamp())) - .expect("Error sending completion success"); - } else { - panic!("Failed to enable scheduler"); - } - } - scheduling::Subservice::TcDisableScheduling => { - let start_token = self - .tm_args - .verif_reporter - .start_success(token, Some(self.stamp_helper.stamp())) - .expect("Error sending start success"); - - let mut scheduler = self.tc_args.scheduler.borrow_mut(); - scheduler.disable(); - if !scheduler.is_enabled() { - self.tm_args - .verif_reporter - .completion_success(start_token, Some(self.stamp_helper.stamp())) - .expect("Error sending completion success"); - } else { - panic!("Failed to disable scheduler"); - } - } - scheduling::Subservice::TcResetScheduling => { - let start_token = self - .tm_args - .verif_reporter - .start_success(token, Some(self.stamp_helper.stamp())) - .expect("Error sending start success"); - - let mut pool = self - .tc_args - .tc_source - .tc_store - .pool - .write() - .expect("Locking pool failed"); - - let mut scheduler = self.tc_args.scheduler.borrow_mut(); - scheduler - .reset(pool.as_mut()) - .expect("Error resetting TC Pool"); - drop(scheduler); - - self.tm_args - .verif_reporter - .completion_success(start_token, Some(self.stamp_helper.stamp())) - .expect("Error sending completion success"); - } - scheduling::Subservice::TcInsertActivity => { - let start_token = self - .tm_args - .verif_reporter - .start_success(token, Some(self.stamp_helper.stamp())) - .expect("error sending start success"); - - let mut pool = self - .tc_args - .tc_source - .tc_store - .pool - .write() - .expect("locking pool failed"); - let mut scheduler = self.tc_args.scheduler.borrow_mut(); - scheduler - .insert_wrapped_tc::(pus_tc, pool.as_mut()) - .expect("insertion of activity into pool failed"); - drop(scheduler); - - self.tm_args - .verif_reporter - .completion_success(start_token, Some(self.stamp_helper.stamp())) - .expect("sending completion success failed"); - } - _ => {} - } - } - - fn handle_mode_service(&mut self, pus_tc: &PusTc, 
token: VerificationToken) { - let mut app_data_len = 0; - let app_data = pus_tc.user_data(); - if app_data.is_some() { - app_data_len = pus_tc.user_data().unwrap().len(); - } - if app_data_len < 4 { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &tmtc_err::NOT_ENOUGH_APP_DATA, - Some(format!("expected {} bytes, found {}", 4, app_data_len).as_bytes()), - ), - ) - .expect("Sending start failure TM failed"); - } - let app_data = app_data.unwrap(); - let mut invalid_subservice_handler = || { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &tmtc_err::INVALID_PUS_SUBSERVICE, - Some(&[PusPacket::subservice(pus_tc)]), - ), - ) - .expect("Sending start failure TM failed"); - }; - let subservice = mode::Subservice::try_from(PusPacket::subservice(pus_tc)); - if let Ok(subservice) = subservice { - let forward_mode_request = |target_id, mode_request: ModeRequest| match self - .tc_args - .request_map - .get(&target_id) - { - None => warn!("not mode request recipient for target ID {target_id} found"), - Some(sender_to_recipient) => { - sender_to_recipient - .send(RequestWithToken::new( - target_id, - Request::ModeRequest(mode_request), - token, - )) - .expect("sending mode request failed"); - } - }; - let mut valid_subservice = true; - match subservice { - Subservice::TcSetMode => { - let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); - let min_len = ModeAndSubmode::raw_len() + 4; - if app_data_len < min_len { - self.tm_args - .verif_reporter - .start_failure( - token, - FailParams::new( - Some(self.stamp_helper.stamp()), - &tmtc_err::NOT_ENOUGH_APP_DATA, - Some( - format!("expected {min_len} bytes, found {app_data_len}") - .as_bytes(), - ), - ), - ) - .expect("Sending start failure TM failed"); - } - // Should never fail after size check - let mode_submode = ModeAndSubmode::from_be_bytes( - app_data[4..4 + ModeAndSubmode::raw_len()] - .try_into() - .unwrap(), - ) - .unwrap(); - forward_mode_request(target_id, ModeRequest::SetMode(mode_submode)); - } - Subservice::TcReadMode => { - let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); - forward_mode_request(target_id, ModeRequest::ReadMode); - } - Subservice::TcAnnounceMode => { - let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); - forward_mode_request(target_id, ModeRequest::AnnounceMode); - } - Subservice::TcAnnounceModeRecursive => { - let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); - forward_mode_request(target_id, ModeRequest::AnnounceModeRecursive); - } - _ => { - warn!("Can not process mode request with subservice {subservice:?}"); - invalid_subservice_handler(); - valid_subservice = false; - } - } - if valid_subservice { - self.tm_args - .verif_reporter - .start_success(token, Some(self.stamp_helper.stamp())) - .expect("sending start success TM failed"); - } - } else { - invalid_subservice_handler(); - } - } -} +// impl PusReceiver { +// fn handle_test_service(&mut self, pus_tc: &PusTc, token: VerificationToken) { +// match PusPacket::subservice(pus_tc) { +// 1 => { +// info!("Received PUS ping command TC[17,1]"); +// info!("Sending ping reply PUS TM[17,2]"); +// let start_token = self +// .tm_args +// .verif_reporter +// .start_success(token, Some(self.stamp_helper.stamp())) +// .expect("Error sending start success"); +// let ping_reply = self.tm_helper.create_pus_tm_timestamp_now( +// 17, +// 2, +// None, +// 
self.tm_args.seq_count_provider.get(), +// ); +// let addr = self.tm_args.tm_store.add_pus_tm(&ping_reply); +// self.tm_args +// .tm_tx +// .send(addr) +// .expect("Sending TM to TM funnel failed"); +// self.tm_args.seq_count_provider.increment(); +// self.tm_args +// .verif_reporter +// .completion_success(start_token, Some(self.stamp_helper.stamp())) +// .expect("Error sending completion success"); +// } +// 128 => { +// info!("Generating test event"); +// self.tc_args +// .event_sender +// .send((TEST_EVENT.into(), None)) +// .expect("Sending test event failed"); +// let start_token = self +// .tm_args +// .verif_reporter +// .start_success(token, Some(self.stamp_helper.stamp())) +// .expect("Error sending start success"); +// self.tm_args +// .verif_reporter +// .completion_success(start_token, Some(self.stamp_helper.stamp())) +// .expect("Error sending completion success"); +// } +// _ => { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::INVALID_PUS_SUBSERVICE, +// None, +// ), +// ) +// .expect("Sending start failure TM failed"); +// } +// } +// } +// +// fn handle_hk_request(&mut self, pus_tc: &PusTc, token: VerificationToken) { +// if pus_tc.user_data().is_none() { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::NOT_ENOUGH_APP_DATA, +// None, +// ), +// ) +// .expect("Sending start failure TM failed"); +// return; +// } +// let user_data = pus_tc.user_data().unwrap(); +// if user_data.len() < 8 { +// let err = if user_data.len() < 4 { +// &hk_err::TARGET_ID_MISSING +// } else { +// &hk_err::UNIQUE_ID_MISSING +// }; +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new(Some(self.stamp_helper.stamp()), err, None), +// ) +// .expect("Sending start failure TM failed"); +// return; +// } +// let addressable_id = AddressableId::from_raw_be(user_data).unwrap(); +// if !self +// .tc_args +// .request_map +// .contains_key(&addressable_id.target_id) +// { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &hk_err::UNKNOWN_TARGET_ID, +// None, +// ), +// ) +// .expect("Sending start failure TM failed"); +// return; +// } +// let send_request = |target: TargetId, request: HkRequest| { +// let sender = self +// .tc_args +// .request_map +// .get(&addressable_id.target_id) +// .unwrap(); +// sender +// .send(RequestWithToken::new( +// target, +// Request::HkRequest(request), +// token, +// )) +// .unwrap_or_else(|_| panic!("Sending HK request {request:?} failed")); +// }; +// if PusPacket::subservice(pus_tc) == hk::Subservice::TcEnableHkGeneration as u8 { +// send_request( +// addressable_id.target_id, +// HkRequest::Enable(addressable_id.unique_id), +// ); +// } else if PusPacket::subservice(pus_tc) == hk::Subservice::TcDisableHkGeneration as u8 { +// send_request( +// addressable_id.target_id, +// HkRequest::Disable(addressable_id.unique_id), +// ); +// } else if PusPacket::subservice(pus_tc) == hk::Subservice::TcGenerateOneShotHk as u8 { +// send_request( +// addressable_id.target_id, +// HkRequest::OneShot(addressable_id.unique_id), +// ); +// } else if PusPacket::subservice(pus_tc) +// == hk::Subservice::TcModifyHkCollectionInterval as u8 +// { +// if user_data.len() < 12 { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// 
Some(self.stamp_helper.stamp()), +// &hk_err::COLLECTION_INTERVAL_MISSING, +// None, +// ), +// ) +// .expect("Sending start failure TM failed"); +// return; +// } +// send_request( +// addressable_id.target_id, +// HkRequest::ModifyCollectionInterval( +// addressable_id.unique_id, +// CollectionIntervalFactor::from_be_bytes(user_data[8..12].try_into().unwrap()), +// ), +// ); +// } +// } +// +// fn handle_event_request(&mut self, pus_tc: &PusTc, token: VerificationToken) { +// let send_start_failure = |vr: &mut StdVerifReporterWithSender, +// timestamp: &[u8], +// failure_code: &ResultU16, +// failure_data: Option<&[u8]>| { +// vr.start_failure( +// token, +// FailParams::new(Some(timestamp), failure_code, failure_data), +// ) +// .expect("Sending start failure TM failed"); +// }; +// let send_start_acceptance = |vr: &mut StdVerifReporterWithSender, timestamp: &[u8]| { +// vr.start_success(token, Some(timestamp)) +// .expect("Sending start success TM failed") +// }; +// if pus_tc.user_data().is_none() { +// send_start_failure( +// &mut self.tm_args.verif_reporter, +// self.stamp_helper.stamp(), +// &tmtc_err::NOT_ENOUGH_APP_DATA, +// None, +// ); +// return; +// } +// let app_data = pus_tc.user_data().unwrap(); +// if app_data.len() < 4 { +// send_start_failure( +// &mut self.tm_args.verif_reporter, +// self.stamp_helper.stamp(), +// &tmtc_err::NOT_ENOUGH_APP_DATA, +// None, +// ); +// return; +// } +// let event_id = EventU32::from(u32::from_be_bytes(app_data.try_into().unwrap())); +// match PusPacket::subservice(pus_tc).try_into() { +// Ok(event::Subservice::TcEnableEventGeneration) => { +// let start_token = send_start_acceptance( +// &mut self.tm_args.verif_reporter, +// self.stamp_helper.stamp(), +// ); +// self.tc_args +// .event_request_tx +// .send(EventRequestWithToken { +// request: EventRequest::Enable(event_id), +// token: start_token, +// }) +// .expect("Sending event request failed"); +// } +// Ok(event::Subservice::TcDisableEventGeneration) => { +// let start_token = send_start_acceptance( +// &mut self.tm_args.verif_reporter, +// self.stamp_helper.stamp(), +// ); +// self.tc_args +// .event_request_tx +// .send(EventRequestWithToken { +// request: EventRequest::Disable(event_id), +// token: start_token, +// }) +// .expect("Sending event request failed"); +// } +// _ => { +// send_start_failure( +// &mut self.tm_args.verif_reporter, +// self.stamp_helper.stamp(), +// &tmtc_err::INVALID_PUS_SUBSERVICE, +// None, +// ); +// } +// } +// } +// +// fn handle_scheduled_tc(&mut self, pus_tc: &PusTc, token: VerificationToken) { +// let subservice = match pus_11_generic_tc_check(pus_tc) { +// Ok(subservice) => subservice, +// Err(e) => match e { +// GenericTcCheckError::NotEnoughAppData => { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::NOT_ENOUGH_APP_DATA, +// None, +// ), +// ) +// .expect("could not sent verification error"); +// return; +// } +// GenericTcCheckError::InvalidSubservice => { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::INVALID_PUS_SUBSERVICE, +// None, +// ), +// ) +// .expect("could not sent verification error"); +// return; +// } +// }, +// }; +// match subservice { +// scheduling::Subservice::TcEnableScheduling => { +// let start_token = self +// .tm_args +// .verif_reporter +// .start_success(token, Some(self.stamp_helper.stamp())) +// .expect("Error sending start 
success"); +// +// let mut scheduler = self.tc_args.scheduler.borrow_mut(); +// scheduler.enable(); +// if scheduler.is_enabled() { +// self.tm_args +// .verif_reporter +// .completion_success(start_token, Some(self.stamp_helper.stamp())) +// .expect("Error sending completion success"); +// } else { +// panic!("Failed to enable scheduler"); +// } +// } +// scheduling::Subservice::TcDisableScheduling => { +// let start_token = self +// .tm_args +// .verif_reporter +// .start_success(token, Some(self.stamp_helper.stamp())) +// .expect("Error sending start success"); +// +// let mut scheduler = self.tc_args.scheduler.borrow_mut(); +// scheduler.disable(); +// if !scheduler.is_enabled() { +// self.tm_args +// .verif_reporter +// .completion_success(start_token, Some(self.stamp_helper.stamp())) +// .expect("Error sending completion success"); +// } else { +// panic!("Failed to disable scheduler"); +// } +// } +// scheduling::Subservice::TcResetScheduling => { +// let start_token = self +// .tm_args +// .verif_reporter +// .start_success(token, Some(self.stamp_helper.stamp())) +// .expect("Error sending start success"); +// +// let mut pool = self +// .tc_args +// .tc_source +// .tc_store +// .pool +// .write() +// .expect("Locking pool failed"); +// +// let mut scheduler = self.tc_args.scheduler.borrow_mut(); +// scheduler +// .reset(pool.as_mut()) +// .expect("Error resetting TC Pool"); +// drop(scheduler); +// +// self.tm_args +// .verif_reporter +// .completion_success(start_token, Some(self.stamp_helper.stamp())) +// .expect("Error sending completion success"); +// } +// scheduling::Subservice::TcInsertActivity => { +// let start_token = self +// .tm_args +// .verif_reporter +// .start_success(token, Some(self.stamp_helper.stamp())) +// .expect("error sending start success"); +// +// let mut pool = self +// .tc_args +// .tc_source +// .tc_store +// .pool +// .write() +// .expect("locking pool failed"); +// let mut scheduler = self.tc_args.scheduler.borrow_mut(); +// scheduler +// .insert_wrapped_tc::(pus_tc, pool.as_mut()) +// .expect("insertion of activity into pool failed"); +// drop(scheduler); +// +// self.tm_args +// .verif_reporter +// .completion_success(start_token, Some(self.stamp_helper.stamp())) +// .expect("sending completion success failed"); +// } +// _ => {} +// } +// } +// +// fn handle_mode_service(&mut self, pus_tc: &PusTc, token: VerificationToken) { +// let mut app_data_len = 0; +// let app_data = pus_tc.user_data(); +// if app_data.is_some() { +// app_data_len = pus_tc.user_data().unwrap().len(); +// } +// if app_data_len < 4 { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::NOT_ENOUGH_APP_DATA, +// Some(format!("expected {} bytes, found {}", 4, app_data_len).as_bytes()), +// ), +// ) +// .expect("Sending start failure TM failed"); +// } +// let app_data = app_data.unwrap(); +// let mut invalid_subservice_handler = || { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::INVALID_PUS_SUBSERVICE, +// Some(&[PusPacket::subservice(pus_tc)]), +// ), +// ) +// .expect("Sending start failure TM failed"); +// }; +// let subservice = mode::Subservice::try_from(PusPacket::subservice(pus_tc)); +// if let Ok(subservice) = subservice { +// let forward_mode_request = |target_id, mode_request: ModeRequest| match self +// .tc_args +// .request_map +// .get(&target_id) +// { +// None => warn!("not mode 
request recipient for target ID {target_id} found"), +// Some(sender_to_recipient) => { +// sender_to_recipient +// .send(RequestWithToken::new( +// target_id, +// Request::ModeRequest(mode_request), +// token, +// )) +// .expect("sending mode request failed"); +// } +// }; +// let mut valid_subservice = true; +// match subservice { +// Subservice::TcSetMode => { +// let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); +// let min_len = ModeAndSubmode::raw_len() + 4; +// if app_data_len < min_len { +// self.tm_args +// .verif_reporter +// .start_failure( +// token, +// FailParams::new( +// Some(self.stamp_helper.stamp()), +// &tmtc_err::NOT_ENOUGH_APP_DATA, +// Some( +// format!("expected {min_len} bytes, found {app_data_len}") +// .as_bytes(), +// ), +// ), +// ) +// .expect("Sending start failure TM failed"); +// } +// // Should never fail after size check +// let mode_submode = ModeAndSubmode::from_be_bytes( +// app_data[4..4 + ModeAndSubmode::raw_len()] +// .try_into() +// .unwrap(), +// ) +// .unwrap(); +// forward_mode_request(target_id, ModeRequest::SetMode(mode_submode)); +// } +// Subservice::TcReadMode => { +// let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); +// forward_mode_request(target_id, ModeRequest::ReadMode); +// } +// Subservice::TcAnnounceMode => { +// let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); +// forward_mode_request(target_id, ModeRequest::AnnounceMode); +// } +// Subservice::TcAnnounceModeRecursive => { +// let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap()); +// forward_mode_request(target_id, ModeRequest::AnnounceModeRecursive); +// } +// _ => { +// warn!("Can not process mode request with subservice {subservice:?}"); +// invalid_subservice_handler(); +// valid_subservice = false; +// } +// } +// if valid_subservice { +// self.tm_args +// .verif_reporter +// .start_success(token, Some(self.stamp_helper.stamp())) +// .expect("sending start success TM failed"); +// } +// } else { +// invalid_subservice_handler(); +// } +// } +// }
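// ---------------------------------------------------------------------------
// Hedged sketch: the pus/mod.rs hunk above deduplicates send_tm()/send_tc() by
// routing both through one generic send_tmtc() helper that accepts anything
// serializable as a PUS packet, writes it into a shared pool, and forwards only
// the store address over an mpsc channel. The Packet trait, Pool type, and
// String-based error handling below are simplified stand-ins for spacepackets'
// SerializablePusPacket and the sat-rs store types, not the real APIs.
use std::sync::{mpsc, Arc, RwLock};

// Stand-in for SerializablePusPacket: a packet that knows its serialized size
// and can write itself into a caller-provided buffer.
trait Packet {
    fn len_packed(&self) -> usize;
    fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, String>;
}

// Very small stand-in for the shared TM/TC pool.
struct Pool {
    slots: Vec<Vec<u8>>,
}

impl Pool {
    // Reserve a slot of the requested size and hand back its address plus a
    // mutable slice to serialize into.
    fn free_element(&mut self, len: usize) -> (usize, &mut [u8]) {
        self.slots.push(vec![0; len]);
        let addr = self.slots.len() - 1;
        (addr, self.slots.last_mut().unwrap().as_mut_slice())
    }
}

struct InStoreSender {
    store: Arc<RwLock<Pool>>,
    sender: mpsc::Sender<usize>,
}

impl InStoreSender {
    // One helper serves both telemetry and telecommands: serialize the packet
    // into the pool and send the resulting store address to the consumer.
    fn send_tmtc(&mut self, packet: &impl Packet) -> Result<(), String> {
        let mut pool = self.store.write().map_err(|_| "lock poisoned".to_string())?;
        let (addr, slice) = pool.free_element(packet.len_packed());
        packet.write_to_bytes(slice)?;
        self.sender.send(addr).map_err(|e| e.to_string())
    }
}

// Minimal packet implementation and usage so the sketch runs end to end.
struct RawPacket(Vec<u8>);

impl Packet for RawPacket {
    fn len_packed(&self) -> usize {
        self.0.len()
    }
    fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, String> {
        buf.copy_from_slice(&self.0);
        Ok(self.0.len())
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let store = Arc::new(RwLock::new(Pool { slots: Vec::new() }));
    let mut sender = InStoreSender { store: store.clone(), sender: tx };
    sender.send_tmtc(&RawPacket(vec![0x17, 0x01])).unwrap();
    println!("packet stored at slot {}", rx.recv().unwrap());
}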
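// ---------------------------------------------------------------------------
// Hedged sketch: the satrs-example changes above move away from handling every
// PUS service inside PusReceiver and towards routing, where accepted
// telecommands are forwarded to dedicated per-service workers over mpsc
// channels keyed by the PUS service ID, and unknown services are reported back
// so the caller can emit a verification failure. The ServiceId enum, the
// AcceptedTc alias, and the channel layout are simplified stand-ins for the
// sat-rs types, not the real APIs.
use std::sync::mpsc::{channel, Receiver, Sender};

// Stand-in for the (StoreAddr, VerificationToken) pair of an accepted TC.
type AcceptedTc = (usize, u32);

// Standard ECSS PUS service numbers for the services handled in the example.
#[derive(Debug, Clone, Copy)]
enum ServiceId {
    Housekeeping = 3,
    Event = 5,
    Scheduling = 11,
    Test = 17,
}

struct TcRouter {
    test_tx: Sender<AcceptedTc>,
    event_tx: Sender<AcceptedTc>,
    sched_tx: Sender<AcceptedTc>,
    hk_tx: Sender<AcceptedTc>,
}

impl TcRouter {
    // Forward the accepted TC to the worker responsible for its service;
    // unknown or closed services are returned as an error.
    fn route(&self, service: u8, tc: AcceptedTc) -> Result<(), u8> {
        match service {
            x if x == ServiceId::Test as u8 => self.test_tx.send(tc).map_err(|_| x),
            x if x == ServiceId::Event as u8 => self.event_tx.send(tc).map_err(|_| x),
            x if x == ServiceId::Scheduling as u8 => self.sched_tx.send(tc).map_err(|_| x),
            x if x == ServiceId::Housekeeping as u8 => self.hk_tx.send(tc).map_err(|_| x),
            unknown => Err(unknown),
        }
    }
}

fn main() {
    let (test_tx, test_rx): (Sender<AcceptedTc>, Receiver<AcceptedTc>) = channel();
    let (event_tx, _event_rx) = channel();
    let (sched_tx, _sched_rx) = channel();
    let (hk_tx, _hk_rx) = channel();
    let router = TcRouter { test_tx, event_tx, sched_tx, hk_tx };
    // Route a ping TC (service 17) to the test service worker.
    router.route(ServiceId::Test as u8, (0, 1)).unwrap();
    assert_eq!(test_rx.recv().unwrap(), (0, 1));
}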