fix some of the examples
Some checks failed: Rust/sat-rs/pipeline/pr-main (there was a failure building this commit)

Robin Müller 2024-02-01 17:43:11 +01:00
parent 5d7672e9c2
commit 2a84b04c47
Signed by: muellerr
GPG Key ID: A649FB78196E3849
6 changed files with 113 additions and 83 deletions

View File

@@ -13,10 +13,10 @@
 //! # Example
 //!
 //! ```
-//! use satrs_core::pool::{StaticMemoryPool, PoolCfg, PoolProvider};
+//! use satrs_core::pool::{StaticMemoryPool, StaticPoolConfig, PoolProvider};
 //!
 //! // 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes
-//! let pool_cfg = PoolCfg::new(vec![(4, 4), (2, 8), (1, 16)]);
+//! let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)]);
 //! let mut local_pool = StaticMemoryPool::new(pool_cfg);
 //! let mut addr;
 //! {
@@ -229,7 +229,7 @@ pub trait PoolProvider {
 #[cfg(feature = "alloc")]
 mod alloc_mod {
-    use super::{StaticPoolAddr, PoolProvider};
+    use super::{PoolProvider, StaticPoolAddr};
     use crate::pool::{NumBlocks, StoreAddr, StoreError, StoreIdError};
     use alloc::boxed::Box;
     use alloc::vec;
@@ -554,7 +554,7 @@ mod alloc_mod {
 mod tests {
     use crate::pool::{
         PoolGuard, PoolProvider, PoolRwGuard, StaticMemoryPool, StaticPoolAddr, StaticPoolConfig,
-        StoreAddr, StoreError, StoreIdError, POOL_MAX_SIZE,
+        StoreError, StoreIdError, POOL_MAX_SIZE, PoolProviderWithGuards,
     };
     use std::vec;
@@ -624,11 +624,10 @@ mod tests {
         let (addr, buf_ref) = res.unwrap();
         assert_eq!(
             addr,
-            StaticPoolAddr {
+            u64::from(StaticPoolAddr {
                 pool_idx: 2,
                 packet_idx: 0
-            }
-            .into()
+            })
         );
         assert_eq!(buf_ref.len(), 12);
     }
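The pool changes above boil down to a rename (`PoolCfg` becomes `StaticPoolConfig`) plus an explicit `u64::from` conversion in the test assertion, where a bare `.into()` no longer infers the target type. As a quick illustration of the renamed API, here is a minimal sketch assembled only from the doc example above and the `pool.add()` usage visible elsewhere in this commit; the byte values and the `println!` are illustrative, not taken from the diff:

```rust
use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};

fn main() {
    // 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes, as in the doc example.
    let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)]);
    let mut local_pool = StaticMemoryPool::new(pool_cfg);
    // add() stores the slice in the smallest fitting bucket and returns a store address,
    // which can later be used to read the data back or delete it.
    let addr = local_pool
        .add(&[0xCA, 0xFE, 0xBA, 0xBE])
        .expect("adding data to the pool failed");
    println!("stored 4 bytes at {addr:?}");
}
```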

View File

@@ -924,7 +924,7 @@ pub mod tests {
     use spacepackets::ecss::{PusPacket, WritablePusPacket};
     use spacepackets::CcsdsPacket;
-    use crate::pool::{PoolCfg, SharedPool, StaticMemoryPool, StoreAddr};
+    use crate::pool::{StaticPoolConfig, SharedPool, StaticMemoryPool, StoreAddr};
     use crate::pus::verification::RequestId;
     use crate::tmtc::tm_helper::SharedTmStore;
@@ -987,7 +987,7 @@ pub mod tests {
        ///
        /// The PUS service handler is instantiated with a [EcssTcInStoreConverter].
        pub fn new() -> (Self, PusServiceHelper<EcssTcInStoreConverter>) {
-           let pool_cfg = PoolCfg::new(vec![(16, 16), (8, 32), (4, 64)]);
+           let pool_cfg = StaticPoolConfig::new(vec![(16, 16), (8, 32), (4, 64)]);
            let tc_pool = StaticMemoryPool::new(pool_cfg.clone());
            let tm_pool = StaticMemoryPool::new(pool_cfg);
            let shared_tc_pool = SharedPool::new(RwLock::new(Box::new(tc_pool)));

View File

@@ -6,7 +6,7 @@ use core::fmt::Debug;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 use spacepackets::ecss::scheduling::TimeWindowType;
-use spacepackets::ecss::tc::{GenericPusTcSecondaryHeader, IsPusTelecommand};
+use spacepackets::ecss::tc::{GenericPusTcSecondaryHeader, IsPusTelecommand, PusTcReader};
 use spacepackets::ecss::PusPacket;
 use spacepackets::time::{CcsdsTimeProvider, TimeReader, UnixTimestamp};
 use spacepackets::CcsdsPacket;
@@ -149,12 +149,6 @@ pub trait PusSchedulerInterface {
     /// but should not release them to be executed.
     fn disable(&mut self);
-    fn insert_wrapped_tc<TimeProvider>(
-        &mut self,
-        pus_tc: &(impl IsPusTelecommand + PusPacket + GenericPusTcSecondaryHeader),
-        pool: &mut (impl PoolProvider + ?Sized),
-    ) -> Result<TcInfo, ScheduleError>;
     /// Insert a telecommand which was already unwrapped from the outer Service 11 packet and stored
     /// inside the telecommand packet pool.
     fn insert_unwrapped_and_stored_tc(
@@ -163,6 +157,29 @@
         info: TcInfo,
     ) -> Result<(), ScheduleError>;
+    /// Insert a telecommand based on the fully wrapped time-tagged telecommand. The timestamp
+    /// provider needs to be supplied via a generic.
+    fn insert_wrapped_tc<TimeProvider>(
+        &mut self,
+        pus_tc: &(impl IsPusTelecommand + PusPacket + GenericPusTcSecondaryHeader),
+        pool: &mut (impl PoolProvider + ?Sized),
+    ) -> Result<TcInfo, ScheduleError> {
+        if PusPacket::service(pus_tc) != 11 {
+            return Err(ScheduleError::WrongService);
+        }
+        if PusPacket::subservice(pus_tc) != 4 {
+            return Err(ScheduleError::WrongSubservice);
+        }
+        if pus_tc.user_data().is_empty() {
+            return Err(ScheduleError::TcDataEmpty);
+        }
+        let user_data = pus_tc.user_data();
+        let stamp: Self::TimeProvider = TimeReader::from_bytes(user_data)?;
+        let unix_stamp = stamp.unix_stamp();
+        let stamp_len = stamp.len_as_bytes();
+        self.insert_unwrapped_tc(unix_stamp, &user_data[stamp_len..], pool)
+    }
     /// Insert a telecommand which was already unwrapped from the outer Service 11 packet but still
     /// needs to be stored inside the telecommand pool.
     fn insert_unwrapped_tc(
@@ -170,7 +187,22 @@
         time_stamp: UnixTimestamp,
         tc: &[u8],
         pool: &mut (impl PoolProvider + ?Sized),
-    ) -> Result<TcInfo, ScheduleError>;
+    ) -> Result<TcInfo, ScheduleError> {
+        let check_tc = PusTcReader::new(tc)?;
+        if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 {
+            return Err(ScheduleError::NestedScheduledTc);
+        }
+        let req_id = RequestId::from_tc(&check_tc.0);
+        match pool.add(tc) {
+            Ok(addr) => {
+                let info = TcInfo::new(addr, req_id);
+                self.insert_unwrapped_and_stored_tc(time_stamp, info)?;
+                Ok(info)
+            }
+            Err(err) => Err(err.into()),
+        }
+    }
 }
 #[cfg(feature = "alloc")]
@@ -184,12 +216,10 @@ pub mod alloc_mod {
     use core::fmt::{Display, Formatter};
     use core::time::Duration;
     use spacepackets::ecss::scheduling::TimeWindowType;
-    use spacepackets::ecss::tc::{
-        GenericPusTcSecondaryHeader, IsPusTelecommand, PusTc, PusTcReader,
-    };
+    use spacepackets::ecss::tc::{PusTc, PusTcReader};
     use spacepackets::ecss::{PusError, PusPacket};
     use spacepackets::time::cds::DaysLen24Bits;
-    use spacepackets::time::{cds, CcsdsTimeProvider, TimeReader, TimestampError, UnixTimestamp};
+    use spacepackets::time::{cds, CcsdsTimeProvider, TimestampError, UnixTimestamp};
     #[cfg(feature = "std")]
     use std::time::SystemTimeError;
@@ -636,29 +666,6 @@ pub mod alloc_mod {
            self.enabled = false;
        }
-       /// Insert a telecommand based on the fully wrapped time-tagged telecommand. The timestamp
-       /// provider needs to be supplied via a generic.
-       fn insert_wrapped_tc<TimeProvider>(
-           &mut self,
-           pus_tc: &(impl IsPusTelecommand + PusPacket + GenericPusTcSecondaryHeader),
-           pool: &mut (impl PoolProvider + ?Sized),
-       ) -> Result<TcInfo, ScheduleError> {
-           if PusPacket::service(pus_tc) != 11 {
-               return Err(ScheduleError::WrongService);
-           }
-           if PusPacket::subservice(pus_tc) != 4 {
-               return Err(ScheduleError::WrongSubservice);
-           }
-           if pus_tc.user_data().is_empty() {
-               return Err(ScheduleError::TcDataEmpty);
-           }
-           let user_data = pus_tc.user_data();
-           let stamp: Self::TimeProvider = TimeReader::from_bytes(user_data)?;
-           let unix_stamp = stamp.unix_stamp();
-           let stamp_len = stamp.len_as_bytes();
-           self.insert_unwrapped_tc(unix_stamp, &user_data[stamp_len..], pool)
-       }
        fn insert_unwrapped_and_stored_tc(
            &mut self,
            time_stamp: UnixTimestamp,
@@ -681,30 +688,6 @@
            }
            Ok(())
        }
-       /// Insert a telecommand which was already unwrapped from the outer Service 11 packet but still
-       /// needs to be stored inside the telecommand pool.
-       fn insert_unwrapped_tc(
-           &mut self,
-           time_stamp: UnixTimestamp,
-           tc: &[u8],
-           pool: &mut (impl PoolProvider + ?Sized),
-       ) -> Result<TcInfo, ScheduleError> {
-           let check_tc = PusTcReader::new(tc)?;
-           if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 {
-               return Err(ScheduleError::NestedScheduledTc);
-           }
-           let req_id = RequestId::from_tc(&check_tc.0);
-           match pool.add(tc) {
-               Ok(addr) => {
-                   let info = TcInfo::new(addr, req_id);
-                   self.insert_unwrapped_and_stored_tc(time_stamp, info)?;
-                   Ok(info)
-               }
-               Err(err) => Err(err.into()),
-           }
-       }
    }
 }
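Taken together, these hunks move `insert_wrapped_tc` and `insert_unwrapped_tc` out of the concrete scheduler in `alloc_mod` and turn them into default methods on the `PusSchedulerInterface` trait itself, so an implementor now only supplies the five storage-specific methods and inherits the Service 11 wrapping and unwrapping logic. A minimal sketch under that reading (the `NoopScheduler` name is illustrative; the `TestScheduler` added in the next file follows the same pattern):

```rust
use spacepackets::time::{cds, UnixTimestamp};

use crate::pool::{PoolProvider, StoreError};
use crate::pus::scheduler::{PusSchedulerInterface, ScheduleError, TcInfo};

/// Illustrative no-op scheduler: only the required trait methods are written out,
/// insert_wrapped_tc() and insert_unwrapped_tc() come from the trait's new
/// default implementations.
pub struct NoopScheduler {
    enabled: bool,
}

impl PusSchedulerInterface for NoopScheduler {
    type TimeProvider = cds::TimeProvider;

    fn reset(&mut self, _store: &mut (impl PoolProvider + ?Sized)) -> Result<(), StoreError> {
        Ok(())
    }

    fn is_enabled(&self) -> bool {
        self.enabled
    }

    fn enable(&mut self) {
        self.enabled = true;
    }

    fn disable(&mut self) {
        self.enabled = false;
    }

    fn insert_unwrapped_and_stored_tc(
        &mut self,
        _time_stamp: UnixTimestamp,
        _info: TcInfo,
    ) -> Result<(), ScheduleError> {
        // A real implementation would remember the release time and TC info here.
        Ok(())
    }
}
```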

View File

@@ -170,18 +170,23 @@ impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerInterface>
 #[cfg(test)]
 mod tests {
+    use crate::pus::scheduler::RequestId as RequestIdSched;
     use crate::{
         events::EventU32,
         pool::SharedPool,
         pus::{
-            scheduler::PusScheduler,
+            scheduler::{PusScheduler, PusSchedulerInterface, TcInfo},
             tests::{PusServiceHandlerWithStoreCommon, PusTestHarness},
             verification::{RequestId, TcStateAccepted, VerificationToken},
             EcssTcInStoreConverter, PusPacketHandlerResult, PusPacketHandlingError,
         },
     };
+    use alloc::collections::VecDeque;
     use delegate::delegate;
-    use spacepackets::ecss::{tc::PusTcCreator, tm::PusTmReader};
+    use spacepackets::{
+        ecss::{tc::PusTcCreator, tm::PusTmReader},
+        time::cds,
+    };
     use super::PusService11SchedHandler;
@@ -217,6 +222,50 @@ mod tests {
        }
    }
+    pub struct TestScheduler {
+        reset_count: u32,
+        enabled: bool,
+        enabled_count: u32,
+        disabled_count: u32,
+        inserted_tcs: VecDeque<TcInfo>,
+        current_addr: u64,
+    }
+    impl PusSchedulerInterface for TestScheduler {
+        type TimeProvider = cds::TimeProvider;
+        fn reset(
+            &mut self,
+            store: &mut (impl crate::pool::PoolProvider + ?Sized),
+        ) -> Result<(), crate::pool::StoreError> {
+            self.reset_count += 1;
+            Ok(())
+        }
+        fn is_enabled(&self) -> bool {
+            self.enabled
+        }
+        fn enable(&mut self) {
+            self.enabled_count += 1;
+            self.enabled = true;
+        }
+        fn disable(&mut self) {
+            self.disabled_count += 1;
+            self.enabled = false;
+        }
+        fn insert_unwrapped_and_stored_tc(
+            &mut self,
+            time_stamp: spacepackets::time::UnixTimestamp,
+            info: crate::pus::scheduler::TcInfo,
+        ) -> Result<(), crate::pus::scheduler::ScheduleError> {
+            self.inserted_tcs.push_back(info);
+            Ok(())
+        }
+    }
     #[test]
     fn test_scheduling_tc() {}
 }
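The `TestScheduler` above is a counting test double: it tallies lifecycle calls and queues every inserted `TcInfo` instead of touching a real pool, while inheriting the new default `insert_wrapped_tc`/`insert_unwrapped_tc` implementations from the trait. A hypothetical usage sketch inside this test module (the struct has no `Default` impl in the diff, so the fields are spelled out; the test name is illustrative):

```rust
#[test]
fn test_scheduler_double_counts_calls() {
    let mut scheduler = TestScheduler {
        reset_count: 0,
        enabled: true,
        enabled_count: 0,
        disabled_count: 0,
        inserted_tcs: VecDeque::new(),
        current_addr: 0,
    };
    assert!(scheduler.is_enabled());
    scheduler.disable();
    scheduler.enable();
    // The double simply records how often each lifecycle method was called.
    assert_eq!(scheduler.disabled_count, 1);
    assert_eq!(scheduler.enabled_count, 1);
    assert!(scheduler.inserted_tcs.is_empty());
}
```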

View File

@ -15,7 +15,7 @@
//! ``` //! ```
//! use std::sync::{Arc, mpsc, RwLock}; //! use std::sync::{Arc, mpsc, RwLock};
//! use std::time::Duration; //! use std::time::Duration;
//! use satrs_core::pool::{StaticMemoryPool, PoolCfg, PoolProvider, SharedPool}; //! use satrs_core::pool::{StaticMemoryPool, StaticPoolConfig, PoolProvider, SharedPool};
//! use satrs_core::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender}; //! use satrs_core::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender};
//! use satrs_core::seq_count::SeqCountProviderSimple; //! use satrs_core::seq_count::SeqCountProviderSimple;
//! use satrs_core::pus::MpscTmInStoreSender; //! use satrs_core::pus::MpscTmInStoreSender;
@ -28,7 +28,7 @@
//! const EMPTY_STAMP: [u8; 7] = [0; 7]; //! const EMPTY_STAMP: [u8; 7] = [0; 7];
//! const TEST_APID: u16 = 0x02; //! const TEST_APID: u16 = 0x02;
//! //!
//! let pool_cfg = PoolCfg::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]); //! let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
//! let tm_pool = StaticMemoryPool::new(pool_cfg.clone()); //! let tm_pool = StaticMemoryPool::new(pool_cfg.clone());
//! let shared_tm_store = SharedTmStore::new(Box::new(tm_pool)); //! let shared_tm_store = SharedTmStore::new(Box::new(tm_pool));
//! let tm_store = shared_tm_store.clone_backing_pool(); //! let tm_store = shared_tm_store.clone_backing_pool();
@ -1449,12 +1449,11 @@ mod tests {
fn base_init(api_sel: bool) -> (TestBase<'static>, VerificationToken<TcStateNone>) { fn base_init(api_sel: bool) -> (TestBase<'static>, VerificationToken<TcStateNone>) {
let mut reporter = base_reporter(); let mut reporter = base_reporter();
let (tc, req_id) = base_tc_init(None); let (tc, req_id) = base_tc_init(None);
let init_tok; let init_tok = if api_sel {
if api_sel { reporter.add_tc_with_req_id(req_id)
init_tok = reporter.add_tc_with_req_id(req_id);
} else { } else {
init_tok = reporter.add_tc(&tc); reporter.add_tc(&tc)
} };
(TestBase { vr: reporter, tc }, init_tok) (TestBase { vr: reporter, tc }, init_tok)
} }
@ -1477,7 +1476,7 @@ mod tests {
time_stamp: EMPTY_STAMP, time_stamp: EMPTY_STAMP,
}, },
additional_data: None, additional_data: None,
req_id: req_id.clone(), req_id: *req_id,
}; };
let mut service_queue = sender.service_queue.borrow_mut(); let mut service_queue = sender.service_queue.borrow_mut();
assert_eq!(service_queue.len(), 1); assert_eq!(service_queue.len(), 1);
@ -1507,7 +1506,7 @@ mod tests {
fn test_basic_acceptance_success() { fn test_basic_acceptance_success() {
let (b, tok) = base_init(false); let (b, tok) = base_init(false);
let mut sender = TestSender::default(); let mut sender = TestSender::default();
b.vr.acceptance_success(tok, &mut sender, Some(&EMPTY_STAMP)) b.vr.acceptance_success(tok, &sender, Some(&EMPTY_STAMP))
.expect("Sending acceptance success failed"); .expect("Sending acceptance success failed");
acceptance_check(&mut sender, &tok.req_id); acceptance_check(&mut sender, &tok.req_id);
} }
@ -1607,7 +1606,7 @@ mod tests {
#[test] #[test]
fn test_basic_acceptance_failure_with_fail_data() { fn test_basic_acceptance_failure_with_fail_data() {
let (b, tok) = base_init(false); let (b, tok) = base_init(false);
let mut sender = TestSender::default(); let sender = TestSender::default();
let fail_code = EcssEnumU8::new(10); let fail_code = EcssEnumU8::new(10);
let fail_data = EcssEnumU32::new(12); let fail_data = EcssEnumU32::new(12);
let mut fail_data_raw = [0; 4]; let mut fail_data_raw = [0; 4];
@ -1617,7 +1616,7 @@ mod tests {
&fail_code, &fail_code,
Some(fail_data_raw.as_slice()), Some(fail_data_raw.as_slice()),
); );
b.vr.acceptance_failure(tok, &mut sender, fail_params) b.vr.acceptance_failure(tok, &sender, fail_params)
.expect("Sending acceptance success failed"); .expect("Sending acceptance success failed");
let cmp_info = TmInfo { let cmp_info = TmInfo {
common: CommonTmInfo { common: CommonTmInfo {
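A consistent pattern runs through the last three hunks: the verification reporter now takes its sender argument by shared reference (`&sender` rather than `&mut sender`, with the now-unneeded `mut` binding dropped), and the request ID is copied out with `*req_id` instead of `req_id.clone()`, which suggests `RequestId` implements `Copy`. Both observations are inferred from the hunks shown here rather than stated by the commit.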

View File

@@ -1,4 +1,4 @@
-use satrs_core::pool::{PoolCfg, PoolGuard, PoolProvider, StaticMemoryPool, StoreAddr};
+use satrs_core::pool::{PoolGuard, PoolProvider, StaticMemoryPool, StoreAddr, StaticPoolConfig};
 use std::ops::DerefMut;
 use std::sync::mpsc;
 use std::sync::mpsc::{Receiver, Sender};
@@ -9,7 +9,7 @@ const DUMMY_DATA: [u8; 4] = [0, 1, 2, 3];
 #[test]
 fn threaded_usage() {
-    let pool_cfg = PoolCfg::new(vec![(16, 6), (32, 3), (8, 12)]);
+    let pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)]);
     let shared_pool = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
     let shared_clone = shared_pool.clone();
     let (tx, rx): (Sender<StoreAddr>, Receiver<StoreAddr>) = mpsc::channel();
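For context, the pattern this integration test exercises is a pool shared across threads behind `Arc<RwLock<...>>`, with store addresses handed between threads over an mpsc channel. A minimal sketch of that pattern, assuming only the API visible in this commit (the thread body and the `println!` are illustrative):

```rust
use std::sync::{mpsc, Arc, RwLock};
use std::thread;

use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig, StoreAddr};

fn main() {
    let pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)]);
    let shared_pool = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
    let producer_pool = shared_pool.clone();
    let (tx, rx): (mpsc::Sender<StoreAddr>, mpsc::Receiver<StoreAddr>) = mpsc::channel();

    // Producer thread: hold the write lock only long enough to store the data,
    // then hand the resulting store address to the consumer.
    let producer = thread::spawn(move || {
        let addr = producer_pool
            .write()
            .expect("pool lock poisoned")
            .add(&[0, 1, 2, 3])
            .expect("adding data failed");
        tx.send(addr).expect("sending store address failed");
    });

    let addr = rx.recv().expect("receiving store address failed");
    producer.join().unwrap();
    // The consumer can now access the stored data under its own short-lived lock.
    println!("received store address {addr:?}");
}
```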