From 19d43b1b2ccd0cf24eb42f81ca8e6e247c3842bb Mon Sep 17 00:00:00 2001
From: Robin Mueller
Date: Wed, 25 Jan 2023 01:40:44 +0100
Subject: [PATCH 1/3] Extend scheduler and tests

1. `reset` and `release_telecommands`: Add store handling to avoid memory leaks
2. Add first documentation
3. Add `new_with_current_init_time` method.
---
 .idea/runConfigurations/Test_satrs_core.xml |  19 ++
 satrs-core/src/pus/scheduling.rs            | 222 +++++++++++---------
 2 files changed, 147 insertions(+), 94 deletions(-)
 create mode 100644 .idea/runConfigurations/Test_satrs_core.xml

diff --git a/.idea/runConfigurations/Test_satrs_core.xml b/.idea/runConfigurations/Test_satrs_core.xml
new file mode 100644
index 0000000..fe5e4da
--- /dev/null
+++ b/.idea/runConfigurations/Test_satrs_core.xml
@@ -0,0 +1,19 @@
+
+
+
\ No newline at end of file
diff --git a/satrs-core/src/pus/scheduling.rs b/satrs-core/src/pus/scheduling.rs
index 3205a59..ec2744c 100644
--- a/satrs-core/src/pus/scheduling.rs
+++ b/satrs-core/src/pus/scheduling.rs
@@ -1,12 +1,20 @@
-use crate::pool::StoreAddr;
+use crate::pool::{PoolProvider, StoreAddr, StoreError};
 use alloc::collections::btree_map::{Entry, Range};
+use alloc::vec;
+use alloc::vec::Vec;
 use core::time::Duration;
 use spacepackets::time::UnixTimestamp;
 use std::collections::BTreeMap;
+#[cfg(feature = "std")]
 use std::time::SystemTimeError;
-use std::vec;
-use std::vec::Vec;
 
+/// This is the core data structure for scheduling PUS telecommands with [alloc] support.
+///
+/// The ECSS standard specifies that the PUS scheduler can be enabled and disabled.
+/// A disabled scheduler should still delete commands where the execution time has been reached
+/// but should not release them to be executed.
+///
+/// Currently, sub-schedules and groups are not supported.
 #[derive(Debug)]
 pub struct PusScheduler {
     tc_map: BTreeMap<UnixTimestamp, Vec<StoreAddr>>,
@@ -16,6 +24,14 @@ pub struct PusScheduler {
 }
 
 impl PusScheduler {
+    /// Create a new PUS scheduler.
+    ///
+    /// # Arguments
+    ///
+    /// * `init_current_time` - The time to initialize the scheduler with.
+    /// * `time_margin` - This time margin is used when inserting new telecommands into the
+    ///     schedule. If the release time of a new telecommand is earlier than the time margin
+    ///     added to the current time, it will not be inserted into the schedule.
     pub fn new(init_current_time: UnixTimestamp, time_margin: Duration) -> Self {
         PusScheduler {
             tc_map: Default::default(),
@@ -25,6 +41,13 @@ impl PusScheduler {
         }
     }
 
+    /// Like [Self::new], but sets the `init_current_time` parameter to the current system time.
+    #[cfg(feature = "std")]
+    #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
+    pub fn new_with_current_init_time(time_margin: Duration) -> Result<Self, SystemTimeError> {
+        Ok(Self::new(UnixTimestamp::from_now()?, time_margin))
+    }
+
     pub fn num_scheduled_telecommands(&self) -> u64 {
         let mut num_entries = 0;
         for entries in &self.tc_map {
@@ -45,9 +68,25 @@ impl PusScheduler {
         self.enabled = false;
     }
 
-    pub fn reset(&mut self) {
+    /// This will disable the scheduler and clear the schedule as specified in 6.11.4.4.
+    /// Be careful with this method as it will delete all the commands in the schedule.
+    ///
+    /// The holding store for the telecommands needs to be passed so all the stored telecommands
+    /// can be deleted to avoid a memory leak. If at least one deletion operation fails, the error
+    /// will be returned but the method will still try to delete all the commands in the schedule.
+    pub fn reset(&mut self, store: &mut impl PoolProvider) -> Result<(), StoreError> {
         self.enabled = false;
+        let mut deletion_ok = Ok(());
+        for tc_lists in &mut self.tc_map {
+            for tc in tc_lists.1 {
+                let res = store.delete(*tc);
+                if res.is_err() {
+                    deletion_ok = res;
+                }
+            }
+        }
         self.tc_map.clear();
+        deletion_ok
     }
 
     pub fn update_time(&mut self, current_time: UnixTimestamp) {
@@ -84,27 +123,42 @@ impl PusScheduler {
         Ok(())
     }
 
-    pub fn release_telecommands<R: FnMut(bool, &StoreAddr)>(&mut self, mut releaser: R) {
+    /// Utility method which calls [Self::telecommands_to_release] and then calls a releaser
+    /// closure for each telecommand which should be released. This function will also delete
+    /// the telecommands from the holding store after calling the release closure.
+    pub fn release_telecommands<R: FnMut(bool, &StoreAddr)>(
+        &mut self,
+        mut releaser: R,
+        store: &mut impl PoolProvider,
+    ) -> Result<u64, (u64, StoreError)> {
         let tcs_to_release = self.telecommands_to_release();
+        let mut released_tcs = 0;
+        let mut store_error = Ok(());
         for tc in tcs_to_release {
             for addr in tc.1 {
                 releaser(self.enabled, addr);
+                released_tcs += 1;
+                let res = store.delete(*addr);
+                if res.is_err() {
+                    store_error = res;
+                }
             }
         }
         self.tc_map.retain(|k, _| k > &self.current_time);
+        store_error
+            .map(|_| released_tcs)
+            .map_err(|e| (released_tcs, e))
     }
 }
 
 #[cfg(test)]
 mod tests {
-    use crate::pool::StoreAddr;
+    use crate::pool::{LocalPool, PoolCfg, PoolProvider, StoreAddr};
     use crate::pus::scheduling::PusScheduler;
-    use spacepackets::ecss::PacketTypeCodes::UnsignedInt;
+    use alloc::vec::Vec;
     use spacepackets::time::UnixTimestamp;
-    use std::sync::mpsc;
-    use std::sync::mpsc::{channel, Receiver, TryRecvError};
     use std::time::Duration;
-    use std::vec::Vec;
+    #[allow(unused_imports)]
     use std::{println, vec};
 
     #[test]
@@ -118,44 +172,33 @@ mod tests {
     #[test]
     fn reset() {
+        let mut pool = LocalPool::new(PoolCfg::new(vec![(10, 32), (5, 64)]));
         let mut scheduler =
             PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
 
-        let worked = scheduler.insert_tc(
-            UnixTimestamp::new_only_seconds(100),
-            StoreAddr {
-                pool_idx: 0,
-                packet_idx: 1,
-            },
-        );
+        let first_addr = pool.add(&[0, 1, 2]).unwrap();
+        let worked = scheduler.insert_tc(UnixTimestamp::new_only_seconds(100), first_addr.clone());
 
         assert!(worked);
 
-        let worked = scheduler.insert_tc(
-            UnixTimestamp::new_only_seconds(200),
-            StoreAddr {
-                pool_idx: 0,
-                packet_idx: 2,
-            },
-        );
+        let second_addr = pool.add(&[2, 3, 4]).unwrap();
+        let worked = scheduler.insert_tc(UnixTimestamp::new_only_seconds(200), second_addr.clone());
 
         assert!(worked);
 
-        let worked = scheduler.insert_tc(
-            UnixTimestamp::new_only_seconds(300),
-            StoreAddr {
-                pool_idx: 0,
-                packet_idx: 2,
-            },
-        );
+        let third_addr = pool.add(&[5, 6, 7]).unwrap();
+        let worked = scheduler.insert_tc(UnixTimestamp::new_only_seconds(300), third_addr.clone());
 
         assert!(worked);
 
         assert_eq!(scheduler.num_scheduled_telecommands(), 3);
 
         assert!(scheduler.is_enabled());
 
-        scheduler.reset();
+        scheduler.reset(&mut pool).expect("deletion of TCs failed");
 
         assert!(!scheduler.is_enabled());
         assert_eq!(scheduler.num_scheduled_telecommands(), 0);
+        assert!(!pool.has_element_at(&first_addr).unwrap());
+        assert!(!pool.has_element_at(&second_addr).unwrap());
+        assert!(!pool.has_element_at(&third_addr).unwrap());
     }
 
     #[test]
@@ -205,69 +248,66 @@ mod tests {
         assert_eq!(scheduler.current_time(), &time);
     }
 
+    fn common_check(
+        enabled: bool,
+        store_addr: &StoreAddr,
+        expected_store_addrs: Vec<StoreAddr>,
+        counter: &mut usize,
+    ) {
+        assert_eq!(enabled, true);
+        assert!(expected_store_addrs.contains(store_addr));
+        *counter += 1;
+    }
+
     #[test]
     fn release_basic() {
+        let mut pool = LocalPool::new(PoolCfg::new(vec![(10, 32), (5, 64)]));
         let mut scheduler =
             PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
 
-        scheduler.insert_tc(
-            UnixTimestamp::new_only_seconds(100),
-            StoreAddr {
-                pool_idx: 0,
-                packet_idx: 1,
-            },
-        );
+        let first_addr = pool.add(&[2, 2, 2]).unwrap();
+        scheduler.insert_tc(UnixTimestamp::new_only_seconds(100), first_addr);
 
-        scheduler.insert_tc(
-            UnixTimestamp::new_only_seconds(200),
-            StoreAddr {
-                pool_idx: 0,
-                packet_idx: 2,
-            },
-        );
+        let second_addr = pool.add(&[5, 6, 7]).unwrap();
+        scheduler.insert_tc(UnixTimestamp::new_only_seconds(200), second_addr);
 
         let mut i = 0;
         let mut test_closure_1 = |boolvar: bool, store_addr: &StoreAddr| {
-            assert_eq!(boolvar, true);
-            assert_eq!(
-                store_addr,
-                &StoreAddr {
-                    pool_idx: 0,
-                    packet_idx: 1,
-                }
-            );
-            i += 1;
+            common_check(boolvar, store_addr, vec![first_addr], &mut i);
         };
 
         // test 1: too early, no tcs
         scheduler.update_time(UnixTimestamp::new_only_seconds(99));
-        scheduler.release_telecommands(&mut test_closure_1);
+        scheduler
+            .release_telecommands(&mut test_closure_1, &mut pool)
+            .expect("deletion failed");
 
         // test 2: exact time stamp of tc, releases 1 tc
         scheduler.update_time(UnixTimestamp::new_only_seconds(100));
-        scheduler.release_telecommands(&mut test_closure_1);
+        let mut released = scheduler
+            .release_telecommands(&mut test_closure_1, &mut pool)
+            .expect("deletion failed");
+        assert_eq!(released, 1);
+        assert!(!pool.has_element_at(&first_addr).unwrap());
 
         // test 3, late timestamp, release 1 overdue tc
         let mut test_closure_2 = |boolvar: bool, store_addr: &StoreAddr| {
-            assert_eq!(boolvar, true);
-            assert_eq!(
-                store_addr,
-                &StoreAddr {
-                    pool_idx: 0,
-                    packet_idx: 2,
-                }
-            );
-            i += 1;
+            common_check(boolvar, store_addr, vec![second_addr], &mut i);
         };
 
         scheduler.update_time(UnixTimestamp::new_only_seconds(206));
-        scheduler.release_telecommands(&mut test_closure_2);
+        released = scheduler
+            .release_telecommands(&mut test_closure_2, &mut pool)
+            .expect("deletion failed");
+        assert_eq!(released, 1);
+        assert!(!pool.has_element_at(&second_addr).unwrap());
 
         //test 4: no tcs left
-        scheduler.release_telecommands(&mut test_closure_2);
+        scheduler
+            .release_telecommands(&mut test_closure_2, &mut pool)
+            .expect("deletion failed");
 
         // check that 2 total tcs have been released
         assert_eq!(i, 2);
@@ -275,50 +315,44 @@ mod tests {
 
     #[test]
     fn release_multi_with_same_time() {
+        let mut pool = LocalPool::new(PoolCfg::new(vec![(10, 32), (5, 64)]));
         let mut scheduler =
             PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
 
-        scheduler.insert_tc(
-            UnixTimestamp::new_only_seconds(100),
-            StoreAddr {
-                pool_idx: 0,
-                packet_idx: 1,
-            },
-        );
+        let first_addr = pool.add(&[2, 2, 2]).unwrap();
+        scheduler.insert_tc(UnixTimestamp::new_only_seconds(100), first_addr);
 
-        scheduler.insert_tc(
-            UnixTimestamp::new_only_seconds(100),
-            StoreAddr {
-                pool_idx: 0,
-                packet_idx: 1,
-            },
-        );
+        let second_addr = pool.add(&[2, 2, 2]).unwrap();
+        scheduler.insert_tc(UnixTimestamp::new_only_seconds(100), second_addr);
 
         let mut i = 0;
         let mut test_closure = |boolvar: bool, store_addr: &StoreAddr| {
-            assert_eq!(boolvar, true);
-            assert_eq!(
-                store_addr,
-                &StoreAddr {
-                    pool_idx: 0,
-                    packet_idx: 1,
-                }
-            );
-            i += 1;
+            common_check(boolvar, store_addr, vec![first_addr, second_addr], &mut i);
         };
 
         // test 1: too early, no tcs
         scheduler.update_time(UnixTimestamp::new_only_seconds(99));
-        scheduler.release_telecommands(&mut test_closure);
+        let mut released = scheduler
+            .release_telecommands(&mut test_closure, &mut pool)
+            .expect("deletion failed");
+        assert_eq!(released, 0);
 
         // test 2: exact time stamp of tc, releases 2 tc
         scheduler.update_time(UnixTimestamp::new_only_seconds(100));
-        scheduler.release_telecommands(&mut test_closure);
+        released = scheduler
+            .release_telecommands(&mut test_closure, &mut pool)
+            .expect("deletion failed");
+        assert_eq!(released, 2);
+        assert!(!pool.has_element_at(&first_addr).unwrap());
+        assert!(!pool.has_element_at(&second_addr).unwrap());
 
         //test 3: no tcs left
-        scheduler.release_telecommands(&mut test_closure);
+        released = scheduler
+            .release_telecommands(&mut test_closure, &mut pool)
+            .expect("deletion failed");
+        assert_eq!(released, 0);
 
         // check that 2 total tcs have been released
         assert_eq!(i, 2);

From 36e63bc9a9a12bde1c9251409a376bb8eb616881 Mon Sep 17 00:00:00 2001
From: Robin Mueller
Date: Wed, 25 Jan 2023 10:43:11 +0100
Subject: [PATCH 2/3] closure returns a boolean whether to del or not

---
 satrs-core/src/pus/scheduling.rs | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/satrs-core/src/pus/scheduling.rs b/satrs-core/src/pus/scheduling.rs
index ec2744c..58ae535 100644
--- a/satrs-core/src/pus/scheduling.rs
+++ b/satrs-core/src/pus/scheduling.rs
@@ -126,21 +126,30 @@ impl PusScheduler {
     /// Utility method which calls [Self::telecommands_to_release] and then calls a releaser
     /// closure for each telecommand which should be released. This function will also delete
     /// the telecommands from the holding store after calling the release closure.
-    pub fn release_telecommands<R: FnMut(bool, &StoreAddr)>(
+    ///
+    /// # Arguments
+    ///
+    /// * `releaser` - Closure where the first argument is whether the scheduler is enabled and
+    ///     the second argument is the store address. This closure should return whether the
+    ///     command should be deleted.
+    /// * `store` - The holding store of the telecommands.
+    pub fn release_telecommands<R: FnMut(bool, &StoreAddr) -> bool>(
         &mut self,
         mut releaser: R,
-        store: &mut impl PoolProvider,
+        tc_store: &mut impl PoolProvider,
     ) -> Result<u64, (u64, StoreError)> {
         let tcs_to_release = self.telecommands_to_release();
         let mut released_tcs = 0;
         let mut store_error = Ok(());
         for tc in tcs_to_release {
             for addr in tc.1 {
-                releaser(self.enabled, addr);
+                let should_delete = releaser(self.enabled, addr);
                 released_tcs += 1;
-                let res = store.delete(*addr);
-                if res.is_err() {
-                    store_error = res;
+                if should_delete {
+                    let res = tc_store.delete(*addr);
+                    if res.is_err() {
+                        store_error = res;
+                    }
                 }
             }
         }
@@ -273,6 +282,7 @@ mod tests {
         let mut i = 0;
         let mut test_closure_1 = |boolvar: bool, store_addr: &StoreAddr| {
             common_check(boolvar, store_addr, vec![first_addr], &mut i);
+            true
         };
 
         // test 1: too early, no tcs
@@ -294,6 +304,7 @@ mod tests {
         // test 3, late timestamp, release 1 overdue tc
         let mut test_closure_2 = |boolvar: bool, store_addr: &StoreAddr| {
             common_check(boolvar, store_addr, vec![second_addr], &mut i);
+            true
         };
 
         scheduler.update_time(UnixTimestamp::new_only_seconds(206));
@@ -328,6 +339,7 @@ mod tests {
         let mut i = 0;
         let mut test_closure = |boolvar: bool, store_addr: &StoreAddr| {
             common_check(boolvar, store_addr, vec![first_addr, second_addr], &mut i);
+            true
         };
 
         // test 1: too early, no tcs

From 7a2d518a8c77688a45b63a6f6f25a307227efbb0 Mon Sep 17 00:00:00 2001
From: Robin Mueller
Date: Wed, 25 Jan 2023 10:52:24 +0100
Subject: [PATCH 3/3] docs fixes

---
 satrs-core/src/pus/mod.rs          | 13 +++++++------
 satrs-core/src/pus/scheduling.rs   | 10 +++++++++-
 satrs-core/src/pus/verification.rs |  2 +-
 satrs-core/src/seq_count.rs        |  2 +-
 4 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/satrs-core/src/pus/mod.rs b/satrs-core/src/pus/mod.rs
index 6bbc165..d0c1964 100644
--- a/satrs-core/src/pus/mod.rs
+++ b/satrs-core/src/pus/mod.rs
@@ -1,8 +1,4 @@
-//! All PUS support modules
-//!
-//! Currenty includes:
-//!
-//! 1. PUS Verification Service 1 module inside [verification]. Requires [alloc] support.
+//! # PUS support modules
 #[cfg(feature = "alloc")]
 use downcast_rs::{impl_downcast, Downcast};
 #[cfg(feature = "alloc")]
@@ -19,6 +15,9 @@ pub mod hk;
 pub mod scheduling;
 pub mod verification;
 
+#[cfg(feature = "alloc")]
+pub use alloc_mod::*;
+
 #[derive(Debug, Clone)]
 pub enum EcssTmErrorWithSend<E> {
     /// Errors related to sending the verification telemetry to a TM recipient
@@ -65,7 +64,7 @@ pub trait EcssTmSenderCore: Send {
 }
 
 #[cfg(feature = "alloc")]
-pub mod alloc_mod {
+mod alloc_mod {
     use super::*;
 
     /// Extension trait for [EcssTmSenderCore].
@@ -78,6 +77,8 @@ pub mod alloc_mod {
     ///
     /// [DynClone] allows cloning the trait object as long as the boxed object implements
     /// [Clone].
+    #[cfg(feature = "alloc")]
+    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
     pub trait EcssTmSender: EcssTmSenderCore + Downcast + DynClone {}
 
     /// Blanket implementation for all types which implement [EcssTmSenderCore] and are clonable.
diff --git a/satrs-core/src/pus/scheduling.rs b/satrs-core/src/pus/scheduling.rs
index 58ae535..2f91483 100644
--- a/satrs-core/src/pus/scheduling.rs
+++ b/satrs-core/src/pus/scheduling.rs
@@ -1,3 +1,4 @@
+//! # PUS Service 11 Scheduling Module
 use crate::pool::{PoolProvider, StoreAddr, StoreError};
 use alloc::collections::btree_map::{Entry, Range};
 use alloc::vec;
@@ -10,7 +11,14 @@ use std::time::SystemTimeError;
 /// This is the core data structure for scheduling PUS telecommands with [alloc] support.
 ///
-/// The ECSS standard specifies that the PUS scheduler can be enabled and disabled.
+/// It is assumed that the actual telecommand data is stored in a separate TC pool offering
+/// a [crate::pool::PoolProvider] API. This data structure just tracks the store addresses and their
+/// release times and offers a convenient API to insert and release telecommands and perform
+/// other functionality specified by the ECSS standard in section 6.11. The time is tracked
+/// as a [spacepackets::time::UnixTimestamp], but the only requirement on the user's timekeeping
+/// is that it can be converted to that timestamp.
+///
+/// The standard also specifies that the PUS scheduler can be enabled and disabled.
 /// A disabled scheduler should still delete commands where the execution time has been reached
 /// but should not release them to be executed.
 ///
 /// Currently, sub-schedules and groups are not supported.
diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs
index 8fba721..96c1073 100644
--- a/satrs-core/src/pus/verification.rs
+++ b/satrs-core/src/pus/verification.rs
@@ -1,4 +1,4 @@
-//! # PUS Verification Service 1 Module
+//! # PUS Service 1 Verification Module
 //!
 //! This module allows packaging and sending PUS Service 1 packets. It is conforming to section
 //! 8 of the PUS standard ECSS-E-ST-70-41C.
diff --git a/satrs-core/src/seq_count.rs b/satrs-core/src/seq_count.rs
index 6871491..b299756 100644
--- a/satrs-core/src/seq_count.rs
+++ b/satrs-core/src/seq_count.rs
@@ -9,7 +9,7 @@ pub use stdmod::*;
 ///
 /// The core functions are not mutable on purpose to allow easier usage with
 /// static structs when using the interior mutability pattern. This can be achieved by using
-/// [Cell], [RefCell] or atomic types.
+/// [Cell], [core::cell::RefCell] or atomic types.
 pub trait SequenceCountProviderCore<Raw> {
     fn get(&self) -> Raw;
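For reference, a minimal usage sketch of the scheduler API this series converges on. It is not part of the patches; it only reuses the `LocalPool`, `PoolCfg`, `PusScheduler`, `insert_tc`, `update_time` and `release_telecommands` calls exercised in the tests above, and it assumes the public `satrs_core::pool` and `satrs_core::pus::scheduling` crate paths plus a plain `main` wrapper for illustration:

use core::time::Duration;
use satrs_core::pool::{LocalPool, PoolCfg, PoolProvider};
use satrs_core::pus::scheduling::PusScheduler;
use spacepackets::time::UnixTimestamp;

fn main() {
    // Pool which owns the raw TC bytes; the scheduler only tracks store addresses.
    let mut pool = LocalPool::new(PoolCfg::new(vec![(10, 32), (5, 64)]));
    let mut scheduler =
        PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));

    // Store a dummy TC and schedule its release for t = 100 s.
    let addr = pool.add(&[0, 1, 2]).unwrap();
    scheduler.insert_tc(UnixTimestamp::new_only_seconds(100), addr);

    // Once the release time is reached, the releaser closure is called for each due TC.
    // Returning true tells the scheduler to delete the TC from the pool (patch 2 semantics).
    scheduler.update_time(UnixTimestamp::new_only_seconds(100));
    let released = scheduler
        .release_telecommands(|_enabled, _addr| true, &mut pool)
        .expect("TC deletion failed");
    assert_eq!(released, 1);
}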