diff --git a/satrs-core/src/pus/mod.rs b/satrs-core/src/pus/mod.rs
index 6bbc165..d0c1964 100644
--- a/satrs-core/src/pus/mod.rs
+++ b/satrs-core/src/pus/mod.rs
@@ -1,8 +1,4 @@
-//! All PUS support modules
-//!
-//! Currenty includes:
-//!
-//! 1. PUS Verification Service 1 module inside [verification]. Requires [alloc] support.
+//! # PUS support modules
#[cfg(feature = "alloc")]
use downcast_rs::{impl_downcast, Downcast};
#[cfg(feature = "alloc")]
@@ -19,6 +15,9 @@ pub mod hk;
pub mod scheduling;
pub mod verification;
+#[cfg(feature = "alloc")]
+pub use alloc_mod::*;
+
#[derive(Debug, Clone)]
pub enum EcssTmErrorWithSend {
/// Errors related to sending the verification telemetry to a TM recipient
@@ -65,7 +64,7 @@ pub trait EcssTmSenderCore: Send {
}
#[cfg(feature = "alloc")]
-pub mod alloc_mod {
+mod alloc_mod {
use super::*;
/// Extension trait for [EcssTmSenderCore].
@@ -78,6 +77,8 @@ pub mod alloc_mod {
///
/// [DynClone] allows cloning the trait object as long as the boxed object implements
/// [Clone].
+ #[cfg(feature = "alloc")]
+ #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub trait EcssTmSender: EcssTmSenderCore + Downcast + DynClone {}
/// Blanket implementation for all types which implement [EcssTmSenderCore] and are clonable.
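For context, the extension-trait pattern this hunk documents can be sketched standalone. `SenderCore`, `Sender` and `PrintSender` below are illustrative stand-ins, not crate items; only the `downcast_rs`/`dyn_clone` mechanics mirror the real code:

```rust
use downcast_rs::{impl_downcast, Downcast};
use dyn_clone::DynClone;

trait SenderCore: Send {
    fn send_raw(&self, data: &[u8]);
}

// Extension trait which adds downcasting and trait object cloning on top of
// the object-safe core trait.
trait Sender: SenderCore + Downcast + DynClone {}
impl_downcast!(Sender);
dyn_clone::clone_trait_object!(Sender);

// Blanket implementation: every clonable, 'static SenderCore automatically
// implements Sender.
impl<T> Sender for T where T: SenderCore + Clone + 'static {}

#[derive(Clone)]
struct PrintSender;

impl SenderCore for PrintSender {
    fn send_raw(&self, data: &[u8]) {
        println!("sending {} bytes", data.len());
    }
}

fn main() {
    let sender: Box<dyn Sender> = Box::new(PrintSender);
    let cloned = sender.clone(); // possible because of DynClone
    cloned.send_raw(&[1, 2, 3]);
}
```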
diff --git a/satrs-core/src/pus/scheduling.rs b/satrs-core/src/pus/scheduling.rs
index 3205a59..2f91483 100644
--- a/satrs-core/src/pus/scheduling.rs
+++ b/satrs-core/src/pus/scheduling.rs
@@ -1,12 +1,28 @@
-use crate::pool::StoreAddr;
+//! # PUS Service 11 Scheduling Module
+use crate::pool::{PoolProvider, StoreAddr, StoreError};
use alloc::collections::btree_map::{Entry, Range};
+use alloc::vec;
+use alloc::vec::Vec;
use core::time::Duration;
use spacepackets::time::UnixTimestamp;
use std::collections::BTreeMap;
+#[cfg(feature = "std")]
use std::time::SystemTimeError;
-use std::vec;
-use std::vec::Vec;
+/// This is the core data structure for scheduling PUS telecommands with [alloc] support.
+///
+/// It is assumed that the actual telecommand data is stored in a separate TC pool offering
+/// a [crate::pool::PoolProvider] API. This data structure just tracks the store addresses and their
+/// release times and offers a convenient API to insert and release telecommands and perform
+/// other functionality specified by the ECSS standard in section 6.11. The time is tracked
+/// as a [spacepackets::time::UnixTimestamp], but the only requirement on the user's
+/// timekeeping is that it is convertible to that timestamp.
+///
+/// The standard also specifies that the PUS scheduler can be enabled and disabled.
+/// A disabled scheduler should still delete commands where the execution time has been reached
+/// but should not release them to be executed.
+///
+/// Currently, sub-schedules and groups are not supported.
#[derive(Debug)]
pub struct PusScheduler {
tc_map: BTreeMap<UnixTimestamp, Vec<StoreAddr>>,
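The BTreeMap is what keeps release queries cheap: entries stay ordered by release time, so all due TCs fall into a single range query. A standalone sketch of that idea, with plain `u64` seconds and `u32` addresses standing in for `UnixTimestamp` and `StoreAddr`:

```rust
use std::collections::BTreeMap;

fn main() {
    let mut tc_map: BTreeMap<u64, Vec<u32>> = BTreeMap::new();
    tc_map.entry(100).or_default().push(1);
    tc_map.entry(100).or_default().push(2); // same release time, same bucket
    tc_map.entry(200).or_default().push(3);

    // everything scheduled at or before the current time is due for release
    let current_time = 150;
    let due: Vec<u32> = tc_map
        .range(..=current_time)
        .flat_map(|(_, addrs)| addrs.iter().copied())
        .collect();
    assert_eq!(due, vec![1, 2]);

    // counterpart of the retain call in release_telecommands: drop released buckets
    tc_map.retain(|release_time, _| release_time > &current_time);
    assert_eq!(tc_map.len(), 1);
}
```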
@@ -16,6 +32,14 @@ pub struct PusScheduler {
}
impl PusScheduler {
+ /// Create a new PUS scheduler.
+ ///
+ /// # Arguments
+ ///
+ /// * `init_current_time` - The time to initialize the scheduler with.
+ /// * `time_margin` - This time margin is used when inserting new telecommands into the
+ /// schedule. If the release time of a new telecommand is earlier than the time margin
+ /// added to the current time, it will not be inserted into the schedule.
pub fn new(init_current_time: UnixTimestamp, time_margin: Duration) -> Self {
PusScheduler {
tc_map: Default::default(),
@@ -25,6 +49,13 @@ impl PusScheduler {
}
}
+ /// Like [Self::new], but sets the `init_current_time` parameter to the current system time.
+ #[cfg(feature = "std")]
+ #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
+ pub fn new_with_current_init_time(time_margin: Duration) -> Result<Self, SystemTimeError> {
+ Ok(Self::new(UnixTimestamp::from_now()?, time_margin))
+ }
+
pub fn num_scheduled_telecommands(&self) -> u64 {
let mut num_entries = 0;
for entries in &self.tc_map {
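Hypothetical construction snippets for the two constructors, with module paths assumed from the file layout. The 5 second margin means a TC scheduled less than 5 seconds into the future is rejected on insertion:

```rust
use core::time::Duration;
use satrs_core::pus::scheduling::PusScheduler;
use spacepackets::time::UnixTimestamp;

fn main() {
    // explicit init time, also works without std
    let fixed = PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
    assert_eq!(fixed.num_scheduled_telecommands(), 0);

    // std-only convenience constructor based on the current system time
    let now_based = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
        .expect("fetching current system time failed");
    assert!(now_based.is_enabled());
}
```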
@@ -45,9 +76,25 @@ impl PusScheduler {
self.enabled = false;
}
- pub fn reset(&mut self) {
+ /// This will disable the scheduler and clear the schedule as specified in 6.11.4.4.
+ /// Be careful with this method, as it will delete all telecommands in the schedule.
+ ///
+ /// The holding store for the telecommands needs to be passed so all the stored telecommands
+ /// can be deleted to avoid a memory leak. If at least one deletion operation fails, the error
+ /// will be returned but the method will still try to delete all the commands in the schedule.
+ pub fn reset(&mut self, store: &mut impl PoolProvider) -> Result<(), StoreError> {
self.enabled = false;
+ let mut deletion_ok = Ok(());
+ for tc_lists in &mut self.tc_map {
+ for tc in tc_lists.1 {
+ let res = store.delete(*tc);
+ if res.is_err() {
+ deletion_ok = res;
+ }
+ }
+ }
self.tc_map.clear();
+ deletion_ok
}
pub fn update_time(&mut self, current_time: UnixTimestamp) {
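The reset implementation uses a keep-going error pattern: deletion continues past individual failures so no pool entries leak, and the last error is reported at the end. A minimal standalone sketch, with `try_delete` as a hypothetical stand-in for `store.delete()`:

```rust
fn delete_all(addrs: &[u32]) -> Result<(), String> {
    let mut deletion_ok = Ok(());
    for addr in addrs {
        let res = try_delete(*addr);
        if res.is_err() {
            // remember the failure, but keep deleting the remaining entries
            deletion_ok = res;
        }
    }
    deletion_ok
}

fn try_delete(addr: u32) -> Result<(), String> {
    if addr == 3 {
        Err(format!("deletion of {addr} failed"))
    } else {
        Ok(())
    }
}

fn main() {
    // the error for address 3 is reported, but 4 is still deleted
    assert!(delete_all(&[1, 2, 3, 4]).is_err());
}
```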
@@ -84,27 +131,51 @@ impl PusScheduler {
Ok(())
}
- pub fn release_telecommands<R: FnMut(bool, &StoreAddr)>(&mut self, mut releaser: R) {
+ /// Utility method which calls [Self::telecommands_to_release] and then calls a releaser
+ /// closure for each telecommand which should be released. This function will also delete
+ /// a telecommand from the holding store if the release closure requests it.
+ ///
+ /// # Arguments
+ ///
+ /// * `releaser` - Closure where the first argument is whether the scheduler is enabled and
+ /// the second argument is the store address. This closure should return whether the
+ /// command should be deleted.
+ /// * `store` - The holding store of the telecommands.
+ pub fn release_telecommands<R: FnMut(bool, &StoreAddr) -> bool>(
+ &mut self,
+ mut releaser: R,
+ tc_store: &mut impl PoolProvider,
+ ) -> Result<u64, (u64, StoreError)> {
let tcs_to_release = self.telecommands_to_release();
+ let mut released_tcs = 0;
+ let mut store_error = Ok(());
for tc in tcs_to_release {
for addr in tc.1 {
- releaser(self.enabled, addr);
+ let should_delete = releaser(self.enabled, addr);
+ released_tcs += 1;
+ if should_delete {
+ let res = tc_store.delete(*addr);
+ if res.is_err() {
+ store_error = res;
+ }
+ }
}
}
self.tc_map.retain(|k, _| k > &self.current_time);
+ store_error
+ .map(|_| released_tcs)
+ .map_err(|e| (released_tcs, e))
}
}
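A hypothetical end-to-end sketch of the new release flow, assembled from the API above and the tests below (pool configuration and module paths assumed):

```rust
use core::time::Duration;
use satrs_core::pool::{LocalPool, PoolCfg, PoolProvider};
use satrs_core::pus::scheduling::PusScheduler;
use spacepackets::time::UnixTimestamp;

fn main() {
    let mut pool = LocalPool::new(PoolCfg::new(vec![(10, 32), (5, 64)]));
    let mut scheduler =
        PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));

    let addr = pool.add(&[1, 2, 3]).expect("adding TC to pool failed");
    scheduler.insert_tc(UnixTimestamp::new_only_seconds(100), addr);

    // once the scheduler time reaches the release time, the TC is handed to the closure
    scheduler.update_time(UnixTimestamp::new_only_seconds(100));
    let released = scheduler
        .release_telecommands(
            |enabled, _addr| {
                // a real application would read the TC bytes from the pool and
                // dispatch them here if `enabled` is true
                enabled // request deletion of the released entry
            },
            &mut pool,
        )
        .expect("TC deletion failed");
    assert_eq!(released, 1);
}
```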
#[cfg(test)]
mod tests {
- use crate::pool::StoreAddr;
+ use crate::pool::{LocalPool, PoolCfg, PoolProvider, StoreAddr};
use crate::pus::scheduling::PusScheduler;
- use spacepackets::ecss::PacketTypeCodes::UnsignedInt;
+ use alloc::vec::Vec;
use spacepackets::time::UnixTimestamp;
- use std::sync::mpsc;
- use std::sync::mpsc::{channel, Receiver, TryRecvError};
use std::time::Duration;
- use std::vec::Vec;
+ #[allow(unused_imports)]
use std::{println, vec};
#[test]
@@ -118,44 +189,33 @@ mod tests {
#[test]
fn reset() {
+ let mut pool = LocalPool::new(PoolCfg::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
- let worked = scheduler.insert_tc(
- UnixTimestamp::new_only_seconds(100),
- StoreAddr {
- pool_idx: 0,
- packet_idx: 1,
- },
- );
+ let first_addr = pool.add(&[0, 1, 2]).unwrap();
+ let worked = scheduler.insert_tc(UnixTimestamp::new_only_seconds(100), first_addr.clone());
assert!(worked);
- let worked = scheduler.insert_tc(
- UnixTimestamp::new_only_seconds(200),
- StoreAddr {
- pool_idx: 0,
- packet_idx: 2,
- },
- );
+ let second_addr = pool.add(&[2, 3, 4]).unwrap();
+ let worked = scheduler.insert_tc(UnixTimestamp::new_only_seconds(200), second_addr.clone());
assert!(worked);
- let worked = scheduler.insert_tc(
- UnixTimestamp::new_only_seconds(300),
- StoreAddr {
- pool_idx: 0,
- packet_idx: 2,
- },
- );
+ let third_addr = pool.add(&[5, 6, 7]).unwrap();
+ let worked = scheduler.insert_tc(UnixTimestamp::new_only_seconds(300), third_addr.clone());
assert!(worked);
assert_eq!(scheduler.num_scheduled_telecommands(), 3);
assert!(scheduler.is_enabled());
- scheduler.reset();
+ scheduler.reset(&mut pool).expect("deletion of TCs failed");
assert!(!scheduler.is_enabled());
assert_eq!(scheduler.num_scheduled_telecommands(), 0);
+ assert!(!pool.has_element_at(&first_addr).unwrap());
+ assert!(!pool.has_element_at(&second_addr).unwrap());
+ assert!(!pool.has_element_at(&third_addr).unwrap());
}
#[test]
@@ -205,69 +265,68 @@ mod tests {
assert_eq!(scheduler.current_time(), &time);
}
+ fn common_check(
+ enabled: bool,
+ store_addr: &StoreAddr,
+ expected_store_addrs: Vec<StoreAddr>,
+ counter: &mut usize,
+ ) {
+ assert_eq!(enabled, true);
+ assert!(expected_store_addrs.contains(store_addr));
+ *counter += 1;
+ }
#[test]
fn release_basic() {
+ let mut pool = LocalPool::new(PoolCfg::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
- scheduler.insert_tc(
- UnixTimestamp::new_only_seconds(100),
- StoreAddr {
- pool_idx: 0,
- packet_idx: 1,
- },
- );
+ let first_addr = pool.add(&[2, 2, 2]).unwrap();
+ scheduler.insert_tc(UnixTimestamp::new_only_seconds(100), first_addr);
- scheduler.insert_tc(
- UnixTimestamp::new_only_seconds(200),
- StoreAddr {
- pool_idx: 0,
- packet_idx: 2,
- },
- );
+ let second_addr = pool.add(&[5, 6, 7]).unwrap();
+ scheduler.insert_tc(UnixTimestamp::new_only_seconds(200), second_addr);
let mut i = 0;
let mut test_closure_1 = |boolvar: bool, store_addr: &StoreAddr| {
- assert_eq!(boolvar, true);
- assert_eq!(
- store_addr,
- &StoreAddr {
- pool_idx: 0,
- packet_idx: 1,
- }
- );
- i += 1;
+ common_check(boolvar, store_addr, vec![first_addr], &mut i);
+ true
};
// test 1: too early, no tcs
scheduler.update_time(UnixTimestamp::new_only_seconds(99));
- scheduler.release_telecommands(&mut test_closure_1);
+ scheduler
+ .release_telecommands(&mut test_closure_1, &mut pool)
+ .expect("deletion failed");
// test 2: exact time stamp of tc, releases 1 tc
scheduler.update_time(UnixTimestamp::new_only_seconds(100));
- scheduler.release_telecommands(&mut test_closure_1);
+ let mut released = scheduler
+ .release_telecommands(&mut test_closure_1, &mut pool)
+ .expect("deletion failed");
+ assert_eq!(released, 1);
+ assert!(!pool.has_element_at(&first_addr).unwrap());
// test 3, late timestamp, release 1 overdue tc
let mut test_closure_2 = |boolvar: bool, store_addr: &StoreAddr| {
- assert_eq!(boolvar, true);
- assert_eq!(
- store_addr,
- &StoreAddr {
- pool_idx: 0,
- packet_idx: 2,
- }
- );
- i += 1;
+ common_check(boolvar, store_addr, vec![second_addr], &mut i);
+ true
};
scheduler.update_time(UnixTimestamp::new_only_seconds(206));
- scheduler.release_telecommands(&mut test_closure_2);
+ released = scheduler
+ .release_telecommands(&mut test_closure_2, &mut pool)
+ .expect("deletion failed");
+ assert_eq!(released, 1);
+ assert!(!pool.has_element_at(&second_addr).unwrap());
//test 4: no tcs left
- scheduler.release_telecommands(&mut test_closure_2);
+ scheduler
+ .release_telecommands(&mut test_closure_2, &mut pool)
+ .expect("deletion failed");
// check that 2 total tcs have been released
assert_eq!(i, 2);
@@ -275,50 +334,45 @@ mod tests {
#[test]
fn release_multi_with_same_time() {
+ let mut pool = LocalPool::new(PoolCfg::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
- scheduler.insert_tc(
- UnixTimestamp::new_only_seconds(100),
- StoreAddr {
- pool_idx: 0,
- packet_idx: 1,
- },
- );
+ let first_addr = pool.add(&[2, 2, 2]).unwrap();
+ scheduler.insert_tc(UnixTimestamp::new_only_seconds(100), first_addr);
- scheduler.insert_tc(
- UnixTimestamp::new_only_seconds(100),
- StoreAddr {
- pool_idx: 0,
- packet_idx: 1,
- },
- );
+ let second_addr = pool.add(&[2, 2, 2]).unwrap();
+ scheduler.insert_tc(UnixTimestamp::new_only_seconds(100), second_addr);
let mut i = 0;
let mut test_closure = |boolvar: bool, store_addr: &StoreAddr| {
- assert_eq!(boolvar, true);
- assert_eq!(
- store_addr,
- &StoreAddr {
- pool_idx: 0,
- packet_idx: 1,
- }
- );
- i += 1;
+ common_check(boolvar, store_addr, vec![first_addr, second_addr], &mut i);
+ true
};
// test 1: too early, no tcs
scheduler.update_time(UnixTimestamp::new_only_seconds(99));
- scheduler.release_telecommands(&mut test_closure);
+ let mut released = scheduler
+ .release_telecommands(&mut test_closure, &mut pool)
+ .expect("deletion failed");
+ assert_eq!(released, 0);
// test 2: exact time stamp of tc, releases 2 tc
scheduler.update_time(UnixTimestamp::new_only_seconds(100));
- scheduler.release_telecommands(&mut test_closure);
+ released = scheduler
+ .release_telecommands(&mut test_closure, &mut pool)
+ .expect("deletion failed");
+ assert_eq!(released, 2);
+ assert!(!pool.has_element_at(&first_addr).unwrap());
+ assert!(!pool.has_element_at(&second_addr).unwrap());
//test 3: no tcs left
- scheduler.release_telecommands(&mut test_closure);
+ released = scheduler
+ .release_telecommands(&mut test_closure, &mut pool)
+ .expect("deletion failed");
+ assert_eq!(released, 0);
// check that 2 total tcs have been released
assert_eq!(i, 2);
diff --git a/satrs-core/src/pus/verification.rs b/satrs-core/src/pus/verification.rs
index 8fba721..96c1073 100644
--- a/satrs-core/src/pus/verification.rs
+++ b/satrs-core/src/pus/verification.rs
@@ -1,4 +1,4 @@
-//! # PUS Verification Service 1 Module
+//! # PUS Service 1 Verification Module
//!
//! This module allows packaging and sending PUS Service 1 packets. It is conforming to section
//! 8 of the PUS standard ECSS-E-ST-70-41C.
diff --git a/satrs-core/src/seq_count.rs b/satrs-core/src/seq_count.rs
index 6871491..b299756 100644
--- a/satrs-core/src/seq_count.rs
+++ b/satrs-core/src/seq_count.rs
@@ -9,7 +9,7 @@ pub use stdmod::*;
///
/// The core functions are not mutable on purpose to allow easier usage with
/// static structs when using the interior mutability pattern. This can be achieved by using
-/// [Cell], [RefCell] or atomic types.
+/// [Cell], [core::cell::RefCell] or atomic types.
pub trait SequenceCountProviderCore<Raw> {
fn get(&self) -> Raw;
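A standalone sketch of the interior-mutability approach the doc comment refers to. `CellSeqCounter` is a hypothetical type, not the crate's trait implementation; it only illustrates how a counter can be incremented through `&self`:

```rust
use core::cell::Cell;

struct CellSeqCounter {
    seq_count: Cell<u16>,
}

impl CellSeqCounter {
    const fn new() -> Self {
        Self { seq_count: Cell::new(0) }
    }

    fn get(&self) -> u16 {
        self.seq_count.get()
    }

    fn increment(&self) {
        // wrap around instead of panicking on overflow, as a CCSDS sequence
        // count is expected to roll over
        self.seq_count.set(self.seq_count.get().wrapping_add(1));
    }
}

fn main() {
    let counter = CellSeqCounter::new(); // note: not declared mut
    counter.increment(); // works through &self
    assert_eq!(counter.get(), 1);
}
```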