Merge pull request 'Try to make no_std build' (#56) from try-to-make-no-std-build into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good

Reviewed-on: #56
Robin Müller 2023-07-12 14:14:45 +02:00
commit 3fb404ff0e
6 changed files with 423 additions and 418 deletions

View File

@@ -33,8 +33,6 @@ pub mod hk;
pub mod mode;
pub mod objects;
pub mod params;
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod pool;
pub mod power;
pub mod pus;
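The feature gate can move off the `pool` module declaration here because the gating now happens inside the pool module itself (see the pool diff below). As a rough sketch of the pattern this PR applies across the crate — module and item names in this snippet are illustrative, not taken from the diff — alloc-dependent items are grouped into an inner `alloc_mod` and re-exported only when the `alloc` feature is active:

```rust
// Sketch of the feature-gating pattern used in this PR (names are illustrative).
// Core items stay available in no_std builds; alloc-dependent items live in an
// inner module that is only compiled and re-exported when the "alloc" feature is on.
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(feature = "alloc")]
extern crate alloc;

pub mod pool {
    /// Always available, even without an allocator.
    pub type NumBlocks = u16;

    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    pub use alloc_mod::*;

    /// Everything in here needs the "alloc" feature.
    #[cfg(feature = "alloc")]
    mod alloc_mod {
        use alloc::vec::Vec;

        pub struct PoolCfg {
            pub cfg: Vec<(super::NumBlocks, usize)>,
        }
    }
}
```

With this layout, a `no_std` build without `alloc` still gets the core types, while builds with `alloc` or `std` see the full API, and `doc_cfg` can flag the feature requirement in the generated documentation.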

View File

@@ -1,5 +1,6 @@
use crate::tmtc::TargetId;
use core::mem::size_of;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use spacepackets::{ByteConversionError, SizeMissmatch};

View File

@@ -73,63 +73,17 @@
//! assert_eq!(buf_read_back[0], 7);
//! }
//! ```
use alloc::format;
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub use alloc_mod::*;
use core::fmt::{Display, Formatter};
use delegate::delegate;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::boxed::Box;
#[cfg(feature = "std")]
use std::error::Error;
#[cfg(feature = "std")]
use std::sync::{Arc, RwLock};
type NumBlocks = u16;
#[cfg(feature = "std")]
pub type ShareablePoolProvider = Box<dyn PoolProvider + Send + Sync>;
#[cfg(feature = "std")]
pub type SharedPool = Arc<RwLock<ShareablePoolProvider>>;
/// Configuration structure of the [local pool][LocalPool]
///
/// # Parameters
///
/// * `cfg`: Vector of tuples which represent a subpool. The first entry in the tuple specifies the
/// number of memory blocks in the subpool, the second entry the size of the blocks
#[derive(Clone)]
pub struct PoolCfg {
cfg: Vec<(NumBlocks, usize)>,
}
impl PoolCfg {
pub fn new(cfg: Vec<(NumBlocks, usize)>) -> Self {
PoolCfg { cfg }
}
pub fn sanitize(&mut self) -> usize {
self.cfg
.retain(|&(bucket_num, size)| bucket_num > 0 && size < LocalPool::MAX_SIZE);
self.cfg
.sort_unstable_by(|(_, sz0), (_, sz1)| sz0.partial_cmp(sz1).unwrap());
self.cfg.len()
}
}
type PoolSize = usize;
/// Pool implementation providing sub-pools with fixed size memory blocks. More details in
/// the [module documentation][super::pool]
pub struct LocalPool {
pool_cfg: PoolCfg,
pool: Vec<Vec<u8>>,
sizes_lists: Vec<Vec<PoolSize>>,
}
/// Simple address type used for transactions with the local pool.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
@@ -138,6 +92,14 @@ pub struct StoreAddr {
pub(crate) packet_idx: NumBlocks,
}
impl StoreAddr {
pub const INVALID_ADDR: u32 = 0xFFFFFFFF;
pub fn raw(&self) -> u32 {
((self.pool_idx as u32) << 16) | self.packet_idx as u32
}
}
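For reference, `raw` simply packs the subpool index into the upper 16 bits and the packet index into the lower 16 bits of the returned `u32`. A small numeric sketch with arbitrarily chosen values (the real `StoreAddr` fields are crate-private, so the packing is redone by hand here):

```rust
fn main() {
    // Hypothetical indices: subpool 2, packet 7.
    let pool_idx: u16 = 2;
    let packet_idx: u16 = 7;
    let raw = ((pool_idx as u32) << 16) | packet_idx as u32;
    assert_eq!(raw, 0x0002_0007);
}
```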
impl Display for StoreAddr {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
write!(
@@ -148,14 +110,6 @@ impl Display for StoreAddr {
}
}
impl StoreAddr {
pub const INVALID_ADDR: u32 = 0xFFFFFFFF;
pub fn raw(&self) -> u32 {
((self.pool_idx as u32) << 16) | self.packet_idx as u32
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum StoreIdError {
@@ -191,7 +145,7 @@ pub enum StoreError {
/// Valid subpool and packet index, but no data is stored at the given address
DataDoesNotExist(StoreAddr),
/// Internal or configuration errors
InternalError(String),
InternalError(u32),
}
impl Display for StoreError {
@@ -226,7 +180,119 @@ impl Error for StoreError {
}
}
pub trait PoolProvider {
#[cfg(feature = "alloc")]
mod alloc_mod {
use crate::pool::{NumBlocks, StoreAddr, StoreError, StoreIdError};
use alloc::boxed::Box;
use alloc::vec;
use alloc::vec::Vec;
use delegate::delegate;
#[cfg(feature = "std")]
use std::sync::{Arc, RwLock};
#[cfg(feature = "std")]
pub type ShareablePoolProvider = Box<dyn PoolProvider + Send + Sync>;
#[cfg(feature = "std")]
pub type SharedPool = Arc<RwLock<ShareablePoolProvider>>;
type PoolSize = usize;
const STORE_FREE: PoolSize = PoolSize::MAX;
pub const POOL_MAX_SIZE: PoolSize = STORE_FREE - 1;
/// Configuration structure of the [local pool][LocalPool]
///
/// # Parameters
///
/// * `cfg`: Vector of tuples which represent a subpool. The first entry in the tuple specifies the
/// number of memory blocks in the subpool, the second entry the size of the blocks
#[derive(Clone)]
pub struct PoolCfg {
cfg: Vec<(NumBlocks, usize)>,
}
impl PoolCfg {
pub fn new(cfg: Vec<(NumBlocks, usize)>) -> Self {
PoolCfg { cfg }
}
pub fn cfg(&self) -> &Vec<(NumBlocks, usize)> {
&self.cfg
}
pub fn sanitize(&mut self) -> usize {
self.cfg
.retain(|&(bucket_num, size)| bucket_num > 0 && size < POOL_MAX_SIZE);
self.cfg
.sort_unstable_by(|(_, sz0), (_, sz1)| sz0.partial_cmp(sz1).unwrap());
self.cfg.len()
}
}
pub struct PoolGuard<'a> {
pool: &'a mut LocalPool,
pub addr: StoreAddr,
no_deletion: bool,
deletion_failed_error: Option<StoreError>,
}
/// This helper object provides scoped access to a pool entry and deletes the entry when it
/// is dropped, unless `release` is called first.
impl<'a> PoolGuard<'a> {
pub fn new(pool: &'a mut LocalPool, addr: StoreAddr) -> Self {
Self {
pool,
addr,
no_deletion: false,
deletion_failed_error: None,
}
}
pub fn read(&self) -> Result<&[u8], StoreError> {
self.pool.read(&self.addr)
}
/// Releasing the pool guard will disable the automatic deletion of the data when the guard
/// is dropped.
pub fn release(&mut self) {
self.no_deletion = true;
}
}
impl Drop for PoolGuard<'_> {
fn drop(&mut self) {
if !self.no_deletion {
if let Err(e) = self.pool.delete(self.addr) {
self.deletion_failed_error = Some(e);
}
}
}
}
pub struct PoolRwGuard<'a> {
guard: PoolGuard<'a>,
}
impl<'a> PoolRwGuard<'a> {
pub fn new(pool: &'a mut LocalPool, addr: StoreAddr) -> Self {
Self {
guard: PoolGuard::new(pool, addr),
}
}
pub fn modify(&mut self) -> Result<&mut [u8], StoreError> {
self.guard.pool.modify(&self.guard.addr)
}
delegate!(
to self.guard {
pub fn read(&self) -> Result<&[u8], StoreError>;
/// Releasing the pool guard will disable the automatic deletion of the data when the guard
/// is dropped.
pub fn release(&mut self);
}
);
}
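The read/write guard follows the same pattern as the read-only guard above. A short, hedged usage sketch (the `satrs_core` import path is an assumption; the pool setup is omitted):

```rust
// Hedged sketch: modify a pool entry in place through a PoolRwGuard.
use satrs_core::pool::{LocalPool, PoolProvider, PoolRwGuard};

fn patch_first_byte(pool: &mut LocalPool) {
    // Store some data first so there is something to modify.
    let addr = pool.add(&[0x00, 0x01, 0x02]).unwrap();
    let mut rw_guard = PoolRwGuard::new(pool, addr);
    let data = rw_guard.modify().expect("modify failed");
    data[0] = 0xff;
    // Keep the (modified) entry instead of deleting it when the guard is dropped.
    rw_guard.release();
}
```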
pub trait PoolProvider {
/// Add new data to the pool. The provider should attempt to reserve a memory block with the
/// appropriate size and then copy the given data to the block. Yields a [StoreAddr] which can
/// be used to access the data stored in the pool
@@ -275,11 +341,17 @@ pub trait PoolProvider {
}
Ok(self.read(addr)?.len())
}
}
impl LocalPool {
const STORE_FREE: PoolSize = PoolSize::MAX;
const MAX_SIZE: PoolSize = Self::STORE_FREE - 1;
/// Pool implementation providing sub-pools with fixed size memory blocks. More details in
/// the [module documentation][super::pool]
pub struct LocalPool {
pool_cfg: PoolCfg,
pool: Vec<Vec<u8>>,
sizes_lists: Vec<Vec<PoolSize>>,
}
impl LocalPool {
/// Create a new local pool from the [given configuration][PoolCfg]. This function will sanitize
/// the given configuration as well.
pub fn new(mut cfg: PoolCfg) -> LocalPool {
@@ -295,7 +367,7 @@ impl LocalPool {
let next_sizes_list_len = num_elems as usize;
local_pool
.sizes_lists
.push(vec![Self::STORE_FREE; next_sizes_list_len]);
.push(vec![STORE_FREE; next_sizes_list_len]);
}
local_pool
}
@@ -305,7 +377,7 @@ impl LocalPool {
let pool_idx = addr.pool_idx as usize;
let size_list = self.sizes_lists.get(pool_idx).unwrap();
let curr_size = size_list[addr.packet_idx as usize];
if curr_size == Self::STORE_FREE {
if curr_size == STORE_FREE {
return Err(StoreError::DataDoesNotExist(*addr));
}
Ok(curr_size)
@@ -351,16 +423,11 @@ impl LocalPool {
}
fn write(&mut self, addr: &StoreAddr, data: &[u8]) -> Result<(), StoreError> {
let packet_pos = self.raw_pos(addr).ok_or_else(|| {
StoreError::InternalError(format!(
"write: Error in raw_pos func with address {addr:?}"
))
})?;
let subpool = self.pool.get_mut(addr.pool_idx as usize).ok_or_else(|| {
StoreError::InternalError(format!(
"write: Error retrieving pool slice with address {addr:?}"
))
})?;
let packet_pos = self.raw_pos(addr).ok_or(StoreError::InternalError(0))?;
let subpool = self
.pool
.get_mut(addr.pool_idx as usize)
.ok_or(StoreError::InternalError(1))?;
let pool_slice = &mut subpool[packet_pos..packet_pos + data.len()];
pool_slice.copy_from_slice(data);
Ok(())
@@ -369,7 +436,7 @@ impl LocalPool {
fn find_empty(&mut self, subpool: u16) -> Result<(u16, &mut usize), StoreError> {
if let Some(size_list) = self.sizes_lists.get_mut(subpool as usize) {
for (i, elem_size) in size_list.iter_mut().enumerate() {
if *elem_size == Self::STORE_FREE {
if *elem_size == STORE_FREE {
return Ok((i as u16, elem_size));
}
}
@@ -386,12 +453,12 @@ impl LocalPool {
let (_, size) = self.pool_cfg.cfg.get(addr.pool_idx as usize)?;
Some(addr.packet_idx as usize * size)
}
}
impl PoolProvider for LocalPool {
fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError> {
let data_len = data.len();
if data_len > Self::MAX_SIZE {
if data_len > POOL_MAX_SIZE {
return Err(StoreError::DataTooLarge(data_len));
}
let addr = self.reserve(data_len)?;
@@ -400,20 +467,21 @@ impl PoolProvider for LocalPool {
}
fn free_element(&mut self, len: usize) -> Result<(StoreAddr, &mut [u8]), StoreError> {
if len > Self::MAX_SIZE {
if len > POOL_MAX_SIZE {
return Err(StoreError::DataTooLarge(len));
}
let addr = self.reserve(len)?;
let raw_pos = self.raw_pos(&addr).unwrap();
let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + len];
let block =
&mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + len];
Ok((addr, block))
}
fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError> {
let curr_size = self.addr_check(addr)?;
let raw_pos = self.raw_pos(addr).unwrap();
let block =
&mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + curr_size];
let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()
[raw_pos..raw_pos + curr_size];
Ok(block)
}
@@ -424,7 +492,8 @@ impl PoolProvider for LocalPool {
fn read(&self, addr: &StoreAddr) -> Result<&[u8], StoreError> {
let curr_size = self.addr_check(addr)?;
let raw_pos = self.raw_pos(addr).unwrap();
let block = &self.pool.get(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + curr_size];
let block =
&self.pool.get(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + curr_size];
Ok(block)
}
@@ -436,10 +505,10 @@ impl PoolProvider for LocalPool {
self.addr_check(&addr)?;
let block_size = self.pool_cfg.cfg.get(addr.pool_idx as usize).unwrap().1;
let raw_pos = self.raw_pos(&addr).unwrap();
let block =
&mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + block_size];
let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()
[raw_pos..raw_pos + block_size];
let size_list = self.sizes_lists.get_mut(addr.pool_idx as usize).unwrap();
size_list[addr.packet_idx as usize] = Self::STORE_FREE;
size_list[addr.packet_idx as usize] = STORE_FREE;
block.fill(0);
Ok(())
}
@@ -449,82 +518,19 @@ impl PoolProvider for LocalPool {
let pool_idx = addr.pool_idx as usize;
let size_list = self.sizes_lists.get(pool_idx).unwrap();
let curr_size = size_list[addr.packet_idx as usize];
if curr_size == Self::STORE_FREE {
if curr_size == STORE_FREE {
return Ok(false);
}
Ok(true)
}
}
pub struct PoolGuard<'a> {
pool: &'a mut LocalPool,
pub addr: StoreAddr,
no_deletion: bool,
deletion_failed_error: Option<StoreError>,
}
/// This helper object
impl<'a> PoolGuard<'a> {
pub fn new(pool: &'a mut LocalPool, addr: StoreAddr) -> Self {
Self {
pool,
addr,
no_deletion: false,
deletion_failed_error: None,
}
}
pub fn read(&self) -> Result<&[u8], StoreError> {
self.pool.read(&self.addr)
}
/// Releasing the pool guard will disable the automatic deletion of the data when the guard
/// is dropped.
pub fn release(&mut self) {
self.no_deletion = true;
}
}
impl Drop for PoolGuard<'_> {
fn drop(&mut self) {
if !self.no_deletion {
if let Err(e) = self.pool.delete(self.addr) {
self.deletion_failed_error = Some(e);
}
}
}
}
pub struct PoolRwGuard<'a> {
guard: PoolGuard<'a>,
}
impl<'a> PoolRwGuard<'a> {
pub fn new(pool: &'a mut LocalPool, addr: StoreAddr) -> Self {
Self {
guard: PoolGuard::new(pool, addr),
}
}
pub fn modify(&mut self) -> Result<&mut [u8], StoreError> {
self.guard.pool.modify(&self.guard.addr)
}
delegate!(
to self.guard {
pub fn read(&self) -> Result<&[u8], StoreError>;
/// Releasing the pool guard will disable the automatic deletion of the data when the guard
/// is dropped.
pub fn release(&mut self);
}
);
}
#[cfg(test)]
mod tests {
use crate::pool::{
LocalPool, PoolCfg, PoolGuard, PoolProvider, PoolRwGuard, StoreAddr, StoreError,
StoreIdError,
StoreIdError, POOL_MAX_SIZE,
};
use std::vec;
@@ -537,19 +543,19 @@ mod tests {
#[test]
fn test_cfg() {
// Values where number of buckets is 0 or size is too large should be removed
let mut pool_cfg = PoolCfg::new(vec![(0, 0), (1, 0), (2, LocalPool::MAX_SIZE)]);
let mut pool_cfg = PoolCfg::new(vec![(0, 0), (1, 0), (2, POOL_MAX_SIZE)]);
pool_cfg.sanitize();
assert_eq!(pool_cfg.cfg, vec![(1, 0)]);
assert_eq!(*pool_cfg.cfg(), vec![(1, 0)]);
// Entries should be ordered according to bucket size
pool_cfg = PoolCfg::new(vec![(16, 6), (32, 3), (8, 12)]);
pool_cfg.sanitize();
assert_eq!(pool_cfg.cfg, vec![(32, 3), (16, 6), (8, 12)]);
assert_eq!(*pool_cfg.cfg(), vec![(32, 3), (16, 6), (8, 12)]);
// Unstable sort is used, so order of entries with same block length should not matter
pool_cfg = PoolCfg::new(vec![(12, 12), (14, 16), (10, 12)]);
pool_cfg.sanitize();
assert!(
pool_cfg.cfg == vec![(12, 12), (10, 12), (14, 16)]
|| pool_cfg.cfg == vec![(10, 12), (12, 12), (14, 16)]
*pool_cfg.cfg() == vec![(12, 12), (10, 12), (14, 16)]
|| *pool_cfg.cfg() == vec![(10, 12), (12, 12), (14, 16)]
);
}
@@ -721,11 +727,11 @@ mod tests {
#[test]
fn test_data_too_large_1() {
let mut local_pool = basic_small_pool();
let res = local_pool.free_element(LocalPool::MAX_SIZE + 1);
let res = local_pool.free_element(POOL_MAX_SIZE + 1);
assert!(res.is_err());
assert_eq!(
res.unwrap_err(),
StoreError::DataTooLarge(LocalPool::MAX_SIZE + 1)
StoreError::DataTooLarge(POOL_MAX_SIZE + 1)
);
}
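Taken together, the relocated types can still be used the same way from a build with the `alloc` feature. A hedged usage sketch — the `satrs_core` import path and the subpool layout are assumptions for illustration, not part of the diff:

```rust
// Hedged usage sketch of the pool API after this change.
use satrs_core::pool::{LocalPool, PoolCfg, PoolGuard, PoolProvider};

fn main() {
    // Three subpools: 4 blocks of 32 bytes, 2 blocks of 64 bytes, 1 block of 128 bytes.
    let cfg = PoolCfg::new(vec![(4, 32), (2, 64), (1, 128)]);
    let mut pool = LocalPool::new(cfg);

    // Store a small packet and get back a StoreAddr handle.
    let addr = pool.add(&[0x17, 0x2a, 0x01]).expect("adding data failed");

    // The guard reads the data and deletes it automatically when dropped,
    // unless release() is called first.
    let guard = PoolGuard::new(&mut pool, addr);
    let data = guard.read().expect("reading back data failed");
    assert_eq!(data, &[0x17, 0x2a, 0x01]);
}
```

The `PoolGuard` deletes the entry when it goes out of scope; calling `release()` beforehand keeps the data in the pool.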

View File

@@ -8,10 +8,10 @@ use crate::pus::{
EcssTcReceiver, EcssTmSender, PartialPusHandlingError, PusPacketHandlerResult,
PusPacketHandlingError, PusServiceBase, PusServiceHandler,
};
use alloc::boxed::Box;
use spacepackets::ecss::event::Subservice;
use spacepackets::ecss::tc::PusTcReader;
use spacepackets::ecss::PusPacket;
use std::boxed::Box;
use std::sync::mpsc::Sender;
pub struct PusService5EventHandler {

View File

@@ -8,18 +8,22 @@ use core::fmt::{Display, Formatter};
use downcast_rs::{impl_downcast, Downcast};
#[cfg(feature = "alloc")]
use dyn_clone::DynClone;
#[cfg(feature = "std")]
use std::error::Error;
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader};
use spacepackets::ecss::tm::PusTmCreator;
use spacepackets::ecss::PusError;
use spacepackets::{ByteConversionError, SizeMissmatch, SpHeader};
use std::error::Error;
pub mod event;
pub mod event_man;
#[cfg(feature = "std")]
pub mod event_srv;
pub mod hk;
pub mod mode;
pub mod scheduler;
#[cfg(feature = "std")]
pub mod scheduler_srv;
#[cfg(feature = "std")]
pub mod test;

View File

@@ -2,22 +2,17 @@
//!
//! The core data structure of this module is the [PusScheduler]. This structure can be used
//! to perform the scheduling of telecommands like specified in the ECSS standard.
use crate::pool::{StoreAddr, StoreError};
use core::fmt::{Debug, Display, Formatter};
use core::time::Duration;
use core::fmt::Debug;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use spacepackets::ecss::scheduling::TimeWindowType;
use spacepackets::ecss::tc::{GenericPusTcSecondaryHeader, IsPusTelecommand};
use spacepackets::ecss::PusError;
use spacepackets::time::{CcsdsTimeProvider, TimestampError, UnixTimestamp};
use spacepackets::time::CcsdsTimeProvider;
use spacepackets::CcsdsPacket;
#[cfg(feature = "std")]
use std::error::Error;
//#[cfg(feature = "std")]
//pub use std_mod::*;
use crate::pool::StoreAddr;
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
@@ -61,78 +56,6 @@ impl RequestId {
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum ScheduleError {
PusError(PusError),
/// The release time is within the time-margin added on top of the current time.
/// The first parameter is the current time, the second one the time margin, and the third one
/// the release time.
ReleaseTimeInTimeMargin(UnixTimestamp, Duration, UnixTimestamp),
/// Nested time-tagged commands are not allowed.
NestedScheduledTc,
StoreError(StoreError),
TcDataEmpty,
TimestampError(TimestampError),
WrongSubservice,
WrongService,
}
impl Display for ScheduleError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
ScheduleError::PusError(e) => {
write!(f, "Pus Error: {e}")
}
ScheduleError::ReleaseTimeInTimeMargin(current_time, margin, timestamp) => {
write!(
f,
"Error: time margin too short, current time: {current_time:?}, time margin: {margin:?}, release time: {timestamp:?}"
)
}
ScheduleError::NestedScheduledTc => {
write!(f, "Error: nested scheduling is not allowed")
}
ScheduleError::StoreError(e) => {
write!(f, "Store Error: {e}")
}
ScheduleError::TcDataEmpty => {
write!(f, "Error: empty Tc Data field")
}
ScheduleError::TimestampError(e) => {
write!(f, "Timestamp Error: {e}")
}
ScheduleError::WrongService => {
write!(f, "Error: Service not 11.")
}
ScheduleError::WrongSubservice => {
write!(f, "Error: Subservice not 4.")
}
}
}
}
impl From<PusError> for ScheduleError {
fn from(e: PusError) -> Self {
ScheduleError::PusError(e)
}
}
impl From<StoreError> for ScheduleError {
fn from(e: StoreError) -> Self {
ScheduleError::StoreError(e)
}
}
impl From<TimestampError> for ScheduleError {
fn from(e: TimestampError) -> Self {
ScheduleError::TimestampError(e)
}
}
#[cfg(feature = "std")]
impl Error for ScheduleError {}
/// This is the format stored internally by the TC scheduler for each scheduled telecommand.
/// It consists of the address of that telecommand in the TC pool and a request ID.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
@@ -156,11 +79,6 @@ impl TcInfo {
}
}
enum DeletionResult {
WithoutStoreDeletion(Option<StoreAddr>),
WithStoreDeletion(Result<bool, StoreError>),
}
pub struct TimeWindow<TimeProvder> {
time_window_type: TimeWindowType,
start_time: Option<TimeProvder>,
@@ -217,24 +135,102 @@ impl<TimeProvider: CcsdsTimeProvider + Clone> TimeWindow<TimeProvider> {
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use super::*;
use crate::pool::{PoolProvider, StoreAddr, StoreError};
use crate::pus::scheduler::{DeletionResult, RequestId, ScheduleError, TcInfo, TimeWindow};
use alloc::collections::btree_map::{Entry, Range};
use alloc::collections::BTreeMap;
use alloc::vec;
use alloc::vec::Vec;
use core::fmt::{Display, Formatter};
use core::time::Duration;
use spacepackets::ecss::scheduling::TimeWindowType;
use spacepackets::ecss::tc::{
GenericPusTcSecondaryHeader, IsPusTelecommand, PusTc, PusTcReader,
};
use spacepackets::ecss::PusPacket;
use spacepackets::ecss::{PusError, PusPacket};
use spacepackets::time::cds::DaysLen24Bits;
use spacepackets::time::{cds, CcsdsTimeProvider, TimeReader, UnixTimestamp};
use spacepackets::time::{cds, CcsdsTimeProvider, TimeReader, TimestampError, UnixTimestamp};
#[cfg(feature = "std")]
use std::time::SystemTimeError;
enum DeletionResult {
WithoutStoreDeletion(Option<StoreAddr>),
WithStoreDeletion(Result<bool, StoreError>),
}
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum ScheduleError {
PusError(PusError),
/// The release time is within the time-margin added on top of the current time.
/// The first parameter is the current time, the second one the time margin, and the third one
/// the release time.
ReleaseTimeInTimeMargin(UnixTimestamp, Duration, UnixTimestamp),
/// Nested time-tagged commands are not allowed.
NestedScheduledTc,
StoreError(StoreError),
TcDataEmpty,
TimestampError(TimestampError),
WrongSubservice,
WrongService,
}
impl Display for ScheduleError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
ScheduleError::PusError(e) => {
write!(f, "Pus Error: {e}")
}
ScheduleError::ReleaseTimeInTimeMargin(current_time, margin, timestamp) => {
write!(
f,
"Error: time margin too short, current time: {current_time:?}, time margin: {margin:?}, release time: {timestamp:?}"
)
}
ScheduleError::NestedScheduledTc => {
write!(f, "Error: nested scheduling is not allowed")
}
ScheduleError::StoreError(e) => {
write!(f, "Store Error: {e}")
}
ScheduleError::TcDataEmpty => {
write!(f, "Error: empty Tc Data field")
}
ScheduleError::TimestampError(e) => {
write!(f, "Timestamp Error: {e}")
}
ScheduleError::WrongService => {
write!(f, "Error: Service not 11.")
}
ScheduleError::WrongSubservice => {
write!(f, "Error: Subservice not 4.")
}
}
}
}
impl From<PusError> for ScheduleError {
fn from(e: PusError) -> Self {
ScheduleError::PusError(e)
}
}
impl From<StoreError> for ScheduleError {
fn from(e: StoreError) -> Self {
ScheduleError::StoreError(e)
}
}
impl From<TimestampError> for ScheduleError {
fn from(e: TimestampError) -> Self {
ScheduleError::TimestampError(e)
}
}
#[cfg(feature = "std")]
impl Error for ScheduleError {}
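As an aside, the `From` conversions above are what make the `?` operator convenient in scheduler code. A minimal, hedged sketch (the `satrs_core` crate paths are assumed, not shown in the diff):

```rust
use satrs_core::pool::{LocalPool, PoolProvider};
use satrs_core::pus::scheduler::ScheduleError;

// A StoreError returned by the pool is converted into ScheduleError::StoreError
// by the From impl shown above, so plain `?` works here.
fn store_tc(pool: &mut LocalPool, tc_bytes: &[u8]) -> Result<(), ScheduleError> {
    if tc_bytes.is_empty() {
        return Err(ScheduleError::TcDataEmpty);
    }
    let _addr = pool.add(tc_bytes)?;
    Ok(())
}
```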
/// This is the core data structure for scheduling PUS telecommands with [alloc] support.
///
/// It is assumed that the actual telecommand data is stored in a separate TC pool offering