Merge pull request 'Try to make no_std build' (#56) from try-to-make-no-std-build into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #56
Commit 3fb404ff0e
@@ -33,8 +33,6 @@ pub mod hk;
 pub mod mode;
 pub mod objects;
 pub mod params;
-#[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 pub mod pool;
 pub mod power;
 pub mod pus;
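The `alloc` gate on `pub mod pool` moves out of lib.rs here: instead of hiding the whole module behind the feature, the module now compiles unconditionally and gates only its heap-backed internals. A minimal sketch of that pattern (illustrative, not part of the diff):

    // lib.rs: the module is always declared.
    pub mod pool;

    // pool.rs: only the alloc-dependent part is feature-gated and re-exported.
    #[cfg(feature = "alloc")]
    pub use alloc_mod::*;

    #[cfg(feature = "alloc")]
    mod alloc_mod {
        // Vec- and Box-backed pool implementation lives here.
    }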
@@ -1,5 +1,6 @@
 use crate::tmtc::TargetId;
 use core::mem::size_of;
+#[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 use spacepackets::{ByteConversionError, SizeMissmatch};
 
@@ -73,63 +73,17 @@
 //! assert_eq!(buf_read_back[0], 7);
 //! }
 //! ```
-use alloc::format;
-use alloc::string::String;
-use alloc::vec;
-use alloc::vec::Vec;
+#[cfg(feature = "alloc")]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
+pub use alloc_mod::*;
 use core::fmt::{Display, Formatter};
-use delegate::delegate;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 #[cfg(feature = "std")]
-use std::boxed::Box;
-#[cfg(feature = "std")]
 use std::error::Error;
-#[cfg(feature = "std")]
-use std::sync::{Arc, RwLock};
 
 type NumBlocks = u16;
 
-#[cfg(feature = "std")]
-pub type ShareablePoolProvider = Box<dyn PoolProvider + Send + Sync>;
-#[cfg(feature = "std")]
-pub type SharedPool = Arc<RwLock<ShareablePoolProvider>>;
-
-/// Configuration structure of the [local pool][LocalPool]
-///
-/// # Parameters
-///
-/// * `cfg`: Vector of tuples which represent a subpool. The first entry in the tuple specifies the
-///   number of memory blocks in the subpool, the second entry the size of the blocks
-#[derive(Clone)]
-pub struct PoolCfg {
-    cfg: Vec<(NumBlocks, usize)>,
-}
-
-impl PoolCfg {
-    pub fn new(cfg: Vec<(NumBlocks, usize)>) -> Self {
-        PoolCfg { cfg }
-    }
-
-    pub fn sanitize(&mut self) -> usize {
-        self.cfg
-            .retain(|&(bucket_num, size)| bucket_num > 0 && size < LocalPool::MAX_SIZE);
-        self.cfg
-            .sort_unstable_by(|(_, sz0), (_, sz1)| sz0.partial_cmp(sz1).unwrap());
-        self.cfg.len()
-    }
-}
-
-type PoolSize = usize;
-
-/// Pool implementation providing sub-pools with fixed size memory blocks. More details in
-/// the [module documentation][super::pool]
-pub struct LocalPool {
-    pool_cfg: PoolCfg,
-    pool: Vec<Vec<u8>>,
-    sizes_lists: Vec<Vec<PoolSize>>,
-}
-
 /// Simple address type used for transactions with the local pool.
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
@@ -138,6 +92,14 @@ pub struct StoreAddr {
     pub(crate) packet_idx: NumBlocks,
 }
 
+impl StoreAddr {
+    pub const INVALID_ADDR: u32 = 0xFFFFFFFF;
+
+    pub fn raw(&self) -> u32 {
+        ((self.pool_idx as u32) << 16) | self.packet_idx as u32
+    }
+}
+
 impl Display for StoreAddr {
     fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
         write!(
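`StoreAddr::raw` packs the two 16-bit indices into one `u32`: the subpool index in the upper half, the packet index in the lower half. A quick illustration with invented values (the fields are `pub(crate)`, so this only works inside the crate):

    let addr = StoreAddr { pool_idx: 2, packet_idx: 0x10 };
    // (2 << 16) | 0x10
    assert_eq!(addr.raw(), 0x0002_0010);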
@@ -148,14 +110,6 @@ impl Display for StoreAddr {
     }
 }
 
-impl StoreAddr {
-    pub const INVALID_ADDR: u32 = 0xFFFFFFFF;
-
-    pub fn raw(&self) -> u32 {
-        ((self.pool_idx as u32) << 16) | self.packet_idx as u32
-    }
-}
-
 #[derive(Debug, Clone, PartialEq, Eq)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub enum StoreIdError {
@@ -191,7 +145,7 @@ pub enum StoreError {
     /// Valid subpool and packet index, but no data is stored at the given address
     DataDoesNotExist(StoreAddr),
     /// Internal or configuration errors
-    InternalError(String),
+    InternalError(u32),
 }
 
 impl Display for StoreError {
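`InternalError(String)` was the last variant forcing `alloc` into the error type; it becomes a plain numeric code. A hypothetical helper mapping the two codes that `LocalPool::write` uses further down in this diff:

    // Codes 0 and 1 match the ok_or(StoreError::InternalError(..)) call sites
    // in LocalPool::write below; the string mapping here is illustrative only.
    fn internal_error_hint(code: u32) -> &'static str {
        match code {
            0 => "raw_pos lookup failed",
            1 => "subpool lookup failed",
            _ => "unknown internal error",
        }
    }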
@@ -226,233 +180,51 @@ impl Error for StoreError {
     }
 }
 
-pub trait PoolProvider {
-    /// Add new data to the pool. The provider should attempt to reserve a memory block with the
-    /// appropriate size and then copy the given data to the block. Yields a [StoreAddr] which can
-    /// be used to access the data stored in the pool
-    fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError>;
-
-    /// The provider should attempt to reserve a free memory block with the appropriate size and
-    /// then return a mutable reference to it. Yields a [StoreAddr] which can be used to access
-    /// the data stored in the pool
-    fn free_element(&mut self, len: usize) -> Result<(StoreAddr, &mut [u8]), StoreError>;
-
-    /// Modify data added previously using a given [StoreAddr] by yielding a mutable reference
-    /// to it
-    fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError>;
-
-    /// This function behaves like [Self::modify], but consumes the provided address and returns a
-    /// RAII conformant guard object.
-    ///
-    /// Unless the guard [PoolRwGuard::release] method is called, the data for the
-    /// given address will be deleted automatically when the guard is dropped.
-    /// This can prevent memory leaks. Users can read (and modify) the data and release the guard
-    /// if the data in the store is valid for further processing. If the data is faulty, no
-    /// manual deletion is necessary when returning from a processing function prematurely.
-    fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard;
-
-    /// Read data by yielding a read-only reference given a [StoreAddr]
-    fn read(&self, addr: &StoreAddr) -> Result<&[u8], StoreError>;
-
-    /// This function behaves like [Self::read], but consumes the provided address and returns a
-    /// RAII conformant guard object.
-    ///
-    /// Unless the guard [PoolRwGuard::release] method is called, the data for the
-    /// given address will be deleted automatically when the guard is dropped.
-    /// This can prevent memory leaks. Users can read the data and release the guard
-    /// if the data in the store is valid for further processing. If the data is faulty, no
-    /// manual deletion is necessary when returning from a processing function prematurely.
-    fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard;
-
-    /// Delete data inside the pool given a [StoreAddr]
-    fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError>;
-    fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError>;
-
-    /// Retrieve the length of the data at the given store address.
-    fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
-        if !self.has_element_at(addr)? {
-            return Err(StoreError::DataDoesNotExist(*addr));
-        }
-        Ok(self.read(addr)?.len())
-    }
-}
-
-impl LocalPool {
-    const STORE_FREE: PoolSize = PoolSize::MAX;
-    const MAX_SIZE: PoolSize = Self::STORE_FREE - 1;
-    /// Create a new local pool from the [given configuration][PoolCfg]. This function will sanitize
-    /// the given configuration as well.
-    pub fn new(mut cfg: PoolCfg) -> LocalPool {
-        let subpools_num = cfg.sanitize();
-        let mut local_pool = LocalPool {
-            pool_cfg: cfg,
-            pool: Vec::with_capacity(subpools_num),
-            sizes_lists: Vec::with_capacity(subpools_num),
-        };
-        for &(num_elems, elem_size) in local_pool.pool_cfg.cfg.iter() {
-            let next_pool_len = elem_size * num_elems as usize;
-            local_pool.pool.push(vec![0; next_pool_len]);
-            let next_sizes_list_len = num_elems as usize;
-            local_pool
-                .sizes_lists
-                .push(vec![Self::STORE_FREE; next_sizes_list_len]);
-        }
-        local_pool
-    }
-
-    fn addr_check(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
-        self.validate_addr(addr)?;
-        let pool_idx = addr.pool_idx as usize;
-        let size_list = self.sizes_lists.get(pool_idx).unwrap();
-        let curr_size = size_list[addr.packet_idx as usize];
-        if curr_size == Self::STORE_FREE {
-            return Err(StoreError::DataDoesNotExist(*addr));
-        }
-        Ok(curr_size)
-    }
-
-    fn validate_addr(&self, addr: &StoreAddr) -> Result<(), StoreError> {
-        let pool_idx = addr.pool_idx as usize;
-        if pool_idx >= self.pool_cfg.cfg.len() {
-            return Err(StoreError::InvalidStoreId(
-                StoreIdError::InvalidSubpool(addr.pool_idx),
-                Some(*addr),
-            ));
-        }
-        if addr.packet_idx >= self.pool_cfg.cfg[addr.pool_idx as usize].0 {
-            return Err(StoreError::InvalidStoreId(
-                StoreIdError::InvalidPacketIdx(addr.packet_idx),
-                Some(*addr),
-            ));
-        }
-        Ok(())
-    }
-
-    fn reserve(&mut self, data_len: usize) -> Result<StoreAddr, StoreError> {
-        let subpool_idx = self.find_subpool(data_len, 0)?;
-        let (slot, size_slot_ref) = self.find_empty(subpool_idx)?;
-        *size_slot_ref = data_len;
-        Ok(StoreAddr {
-            pool_idx: subpool_idx,
-            packet_idx: slot,
-        })
-    }
-
-    fn find_subpool(&self, req_size: usize, start_at_subpool: u16) -> Result<u16, StoreError> {
-        for (i, &(_, elem_size)) in self.pool_cfg.cfg.iter().enumerate() {
-            if i < start_at_subpool as usize {
-                continue;
-            }
-            if elem_size >= req_size {
-                return Ok(i as u16);
-            }
-        }
-        Err(StoreError::DataTooLarge(req_size))
-    }
-
-    fn write(&mut self, addr: &StoreAddr, data: &[u8]) -> Result<(), StoreError> {
-        let packet_pos = self.raw_pos(addr).ok_or_else(|| {
-            StoreError::InternalError(format!(
-                "write: Error in raw_pos func with address {addr:?}"
-            ))
-        })?;
-        let subpool = self.pool.get_mut(addr.pool_idx as usize).ok_or_else(|| {
-            StoreError::InternalError(format!(
-                "write: Error retrieving pool slice with address {addr:?}"
-            ))
-        })?;
-        let pool_slice = &mut subpool[packet_pos..packet_pos + data.len()];
-        pool_slice.copy_from_slice(data);
-        Ok(())
-    }
-
-    fn find_empty(&mut self, subpool: u16) -> Result<(u16, &mut usize), StoreError> {
-        if let Some(size_list) = self.sizes_lists.get_mut(subpool as usize) {
-            for (i, elem_size) in size_list.iter_mut().enumerate() {
-                if *elem_size == Self::STORE_FREE {
-                    return Ok((i as u16, elem_size));
-                }
-            }
-        } else {
-            return Err(StoreError::InvalidStoreId(
-                StoreIdError::InvalidSubpool(subpool),
-                None,
-            ));
-        }
-        Err(StoreError::StoreFull(subpool))
-    }
-
-    fn raw_pos(&self, addr: &StoreAddr) -> Option<usize> {
-        let (_, size) = self.pool_cfg.cfg.get(addr.pool_idx as usize)?;
-        Some(addr.packet_idx as usize * size)
-    }
-}
-
-impl PoolProvider for LocalPool {
-    fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError> {
-        let data_len = data.len();
-        if data_len > Self::MAX_SIZE {
-            return Err(StoreError::DataTooLarge(data_len));
-        }
-        let addr = self.reserve(data_len)?;
-        self.write(&addr, data)?;
-        Ok(addr)
-    }
-
-    fn free_element(&mut self, len: usize) -> Result<(StoreAddr, &mut [u8]), StoreError> {
-        if len > Self::MAX_SIZE {
-            return Err(StoreError::DataTooLarge(len));
-        }
-        let addr = self.reserve(len)?;
-        let raw_pos = self.raw_pos(&addr).unwrap();
-        let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + len];
-        Ok((addr, block))
-    }
-
-    fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError> {
-        let curr_size = self.addr_check(addr)?;
-        let raw_pos = self.raw_pos(addr).unwrap();
-        let block =
-            &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + curr_size];
-        Ok(block)
-    }
-
-    fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard {
-        PoolRwGuard::new(self, addr)
-    }
-
-    fn read(&self, addr: &StoreAddr) -> Result<&[u8], StoreError> {
-        let curr_size = self.addr_check(addr)?;
-        let raw_pos = self.raw_pos(addr).unwrap();
-        let block = &self.pool.get(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + curr_size];
-        Ok(block)
-    }
-
-    fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard {
-        PoolGuard::new(self, addr)
-    }
-
-    fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError> {
-        self.addr_check(&addr)?;
-        let block_size = self.pool_cfg.cfg.get(addr.pool_idx as usize).unwrap().1;
-        let raw_pos = self.raw_pos(&addr).unwrap();
-        let block =
-            &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + block_size];
-        let size_list = self.sizes_lists.get_mut(addr.pool_idx as usize).unwrap();
-        size_list[addr.packet_idx as usize] = Self::STORE_FREE;
-        block.fill(0);
-        Ok(())
-    }
-
-    fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError> {
-        self.validate_addr(addr)?;
-        let pool_idx = addr.pool_idx as usize;
-        let size_list = self.sizes_lists.get(pool_idx).unwrap();
-        let curr_size = size_list[addr.packet_idx as usize];
-        if curr_size == Self::STORE_FREE {
-            return Ok(false);
-        }
-        Ok(true)
-    }
-}
+#[cfg(feature = "alloc")]
+mod alloc_mod {
+    use crate::pool::{NumBlocks, StoreAddr, StoreError, StoreIdError};
+    use alloc::boxed::Box;
+    use alloc::vec;
+    use alloc::vec::Vec;
+    use delegate::delegate;
+    #[cfg(feature = "std")]
+    use std::sync::{Arc, RwLock};
+
+    #[cfg(feature = "std")]
+    pub type ShareablePoolProvider = Box<dyn PoolProvider + Send + Sync>;
+    #[cfg(feature = "std")]
+    pub type SharedPool = Arc<RwLock<ShareablePoolProvider>>;
+
+    type PoolSize = usize;
+    const STORE_FREE: PoolSize = PoolSize::MAX;
+    pub const POOL_MAX_SIZE: PoolSize = STORE_FREE - 1;
+
+    /// Configuration structure of the [local pool][LocalPool]
+    ///
+    /// # Parameters
+    ///
+    /// * `cfg`: Vector of tuples which represent a subpool. The first entry in the tuple specifies the
+    ///   number of memory blocks in the subpool, the second entry the size of the blocks
+    #[derive(Clone)]
+    pub struct PoolCfg {
+        cfg: Vec<(NumBlocks, usize)>,
+    }
+
+    impl PoolCfg {
+        pub fn new(cfg: Vec<(NumBlocks, usize)>) -> Self {
+            PoolCfg { cfg }
+        }
+
+        pub fn cfg(&self) -> &Vec<(NumBlocks, usize)> {
+            &self.cfg
+        }
+
+        pub fn sanitize(&mut self) -> usize {
+            self.cfg
+                .retain(|&(bucket_num, size)| bucket_num > 0 && size < POOL_MAX_SIZE);
+            self.cfg
+                .sort_unstable_by(|(_, sz0), (_, sz1)| sz0.partial_cmp(sz1).unwrap());
+            self.cfg.len()
+        }
+    }
 
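Everything that needs `Vec`, `Box`, or `String` now lives in `pool::alloc_mod` and is re-exported when the `alloc` feature is active, so `no_std` targets with an allocator keep the full pool API. A usage sketch, assuming the crate is consumed as `satrs_core` and re-exports these names from `pool`:

    use satrs_core::pool::{LocalPool, PoolCfg, PoolProvider};

    // Two subpools: 16 blocks of 32 bytes and 4 blocks of 128 bytes.
    // LocalPool::new calls PoolCfg::sanitize, which drops empty or oversized
    // subpools and sorts the rest by block size.
    let cfg = PoolCfg::new(vec![(16, 32), (4, 128)]);
    let mut pool = LocalPool::new(cfg);
    let addr = pool.add(&[1, 2, 3]).expect("add failed");
    assert_eq!(pool.read(&addr).expect("read failed"), &[1, 2, 3]);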
@@ -520,11 +292,245 @@ impl<'a> PoolRwGuard<'a> {
         );
     }
 
+    pub trait PoolProvider {
+        /// Add new data to the pool. The provider should attempt to reserve a memory block with the
+        /// appropriate size and then copy the given data to the block. Yields a [StoreAddr] which can
+        /// be used to access the data stored in the pool
+        fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError>;
+
+        /// The provider should attempt to reserve a free memory block with the appropriate size and
+        /// then return a mutable reference to it. Yields a [StoreAddr] which can be used to access
+        /// the data stored in the pool
+        fn free_element(&mut self, len: usize) -> Result<(StoreAddr, &mut [u8]), StoreError>;
+
+        /// Modify data added previously using a given [StoreAddr] by yielding a mutable reference
+        /// to it
+        fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError>;
+
+        /// This function behaves like [Self::modify], but consumes the provided address and returns a
+        /// RAII conformant guard object.
+        ///
+        /// Unless the guard [PoolRwGuard::release] method is called, the data for the
+        /// given address will be deleted automatically when the guard is dropped.
+        /// This can prevent memory leaks. Users can read (and modify) the data and release the guard
+        /// if the data in the store is valid for further processing. If the data is faulty, no
+        /// manual deletion is necessary when returning from a processing function prematurely.
+        fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard;
+
+        /// Read data by yielding a read-only reference given a [StoreAddr]
+        fn read(&self, addr: &StoreAddr) -> Result<&[u8], StoreError>;
+
+        /// This function behaves like [Self::read], but consumes the provided address and returns a
+        /// RAII conformant guard object.
+        ///
+        /// Unless the guard [PoolRwGuard::release] method is called, the data for the
+        /// given address will be deleted automatically when the guard is dropped.
+        /// This can prevent memory leaks. Users can read the data and release the guard
+        /// if the data in the store is valid for further processing. If the data is faulty, no
+        /// manual deletion is necessary when returning from a processing function prematurely.
+        fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard;
+
+        /// Delete data inside the pool given a [StoreAddr]
+        fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError>;
+        fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError>;
+
+        /// Retrieve the length of the data at the given store address.
+        fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
+            if !self.has_element_at(addr)? {
+                return Err(StoreError::DataDoesNotExist(*addr));
+            }
+            Ok(self.read(addr)?.len())
+        }
+    }
+
+    /// Pool implementation providing sub-pools with fixed size memory blocks. More details in
+    /// the [module documentation][super::pool]
+    pub struct LocalPool {
+        pool_cfg: PoolCfg,
+        pool: Vec<Vec<u8>>,
+        sizes_lists: Vec<Vec<PoolSize>>,
+    }
+
+    impl LocalPool {
+        /// Create a new local pool from the [given configuration][PoolCfg]. This function will sanitize
+        /// the given configuration as well.
+        pub fn new(mut cfg: PoolCfg) -> LocalPool {
+            let subpools_num = cfg.sanitize();
+            let mut local_pool = LocalPool {
+                pool_cfg: cfg,
+                pool: Vec::with_capacity(subpools_num),
+                sizes_lists: Vec::with_capacity(subpools_num),
+            };
+            for &(num_elems, elem_size) in local_pool.pool_cfg.cfg.iter() {
+                let next_pool_len = elem_size * num_elems as usize;
+                local_pool.pool.push(vec![0; next_pool_len]);
+                let next_sizes_list_len = num_elems as usize;
+                local_pool
+                    .sizes_lists
+                    .push(vec![STORE_FREE; next_sizes_list_len]);
+            }
+            local_pool
+        }
+
+        fn addr_check(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
+            self.validate_addr(addr)?;
+            let pool_idx = addr.pool_idx as usize;
+            let size_list = self.sizes_lists.get(pool_idx).unwrap();
+            let curr_size = size_list[addr.packet_idx as usize];
+            if curr_size == STORE_FREE {
+                return Err(StoreError::DataDoesNotExist(*addr));
+            }
+            Ok(curr_size)
+        }
+
+        fn validate_addr(&self, addr: &StoreAddr) -> Result<(), StoreError> {
+            let pool_idx = addr.pool_idx as usize;
+            if pool_idx >= self.pool_cfg.cfg.len() {
+                return Err(StoreError::InvalidStoreId(
+                    StoreIdError::InvalidSubpool(addr.pool_idx),
+                    Some(*addr),
+                ));
+            }
+            if addr.packet_idx >= self.pool_cfg.cfg[addr.pool_idx as usize].0 {
+                return Err(StoreError::InvalidStoreId(
+                    StoreIdError::InvalidPacketIdx(addr.packet_idx),
+                    Some(*addr),
+                ));
+            }
+            Ok(())
+        }
+
+        fn reserve(&mut self, data_len: usize) -> Result<StoreAddr, StoreError> {
+            let subpool_idx = self.find_subpool(data_len, 0)?;
+            let (slot, size_slot_ref) = self.find_empty(subpool_idx)?;
+            *size_slot_ref = data_len;
+            Ok(StoreAddr {
+                pool_idx: subpool_idx,
+                packet_idx: slot,
+            })
+        }
+
+        fn find_subpool(&self, req_size: usize, start_at_subpool: u16) -> Result<u16, StoreError> {
+            for (i, &(_, elem_size)) in self.pool_cfg.cfg.iter().enumerate() {
+                if i < start_at_subpool as usize {
+                    continue;
+                }
+                if elem_size >= req_size {
+                    return Ok(i as u16);
+                }
+            }
+            Err(StoreError::DataTooLarge(req_size))
+        }
+
+        fn write(&mut self, addr: &StoreAddr, data: &[u8]) -> Result<(), StoreError> {
+            let packet_pos = self.raw_pos(addr).ok_or(StoreError::InternalError(0))?;
+            let subpool = self
+                .pool
+                .get_mut(addr.pool_idx as usize)
+                .ok_or(StoreError::InternalError(1))?;
+            let pool_slice = &mut subpool[packet_pos..packet_pos + data.len()];
+            pool_slice.copy_from_slice(data);
+            Ok(())
+        }
+
+        fn find_empty(&mut self, subpool: u16) -> Result<(u16, &mut usize), StoreError> {
+            if let Some(size_list) = self.sizes_lists.get_mut(subpool as usize) {
+                for (i, elem_size) in size_list.iter_mut().enumerate() {
+                    if *elem_size == STORE_FREE {
+                        return Ok((i as u16, elem_size));
+                    }
+                }
+            } else {
+                return Err(StoreError::InvalidStoreId(
+                    StoreIdError::InvalidSubpool(subpool),
+                    None,
+                ));
+            }
+            Err(StoreError::StoreFull(subpool))
+        }
+
+        fn raw_pos(&self, addr: &StoreAddr) -> Option<usize> {
+            let (_, size) = self.pool_cfg.cfg.get(addr.pool_idx as usize)?;
+            Some(addr.packet_idx as usize * size)
+        }
+    }
+
+    impl PoolProvider for LocalPool {
+        fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError> {
+            let data_len = data.len();
+            if data_len > POOL_MAX_SIZE {
+                return Err(StoreError::DataTooLarge(data_len));
+            }
+            let addr = self.reserve(data_len)?;
+            self.write(&addr, data)?;
+            Ok(addr)
+        }
+
+        fn free_element(&mut self, len: usize) -> Result<(StoreAddr, &mut [u8]), StoreError> {
+            if len > POOL_MAX_SIZE {
+                return Err(StoreError::DataTooLarge(len));
+            }
+            let addr = self.reserve(len)?;
+            let raw_pos = self.raw_pos(&addr).unwrap();
+            let block =
+                &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + len];
+            Ok((addr, block))
+        }
+
+        fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError> {
+            let curr_size = self.addr_check(addr)?;
+            let raw_pos = self.raw_pos(addr).unwrap();
+            let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()
+                [raw_pos..raw_pos + curr_size];
+            Ok(block)
+        }
+
+        fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard {
+            PoolRwGuard::new(self, addr)
+        }
+
+        fn read(&self, addr: &StoreAddr) -> Result<&[u8], StoreError> {
+            let curr_size = self.addr_check(addr)?;
+            let raw_pos = self.raw_pos(addr).unwrap();
+            let block =
+                &self.pool.get(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + curr_size];
+            Ok(block)
+        }
+
+        fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard {
+            PoolGuard::new(self, addr)
+        }
+
+        fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError> {
+            self.addr_check(&addr)?;
+            let block_size = self.pool_cfg.cfg.get(addr.pool_idx as usize).unwrap().1;
+            let raw_pos = self.raw_pos(&addr).unwrap();
+            let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()
+                [raw_pos..raw_pos + block_size];
+            let size_list = self.sizes_lists.get_mut(addr.pool_idx as usize).unwrap();
+            size_list[addr.packet_idx as usize] = STORE_FREE;
+            block.fill(0);
+            Ok(())
+        }
+
+        fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError> {
+            self.validate_addr(addr)?;
+            let pool_idx = addr.pool_idx as usize;
+            let size_list = self.sizes_lists.get(pool_idx).unwrap();
+            let curr_size = size_list[addr.packet_idx as usize];
+            if curr_size == STORE_FREE {
+                return Ok(false);
+            }
+            Ok(true)
+        }
+    }
+}
 
 #[cfg(test)]
 mod tests {
     use crate::pool::{
         LocalPool, PoolCfg, PoolGuard, PoolProvider, PoolRwGuard, StoreAddr, StoreError,
-        StoreIdError,
+        StoreIdError, POOL_MAX_SIZE,
     };
     use std::vec;
 
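The guard methods keep their delete-on-drop contract after the move into `alloc_mod`. A usage sketch, assuming `PoolGuard` exposes `read` and `release` through the `delegate` macro as the guard impls in this file do:

    let addr = pool.add(&[7; 4]).unwrap();
    {
        let mut guard = pool.read_with_guard(addr);
        assert_eq!(guard.read().unwrap()[0], 7);
        // No release() call: the entry is deleted when the guard drops here.
    }
    assert!(!pool.has_element_at(&addr).unwrap());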
@@ -537,19 +543,19 @@ mod tests {
     #[test]
     fn test_cfg() {
        // Values where number of buckets is 0 or size is too large should be removed
-        let mut pool_cfg = PoolCfg::new(vec![(0, 0), (1, 0), (2, LocalPool::MAX_SIZE)]);
+        let mut pool_cfg = PoolCfg::new(vec![(0, 0), (1, 0), (2, POOL_MAX_SIZE)]);
         pool_cfg.sanitize();
-        assert_eq!(pool_cfg.cfg, vec![(1, 0)]);
+        assert_eq!(*pool_cfg.cfg(), vec![(1, 0)]);
         // Entries should be ordered according to bucket size
         pool_cfg = PoolCfg::new(vec![(16, 6), (32, 3), (8, 12)]);
         pool_cfg.sanitize();
-        assert_eq!(pool_cfg.cfg, vec![(32, 3), (16, 6), (8, 12)]);
+        assert_eq!(*pool_cfg.cfg(), vec![(32, 3), (16, 6), (8, 12)]);
         // Unstable sort is used, so order of entries with same block length should not matter
         pool_cfg = PoolCfg::new(vec![(12, 12), (14, 16), (10, 12)]);
         pool_cfg.sanitize();
         assert!(
-            pool_cfg.cfg == vec![(12, 12), (10, 12), (14, 16)]
-                || pool_cfg.cfg == vec![(10, 12), (12, 12), (14, 16)]
+            *pool_cfg.cfg() == vec![(12, 12), (10, 12), (14, 16)]
+                || *pool_cfg.cfg() == vec![(10, 12), (12, 12), (14, 16)]
         );
     }
 
@@ -721,11 +727,11 @@ mod tests {
     #[test]
     fn test_data_too_large_1() {
         let mut local_pool = basic_small_pool();
-        let res = local_pool.free_element(LocalPool::MAX_SIZE + 1);
+        let res = local_pool.free_element(POOL_MAX_SIZE + 1);
         assert!(res.is_err());
         assert_eq!(
             res.unwrap_err(),
-            StoreError::DataTooLarge(LocalPool::MAX_SIZE + 1)
+            StoreError::DataTooLarge(POOL_MAX_SIZE + 1)
         );
     }
 
@@ -8,10 +8,10 @@ use crate::pus::{
     EcssTcReceiver, EcssTmSender, PartialPusHandlingError, PusPacketHandlerResult,
     PusPacketHandlingError, PusServiceBase, PusServiceHandler,
 };
+use alloc::boxed::Box;
 use spacepackets::ecss::event::Subservice;
 use spacepackets::ecss::tc::PusTcReader;
 use spacepackets::ecss::PusPacket;
-use std::boxed::Box;
 use std::sync::mpsc::Sender;
 
 pub struct PusService5EventHandler {
@@ -8,18 +8,22 @@ use core::fmt::{Display, Formatter};
 use downcast_rs::{impl_downcast, Downcast};
 #[cfg(feature = "alloc")]
 use dyn_clone::DynClone;
+#[cfg(feature = "std")]
+use std::error::Error;
+
 use spacepackets::ecss::tc::{PusTcCreator, PusTcReader};
 use spacepackets::ecss::tm::PusTmCreator;
 use spacepackets::ecss::PusError;
 use spacepackets::{ByteConversionError, SizeMissmatch, SpHeader};
-use std::error::Error;
 
 pub mod event;
 pub mod event_man;
+#[cfg(feature = "std")]
 pub mod event_srv;
 pub mod hk;
 pub mod mode;
 pub mod scheduler;
+#[cfg(feature = "std")]
 pub mod scheduler_srv;
 #[cfg(feature = "std")]
 pub mod test;
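`event_srv` and `scheduler_srv` become `std`-only because the service handlers are built around `std::sync::mpsc` channels (see the `use std::sync::mpsc::Sender;` import in the previous hunk). A downstream crate that wants to stay buildable without `std` can mirror the same gate; a minimal sketch:

    // Only wire up the channel-based PUS services when std is available.
    #[cfg(feature = "std")]
    fn spawn_pus_services() {
        // Construct PusService5EventHandler / the scheduler service here.
    }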
@@ -2,22 +2,17 @@
 //!
 //! The core data structure of this module is the [PusScheduler]. This structure can be used
 //! to perform the scheduling of telecommands like specified in the ECSS standard.
-use crate::pool::{StoreAddr, StoreError};
-use core::fmt::{Debug, Display, Formatter};
-use core::time::Duration;
+use core::fmt::Debug;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 use spacepackets::ecss::scheduling::TimeWindowType;
 use spacepackets::ecss::tc::{GenericPusTcSecondaryHeader, IsPusTelecommand};
-use spacepackets::ecss::PusError;
-use spacepackets::time::{CcsdsTimeProvider, TimestampError, UnixTimestamp};
+use spacepackets::time::CcsdsTimeProvider;
 use spacepackets::CcsdsPacket;
 #[cfg(feature = "std")]
 use std::error::Error;
 
-//#[cfg(feature = "std")]
-//pub use std_mod::*;
+use crate::pool::StoreAddr;
 
 #[cfg(feature = "alloc")]
 pub use alloc_mod::*;
 
@@ -61,6 +56,109 @@ impl RequestId {
     }
 }
 
+/// This is the format stored internally by the TC scheduler for each scheduled telecommand.
+/// It consists of the address of that telecommand in the TC pool and a request ID.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub struct TcInfo {
+    addr: StoreAddr,
+    request_id: RequestId,
+}
+
+impl TcInfo {
+    pub fn addr(&self) -> StoreAddr {
+        self.addr
+    }
+
+    pub fn request_id(&self) -> RequestId {
+        self.request_id
+    }
+
+    pub fn new(addr: StoreAddr, request_id: RequestId) -> Self {
+        TcInfo { addr, request_id }
+    }
+}
+
+pub struct TimeWindow<TimeProvder> {
+    time_window_type: TimeWindowType,
+    start_time: Option<TimeProvder>,
+    end_time: Option<TimeProvder>,
+}
+
+impl<TimeProvider> TimeWindow<TimeProvider> {
+    pub fn new_select_all() -> Self {
+        Self {
+            time_window_type: TimeWindowType::SelectAll,
+            start_time: None,
+            end_time: None,
+        }
+    }
+
+    pub fn time_window_type(&self) -> TimeWindowType {
+        self.time_window_type
+    }
+
+    pub fn start_time(&self) -> Option<&TimeProvider> {
+        self.start_time.as_ref()
+    }
+
+    pub fn end_time(&self) -> Option<&TimeProvider> {
+        self.end_time.as_ref()
+    }
+}
+
+impl<TimeProvider: CcsdsTimeProvider + Clone> TimeWindow<TimeProvider> {
+    pub fn new_from_time_to_time(start_time: &TimeProvider, end_time: &TimeProvider) -> Self {
+        Self {
+            time_window_type: TimeWindowType::TimeTagToTimeTag,
+            start_time: Some(start_time.clone()),
+            end_time: Some(end_time.clone()),
+        }
+    }
+
+    pub fn new_from_time(start_time: &TimeProvider) -> Self {
+        Self {
+            time_window_type: TimeWindowType::FromTimeTag,
+            start_time: Some(start_time.clone()),
+            end_time: None,
+        }
+    }
+
+    pub fn new_to_time(end_time: &TimeProvider) -> Self {
+        Self {
+            time_window_type: TimeWindowType::ToTimeTag,
+            start_time: None,
+            end_time: Some(end_time.clone()),
+        }
+    }
+}
+
+#[cfg(feature = "alloc")]
+pub mod alloc_mod {
+    use super::*;
+    use crate::pool::{PoolProvider, StoreAddr, StoreError};
+    use alloc::collections::btree_map::{Entry, Range};
+    use alloc::collections::BTreeMap;
+    use alloc::vec;
+    use alloc::vec::Vec;
+    use core::fmt::{Display, Formatter};
+    use core::time::Duration;
+    use spacepackets::ecss::scheduling::TimeWindowType;
+    use spacepackets::ecss::tc::{
+        GenericPusTcSecondaryHeader, IsPusTelecommand, PusTc, PusTcReader,
+    };
+    use spacepackets::ecss::{PusError, PusPacket};
+    use spacepackets::time::cds::DaysLen24Bits;
+    use spacepackets::time::{cds, CcsdsTimeProvider, TimeReader, TimestampError, UnixTimestamp};
+
+    #[cfg(feature = "std")]
+    use std::time::SystemTimeError;
+
+    enum DeletionResult {
+        WithoutStoreDeletion(Option<StoreAddr>),
+        WithStoreDeletion(Result<bool, StoreError>),
+    }
+
 #[derive(Debug, Clone, PartialEq, Eq)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub enum ScheduleError {
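`TcInfo` and `TimeWindow` move to the top level of the scheduler module, so they stay available without `alloc`. A short sketch of the time-window constructors, assuming `spacepackets`' `cds::TimeProvider` as the `CcsdsTimeProvider + Clone` implementor and its `from_now_with_u16_days` constructor:

    use spacepackets::time::cds::TimeProvider;

    let t0 = TimeProvider::from_now_with_u16_days().unwrap();
    // Select every stored TC scheduled up to t0.
    let window = TimeWindow::new_to_time(&t0);
    assert!(window.start_time().is_none());
    assert!(window.end_time().is_some());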
@@ -133,108 +231,6 @@ impl From<TimestampError> for ScheduleError {
 #[cfg(feature = "std")]
 impl Error for ScheduleError {}
 
-/// This is the format stored internally by the TC scheduler for each scheduled telecommand.
-/// It consists of the address of that telecommand in the TC pool and a request ID.
-#[derive(Debug, Copy, Clone, PartialEq, Eq)]
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-pub struct TcInfo {
-    addr: StoreAddr,
-    request_id: RequestId,
-}
-
-impl TcInfo {
-    pub fn addr(&self) -> StoreAddr {
-        self.addr
-    }
-
-    pub fn request_id(&self) -> RequestId {
-        self.request_id
-    }
-
-    pub fn new(addr: StoreAddr, request_id: RequestId) -> Self {
-        TcInfo { addr, request_id }
-    }
-}
-
-enum DeletionResult {
-    WithoutStoreDeletion(Option<StoreAddr>),
-    WithStoreDeletion(Result<bool, StoreError>),
-}
-
-pub struct TimeWindow<TimeProvder> {
-    time_window_type: TimeWindowType,
-    start_time: Option<TimeProvder>,
-    end_time: Option<TimeProvder>,
-}
-
-impl<TimeProvider> TimeWindow<TimeProvider> {
-    pub fn new_select_all() -> Self {
-        Self {
-            time_window_type: TimeWindowType::SelectAll,
-            start_time: None,
-            end_time: None,
-        }
-    }
-
-    pub fn time_window_type(&self) -> TimeWindowType {
-        self.time_window_type
-    }
-
-    pub fn start_time(&self) -> Option<&TimeProvider> {
-        self.start_time.as_ref()
-    }
-
-    pub fn end_time(&self) -> Option<&TimeProvider> {
-        self.end_time.as_ref()
-    }
-}
-
-impl<TimeProvider: CcsdsTimeProvider + Clone> TimeWindow<TimeProvider> {
-    pub fn new_from_time_to_time(start_time: &TimeProvider, end_time: &TimeProvider) -> Self {
-        Self {
-            time_window_type: TimeWindowType::TimeTagToTimeTag,
-            start_time: Some(start_time.clone()),
-            end_time: Some(end_time.clone()),
-        }
-    }
-
-    pub fn new_from_time(start_time: &TimeProvider) -> Self {
-        Self {
-            time_window_type: TimeWindowType::FromTimeTag,
-            start_time: Some(start_time.clone()),
-            end_time: None,
-        }
-    }
-
-    pub fn new_to_time(end_time: &TimeProvider) -> Self {
-        Self {
-            time_window_type: TimeWindowType::ToTimeTag,
-            start_time: None,
-            end_time: Some(end_time.clone()),
-        }
-    }
-}
-
-#[cfg(feature = "alloc")]
-pub mod alloc_mod {
-    use crate::pool::{PoolProvider, StoreAddr, StoreError};
-    use crate::pus::scheduler::{DeletionResult, RequestId, ScheduleError, TcInfo, TimeWindow};
-    use alloc::collections::btree_map::{Entry, Range};
-    use alloc::collections::BTreeMap;
-    use alloc::vec;
-    use alloc::vec::Vec;
-    use core::time::Duration;
-    use spacepackets::ecss::scheduling::TimeWindowType;
-    use spacepackets::ecss::tc::{
-        GenericPusTcSecondaryHeader, IsPusTelecommand, PusTc, PusTcReader,
-    };
-    use spacepackets::ecss::PusPacket;
-    use spacepackets::time::cds::DaysLen24Bits;
-    use spacepackets::time::{cds, CcsdsTimeProvider, TimeReader, UnixTimestamp};
-
-    #[cfg(feature = "std")]
-    use std::time::SystemTimeError;
-
 /// This is the core data structure for scheduling PUS telecommands with [alloc] support.
 ///
 /// It is assumed that the actual telecommand data is stored in a separate TC pool offering