Compare commits
15 Commits
ff4ecc1df4 ...
satrs-core

Commits (SHA1):

20d5212710
2dd38c163f
79d095b1f7
377ffc052c
c39ce99e2c
c0692a3523
712dc718f9
a69347af7b
3c7113c231
9c310e7a36
0bab5799e5
b56bbc8c41
12ac5913aa
d017b9c179
f3ca570e53
@@ -5,7 +5,14 @@ High-level documentation of the [sat-rs project](https://absatsw.irs.uni-stuttga
 
 ## Building
 
-If you have not done so, install `mdbook` using `cargo install mdbook --locked`.
+If you have not done so, install the pre-requisites first:
+
+```sh
+cargo install mdbook --locked
+cargo install mdbook-linkcheck --locked
+```
+
+After that, you can build the book with:
+
+```sh
+mdbook build
@@ -11,7 +11,8 @@ time where the OBSW might be running on Linux based systems with hundreds of MBs

A useful pattern used commonly in space systems is to limit heap allocations to program
initialization time and avoid frequent run-time allocations. This prevents issues like
running out of memory (something even Rust can not protect from) or heap fragmentation.
running out of memory (something even Rust can not protect from) or heap fragmentation on systems
without a MMU.

# Using pre-allocated pool structures

@@ -24,6 +25,12 @@ For example, a very small telecommand (TC) pool might look like this:

[figure: example of a very small telecommand (TC) pool]

The core of the pool abstractions is the
[PoolProvider trait](https://docs.rs/satrs-core/0.1.0-alpha.3/satrs_core/pool/trait.PoolProvider.html).
This trait specifies the general API a pool structure should have without making assumption
of how the data is stored.

This trait is implemented by a static memory pool implementation.
The code to generate this static pool would look like this:

```rust
@@ -39,21 +46,27 @@ let tc_pool = StaticMemoryPool::new(StaticPoolConfig::new(vec

- [`StaticMemoryPool` API](https://docs.rs/satrs-core/0.1.0-alpha.3/satrs_core/pool/struct.StaticMemoryPool.html)

for more details.

In the future, optimized pool structures which use standard containers or are
[`Sync`](https://doc.rust-lang.org/std/marker/trait.Sync.html) by default might be added as well.
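For reference, a minimal creation and usage sketch along the lines of the (truncated) snippet above, using the closure-based 0.1.0-alpha.3 API. The subpool configuration mirrors the toy example from the `satrs_core::pool` module documentation and is an illustration, not a recommended sizing:

```rust
use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};

// 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes, no spilling to larger subpools.
let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)], false);
let mut tc_pool = StaticMemoryPool::new(pool_cfg);

// Adding data copies it into a fitting subpool block and yields a store address.
let addr = tc_pool.add(&[0x17, 0x01, 0x02, 0x03]).expect("adding TC failed");

// Reading copies the stored data into a user-provided buffer and returns the stored length.
let mut read_buf: [u8; 16] = [0; 16];
let tc_len = tc_pool.read(&addr, &mut read_buf).expect("reading TC failed");
assert_eq!(tc_len, 4);
```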
# Using special crates to prevent smaller allocations

Another common way to use the heap on host systems is using containers like `String` and `Vec<u8>`
to work with data where the size is not known beforehand. The most common solution for embedded
systems is to determine the maximum expected size and then use a pre-allocated `u8` buffer and a
size variable. Alternatively, you can use the following crates for more convenience or a smart
behaviour which at the very least reduce heap allocations:
behaviour which at the very least reduces heap allocations:

1. [`smallvec`](https://docs.rs/smallvec/latest/smallvec/).
2. [`arrayvec`](https://docs.rs/arrayvec/latest/arrayvec/index.html) which also contains an
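For illustration (this snippet is not part of the book source and assumes the current const-generic `arrayvec` API), both crates keep small payloads on the stack; `smallvec` spills to the heap only when the inline capacity is exceeded, while `arrayvec` never allocates and reports an error instead:

```rust
use arrayvec::ArrayVec;
use smallvec::SmallVec;

// Up to 16 bytes live inline on the stack; exceeding that triggers a single heap allocation.
let mut small: SmallVec<[u8; 16]> = SmallVec::new();
small.extend_from_slice(&[1, 2, 3]);
assert!(!small.spilled());

// Fixed capacity of 16 bytes, never allocates; pushing beyond capacity fails instead.
let mut fixed = ArrayVec::<u8, 16>::new();
fixed.try_extend_from_slice(&[1, 2, 3]).unwrap();
assert!(fixed.try_push(4).is_ok());
```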
@@ -1,6 +1,6 @@
 [package]
 name = "satrs-core"
-version = "0.1.0-alpha.2"
+version = "0.1.0-alpha.3"
 edition = "2021"
 rust-version = "1.61"
 authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
@ -1,13 +1,21 @@
|
||||
//! # Pool implementation providing memory pools for packet storage.
|
||||
//!
|
||||
//! This module provides generic abstractions for memory pools which provide a storage
|
||||
//! machanism for variable sized data like Telemetry and Telecommand (TMTC) packets. The core
|
||||
//! abstraction for this is the [PoolProvider] trait.
|
||||
//!
|
||||
//! It also contains the [StaticMemoryPool] as a concrete implementation which can be used to avoid
|
||||
//! dynamic run-time allocations for the storage of TMTC packets.
|
||||
//!
|
||||
//! # Example for the [StaticMemoryPool]
|
||||
//!
|
||||
//! ```
|
||||
//! use satrs_core::pool::{PoolProviderMemInPlace, StaticMemoryPool, StaticPoolConfig};
|
||||
//! use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};
|
||||
//!
|
||||
//! // 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes
|
||||
//! let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)]);
|
||||
//! let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)], false);
|
||||
//! let mut local_pool = StaticMemoryPool::new(pool_cfg);
|
||||
//! let mut read_buf: [u8; 16] = [0; 16];
|
||||
//! let mut addr;
|
||||
//! {
|
||||
//! // Add new data to the pool
|
||||
@ -20,25 +28,25 @@
|
||||
//!
|
||||
//! {
|
||||
//! // Read the store data back
|
||||
//! let res = local_pool.read(&addr);
|
||||
//! let res = local_pool.read(&addr, &mut read_buf);
|
||||
//! assert!(res.is_ok());
|
||||
//! let buf_read_back = res.unwrap();
|
||||
//! assert_eq!(buf_read_back.len(), 4);
|
||||
//! assert_eq!(buf_read_back[0], 42);
|
||||
//! let read_bytes = res.unwrap();
|
||||
//! assert_eq!(read_bytes, 4);
|
||||
//! assert_eq!(read_buf[0], 42);
|
||||
//! // Modify the stored data
|
||||
//! let res = local_pool.modify(&addr);
|
||||
//! let res = local_pool.modify(&addr, |buf| {
|
||||
//! buf[0] = 12;
|
||||
//! });
|
||||
//! assert!(res.is_ok());
|
||||
//! let buf_read_back = res.unwrap();
|
||||
//! buf_read_back[0] = 12;
|
||||
//! }
|
||||
//!
|
||||
//! {
|
||||
//! // Read the modified data back
|
||||
//! let res = local_pool.read(&addr);
|
||||
//! let res = local_pool.read(&addr, &mut read_buf);
|
||||
//! assert!(res.is_ok());
|
||||
//! let buf_read_back = res.unwrap();
|
||||
//! assert_eq!(buf_read_back.len(), 4);
|
||||
//! assert_eq!(buf_read_back[0], 12);
|
||||
//! let read_bytes = res.unwrap();
|
||||
//! assert_eq!(read_bytes, 4);
|
||||
//! assert_eq!(read_buf[0], 12);
|
||||
//! }
|
||||
//!
|
||||
//! // Delete the stored data
|
||||
@ -46,21 +54,21 @@
|
||||
//!
|
||||
//! // Get a free element in the pool with an appropriate size
|
||||
//! {
|
||||
//! let res = local_pool.free_element(12);
|
||||
//! let res = local_pool.free_element(12, |buf| {
|
||||
//! buf[0] = 7;
|
||||
//! });
|
||||
//! assert!(res.is_ok());
|
||||
//! let (tmp, mut_buf) = res.unwrap();
|
||||
//! addr = tmp;
|
||||
//! mut_buf[0] = 7;
|
||||
//! addr = res.unwrap();
|
||||
//! }
|
||||
//!
|
||||
//! // Read back the data
|
||||
//! {
|
||||
//! // Read the store data back
|
||||
//! let res = local_pool.read(&addr);
|
||||
//! let res = local_pool.read(&addr, &mut read_buf);
|
||||
//! assert!(res.is_ok());
|
||||
//! let buf_read_back = res.unwrap();
|
||||
//! assert_eq!(buf_read_back.len(), 12);
|
||||
//! assert_eq!(buf_read_back[0], 7);
|
||||
//! let read_bytes = res.unwrap();
|
||||
//! assert_eq!(read_bytes, 12);
|
||||
//! assert_eq!(read_buf[0], 7);
|
||||
//! }
|
||||
//! ```
|
||||
#[cfg(feature = "alloc")]
|
||||
@ -70,6 +78,7 @@ use core::fmt::{Display, Formatter};
|
||||
use delegate::delegate;
|
||||
#[cfg(feature = "serde")]
|
||||
use serde::{Deserialize, Serialize};
|
||||
use spacepackets::ByteConversionError;
|
||||
#[cfg(feature = "std")]
|
||||
use std::error::Error;
|
||||
|
||||
@ -151,6 +160,8 @@ pub enum StoreError {
|
||||
InvalidStoreId(StoreIdError, Option<StoreAddr>),
|
||||
/// Valid subpool and packet index, but no data is stored at the given address
|
||||
DataDoesNotExist(StoreAddr),
|
||||
ByteConversionError(spacepackets::ByteConversionError),
|
||||
LockError,
|
||||
/// Internal or configuration errors
|
||||
InternalError(u32),
|
||||
}
|
||||
@ -173,10 +184,22 @@ impl Display for StoreError {
|
||||
StoreError::InternalError(e) => {
|
||||
write!(f, "internal error: {e}")
|
||||
}
|
||||
StoreError::ByteConversionError(e) => {
|
||||
write!(f, "store error: {e}")
|
||||
}
|
||||
StoreError::LockError => {
|
||||
write!(f, "lock error")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ByteConversionError> for StoreError {
|
||||
fn from(value: ByteConversionError) -> Self {
|
||||
Self::ByteConversionError(value)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl Error for StoreError {
|
||||
fn source(&self) -> Option<&(dyn Error + 'static)> {
|
||||
@ -187,41 +210,61 @@ impl Error for StoreError {
|
||||
}
|
||||
}
|
||||
|
||||
/// Generic trait for pool providers where the data can be modified and read in-place. This
|
||||
/// generally means that a shared pool structure has to be wrapped inside a lock structure.
|
||||
pub trait PoolProviderMemInPlace {
|
||||
/// Generic trait for pool providers which provide memory pools for variable sized data.
|
||||
///
|
||||
/// It specifies a basic API to [Self::add], [Self::modify], [Self::read] and [Self::delete] data
|
||||
/// in the store at its core. The API was designed so internal optimizations can be performed
|
||||
/// more easily and that is is also possible to make the pool structure [Sync] without the whole
|
||||
/// pool structure being wrapped inside a lock.
|
||||
pub trait PoolProvider {
|
||||
/// Add new data to the pool. The provider should attempt to reserve a memory block with the
|
||||
/// appropriate size and then copy the given data to the block. Yields a [StoreAddr] which can
|
||||
/// be used to access the data stored in the pool
|
||||
fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError>;
|
||||
|
||||
/// The provider should attempt to reserve a free memory block with the appropriate size and
|
||||
/// then return a mutable reference to it. Yields a [StoreAddr] which can be used to access
|
||||
/// the data stored in the pool
|
||||
fn free_element(&mut self, len: usize) -> Result<(StoreAddr, &mut [u8]), StoreError>;
|
||||
/// The provider should attempt to reserve a free memory block with the appropriate size first.
|
||||
/// It then executes a user-provided closure and passes a mutable reference to that memory
|
||||
/// block to the closure. This allows the user to write data to the memory block.
|
||||
/// The function should yield a [StoreAddr] which can be used to access the data stored in the
|
||||
/// pool.
|
||||
fn free_element<W: FnMut(&mut [u8])>(
|
||||
&mut self,
|
||||
len: usize,
|
||||
writer: W,
|
||||
) -> Result<StoreAddr, StoreError>;
|
||||
|
||||
/// Modify data added previously using a given [StoreAddr] by yielding a mutable reference
|
||||
/// to it
|
||||
fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError>;
|
||||
/// Modify data added previously using a given [StoreAddr]. The provider should use the store
|
||||
/// address to determine if a memory block exists for that address. If it does, it should
|
||||
/// call the user-provided closure and pass a mutable reference to the memory block
|
||||
/// to the closure. This allows the user to modify the memory block.
|
||||
fn modify<U: FnMut(&mut [u8])>(
|
||||
&mut self,
|
||||
addr: &StoreAddr,
|
||||
updater: U,
|
||||
) -> Result<(), StoreError>;
|
||||
|
||||
/// Read data by yielding a read-only reference given a [StoreAddr]
|
||||
fn read(&self, addr: &StoreAddr) -> Result<&[u8], StoreError>;
|
||||
/// The provider should copy the data from the memory block to the user-provided buffer if
|
||||
/// it exists.
|
||||
fn read(&self, addr: &StoreAddr, buf: &mut [u8]) -> Result<usize, StoreError>;
|
||||
|
||||
/// Delete data inside the pool given a [StoreAddr]
|
||||
/// Delete data inside the pool given a [StoreAddr].
|
||||
fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError>;
|
||||
fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError>;
|
||||
|
||||
/// Retrieve the length of the data at the given store address.
|
||||
fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
|
||||
if !self.has_element_at(addr)? {
|
||||
return Err(StoreError::DataDoesNotExist(*addr));
|
||||
}
|
||||
Ok(self.read(addr)?.len())
|
||||
fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError>;
|
||||
|
||||
    #[cfg(feature = "alloc")]
    fn read_as_vec(&self, addr: &StoreAddr) -> Result<alloc::vec::Vec<u8>, StoreError> {
        let mut vec = alloc::vec![0; self.len_of_data(addr)?];
        self.read(addr, &mut vec)?;
        Ok(vec)
    }
}
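As a usage sketch for the closure-based `PoolProvider` API above (mirroring the module-level example; the concrete pool sizing is arbitrary):

```rust
use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};

let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)], false));

// Reserve a free block and write to it through the closure.
let addr = pool
    .free_element(8, |buf| {
        buf[0] = 7;
    })
    .expect("reserving free element failed");

// Modify the stored data in place through the closure.
pool.modify(&addr, |buf| {
    buf[0] = 42;
})
.expect("modifying data failed");

// Reading copies the data into a caller-provided buffer and returns the stored length.
let mut read_buf: [u8; 8] = [0; 8];
let len = pool.read(&addr, &mut read_buf).expect("reading data failed");
assert_eq!(len, 8);
assert_eq!(read_buf[0], 42);

// Deletion consumes the store address.
pool.delete(addr).expect("deletion failed");
```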
pub trait PoolProviderMemInPlaceWithGuards: PoolProviderMemInPlace {
|
||||
/// This function behaves like [PoolProviderMemInPlace::read], but consumes the provided address
|
||||
/// Extension trait which adds guarded pool access classes.
|
||||
pub trait PoolProviderWithGuards: PoolProvider {
|
||||
/// This function behaves like [PoolProvider::read], but consumes the provided address
|
||||
/// and returns a RAII conformant guard object.
|
||||
///
|
||||
/// Unless the guard [PoolRwGuard::release] method is called, the data for the
|
||||
@ -231,7 +274,7 @@ pub trait PoolProviderMemInPlaceWithGuards: PoolProviderMemInPlace {
|
||||
/// manual deletion is necessary when returning from a processing function prematurely.
|
||||
fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard<Self>;
|
||||
|
||||
/// This function behaves like [PoolProviderMemInPlace::modify], but consumes the provided
|
||||
/// This function behaves like [PoolProvider::modify], but consumes the provided
|
||||
/// address and returns a RAII conformant guard object.
|
||||
///
|
||||
/// Unless the guard [PoolRwGuard::release] method is called, the data for the
|
||||
@ -242,15 +285,16 @@ pub trait PoolProviderMemInPlaceWithGuards: PoolProviderMemInPlace {
|
||||
    fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard<Self>;
}
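A brief sketch of the guard behaviour described above, namely deletion on drop unless `PoolGuard::release` is called; the pool configuration is arbitrary:

```rust
use satrs_core::pool::{
    PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig,
};

let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(4, 16)], false));
let addr = pool.add(&[0; 16]).expect("adding data failed");
{
    // The guard deletes the stored data when it is dropped, unless
    // `PoolGuard::release` is called before it goes out of scope.
    let guard = pool.read_with_guard(addr);
    let mut buf: [u8; 16] = [0; 16];
    guard.read(&mut buf).expect("reading through guard failed");
}
// The data was deleted automatically when the guard was dropped.
assert!(!pool.has_element_at(&addr).expect("invalid address"));
```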
|
||||
pub struct PoolGuard<'a, MemProvider: PoolProviderMemInPlace + ?Sized> {
|
||||
pub struct PoolGuard<'a, MemProvider: PoolProvider + ?Sized> {
|
||||
pool: &'a mut MemProvider,
|
||||
pub addr: StoreAddr,
|
||||
no_deletion: bool,
|
||||
deletion_failed_error: Option<StoreError>,
|
||||
}
|
||||
|
||||
/// This helper object
|
||||
impl<'a, MemProvider: PoolProviderMemInPlace> PoolGuard<'a, MemProvider> {
|
||||
/// This helper object can be used to safely access pool data without worrying about memory
|
||||
/// leaks.
|
||||
impl<'a, MemProvider: PoolProvider> PoolGuard<'a, MemProvider> {
|
||||
pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self {
|
||||
Self {
|
||||
pool,
|
||||
@ -260,8 +304,13 @@ impl<'a, MemProvider: PoolProviderMemInPlace> PoolGuard<'a, MemProvider> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn read(&self) -> Result<&[u8], StoreError> {
|
||||
self.pool.read(&self.addr)
|
||||
pub fn read(&self, buf: &mut [u8]) -> Result<usize, StoreError> {
|
||||
self.pool.read(&self.addr, buf)
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub fn read_as_vec(&self) -> Result<alloc::vec::Vec<u8>, StoreError> {
|
||||
self.pool.read_as_vec(&self.addr)
|
||||
}
|
||||
|
||||
/// Releasing the pool guard will disable the automatic deletion of the data when the guard
|
||||
@ -271,7 +320,7 @@ impl<'a, MemProvider: PoolProviderMemInPlace> PoolGuard<'a, MemProvider> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<MemProvider: PoolProviderMemInPlace + ?Sized> Drop for PoolGuard<'_, MemProvider> {
|
||||
impl<MemProvider: PoolProvider + ?Sized> Drop for PoolGuard<'_, MemProvider> {
|
||||
fn drop(&mut self) {
|
||||
if !self.no_deletion {
|
||||
if let Err(e) = self.pool.delete(self.addr) {
|
||||
@ -281,24 +330,24 @@ impl<MemProvider: PoolProviderMemInPlace + ?Sized> Drop for PoolGuard<'_, MemPro
|
||||
}
|
||||
}
|
||||
|
||||
pub struct PoolRwGuard<'a, MemProvider: PoolProviderMemInPlace + ?Sized> {
|
||||
pub struct PoolRwGuard<'a, MemProvider: PoolProvider + ?Sized> {
|
||||
guard: PoolGuard<'a, MemProvider>,
|
||||
}
|
||||
|
||||
impl<'a, MemProvider: PoolProviderMemInPlace> PoolRwGuard<'a, MemProvider> {
|
||||
impl<'a, MemProvider: PoolProvider> PoolRwGuard<'a, MemProvider> {
|
||||
pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self {
|
||||
Self {
|
||||
guard: PoolGuard::new(pool, addr),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn modify(&mut self) -> Result<&mut [u8], StoreError> {
|
||||
self.guard.pool.modify(&self.guard.addr)
|
||||
pub fn update<U: FnMut(&mut [u8])>(&mut self, updater: &mut U) -> Result<(), StoreError> {
|
||||
self.guard.pool.modify(&self.guard.addr, updater)
|
||||
}
|
||||
|
||||
delegate!(
|
||||
to self.guard {
|
||||
pub fn read(&self) -> Result<&[u8], StoreError>;
|
||||
pub fn read(&self, buf: &mut [u8]) -> Result<usize, StoreError>;
|
||||
/// Releasing the pool guard will disable the automatic deletion of the data when the guard
|
||||
/// is dropped.
|
||||
pub fn release(&mut self);
|
||||
@ -308,13 +357,11 @@ impl<'a, MemProvider: PoolProviderMemInPlace> PoolRwGuard<'a, MemProvider> {
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
mod alloc_mod {
|
||||
use super::{
|
||||
PoolGuard, PoolProviderMemInPlace, PoolProviderMemInPlaceWithGuards, PoolRwGuard,
|
||||
StaticPoolAddr,
|
||||
};
|
||||
use super::{PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticPoolAddr};
|
||||
use crate::pool::{NumBlocks, StoreAddr, StoreError, StoreIdError};
|
||||
use alloc::vec;
|
||||
use alloc::vec::Vec;
|
||||
use spacepackets::ByteConversionError;
|
||||
#[cfg(feature = "std")]
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
@ -329,16 +376,24 @@ mod alloc_mod {
|
||||
///
|
||||
/// # Parameters
|
||||
///
|
||||
/// * `cfg`: Vector of tuples which represent a subpool. The first entry in the tuple specifies the
|
||||
/// number of memory blocks in the subpool, the second entry the size of the blocks
|
||||
/// * `cfg` - Vector of tuples which represent a subpool. The first entry in the tuple specifies
|
||||
/// the number of memory blocks in the subpool, the second entry the size of the blocks
|
||||
/// * `spill_to_higher_subpools` - Specifies whether data will be spilled to higher subpools
|
||||
/// if the next fitting subpool is full. This is useful to ensure the pool remains useful
|
||||
/// for all data sizes as long as possible. However, an undesirable side-effect might be
|
||||
/// the chocking of larger subpools by underdimensioned smaller subpools.
|
||||
#[derive(Clone)]
|
||||
pub struct StaticPoolConfig {
|
||||
cfg: Vec<(NumBlocks, usize)>,
|
||||
spill_to_higher_subpools: bool,
|
||||
}
|
||||
|
||||
impl StaticPoolConfig {
|
||||
pub fn new(cfg: Vec<(NumBlocks, usize)>) -> Self {
|
||||
StaticPoolConfig { cfg }
|
||||
pub fn new(cfg: Vec<(NumBlocks, usize)>, spill_to_higher_subpools: bool) -> Self {
|
||||
StaticPoolConfig {
|
||||
cfg,
|
||||
spill_to_higher_subpools,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn cfg(&self) -> &Vec<(NumBlocks, usize)> {
|
||||
@ -356,16 +411,21 @@ mod alloc_mod {
|
||||
|
||||
/// Pool implementation providing sub-pools with fixed size memory blocks.
|
||||
///
|
||||
/// This is a simple memory pool implementation which pre-allocates all sub-pools using a given pool
|
||||
/// configuration. After the pre-allocation, no dynamic memory allocation will be performed
|
||||
/// during run-time. This makes the implementation suitable for real-time applications and
|
||||
/// embedded environments. The pool implementation will also track the size of the data stored
|
||||
/// inside it.
|
||||
/// This is a simple memory pool implementation which pre-allocates all subpools using a given
|
||||
/// pool configuration. After the pre-allocation, no dynamic memory allocation will be
|
||||
/// performed during run-time. This makes the implementation suitable for real-time
|
||||
/// applications and embedded environments.
|
||||
///
|
||||
/// The subpool bucket sizes only denote the maximum possible data size being stored inside
|
||||
/// them and the pool implementation will still track the size of the data stored inside it.
|
||||
/// The implementation will generally determine the best fitting subpool for given data to
|
||||
/// add. Currently, the pool does not support spilling to larger subpools if the closest
|
||||
/// fitting subpool is full. This might be added in the future.
|
||||
///
|
||||
/// Transactions with the [pool][StaticMemoryPool] are done using a generic
|
||||
/// [address][StoreAddr] type.
|
||||
/// Adding any data to the pool will yield a store address. Modification and read operations are
|
||||
/// done using a reference to a store address. Deletion will consume the store address.
|
||||
/// [address][StoreAddr] type. Adding any data to the pool will yield a store address.
|
||||
/// Modification and read operations are done using a reference to a store address. Deletion
|
||||
/// will consume the store address.
|
||||
pub struct StaticMemoryPool {
|
||||
pool_cfg: StaticPoolConfig,
|
||||
pool: Vec<Vec<u8>>,
|
||||
@ -422,7 +482,17 @@ mod alloc_mod {
|
||||
}
|
||||
|
||||
fn reserve(&mut self, data_len: usize) -> Result<StaticPoolAddr, StoreError> {
|
||||
let subpool_idx = self.find_subpool(data_len, 0)?;
|
||||
let mut subpool_idx = self.find_subpool(data_len, 0)?;
|
||||
|
||||
if self.pool_cfg.spill_to_higher_subpools {
|
||||
while let Err(StoreError::StoreFull(_)) = self.find_empty(subpool_idx) {
|
||||
if (subpool_idx + 1) as usize == self.sizes_lists.len() {
|
||||
return Err(StoreError::StoreFull(subpool_idx));
|
||||
}
|
||||
subpool_idx += 1;
|
||||
}
|
||||
}
|
||||
|
||||
let (slot, size_slot_ref) = self.find_empty(subpool_idx)?;
|
||||
*size_slot_ref = data_len;
|
||||
Ok(StaticPoolAddr {
|
||||
@ -476,7 +546,7 @@ mod alloc_mod {
|
||||
}
|
||||
}
|
||||
|
||||
impl PoolProviderMemInPlace for StaticMemoryPool {
|
||||
impl PoolProvider for StaticMemoryPool {
|
||||
fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError> {
|
||||
let data_len = data.len();
|
||||
if data_len > POOL_MAX_SIZE {
|
||||
@ -487,7 +557,11 @@ mod alloc_mod {
|
||||
Ok(addr.into())
|
||||
}
|
||||
|
||||
fn free_element(&mut self, len: usize) -> Result<(StoreAddr, &mut [u8]), StoreError> {
|
||||
fn free_element<W: FnMut(&mut [u8])>(
|
||||
&mut self,
|
||||
len: usize,
|
||||
mut writer: W,
|
||||
) -> Result<StoreAddr, StoreError> {
|
||||
if len > POOL_MAX_SIZE {
|
||||
return Err(StoreError::DataTooLarge(len));
|
||||
}
|
||||
@ -495,25 +569,40 @@ mod alloc_mod {
|
||||
let raw_pos = self.raw_pos(&addr).unwrap();
|
||||
let block =
|
||||
&mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + len];
|
||||
Ok((addr.into(), block))
|
||||
writer(block);
|
||||
Ok(addr.into())
|
||||
}
|
||||
|
||||
fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError> {
|
||||
fn modify<U: FnMut(&mut [u8])>(
|
||||
&mut self,
|
||||
addr: &StoreAddr,
|
||||
mut updater: U,
|
||||
) -> Result<(), StoreError> {
|
||||
let addr = StaticPoolAddr::from(*addr);
|
||||
let curr_size = self.addr_check(&addr)?;
|
||||
let raw_pos = self.raw_pos(&addr).unwrap();
|
||||
let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()
|
||||
[raw_pos..raw_pos + curr_size];
|
||||
Ok(block)
|
||||
updater(block);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn read(&self, addr: &StoreAddr) -> Result<&[u8], StoreError> {
|
||||
fn read(&self, addr: &StoreAddr, buf: &mut [u8]) -> Result<usize, StoreError> {
|
||||
let addr = StaticPoolAddr::from(*addr);
|
||||
let curr_size = self.addr_check(&addr)?;
|
||||
if buf.len() < curr_size {
|
||||
return Err(ByteConversionError::ToSliceTooSmall {
|
||||
found: buf.len(),
|
||||
expected: curr_size,
|
||||
}
|
||||
.into());
|
||||
}
|
||||
let raw_pos = self.raw_pos(&addr).unwrap();
|
||||
let block =
|
||||
&self.pool.get(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + curr_size];
|
||||
Ok(block)
|
||||
//block.copy_from_slice(&src);
|
||||
buf[..curr_size].copy_from_slice(block);
|
||||
Ok(curr_size)
|
||||
}
|
||||
|
||||
fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError> {
|
||||
@ -540,9 +629,21 @@ mod alloc_mod {
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
|
||||
let addr = StaticPoolAddr::from(*addr);
|
||||
self.validate_addr(&addr)?;
|
||||
let pool_idx = addr.pool_idx as usize;
|
||||
let size_list = self.sizes_lists.get(pool_idx).unwrap();
|
||||
let size = size_list[addr.packet_idx as usize];
|
||||
Ok(match size {
|
||||
STORE_FREE => 0,
|
||||
_ => size,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl PoolProviderMemInPlaceWithGuards for StaticMemoryPool {
|
||||
impl PoolProviderWithGuards for StaticMemoryPool {
|
||||
fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard<Self> {
|
||||
PoolRwGuard::new(self, addr)
|
||||
}
|
||||
@ -556,30 +657,29 @@ mod alloc_mod {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::pool::{
|
||||
PoolGuard, PoolProviderMemInPlace, PoolProviderMemInPlaceWithGuards, PoolRwGuard,
|
||||
StaticMemoryPool, StaticPoolAddr, StaticPoolConfig, StoreError, StoreIdError,
|
||||
POOL_MAX_SIZE,
|
||||
PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticMemoryPool,
|
||||
StaticPoolAddr, StaticPoolConfig, StoreError, StoreIdError, POOL_MAX_SIZE,
|
||||
};
|
||||
use std::vec;
|
||||
|
||||
fn basic_small_pool() -> StaticMemoryPool {
|
||||
// 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes
|
||||
let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)]);
|
||||
let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)], false);
|
||||
StaticMemoryPool::new(pool_cfg)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cfg() {
|
||||
// Values where number of buckets is 0 or size is too large should be removed
|
||||
let mut pool_cfg = StaticPoolConfig::new(vec![(0, 0), (1, 0), (2, POOL_MAX_SIZE)]);
|
||||
let mut pool_cfg = StaticPoolConfig::new(vec![(0, 0), (1, 0), (2, POOL_MAX_SIZE)], false);
|
||||
pool_cfg.sanitize();
|
||||
assert_eq!(*pool_cfg.cfg(), vec![(1, 0)]);
|
||||
// Entries should be ordered according to bucket size
|
||||
pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)]);
|
||||
pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)], false);
|
||||
pool_cfg.sanitize();
|
||||
assert_eq!(*pool_cfg.cfg(), vec![(32, 3), (16, 6), (8, 12)]);
|
||||
// Unstable sort is used, so order of entries with same block length should not matter
|
||||
pool_cfg = StaticPoolConfig::new(vec![(12, 12), (14, 16), (10, 12)]);
|
||||
pool_cfg = StaticPoolConfig::new(vec![(12, 12), (14, 16), (10, 12)], false);
|
||||
pool_cfg.sanitize();
|
||||
assert!(
|
||||
*pool_cfg.cfg() == vec![(12, 12), (10, 12), (14, 16)]
|
||||
@ -594,13 +694,14 @@ mod tests {
|
||||
for (i, val) in test_buf.iter_mut().enumerate() {
|
||||
*val = i as u8;
|
||||
}
|
||||
let mut other_buf: [u8; 16] = [0; 16];
|
||||
let addr = local_pool.add(&test_buf).expect("Adding data failed");
|
||||
// Read back data and verify correctness
|
||||
let res = local_pool.read(&addr);
|
||||
let res = local_pool.read(&addr, &mut other_buf);
|
||||
assert!(res.is_ok());
|
||||
let buf_read_back = res.unwrap();
|
||||
assert_eq!(buf_read_back.len(), 16);
|
||||
for (i, &val) in buf_read_back.iter().enumerate() {
|
||||
let read_len = res.unwrap();
|
||||
assert_eq!(read_len, 16);
|
||||
for (i, &val) in other_buf.iter().enumerate() {
|
||||
assert_eq!(val, i as u8);
|
||||
}
|
||||
}
|
||||
@ -610,8 +711,10 @@ mod tests {
|
||||
let mut local_pool = basic_small_pool();
|
||||
let test_buf: [u8; 12] = [0; 12];
|
||||
let addr = local_pool.add(&test_buf).expect("Adding data failed");
|
||||
let res = local_pool.read(&addr).expect("Read back failed");
|
||||
assert_eq!(res.len(), 12);
|
||||
let res = local_pool
|
||||
.read(&addr, &mut [0; 12])
|
||||
.expect("Read back failed");
|
||||
assert_eq!(res, 12);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -622,10 +725,13 @@ mod tests {
|
||||
// Delete the data
|
||||
let res = local_pool.delete(addr);
|
||||
assert!(res.is_ok());
|
||||
let mut writer = |buf: &mut [u8]| {
|
||||
assert_eq!(buf.len(), 12);
|
||||
};
|
||||
// Verify that the slot is free by trying to get a reference to it
|
||||
let res = local_pool.free_element(12);
|
||||
let res = local_pool.free_element(12, &mut writer);
|
||||
assert!(res.is_ok());
|
||||
let (addr, buf_ref) = res.unwrap();
|
||||
let addr = res.unwrap();
|
||||
assert_eq!(
|
||||
addr,
|
||||
u64::from(StaticPoolAddr {
|
||||
@ -633,7 +739,6 @@ mod tests {
|
||||
packet_idx: 0
|
||||
})
|
||||
);
|
||||
assert_eq!(buf_ref.len(), 12);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -647,29 +752,34 @@ mod tests {
|
||||
|
||||
{
|
||||
// Verify that the slot is free by trying to get a reference to it
|
||||
let res = local_pool.modify(&addr).expect("Modifying data failed");
|
||||
res[0] = 0;
|
||||
res[1] = 0x42;
|
||||
local_pool
|
||||
.modify(&addr, &mut |buf: &mut [u8]| {
|
||||
buf[0] = 0;
|
||||
buf[1] = 0x42;
|
||||
})
|
||||
.expect("Modifying data failed");
|
||||
}
|
||||
|
||||
let res = local_pool.read(&addr).expect("Reading back data failed");
|
||||
assert_eq!(res[0], 0);
|
||||
assert_eq!(res[1], 0x42);
|
||||
assert_eq!(res[2], 2);
|
||||
assert_eq!(res[3], 3);
|
||||
local_pool
|
||||
.read(&addr, &mut test_buf)
|
||||
.expect("Reading back data failed");
|
||||
assert_eq!(test_buf[0], 0);
|
||||
assert_eq!(test_buf[1], 0x42);
|
||||
assert_eq!(test_buf[2], 2);
|
||||
assert_eq!(test_buf[3], 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_consecutive_reservation() {
|
||||
let mut local_pool = basic_small_pool();
|
||||
// Reserve two smaller blocks consecutively and verify that the third reservation fails
|
||||
let res = local_pool.free_element(8);
|
||||
let res = local_pool.free_element(8, |_| {});
|
||||
assert!(res.is_ok());
|
||||
let (addr0, _) = res.unwrap();
|
||||
let res = local_pool.free_element(8);
|
||||
let addr0 = res.unwrap();
|
||||
let res = local_pool.free_element(8, |_| {});
|
||||
assert!(res.is_ok());
|
||||
let (addr1, _) = res.unwrap();
|
||||
let res = local_pool.free_element(8);
|
||||
let addr1 = res.unwrap();
|
||||
let res = local_pool.free_element(8, |_| {});
|
||||
assert!(res.is_err());
|
||||
let err = res.unwrap_err();
|
||||
assert_eq!(err, StoreError::StoreFull(1));
|
||||
@ -689,6 +799,7 @@ mod tests {
|
||||
pool_idx: 0,
|
||||
}
|
||||
.into(),
|
||||
&mut [],
|
||||
);
|
||||
assert!(res.is_err());
|
||||
assert!(matches!(
|
||||
@ -720,7 +831,7 @@ mod tests {
|
||||
packet_idx: 0,
|
||||
}
|
||||
.into();
|
||||
let res = local_pool.read(&addr);
|
||||
let res = local_pool.read(&addr, &mut []);
|
||||
assert!(res.is_err());
|
||||
let err = res.unwrap_err();
|
||||
assert!(matches!(
|
||||
@ -737,7 +848,7 @@ mod tests {
|
||||
packet_idx: 1,
|
||||
};
|
||||
assert_eq!(addr.raw(), 0x00020001);
|
||||
let res = local_pool.read(&addr.into());
|
||||
let res = local_pool.read(&addr.into(), &mut []);
|
||||
assert!(res.is_err());
|
||||
let err = res.unwrap_err();
|
||||
assert!(matches!(
|
||||
@ -759,7 +870,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_data_too_large_1() {
|
||||
let mut local_pool = basic_small_pool();
|
||||
let res = local_pool.free_element(POOL_MAX_SIZE + 1);
|
||||
let res = local_pool.free_element(POOL_MAX_SIZE + 1, |_| {});
|
||||
assert!(res.is_err());
|
||||
assert_eq!(
|
||||
res.unwrap_err(),
|
||||
@ -771,7 +882,7 @@ mod tests {
|
||||
fn test_free_element_too_large() {
|
||||
let mut local_pool = basic_small_pool();
|
||||
// Try to request a slot which is too large
|
||||
let res = local_pool.free_element(20);
|
||||
let res = local_pool.free_element(20, |_| {});
|
||||
assert!(res.is_err());
|
||||
assert_eq!(res.unwrap_err(), StoreError::DataTooLarge(20));
|
||||
}
|
||||
@ -813,7 +924,7 @@ mod tests {
|
||||
let test_buf: [u8; 16] = [0; 16];
|
||||
let addr = local_pool.add(&test_buf).expect("Adding data failed");
|
||||
let mut rw_guard = PoolRwGuard::new(&mut local_pool, addr);
|
||||
let _ = rw_guard.modify().expect("modify failed");
|
||||
rw_guard.update(&mut |_| {}).expect("modify failed");
|
||||
drop(rw_guard);
|
||||
assert!(!local_pool.has_element_at(&addr).expect("Invalid address"));
|
||||
}
|
||||
@ -824,7 +935,7 @@ mod tests {
|
||||
let test_buf: [u8; 16] = [0; 16];
|
||||
let addr = local_pool.add(&test_buf).expect("Adding data failed");
|
||||
let mut rw_guard = local_pool.modify_with_guard(addr);
|
||||
let _ = rw_guard.modify().expect("modify failed");
|
||||
rw_guard.update(&mut |_| {}).expect("modify failed");
|
||||
drop(rw_guard);
|
||||
assert!(!local_pool.has_element_at(&addr).expect("Invalid address"));
|
||||
}
|
||||
@ -840,13 +951,93 @@ mod tests {
|
||||
let addr1 = local_pool.add(&test_buf_1).expect("Adding data failed");
|
||||
let addr2 = local_pool.add(&test_buf_2).expect("Adding data failed");
|
||||
let addr3 = local_pool.add(&test_buf_3).expect("Adding data failed");
|
||||
let tm0_raw = local_pool.modify(&addr0).expect("Modifying data failed");
|
||||
assert_eq!(tm0_raw, test_buf_0);
|
||||
let tm1_raw = local_pool.modify(&addr1).expect("Modifying data failed");
|
||||
assert_eq!(tm1_raw, test_buf_1);
|
||||
let tm2_raw = local_pool.modify(&addr2).expect("Modifying data failed");
|
||||
assert_eq!(tm2_raw, test_buf_2);
|
||||
let tm3_raw = local_pool.modify(&addr3).expect("Modifying data failed");
|
||||
assert_eq!(tm3_raw, test_buf_3);
|
||||
local_pool
|
||||
.modify(&addr0, |buf| {
|
||||
assert_eq!(buf, test_buf_0);
|
||||
})
|
||||
.expect("Modifying data failed");
|
||||
local_pool
|
||||
.modify(&addr1, |buf| {
|
||||
assert_eq!(buf, test_buf_1);
|
||||
})
|
||||
.expect("Modifying data failed");
|
||||
local_pool
|
||||
.modify(&addr2, |buf| {
|
||||
assert_eq!(buf, test_buf_2);
|
||||
})
|
||||
.expect("Modifying data failed");
|
||||
local_pool
|
||||
.modify(&addr3, |buf| {
|
||||
assert_eq!(buf, test_buf_3);
|
||||
})
|
||||
.expect("Modifying data failed");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_spills_to_higher_subpools() {
|
||||
let pool_cfg = StaticPoolConfig::new(vec![(2, 8), (2, 16)], true);
|
||||
let mut local_pool = StaticMemoryPool::new(pool_cfg);
|
||||
local_pool.free_element(8, |_| {}).unwrap();
|
||||
local_pool.free_element(8, |_| {}).unwrap();
|
||||
let mut in_larger_subpool_now = local_pool.free_element(8, |_| {});
|
||||
assert!(in_larger_subpool_now.is_ok());
|
||||
let generic_addr = in_larger_subpool_now.unwrap();
|
||||
let pool_addr = StaticPoolAddr::from(generic_addr);
|
||||
assert_eq!(pool_addr.pool_idx, 1);
|
||||
assert_eq!(pool_addr.packet_idx, 0);
|
||||
assert!(local_pool.has_element_at(&generic_addr).unwrap());
|
||||
in_larger_subpool_now = local_pool.free_element(8, |_| {});
|
||||
assert!(in_larger_subpool_now.is_ok());
|
||||
let generic_addr = in_larger_subpool_now.unwrap();
|
||||
let pool_addr = StaticPoolAddr::from(generic_addr);
|
||||
assert_eq!(pool_addr.pool_idx, 1);
|
||||
assert_eq!(pool_addr.packet_idx, 1);
|
||||
assert!(local_pool.has_element_at(&generic_addr).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_spillage_fails_as_well() {
|
||||
let pool_cfg = StaticPoolConfig::new(vec![(1, 8), (1, 16)], true);
|
||||
let mut local_pool = StaticMemoryPool::new(pool_cfg);
|
||||
local_pool.free_element(8, |_| {}).unwrap();
|
||||
local_pool.free_element(8, |_| {}).unwrap();
|
||||
let should_fail = local_pool.free_element(8, |_| {});
|
||||
assert!(should_fail.is_err());
|
||||
if let Err(err) = should_fail {
|
||||
assert_eq!(err, StoreError::StoreFull(1));
|
||||
} else {
|
||||
panic!("unexpected store address");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_spillage_works_across_multiple_subpools() {
|
||||
let pool_cfg = StaticPoolConfig::new(vec![(1, 8), (1, 12), (1, 16)], true);
|
||||
let mut local_pool = StaticMemoryPool::new(pool_cfg);
|
||||
local_pool.free_element(8, |_| {}).unwrap();
|
||||
local_pool.free_element(12, |_| {}).unwrap();
|
||||
let in_larger_subpool_now = local_pool.free_element(8, |_| {});
|
||||
assert!(in_larger_subpool_now.is_ok());
|
||||
let generic_addr = in_larger_subpool_now.unwrap();
|
||||
let pool_addr = StaticPoolAddr::from(generic_addr);
|
||||
assert_eq!(pool_addr.pool_idx, 2);
|
||||
assert_eq!(pool_addr.packet_idx, 0);
|
||||
assert!(local_pool.has_element_at(&generic_addr).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_spillage_fails_across_multiple_subpools() {
|
||||
let pool_cfg = StaticPoolConfig::new(vec![(1, 8), (1, 12), (1, 16)], true);
|
||||
let mut local_pool = StaticMemoryPool::new(pool_cfg);
|
||||
local_pool.free_element(8, |_| {}).unwrap();
|
||||
local_pool.free_element(12, |_| {}).unwrap();
|
||||
local_pool.free_element(16, |_| {}).unwrap();
|
||||
let should_fail = local_pool.free_element(8, |_| {});
|
||||
assert!(should_fail.is_err());
|
||||
if let Err(err) = should_fail {
|
||||
assert_eq!(err, StoreError::StoreFull(2));
|
||||
} else {
|
||||
panic!("unexpected store address");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -390,7 +390,7 @@ mod alloc_mod {
|
||||
#[cfg(feature = "std")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
|
||||
pub mod std_mod {
|
||||
use crate::pool::{PoolProviderMemInPlaceWithGuards, SharedStaticMemoryPool, StoreAddr};
|
||||
use crate::pool::{PoolProvider, PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr};
|
||||
use crate::pus::verification::{
|
||||
StdVerifReporterWithSender, TcStateAccepted, VerificationToken,
|
||||
};
|
||||
@ -789,12 +789,15 @@ pub mod std_mod {
|
||||
.shared_tc_store
|
||||
.write()
|
||||
.map_err(|_| PusPacketHandlingError::EcssTmtc(EcssTmtcError::StoreLock))?;
|
||||
let tc_guard = tc_pool.read_with_guard(addr);
|
||||
let tc_raw = tc_guard.read().unwrap();
|
||||
if tc_raw.len() > self.pus_buf.len() {
|
||||
return Err(PusPacketHandlingError::PusPacketTooLarge(tc_raw.len()));
|
||||
let tc_size = tc_pool
|
||||
.len_of_data(&addr)
|
||||
.map_err(|e| PusPacketHandlingError::EcssTmtc(EcssTmtcError::Store(e)))?;
|
||||
if tc_size > self.pus_buf.len() {
|
||||
return Err(PusPacketHandlingError::PusPacketTooLarge(tc_size));
|
||||
}
|
||||
self.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw);
|
||||
let tc_guard = tc_pool.read_with_guard(addr);
|
||||
// TODO: Proper error handling.
|
||||
tc_guard.read(&mut self.pus_buf[0..tc_size]).unwrap();
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@ -947,8 +950,7 @@ pub mod tests {
|
||||
use spacepackets::CcsdsPacket;
|
||||
|
||||
use crate::pool::{
|
||||
PoolProviderMemInPlace, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig,
|
||||
StoreAddr,
|
||||
PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig, StoreAddr,
|
||||
};
|
||||
use crate::pus::verification::RequestId;
|
||||
use crate::tmtc::tm_helper::SharedTmPool;
|
||||
@ -1015,7 +1017,7 @@ pub mod tests {
|
||||
///
|
||||
/// The PUS service handler is instantiated with a [EcssTcInStoreConverter].
|
||||
pub fn new() -> (Self, PusServiceHelper<EcssTcInSharedStoreConverter>) {
|
||||
let pool_cfg = StaticPoolConfig::new(vec![(16, 16), (8, 32), (4, 64)]);
|
||||
let pool_cfg = StaticPoolConfig::new(vec![(16, 16), (8, 32), (4, 64)], false);
|
||||
let tc_pool = StaticMemoryPool::new(pool_cfg.clone());
|
||||
let tm_pool = StaticMemoryPool::new(pool_cfg);
|
||||
let shared_tc_pool = SharedStaticMemoryPool::new(RwLock::new(tc_pool));
|
||||
@ -1078,8 +1080,8 @@ pub mod tests {
|
||||
assert!(next_msg.is_ok());
|
||||
let tm_addr = next_msg.unwrap();
|
||||
let tm_pool = self.tm_pool.0.read().unwrap();
|
||||
let tm_raw = tm_pool.read(&tm_addr).unwrap();
|
||||
self.tm_buf[0..tm_raw.len()].copy_from_slice(tm_raw);
|
||||
let tm_raw = tm_pool.read_as_vec(&tm_addr).unwrap();
|
||||
self.tm_buf[0..tm_raw.len()].copy_from_slice(&tm_raw);
|
||||
PusTmReader::new(&self.tm_buf, 7).unwrap().0
|
||||
}
|
||||
|
||||
@ -1096,8 +1098,8 @@ pub mod tests {
|
||||
assert!(next_msg.is_ok());
|
||||
let tm_addr = next_msg.unwrap();
|
||||
let tm_pool = self.tm_pool.0.read().unwrap();
|
||||
let tm_raw = tm_pool.read(&tm_addr).unwrap();
|
||||
let tm = PusTmReader::new(tm_raw, 7).unwrap().0;
|
||||
let tm_raw = tm_pool.read_as_vec(&tm_addr).unwrap();
|
||||
let tm = PusTmReader::new(&tm_raw, 7).unwrap().0;
|
||||
assert_eq!(PusPacket::service(&tm), 1);
|
||||
assert_eq!(PusPacket::subservice(&tm), subservice);
|
||||
assert_eq!(tm.apid(), TEST_APID);
|
||||
|
@ -16,7 +16,7 @@ use spacepackets::{ByteConversionError, CcsdsPacket};
|
||||
#[cfg(feature = "std")]
|
||||
use std::error::Error;
|
||||
|
||||
use crate::pool::{PoolProviderMemInPlace, StoreError};
|
||||
use crate::pool::{PoolProvider, StoreError};
|
||||
#[cfg(feature = "alloc")]
|
||||
pub use alloc_mod::*;
|
||||
|
||||
@ -238,13 +238,11 @@ impl Error for ScheduleError {
|
||||
}
|
||||
}
|
||||
|
||||
pub trait PusSchedulerInterface {
|
||||
/// Generic trait for scheduler objects which are able to schedule ECSS PUS C packets.
|
||||
pub trait PusSchedulerProvider {
|
||||
type TimeProvider: CcsdsTimeProvider + TimeReader;
|
||||
|
||||
fn reset(
|
||||
&mut self,
|
||||
store: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
) -> Result<(), StoreError>;
|
||||
fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), StoreError>;
|
||||
|
||||
fn is_enabled(&self) -> bool;
|
||||
|
||||
@ -267,7 +265,7 @@ pub trait PusSchedulerInterface {
|
||||
fn insert_wrapped_tc<TimeProvider>(
|
||||
&mut self,
|
||||
pus_tc: &(impl IsPusTelecommand + PusPacket + GenericPusTcSecondaryHeader),
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<TcInfo, ScheduleError> {
|
||||
if PusPacket::service(pus_tc) != 11 {
|
||||
return Err(ScheduleError::WrongService(PusPacket::service(pus_tc)));
|
||||
@ -293,7 +291,7 @@ pub trait PusSchedulerInterface {
|
||||
&mut self,
|
||||
time_stamp: UnixTimestamp,
|
||||
tc: &[u8],
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<TcInfo, ScheduleError> {
|
||||
let check_tc = PusTcReader::new(tc)?;
|
||||
if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 {
|
||||
@ -343,7 +341,7 @@ pub fn generate_insert_telecommand_app_data(
|
||||
#[cfg(feature = "alloc")]
|
||||
pub mod alloc_mod {
|
||||
use super::*;
|
||||
use crate::pool::{PoolProviderMemInPlace, StoreAddr, StoreError};
|
||||
use crate::pool::{PoolProvider, StoreAddr, StoreError};
|
||||
use alloc::collections::btree_map::{Entry, Range};
|
||||
use alloc::collections::BTreeMap;
|
||||
use alloc::vec;
|
||||
@ -379,7 +377,7 @@ pub mod alloc_mod {
|
||||
/// This is the core data structure for scheduling PUS telecommands with [alloc] support.
|
||||
///
|
||||
/// It is assumed that the actual telecommand data is stored in a separate TC pool offering
|
||||
/// a [crate::pool::PoolProviderMemInPlace] API. This data structure just tracks the store
|
||||
/// a [crate::pool::PoolProvider] API. This data structure just tracks the store
|
||||
/// addresses and their release times and offers a convenient API to insert and release
|
||||
/// telecommands and perform other functionality specified by the ECSS standard in section 6.11.
|
||||
/// The time is tracked as a [spacepackets::time::UnixTimestamp] but the only requirement to
|
||||
@ -413,6 +411,8 @@ pub mod alloc_mod {
|
||||
/// * `time_margin` - This time margin is used when inserting new telecommands into the
|
||||
/// schedule. If the release time of a new telecommand is earlier than the time margin
|
||||
/// added to the current time, it will not be inserted into the schedule.
|
||||
/// * `tc_buf_size` - Buffer for temporary storage of telecommand packets. This buffer
|
||||
/// should be large enough to accomodate the largest expected TC packets.
|
||||
pub fn new(init_current_time: UnixTimestamp, time_margin: Duration) -> Self {
|
||||
PusScheduler {
|
||||
tc_map: Default::default(),
|
||||
@ -476,7 +476,7 @@ pub mod alloc_mod {
|
||||
&mut self,
|
||||
time_stamp: UnixTimestamp,
|
||||
tc: &[u8],
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<TcInfo, ScheduleError> {
|
||||
let check_tc = PusTcReader::new(tc)?;
|
||||
if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 {
|
||||
@ -499,7 +499,7 @@ pub mod alloc_mod {
|
||||
pub fn insert_wrapped_tc_cds_short(
|
||||
&mut self,
|
||||
pus_tc: &PusTc,
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<TcInfo, ScheduleError> {
|
||||
self.insert_wrapped_tc::<cds::TimeProvider>(pus_tc, pool)
|
||||
}
|
||||
@ -509,7 +509,7 @@ pub mod alloc_mod {
|
||||
pub fn insert_wrapped_tc_cds_long(
|
||||
&mut self,
|
||||
pus_tc: &PusTc,
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<TcInfo, ScheduleError> {
|
||||
self.insert_wrapped_tc::<cds::TimeProvider<DaysLen24Bits>>(pus_tc, pool)
|
||||
}
|
||||
@ -525,7 +525,7 @@ pub mod alloc_mod {
|
||||
pub fn delete_by_time_filter<TimeProvider: CcsdsTimeProvider + Clone>(
|
||||
&mut self,
|
||||
time_window: TimeWindow<TimeProvider>,
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<u64, (u64, StoreError)> {
|
||||
let range = self.retrieve_by_time_filter(time_window);
|
||||
let mut del_packets = 0;
|
||||
@ -555,7 +555,7 @@ pub mod alloc_mod {
|
||||
/// the last deletion will be supplied in addition to the number of deleted commands.
|
||||
pub fn delete_all(
|
||||
&mut self,
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<u64, (u64, StoreError)> {
|
||||
self.delete_by_time_filter(TimeWindow::<cds::TimeProvider>::new_select_all(), pool)
|
||||
}
|
||||
@ -613,7 +613,7 @@ pub mod alloc_mod {
|
||||
pub fn delete_by_request_id_and_from_pool(
|
||||
&mut self,
|
||||
req_id: &RequestId,
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<bool, StoreError> {
|
||||
if let DeletionResult::WithStoreDeletion(v) =
|
||||
self.delete_by_request_id_internal_with_store_deletion(req_id, pool)
|
||||
@ -645,7 +645,7 @@ pub mod alloc_mod {
|
||||
fn delete_by_request_id_internal_with_store_deletion(
|
||||
&mut self,
|
||||
req_id: &RequestId,
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> DeletionResult {
|
||||
let mut idx_found = None;
|
||||
for time_bucket in &mut self.tc_map {
|
||||
@ -675,7 +675,8 @@ pub mod alloc_mod {
|
||||
/// Utility method which calls [Self::telecommands_to_release] and then calls a releaser
|
||||
/// closure for each telecommand which should be released. This function will also delete
|
||||
/// the telecommands from the holding store after calling the release closure if the user
|
||||
/// returns [true] from the release closure.
|
||||
/// returns [true] from the release closure. A buffer must be provided to hold the
|
||||
/// telecommands for the release process.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
@ -685,18 +686,55 @@ pub mod alloc_mod {
|
||||
/// note that returning false might lead to memory leaks if the TC is not cleared from
|
||||
/// the store in some other way.
|
||||
/// * `tc_store` - The holding store of the telecommands.
|
||||
/// * `tc_buf` - Buffer to hold each telecommand being released.
|
||||
pub fn release_telecommands_with_buffer<R: FnMut(bool, &TcInfo, &[u8]) -> bool>(
|
||||
&mut self,
|
||||
releaser: R,
|
||||
tc_store: &mut (impl PoolProvider + ?Sized),
|
||||
tc_buf: &mut [u8],
|
||||
) -> Result<u64, (u64, StoreError)> {
|
||||
self.release_telecommands_internal(releaser, tc_store, Some(tc_buf))
|
||||
}
|
||||
|
||||
/// This functions is almost identical to [Self::release_telecommands_with_buffer] but does
|
||||
/// not require a user provided TC buffer because it will always use the
|
||||
/// [PoolProvider::read_as_vec] API to read the TC packets.
|
||||
///
|
||||
/// However, this might also perform frequent allocations for all telecommands being
|
||||
/// released.
|
||||
    pub fn release_telecommands<R: FnMut(bool, &TcInfo, &[u8]) -> bool>(
        &mut self,
        releaser: R,
        tc_store: &mut (impl PoolProvider + ?Sized),
    ) -> Result<u64, (u64, StoreError)> {
        self.release_telecommands_internal(releaser, tc_store, None)
    }
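A rough usage sketch for the buffered release variant. The module path for the scheduler and the omitted insertion step are assumptions here; the tests further down in this diff use the same release pattern:

```rust
use core::time::Duration;
use satrs_core::pool::{StaticMemoryPool, StaticPoolConfig};
use satrs_core::pus::scheduler::{PusScheduler, TcInfo}; // assumed module path
use spacepackets::time::UnixTimestamp;

let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
    PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
// ... insert time-tagged TCs into `scheduler`, with their raw data stored in `pool` ...

scheduler.update_time(UnixTimestamp::new_only_seconds(100));
let mut tc_buf: [u8; 128] = [0; 128];
let _released = scheduler
    .release_telecommands_with_buffer(
        |enabled: bool, _info: &TcInfo, _tc_raw: &[u8]| {
            // Forward or process the raw TC here. Returning true deletes the
            // TC from the pool after release; returning false keeps it stored.
            enabled
        },
        &mut pool,
        &mut tc_buf,
    )
    .expect("releasing TCs failed");
```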
|
||||
fn release_telecommands_internal<R: FnMut(bool, &TcInfo, &[u8]) -> bool>(
|
||||
&mut self,
|
||||
mut releaser: R,
|
||||
tc_store: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
tc_store: &mut (impl PoolProvider + ?Sized),
|
||||
mut tc_buf: Option<&mut [u8]>,
|
||||
) -> Result<u64, (u64, StoreError)> {
|
||||
let tcs_to_release = self.telecommands_to_release();
|
||||
let mut released_tcs = 0;
|
||||
let mut store_error = Ok(());
|
||||
for tc in tcs_to_release {
|
||||
for info in tc.1 {
|
||||
let tc = tc_store.read(&info.addr).map_err(|e| (released_tcs, e))?;
|
||||
let should_delete = releaser(self.enabled, info, tc);
|
||||
let should_delete = match tc_buf.as_mut() {
|
||||
Some(buf) => {
|
||||
tc_store
|
||||
.read(&info.addr, buf)
|
||||
.map_err(|e| (released_tcs, e))?;
|
||||
releaser(self.enabled, info, buf)
|
||||
}
|
||||
None => {
|
||||
let tc = tc_store
|
||||
.read_as_vec(&info.addr)
|
||||
.map_err(|e| (released_tcs, e))?;
|
||||
releaser(self.enabled, info, &tc)
|
||||
}
|
||||
};
|
||||
released_tcs += 1;
|
||||
if should_delete {
|
||||
let res = tc_store.delete(info.addr);
|
||||
@ -721,16 +759,17 @@ pub mod alloc_mod {
|
||||
pub fn release_telecommands_no_deletion<R: FnMut(bool, &TcInfo, &[u8])>(
|
||||
&mut self,
|
||||
mut releaser: R,
|
||||
tc_store: &(impl PoolProviderMemInPlace + ?Sized),
|
||||
tc_store: &(impl PoolProvider + ?Sized),
|
||||
tc_buf: &mut [u8],
|
||||
) -> Result<Vec<TcInfo>, (Vec<TcInfo>, StoreError)> {
|
||||
let tcs_to_release = self.telecommands_to_release();
|
||||
let mut released_tcs = Vec::new();
|
||||
for tc in tcs_to_release {
|
||||
for info in tc.1 {
|
||||
let tc = tc_store
|
||||
.read(&info.addr)
|
||||
tc_store
|
||||
.read(&info.addr, tc_buf)
|
||||
.map_err(|e| (released_tcs.clone(), e))?;
|
||||
releaser(self.is_enabled(), info, tc);
|
||||
releaser(self.is_enabled(), info, tc_buf);
|
||||
released_tcs.push(*info);
|
||||
}
|
||||
}
|
||||
@ -744,7 +783,7 @@ pub mod alloc_mod {
|
||||
}
|
||||
}
|
||||
|
||||
impl PusSchedulerInterface for PusScheduler {
|
||||
impl PusSchedulerProvider for PusScheduler {
|
||||
type TimeProvider = cds::TimeProvider;
|
||||
|
||||
/// This will disable the scheduler and clear the schedule as specified in 6.11.4.4.
|
||||
@ -753,10 +792,7 @@ pub mod alloc_mod {
|
||||
/// The holding store for the telecommands needs to be passed so all the stored telecommands
|
||||
/// can be deleted to avoid a memory leak. If at last one deletion operation fails, the error
|
||||
/// will be returned but the method will still try to delete all the commands in the schedule.
|
||||
fn reset(
|
||||
&mut self,
|
||||
store: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
) -> Result<(), StoreError> {
|
||||
fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), StoreError> {
|
||||
self.enabled = false;
|
||||
let mut deletion_ok = Ok(());
|
||||
for tc_lists in &mut self.tc_map {
|
||||
@ -814,8 +850,7 @@ pub mod alloc_mod {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::pool::{
|
||||
PoolProviderMemInPlace, StaticMemoryPool, StaticPoolAddr, StaticPoolConfig, StoreAddr,
|
||||
StoreError,
|
||||
PoolProvider, StaticMemoryPool, StaticPoolAddr, StaticPoolConfig, StoreAddr, StoreError,
|
||||
};
|
||||
use alloc::collections::btree_map::Range;
|
||||
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader};
|
||||
@ -911,7 +946,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_reset() {
|
||||
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
|
||||
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
|
||||
let mut scheduler =
|
||||
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
|
||||
|
||||
@ -1063,7 +1098,7 @@ mod tests {
|
||||
}
|
||||
#[test]
|
||||
fn test_release_telecommands() {
|
||||
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
|
||||
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
|
||||
let mut scheduler =
|
||||
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
|
||||
|
||||
@ -1088,8 +1123,9 @@ mod tests {
|
||||
// test 1: too early, no tcs
|
||||
scheduler.update_time(UnixTimestamp::new_only_seconds(99));
|
||||
|
||||
let mut tc_buf: [u8; 128] = [0; 128];
|
||||
scheduler
|
||||
.release_telecommands(&mut test_closure_1, &mut pool)
|
||||
.release_telecommands_with_buffer(&mut test_closure_1, &mut pool, &mut tc_buf)
|
||||
.expect("deletion failed");
|
||||
|
||||
// test 2: exact time stamp of tc, releases 1 tc
|
||||
@ -1111,7 +1147,7 @@ mod tests {
|
||||
scheduler.update_time(UnixTimestamp::new_only_seconds(206));
released = scheduler
.release_telecommands(&mut test_closure_2, &mut pool)
.release_telecommands_with_buffer(&mut test_closure_2, &mut pool, &mut tc_buf)
.expect("deletion failed");
assert_eq!(released, 1);
// TC is deleted.

@ -1128,7 +1164,7 @@ mod tests {
#[test]
fn release_multi_with_same_time() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));

@ -1157,9 +1193,10 @@ mod tests {
// test 1: too early, no tcs
scheduler.update_time(UnixTimestamp::new_only_seconds(99));
let mut tc_buf: [u8; 128] = [0; 128];
let mut released = scheduler
.release_telecommands(&mut test_closure, &mut pool)
.release_telecommands_with_buffer(&mut test_closure, &mut pool, &mut tc_buf)
.expect("deletion failed");
assert_eq!(released, 0);

@ -1185,7 +1222,7 @@ mod tests {
#[test]
fn release_with_scheduler_disabled() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));

@ -1209,11 +1246,13 @@ mod tests {
true
};
let mut tc_buf: [u8; 128] = [0; 128];
// test 1: too early, no tcs
scheduler.update_time(UnixTimestamp::new_only_seconds(99));
scheduler
.release_telecommands(&mut test_closure_1, &mut pool)
.release_telecommands_with_buffer(&mut test_closure_1, &mut pool, &mut tc_buf)
.expect("deletion failed");
// test 2: exact time stamp of tc, releases 1 tc

@ -1253,7 +1292,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);

@ -1267,8 +1306,9 @@ mod tests {
assert!(pool.has_element_at(&tc_info_0.addr()).unwrap());
let data = pool.read(&tc_info_0.addr()).unwrap();
let check_tc = PusTcReader::new(data).expect("incorrect Pus tc raw data");
let mut read_buf: [u8; 64] = [0; 64];
pool.read(&tc_info_0.addr(), &mut read_buf).unwrap();
let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
assert_eq!(scheduler.num_scheduled_telecommands(), 1);

@ -1289,8 +1329,9 @@ mod tests {
.release_telecommands(&mut test_closure, &mut pool)
.unwrap();
let data = pool.read(&addr_vec[0]).unwrap();
let check_tc = PusTcReader::new(data).expect("incorrect Pus tc raw data");
let read_len = pool.read(&addr_vec[0], &mut read_buf).unwrap();
let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
}

@ -1299,7 +1340,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut buf: [u8; 32] = [0; 32];
let tc = scheduled_tc(UnixTimestamp::new_only_seconds(100), &mut buf);

@ -1313,8 +1354,9 @@ mod tests {
assert!(pool.has_element_at(&info.addr).unwrap());
let data = pool.read(&info.addr).unwrap();
let check_tc = PusTcReader::new(data).expect("incorrect Pus tc raw data");
let read_len = pool.read(&info.addr, &mut buf).unwrap();
let check_tc = PusTcReader::new(&buf).expect("incorrect Pus tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
assert_eq!(scheduler.num_scheduled_telecommands(), 1);

@ -1331,12 +1373,15 @@ mod tests {
false
};
let mut tc_buf: [u8; 64] = [0; 64];
scheduler
.release_telecommands(&mut test_closure, &mut pool)
.release_telecommands_with_buffer(&mut test_closure, &mut pool, &mut tc_buf)
.unwrap();
let data = pool.read(&addr_vec[0]).unwrap();
let check_tc = PusTcReader::new(data).expect("incorrect PUS tc raw data");
let read_len = pool.read(&addr_vec[0], &mut buf).unwrap();
let check_tc = PusTcReader::new(&buf).expect("incorrect PUS tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
}

@ -1345,7 +1390,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut buf: [u8; 32] = [0; 32];
let tc = wrong_tc_service(UnixTimestamp::new_only_seconds(100), &mut buf);

@ -1368,7 +1413,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut buf: [u8; 32] = [0; 32];
let tc = wrong_tc_subservice(UnixTimestamp::new_only_seconds(100), &mut buf);

@ -1390,7 +1435,7 @@ mod tests {
fn insert_wrapped_tc_faulty_app_data() {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let tc = invalid_time_tagged_cmd();
let insert_res = scheduler.insert_wrapped_tc::<cds::TimeProvider>(&tc, &mut pool);
assert!(insert_res.is_err());

@ -1405,7 +1450,7 @@ mod tests {
fn insert_doubly_wrapped_time_tagged_cmd() {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut buf: [u8; 64] = [0; 64];
let tc = double_wrapped_time_tagged_tc(UnixTimestamp::new_only_seconds(50), &mut buf);
let insert_res = scheduler.insert_wrapped_tc::<cds::TimeProvider>(&tc, &mut pool);

@ -1441,7 +1486,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut buf: [u8; 32] = [0; 32];

@ -1465,7 +1510,7 @@ mod tests {
#[test]
fn test_store_error_propagation_release() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];

@ -1500,7 +1545,7 @@ mod tests {
#[test]
fn test_store_error_propagation_reset() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];

@ -1524,7 +1569,7 @@ mod tests {
#[test]
fn test_delete_by_req_id_simple_retrieve_addr() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];

@ -1543,7 +1588,7 @@ mod tests {
#[test]
fn test_delete_by_req_id_simple_delete_all() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];

@ -1562,7 +1607,7 @@ mod tests {
#[test]
fn test_delete_by_req_id_complex() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];

@ -1609,7 +1654,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(1, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(1, 64)], false));
let mut buf: [u8; 32] = [0; 32];
// Store is full after this.

@ -1648,7 +1693,7 @@ mod tests {
#[test]
fn test_time_window_retrieval_select_all() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);

@ -1679,7 +1724,7 @@ mod tests {
#[test]
fn test_time_window_retrieval_select_from_stamp() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);

@ -1710,7 +1755,7 @@ mod tests {
#[test]
fn test_time_window_retrieval_select_to_time() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);

@ -1741,7 +1786,7 @@ mod tests {
#[test]
fn test_time_window_retrieval_select_from_time_to_time() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);

@ -1776,7 +1821,7 @@ mod tests {
#[test]
fn test_deletion_all() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);

@ -1803,7 +1848,7 @@ mod tests {
#[test]
fn test_deletion_from_start_time() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);

@ -1824,7 +1869,7 @@ mod tests {
#[test]
fn test_deletion_to_end_time() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let cmd_0_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);

@ -1846,7 +1891,7 @@ mod tests {
#[test]
fn test_deletion_from_start_time_to_end_time() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let cmd_out_of_range_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);

@ -1875,7 +1920,7 @@ mod tests {
#[test]
fn test_release_without_deletion() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));

@ -1903,8 +1948,9 @@ mod tests {
scheduler.update_time(UnixTimestamp::new_only_seconds(205));
let mut tc_buf: [u8; 64] = [0; 64];
let tc_info_vec = scheduler
.release_telecommands_no_deletion(&mut test_closure_1, &pool)
.release_telecommands_no_deletion(&mut test_closure_1, &pool, &mut tc_buf)
.expect("deletion failed");
assert_eq!(tc_info_vec[0], tc_info_0);
assert_eq!(tc_info_vec[1], tc_info_1);
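The recurring changes in the scheduler tests above are the extra boolean argument to `StaticPoolConfig::new` and the caller-provided `tc_buf` handed to the new `release_telecommands_with_buffer` method. A minimal sketch of the new pool construction, using only calls visible in this change set; the meaning of the trailing flag is an assumption (presumably whether undersized requests may spill over into the next larger subpool):

```rust
use satrs_core::pool::{StaticMemoryPool, StaticPoolConfig};

fn main() {
    // Same subpool layout as the tests above. The trailing boolean is the configuration
    // flag introduced by this change set; its exact meaning is assumed here (spill-over
    // into larger subpools) and not confirmed by the diff itself.
    let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (5, 64)], false);
    let _tc_pool = StaticMemoryPool::new(pool_cfg);
}
```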
@ -1,6 +1,6 @@
use super::scheduler::PusSchedulerInterface;
use super::scheduler::PusSchedulerProvider;
use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
use crate::pool::PoolProviderMemInPlace;
use crate::pool::PoolProvider;
use crate::pus::{PusPacketHandlerResult, PusPacketHandlingError};
use alloc::string::ToString;
use spacepackets::ecss::{scheduling, PusPacket};

@ -16,13 +16,13 @@ use spacepackets::time::cds::TimeProvider;
/// telecommands when applicable.
pub struct PusService11SchedHandler<
TcInMemConverter: EcssTcInMemConverter,
Scheduler: PusSchedulerInterface,
PusScheduler: PusSchedulerProvider,
> {
pub service_helper: PusServiceHelper<TcInMemConverter>,
scheduler: Scheduler,
scheduler: PusScheduler,
}

impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerInterface>
impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerProvider>
PusService11SchedHandler<TcInMemConverter, Scheduler>
{
pub fn new(service_helper: PusServiceHelper<TcInMemConverter>, scheduler: Scheduler) -> Self {

@ -42,7 +42,7 @@ impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerInterface>
pub fn handle_one_tc(
&mut self,
sched_tc_pool: &mut (impl PoolProviderMemInPlace + ?Sized),
sched_tc_pool: &mut (impl PoolProvider + ?Sized),
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {

@ -173,7 +173,7 @@ mod tests {
use crate::pool::{StaticMemoryPool, StaticPoolConfig};
use crate::pus::tests::TEST_APID;
use crate::pus::{
scheduler::{self, PusSchedulerInterface, TcInfo},
scheduler::{self, PusSchedulerProvider, TcInfo},
tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness},
verification::{RequestId, TcStateAccepted, VerificationToken},
EcssTcInSharedStoreConverter,

@ -201,7 +201,7 @@ mod tests {
impl Pus11HandlerWithStoreTester {
pub fn new() -> Self {
let test_scheduler = TestScheduler::default();
let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)]);
let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)], false);
let sched_tc_pool = StaticMemoryPool::new(pool_cfg.clone());
let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
Self {

@ -232,12 +232,12 @@ mod tests {
inserted_tcs: VecDeque<TcInfo>,
}

impl PusSchedulerInterface for TestScheduler {
impl PusSchedulerProvider for TestScheduler {
type TimeProvider = cds::TimeProvider;

fn reset(
&mut self,
_store: &mut (impl crate::pool::PoolProviderMemInPlace + ?Sized),
_store: &mut (impl crate::pool::PoolProvider + ?Sized),
) -> Result<(), crate::pool::StoreError> {
self.reset_count += 1;
Ok(())
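The PUS 11 handler now accepts any pool through the renamed `PoolProvider` trait and reads telecommands into a caller-provided buffer. A sketch of a helper following the same `impl PoolProvider + ?Sized` bound; the exact trait method set and the `StoreError` error type are assumed from the calls visible in this change set:

```rust
use satrs_core::pool::{PoolProvider, StoreAddr, StoreError};

// Hypothetical helper mirroring the new handle_one_tc bound: any pool implementation is
// accepted, and the packet bytes are copied into the buffer supplied by the caller. The
// returned value is assumed to be the number of bytes read.
fn copy_tc_from_pool(
    pool: &(impl PoolProvider + ?Sized),
    addr: &StoreAddr,
    tc_buf: &mut [u8],
) -> Result<usize, StoreError> {
    pool.read(addr, tc_buf)
}
```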
@ -15,7 +15,7 @@
//! ```
//! use std::sync::{Arc, mpsc, RwLock};
//! use std::time::Duration;
//! use satrs_core::pool::{PoolProviderMemInPlaceWithGuards, StaticMemoryPool, StaticPoolConfig};
//! use satrs_core::pool::{PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};
//! use satrs_core::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender};
//! use satrs_core::seq_count::SeqCountProviderSimple;
//! use satrs_core::pus::MpscTmInSharedPoolSender;

@ -28,7 +28,7 @@
//! const EMPTY_STAMP: [u8; 7] = [0; 7];
//! const TEST_APID: u16 = 0x02;
//!
//! let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
//! let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false);
//! let tm_pool = StaticMemoryPool::new(pool_cfg.clone());
//! let shared_tm_store = SharedTmPool::new(tm_pool);
//! let tm_store = shared_tm_store.clone_backing_pool();

@ -56,9 +56,7 @@
//! {
//! let mut rg = tm_store.write().expect("Error locking shared pool");
//! let store_guard = rg.read_with_guard(addr);
//! let slice = store_guard.read().expect("Error reading TM slice");
//! tm_len = slice.len();
//! tm_buf[0..tm_len].copy_from_slice(slice);
//! tm_len = store_guard.read(&mut tm_buf).expect("Error reading TM slice");
//! }
//! let (pus_tm, _) = PusTmReader::new(&tm_buf[0..tm_len], 7)
//! .expect("Error reading verification TM");

@ -1325,7 +1323,7 @@ mod std_mod {
#[cfg(test)]
mod tests {
use crate::pool::{PoolProviderMemInPlaceWithGuards, StaticMemoryPool, StaticPoolConfig};
use crate::pool::{PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};
use crate::pus::tests::CommonTmInfo;
use crate::pus::verification::{
EcssTmSenderCore, EcssTmtcError, FailParams, FailParamsWithStep, RequestId, TcStateNone,

@ -1486,7 +1484,7 @@ mod tests {
#[test]
fn test_mpsc_verif_send_sync() {
let pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(8, 8)]));
let pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(8, 8)], false));
let shared_tm_store = SharedTmPool::new(pool);
let (tx, _) = mpsc::channel();
let mpsc_verif_sender =

@ -1547,7 +1545,7 @@ mod tests {
let mut sender = TestSender::default();
let fail_code = EcssEnumU16::new(2);
let fail_params = FailParams::new(Some(stamp_buf.as_slice()), &fail_code, None);
b.vr.acceptance_failure(tok, &mut sender, fail_params)
b.vr.acceptance_failure(tok, &sender, fail_params)
.expect("Sending acceptance success failed");
acceptance_fail_check(&mut sender, tok.req_id, stamp_buf);
}

@ -1682,12 +1680,10 @@ mod tests {
);

let accepted_token =
b.vr.acceptance_success(tok, &mut sender, Some(&EMPTY_STAMP))
b.vr.acceptance_success(tok, &sender, Some(&EMPTY_STAMP))
.expect("Sending acceptance success failed");
let empty =
b.vr.start_failure(accepted_token, &mut sender, fail_params)
.expect("Start failure failure");
assert_eq!(empty, ());
b.vr.start_failure(accepted_token, &mut sender, fail_params)
.expect("Start failure failure");
start_fail_check(&mut sender, tok.req_id, fail_data_raw);
}

@ -1779,23 +1775,23 @@ mod tests {
let mut sender = TestSender::default();
let accepted_token = b
.rep()
.acceptance_success(tok, &mut sender, Some(&EMPTY_STAMP))
.acceptance_success(tok, &sender, Some(&EMPTY_STAMP))
.expect("Sending acceptance success failed");
let started_token = b
.rep()
.start_success(accepted_token, &mut sender, Some(&[0, 1, 0, 1, 0, 1, 0]))
.start_success(accepted_token, &sender, Some(&[0, 1, 0, 1, 0, 1, 0]))
.expect("Sending start success failed");
b.rep()
.step_success(
&started_token,
&mut sender,
&sender,
Some(&EMPTY_STAMP),
EcssEnumU8::new(0),
)
.expect("Sending step 0 success failed");
b.vr.step_success(
&started_token,
&mut sender,
&sender,
Some(&EMPTY_STAMP),
EcssEnumU8::new(1),
)

@ -2139,9 +2135,9 @@ mod tests {
}

#[test]
// TODO: maybe a bit more extensive testing, all I have time for right now
fn test_seq_count_increment() {
let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
let pool_cfg =
StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false);
let tm_pool = StaticMemoryPool::new(pool_cfg.clone());
let shared_tm_store = SharedTmPool::new(tm_pool);
let shared_tm_pool = shared_tm_store.clone_backing_pool();

@ -2176,9 +2172,9 @@ mod tests {
{
let mut rg = shared_tm_pool.write().expect("Error locking shared pool");
let store_guard = rg.read_with_guard(addr);
let slice = store_guard.read().expect("Error reading TM slice");
tm_len = slice.len();
tm_buf[0..tm_len].copy_from_slice(slice);
tm_len = store_guard
.read(&mut tm_buf)
.expect("Error reading TM slice");
}
let (pus_tm, _) =
PusTmReader::new(&tm_buf[0..tm_len], 7).expect("Error reading verification TM");
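The verification documentation example now reads telemetry from the shared pool into a caller-provided buffer through a read guard instead of borrowing a slice out of the pool. A small self-contained sketch of that pattern, assuming the `PoolProvider` and `PoolProviderWithGuards` traits behave as the calls in this change set suggest:

```rust
use std::sync::{Arc, RwLock};

use satrs_core::pool::{PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};

fn main() {
    let shared_pool = Arc::new(RwLock::new(StaticMemoryPool::new(StaticPoolConfig::new(
        vec![(8, 32)],
        false,
    ))));
    let addr = shared_pool
        .write()
        .unwrap()
        .free_element(4, |buf| buf[0..4].copy_from_slice(&[1, 2, 3, 4]))
        .expect("adding pool element failed");

    // Read through a guard into a caller-provided buffer, as in the updated doc example.
    let mut tm_buf: [u8; 32] = [0; 32];
    let mut rg = shared_pool.write().expect("Error locking shared pool");
    let store_guard = rg.read_with_guard(addr);
    let tm_len = store_guard.read(&mut tm_buf).expect("Error reading TM slice");
    assert_eq!(&tm_buf[0..tm_len], &[1, 2, 3, 4]);
}
```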
@ -8,9 +8,7 @@ pub use std_mod::*;

#[cfg(feature = "std")]
pub mod std_mod {
use crate::pool::{
PoolProviderMemInPlace, SharedStaticMemoryPool, StaticMemoryPool, StoreAddr,
};
use crate::pool::{PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StoreAddr};
use crate::pus::EcssTmtcError;
use spacepackets::ecss::tm::PusTmCreator;
use spacepackets::ecss::WritablePusPacket;

@ -37,10 +35,11 @@ pub mod std_mod {
pub fn add_pus_tm(&self, pus_tm: &PusTmCreator) -> Result<StoreAddr, EcssTmtcError> {
let mut pg = self.0.write().map_err(|_| EcssTmtcError::StoreLock)?;
let (addr, buf) = pg.free_element(pus_tm.len_written())?;
pus_tm
.write_to_bytes(buf)
.expect("writing PUS TM to store failed");
let addr = pg.free_element(pus_tm.len_written(), |buf| {
pus_tm
.write_to_bytes(buf)
.expect("writing PUS TM to store failed");
})?;
Ok(addr)
}
}
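`free_element` now takes the element length plus a closure which fills the freshly reserved slot, instead of returning an `(addr, buffer)` pair. A sketch of storing a PUS TC this way, built only from calls visible in this change set; the shared-pool setup and the APID are illustrative:

```rust
use std::sync::{Arc, RwLock};

use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};
use satrs_core::spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
use satrs_core::spacepackets::ecss::WritablePusPacket;
use satrs_core::spacepackets::SpHeader;

fn main() {
    let shared_tc_pool = Arc::new(RwLock::new(StaticMemoryPool::new(StaticPoolConfig::new(
        vec![(8, 64)],
        false,
    ))));

    // Ping TC (service 17, subservice 1) with an illustrative APID of 0x02.
    let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
    let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
    let pus_tc = PusTcCreator::new_no_app_data(&mut sph, tc_header, true);

    // The closure writes into the reserved slot directly, so no (address, slice) pair
    // escapes the lock scope anymore.
    let mut pool = shared_tc_pool.write().expect("error locking TC store");
    let addr = pool
        .free_element(pus_tc.len_written(), |buf| {
            pus_tc.write_to_bytes(buf).unwrap();
        })
        .expect("storing TC failed");
    assert!(pool.has_element_at(&addr).expect("invalid address"));
}
```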
@ -1,6 +1,4 @@
use satrs_core::pool::{
PoolGuard, PoolProviderMemInPlace, StaticMemoryPool, StaticPoolConfig, StoreAddr,
};
use satrs_core::pool::{PoolGuard, PoolProvider, StaticMemoryPool, StaticPoolConfig, StoreAddr};
use std::ops::DerefMut;
use std::sync::mpsc;
use std::sync::mpsc::{Receiver, Sender};

@ -11,7 +9,7 @@ const DUMMY_DATA: [u8; 4] = [0, 1, 2, 3];

#[test]
fn threaded_usage() {
let pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)]);
let pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)], false);
let shared_pool = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
let shared_clone = shared_pool.clone();
let (tx, rx): (Sender<StoreAddr>, Receiver<StoreAddr>) = mpsc::channel();

@ -27,8 +25,10 @@ fn threaded_usage() {
addr = rx.recv().expect("Receiving store address failed");
let mut pool_access = shared_clone.write().unwrap();
let pg = PoolGuard::new(pool_access.deref_mut(), addr);
let read_res = pg.read().expect("Reading failed");
assert_eq!(read_res, DUMMY_DATA);
let mut read_buf: [u8; 4] = [0; 4];
let read_bytes = pg.read(&mut read_buf).expect("Reading failed");
assert_eq!(read_buf, DUMMY_DATA);
assert_eq!(read_bytes, 4);
}
let pool_access = shared_clone.read().unwrap();
assert!(!pool_access.has_element_at(&addr).expect("Invalid address"));
@ -2,8 +2,7 @@
pub mod crossbeam_test {
use hashbrown::HashMap;
use satrs_core::pool::{
PoolProviderMemInPlace, PoolProviderMemInPlaceWithGuards, StaticMemoryPool,
StaticPoolConfig,
PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig,
};
use satrs_core::pus::verification::{
FailParams, RequestId, VerificationReporterCfg, VerificationReporterWithSender,

@ -36,7 +35,8 @@ pub mod crossbeam_test {
// each reporter have an own sequence count provider.
let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
// Shared pool object to store the verification PUS telemetry
let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
let pool_cfg =
StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false);
let shared_tm_pool = SharedTmPool::new(StaticMemoryPool::new(pool_cfg.clone()));
let shared_tc_pool_0 = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
let shared_tc_pool_1 = shared_tc_pool_0.clone();

@ -59,15 +59,21 @@ pub mod crossbeam_test {
let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
let pus_tc_0 = PusTcCreator::new_no_app_data(&mut sph, tc_header, true);
req_id_0 = RequestId::new(&pus_tc_0);
let (addr, buf) = tc_guard.free_element(pus_tc_0.len_written()).unwrap();
pus_tc_0.write_to_bytes(buf).unwrap();
let addr = tc_guard
.free_element(pus_tc_0.len_written(), |buf| {
pus_tc_0.write_to_bytes(buf).unwrap();
})
.unwrap();
tx_tc_0.send(addr).unwrap();
let mut sph = SpHeader::tc_unseg(TEST_APID, 1, 0).unwrap();
let tc_header = PusTcSecondaryHeader::new_simple(5, 1);
let pus_tc_1 = PusTcCreator::new_no_app_data(&mut sph, tc_header, true);
req_id_1 = RequestId::new(&pus_tc_1);
let (addr, buf) = tc_guard.free_element(pus_tc_0.len_written()).unwrap();
pus_tc_1.write_to_bytes(buf).unwrap();
let addr = tc_guard
.free_element(pus_tc_0.len_written(), |buf| {
pus_tc_1.write_to_bytes(buf).unwrap();
})
.unwrap();
tx_tc_1.send(addr).unwrap();
}
let verif_sender_0 = thread::spawn(move || {

@ -79,9 +85,7 @@ pub mod crossbeam_test {
{
let mut tc_guard = shared_tc_pool_0.write().unwrap();
let pg = tc_guard.read_with_guard(tc_addr);
let buf = pg.read().unwrap();
tc_len = buf.len();
tc_buf[0..tc_len].copy_from_slice(buf);
tc_len = pg.read(&mut tc_buf).unwrap();
}
let (_tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();

@ -117,9 +121,7 @@ pub mod crossbeam_test {
{
let mut tc_guard = shared_tc_pool_1.write().unwrap();
let pg = tc_guard.read_with_guard(tc_addr);
let buf = pg.read().unwrap();
tc_len = buf.len();
tc_buf[0..tc_len].copy_from_slice(buf);
tc_len = pg.read(&mut tc_buf).unwrap();
}
let (tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
let token = reporter_with_sender_1.add_tc(&tc);

@ -149,9 +151,9 @@ pub mod crossbeam_test {
{
let mut rg = shared_tm_store.write().expect("Error locking shared pool");
let store_guard = rg.read_with_guard(verif_addr);
let slice = store_guard.read().expect("Error reading TM slice");
tm_len = slice.len();
tm_buf[0..tm_len].copy_from_slice(slice);
tm_len = store_guard
.read(&mut tm_buf)
.expect("Error reading TM slice");
}
let (pus_tm, _) =
PusTmReader::new(&tm_buf[0..tm_len], 7).expect("Error reading verification TM");
@ -103,34 +103,43 @@ pub mod pool {
use super::*;
pub fn create_static_pools() -> (StaticMemoryPool, StaticMemoryPool) {
(
StaticMemoryPool::new(StaticPoolConfig::new(vec![
(30, 32),
(15, 64),
(15, 128),
(15, 256),
(15, 1024),
(15, 2048),
])),
StaticMemoryPool::new(StaticPoolConfig::new(vec![
(30, 32),
(15, 64),
(15, 128),
(15, 256),
(15, 1024),
(15, 2048),
])),
StaticMemoryPool::new(StaticPoolConfig::new(
vec![
(30, 32),
(15, 64),
(15, 128),
(15, 256),
(15, 1024),
(15, 2048),
],
true,
)),
StaticMemoryPool::new(StaticPoolConfig::new(
vec![
(30, 32),
(15, 64),
(15, 128),
(15, 256),
(15, 1024),
(15, 2048),
],
true,
)),
)
}

pub fn create_sched_tc_pool() -> StaticMemoryPool {
StaticMemoryPool::new(StaticPoolConfig::new(vec![
(30, 32),
(15, 64),
(15, 128),
(15, 256),
(15, 1024),
(15, 2048),
]))
StaticMemoryPool::new(StaticPoolConfig::new(
vec![
(30, 32),
(15, 64),
(15, 128),
(15, 256),
(15, 1024),
(15, 2048),
],
true,
))
}
}
@ -2,7 +2,7 @@ use std::sync::mpsc;
use std::time::Duration;

use log::{error, info, warn};
use satrs_core::pool::{PoolProviderMemInPlace, StaticMemoryPool, StoreAddr};
use satrs_core::pool::{PoolProvider, StaticMemoryPool, StoreAddr};
use satrs_core::pus::scheduler::{PusScheduler, TcInfo};
use satrs_core::pus::scheduler_srv::PusService11SchedHandler;
use satrs_core::pus::verification::VerificationReporterWithSender;

@ -54,6 +54,7 @@ impl TcReleaser for mpsc::Sender<Vec<u8>> {
pub struct Pus11Wrapper<TcInMemConverter: EcssTcInMemConverter> {
pub pus_11_handler: PusService11SchedHandler<TcInMemConverter, PusScheduler>,
pub sched_tc_pool: StaticMemoryPool,
pub releaser_buf: [u8; 4096],
pub tc_releaser: Box<dyn TcReleaser + Send>,
}

@ -70,7 +71,11 @@ impl<TcInMemConverter: EcssTcInMemConverter> Pus11Wrapper<TcInMemConverter> {
let released_tcs = self
.pus_11_handler
.scheduler_mut()
.release_telecommands(releaser, &mut self.sched_tc_pool)
.release_telecommands_with_buffer(
releaser,
&mut self.sched_tc_pool,
&mut self.releaser_buf,
)
.expect("releasing TCs failed");
if released_tcs > 0 {
info!("{released_tcs} TC(s) released from scheduler");

@ -136,6 +141,7 @@ pub fn create_scheduler_service_static(
Pus11Wrapper {
pus_11_handler,
sched_tc_pool,
releaser_buf: [0; 4096],
tc_releaser: Box::new(tc_releaser),
}
}

@ -172,6 +178,7 @@ pub fn create_scheduler_service_dynamic(
Pus11Wrapper {
pus_11_handler,
sched_tc_pool,
releaser_buf: [0; 4096],
tc_releaser: Box::new(tc_source_sender),
}
}
@ -3,8 +3,9 @@ use std::{
sync::mpsc::{Receiver, Sender},
};

use log::info;
use satrs_core::{
pool::{PoolProviderMemInPlace, StoreAddr},
pool::{PoolProvider, StoreAddr},
seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore},
spacepackets::{
ecss::{tm::PusTmZeroCopyWriter, PusPacket},

@ -63,9 +64,14 @@ impl TmFunnelCommon {
*entry += 1;
}

Self::packet_printout(&zero_copy_writer);
// This operation has to come last!
zero_copy_writer.finish();
}

fn packet_printout(tm: &PusTmZeroCopyWriter) {
info!("Sending PUS TM[{},{}]", tm.service(), tm.subservice());
}
}

pub struct TmFunnelStatic {

@ -96,16 +102,21 @@ impl TmFunnelStatic {
// the CRC.
let shared_pool = self.shared_tm_store.clone_backing_pool();
let mut pool_guard = shared_pool.write().expect("Locking TM pool failed");
let tm_raw = pool_guard
.modify(&addr)
let mut tm_copy = Vec::new();
pool_guard
.modify(&addr, |buf| {
let zero_copy_writer = PusTmZeroCopyWriter::new(buf, MIN_CDS_FIELD_LEN)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
tm_copy = buf.to_vec()
})
.expect("Reading TM from pool failed");
let zero_copy_writer = PusTmZeroCopyWriter::new(tm_raw, MIN_CDS_FIELD_LEN)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
self.tm_server_tx
.send(addr)
.expect("Sending TM to server failed");
self.common.sync_tm_tcp_source.add_tm(tm_raw);
// We could also do this step in the update closure, but I'd rather avoid this, could
// lead to nested locking.
self.common.sync_tm_tcp_source.add_tm(&tm_copy);
}
}
}
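In-place updates now go through `modify`, which hands the stored slice to a closure rather than returning a mutable reference, as the TM funnel change above shows. A minimal sketch under the same assumptions as the other snippets:

```rust
use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};

fn main() {
    let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(8, 16)], false));
    let addr = pool
        .free_element(4, |buf| buf[0..4].copy_from_slice(&[1, 2, 3, 4]))
        .unwrap();

    // The update happens inside the closure while the pool borrow is held.
    pool.modify(&addr, |buf| {
        buf[0] = 42;
    })
    .expect("modifying pool element failed");

    let mut check_buf = [0u8; 16];
    let read_len = pool.read(&addr, &mut check_buf).unwrap();
    assert_eq!(&check_buf[0..read_len], &[42, 2, 3, 4]);
}
```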
@ -5,7 +5,7 @@ use std::sync::mpsc::{self, Receiver, SendError, Sender, TryRecvError};
use thiserror::Error;

use crate::pus::PusReceiver;
use satrs_core::pool::{PoolProviderMemInPlace, SharedStaticMemoryPool, StoreAddr, StoreError};
use satrs_core::pool::{PoolProvider, SharedStaticMemoryPool, StoreAddr, StoreError};
use satrs_core::spacepackets::ecss::tc::PusTcReader;
use satrs_core::spacepackets::ecss::PusPacket;
use satrs_core::tmtc::ReceivesCcsdsTc;

@ -28,8 +28,9 @@ pub struct SharedTcPool {
impl SharedTcPool {
pub fn add_pus_tc(&mut self, pus_tc: &PusTcReader) -> Result<StoreAddr, StoreError> {
let mut pg = self.pool.write().expect("error locking TC store");
let (addr, buf) = pg.free_element(pus_tc.len_packed())?;
buf[0..pus_tc.len_packed()].copy_from_slice(pus_tc.raw_data());
let addr = pg.free_element(pus_tc.len_packed(), |buf| {
buf[0..pus_tc.len_packed()].copy_from_slice(pus_tc.raw_data());
})?;
Ok(addr)
}
}

@ -125,8 +126,8 @@ impl TcSourceTaskStatic {
.pool
.read()
.expect("locking tc pool failed");
let data = pool.read(&addr).expect("reading pool failed");
self.tc_buf[0..data.len()].copy_from_slice(data);
pool.read(&addr, &mut self.tc_buf)
.expect("reading pool failed");
drop(pool);
match PusTcReader::new(&self.tc_buf) {
Ok((pus_tc, _)) => {
@ -6,7 +6,7 @@ use std::{
use log::{info, warn};
use satrs_core::{
hal::std::udp_server::{ReceiveResult, UdpTcServer},
pool::{PoolProviderMemInPlaceWithGuards, SharedStaticMemoryPool, StoreAddr},
pool::{PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr},
tmtc::CcsdsError,
};

@ -29,20 +29,13 @@ impl UdpTmHandler for StaticUdpTmHandler {
}
let mut store_lock = store_lock.unwrap();
let pg = store_lock.read_with_guard(addr);
let read_res = pg.read();
let read_res = pg.read_as_vec();
if read_res.is_err() {
warn!("Error reading TM pool data");
continue;
}
let buf = read_res.unwrap();
if buf.len() > 9 {
let service = buf[7];
let subservice = buf[8];
info!("Sending PUS TM[{service},{subservice}]")
} else {
info!("Sending PUS TM");
}
let result = socket.send_to(buf, recv_addr);
let result = socket.send_to(&buf, recv_addr);
if let Err(e) = result {
warn!("Sending TM with UDP socket failed: {e}")
}
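Where a heap allocation is acceptable, the guard's `read_as_vec` replaces manual buffer handling, which is what the UDP TM handler above now uses. A small sketch, again assuming the guard API works as the calls in this change set indicate:

```rust
use satrs_core::pool::{PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};

fn main() {
    let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(4, 32)], false));
    let addr = pool
        .free_element(3, |buf| buf[0..3].copy_from_slice(&[7, 8, 9]))
        .unwrap();

    // read_as_vec returns an owned copy of the stored data, so no stack buffer is needed.
    let pg = pool.read_with_guard(addr);
    let tm = pg.read_as_vec().expect("Error reading TM pool data");
    assert_eq!(tm, vec![7, 8, 9]);
}
```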