2 Commits

80ed68bd6c  Merge branch 'main' into bump-sat-rs-core-deps  2024-02-12 10:54:06 +01:00
            Some checks failed: Rust/sat-rs/pipeline/pr-main - there was a failure building this commit
1f46099c4a  bump core dependencies  2024-02-08 18:15:39 +01:00
            Some checks failed: Rust/sat-rs/pipeline/head - there was a failure building this commit

10 changed files with 84 additions and 211 deletions

View File

@@ -5,14 +5,7 @@ High-level documentation of the [sat-rs project](https://absatsw.irs.uni-stuttga
## Building
-If you have not done so, install the pre-requisites first:
-```sh
-cargo install mdbook --locked
-cargo install mdbook-linkcheck --locked
-```
-After that, you can build the book with:
+If you have not done so, install `mdbook` using `cargo install mdbook --locked`.
```sh
mdbook build

View File

@@ -11,8 +11,7 @@ time where the OBSW might be running on Linux based systems with hundreds of MBs
A useful pattern used commonly in space systems is to limit heap allocations to program
initialization time and avoid frequent run-time allocations. This prevents issues like
-running out of memory (something even Rust can not protect from) or heap fragmentation on systems
-without a MMU.
+running out of memory (something even Rust can not protect from) or heap fragmentation.
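As a minimal illustrative sketch of this pattern (the `TmBuffers` type and the sizes below are made up for the example), all storage is created once at startup and only borrowed afterwards:

```rust
// Illustrative only: the TmBuffers type and the buffer sizes are assumptions.
struct TmBuffers {
    // Pre-allocated once at program initialization; never grown afterwards.
    tm_buf: Vec<u8>,
}

impl TmBuffers {
    fn new() -> Self {
        // The only heap allocation happens here, during initialization.
        Self { tm_buf: vec![0; 2048] }
    }
}

fn main() {
    let mut buffers = TmBuffers::new();
    loop {
        // Run-time code only borrows slices of the pre-allocated buffer,
        // so no allocation (and no heap fragmentation) can occur here.
        let frame = &mut buffers.tm_buf[..128];
        frame[0] = 0xAA;
        break;
    }
}
```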
# Using pre-allocated pool structures
@@ -25,12 +24,6 @@ For example, a very small telecommand (TC) pool might look like this:
![Example Pool](images/pools/static-pools.png)
-The core of the pool abstractions is the
-[PoolProvider trait](https://docs.rs/satrs-core/0.1.0-alpha.3/satrs_core/pool/trait.PoolProvider.html).
-This trait specifies the general API a pool structure should have without making assumption
-of how the data is stored.
-This trait is implemented by a static memory pool implementation.
The code to generate this static pool would look like this:
```rust
@@ -46,27 +39,21 @@ let tc_pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![
It should be noted that the buckets only show the maximum size of data being stored inside them.
The store will keep a separate structure to track the actual size of the data being stored.
-<!-- TODO: Add explanation and references for used data-structures. Also explain core trait
-to work with the pool. -->
A TC entry inside this pool has a store address which can then be sent around without having
to dynamically allocate memory. The same principle can also be applied to the telemetry (TM) and
inter-process communication (IPC) data.
-You can read
-- [`StaticPoolConfig` API](https://docs.rs/satrs-core/0.1.0-alpha.3/satrs_core/pool/struct.StaticPoolConfig.html)
-- [`StaticMemoryPool` API](https://docs.rs/satrs-core/0.1.0-alpha.3/satrs_core/pool/struct.StaticMemoryPool.html)
-for more details.
-In the future, optimized pool structures which use standard containers or are
-[`Sync`](https://doc.rust-lang.org/std/marker/trait.Sync.html) by default might be added as well.
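A hedged sketch of how such a store address can be passed between threads instead of owned buffers, based on the `StaticMemoryPool`, `PoolProvider` and `StoreAddr` names used in this chapter; the exact constructor and method signatures depend on the satrs-core version:

```rust
use std::sync::{mpsc, Arc, RwLock};
use std::thread;

use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig, StoreAddr};

fn main() {
    // One subpool with four 64 byte buckets. Depending on the satrs-core version the
    // constructor may take only the Vec, without the trailing spill flag.
    let pool_cfg = StaticPoolConfig::new(vec![(4, 64)], false);
    let shared_pool = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
    let pool_clone = shared_pool.clone();
    let (tx, rx) = mpsc::channel::<StoreAddr>();

    let producer = thread::spawn(move || {
        let tc = [0x17, 0x00, 0x01, 0x02];
        // add() is assumed to copy the TC bytes into a fitting bucket and yield an address.
        let addr = pool_clone.write().unwrap().add(&tc).expect("adding TC failed");
        // Only the small store address crosses the channel, not the TC data itself.
        tx.send(addr).expect("sending store address failed");
    });

    let addr = rx.recv().expect("receiving store address failed");
    let mut read_buf = [0u8; 64];
    // read() is assumed to copy the stored bytes back into a caller provided buffer.
    shared_pool
        .write()
        .unwrap()
        .read(&addr, &mut read_buf)
        .expect("reading TC failed");
    producer.join().unwrap();
}
```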
# Using special crates to prevent smaller allocations
Another common way to use the heap on host systems is using containers like `String` and `Vec<u8>`
to work with data where the size is not known beforehand. The most common solution for embedded
systems is to determine the maximum expected size and then use a pre-allocated `u8` buffer and a
size variable. Alternatively, you can use the following crates for more convenience or a smart
-behaviour which at the very least reduces heap allocations:
+behaviour which at the very least reduce heap allocations:
1. [`smallvec`](https://docs.rs/smallvec/latest/smallvec/).
2. [`arrayvec`](https://docs.rs/arrayvec/latest/arrayvec/index.html) which also contains an
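A short sketch of how these two crates keep small, bounded data off the heap (the capacities below are arbitrary example values and assume both crates are added as dependencies):

```rust
use arrayvec::ArrayVec;
use smallvec::{smallvec, SmallVec};

fn main() {
    // Stays on the stack as long as at most 16 bytes are stored; only spills to the
    // heap beyond that capacity.
    let mut small: SmallVec<[u8; 16]> = smallvec![0x01, 0x02, 0x03];
    small.push(0x04);
    assert_eq!(small.len(), 4);

    // Hard upper bound of 16 bytes and never allocates; try_push fails when full.
    let mut bounded: ArrayVec<u8, 16> = ArrayVec::new();
    bounded.try_push(0x01).expect("capacity exceeded");
    assert_eq!(bounded.len(), 1);
}
```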

View File

@@ -6,7 +6,7 @@
//! use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};
//!
//! // 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes
-//! let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)], false);
+//! let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)]);
//! let mut local_pool = StaticMemoryPool::new(pool_cfg);
//! let mut read_buf: [u8; 16] = [0; 16];
//! let mut addr;
@@ -203,12 +203,8 @@ impl Error for StoreError {
}
}
-/// Generic trait for pool providers which provide memory pools for variable sized data.
-///
-/// It specifies a basic API to [Self::add], [Self::modify], [Self::read] and [Self::delete] data
-/// in the store at its core. The API was designed so internal optimizations can be performed
-/// more easily and that is is also possible to make the pool structure [Sync] without the whole
-/// pool structure being wrapped inside a lock.
+/// Generic trait for pool providers where the data can be modified and read in-place. This
+/// generally means that a shared pool structure has to be wrapped inside a lock structure.
pub trait PoolProvider {
/// Add new data to the pool. The provider should attempt to reserve a memory block with the
/// appropriate size and then copy the given data to the block. Yields a [StoreAddr] which can
@@ -255,7 +251,6 @@ pub trait PoolProvider {
}
}
-/// Extension trait which adds guarded pool access classes.
pub trait PoolProviderWithGuards: PoolProvider {
/// This function behaves like [PoolProvider::read], but consumes the provided address
/// and returns a RAII conformant guard object.
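As a hedged illustration of staying generic over these traits: the helper below only relies on the `add` method named in the `PoolProvider` docs above, and its exact signature is assumed here.

```rust
use satrs_core::pool::{PoolProvider, StoreAddr, StoreError};

// Hypothetical helper: works with any pool provider (e.g. a StaticMemoryPool or a
// pool wrapped inside a lock). add() is assumed to copy the bytes into a fitting
// bucket and to yield a StoreAddr on success.
fn store_packet<P: PoolProvider>(pool: &mut P, packet: &[u8]) -> Result<StoreAddr, StoreError> {
    pool.add(packet)
}
```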
@@ -285,8 +280,7 @@ pub struct PoolGuard<'a, MemProvider: PoolProvider + ?Sized> {
deletion_failed_error: Option<StoreError>,
}
-/// This helper object can be used to safely access pool data without worrying about memory
-/// leaks.
+/// This helper object
impl<'a, MemProvider: PoolProvider> PoolGuard<'a, MemProvider> {
pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self {
Self {
@@ -369,24 +363,16 @@ mod alloc_mod {
///
/// # Parameters
///
-/// * `cfg` - Vector of tuples which represent a subpool. The first entry in the tuple specifies
-/// the number of memory blocks in the subpool, the second entry the size of the blocks
-/// * `spill_to_higher_subpools` - Specifies whether data will be spilled to higher subpools
-/// if the next fitting subpool is full. This is useful to ensure the pool remains useful
-/// for all data sizes as long as possible, but it might also lead to frequently used
-/// subpools which were not dimensioned properly chocking larger subpools.
+/// * `cfg`: Vector of tuples which represent a subpool. The first entry in the tuple specifies the
+/// number of memory blocks in the subpool, the second entry the size of the blocks
#[derive(Clone)]
pub struct StaticPoolConfig {
cfg: Vec<(NumBlocks, usize)>,
-spill_to_higher_subpools: bool,
}
impl StaticPoolConfig {
-pub fn new(cfg: Vec<(NumBlocks, usize)>, spill_to_higher_subpools: bool) -> Self {
-StaticPoolConfig {
-cfg,
-spill_to_higher_subpools,
-}
+pub fn new(cfg: Vec<(NumBlocks, usize)>) -> Self {
+StaticPoolConfig { cfg }
}
pub fn cfg(&self) -> &Vec<(NumBlocks, usize)> {
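For a rough idea of the memory footprint, the payload capacity pre-allocated by a configuration is simply the sum over all subpools of block count times block size (a sketch that ignores the separate size-tracking lists):

```rust
fn main() {
    // Example configuration from the module docs above: 4 buckets of 4 bytes,
    // 2 buckets of 8 bytes and 1 bucket of 16 bytes.
    let cfg: Vec<(usize, usize)> = vec![(4, 4), (2, 8), (1, 16)];
    // 4 * 4 + 2 * 8 + 1 * 16 = 48 bytes of payload capacity are pre-allocated.
    let total_bytes: usize = cfg.iter().map(|(num, size)| num * size).sum();
    assert_eq!(total_bytes, 48);
}
```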
@@ -404,21 +390,16 @@ mod alloc_mod {
/// Pool implementation providing sub-pools with fixed size memory blocks.
///
-/// This is a simple memory pool implementation which pre-allocates all subpools using a given
-/// pool configuration. After the pre-allocation, no dynamic memory allocation will be
-/// performed during run-time. This makes the implementation suitable for real-time
-/// applications and embedded environments.
-///
-/// The subpool bucket sizes only denote the maximum possible data size being stored inside
-/// them and the pool implementation will still track the size of the data stored inside it.
-/// The implementation will generally determine the best fitting subpool for given data to
-/// add. Currently, the pool does not support spilling to larger subpools if the closest
-/// fitting subpool is full. This might be added in the future.
+/// This is a simple memory pool implementation which pre-allocates all sub-pools using a given pool
+/// configuration. After the pre-allocation, no dynamic memory allocation will be performed
+/// during run-time. This makes the implementation suitable for real-time applications and
+/// embedded environments. The pool implementation will also track the size of the data stored
+/// inside it.
///
/// Transactions with the [pool][StaticMemoryPool] are done using a generic
-/// [address][StoreAddr] type. Adding any data to the pool will yield a store address.
-/// Modification and read operations are done using a reference to a store address. Deletion
-/// will consume the store address.
+/// [address][StoreAddr] type.
+/// Adding any data to the pool will yield a store address. Modification and read operations are
+/// done using a reference to a store address. Deletion will consume the store address.
pub struct StaticMemoryPool {
pool_cfg: StaticPoolConfig,
pool: Vec<Vec<u8>>,
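A hedged sketch of the transaction life-cycle described above; the method names follow the trait docs earlier in this file, the signatures are assumed, and the constructor may or may not take the spill flag depending on the version:

```rust
use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};

fn main() {
    // 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes; some versions take only
    // the Vec, without the trailing spill flag.
    let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)], false);
    let mut pool = StaticMemoryPool::new(pool_cfg);

    // Adding data yields a store address.
    let addr = pool.add(&[1, 2, 3, 4]).expect("adding data failed");

    // Reading takes a reference to the address and a caller provided buffer.
    let mut read_buf = [0u8; 16];
    pool.read(&addr, &mut read_buf).expect("reading data failed");

    // Deletion consumes the store address.
    pool.delete(addr).expect("deleting data failed");
}
```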
@@ -475,17 +456,7 @@ mod alloc_mod {
}
fn reserve(&mut self, data_len: usize) -> Result<StaticPoolAddr, StoreError> {
-let mut subpool_idx = self.find_subpool(data_len, 0)?;
-if self.pool_cfg.spill_to_higher_subpools {
-while let Err(StoreError::StoreFull(_)) = self.find_empty(subpool_idx) {
-if (subpool_idx + 1) as usize == self.sizes_lists.len() {
-return Err(StoreError::StoreFull(subpool_idx));
-}
-subpool_idx += 1;
-}
-}
+let subpool_idx = self.find_subpool(data_len, 0)?;
let (slot, size_slot_ref) = self.find_empty(subpool_idx)?;
*size_slot_ref = data_len;
Ok(StaticPoolAddr {
@@ -657,22 +628,22 @@ mod tests {
fn basic_small_pool() -> StaticMemoryPool {
// 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes
-let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)], false);
+let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)]);
StaticMemoryPool::new(pool_cfg)
}
#[test]
fn test_cfg() {
// Values where number of buckets is 0 or size is too large should be removed
-let mut pool_cfg = StaticPoolConfig::new(vec![(0, 0), (1, 0), (2, POOL_MAX_SIZE)], false);
+let mut pool_cfg = StaticPoolConfig::new(vec![(0, 0), (1, 0), (2, POOL_MAX_SIZE)]);
pool_cfg.sanitize();
assert_eq!(*pool_cfg.cfg(), vec![(1, 0)]);
// Entries should be ordered according to bucket size
-pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)], false);
+pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)]);
pool_cfg.sanitize();
assert_eq!(*pool_cfg.cfg(), vec![(32, 3), (16, 6), (8, 12)]);
// Unstable sort is used, so order of entries with same block length should not matter
-pool_cfg = StaticPoolConfig::new(vec![(12, 12), (14, 16), (10, 12)], false);
+pool_cfg = StaticPoolConfig::new(vec![(12, 12), (14, 16), (10, 12)]);
pool_cfg.sanitize();
assert!(
*pool_cfg.cfg() == vec![(12, 12), (10, 12), (14, 16)]
@@ -965,72 +936,4 @@ mod tests {
})
.expect("Modifying data failed");
}
-#[test]
-fn test_spills_to_higher_subpools() {
-let pool_cfg = StaticPoolConfig::new(vec![(2, 8), (2, 16)], true);
-let mut local_pool = StaticMemoryPool::new(pool_cfg);
-local_pool.free_element(8, |_| {}).unwrap();
-local_pool.free_element(8, |_| {}).unwrap();
-let mut in_larger_subpool_now = local_pool.free_element(8, |_| {});
-assert!(in_larger_subpool_now.is_ok());
-let generic_addr = in_larger_subpool_now.unwrap();
-let pool_addr = StaticPoolAddr::from(generic_addr);
-assert_eq!(pool_addr.pool_idx, 1);
-assert_eq!(pool_addr.packet_idx, 0);
-assert!(local_pool.has_element_at(&generic_addr).unwrap());
-in_larger_subpool_now = local_pool.free_element(8, |_| {});
-assert!(in_larger_subpool_now.is_ok());
-let generic_addr = in_larger_subpool_now.unwrap();
-let pool_addr = StaticPoolAddr::from(generic_addr);
-assert_eq!(pool_addr.pool_idx, 1);
-assert_eq!(pool_addr.packet_idx, 1);
-assert!(local_pool.has_element_at(&generic_addr).unwrap());
-}
-#[test]
-fn test_spillage_fails_as_well() {
-let pool_cfg = StaticPoolConfig::new(vec![(1, 8), (1, 16)], true);
-let mut local_pool = StaticMemoryPool::new(pool_cfg);
-local_pool.free_element(8, |_| {}).unwrap();
-local_pool.free_element(8, |_| {}).unwrap();
-let should_fail = local_pool.free_element(8, |_| {});
-assert!(should_fail.is_err());
-if let Err(err) = should_fail {
-assert_eq!(err, StoreError::StoreFull(1));
-} else {
-panic!("unexpected store address");
-}
-}
-#[test]
-fn test_spillage_works_across_multiple_subpools() {
-let pool_cfg = StaticPoolConfig::new(vec![(1, 8), (1, 12), (1, 16)], true);
-let mut local_pool = StaticMemoryPool::new(pool_cfg);
-local_pool.free_element(8, |_| {}).unwrap();
-local_pool.free_element(12, |_| {}).unwrap();
-let in_larger_subpool_now = local_pool.free_element(8, |_| {});
-assert!(in_larger_subpool_now.is_ok());
-let generic_addr = in_larger_subpool_now.unwrap();
-let pool_addr = StaticPoolAddr::from(generic_addr);
-assert_eq!(pool_addr.pool_idx, 2);
-assert_eq!(pool_addr.packet_idx, 0);
-assert!(local_pool.has_element_at(&generic_addr).unwrap());
-}
-#[test]
-fn test_spillage_fails_across_multiple_subpools() {
-let pool_cfg = StaticPoolConfig::new(vec![(1, 8), (1, 12), (1, 16)], true);
-let mut local_pool = StaticMemoryPool::new(pool_cfg);
-local_pool.free_element(8, |_| {}).unwrap();
-local_pool.free_element(12, |_| {}).unwrap();
-local_pool.free_element(16, |_| {}).unwrap();
-let should_fail = local_pool.free_element(8, |_| {});
-assert!(should_fail.is_err());
-if let Err(err) = should_fail {
-assert_eq!(err, StoreError::StoreFull(2));
-} else {
-panic!("unexpected store address");
-}
-}
}

View File

@@ -1017,7 +1017,7 @@ pub mod tests {
///
/// The PUS service handler is instantiated with a [EcssTcInStoreConverter].
pub fn new() -> (Self, PusServiceHelper<EcssTcInSharedStoreConverter>) {
-let pool_cfg = StaticPoolConfig::new(vec![(16, 16), (8, 32), (4, 64)], false);
+let pool_cfg = StaticPoolConfig::new(vec![(16, 16), (8, 32), (4, 64)]);
let tc_pool = StaticMemoryPool::new(pool_cfg.clone());
let tm_pool = StaticMemoryPool::new(pool_cfg);
let shared_tc_pool = SharedStaticMemoryPool::new(RwLock::new(tc_pool));

View File

@@ -945,7 +945,7 @@ mod tests {
#[test]
fn test_reset() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
@@ -1097,7 +1097,7 @@ mod tests {
}
#[test]
fn test_release_telecommands() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
@@ -1163,7 +1163,7 @@ mod tests {
#[test]
fn release_multi_with_same_time() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
@@ -1221,7 +1221,7 @@ mod tests {
#[test]
fn release_with_scheduler_disabled() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
@@ -1291,7 +1291,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
@@ -1339,7 +1339,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut buf: [u8; 32] = [0; 32];
let tc = scheduled_tc(UnixTimestamp::new_only_seconds(100), &mut buf);
@@ -1389,7 +1389,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut buf: [u8; 32] = [0; 32];
let tc = wrong_tc_service(UnixTimestamp::new_only_seconds(100), &mut buf);
@@ -1412,7 +1412,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut buf: [u8; 32] = [0; 32];
let tc = wrong_tc_subservice(UnixTimestamp::new_only_seconds(100), &mut buf);
@@ -1434,7 +1434,7 @@ mod tests {
fn insert_wrapped_tc_faulty_app_data() {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let tc = invalid_time_tagged_cmd();
let insert_res = scheduler.insert_wrapped_tc::<cds::TimeProvider>(&tc, &mut pool);
assert!(insert_res.is_err());
@@ -1449,7 +1449,7 @@ mod tests {
fn insert_doubly_wrapped_time_tagged_cmd() {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut buf: [u8; 64] = [0; 64];
let tc = double_wrapped_time_tagged_tc(UnixTimestamp::new_only_seconds(50), &mut buf);
let insert_res = scheduler.insert_wrapped_tc::<cds::TimeProvider>(&tc, &mut pool);
@@ -1485,7 +1485,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut buf: [u8; 32] = [0; 32];
@@ -1509,7 +1509,7 @@ mod tests {
#[test]
fn test_store_error_propagation_release() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
@@ -1544,7 +1544,7 @@ mod tests {
#[test]
fn test_store_error_propagation_reset() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
@@ -1568,7 +1568,7 @@ mod tests {
#[test]
fn test_delete_by_req_id_simple_retrieve_addr() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
@@ -1587,7 +1587,7 @@ mod tests {
#[test]
fn test_delete_by_req_id_simple_delete_all() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
@@ -1606,7 +1606,7 @@ mod tests {
#[test]
fn test_delete_by_req_id_complex() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
@@ -1653,7 +1653,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(1, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(1, 64)]));
let mut buf: [u8; 32] = [0; 32];
// Store is full after this.
@@ -1692,7 +1692,7 @@ mod tests {
#[test]
fn test_time_window_retrieval_select_all() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@@ -1723,7 +1723,7 @@ mod tests {
#[test]
fn test_time_window_retrieval_select_from_stamp() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@@ -1754,7 +1754,7 @@ mod tests {
#[test]
fn test_time_window_retrieval_select_to_time() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@@ -1785,7 +1785,7 @@ mod tests {
#[test]
fn test_time_window_retrieval_select_from_time_to_time() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@@ -1820,7 +1820,7 @@ mod tests {
#[test]
fn test_deletion_all() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@@ -1847,7 +1847,7 @@ mod tests {
#[test]
fn test_deletion_from_start_time() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@@ -1868,7 +1868,7 @@ mod tests {
#[test]
fn test_deletion_to_end_time() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let cmd_0_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@@ -1890,7 +1890,7 @@ mod tests {
#[test]
fn test_deletion_from_start_time_to_end_time() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let cmd_out_of_range_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@@ -1919,7 +1919,7 @@ mod tests {
#[test]
fn test_release_without_deletion() {
-let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
+let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));

View File

@@ -201,7 +201,7 @@ mod tests {
impl Pus11HandlerWithStoreTester {
pub fn new() -> Self {
let test_scheduler = TestScheduler::default();
-let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)], false);
+let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)]);
let sched_tc_pool = StaticMemoryPool::new(pool_cfg.clone());
let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
Self {

View File

@@ -28,7 +28,7 @@
//! const EMPTY_STAMP: [u8; 7] = [0; 7];
//! const TEST_APID: u16 = 0x02;
//!
-//! let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false);
+//! let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
//! let tm_pool = StaticMemoryPool::new(pool_cfg.clone());
//! let shared_tm_store = SharedTmPool::new(tm_pool);
//! let tm_store = shared_tm_store.clone_backing_pool();
@@ -1484,7 +1484,7 @@ mod tests {
#[test]
fn test_mpsc_verif_send_sync() {
-let pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(8, 8)], false));
+let pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(8, 8)]));
let shared_tm_store = SharedTmPool::new(pool);
let (tx, _) = mpsc::channel();
let mpsc_verif_sender =
@@ -2135,9 +2135,9 @@ mod tests {
}
#[test]
-// TODO: maybe a bit more extensive testing, all I have time for right now
fn test_seq_count_increment() {
-let pool_cfg =
-StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false);
+let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
let tm_pool = StaticMemoryPool::new(pool_cfg.clone());
let shared_tm_store = SharedTmPool::new(tm_pool);
let shared_tm_pool = shared_tm_store.clone_backing_pool();

View File

@@ -9,7 +9,7 @@ const DUMMY_DATA: [u8; 4] = [0, 1, 2, 3];
#[test]
fn threaded_usage() {
-let pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)], false);
+let pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)]);
let shared_pool = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
let shared_clone = shared_pool.clone();
let (tx, rx): (Sender<StoreAddr>, Receiver<StoreAddr>) = mpsc::channel();

View File

@@ -35,8 +35,7 @@ pub mod crossbeam_test {
// each reporter have an own sequence count provider.
let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
// Shared pool object to store the verification PUS telemetry
-let pool_cfg =
-StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false);
+let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
let shared_tm_pool = SharedTmPool::new(StaticMemoryPool::new(pool_cfg.clone()));
let shared_tc_pool_0 = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
let shared_tc_pool_1 = shared_tc_pool_0.clone();

View File

@@ -103,43 +103,34 @@ pub mod pool {
use super::*;
pub fn create_static_pools() -> (StaticMemoryPool, StaticMemoryPool) {
(
-StaticMemoryPool::new(StaticPoolConfig::new(
-vec![
-(30, 32),
-(15, 64),
-(15, 128),
-(15, 256),
-(15, 1024),
-(15, 2048),
-],
-true,
-)),
-StaticMemoryPool::new(StaticPoolConfig::new(
-vec![
-(30, 32),
-(15, 64),
-(15, 128),
-(15, 256),
-(15, 1024),
-(15, 2048),
-],
-true,
-)),
-)
-}
-pub fn create_sched_tc_pool() -> StaticMemoryPool {
-StaticMemoryPool::new(StaticPoolConfig::new(
-vec![
-(30, 32),
-(15, 64),
-(15, 128),
-(15, 256),
-(15, 1024),
-(15, 2048),
-],
-true,
-))
+StaticMemoryPool::new(StaticPoolConfig::new(vec![
+(30, 32),
+(15, 64),
+(15, 128),
+(15, 256),
+(15, 1024),
+(15, 2048),
+])),
+StaticMemoryPool::new(StaticPoolConfig::new(vec![
+(30, 32),
+(15, 64),
+(15, 128),
+(15, 256),
+(15, 1024),
+(15, 2048),
+])),
+)
+}
+pub fn create_sched_tc_pool() -> StaticMemoryPool {
+StaticMemoryPool::new(StaticPoolConfig::new(vec![
+(30, 32),
+(15, 64),
+(15, 128),
+(15, 256),
+(15, 1024),
+(15, 2048),
+]))
}
}