Finish PUS service optimizations

- Better naming for pool abstractions
- Added last unittests for PUS helper services
- Introduce new abstraction for PUS schedulers
- `StoreAddr` is now a generic u64
- `spacepackets` points to 0.7.0 release
Robin Müller 2024-02-03 13:46:05 +01:00
parent a09af65396
commit 11b66c8bf7
30 changed files with 2124 additions and 1153 deletions
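
Since `StoreAddr` is now a plain type alias for `u64`, pool addresses can travel through channels without the receiver knowing the concrete pool layout. A minimal round-trip sketch based on the `From` conversions added in the pool module below (note the `StaticPoolAddr` fields are `pub(crate)`, so this only compiles from within the crate, as in its unit tests):

```rust
use satrs_core::pool::{StaticPoolAddr, StoreAddr};

let pool_addr = StaticPoolAddr { pool_idx: 2, packet_idx: 3 };
// Packs the subpool index into the upper and the packet index into the lower bits.
let raw: StoreAddr = pool_addr.into();
assert_eq!(raw, 0x0002_0003);
// And back again.
let unpacked = StaticPoolAddr::from(raw);
assert_eq!((unpacked.pool_idx, unpacked.packet_idx), (2, 3));
```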

@@ -13,4 +13,4 @@ event components recommended by this framework do not really need this service.
 The following images shows how the flow of events could look like in a system where components
 can generate events, and where other system components might be interested in those events:
 
-![Event flow](../../images/events/event_man_arch.png)
+![Event flow](images/events/event_man_arch.png)

Binary file not shown (new image, 116 KiB).

@@ -73,7 +73,7 @@ features = ["all"]
 optional = true
 
 [dependencies.spacepackets]
-version = "0.7.0-beta.4"
+version = "0.7.0"
 default-features = false
 # git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git"
 # rev = "297cfad22637d3b07a1b27abe56d9a607b5b82a7"

@@ -123,4 +123,4 @@ doc-images = []
 
 [package.metadata.docs.rs]
 all-features = true
-rustdoc-args = ["--cfg", "doc_cfg"]
+rustdoc-args = ["--cfg", "doc_cfg", "--generate-link-to-definition"]

@@ -1,49 +0,0 @@
-pub enum FsrcGroupIds {
-    Tmtc = 0,
-}
-
-pub struct FsrcErrorRaw {
-    pub group_id: u8,
-    pub unique_id: u8,
-    pub group_name: &'static str,
-    pub info: &'static str,
-}
-
-pub trait FsrcErrorHandler {
-    fn error(&mut self, e: FsrcErrorRaw);
-    fn error_with_one_param(&mut self, e: FsrcErrorRaw, _p1: u32) {
-        self.error(e);
-    }
-    fn error_with_two_params(&mut self, e: FsrcErrorRaw, _p1: u32, _p2: u32) {
-        self.error(e);
-    }
-}
-
-impl FsrcErrorRaw {
-    pub const fn new(
-        group_id: u8,
-        unique_id: u8,
-        group_name: &'static str,
-        info: &'static str,
-    ) -> Self {
-        FsrcErrorRaw {
-            group_id,
-            unique_id,
-            group_name,
-            info,
-        }
-    }
-}
-
-#[derive(Clone, Copy, Default)]
-pub struct SimpleStdErrorHandler {}
-
-#[cfg(feature = "use_std")]
-impl FsrcErrorHandler for SimpleStdErrorHandler {
-    fn error(&mut self, e: FsrcErrorRaw) {
-        println!(
-            "Received error from group {} with ID ({},{}): {}",
-            e.group_name, e.group_id, e.unique_id, e.info
-        );
-    }
-}

@@ -24,7 +24,6 @@ extern crate std;
 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 pub mod cfdp;
 pub mod encoding;
-pub mod error;
 #[cfg(feature = "alloc")]
 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 pub mod event_man;

@@ -1,23 +1,13 @@
-//! # Pool implementation providing pre-allocated sub-pools with fixed size memory blocks
+//! # Pool implementation providing memory pools for packet storage.
 //!
-//! This is a simple memory pool implementation which pre-allocates all sub-pools using a given pool
-//! configuration. After the pre-allocation, no dynamic memory allocation will be performed
-//! during run-time. This makes the implementation suitable for real-time applications and
-//! embedded environments. The pool implementation will also track the size of the data stored
-//! inside it.
-//!
-//! Transactions with the [pool][LocalPool] are done using a special [address][StoreAddr] type.
-//! Adding any data to the pool will yield a store address. Modification and read operations are
-//! done using a reference to a store address. Deletion will consume the store address.
-//!
-//! # Example
+//! # Example for the [StaticMemoryPool]
 //!
 //! ```
-//! use satrs_core::pool::{LocalPool, PoolCfg, PoolProvider};
+//! use satrs_core::pool::{PoolProviderMemInPlace, StaticMemoryPool, StaticPoolConfig};
 //!
 //! // 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes
-//! let pool_cfg = PoolCfg::new(vec![(4, 4), (2, 8), (1, 16)]);
-//! let mut local_pool = LocalPool::new(pool_cfg);
+//! let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)]);
+//! let mut local_pool = StaticMemoryPool::new(pool_cfg);
 //! let mut addr;
 //! {
 //!     // Add new data to the pool
@@ -77,22 +67,24 @@
 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 pub use alloc_mod::*;
 use core::fmt::{Display, Formatter};
+use delegate::delegate;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 #[cfg(feature = "std")]
 use std::error::Error;
 
 type NumBlocks = u16;
+pub type StoreAddr = u64;
 
 /// Simple address type used for transactions with the local pool.
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-pub struct StoreAddr {
+pub struct StaticPoolAddr {
     pub(crate) pool_idx: u16,
     pub(crate) packet_idx: NumBlocks,
 }
 
-impl StoreAddr {
+impl StaticPoolAddr {
     pub const INVALID_ADDR: u32 = 0xFFFFFFFF;
 
     pub fn raw(&self) -> u32 {

@@ -100,7 +92,22 @@ impl StoreAddr {
     }
 }
 
-impl Display for StoreAddr {
+impl From<StaticPoolAddr> for StoreAddr {
+    fn from(value: StaticPoolAddr) -> Self {
+        ((value.pool_idx as u64) << 16) | value.packet_idx as u64
+    }
+}
+
+impl From<StoreAddr> for StaticPoolAddr {
+    fn from(value: StoreAddr) -> Self {
+        Self {
+            pool_idx: ((value >> 16) & 0xff) as u16,
+            packet_idx: (value & 0xff) as u16,
+        }
+    }
+}
+
+impl Display for StaticPoolAddr {
     fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
         write!(
             f,
@@ -180,39 +187,158 @@ impl Error for StoreError {
     }
 }
 
+/// Generic trait for pool providers where the data can be modified and read in-place. This
+/// generally means that a shared pool structure has to be wrapped inside a lock structure.
+pub trait PoolProviderMemInPlace {
+    /// Add new data to the pool. The provider should attempt to reserve a memory block with the
+    /// appropriate size and then copy the given data to the block. Yields a [StoreAddr] which can
+    /// be used to access the data stored in the pool
+    fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError>;
+
+    /// The provider should attempt to reserve a free memory block with the appropriate size and
+    /// then return a mutable reference to it. Yields a [StoreAddr] which can be used to access
+    /// the data stored in the pool
+    fn free_element(&mut self, len: usize) -> Result<(StoreAddr, &mut [u8]), StoreError>;
+
+    /// Modify data added previously using a given [StoreAddr] by yielding a mutable reference
+    /// to it
+    fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError>;
+
+    /// Read data by yielding a read-only reference given a [StoreAddr]
+    fn read(&self, addr: &StoreAddr) -> Result<&[u8], StoreError>;
+
+    /// Delete data inside the pool given a [StoreAddr]
+    fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError>;
+    fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError>;
+
+    /// Retrieve the length of the data at the given store address.
+    fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
+        if !self.has_element_at(addr)? {
+            return Err(StoreError::DataDoesNotExist(*addr));
+        }
+        Ok(self.read(addr)?.len())
+    }
+}
+
+pub trait PoolProviderMemInPlaceWithGuards: PoolProviderMemInPlace {
+    /// This function behaves like [PoolProviderMemInPlace::read], but consumes the provided address
+    /// and returns a RAII conformant guard object.
+    ///
+    /// Unless the guard [PoolRwGuard::release] method is called, the data for the
+    /// given address will be deleted automatically when the guard is dropped.
+    /// This can prevent memory leaks. Users can read the data and release the guard
+    /// if the data in the store is valid for further processing. If the data is faulty, no
+    /// manual deletion is necessary when returning from a processing function prematurely.
+    fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard<Self>;
+
+    /// This function behaves like [PoolProviderMemInPlace::modify], but consumes the provided
+    /// address and returns a RAII conformant guard object.
+    ///
+    /// Unless the guard [PoolRwGuard::release] method is called, the data for the
+    /// given address will be deleted automatically when the guard is dropped.
+    /// This can prevent memory leaks. Users can read (and modify) the data and release the guard
+    /// if the data in the store is valid for further processing. If the data is faulty, no
+    /// manual deletion is necessary when returning from a processing function prematurely.
+    fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard<Self>;
+}
+
+pub struct PoolGuard<'a, MemProvider: PoolProviderMemInPlace + ?Sized> {
+    pool: &'a mut MemProvider,
+    pub addr: StoreAddr,
+    no_deletion: bool,
+    deletion_failed_error: Option<StoreError>,
+}
+
+/// This helper object
+impl<'a, MemProvider: PoolProviderMemInPlace> PoolGuard<'a, MemProvider> {
+    pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self {
+        Self {
+            pool,
+            addr,
+            no_deletion: false,
+            deletion_failed_error: None,
+        }
+    }
+
+    pub fn read(&self) -> Result<&[u8], StoreError> {
+        self.pool.read(&self.addr)
+    }
+
+    /// Releasing the pool guard will disable the automatic deletion of the data when the guard
+    /// is dropped.
+    pub fn release(&mut self) {
+        self.no_deletion = true;
+    }
+}
+
+impl<MemProvider: PoolProviderMemInPlace + ?Sized> Drop for PoolGuard<'_, MemProvider> {
+    fn drop(&mut self) {
+        if !self.no_deletion {
+            if let Err(e) = self.pool.delete(self.addr) {
+                self.deletion_failed_error = Some(e);
+            }
+        }
+    }
+}
+
+pub struct PoolRwGuard<'a, MemProvider: PoolProviderMemInPlace + ?Sized> {
+    guard: PoolGuard<'a, MemProvider>,
+}
+
+impl<'a, MemProvider: PoolProviderMemInPlace> PoolRwGuard<'a, MemProvider> {
+    pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self {
+        Self {
+            guard: PoolGuard::new(pool, addr),
+        }
+    }
+
+    pub fn modify(&mut self) -> Result<&mut [u8], StoreError> {
+        self.guard.pool.modify(&self.guard.addr)
+    }
+
+    delegate!(
+        to self.guard {
+            pub fn read(&self) -> Result<&[u8], StoreError>;
+            /// Releasing the pool guard will disable the automatic deletion of the data when the guard
+            /// is dropped.
+            pub fn release(&mut self);
+        }
+    );
+}
+
 #[cfg(feature = "alloc")]
 mod alloc_mod {
+    use super::{
+        PoolGuard, PoolProviderMemInPlace, PoolProviderMemInPlaceWithGuards, PoolRwGuard,
+        StaticPoolAddr,
+    };
     use crate::pool::{NumBlocks, StoreAddr, StoreError, StoreIdError};
-    use alloc::boxed::Box;
     use alloc::vec;
     use alloc::vec::Vec;
-    use delegate::delegate;
     #[cfg(feature = "std")]
     use std::sync::{Arc, RwLock};
 
     #[cfg(feature = "std")]
-    pub type ShareablePoolProvider = Box<dyn PoolProvider + Send + Sync>;
-    #[cfg(feature = "std")]
-    pub type SharedPool = Arc<RwLock<ShareablePoolProvider>>;
+    pub type SharedStaticMemoryPool = Arc<RwLock<StaticMemoryPool>>;
 
     type PoolSize = usize;
     const STORE_FREE: PoolSize = PoolSize::MAX;
     pub const POOL_MAX_SIZE: PoolSize = STORE_FREE - 1;
 
-    /// Configuration structure of the [local pool][LocalPool]
+    /// Configuration structure of the [static memory pool][StaticMemoryPool]
     ///
     /// # Parameters
     ///
     /// * `cfg`: Vector of tuples which represent a subpool. The first entry in the tuple specifies the
     ///     number of memory blocks in the subpool, the second entry the size of the blocks
     #[derive(Clone)]
-    pub struct PoolCfg {
+    pub struct StaticPoolConfig {
         cfg: Vec<(NumBlocks, usize)>,
     }
 
-    impl PoolCfg {
+    impl StaticPoolConfig {
         pub fn new(cfg: Vec<(NumBlocks, usize)>) -> Self {
-            PoolCfg { cfg }
+            StaticPoolConfig { cfg }
         }
 
         pub fn cfg(&self) -> &Vec<(NumBlocks, usize)> {
@@ -228,135 +354,30 @@ mod alloc_mod {
         }
     }
 
-    pub struct PoolGuard<'a> {
-        pool: &'a mut LocalPool,
-        pub addr: StoreAddr,
-        no_deletion: bool,
-        deletion_failed_error: Option<StoreError>,
-    }
-
-    /// This helper object
-    impl<'a> PoolGuard<'a> {
-        pub fn new(pool: &'a mut LocalPool, addr: StoreAddr) -> Self {
-            Self {
-                pool,
-                addr,
-                no_deletion: false,
-                deletion_failed_error: None,
-            }
-        }
-
-        pub fn read(&self) -> Result<&[u8], StoreError> {
-            self.pool.read(&self.addr)
-        }
-
-        /// Releasing the pool guard will disable the automatic deletion of the data when the guard
-        /// is dropped.
-        pub fn release(&mut self) {
-            self.no_deletion = true;
-        }
-    }
-
-    impl Drop for PoolGuard<'_> {
-        fn drop(&mut self) {
-            if !self.no_deletion {
-                if let Err(e) = self.pool.delete(self.addr) {
-                    self.deletion_failed_error = Some(e);
-                }
-            }
-        }
-    }
-
-    pub struct PoolRwGuard<'a> {
-        guard: PoolGuard<'a>,
-    }
-
-    impl<'a> PoolRwGuard<'a> {
-        pub fn new(pool: &'a mut LocalPool, addr: StoreAddr) -> Self {
-            Self {
-                guard: PoolGuard::new(pool, addr),
-            }
-        }
-
-        pub fn modify(&mut self) -> Result<&mut [u8], StoreError> {
-            self.guard.pool.modify(&self.guard.addr)
-        }
-
-        delegate!(
-            to self.guard {
-                pub fn read(&self) -> Result<&[u8], StoreError>;
-                /// Releasing the pool guard will disable the automatic deletion of the data when the guard
-                /// is dropped.
-                pub fn release(&mut self);
-            }
-        );
-    }
-
-    pub trait PoolProvider {
-        /// Add new data to the pool. The provider should attempt to reserve a memory block with the
-        /// appropriate size and then copy the given data to the block. Yields a [StoreAddr] which can
-        /// be used to access the data stored in the pool
-        fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError>;
-
-        /// The provider should attempt to reserve a free memory block with the appropriate size and
-        /// then return a mutable reference to it. Yields a [StoreAddr] which can be used to access
-        /// the data stored in the pool
-        fn free_element(&mut self, len: usize) -> Result<(StoreAddr, &mut [u8]), StoreError>;
-
-        /// Modify data added previously using a given [StoreAddr] by yielding a mutable reference
-        /// to it
-        fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError>;
-
-        /// This function behaves like [Self::modify], but consumes the provided address and returns a
-        /// RAII conformant guard object.
-        ///
-        /// Unless the guard [PoolRwGuard::release] method is called, the data for the
-        /// given address will be deleted automatically when the guard is dropped.
-        /// This can prevent memory leaks. Users can read (and modify) the data and release the guard
-        /// if the data in the store is valid for further processing. If the data is faulty, no
-        /// manual deletion is necessary when returning from a processing function prematurely.
-        fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard;
-
-        /// Read data by yielding a read-only reference given a [StoreAddr]
-        fn read(&self, addr: &StoreAddr) -> Result<&[u8], StoreError>;
-
-        /// This function behaves like [Self::read], but consumes the provided address and returns a
-        /// RAII conformant guard object.
-        ///
-        /// Unless the guard [PoolRwGuard::release] method is called, the data for the
-        /// given address will be deleted automatically when the guard is dropped.
-        /// This can prevent memory leaks. Users can read the data and release the guard
-        /// if the data in the store is valid for further processing. If the data is faulty, no
-        /// manual deletion is necessary when returning from a processing function prematurely.
-        fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard;
-
-        /// Delete data inside the pool given a [StoreAddr]
-        fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError>;
-        fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError>;
-
-        /// Retrieve the length of the data at the given store address.
-        fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
-            if !self.has_element_at(addr)? {
-                return Err(StoreError::DataDoesNotExist(*addr));
-            }
-            Ok(self.read(addr)?.len())
-        }
-    }
-
-    /// Pool implementation providing sub-pools with fixed size memory blocks. More details in
-    /// the [module documentation][crate::pool]
-    pub struct LocalPool {
-        pool_cfg: PoolCfg,
+    /// Pool implementation providing sub-pools with fixed size memory blocks.
+    ///
+    /// This is a simple memory pool implementation which pre-allocates all sub-pools using a given pool
+    /// configuration. After the pre-allocation, no dynamic memory allocation will be performed
+    /// during run-time. This makes the implementation suitable for real-time applications and
+    /// embedded environments. The pool implementation will also track the size of the data stored
+    /// inside it.
+    ///
+    /// Transactions with the [pool][StaticMemoryPool] are done using a generic
+    /// [address][StoreAddr] type.
+    /// Adding any data to the pool will yield a store address. Modification and read operations are
+    /// done using a reference to a store address. Deletion will consume the store address.
+    pub struct StaticMemoryPool {
+        pool_cfg: StaticPoolConfig,
         pool: Vec<Vec<u8>>,
         sizes_lists: Vec<Vec<PoolSize>>,
     }
 
-    impl LocalPool {
-        /// Create a new local pool from the [given configuration][PoolCfg]. This function will sanitize
-        /// the given configuration as well.
-        pub fn new(mut cfg: PoolCfg) -> LocalPool {
+    impl StaticMemoryPool {
+        /// Create a new local pool from the [given configuration][StaticPoolConfig]. This function
+        /// will sanitize the given configuration as well.
+        pub fn new(mut cfg: StaticPoolConfig) -> StaticMemoryPool {
             let subpools_num = cfg.sanitize();
-            let mut local_pool = LocalPool {
+            let mut local_pool = StaticMemoryPool {
                 pool_cfg: cfg,
                 pool: Vec::with_capacity(subpools_num),
                 sizes_lists: Vec::with_capacity(subpools_num),
@@ -372,39 +393,39 @@ mod alloc_mod {
             local_pool
         }
 
-        fn addr_check(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
+        fn addr_check(&self, addr: &StaticPoolAddr) -> Result<usize, StoreError> {
             self.validate_addr(addr)?;
             let pool_idx = addr.pool_idx as usize;
             let size_list = self.sizes_lists.get(pool_idx).unwrap();
             let curr_size = size_list[addr.packet_idx as usize];
             if curr_size == STORE_FREE {
-                return Err(StoreError::DataDoesNotExist(*addr));
+                return Err(StoreError::DataDoesNotExist(StoreAddr::from(*addr)));
             }
             Ok(curr_size)
         }
 
-        fn validate_addr(&self, addr: &StoreAddr) -> Result<(), StoreError> {
+        fn validate_addr(&self, addr: &StaticPoolAddr) -> Result<(), StoreError> {
             let pool_idx = addr.pool_idx as usize;
             if pool_idx >= self.pool_cfg.cfg.len() {
                 return Err(StoreError::InvalidStoreId(
                     StoreIdError::InvalidSubpool(addr.pool_idx),
-                    Some(*addr),
+                    Some(StoreAddr::from(*addr)),
                 ));
             }
             if addr.packet_idx >= self.pool_cfg.cfg[addr.pool_idx as usize].0 {
                 return Err(StoreError::InvalidStoreId(
                     StoreIdError::InvalidPacketIdx(addr.packet_idx),
-                    Some(*addr),
+                    Some(StoreAddr::from(*addr)),
                 ));
             }
             Ok(())
         }
 
-        fn reserve(&mut self, data_len: usize) -> Result<StoreAddr, StoreError> {
+        fn reserve(&mut self, data_len: usize) -> Result<StaticPoolAddr, StoreError> {
             let subpool_idx = self.find_subpool(data_len, 0)?;
             let (slot, size_slot_ref) = self.find_empty(subpool_idx)?;
             *size_slot_ref = data_len;
-            Ok(StoreAddr {
+            Ok(StaticPoolAddr {
                 pool_idx: subpool_idx,
                 packet_idx: slot,
             })

@@ -422,7 +443,7 @@ mod alloc_mod {
             Err(StoreError::DataTooLarge(req_size))
         }
 
-        fn write(&mut self, addr: &StoreAddr, data: &[u8]) -> Result<(), StoreError> {
+        fn write(&mut self, addr: &StaticPoolAddr, data: &[u8]) -> Result<(), StoreError> {
             let packet_pos = self.raw_pos(addr).ok_or(StoreError::InternalError(0))?;
             let subpool = self
                 .pool

@@ -449,13 +470,13 @@ mod alloc_mod {
             Err(StoreError::StoreFull(subpool))
         }
 
-        fn raw_pos(&self, addr: &StoreAddr) -> Option<usize> {
+        fn raw_pos(&self, addr: &StaticPoolAddr) -> Option<usize> {
             let (_, size) = self.pool_cfg.cfg.get(addr.pool_idx as usize)?;
             Some(addr.packet_idx as usize * size)
         }
     }
-    impl PoolProvider for LocalPool {
+    impl PoolProviderMemInPlace for StaticMemoryPool {
         fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError> {
             let data_len = data.len();
             if data_len > POOL_MAX_SIZE {

@@ -463,7 +484,7 @@ mod alloc_mod {
             }
             let addr = self.reserve(data_len)?;
             self.write(&addr, data)?;
-            Ok(addr)
+            Ok(addr.into())
         }
 
         fn free_element(&mut self, len: usize) -> Result<(StoreAddr, &mut [u8]), StoreError> {

@@ -474,34 +495,29 @@ mod alloc_mod {
             let raw_pos = self.raw_pos(&addr).unwrap();
             let block =
                 &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + len];
-            Ok((addr, block))
+            Ok((addr.into(), block))
         }
 
         fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError> {
-            let curr_size = self.addr_check(addr)?;
-            let raw_pos = self.raw_pos(addr).unwrap();
+            let addr = StaticPoolAddr::from(*addr);
+            let curr_size = self.addr_check(&addr)?;
+            let raw_pos = self.raw_pos(&addr).unwrap();
             let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()
                 [raw_pos..raw_pos + curr_size];
             Ok(block)
         }
 
-        fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard {
-            PoolRwGuard::new(self, addr)
-        }
-
         fn read(&self, addr: &StoreAddr) -> Result<&[u8], StoreError> {
-            let curr_size = self.addr_check(addr)?;
-            let raw_pos = self.raw_pos(addr).unwrap();
+            let addr = StaticPoolAddr::from(*addr);
+            let curr_size = self.addr_check(&addr)?;
+            let raw_pos = self.raw_pos(&addr).unwrap();
             let block =
                 &self.pool.get(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + curr_size];
             Ok(block)
         }
 
-        fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard {
-            PoolGuard::new(self, addr)
-        }
-
         fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError> {
+            let addr = StaticPoolAddr::from(addr);
             self.addr_check(&addr)?;
             let block_size = self.pool_cfg.cfg.get(addr.pool_idx as usize).unwrap().1;
             let raw_pos = self.raw_pos(&addr).unwrap();

@@ -514,7 +530,8 @@ mod alloc_mod {
         }
 
         fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError> {
-            self.validate_addr(addr)?;
+            let addr = StaticPoolAddr::from(*addr);
+            self.validate_addr(&addr)?;
             let pool_idx = addr.pool_idx as usize;
             let size_list = self.sizes_lists.get(pool_idx).unwrap();
             let curr_size = size_list[addr.packet_idx as usize];

@@ -524,34 +541,45 @@ mod alloc_mod {
             Ok(true)
         }
     }
 
+    impl PoolProviderMemInPlaceWithGuards for StaticMemoryPool {
+        fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard<Self> {
+            PoolRwGuard::new(self, addr)
+        }
+
+        fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard<Self> {
+            PoolGuard::new(self, addr)
+        }
+    }
 }
 #[cfg(test)]
 mod tests {
     use crate::pool::{
-        LocalPool, PoolCfg, PoolGuard, PoolProvider, PoolRwGuard, StoreAddr, StoreError,
-        StoreIdError, POOL_MAX_SIZE,
+        PoolGuard, PoolProviderMemInPlace, PoolProviderMemInPlaceWithGuards, PoolRwGuard,
+        StaticMemoryPool, StaticPoolAddr, StaticPoolConfig, StoreError, StoreIdError,
+        POOL_MAX_SIZE,
     };
     use std::vec;
 
-    fn basic_small_pool() -> LocalPool {
+    fn basic_small_pool() -> StaticMemoryPool {
         // 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes
-        let pool_cfg = PoolCfg::new(vec![(4, 4), (2, 8), (1, 16)]);
-        LocalPool::new(pool_cfg)
+        let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)]);
+        StaticMemoryPool::new(pool_cfg)
     }
 
     #[test]
     fn test_cfg() {
         // Values where number of buckets is 0 or size is too large should be removed
-        let mut pool_cfg = PoolCfg::new(vec![(0, 0), (1, 0), (2, POOL_MAX_SIZE)]);
+        let mut pool_cfg = StaticPoolConfig::new(vec![(0, 0), (1, 0), (2, POOL_MAX_SIZE)]);
         pool_cfg.sanitize();
         assert_eq!(*pool_cfg.cfg(), vec![(1, 0)]);
         // Entries should be ordered according to bucket size
-        pool_cfg = PoolCfg::new(vec![(16, 6), (32, 3), (8, 12)]);
+        pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)]);
         pool_cfg.sanitize();
         assert_eq!(*pool_cfg.cfg(), vec![(32, 3), (16, 6), (8, 12)]);
         // Unstable sort is used, so order of entries with same block length should not matter
-        pool_cfg = PoolCfg::new(vec![(12, 12), (14, 16), (10, 12)]);
+        pool_cfg = StaticPoolConfig::new(vec![(12, 12), (14, 16), (10, 12)]);
         pool_cfg.sanitize();
         assert!(
             *pool_cfg.cfg() == vec![(12, 12), (10, 12), (14, 16)]

@@ -600,10 +628,10 @@ mod tests {
         let (addr, buf_ref) = res.unwrap();
         assert_eq!(
             addr,
-            StoreAddr {
+            u64::from(StaticPoolAddr {
                 pool_idx: 2,
                 packet_idx: 0
-            }
+            })
         );
         assert_eq!(buf_ref.len(), 12);
     }

@@ -655,10 +683,13 @@ mod tests {
     fn test_read_does_not_exist() {
         let local_pool = basic_small_pool();
         // Try to access data which does not exist
-        let res = local_pool.read(&StoreAddr {
-            packet_idx: 0,
-            pool_idx: 0,
-        });
+        let res = local_pool.read(
+            &StaticPoolAddr {
+                packet_idx: 0,
+                pool_idx: 0,
+            }
+            .into(),
+        );
         assert!(res.is_err());
         assert!(matches!(
             res.unwrap_err(),

@@ -684,10 +715,11 @@ mod tests {
     #[test]
     fn test_invalid_pool_idx() {
         let local_pool = basic_small_pool();
-        let addr = StoreAddr {
+        let addr = StaticPoolAddr {
             pool_idx: 3,
             packet_idx: 0,
-        };
+        }
+        .into();
         let res = local_pool.read(&addr);
         assert!(res.is_err());
         let err = res.unwrap_err();

@@ -700,12 +732,12 @@ mod tests {
     #[test]
     fn test_invalid_packet_idx() {
         let local_pool = basic_small_pool();
-        let addr = StoreAddr {
+        let addr = StaticPoolAddr {
             pool_idx: 2,
             packet_idx: 1,
         };
         assert_eq!(addr.raw(), 0x00020001);
-        let res = local_pool.read(&addr);
+        let res = local_pool.read(&addr.into());
         assert!(res.is_err());
         let err = res.unwrap_err();
         assert!(matches!(
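
A short usage sketch for the guard API moved into the new `PoolProviderMemInPlaceWithGuards` trait above (assuming the `satrs_core` crate paths from this diff): dropping a guard without calling `release()` deletes the stored data, which is what keeps prematurely abandoned TC slots from leaking.

```rust
use satrs_core::pool::{
    PoolProviderMemInPlace, PoolProviderMemInPlaceWithGuards, StaticMemoryPool, StaticPoolConfig,
};

// One subpool with four 4-byte blocks.
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(4, 4)]));
let addr = pool.add(&[1, 2, 3, 4]).expect("adding data failed");
{
    let read_guard = pool.read_with_guard(addr);
    assert_eq!(read_guard.read().expect("read failed"), &[1, 2, 3, 4][..]);
    // No release() call: the data is deleted when the guard drops here.
}
assert!(!pool.has_element_at(&addr).expect("addr check failed"));
```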

@@ -82,7 +82,7 @@ pub mod heapless_mod {
     }
 }
 
-#[derive(Debug)]
+#[derive(Debug, PartialEq, Eq)]
 pub enum EventRequest<Event: GenericEvent = EventU32> {
     Enable(Event),
     Disable(Event),

@@ -1,85 +1,64 @@
 use crate::events::EventU32;
-use crate::pool::{SharedPool, StoreAddr};
 use crate::pus::event_man::{EventRequest, EventRequestWithToken};
-use crate::pus::verification::{
-    StdVerifReporterWithSender, TcStateAccepted, TcStateToken, VerificationToken,
-};
-use crate::pus::{
-    EcssTcReceiver, EcssTmSender, PartialPusHandlingError, PusPacketHandlerResult,
-    PusPacketHandlingError, PusServiceBase, PusServiceHandler,
-};
-use alloc::boxed::Box;
+use crate::pus::verification::TcStateToken;
+use crate::pus::{PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError};
 use spacepackets::ecss::event::Subservice;
-use spacepackets::ecss::tc::PusTcReader;
 use spacepackets::ecss::PusPacket;
 use std::sync::mpsc::Sender;
 
-pub struct PusService5EventHandler {
-    psb: PusServiceBase,
+use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
+
+pub struct PusService5EventHandler<TcInMemConverter: EcssTcInMemConverter> {
+    pub service_helper: PusServiceHelper<TcInMemConverter>,
     event_request_tx: Sender<EventRequestWithToken>,
 }
 
-impl PusService5EventHandler {
+impl<TcInMemConverter: EcssTcInMemConverter> PusService5EventHandler<TcInMemConverter> {
     pub fn new(
-        tc_receiver: Box<dyn EcssTcReceiver>,
-        shared_tc_store: SharedPool,
-        tm_sender: Box<dyn EcssTmSender>,
-        tm_apid: u16,
-        verification_handler: StdVerifReporterWithSender,
+        service_handler: PusServiceHelper<TcInMemConverter>,
        event_request_tx: Sender<EventRequestWithToken>,
     ) -> Self {
         Self {
-            psb: PusServiceBase::new(
-                tc_receiver,
-                shared_tc_store,
-                tm_sender,
-                tm_apid,
-                verification_handler,
-            ),
+            service_helper: service_handler,
             event_request_tx,
         }
     }
-}
 
-impl PusServiceHandler for PusService5EventHandler {
-    fn psb_mut(&mut self) -> &mut PusServiceBase {
-        &mut self.psb
-    }
-    fn psb(&self) -> &PusServiceBase {
-        &self.psb
-    }
-
-    fn handle_one_tc(
-        &mut self,
-        addr: StoreAddr,
-        token: VerificationToken<TcStateAccepted>,
-    ) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
-        self.copy_tc_to_buf(addr)?;
-        let (tc, _) = PusTcReader::new(&self.psb.pus_buf)?;
+    pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+        let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
+        if possible_packet.is_none() {
+            return Ok(PusPacketHandlerResult::Empty);
+        }
+        let ecss_tc_and_token = possible_packet.unwrap();
+        let tc = self
+            .service_helper
+            .tc_in_mem_converter
+            .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
         let subservice = tc.subservice();
         let srv = Subservice::try_from(subservice);
         if srv.is_err() {
             return Ok(PusPacketHandlerResult::CustomSubservice(
                 tc.subservice(),
-                token,
+                ecss_tc_and_token.token,
             ));
         }
         let handle_enable_disable_request = |enable: bool, stamp: [u8; 7]| {
             if tc.user_data().len() < 4 {
                 return Err(PusPacketHandlingError::NotEnoughAppData(
-                    "At least 4 bytes event ID expected".into(),
+                    "at least 4 bytes event ID expected".into(),
                 ));
             }
             let user_data = tc.user_data();
             let event_u32 = EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap()));
             let start_token = self
-                .psb
+                .service_helper
+                .common
                 .verification_handler
                 .borrow_mut()
-                .start_success(token, Some(&stamp))
+                .start_success(ecss_tc_and_token.token, Some(&stamp))
                 .map_err(|_| PartialPusHandlingError::Verification);
             let partial_error = start_token.clone().err();
-            let mut token: TcStateToken = token.into();
+            let mut token: TcStateToken = ecss_tc_and_token.token.into();
             if let Ok(start_token) = start_token {
                 token = start_token.into();
             }

@@ -107,7 +86,7 @@ impl PusServiceHandler for PusService5EventHandler {
             Ok(PusPacketHandlerResult::RequestHandled)
         };
         let mut partial_error = None;
-        let time_stamp = self.psb().get_current_timestamp(&mut partial_error);
+        let time_stamp = PusServiceBase::get_current_timestamp(&mut partial_error);
         match srv.unwrap() {
             Subservice::TmInfoReport
             | Subservice::TmLowSeverityReport

@@ -123,7 +102,8 @@ impl PusServiceHandler for PusService5EventHandler {
             }
             Subservice::TcReportDisabledList | Subservice::TmDisabledEventsReport => {
                 return Ok(PusPacketHandlerResult::SubserviceNotImplemented(
-                    subservice, token,
+                    subservice,
+                    ecss_tc_and_token.token,
                 ));
             }
         }

@@ -131,3 +111,170 @@ impl PusServiceHandler for PusService5EventHandler {
         Ok(PusPacketHandlerResult::RequestHandled)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use delegate::delegate;
+    use spacepackets::ecss::event::Subservice;
+    use spacepackets::util::UnsignedEnum;
+    use spacepackets::{
+        ecss::{
+            tc::{PusTcCreator, PusTcSecondaryHeader},
+            tm::PusTmReader,
+        },
+        SequenceFlags, SpHeader,
+    };
+    use std::sync::mpsc::{self, Sender};
+
+    use crate::pus::event_man::EventRequest;
+    use crate::pus::tests::SimplePusPacketHandler;
+    use crate::pus::verification::RequestId;
+    use crate::{
+        events::EventU32,
+        pus::{
+            event_man::EventRequestWithToken,
+            tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness, TEST_APID},
+            verification::{TcStateAccepted, VerificationToken},
+            EcssTcInSharedStoreConverter, PusPacketHandlerResult, PusPacketHandlingError,
+        },
+    };
+
+    use super::PusService5EventHandler;
+
+    const TEST_EVENT_0: EventU32 = EventU32::const_new(crate::events::Severity::INFO, 5, 25);
+
+    struct Pus5HandlerWithStoreTester {
+        common: PusServiceHandlerWithSharedStoreCommon,
+        handler: PusService5EventHandler<EcssTcInSharedStoreConverter>,
+    }
+
+    impl Pus5HandlerWithStoreTester {
+        pub fn new(event_request_tx: Sender<EventRequestWithToken>) -> Self {
+            let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
+            Self {
+                common,
+                handler: PusService5EventHandler::new(srv_handler, event_request_tx),
+            }
+        }
+    }
+
+    impl PusTestHarness for Pus5HandlerWithStoreTester {
+        delegate! {
+            to self.common {
+                fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
+                fn read_next_tm(&mut self) -> PusTmReader<'_>;
+                fn check_no_tm_available(&self) -> bool;
+                fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId);
+            }
+        }
+    }
+
+    impl SimplePusPacketHandler for Pus5HandlerWithStoreTester {
+        delegate! {
+            to self.handler {
+                fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
+            }
+        }
+    }
+
+    fn event_test(
+        test_harness: &mut (impl PusTestHarness + SimplePusPacketHandler),
+        subservice: Subservice,
+        expected_event_req: EventRequest,
+        event_req_receiver: mpsc::Receiver<EventRequestWithToken>,
+    ) {
+        let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
+        let sec_header = PusTcSecondaryHeader::new_simple(5, subservice as u8);
+        let mut app_data = [0; 4];
+        TEST_EVENT_0
+            .write_to_be_bytes(&mut app_data)
+            .expect("writing test event failed");
+        let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true);
+        let token = test_harness.send_tc(&ping_tc);
+        let request_id = token.req_id();
+        test_harness.handle_one_tc().unwrap();
+        test_harness.check_next_verification_tm(1, request_id);
+        test_harness.check_next_verification_tm(3, request_id);
+        // Completion TM is not generated for us.
+        assert!(test_harness.check_no_tm_available());
+        let event_request = event_req_receiver
+            .try_recv()
+            .expect("no event request received");
+        assert_eq!(expected_event_req, event_request.request);
+    }
+
+    #[test]
+    fn test_enabling_event_reporting() {
+        let (event_request_tx, event_request_rx) = mpsc::channel();
+        let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
+        event_test(
+            &mut test_harness,
+            Subservice::TcEnableEventGeneration,
+            EventRequest::Enable(TEST_EVENT_0),
+            event_request_rx,
+        );
+    }
+
+    #[test]
+    fn test_disabling_event_reporting() {
+        let (event_request_tx, event_request_rx) = mpsc::channel();
+        let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
+        event_test(
+            &mut test_harness,
+            Subservice::TcDisableEventGeneration,
+            EventRequest::Disable(TEST_EVENT_0),
+            event_request_rx,
+        );
+    }
+
+    #[test]
+    fn test_empty_tc_queue() {
+        let (event_request_tx, _) = mpsc::channel();
+        let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
+        let result = test_harness.handle_one_tc();
+        assert!(result.is_ok());
+        let result = result.unwrap();
+        if let PusPacketHandlerResult::Empty = result {
+        } else {
+            panic!("unexpected result type {result:?}")
+        }
+    }
+
+    #[test]
+    fn test_sending_custom_subservice() {
+        let (event_request_tx, _) = mpsc::channel();
+        let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
+        let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
+        let sec_header = PusTcSecondaryHeader::new_simple(5, 200);
+        let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
+        test_harness.send_tc(&ping_tc);
+        let result = test_harness.handle_one_tc();
+        assert!(result.is_ok());
+        let result = result.unwrap();
+        if let PusPacketHandlerResult::CustomSubservice(subservice, _) = result {
+            assert_eq!(subservice, 200);
+        } else {
+            panic!("unexpected result type {result:?}")
+        }
+    }
+
+    #[test]
+    fn test_sending_invalid_app_data() {
+        let (event_request_tx, _) = mpsc::channel();
+        let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
+        let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
+        let sec_header =
+            PusTcSecondaryHeader::new_simple(5, Subservice::TcEnableEventGeneration as u8);
+        let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &[0, 1, 2], true);
+        test_harness.send_tc(&ping_tc);
+        let result = test_harness.handle_one_tc();
+        assert!(result.is_err());
+        let result = result.unwrap_err();
+        if let PusPacketHandlingError::NotEnoughAppData(string) = result {
+            assert_eq!(string, "at least 4 bytes event ID expected");
+        } else {
+            panic!("unexpected result type {result:?}")
+        }
+    }
+}
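
The reworked handler now owns its polling entry point instead of implementing a `PusServiceHandler` trait. A hypothetical drain loop built on `handle_one_tc()` (the `pus::event` module path and the helper name `handle_all_pending_tcs` are assumptions for illustration; the diff page does not show the file name):

```rust
use satrs_core::pus::event::PusService5EventHandler; // module path assumed
use satrs_core::pus::{EcssTcInMemConverter, PusPacketHandlerResult, PusPacketHandlingError};

/// Handle all currently pending service-5 TCs, stopping at the first hard error.
fn handle_all_pending_tcs<Converter: EcssTcInMemConverter>(
    handler: &mut PusService5EventHandler<Converter>,
) -> Result<(), PusPacketHandlingError> {
    loop {
        match handler.handle_one_tc()? {
            PusPacketHandlerResult::Empty => return Ok(()),
            // Handled, custom or unimplemented subservice: keep draining.
            _ => continue,
        }
    }
}
```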

@@ -55,12 +55,6 @@ impl<'tm> From<PusTmCreator<'tm>> for PusTmWrapper<'tm> {
     }
 }
 
-pub type TcAddrWithToken = (StoreAddr, TcStateToken);
-
-/// Generic abstraction for a telecommand being sent around after is has been accepted.
-/// The actual telecommand is stored inside a pre-allocated pool structure.
-pub type AcceptedTc = (StoreAddr, VerificationToken<TcStateAccepted>);
-
 /// Generic error type for sending something via a message queue.
 #[derive(Debug, Copy, Clone)]
 pub enum GenericSendError {
@@ -200,11 +194,75 @@ pub trait EcssTcSenderCore: EcssChannel {
     fn send_tc(&self, tc: PusTcCreator, token: Option<TcStateToken>) -> Result<(), EcssTmtcError>;
 }
 
-pub struct ReceivedTcWrapper {
-    pub store_addr: StoreAddr,
+/// A PUS telecommand packet can be stored in memory using different methods. Right now,
+/// storage inside a pool structure like [crate::pool::StaticMemoryPool], and storage inside a
+/// `Vec<u8>` are supported.
+#[non_exhaustive]
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum TcInMemory {
+    StoreAddr(StoreAddr),
+    #[cfg(feature = "alloc")]
+    Vec(alloc::vec::Vec<u8>),
+}
+
+impl From<StoreAddr> for TcInMemory {
+    fn from(value: StoreAddr) -> Self {
+        Self::StoreAddr(value)
+    }
+}
+
+#[cfg(feature = "alloc")]
+impl From<alloc::vec::Vec<u8>> for TcInMemory {
+    fn from(value: alloc::vec::Vec<u8>) -> Self {
+        Self::Vec(value)
+    }
+}
+
+/// Generic structure for an ECSS PUS Telecommand and its correspoding verification token.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct EcssTcAndToken {
+    pub tc_in_memory: TcInMemory,
     pub token: Option<TcStateToken>,
 }
+
+impl EcssTcAndToken {
+    pub fn new(tc_in_memory: impl Into<TcInMemory>, token: impl Into<TcStateToken>) -> Self {
+        Self {
+            tc_in_memory: tc_in_memory.into(),
+            token: Some(token.into()),
+        }
+    }
+}
+
+/// Generic abstraction for a telecommand being sent around after is has been accepted.
+pub struct AcceptedEcssTcAndToken {
+    pub tc_in_memory: TcInMemory,
+    pub token: VerificationToken<TcStateAccepted>,
+}
+
+impl From<AcceptedEcssTcAndToken> for EcssTcAndToken {
+    fn from(value: AcceptedEcssTcAndToken) -> Self {
+        EcssTcAndToken {
+            tc_in_memory: value.tc_in_memory,
+            token: Some(value.token.into()),
+        }
+    }
+}
+
+impl TryFrom<EcssTcAndToken> for AcceptedEcssTcAndToken {
+    type Error = ();
+
+    fn try_from(value: EcssTcAndToken) -> Result<Self, Self::Error> {
+        if let Some(TcStateToken::Accepted(token)) = value.token {
+            return Ok(AcceptedEcssTcAndToken {
+                tc_in_memory: value.tc_in_memory,
+                token,
+            });
+        }
+        Err(())
+    }
+}
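
The new `TcInMemory` abstraction decouples TC senders from the storage backend; a small sketch (crate paths as introduced in this diff, packet bytes arbitrary):

```rust
use satrs_core::pool::StoreAddr;
use satrs_core::pus::TcInMemory;

// Heap-backed TC (requires the `alloc` feature): the raw packet travels by value.
let heap_tc: TcInMemory = vec![0x18, 0x2b, 0xc0, 0x00, 0x00, 0x00].into();
// Pool-backed TC: only the u64 store address travels through the channel.
let addr: StoreAddr = 0x0002_0003;
let pool_tc = TcInMemory::from(addr);
assert_ne!(heap_tc, pool_tc);
```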
 #[derive(Debug, Clone)]
 pub enum TryRecvTmtcError {
     Error(EcssTmtcError),

@@ -231,7 +289,7 @@ impl From<StoreError> for TryRecvTmtcError {
 /// Generic trait for a user supplied receiver object.
 pub trait EcssTcReceiverCore: EcssChannel {
-    fn recv_tc(&self) -> Result<ReceivedTcWrapper, TryRecvTmtcError>;
+    fn recv_tc(&self) -> Result<EcssTcAndToken, TryRecvTmtcError>;
 }
 
 /// Generic trait for objects which can receive ECSS PUS telecommands. This trait is

@@ -309,21 +367,23 @@ mod alloc_mod {
 }
 
 #[cfg(feature = "std")]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
 pub mod std_mod {
-    use crate::pool::{SharedPool, StoreAddr};
+    use crate::pool::{PoolProviderMemInPlaceWithGuards, SharedStaticMemoryPool, StoreAddr};
     use crate::pus::verification::{
         StdVerifReporterWithSender, TcStateAccepted, VerificationToken,
     };
     use crate::pus::{
-        EcssChannel, EcssTcReceiver, EcssTcReceiverCore, EcssTmSender, EcssTmSenderCore,
-        EcssTmtcError, GenericRecvError, GenericSendError, PusTmWrapper, ReceivedTcWrapper,
-        TcAddrWithToken, TryRecvTmtcError,
+        EcssChannel, EcssTcAndToken, EcssTcReceiver, EcssTcReceiverCore, EcssTmSender,
+        EcssTmSenderCore, EcssTmtcError, GenericRecvError, GenericSendError, PusTmWrapper,
+        TryRecvTmtcError,
     };
     use crate::tmtc::tm_helper::SharedTmStore;
     use crate::ChannelId;
     use alloc::boxed::Box;
     use alloc::vec::Vec;
     use crossbeam_channel as cb;
+    use spacepackets::ecss::tc::PusTcReader;
     use spacepackets::ecss::tm::PusTmCreator;
     use spacepackets::ecss::PusError;
     use spacepackets::time::cds::TimeProvider;

@@ -335,6 +395,9 @@ pub mod std_mod {
     use std::sync::mpsc::TryRecvError;
     use thiserror::Error;
 
+    use super::verification::VerificationReporterWithSender;
+    use super::{AcceptedEcssTcAndToken, TcInMemory};
+
     impl From<mpsc::SendError<StoreAddr>> for EcssTmtcError {
         fn from(_: mpsc::SendError<StoreAddr>) -> Self {
             Self::Send(GenericSendError::RxDisconnected)

@@ -411,13 +474,13 @@ pub mod std_mod {
         }
     }
 
-    pub struct MpscTcInStoreReceiver {
+    pub struct MpscTcReceiver {
         id: ChannelId,
         name: &'static str,
-        receiver: mpsc::Receiver<TcAddrWithToken>,
+        receiver: mpsc::Receiver<EcssTcAndToken>,
     }
 
-    impl EcssChannel for MpscTcInStoreReceiver {
+    impl EcssChannel for MpscTcReceiver {
         fn id(&self) -> ChannelId {
             self.id
         }

@@ -427,26 +490,22 @@ pub mod std_mod {
         }
     }
 
-    impl EcssTcReceiverCore for MpscTcInStoreReceiver {
-        fn recv_tc(&self) -> Result<ReceivedTcWrapper, TryRecvTmtcError> {
-            let (store_addr, token) = self.receiver.try_recv().map_err(|e| match e {
+    impl EcssTcReceiverCore for MpscTcReceiver {
+        fn recv_tc(&self) -> Result<EcssTcAndToken, TryRecvTmtcError> {
+            self.receiver.try_recv().map_err(|e| match e {
                 TryRecvError::Empty => TryRecvTmtcError::Empty,
                 TryRecvError::Disconnected => {
                     TryRecvTmtcError::Error(EcssTmtcError::from(GenericRecvError::TxDisconnected))
                 }
-            })?;
-            Ok(ReceivedTcWrapper {
-                store_addr,
-                token: Some(token),
             })
         }
     }
 
-    impl MpscTcInStoreReceiver {
+    impl MpscTcReceiver {
         pub fn new(
             id: ChannelId,
             name: &'static str,
-            receiver: mpsc::Receiver<TcAddrWithToken>,
+            receiver: mpsc::Receiver<EcssTcAndToken>,
         ) -> Self {
             Self { id, name, receiver }
         }

@@ -459,8 +518,8 @@ pub mod std_mod {
     #[derive(Clone)]
     pub struct MpscTmAsVecSender {
         id: ChannelId,
-        sender: mpsc::Sender<Vec<u8>>,
         name: &'static str,
+        sender: mpsc::Sender<Vec<u8>>,
     }
 
     impl From<mpsc::SendError<Vec<u8>>> for EcssTmtcError {

@@ -545,23 +604,23 @@ pub mod std_mod {
         }
     }
 
-    pub struct CrossbeamTcInStoreReceiver {
+    pub struct CrossbeamTcReceiver {
         id: ChannelId,
         name: &'static str,
-        receiver: cb::Receiver<TcAddrWithToken>,
+        receiver: cb::Receiver<EcssTcAndToken>,
     }
 
-    impl CrossbeamTcInStoreReceiver {
+    impl CrossbeamTcReceiver {
         pub fn new(
             id: ChannelId,
             name: &'static str,
-            receiver: cb::Receiver<TcAddrWithToken>,
+            receiver: cb::Receiver<EcssTcAndToken>,
         ) -> Self {
             Self { id, name, receiver }
         }
     }
 
-    impl EcssChannel for CrossbeamTcInStoreReceiver {
+    impl EcssChannel for CrossbeamTcReceiver {
         fn id(&self) -> ChannelId {
             self.id
         }

@@ -571,17 +630,13 @@ pub mod std_mod {
         }
     }
 
-    impl EcssTcReceiverCore for CrossbeamTcInStoreReceiver {
-        fn recv_tc(&self) -> Result<ReceivedTcWrapper, TryRecvTmtcError> {
-            let (store_addr, token) = self.receiver.try_recv().map_err(|e| match e {
+    impl EcssTcReceiverCore for CrossbeamTcReceiver {
+        fn recv_tc(&self) -> Result<EcssTcAndToken, TryRecvTmtcError> {
+            self.receiver.try_recv().map_err(|e| match e {
                 cb::TryRecvError::Empty => TryRecvTmtcError::Empty,
                 cb::TryRecvError::Disconnected => {
                     TryRecvTmtcError::Error(EcssTmtcError::from(GenericRecvError::TxDisconnected))
                 }
-            })?;
-            Ok(ReceivedTcWrapper {
-                store_addr,
-                token: Some(token),
             })
         }
     }

@@ -596,8 +651,12 @@ pub mod std_mod {
         InvalidSubservice(u8),
         #[error("not enough application data available: {0}")]
         NotEnoughAppData(String),
+        #[error("PUS packet too large, does not fit in buffer: {0}")]
+        PusPacketTooLarge(usize),
         #[error("invalid application data")]
         InvalidAppData(String),
+        #[error("invalid format of TC in memory: {0:?}")]
+        InvalidTcInMemoryFormat(TcInMemory),
         #[error("generic ECSS tmtc error: {0}")]
         EcssTmtc(#[from] EcssTmtcError),
         #[error("invalid verification token")]
@@ -634,42 +693,126 @@ pub mod std_mod {
         }
     }
 
-    /// Base class for handlers which can handle PUS TC packets. Right now, the verification
-    /// reporter is constrained to the [StdVerifReporterWithSender] and the service handler
-    /// relies on TMTC packets being exchanged via a [SharedPool].
+    pub trait EcssTcInMemConverter {
+        fn cache_ecss_tc_in_memory(
+            &mut self,
+            possible_packet: &TcInMemory,
+        ) -> Result<(), PusPacketHandlingError>;
+
+        fn tc_slice_raw(&self) -> &[u8];
+
+        fn convert_ecss_tc_in_memory_to_reader(
+            &mut self,
+            possible_packet: &TcInMemory,
+        ) -> Result<PusTcReader<'_>, PusPacketHandlingError> {
+            self.cache_ecss_tc_in_memory(possible_packet)?;
+            Ok(PusTcReader::new(self.tc_slice_raw())?.0)
+        }
+    }
+
+    /// Converter structure for PUS telecommands which are stored inside a `Vec<u8>` structure.
+    /// Please note that this structure is not able to convert TCs which are stored inside a
+    /// [SharedStaticMemoryPool].
+    #[derive(Default, Clone)]
+    pub struct EcssTcInVecConverter {
+        pub pus_tc_raw: Option<Vec<u8>>,
+    }
+
+    impl EcssTcInMemConverter for EcssTcInVecConverter {
+        fn cache_ecss_tc_in_memory(
+            &mut self,
+            tc_in_memory: &TcInMemory,
+        ) -> Result<(), PusPacketHandlingError> {
+            self.pus_tc_raw = None;
+            match tc_in_memory {
+                super::TcInMemory::StoreAddr(_) => {
+                    return Err(PusPacketHandlingError::InvalidTcInMemoryFormat(
+                        tc_in_memory.clone(),
+                    ));
+                }
+                super::TcInMemory::Vec(vec) => {
+                    self.pus_tc_raw = Some(vec.clone());
+                }
+            };
+            Ok(())
+        }
+
+        fn tc_slice_raw(&self) -> &[u8] {
+            if self.pus_tc_raw.is_none() {
+                return &[];
+            }
+            self.pus_tc_raw.as_ref().unwrap()
+        }
+    }
+
+    /// Converter structure for PUS telecommands which are stored inside
+    /// [SharedStaticMemoryPool] structure. This is useful if run-time allocation for these
+    /// packets should be avoided. Please note that this structure is not able to convert TCs which
+    /// are stored as a `Vec<u8>`.
+    pub struct EcssTcInSharedStoreConverter {
+        shared_tc_store: SharedStaticMemoryPool,
+        pus_buf: Vec<u8>,
+    }
+
+    impl EcssTcInSharedStoreConverter {
+        pub fn new(shared_tc_store: SharedStaticMemoryPool, max_expected_tc_size: usize) -> Self {
+            Self {
+                shared_tc_store,
+                pus_buf: alloc::vec![0; max_expected_tc_size],
+            }
+        }
+
+        pub fn copy_tc_to_buf(&mut self, addr: StoreAddr) -> Result<(), PusPacketHandlingError> {
+            // Keep locked section as short as possible.
+            let mut tc_pool = self
+                .shared_tc_store
+                .write()
+                .map_err(|_| PusPacketHandlingError::EcssTmtc(EcssTmtcError::StoreLock))?;
+            let tc_guard = tc_pool.read_with_guard(addr);
+            let tc_raw = tc_guard.read().unwrap();
+            if tc_raw.len() > self.pus_buf.len() {
+                return Err(PusPacketHandlingError::PusPacketTooLarge(tc_raw.len()));
+            }
+            self.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw);
+            Ok(())
+        }
+    }
+
+    impl EcssTcInMemConverter for EcssTcInSharedStoreConverter {
+        fn cache_ecss_tc_in_memory(
+            &mut self,
+            tc_in_memory: &TcInMemory,
+        ) -> Result<(), PusPacketHandlingError> {
+            match tc_in_memory {
+                super::TcInMemory::StoreAddr(addr) => {
+                    self.copy_tc_to_buf(*addr)?;
+                }
+                super::TcInMemory::Vec(_) => {
+                    return Err(PusPacketHandlingError::InvalidTcInMemoryFormat(
+                        tc_in_memory.clone(),
+                    ));
+                }
+            };
+            Ok(())
+        }
+
+        fn tc_slice_raw(&self) -> &[u8] {
+            self.pus_buf.as_ref()
+        }
+    }
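
A sketch of the converter in action (assuming the `spacepackets` 0.7 API used by the tests below): a TC is serialized into a `Vec<u8>`, wrapped in `TcInMemory`, and handed to an `EcssTcInVecConverter`, which yields a `PusTcReader`.

```rust
use satrs_core::pus::{EcssTcInMemConverter, EcssTcInVecConverter, TcInMemory};
use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
use spacepackets::ecss::{PusPacket, WritablePusPacket};
use spacepackets::{SequenceFlags, SpHeader};

let mut sp_header = SpHeader::tc(0x101, SequenceFlags::Unsegmented, 0, 0).unwrap();
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
let mut buf = [0u8; 32];
let size = ping_tc.write_to_bytes(&mut buf).expect("writing TC failed");
let tc_in_mem: TcInMemory = buf[..size].to_vec().into();

let mut converter = EcssTcInVecConverter::default();
let tc = converter
    .convert_ecss_tc_in_memory_to_reader(&tc_in_mem)
    .expect("conversion failed");
assert_eq!(tc.service(), 17);
```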
pub struct PusServiceBase { pub struct PusServiceBase {
pub tc_receiver: Box<dyn EcssTcReceiver>, pub tc_receiver: Box<dyn EcssTcReceiver>,
pub shared_tc_store: SharedPool,
pub tm_sender: Box<dyn EcssTmSender>, pub tm_sender: Box<dyn EcssTmSender>,
pub tm_apid: u16, pub tm_apid: u16,
/// The verification handler is wrapped in a [RefCell] to allow the interior mutability /// The verification handler is wrapped in a [RefCell] to allow the interior mutability
/// pattern. This makes writing methods which are not mutable a lot easier. /// pattern. This makes writing methods which are not mutable a lot easier.
pub verification_handler: RefCell<StdVerifReporterWithSender>, pub verification_handler: RefCell<StdVerifReporterWithSender>,
pub pus_buf: [u8; 2048],
pub pus_size: usize,
} }
impl PusServiceBase { impl PusServiceBase {
pub fn new( #[cfg(feature = "std")]
tc_receiver: Box<dyn EcssTcReceiver>,
shared_tc_store: SharedPool,
tm_sender: Box<dyn EcssTmSender>,
tm_apid: u16,
verification_handler: StdVerifReporterWithSender,
) -> Self {
Self {
tc_receiver,
shared_tc_store,
tm_apid,
tm_sender,
verification_handler: RefCell::new(verification_handler),
pus_buf: [0; 2048],
pus_size: 0,
}
}
pub fn get_current_timestamp( pub fn get_current_timestamp(
&self,
partial_error: &mut Option<PartialPusHandlingError>, partial_error: &mut Option<PartialPusHandlingError>,
) -> [u8; 7] { ) -> [u8; 7] {
let mut time_stamp: [u8; 7] = [0; 7]; let mut time_stamp: [u8; 7] = [0; 7];
@ -684,48 +827,73 @@ pub mod std_mod {
time_stamp time_stamp
} }
pub fn get_current_timestamp_ignore_error(&self) -> [u8; 7] { #[cfg(feature = "std")]
pub fn get_current_timestamp_ignore_error() -> [u8; 7] {
let mut dummy = None; let mut dummy = None;
self.get_current_timestamp(&mut dummy) Self::get_current_timestamp(&mut dummy)
} }
} }
/// This is a high-level PUS packet handler helper.
///
/// It performs some of the boilerplate activities involved when handling PUS telecommands and
/// it can be used to implement the handling of PUS telecommands for certain PUS telecommand
/// groups (for example individual services).
///
/// This base class can handle PUS telecommands backed by different memory storage mechanisms
/// by using the [EcssTcInMemConverter] abstraction. This object provides some convenience
/// methods to make the generic parts of TC handling easier.
pub struct PusServiceHelper<TcInMemConverter: EcssTcInMemConverter> {
    pub common: PusServiceBase,
    pub tc_in_mem_converter: TcInMemConverter,
}

impl<TcInMemConverter: EcssTcInMemConverter> PusServiceHelper<TcInMemConverter> {
    pub fn new(
        tc_receiver: Box<dyn EcssTcReceiver>,
        tm_sender: Box<dyn EcssTmSender>,
        tm_apid: u16,
        verification_handler: VerificationReporterWithSender,
        tc_in_mem_converter: TcInMemConverter,
    ) -> Self {
        Self {
            common: PusServiceBase {
                tc_receiver,
                tm_sender,
                tm_apid,
                verification_handler: RefCell::new(verification_handler),
            },
            tc_in_mem_converter,
        }
    }

    /// This function can be used to poll the internal [EcssTcReceiver] object for the next
    /// telecommand packet. It will return `Ok(None)` if there are no packets available.
    /// In any other case, it will perform the acceptance of the ECSS TC packet using the
    /// internal [VerificationReporterWithSender] object. It will then return the telecommand
    /// and the according accepted token.
    pub fn retrieve_and_accept_next_packet(
        &mut self,
    ) -> Result<Option<AcceptedEcssTcAndToken>, PusPacketHandlingError> {
        match self.common.tc_receiver.recv_tc() {
            Ok(EcssTcAndToken {
                tc_in_memory,
                token,
            }) => {
                if token.is_none() {
                    return Err(PusPacketHandlingError::InvalidVerificationToken);
                }
                let token = token.unwrap();
                let accepted_token = VerificationToken::<TcStateAccepted>::try_from(token)
                    .map_err(|_| PusPacketHandlingError::InvalidVerificationToken)?;
                Ok(Some(AcceptedEcssTcAndToken {
                    tc_in_memory,
                    token: accepted_token,
                }))
            }
            Err(e) => match e {
                TryRecvTmtcError::Error(e) => Err(PusPacketHandlingError::EcssTmtc(e)),
                TryRecvTmtcError::Empty => Ok(None),
            },
        }
    }
}
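A service implementation built on this helper typically polls until the receiver runs empty. The following is a minimal sketch under stated assumptions: `service_helper` is an already constructed `PusServiceHelper<EcssTcInVecConverter>`, and the converter call mirrors the `convert_ecss_tc_in_memory_to_reader` method used by the concrete service handlers later in this commit.

// Sketch only: real services dispatch on the service/subservice fields and
// do verification reporting instead of printing. Assumes
// `use spacepackets::ecss::PusPacket;` is in scope for `tc.service()`.
fn poll_all_pending_tcs(
    service_helper: &mut PusServiceHelper<EcssTcInVecConverter>,
) -> Result<(), PusPacketHandlingError> {
    while let Some(accepted) = service_helper.retrieve_and_accept_next_packet()? {
        // Convert the in-memory representation (store address or vector)
        // into a PusTcReader for service-specific processing.
        let tc = service_helper
            .tc_in_mem_converter
            .convert_ecss_tc_in_memory_to_reader(&accepted.tc_in_memory)?;
        println!("received TC for service {}", tc.service());
    }
    Ok(())
}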
@@ -746,10 +914,35 @@ pub(crate) fn source_buffer_large_enough(cap: usize, len: usize) -> Result<(), E
}
#[cfg(test)]
pub mod tests {
    use std::sync::mpsc::TryRecvError;
    use std::sync::{mpsc, RwLock};

    use alloc::boxed::Box;
    use alloc::vec;

    use spacepackets::ecss::tc::PusTcCreator;
    use spacepackets::ecss::tm::{GenericPusTmSecondaryHeader, PusTmCreator, PusTmReader};
    use spacepackets::ecss::{PusPacket, WritablePusPacket};
    use spacepackets::CcsdsPacket;

    use crate::pool::{
        PoolProviderMemInPlace, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig,
        StoreAddr,
    };
    use crate::pus::verification::RequestId;
    use crate::tmtc::tm_helper::SharedTmStore;

    use super::verification::{
        TcStateAccepted, VerificationReporterCfg, VerificationReporterWithSender, VerificationToken,
    };
    use super::{
        EcssTcAndToken, EcssTcInSharedStoreConverter, EcssTcInVecConverter, MpscTcReceiver,
        MpscTmAsVecSender, MpscTmInStoreSender, PusPacketHandlerResult, PusPacketHandlingError,
        PusServiceHelper, TcInMemory,
    };

    pub const TEST_APID: u16 = 0x101;

    #[derive(Debug, Eq, PartialEq, Clone)]
    pub(crate) struct CommonTmInfo {
        pub subservice: u8,
@@ -759,12 +952,23 @@ pub(crate) mod tests {
        pub time_stamp: [u8; 7],
    }

    pub trait PusTestHarness {
        fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
        fn read_next_tm(&mut self) -> PusTmReader<'_>;
        fn check_no_tm_available(&self) -> bool;
        fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId);
    }

    pub trait SimplePusPacketHandler {
        fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
    }

    impl CommonTmInfo {
        pub fn new_from_tm(tm: &PusTmCreator) -> Self {
            let mut time_stamp = [0; 7];
            time_stamp.clone_from_slice(&tm.timestamp()[0..7]);
            Self {
                subservice: PusPacket::subservice(tm),
                apid: tm.apid(),
                msg_counter: tm.msg_counter(),
                dest_id: tm.dest_id(),
@@ -772,4 +976,192 @@ pub(crate) mod tests {
            }
        }
    }
/// Common fields for a PUS service test harness.
pub struct PusServiceHandlerWithSharedStoreCommon {
pus_buf: [u8; 2048],
tm_buf: [u8; 2048],
tc_pool: SharedStaticMemoryPool,
tm_pool: SharedTmStore,
tc_sender: mpsc::Sender<EcssTcAndToken>,
tm_receiver: mpsc::Receiver<StoreAddr>,
verification_handler: VerificationReporterWithSender,
}
impl PusServiceHandlerWithSharedStoreCommon {
/// This function generates the structure in addition to the PUS service helper
/// which might be required for a specific PUS service handler.
///
/// The PUS service helper is instantiated with an [EcssTcInSharedStoreConverter].
pub fn new() -> (Self, PusServiceHelper<EcssTcInSharedStoreConverter>) {
let pool_cfg = StaticPoolConfig::new(vec![(16, 16), (8, 32), (4, 64)]);
let tc_pool = StaticMemoryPool::new(pool_cfg.clone());
let tm_pool = StaticMemoryPool::new(pool_cfg);
let shared_tc_pool = SharedStaticMemoryPool::new(RwLock::new(tc_pool));
let shared_tm_pool = SharedTmStore::new(tm_pool);
let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::channel();
let (tm_tx, tm_rx) = mpsc::channel();
let verif_sender =
MpscTmInStoreSender::new(0, "verif_sender", shared_tm_pool.clone(), tm_tx.clone());
let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
let verification_handler =
VerificationReporterWithSender::new(&verif_cfg, Box::new(verif_sender));
let test_srv_tm_sender =
MpscTmInStoreSender::new(0, "TEST_SENDER", shared_tm_pool.clone(), tm_tx);
let test_srv_tc_receiver = MpscTcReceiver::new(0, "TEST_RECEIVER", test_srv_tc_rx);
let in_store_converter =
EcssTcInSharedStoreConverter::new(shared_tc_pool.clone(), 2048);
(
Self {
pus_buf: [0; 2048],
tm_buf: [0; 2048],
tc_pool: shared_tc_pool,
tm_pool: shared_tm_pool,
tc_sender: test_srv_tc_tx,
tm_receiver: tm_rx,
verification_handler: verification_handler.clone(),
},
PusServiceHelper::new(
Box::new(test_srv_tc_receiver),
Box::new(test_srv_tm_sender),
TEST_APID,
verification_handler,
in_store_converter,
),
)
}
pub fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let token = self.verification_handler.add_tc(tc);
let token = self
.verification_handler
.acceptance_success(token, Some(&[0; 7]))
.unwrap();
let tc_size = tc.write_to_bytes(&mut self.pus_buf).unwrap();
let mut tc_pool = self.tc_pool.write().unwrap();
let addr = tc_pool.add(&self.pus_buf[..tc_size]).unwrap();
drop(tc_pool);
// Send accepted TC to test service handler.
self.tc_sender
.send(EcssTcAndToken::new(addr, token))
.expect("sending tc failed");
token
}
pub fn read_next_tm(&mut self) -> PusTmReader<'_> {
let next_msg = self.tm_receiver.try_recv();
assert!(next_msg.is_ok());
let tm_addr = next_msg.unwrap();
let tm_pool = self.tm_pool.shared_pool.read().unwrap();
let tm_raw = tm_pool.read(&tm_addr).unwrap();
self.tm_buf[0..tm_raw.len()].copy_from_slice(tm_raw);
PusTmReader::new(&self.tm_buf, 7).unwrap().0
}
pub fn check_no_tm_available(&self) -> bool {
let next_msg = self.tm_receiver.try_recv();
if let TryRecvError::Empty = next_msg.unwrap_err() {
return true;
}
false
}
pub fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId) {
let next_msg = self.tm_receiver.try_recv();
assert!(next_msg.is_ok());
let tm_addr = next_msg.unwrap();
let tm_pool = self.tm_pool.shared_pool.read().unwrap();
let tm_raw = tm_pool.read(&tm_addr).unwrap();
let tm = PusTmReader::new(tm_raw, 7).unwrap().0;
assert_eq!(PusPacket::service(&tm), 1);
assert_eq!(PusPacket::subservice(&tm), subservice);
assert_eq!(tm.apid(), TEST_APID);
let req_id =
RequestId::from_bytes(tm.user_data()).expect("generating request ID failed");
assert_eq!(req_id, expected_request_id);
}
}
pub struct PusServiceHandlerWithVecCommon {
current_tm: Option<alloc::vec::Vec<u8>>,
tc_sender: mpsc::Sender<EcssTcAndToken>,
tm_receiver: mpsc::Receiver<alloc::vec::Vec<u8>>,
verification_handler: VerificationReporterWithSender,
}
impl PusServiceHandlerWithVecCommon {
pub fn new() -> (Self, PusServiceHelper<EcssTcInVecConverter>) {
let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::channel();
let (tm_tx, tm_rx) = mpsc::channel();
let verif_sender = MpscTmAsVecSender::new(0, "verification-sender", tm_tx.clone());
let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
let verification_handler =
VerificationReporterWithSender::new(&verif_cfg, Box::new(verif_sender));
let test_srv_tm_sender = MpscTmAsVecSender::new(0, "test-sender", tm_tx);
let test_srv_tc_receiver = MpscTcReceiver::new(0, "test-receiver", test_srv_tc_rx);
let in_store_converter = EcssTcInVecConverter::default();
(
Self {
current_tm: None,
tc_sender: test_srv_tc_tx,
tm_receiver: tm_rx,
verification_handler: verification_handler.clone(),
},
PusServiceHelper::new(
Box::new(test_srv_tc_receiver),
Box::new(test_srv_tm_sender),
TEST_APID,
verification_handler,
in_store_converter,
),
)
}
pub fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let token = self.verification_handler.add_tc(tc);
let token = self
.verification_handler
.acceptance_success(token, Some(&[0; 7]))
.unwrap();
// Send accepted TC to test service handler.
self.tc_sender
.send(EcssTcAndToken::new(
TcInMemory::Vec(tc.to_vec().expect("pus tc conversion to vec failed")),
token,
))
.expect("sending tc failed");
token
}
pub fn read_next_tm(&mut self) -> PusTmReader<'_> {
let next_msg = self.tm_receiver.try_recv();
assert!(next_msg.is_ok());
self.current_tm = Some(next_msg.unwrap());
PusTmReader::new(self.current_tm.as_ref().unwrap(), 7)
.unwrap()
.0
}
pub fn check_no_tm_available(&self) -> bool {
let next_msg = self.tm_receiver.try_recv();
if let TryRecvError::Empty = next_msg.unwrap_err() {
return true;
}
false
}
pub fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId) {
let next_msg = self.tm_receiver.try_recv();
assert!(next_msg.is_ok());
let next_msg = next_msg.unwrap();
let tm = PusTmReader::new(next_msg.as_slice(), 7).unwrap().0;
assert_eq!(PusPacket::service(&tm), 1);
assert_eq!(PusPacket::subservice(&tm), subservice);
assert_eq!(tm.apid(), TEST_APID);
let req_id =
RequestId::from_bytes(tm.user_data()).expect("generating request ID failed");
assert_eq!(req_id, expected_request_id);
}
}
}

File diff suppressed because it is too large

View File

@@ -1,141 +1,130 @@
use super::scheduler::PusSchedulerInterface;
use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
use crate::pool::PoolProviderMemInPlace;
use crate::pus::{PusPacketHandlerResult, PusPacketHandlingError};
use alloc::string::ToString;
use spacepackets::ecss::{scheduling, PusPacket};
use spacepackets::time::cds::TimeProvider;

/// This is a helper class for [std] environments to handle generic PUS 11 (scheduling service)
/// packets. This handler is able to handle the most important PUS requests for a scheduling
/// service which provides the [PusSchedulerInterface].
///
/// Please note that this class does not do the regular periodic handling like releasing any
/// telecommands inside the scheduler. The user can retrieve the wrapped scheduler via the
/// [Self::scheduler] and [Self::scheduler_mut] functions and then use the scheduler API to
/// release telecommands when applicable.
pub struct PusService11SchedHandler<
    TcInMemConverter: EcssTcInMemConverter,
    Scheduler: PusSchedulerInterface,
> {
    pub service_helper: PusServiceHelper<TcInMemConverter>,
    scheduler: Scheduler,
}

impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerInterface>
    PusService11SchedHandler<TcInMemConverter, Scheduler>
{
    pub fn new(service_helper: PusServiceHelper<TcInMemConverter>, scheduler: Scheduler) -> Self {
        Self {
            service_helper,
            scheduler,
        }
    }

    pub fn scheduler_mut(&mut self) -> &mut Scheduler {
        &mut self.scheduler
    }

    pub fn scheduler(&self) -> &Scheduler {
        &self.scheduler
    }

    pub fn handle_one_tc(
        &mut self,
        sched_tc_pool: &mut (impl PoolProviderMemInPlace + ?Sized),
    ) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
        let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
        if possible_packet.is_none() {
            return Ok(PusPacketHandlerResult::Empty);
        }
        let ecss_tc_and_token = possible_packet.unwrap();
        let tc = self
            .service_helper
            .tc_in_mem_converter
            .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
        let subservice = PusPacket::subservice(&tc);
        let standard_subservice = scheduling::Subservice::try_from(subservice);
        if standard_subservice.is_err() {
            return Ok(PusPacketHandlerResult::CustomSubservice(
                subservice,
                ecss_tc_and_token.token,
            ));
        }
        let mut partial_error = None;
        let time_stamp = PusServiceBase::get_current_timestamp(&mut partial_error);
        match standard_subservice.unwrap() {
            scheduling::Subservice::TcEnableScheduling => {
                let start_token = self
                    .service_helper
                    .common
                    .verification_handler
                    .get_mut()
                    .start_success(ecss_tc_and_token.token, Some(&time_stamp))
                    .expect("Error sending start success");
                self.scheduler.enable();
                if self.scheduler.is_enabled() {
                    self.service_helper
                        .common
                        .verification_handler
                        .get_mut()
                        .completion_success(start_token, Some(&time_stamp))
                        .expect("Error sending completion success");
                } else {
                    return Err(PusPacketHandlingError::Other(
                        "failed to enable scheduler".to_string(),
                    ));
                }
            }
            scheduling::Subservice::TcDisableScheduling => {
                let start_token = self
                    .service_helper
                    .common
                    .verification_handler
                    .get_mut()
                    .start_success(ecss_tc_and_token.token, Some(&time_stamp))
                    .expect("Error sending start success");
                self.scheduler.disable();
                if !self.scheduler.is_enabled() {
                    self.service_helper
                        .common
                        .verification_handler
                        .get_mut()
                        .completion_success(start_token, Some(&time_stamp))
                        .expect("Error sending completion success");
                } else {
                    return Err(PusPacketHandlingError::Other(
                        "failed to disable scheduler".to_string(),
                    ));
                }
            }
            scheduling::Subservice::TcResetScheduling => {
                let start_token = self
                    .service_helper
                    .common
                    .verification_handler
                    .get_mut()
                    .start_success(ecss_tc_and_token.token, Some(&time_stamp))
                    .expect("Error sending start success");
                self.scheduler
                    .reset(sched_tc_pool)
                    .expect("Error resetting TC Pool");
                self.service_helper
                    .common
                    .verification_handler
                    .get_mut()
                    .completion_success(start_token, Some(&time_stamp))
@@ -143,31 +132,30 @@ impl PusServiceHandler for PusService11SchedHandler {
            }
            scheduling::Subservice::TcInsertActivity => {
                let start_token = self
                    .service_helper
                    .common
                    .verification_handler
                    .get_mut()
                    .start_success(ecss_tc_and_token.token, Some(&time_stamp))
                    .expect("error sending start success");
                self.scheduler
                    .insert_wrapped_tc::<TimeProvider>(&tc, sched_tc_pool)
                    .expect("insertion of activity into pool failed");
                self.service_helper
                    .common
                    .verification_handler
                    .get_mut()
                    .completion_success(start_token, Some(&time_stamp))
                    .expect("sending completion success failed");
            }
            _ => {
                // Treat unhandled standard subservices as custom subservices for now.
                return Ok(PusPacketHandlerResult::CustomSubservice(
                    subservice,
                    ecss_tc_and_token.token,
                ));
            }
        }
@@ -176,9 +164,192 @@ impl PusServiceHandler for PusService11SchedHandler {
                partial_error,
            ));
        }
        Ok(PusPacketHandlerResult::RequestHandled)
    }
}
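Because the handler no longer owns a shared TC pool, the caller now passes the scheduler's backing pool into every `handle_one_tc` call. A hedged sketch of a hosting loop follows; the handler and pool construction, as well as the error printing, are assumptions for illustration and not part of this diff.

// Drain all currently pending service 11 requests. Periodic TC release
// remains the caller's job via handler.scheduler_mut(), as the doc comment
// above notes. PusPacketHandlingError implements Display in this commit.
fn sched_service_cycle<Converter: EcssTcInMemConverter, Sched: PusSchedulerInterface>(
    handler: &mut PusService11SchedHandler<Converter, Sched>,
    sched_tc_pool: &mut StaticMemoryPool,
) {
    loop {
        match handler.handle_one_tc(sched_tc_pool) {
            Ok(PusPacketHandlerResult::Empty) => break,
            Ok(_) => continue,
            Err(e) => {
                println!("PUS 11 packet handling error: {e}");
                break;
            }
        }
    }
}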
#[cfg(test)]
mod tests {
use crate::pool::{StaticMemoryPool, StaticPoolConfig};
use crate::pus::tests::TEST_APID;
use crate::pus::{
scheduler::{self, PusSchedulerInterface, TcInfo},
tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness},
verification::{RequestId, TcStateAccepted, VerificationToken},
EcssTcInSharedStoreConverter,
};
use alloc::collections::VecDeque;
use delegate::delegate;
use spacepackets::ecss::scheduling::Subservice;
use spacepackets::ecss::tc::PusTcSecondaryHeader;
use spacepackets::ecss::WritablePusPacket;
use spacepackets::time::TimeWriter;
use spacepackets::SpHeader;
use spacepackets::{
ecss::{tc::PusTcCreator, tm::PusTmReader},
time::cds,
};
use super::PusService11SchedHandler;
struct Pus11HandlerWithStoreTester {
common: PusServiceHandlerWithSharedStoreCommon,
handler: PusService11SchedHandler<EcssTcInSharedStoreConverter, TestScheduler>,
sched_tc_pool: StaticMemoryPool,
}
impl Pus11HandlerWithStoreTester {
pub fn new() -> Self {
let test_scheduler = TestScheduler::default();
let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)]);
let sched_tc_pool = StaticMemoryPool::new(pool_cfg.clone());
let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
Self {
common,
handler: PusService11SchedHandler::new(srv_handler, test_scheduler),
sched_tc_pool,
}
}
}
impl PusTestHarness for Pus11HandlerWithStoreTester {
delegate! {
to self.common {
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
fn read_next_tm(&mut self) -> PusTmReader<'_>;
fn check_no_tm_available(&self) -> bool;
fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId);
}
}
}
#[derive(Default)]
pub struct TestScheduler {
reset_count: u32,
enabled: bool,
enabled_count: u32,
disabled_count: u32,
inserted_tcs: VecDeque<TcInfo>,
}
impl PusSchedulerInterface for TestScheduler {
type TimeProvider = cds::TimeProvider;
fn reset(
&mut self,
_store: &mut (impl crate::pool::PoolProviderMemInPlace + ?Sized),
) -> Result<(), crate::pool::StoreError> {
self.reset_count += 1;
Ok(())
}
fn is_enabled(&self) -> bool {
self.enabled
}
fn enable(&mut self) {
self.enabled_count += 1;
self.enabled = true;
}
fn disable(&mut self) {
self.disabled_count += 1;
self.enabled = false;
}
fn insert_unwrapped_and_stored_tc(
&mut self,
_time_stamp: spacepackets::time::UnixTimestamp,
info: crate::pus::scheduler::TcInfo,
) -> Result<(), crate::pus::scheduler::ScheduleError> {
self.inserted_tcs.push_back(info);
Ok(())
}
}
fn generic_subservice_send(
test_harness: &mut Pus11HandlerWithStoreTester,
subservice: Subservice,
) {
let mut reply_header = SpHeader::tm_unseg(TEST_APID, 0, 0).unwrap();
let tc_header = PusTcSecondaryHeader::new_simple(11, subservice as u8);
let enable_scheduling = PusTcCreator::new(&mut reply_header, tc_header, &[0; 7], true);
let token = test_harness.send_tc(&enable_scheduling);
let request_id = token.req_id();
test_harness
.handler
.handle_one_tc(&mut test_harness.sched_tc_pool)
.unwrap();
test_harness.check_next_verification_tm(1, request_id);
test_harness.check_next_verification_tm(3, request_id);
test_harness.check_next_verification_tm(7, request_id);
}
#[test]
fn test_scheduling_enabling_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
test_harness.handler.scheduler_mut().disable();
assert!(!test_harness.handler.scheduler().is_enabled());
generic_subservice_send(&mut test_harness, Subservice::TcEnableScheduling);
assert!(test_harness.handler.scheduler().is_enabled());
assert_eq!(test_harness.handler.scheduler().enabled_count, 1);
}
#[test]
fn test_scheduling_disabling_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
test_harness.handler.scheduler_mut().enable();
assert!(test_harness.handler.scheduler().is_enabled());
generic_subservice_send(&mut test_harness, Subservice::TcDisableScheduling);
assert!(!test_harness.handler.scheduler().is_enabled());
assert_eq!(test_harness.handler.scheduler().disabled_count, 1);
}
#[test]
fn test_reset_scheduler_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
generic_subservice_send(&mut test_harness, Subservice::TcResetScheduling);
assert_eq!(test_harness.handler.scheduler().reset_count, 1);
}
#[test]
fn test_insert_activity_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
let mut reply_header = SpHeader::tm_unseg(TEST_APID, 0, 0).unwrap();
let mut sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new(&mut reply_header, sec_header, &[], true);
let req_id_ping_tc = scheduler::RequestId::from_tc(&ping_tc);
let stamper = cds::TimeProvider::from_now_with_u16_days().expect("time provider failed");
let mut sched_app_data: [u8; 64] = [0; 64];
let mut written_len = stamper.write_to_bytes(&mut sched_app_data).unwrap();
let ping_raw = ping_tc.to_vec().expect("generating raw tc failed");
sched_app_data[written_len..written_len + ping_raw.len()].copy_from_slice(&ping_raw);
written_len += ping_raw.len();
reply_header = SpHeader::tm_unseg(TEST_APID, 1, 0).unwrap();
sec_header = PusTcSecondaryHeader::new_simple(11, Subservice::TcInsertActivity as u8);
let enable_scheduling = PusTcCreator::new(
&mut reply_header,
sec_header,
&sched_app_data[..written_len],
true,
);
let token = test_harness.send_tc(&enable_scheduling);
let request_id = token.req_id();
test_harness
.handler
.handle_one_tc(&mut test_harness.sched_tc_pool)
.unwrap();
test_harness.check_next_verification_tm(1, request_id);
test_harness.check_next_verification_tm(3, request_id);
test_harness.check_next_verification_tm(7, request_id);
let tc_info = test_harness
.handler
.scheduler_mut()
.inserted_tcs
.pop_front()
.unwrap();
assert_eq!(tc_info.request_id(), req_id_ping_tc);
}
}

View File

@@ -1,67 +1,45 @@
use crate::pus::{
    PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, PusTmWrapper,
};
use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use spacepackets::ecss::PusPacket;
use spacepackets::SpHeader;

use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};

/// This is a helper class for [std] environments to handle generic PUS 17 (test service) packets.
/// This handler only processes ping requests and generates a ping reply for them accordingly.
pub struct PusService17TestHandler<TcInMemConverter: EcssTcInMemConverter> {
    pub service_helper: PusServiceHelper<TcInMemConverter>,
}

impl<TcInMemConverter: EcssTcInMemConverter> PusService17TestHandler<TcInMemConverter> {
    pub fn new(service_helper: PusServiceHelper<TcInMemConverter>) -> Self {
        Self { service_helper }
    }

    pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
        let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
        if possible_packet.is_none() {
            return Ok(PusPacketHandlerResult::Empty);
        }
        let ecss_tc_and_token = possible_packet.unwrap();
        let tc = self
            .service_helper
            .tc_in_mem_converter
            .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
        if tc.service() != 17 {
            return Err(PusPacketHandlingError::WrongService(tc.service()));
        }
        if tc.subservice() == 1 {
            let mut partial_error = None;
            let time_stamp = PusServiceBase::get_current_timestamp(&mut partial_error);
            let result = self
                .service_helper
                .common
                .verification_handler
                .get_mut()
                .start_success(ecss_tc_and_token.token, Some(&time_stamp))
                .map_err(|_| PartialPusHandlingError::Verification);
            let start_token = if let Ok(result) = result {
                Some(result)
@@ -70,11 +48,13 @@ impl PusServiceHandler for PusService17TestHandler {
                None
            };
            // Sequence count will be handled centrally in TM funnel.
            let mut reply_header =
                SpHeader::tm_unseg(self.service_helper.common.tm_apid, 0, 0).unwrap();
            let tc_header = PusTmSecondaryHeader::new_simple(17, 2, &time_stamp);
            let ping_reply = PusTmCreator::new(&mut reply_header, tc_header, &[], true);
            let result = self
                .service_helper
                .common
                .tm_sender
                .send_tm(PusTmWrapper::Direct(ping_reply))
                .map_err(PartialPusHandlingError::TmSend);
@@ -84,7 +64,8 @@ impl PusServiceHandler for PusService17TestHandler {
            if let Some(start_token) = start_token {
                if self
                    .service_helper
                    .common
                    .verification_handler
                    .get_mut()
                    .completion_success(start_token, Some(&time_stamp))
@@ -98,121 +79,194 @@ impl PusServiceHandler for PusService17TestHandler {
                    partial_error,
                ));
            };
        } else {
            return Ok(PusPacketHandlerResult::CustomSubservice(
                tc.subservice(),
                ecss_tc_and_token.token,
            ));
        }
        Ok(PusPacketHandlerResult::RequestHandled)
    }
}
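The test service is driven the same way as the scheduler service. Below is a minimal sketch of a hosting loop, assuming a handler already built from the harness pieces shown in the tests that follow; the partial-success branch relies on the `RequestHandledPartialSuccess` variant used by this handler.

fn pus17_service_cycle<Converter: EcssTcInMemConverter>(
    handler: &mut PusService17TestHandler<Converter>,
) {
    loop {
        match handler.handle_one_tc() {
            Ok(PusPacketHandlerResult::Empty) => break,
            Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(e)) => {
                // The ping reply was generated, but some verification or
                // TM sending step reported a partial error.
                println!("ping handled with partial error: {e:?}");
            }
            Ok(_) => continue,
            Err(e) => {
                println!("PUS 17 packet handling error: {e}");
                break;
            }
        }
    }
}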
#[cfg(test)]
mod tests {
    use crate::pus::tests::{
        PusServiceHandlerWithSharedStoreCommon, PusServiceHandlerWithVecCommon, PusTestHarness,
        SimplePusPacketHandler, TEST_APID,
    };
    use crate::pus::verification::RequestId;
    use crate::pus::verification::{TcStateAccepted, VerificationToken};
    use crate::pus::{
        EcssTcInSharedStoreConverter, EcssTcInVecConverter, PusPacketHandlerResult,
        PusPacketHandlingError,
    };
    use delegate::delegate;
    use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
    use spacepackets::ecss::tm::PusTmReader;
    use spacepackets::ecss::PusPacket;
    use spacepackets::{SequenceFlags, SpHeader};

    use super::PusService17TestHandler;

    struct Pus17HandlerWithStoreTester {
        common: PusServiceHandlerWithSharedStoreCommon,
        handler: PusService17TestHandler<EcssTcInSharedStoreConverter>,
    }

    impl Pus17HandlerWithStoreTester {
        pub fn new() -> Self {
            let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
            let pus_17_handler = PusService17TestHandler::new(srv_handler);
            Self {
                common,
                handler: pus_17_handler,
            }
        }
    }

    impl PusTestHarness for Pus17HandlerWithStoreTester {
        delegate! {
            to self.common {
                fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
                fn read_next_tm(&mut self) -> PusTmReader<'_>;
                fn check_no_tm_available(&self) -> bool;
                fn check_next_verification_tm(
                    &self,
                    subservice: u8,
                    expected_request_id: RequestId,
                );
            }
        }
    }

    impl SimplePusPacketHandler for Pus17HandlerWithStoreTester {
        delegate! {
            to self.handler {
                fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
            }
        }
    }

    struct Pus17HandlerWithVecTester {
        common: PusServiceHandlerWithVecCommon,
        handler: PusService17TestHandler<EcssTcInVecConverter>,
    }

    impl Pus17HandlerWithVecTester {
        pub fn new() -> Self {
            let (common, srv_handler) = PusServiceHandlerWithVecCommon::new();
            Self {
                common,
                handler: PusService17TestHandler::new(srv_handler),
            }
        }
    }

    impl PusTestHarness for Pus17HandlerWithVecTester {
        delegate! {
            to self.common {
                fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
                fn read_next_tm(&mut self) -> PusTmReader<'_>;
                fn check_no_tm_available(&self) -> bool;
                fn check_next_verification_tm(
                    &self,
                    subservice: u8,
                    expected_request_id: RequestId,
                );
            }
        }
    }

    impl SimplePusPacketHandler for Pus17HandlerWithVecTester {
        delegate! {
            to self.handler {
                fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
            }
        }
    }

    fn ping_test(test_harness: &mut (impl PusTestHarness + SimplePusPacketHandler)) {
        // Create a ping TC, verify acceptance.
        let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
        let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
        let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
        let token = test_harness.send_tc(&ping_tc);
        let request_id = token.req_id();
        let result = test_harness.handle_one_tc();
        assert!(result.is_ok());

        // We should see 4 replies in the TM queue now: Acceptance TM, Start TM, ping reply and
        // Completion TM.

        // Acceptance TM
        test_harness.check_next_verification_tm(1, request_id);

        // Start TM
        test_harness.check_next_verification_tm(3, request_id);

        // Ping reply
        let tm = test_harness.read_next_tm();
        assert_eq!(tm.service(), 17);
        assert_eq!(tm.subservice(), 2);
        assert!(tm.user_data().is_empty());

        // TM completion
        test_harness.check_next_verification_tm(7, request_id);
    }

    #[test]
    fn test_basic_ping_processing_using_store() {
        let mut test_harness = Pus17HandlerWithStoreTester::new();
        ping_test(&mut test_harness);
    }

    #[test]
    fn test_basic_ping_processing_using_vec() {
        let mut test_harness = Pus17HandlerWithVecTester::new();
        ping_test(&mut test_harness);
    }
#[test]
fn test_empty_tc_queue() {
let mut test_harness = Pus17HandlerWithStoreTester::new();
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
let result = result.unwrap();
if let PusPacketHandlerResult::Empty = result {
} else {
panic!("unexpected result type {result:?}")
}
}
#[test]
fn test_sending_unsupported_service() {
let mut test_harness = Pus17HandlerWithStoreTester::new();
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
let sec_header = PusTcSecondaryHeader::new_simple(3, 1);
let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
test_harness.send_tc(&ping_tc);
let result = test_harness.handle_one_tc();
assert!(result.is_err());
let error = result.unwrap_err();
if let PusPacketHandlingError::WrongService(num) = error {
assert_eq!(num, 3);
} else {
panic!("unexpected error type {error}")
}
}
#[test]
fn test_sending_custom_subservice() {
let mut test_harness = Pus17HandlerWithStoreTester::new();
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
let sec_header = PusTcSecondaryHeader::new_simple(17, 200);
let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
test_harness.send_tc(&ping_tc);
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
let result = result.unwrap();
if let PusPacketHandlerResult::CustomSubservice(subservice, _) = result {
assert_eq!(subservice, 200);
} else {
panic!("unexpected result type {result:?}")
}
}
}

View File

@@ -15,7 +15,7 @@
//! ```
//! use std::sync::{Arc, mpsc, RwLock};
//! use std::time::Duration;
//! use satrs_core::pool::{PoolProviderMemInPlaceWithGuards, StaticMemoryPool, StaticPoolConfig};
//! use satrs_core::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender};
//! use satrs_core::seq_count::SeqCountProviderSimple;
//! use satrs_core::pus::MpscTmInStoreSender;
@@ -28,9 +28,9 @@
//! const EMPTY_STAMP: [u8; 7] = [0; 7];
//! const TEST_APID: u16 = 0x02;
//!
//! let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
//! let tm_pool = StaticMemoryPool::new(pool_cfg.clone());
//! let shared_tm_store = SharedTmStore::new(tm_pool);
//! let tm_store = shared_tm_store.clone_backing_pool();
//! let (verif_tx, verif_rx) = mpsc::channel();
//! let sender = MpscTmInStoreSender::new(0, "Test Sender", shared_tm_store, verif_tx);
@@ -208,6 +208,8 @@ impl WasAtLeastAccepted for TcStateAccepted {}
impl WasAtLeastAccepted for TcStateStarted {}
impl WasAtLeastAccepted for TcStateCompleted {}

/// Token wrapper to model all possible verification tokens. These tokens are used to
/// enforce the correct order for the verification steps when doing verification reporting.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum TcStateToken {
    None(VerificationToken<TcStateNone>),
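The ordering these tokens enforce is easiest to see end to end. A short sketch using the `VerificationReporterWithSender` convenience API as it appears in the test harness earlier in this commit; `verification_handler`, `ping_tc` and `EMPTY_STAMP` are assumed to exist in the surrounding scope.

// Each step consumes the previous token, so reporting completion before
// start simply does not compile.
let token = verification_handler.add_tc(&ping_tc); // VerificationToken<TcStateNone>
let accepted = verification_handler
    .acceptance_success(token, Some(&EMPTY_STAMP))
    .expect("acceptance reporting failed"); // VerificationToken<TcStateAccepted>
let started = verification_handler
    .start_success(accepted, Some(&EMPTY_STAMP))
    .expect("start reporting failed"); // VerificationToken<TcStateStarted>
verification_handler
    .completion_success(started, Some(&EMPTY_STAMP))
    .expect("completion reporting failed");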
@@ -1323,7 +1325,7 @@ mod std_mod {
#[cfg(test)]
mod tests {
    use crate::pool::{PoolProviderMemInPlaceWithGuards, StaticMemoryPool, StaticPoolConfig};
    use crate::pus::tests::CommonTmInfo;
    use crate::pus::verification::{
        EcssTmSenderCore, EcssTmtcError, FailParams, FailParamsWithStep, RequestId, TcStateNone,
@@ -1447,12 +1449,11 @@ mod tests {
    fn base_init(api_sel: bool) -> (TestBase<'static>, VerificationToken<TcStateNone>) {
        let mut reporter = base_reporter();
        let (tc, req_id) = base_tc_init(None);
        let init_tok = if api_sel {
            reporter.add_tc_with_req_id(req_id)
        } else {
            reporter.add_tc(&tc)
        };
        (TestBase { vr: reporter, tc }, init_tok)
    }
@@ -1475,7 +1476,7 @@ mod tests {
                time_stamp: EMPTY_STAMP,
            },
            additional_data: None,
            req_id: *req_id,
        };
        let mut service_queue = sender.service_queue.borrow_mut();
        assert_eq!(service_queue.len(), 1);
@@ -1485,9 +1486,8 @@ mod tests {
    #[test]
    fn test_mpsc_verif_send_sync() {
        let pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(8, 8)]));
        let shared_tm_store = SharedTmStore::new(pool);
        let (tx, _) = mpsc::channel();
        let mpsc_verif_sender = MpscTmInStoreSender::new(0, "verif_sender", shared_tm_store, tx);
        is_send(&mpsc_verif_sender);
@@ -1505,7 +1505,7 @@ mod tests {
    fn test_basic_acceptance_success() {
        let (b, tok) = base_init(false);
        let mut sender = TestSender::default();
        b.vr.acceptance_success(tok, &sender, Some(&EMPTY_STAMP))
            .expect("Sending acceptance success failed");
        acceptance_check(&mut sender, &tok.req_id);
    }
@@ -1605,7 +1605,7 @@ mod tests {
    #[test]
    fn test_basic_acceptance_failure_with_fail_data() {
        let (b, tok) = base_init(false);
        let sender = TestSender::default();
        let fail_code = EcssEnumU8::new(10);
        let fail_data = EcssEnumU32::new(12);
        let mut fail_data_raw = [0; 4];
@@ -1615,7 +1615,7 @@ mod tests {
            &fail_code,
            Some(fail_data_raw.as_slice()),
        );
        b.vr.acceptance_failure(tok, &sender, fail_params)
            .expect("Sending acceptance success failed");
        let cmp_info = TmInfo {
            common: CommonTmInfo {
@@ -1784,8 +1784,7 @@ mod tests {
            .rep()
            .start_success(accepted_token, &mut sender, Some(&[0, 1, 0, 1, 0, 1, 0]))
            .expect("Sending start success failed");
        b.rep()
            .step_success(
                &started_token,
                &mut sender,
@@ -1793,16 +1792,13 @@ mod tests {
                EcssEnumU8::new(0),
            )
            .expect("Sending step 0 success failed");
        b.vr.step_success(
            &started_token,
            &mut sender,
            Some(&EMPTY_STAMP),
            EcssEnumU8::new(1),
        )
        .expect("Sending step 1 success failed");
        assert_eq!(sender.service_queue.borrow().len(), 4);
        step_success_check(&mut sender, tok.req_id);
    }
@@ -1818,16 +1814,12 @@ mod tests {
            .helper
            .start_success(accepted_token, Some(&[0, 1, 0, 1, 0, 1, 0]))
            .expect("Sending start success failed");
        b.helper
            .step_success(&started_token, Some(&EMPTY_STAMP), EcssEnumU8::new(0))
            .expect("Sending step 0 success failed");
        b.helper
            .step_success(&started_token, Some(&EMPTY_STAMP), EcssEnumU8::new(1))
            .expect("Sending step 1 success failed");
        let sender: &mut TestSender = b.helper.sender.downcast_mut().unwrap();
        assert_eq!(sender.service_queue.borrow().len(), 4);
        step_success_check(sender, tok.req_id);
@@ -2122,10 +2114,8 @@ mod tests {
        let started_token =
            b.vr.start_success(accepted_token, &mut sender, Some(&[0, 1, 0, 1, 0, 1, 0]))
                .expect("Sending start success failed");
        b.vr.completion_success(started_token, &mut sender, Some(&EMPTY_STAMP))
            .expect("Sending completion success failed");
        completion_success_check(&mut sender, tok.req_id);
    }
@@ -2140,11 +2130,9 @@ mod tests {
            .helper
            .start_success(accepted_token, Some(&[0, 1, 0, 1, 0, 1, 0]))
            .expect("Sending start success failed");
        b.helper
            .completion_success(started_token, Some(&EMPTY_STAMP))
            .expect("Sending completion success failed");
        let sender: &mut TestSender = b.helper.sender.downcast_mut().unwrap();
        completion_success_check(sender, tok.req_id);
    }
@@ -2152,8 +2140,8 @@ mod tests {
    #[test]
    // TODO: maybe a bit more extensive testing, all I have time for right now
    fn test_seq_count_increment() {
        let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
        let tm_pool = StaticMemoryPool::new(pool_cfg.clone());
        let shared_tm_store = SharedTmStore::new(tm_pool);
        let shared_tm_pool = shared_tm_store.clone_backing_pool();
        let (verif_tx, verif_rx) = mpsc::channel();

View File

@@ -8,7 +8,9 @@ pub use std_mod::*;
#[cfg(feature = "std")]
pub mod std_mod {
    use crate::pool::{
        PoolProviderMemInPlace, SharedStaticMemoryPool, StaticMemoryPool, StoreAddr,
    };
    use crate::pus::EcssTmtcError;
    use spacepackets::ecss::tm::PusTmCreator;
    use spacepackets::ecss::WritablePusPacket;
@@ -16,22 +18,25 @@ pub mod std_mod {
    #[derive(Clone)]
    pub struct SharedTmStore {
        pub shared_pool: SharedStaticMemoryPool,
    }

    impl SharedTmStore {
        pub fn new(shared_pool: StaticMemoryPool) -> Self {
            Self {
                shared_pool: Arc::new(RwLock::new(shared_pool)),
            }
        }

        pub fn clone_backing_pool(&self) -> SharedStaticMemoryPool {
            self.shared_pool.clone()
        }

        pub fn add_pus_tm(&self, pus_tm: &PusTmCreator) -> Result<StoreAddr, EcssTmtcError> {
            let mut pg = self
                .shared_pool
                .write()
                .map_err(|_| EcssTmtcError::StoreLock)?;
            let (addr, buf) = pg.free_element(pus_tm.len_written())?;
            pus_tm
                .write_to_bytes(buf)
@@ -91,3 +96,33 @@ impl PusTmWithCdsShortHelper {
        PusTmCreator::new(&mut reply_header, tc_header, source_data, true)
    }
}
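Since `SharedTmStore` now takes the concrete `StaticMemoryPool` by value and wraps it itself, callers no longer box the pool. A small sketch combining it with the TM helper above; the pool dimensions and APID are arbitrary example values.

let tm_pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(16, 64), (8, 256)]));
let shared_tm_store = SharedTmStore::new(tm_pool);
let mut tm_helper = PusTmWithCdsShortHelper::new(0x123);
let ping_reply = tm_helper.create_pus_tm_timestamp_now(17, 2, &[], 0);
// add_pus_tm writes the packet into a free pool element and returns the
// store address, which is what actually travels over mpsc channels.
let addr = shared_tm_store
    .add_pus_tm(&ping_reply)
    .expect("storing TM failed");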
#[cfg(test)]
mod tests {
use spacepackets::{ecss::PusPacket, time::cds::TimeProvider, CcsdsPacket};
use super::PusTmWithCdsShortHelper;
#[test]
fn test_helper_with_stamper() {
let mut pus_tm_helper = PusTmWithCdsShortHelper::new(0x123);
let stamper = TimeProvider::new_with_u16_days(0, 0);
let tm = pus_tm_helper.create_pus_tm_with_stamper(17, 1, &[1, 2, 3, 4], &stamper, 25);
assert_eq!(tm.service(), 17);
assert_eq!(tm.subservice(), 1);
assert_eq!(tm.user_data(), &[1, 2, 3, 4]);
assert_eq!(tm.seq_count(), 25);
assert_eq!(tm.timestamp(), [64, 0, 0, 0, 0, 0, 0])
}
#[test]
fn test_helper_from_now() {
let mut pus_tm_helper = PusTmWithCdsShortHelper::new(0x123);
let tm = pus_tm_helper.create_pus_tm_timestamp_now(17, 1, &[1, 2, 3, 4], 25);
assert_eq!(tm.service(), 17);
assert_eq!(tm.subservice(), 1);
assert_eq!(tm.user_data(), &[1, 2, 3, 4]);
assert_eq!(tm.seq_count(), 25);
assert_eq!(tm.timestamp().len(), 7);
}
}

View File

@@ -1,4 +1,6 @@
use satrs_core::pool::{
    PoolGuard, PoolProviderMemInPlace, StaticMemoryPool, StaticPoolConfig, StoreAddr,
};
use std::ops::DerefMut;
use std::sync::mpsc;
use std::sync::mpsc::{Receiver, Sender};
@@ -9,8 +11,8 @@ const DUMMY_DATA: [u8; 4] = [0, 1, 2, 3];
#[test]
fn threaded_usage() {
    let pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)]);
    let shared_pool = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
    let shared_clone = shared_pool.clone();
    let (tx, rx): (Sender<StoreAddr>, Receiver<StoreAddr>) = mpsc::channel();
    let jh0 = thread::spawn(move || {

View File

@@ -1,7 +1,10 @@
#[cfg(feature = "crossbeam")]
pub mod crossbeam_test {
    use hashbrown::HashMap;
    use satrs_core::pool::{
        PoolProviderMemInPlace, PoolProviderMemInPlaceWithGuards, StaticMemoryPool,
        StaticPoolConfig,
    };
    use satrs_core::pus::verification::{
        FailParams, RequestId, VerificationReporterCfg, VerificationReporterWithSender,
    };
@@ -33,9 +36,9 @@ pub mod crossbeam_test {
    // Each reporter has its own sequence count provider.
    let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
    // Shared pool object to store the verification PUS telemetry.
    let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
    let shared_tm_store = SharedTmStore::new(StaticMemoryPool::new(pool_cfg.clone()));
    let shared_tc_pool_0 = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
    let shared_tc_pool_1 = shared_tc_pool_0.clone();
    let (tx, rx) = crossbeam_channel::bounded(10);
    let sender =

View File

@@ -6,3 +6,4 @@ __pycache__
!/.idea/runConfigurations
/seqcnt.txt
/.tmtc-history.txt

View File

@@ -44,10 +44,6 @@ class AcsHkIds(enum.IntEnum):
     MGM_SET = 1
 
-class HkOpCodes:
-    GENERATE_ONE_SHOT = ["0", "oneshot"]
-
 def make_addressable_id(target_id: int, unique_id: int) -> bytes:
     byte_string = bytearray(struct.pack("!I", target_id))
     byte_string.extend(struct.pack("!I", unique_id))

View File

@@ -4,6 +4,8 @@ import logging
 import sys
 import time
 from typing import Optional
+from prompt_toolkit.history import History
+from prompt_toolkit.history import FileHistory
 
 import tmtccmd
 from spacepackets.ecss import PusTelemetry, PusVerificator
@@ -11,16 +13,16 @@ from spacepackets.ecss.pus_17_test import Service17Tm
 from spacepackets.ecss.pus_1_verification import UnpackParams, Service1Tm
 from spacepackets.ccsds.time import CdsShortTimestamp
-from tmtccmd import CcsdsTmtcBackend, TcHandlerBase, ProcedureParamsWrapper
+from tmtccmd import TcHandlerBase, ProcedureParamsWrapper
 from tmtccmd.core.base import BackendRequest
 from tmtccmd.pus import VerificationWrapper
 from tmtccmd.tmtc import CcsdsTmHandler, SpecificApidHandlerBase
 from tmtccmd.com import ComInterface
 from tmtccmd.config import (
+    CmdTreeNode,
     default_json_path,
     SetupParams,
     HookBase,
-    TmtcDefinitionWrapper,
     params_to_procedure_conversion,
 )
 from tmtccmd.config import PreArgsParsingWrapper, SetupWrapper
@@ -39,12 +41,11 @@ from tmtccmd.tmtc import (
     DefaultPusQueueHelper,
     QueueWrapper,
 )
-from tmtccmd.util import FileSeqCountProvider, PusFileSeqCountProvider
+from spacepackets.seqcount import FileSeqCountProvider, PusFileSeqCountProvider
 from tmtccmd.util.obj_id import ObjectIdDictT
 
 import pus_tc
-import tc_definitions
 from common import EXAMPLE_PUS_APID, TM_PACKET_IDS, EventU32
 
 _LOGGER = logging.getLogger()
@@ -54,25 +55,29 @@ class SatRsConfigHook(HookBase):
     def __init__(self, json_cfg_path: str):
         super().__init__(json_cfg_path=json_cfg_path)
 
-    def assign_communication_interface(self, com_if_key: str) -> Optional[ComInterface]:
+    def get_communication_interface(self, com_if_key: str) -> Optional[ComInterface]:
         from tmtccmd.config.com import (
             create_com_interface_default,
             create_com_interface_cfg_default,
         )
 
+        assert self.cfg_path is not None
         cfg = create_com_interface_cfg_default(
             com_if_key=com_if_key,
             json_cfg_path=self.cfg_path,
             space_packet_ids=TM_PACKET_IDS,
         )
+        assert cfg is not None
         return create_com_interface_default(cfg)
 
-    def get_tmtc_definitions(self) -> TmtcDefinitionWrapper:
-        return tc_definitions.tc_definitions()
+    def get_command_definitions(self) -> CmdTreeNode:
+        """This function should return the root node of the command definition tree."""
+        return pus_tc.create_cmd_definition_tree()
 
-    def perform_mode_operation(self, tmtc_backend: CcsdsTmtcBackend, mode: int):
-        _LOGGER.info("Mode operation hook was called")
-        pass
+    def get_cmd_history(self) -> Optional[History]:
+        """Optionally return a history class for the past command paths which will be used
+        when prompting a command path from the user in CLI mode."""
+        return FileHistory(".tmtc-history.txt")
 
     def get_object_ids(self) -> ObjectIdDictT:
         from tmtccmd.config.objects import get_core_object_ids
@@ -94,15 +99,12 @@ class PusHandler(SpecificApidHandlerBase):
     def handle_tm(self, packet: bytes, _user_args: any):
         try:
-            tm_packet = PusTelemetry.unpack(
-                packet, time_reader=CdsShortTimestamp.empty()
-            )
+            pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
         except ValueError as e:
             _LOGGER.warning("Could not generate PUS TM object from raw data")
             _LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
             raise e
-        service = tm_packet.service
-        dedicated_handler = False
+        service = pus_tm.service
         if service == 1:
             tm_packet = Service1Tm.unpack(
                 data=packet, params=UnpackParams(CdsShortTimestamp.empty(), 1, 2)
@@ -119,8 +121,7 @@ class PusHandler(SpecificApidHandlerBase):
             else:
                 self.verif_wrapper.log_to_console(tm_packet, res)
                 self.verif_wrapper.log_to_file(tm_packet, res)
-            dedicated_handler = True
-        if service == 3:
+        elif service == 3:
             _LOGGER.info("No handling for HK packets implemented")
             _LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
             pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
@@ -129,8 +130,7 @@ class PusHandler(SpecificApidHandlerBase):
                 raise ValueError("No addressable ID in HK packet")
             json_str = pus_tm.source_data[8:]
             _LOGGER.info(json_str)
-            dedicated_handler = True
-        if service == 5:
+        elif service == 5:
             tm_packet = PusTelemetry.unpack(
                 packet, time_reader=CdsShortTimestamp.empty()
             )
@@ -139,11 +139,10 @@ class PusHandler(SpecificApidHandlerBase):
             _LOGGER.info(f"Received event packet. Event: {event_u32}")
             if event_u32.group_id == 0 and event_u32.unique_id == 0:
                 _LOGGER.info("Received test event")
-        if service == 17:
+        elif service == 17:
             tm_packet = Service17Tm.unpack(
                 packet, time_reader=CdsShortTimestamp.empty()
             )
-            dedicated_handler = True
             if tm_packet.subservice == 2:
                 self.file_logger.info("Received Ping Reply TM[17,2]")
                 _LOGGER.info("Received Ping Reply TM[17,2]")
@@ -154,17 +153,14 @@ class PusHandler(SpecificApidHandlerBase):
                 _LOGGER.info(
                     f"Received Test Packet with unknown subservice {tm_packet.subservice}"
                 )
-        if tm_packet is None:
+        else:
             _LOGGER.info(
                 f"The service {service} is not implemented in Telemetry Factory"
             )
             tm_packet = PusTelemetry.unpack(
                 packet, time_reader=CdsShortTimestamp.empty()
             )
-        self.raw_logger.log_tm(tm_packet)
-        if not dedicated_handler and tm_packet is not None:
-            pass
-            # self.printer.handle_long_tm_print(packet_if=tm_packet, info_if=tm_packet)
+        self.raw_logger.log_tm(pus_tm)
 
 
 class TcHandler(TcHandlerBase):
@@ -196,22 +192,18 @@ class TcHandler(TcHandlerBase):
             log_entry = entry_helper.to_log_entry()
             _LOGGER.info(log_entry.log_str)
 
-    def queue_finished_cb(self, helper: ProcedureWrapper):
-        if helper.proc_type == TcProcedureType.DEFAULT:
-            def_proc = helper.to_def_procedure()
-            _LOGGER.info(
-                f"Queue handling finished for service {def_proc.service} and "
-                f"op code {def_proc.op_code}"
-            )
+    def queue_finished_cb(self, info: ProcedureWrapper):
+        if info.proc_type == TcProcedureType.DEFAULT:
+            def_proc = info.to_def_procedure()
+            _LOGGER.info(f"Queue handling finished for command {def_proc.cmd_path}")
 
-    def feed_cb(self, helper: ProcedureWrapper, wrapper: FeedWrapper):
+    def feed_cb(self, info: ProcedureWrapper, wrapper: FeedWrapper):
         q = self.queue_helper
         q.queue_wrapper = wrapper.queue_wrapper
-        if helper.proc_type == TcProcedureType.DEFAULT:
-            def_proc = helper.to_def_procedure()
-            service = def_proc.service
-            op_code = def_proc.op_code
-            pus_tc.pack_pus_telecommands(q, service, op_code)
+        if info.proc_type == TcProcedureType.DEFAULT:
+            def_proc = info.to_def_procedure()
+            assert def_proc.cmd_path is not None
+            pus_tc.pack_pus_telecommands(q, def_proc.cmd_path)
 
 
 def main():

View File

@@ -1,50 +1,85 @@
 import datetime
+import logging
 
 from spacepackets.ccsds import CdsShortTimestamp
 from spacepackets.ecss import PusTelecommand
-from tmtccmd.config import CoreServiceList
+from tmtccmd.config import CmdTreeNode
 from tmtccmd.tmtc import DefaultPusQueueHelper
 from tmtccmd.pus.s11_tc_sched import create_time_tagged_cmd
 from tmtccmd.pus.tc.s3_fsfw_hk import create_request_one_hk_command
 
 from common import (
     EXAMPLE_PUS_APID,
-    HkOpCodes,
     make_addressable_id,
     RequestTargetId,
     AcsHkIds,
 )
 
+_LOGGER = logging.getLogger(__name__)
 
-def pack_pus_telecommands(q: DefaultPusQueueHelper, service: str, op_code: str):
-    if (
-        service == CoreServiceList.SERVICE_17
-        or service == CoreServiceList.SERVICE_17_ALT
-    ):
-        if op_code == "ping":
+
+def create_cmd_definition_tree() -> CmdTreeNode:
+    root_node = CmdTreeNode.root_node()
+
+    test_node = CmdTreeNode("test", "Test Node")
+    test_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
+    test_node.add_child(CmdTreeNode("trigger_event", "Send PUS test to trigger event"))
+    root_node.add_child(test_node)
+
+    scheduler_node = CmdTreeNode("scheduler", "Scheduler Node")
+    scheduler_node.add_child(
+        CmdTreeNode(
+            "schedule_ping_10_secs_ahead", "Schedule Ping to execute in 10 seconds"
+        )
+    )
+    root_node.add_child(scheduler_node)
+
+    acs_node = CmdTreeNode("acs", "ACS Subsystem Node")
+    mgm_node = CmdTreeNode("mgms", "MGM devices node")
+    mgm_node.add_child(CmdTreeNode("one_shot_hk", "Request one shot HK"))
+    acs_node.add_child(mgm_node)
+    root_node.add_child(acs_node)
+
+    return root_node
+
+
+def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str):
+    # It should always be at least the root path "/", so we split off the empty
+    # portion left of it.
+    cmd_path_list = cmd_path.split("/")[1:]
+    if len(cmd_path_list) == 0:
+        _LOGGER.warning("empty command path")
+        return
+    if cmd_path_list[0] == "test":
+        assert len(cmd_path_list) >= 2
+        if cmd_path_list[1] == "ping":
             q.add_log_cmd("Sending PUS ping telecommand")
             return q.add_pus_tc(PusTelecommand(service=17, subservice=1))
-        elif op_code == "trigger_event":
+        elif cmd_path_list[1] == "trigger_event":
             q.add_log_cmd("Triggering test event")
             return q.add_pus_tc(PusTelecommand(service=17, subservice=128))
-    if service == CoreServiceList.SERVICE_11:
-        q.add_log_cmd("Sending PUS scheduled TC telecommand")
-        crt_time = CdsShortTimestamp.from_now()
-        time_stamp = crt_time + datetime.timedelta(seconds=10)
-        time_stamp = time_stamp.pack()
-        return q.add_pus_tc(
-            create_time_tagged_cmd(
-                time_stamp,
-                PusTelecommand(service=17, subservice=1),
-                apid=EXAMPLE_PUS_APID,
-            )
-        )
-    if service == CoreServiceList.SERVICE_3:
-        if op_code in HkOpCodes.GENERATE_ONE_SHOT:
-            q.add_log_cmd("Sending HK one shot request")
-            q.add_pus_tc(
-                create_request_one_hk_command(
-                    make_addressable_id(RequestTargetId.ACS, AcsHkIds.MGM_SET)
-                )
-            )
-        pass
+    if cmd_path_list[0] == "scheduler":
+        assert len(cmd_path_list) >= 2
+        if cmd_path_list[1] == "schedule_ping_10_secs_ahead":
+            q.add_log_cmd("Sending PUS scheduled TC telecommand")
+            crt_time = CdsShortTimestamp.from_now()
+            time_stamp = crt_time + datetime.timedelta(seconds=10)
+            time_stamp = time_stamp.pack()
+            return q.add_pus_tc(
+                create_time_tagged_cmd(
+                    time_stamp,
+                    PusTelecommand(service=17, subservice=1),
+                    apid=EXAMPLE_PUS_APID,
+                )
+            )
+    if cmd_path_list[0] == "acs":
+        assert len(cmd_path_list) >= 2
+        if cmd_path_list[1] == "mgms":
+            assert len(cmd_path_list) >= 3
+            if cmd_path_list[2] == "one_shot_hk":
+                q.add_log_cmd("Sending HK one shot request")
+                q.add_pus_tc(
+                    create_request_one_hk_command(
+                        make_addressable_id(RequestTargetId.ACS, AcsHkIds.MGM_SET)
+                    )
+                )

View File

@@ -1,2 +1,2 @@
-tmtccmd == 7.0.0
+tmtccmd == 8.0.0rc1
 # -e git+https://github.com/robamu-org/tmtccmd@97e5e51101a08b21472b3ddecc2063359f7e307a#egg=tmtccmd

View File

@@ -29,7 +29,7 @@ use satrs_core::event_man::{
 };
 use satrs_core::events::EventU32;
 use satrs_core::hk::HkRequest;
-use satrs_core::pool::{LocalPool, PoolCfg};
+use satrs_core::pool::{PoolProviderMemInPlace, StaticMemoryPool, StaticPoolConfig};
 use satrs_core::pus::event_man::{
     DefaultPusMgmtBackendProvider, EventReporter, EventRequest, EventRequestWithToken,
     PusEventDispatcher,
@@ -42,7 +42,9 @@ use satrs_core::pus::test::PusService17TestHandler;
 use satrs_core::pus::verification::{
     TcStateStarted, VerificationReporterCfg, VerificationReporterWithSender, VerificationToken,
 };
-use satrs_core::pus::{MpscTcInStoreReceiver, MpscTmInStoreSender};
+use satrs_core::pus::{
+    EcssTcInSharedStoreConverter, MpscTcReceiver, MpscTmInStoreSender, PusServiceHelper,
+};
 use satrs_core::seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore};
 use satrs_core::spacepackets::ecss::tm::{PusTmCreator, PusTmZeroCopyWriter};
 use satrs_core::spacepackets::{
@@ -66,7 +68,7 @@ use std::time::Duration;
 fn main() {
     setup_logger().expect("setting up logging with fern failed");
     println!("Running OBSW example");
-    let tm_pool = LocalPool::new(PoolCfg::new(vec![
+    let tm_pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![
         (30, 32),
         (15, 64),
         (15, 128),
@@ -74,9 +76,9 @@ fn main() {
         (15, 1024),
         (15, 2048),
     ]));
-    let shared_tm_store = SharedTmStore::new(Box::new(tm_pool));
+    let shared_tm_store = SharedTmStore::new(tm_pool);
     let tm_store_event = shared_tm_store.clone();
-    let tc_pool = LocalPool::new(PoolCfg::new(vec![
+    let tc_pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![
         (30, 32),
         (15, 64),
         (15, 128),
@@ -85,8 +87,16 @@ fn main() {
         (15, 1024),
         (15, 2048),
     ]));
     let tc_store = TcStore {
-        pool: Arc::new(RwLock::new(Box::new(tc_pool))),
+        pool: Arc::new(RwLock::new(tc_pool)),
     };
+    let sched_tc_pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![
+        (30, 32),
+        (15, 64),
+        (15, 128),
+        (15, 256),
+        (15, 1024),
+        (15, 2048),
+    ]));
 
     let seq_count_provider = CcsdsSimpleSeqCountProvider::new();
     let mut msg_counter_map: HashMap<u8, u16> = HashMap::new();
@@ -172,18 +182,18 @@ fn main() {
         shared_tm_store.clone(),
         tm_funnel_tx.clone(),
     );
-    let test_srv_receiver = MpscTcInStoreReceiver::new(
+    let test_srv_receiver = MpscTcReceiver::new(
        TcReceiverId::PusTest as ChannelId,
         "PUS_17_TC_RECV",
         pus_test_rx,
     );
-    let pus17_handler = PusService17TestHandler::new(
+    let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
         Box::new(test_srv_receiver),
-        tc_store.pool.clone(),
         Box::new(test_srv_tm_sender),
         PUS_APID,
         verif_reporter.clone(),
-    );
+        EcssTcInSharedStoreConverter::new(tc_store.pool.clone(), 2048),
+    ));
     let mut pus_17_wrapper = Service17CustomWrapper {
         pus17_handler,
         test_srv_event_sender,
@@ -195,7 +205,7 @@ fn main() {
         shared_tm_store.clone(),
         tm_funnel_tx.clone(),
     );
-    let sched_srv_receiver = MpscTcInStoreReceiver::new(
+    let sched_srv_receiver = MpscTcReceiver::new(
         TcReceiverId::PusSched as ChannelId,
         "PUS_11_TC_RECV",
         pus_sched_rx,
@@ -203,15 +213,18 @@ fn main() {
     let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
         .expect("Creating PUS Scheduler failed");
     let pus_11_handler = PusService11SchedHandler::new(
-        Box::new(sched_srv_receiver),
-        tc_store.pool.clone(),
-        Box::new(sched_srv_tm_sender),
-        PUS_APID,
-        verif_reporter.clone(),
+        PusServiceHelper::new(
+            Box::new(sched_srv_receiver),
+            Box::new(sched_srv_tm_sender),
+            PUS_APID,
+            verif_reporter.clone(),
+            EcssTcInSharedStoreConverter::new(tc_store.pool.clone(), 2048),
+        ),
         scheduler,
     );
     let mut pus_11_wrapper = Pus11Wrapper {
         pus_11_handler,
+        sched_tc_pool,
         tc_source_wrapper,
     };
@@ -221,17 +234,19 @@ fn main() {
         shared_tm_store.clone(),
         tm_funnel_tx.clone(),
     );
-    let event_srv_receiver = MpscTcInStoreReceiver::new(
+    let event_srv_receiver = MpscTcReceiver::new(
         TcReceiverId::PusEvent as ChannelId,
         "PUS_5_TC_RECV",
         pus_event_rx,
     );
     let pus_5_handler = PusService5EventHandler::new(
-        Box::new(event_srv_receiver),
-        tc_store.pool.clone(),
-        Box::new(event_srv_tm_sender),
-        PUS_APID,
-        verif_reporter.clone(),
+        PusServiceHelper::new(
+            Box::new(event_srv_receiver),
+            Box::new(event_srv_tm_sender),
+            PUS_APID,
+            verif_reporter.clone(),
+            EcssTcInSharedStoreConverter::new(tc_store.pool.clone(), 2048),
+        ),
         event_request_tx,
     );
     let mut pus_5_wrapper = Pus5Wrapper { pus_5_handler };
@@ -242,17 +257,17 @@ fn main() {
         shared_tm_store.clone(),
         tm_funnel_tx.clone(),
     );
-    let action_srv_receiver = MpscTcInStoreReceiver::new(
+    let action_srv_receiver = MpscTcReceiver::new(
         TcReceiverId::PusAction as ChannelId,
         "PUS_8_TC_RECV",
         pus_action_rx,
     );
     let pus_8_handler = PusService8ActionHandler::new(
         Box::new(action_srv_receiver),
-        tc_store.pool.clone(),
         Box::new(action_srv_tm_sender),
         PUS_APID,
         verif_reporter.clone(),
+        EcssTcInSharedStoreConverter::new(tc_store.pool.clone(), 2048),
         request_map.clone(),
     );
     let mut pus_8_wrapper = Pus8Wrapper { pus_8_handler };
@@ -264,13 +279,13 @@ fn main() {
         tm_funnel_tx.clone(),
     );
     let hk_srv_receiver =
-        MpscTcInStoreReceiver::new(TcReceiverId::PusHk as ChannelId, "PUS_8_TC_RECV", pus_hk_rx);
+        MpscTcReceiver::new(TcReceiverId::PusHk as ChannelId, "PUS_8_TC_RECV", pus_hk_rx);
     let pus_3_handler = PusService3HkHandler::new(
         Box::new(hk_srv_receiver),
-        tc_store.pool.clone(),
         Box::new(hk_srv_tm_sender),
         PUS_APID,
         verif_reporter.clone(),
+        EcssTcInSharedStoreConverter::new(tc_store.pool.clone(), 2048),
        request_map,
     );
     let mut pus_3_wrapper = Pus3Wrapper { pus_3_handler };
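The change repeated for every service above has the same shape: the old five-argument constructors taking the shared TC pool directly are replaced by a `PusServiceHelper` that is generic over how a telecommand reaches memory, with `EcssTcInSharedStoreConverter` as the store-backed implementation. The following self-contained toy model (hypothetical, simplified names, not the real satrs-core API) sketches why this lets identical handler code serve both pool-backed and heap-backed TCs:

// Toy stand-in for the EcssTcInMemConverter abstraction from this commit.
trait TcInMemConverter {
    fn cache(&mut self, tc: &TcInMemory);
    fn tc_slice_raw(&self) -> &[u8];
}

enum TcInMemory {
    Vec(Vec<u8>),
    // The real enum also has a StoreAddr variant pointing into a shared pool.
}

struct HeapConverter {
    buf: Vec<u8>,
}

impl TcInMemConverter for HeapConverter {
    fn cache(&mut self, tc: &TcInMemory) {
        match tc {
            TcInMemory::Vec(data) => self.buf = data.clone(),
        }
    }
    fn tc_slice_raw(&self) -> &[u8] {
        &self.buf
    }
}

// The helper owns the converter; concrete service handlers only see raw bytes.
struct ServiceHelper<Converter: TcInMemConverter> {
    converter: Converter,
}

impl<Converter: TcInMemConverter> ServiceHelper<Converter> {
    fn handle_one_tc(&mut self, tc: &TcInMemory) {
        self.converter.cache(tc);
        println!("handling TC of {} bytes", self.converter.tc_slice_raw().len());
    }
}

fn main() {
    let mut helper = ServiceHelper {
        converter: HeapConverter { buf: Vec::new() },
    };
    helper.handle_one_tc(&TcInMemory::Vec(vec![0x17, 0x01]));
}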

View File

@@ -1,12 +1,11 @@
 use crate::requests::{ActionRequest, Request, RequestWithToken};
 use log::{error, warn};
-use satrs_core::pool::{SharedPool, StoreAddr};
 use satrs_core::pus::verification::{
-    FailParams, StdVerifReporterWithSender, TcStateAccepted, VerificationToken,
+    FailParams, TcStateAccepted, VerificationReporterWithSender, VerificationToken,
 };
 use satrs_core::pus::{
-    EcssTcReceiver, EcssTmSender, PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase,
-    PusServiceHandler,
+    EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcReceiver, EcssTmSender,
+    PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase, PusServiceHelper,
 };
 use satrs_core::spacepackets::ecss::tc::PusTcReader;
 use satrs_core::spacepackets::ecss::PusPacket;
@@ -14,34 +13,32 @@ use satrs_example::{tmtc_err, TargetIdWithApid};
 use std::collections::HashMap;
 use std::sync::mpsc::Sender;
 
-pub struct PusService8ActionHandler {
-    psb: PusServiceBase,
+pub struct PusService8ActionHandler<TcInMemConverter: EcssTcInMemConverter> {
+    service_helper: PusServiceHelper<TcInMemConverter>,
     request_handlers: HashMap<TargetIdWithApid, Sender<RequestWithToken>>,
 }
 
-impl PusService8ActionHandler {
+impl<TcInMemConverter: EcssTcInMemConverter> PusService8ActionHandler<TcInMemConverter> {
     pub fn new(
         tc_receiver: Box<dyn EcssTcReceiver>,
-        shared_tc_pool: SharedPool,
         tm_sender: Box<dyn EcssTmSender>,
         tm_apid: u16,
-        verification_handler: StdVerifReporterWithSender,
+        verification_handler: VerificationReporterWithSender,
+        tc_in_mem_converter: TcInMemConverter,
         request_handlers: HashMap<TargetIdWithApid, Sender<RequestWithToken>>,
     ) -> Self {
         Self {
-            psb: PusServiceBase::new(
+            service_helper: PusServiceHelper::new(
                 tc_receiver,
-                shared_tc_pool,
                 tm_sender,
                 tm_apid,
                 verification_handler,
+                tc_in_mem_converter,
             ),
             request_handlers,
         }
     }
-}
 
-impl PusService8ActionHandler {
     fn handle_action_request_with_id(
         &self,
         token: VerificationToken<TcStateAccepted>,
@@ -50,7 +47,8 @@ impl PusService8ActionHandler {
     ) -> Result<(), PusPacketHandlingError> {
         let user_data = tc.user_data();
         if user_data.len() < 8 {
-            self.psb()
+            self.service_helper
+                .common
                 .verification_handler
                 .borrow_mut()
                 .start_failure(
@@ -79,7 +77,8 @@ impl PusService8ActionHandler {
         } else {
             let mut fail_data: [u8; 4] = [0; 4];
             fail_data.copy_from_slice(&target_id.target.to_be_bytes());
-            self.psb()
+            self.service_helper
+                .common
                 .verification_handler
                 .borrow_mut()
                 .start_failure(
@@ -97,37 +96,32 @@ impl PusService8ActionHandler {
         }
         Ok(())
     }
-}
 
-impl PusServiceHandler for PusService8ActionHandler {
-    fn psb_mut(&mut self) -> &mut PusServiceBase {
-        &mut self.psb
-    }
-    fn psb(&self) -> &PusServiceBase {
-        &self.psb
-    }
-
-    fn handle_one_tc(
-        &mut self,
-        addr: StoreAddr,
-        token: VerificationToken<TcStateAccepted>,
-    ) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
-        self.copy_tc_to_buf(addr)?;
-        let (tc, _) = PusTcReader::new(&self.psb().pus_buf).unwrap();
+    fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+        let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
+        if possible_packet.is_none() {
+            return Ok(PusPacketHandlerResult::Empty);
+        }
+        let ecss_tc_and_token = possible_packet.unwrap();
+        self.service_helper
+            .tc_in_mem_converter
+            .cache_ecss_tc_in_memory(&ecss_tc_and_token.tc_in_memory)?;
+        let tc = PusTcReader::new(self.service_helper.tc_in_mem_converter.tc_slice_raw())?.0;
         let subservice = tc.subservice();
         let mut partial_error = None;
-        let time_stamp = self.psb().get_current_timestamp(&mut partial_error);
+        let time_stamp = PusServiceBase::get_current_timestamp(&mut partial_error);
         match subservice {
             128 => {
-                self.handle_action_request_with_id(token, &tc, &time_stamp)?;
+                self.handle_action_request_with_id(ecss_tc_and_token.token, &tc, &time_stamp)?;
             }
             _ => {
                 let fail_data = [subservice];
-                self.psb_mut()
+                self.service_helper
+                    .common
                     .verification_handler
                     .get_mut()
                     .start_failure(
-                        token,
+                        ecss_tc_and_token.token,
                         FailParams::new(
                             Some(&time_stamp),
                             &tmtc_err::INVALID_PUS_SUBSERVICE,
@@ -148,12 +142,12 @@ impl PusServiceHandler for PusService8ActionHandler {
 }
 
 pub struct Pus8Wrapper {
-    pub(crate) pus_8_handler: PusService8ActionHandler,
+    pub(crate) pus_8_handler: PusService8ActionHandler<EcssTcInSharedStoreConverter>,
 }
 
 impl Pus8Wrapper {
     pub fn handle_next_packet(&mut self) -> bool {
-        match self.pus_8_handler.handle_next_packet() {
+        match self.pus_8_handler.handle_one_tc() {
             Ok(result) => match result {
                 PusPacketHandlerResult::RequestHandled => {}
                 PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {

View File

@ -1,14 +1,14 @@
use log::{error, warn}; use log::{error, warn};
use satrs_core::pus::event_srv::PusService5EventHandler; use satrs_core::pus::event_srv::PusService5EventHandler;
use satrs_core::pus::{PusPacketHandlerResult, PusServiceHandler}; use satrs_core::pus::{EcssTcInSharedStoreConverter, PusPacketHandlerResult};
pub struct Pus5Wrapper { pub struct Pus5Wrapper {
pub pus_5_handler: PusService5EventHandler, pub pus_5_handler: PusService5EventHandler<EcssTcInSharedStoreConverter>,
} }
impl Pus5Wrapper { impl Pus5Wrapper {
pub fn handle_next_packet(&mut self) -> bool { pub fn handle_next_packet(&mut self) -> bool {
match self.pus_5_handler.handle_next_packet() { match self.pus_5_handler.handle_one_tc() {
Ok(result) => match result { Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {} PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {

View File

@ -1,72 +1,63 @@
use crate::requests::{Request, RequestWithToken}; use crate::requests::{Request, RequestWithToken};
use log::{error, warn}; use log::{error, warn};
use satrs_core::hk::{CollectionIntervalFactor, HkRequest}; use satrs_core::hk::{CollectionIntervalFactor, HkRequest};
use satrs_core::pool::{SharedPool, StoreAddr}; use satrs_core::pus::verification::{FailParams, StdVerifReporterWithSender};
use satrs_core::pus::verification::{
FailParams, StdVerifReporterWithSender, TcStateAccepted, VerificationToken,
};
use satrs_core::pus::{ use satrs_core::pus::{
EcssTcReceiver, EcssTmSender, PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcReceiver, EcssTmSender,
PusServiceHandler, PusPacketHandlerResult, PusPacketHandlingError, PusServiceBase, PusServiceHelper,
}; };
use satrs_core::spacepackets::ecss::tc::PusTcReader;
use satrs_core::spacepackets::ecss::{hk, PusPacket}; use satrs_core::spacepackets::ecss::{hk, PusPacket};
use satrs_example::{hk_err, tmtc_err, TargetIdWithApid}; use satrs_example::{hk_err, tmtc_err, TargetIdWithApid};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::mpsc::Sender; use std::sync::mpsc::Sender;
pub struct PusService3HkHandler { pub struct PusService3HkHandler<TcInMemConverter: EcssTcInMemConverter> {
psb: PusServiceBase, psb: PusServiceHelper<TcInMemConverter>,
request_handlers: HashMap<TargetIdWithApid, Sender<RequestWithToken>>, request_handlers: HashMap<TargetIdWithApid, Sender<RequestWithToken>>,
} }
impl PusService3HkHandler { impl<TcInMemConverter: EcssTcInMemConverter> PusService3HkHandler<TcInMemConverter> {
pub fn new( pub fn new(
tc_receiver: Box<dyn EcssTcReceiver>, tc_receiver: Box<dyn EcssTcReceiver>,
shared_tc_pool: SharedPool,
tm_sender: Box<dyn EcssTmSender>, tm_sender: Box<dyn EcssTmSender>,
tm_apid: u16, tm_apid: u16,
verification_handler: StdVerifReporterWithSender, verification_handler: StdVerifReporterWithSender,
tc_in_mem_converter: TcInMemConverter,
request_handlers: HashMap<TargetIdWithApid, Sender<RequestWithToken>>, request_handlers: HashMap<TargetIdWithApid, Sender<RequestWithToken>>,
) -> Self { ) -> Self {
Self { Self {
psb: PusServiceBase::new( psb: PusServiceHelper::new(
tc_receiver, tc_receiver,
shared_tc_pool,
tm_sender, tm_sender,
tm_apid, tm_apid,
verification_handler, verification_handler,
tc_in_mem_converter,
), ),
request_handlers, request_handlers,
} }
} }
}
impl PusServiceHandler for PusService3HkHandler { fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
fn psb_mut(&mut self) -> &mut PusServiceBase { let possible_packet = self.psb.retrieve_and_accept_next_packet()?;
&mut self.psb if possible_packet.is_none() {
} return Ok(PusPacketHandlerResult::Empty);
fn psb(&self) -> &PusServiceBase { }
&self.psb let ecss_tc_and_token = possible_packet.unwrap();
} let tc = self
.psb
fn handle_one_tc( .tc_in_mem_converter
&mut self, .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
addr: StoreAddr,
token: VerificationToken<TcStateAccepted>,
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
self.copy_tc_to_buf(addr)?;
let (tc, _) = PusTcReader::new(&self.psb().pus_buf).unwrap();
let subservice = tc.subservice(); let subservice = tc.subservice();
let mut partial_error = None; let mut partial_error = None;
let time_stamp = self.psb().get_current_timestamp(&mut partial_error); let time_stamp = PusServiceBase::get_current_timestamp(&mut partial_error);
let user_data = tc.user_data(); let user_data = tc.user_data();
if user_data.is_empty() { if user_data.is_empty() {
self.psb self.psb
.common
.verification_handler .verification_handler
.borrow_mut() .borrow_mut()
.start_failure( .start_failure(
token, ecss_tc_and_token.token,
FailParams::new(Some(&time_stamp), &tmtc_err::NOT_ENOUGH_APP_DATA, None), FailParams::new(Some(&time_stamp), &tmtc_err::NOT_ENOUGH_APP_DATA, None),
) )
.expect("Sending start failure TM failed"); .expect("Sending start failure TM failed");
@ -81,9 +72,13 @@ impl PusServiceHandler for PusService3HkHandler {
&hk_err::UNIQUE_ID_MISSING &hk_err::UNIQUE_ID_MISSING
}; };
self.psb self.psb
.common
.verification_handler .verification_handler
.borrow_mut() .borrow_mut()
.start_failure(token, FailParams::new(Some(&time_stamp), err, None)) .start_failure(
ecss_tc_and_token.token,
FailParams::new(Some(&time_stamp), err, None),
)
.expect("Sending start failure TM failed"); .expect("Sending start failure TM failed");
return Err(PusPacketHandlingError::NotEnoughAppData( return Err(PusPacketHandlingError::NotEnoughAppData(
"Expected at least 8 bytes of app data".into(), "Expected at least 8 bytes of app data".into(),
@ -93,10 +88,11 @@ impl PusServiceHandler for PusService3HkHandler {
let unique_id = u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap()); let unique_id = u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap());
if !self.request_handlers.contains_key(&target_id) { if !self.request_handlers.contains_key(&target_id) {
self.psb self.psb
.common
.verification_handler .verification_handler
.borrow_mut() .borrow_mut()
.start_failure( .start_failure(
token, ecss_tc_and_token.token,
FailParams::new(Some(&time_stamp), &hk_err::UNKNOWN_TARGET_ID, None), FailParams::new(Some(&time_stamp), &hk_err::UNKNOWN_TARGET_ID, None),
) )
.expect("Sending start failure TM failed"); .expect("Sending start failure TM failed");
@ -107,7 +103,11 @@ impl PusServiceHandler for PusService3HkHandler {
let send_request = |target: TargetIdWithApid, request: HkRequest| { let send_request = |target: TargetIdWithApid, request: HkRequest| {
let sender = self.request_handlers.get(&target).unwrap(); let sender = self.request_handlers.get(&target).unwrap();
sender sender
.send(RequestWithToken::new(target, Request::Hk(request), token)) .send(RequestWithToken::new(
target,
Request::Hk(request),
ecss_tc_and_token.token,
))
.unwrap_or_else(|_| panic!("Sending HK request {request:?} failed")); .unwrap_or_else(|_| panic!("Sending HK request {request:?} failed"));
}; };
if subservice == hk::Subservice::TcEnableHkGeneration as u8 { if subservice == hk::Subservice::TcEnableHkGeneration as u8 {
@ -119,10 +119,11 @@ impl PusServiceHandler for PusService3HkHandler {
} else if subservice == hk::Subservice::TcModifyHkCollectionInterval as u8 { } else if subservice == hk::Subservice::TcModifyHkCollectionInterval as u8 {
if user_data.len() < 12 { if user_data.len() < 12 {
self.psb self.psb
.common
.verification_handler .verification_handler
.borrow_mut() .borrow_mut()
.start_failure( .start_failure(
token, ecss_tc_and_token.token,
FailParams::new( FailParams::new(
Some(&time_stamp), Some(&time_stamp),
&hk_err::COLLECTION_INTERVAL_MISSING, &hk_err::COLLECTION_INTERVAL_MISSING,
@ -147,12 +148,12 @@ impl PusServiceHandler for PusService3HkHandler {
} }
pub struct Pus3Wrapper { pub struct Pus3Wrapper {
pub(crate) pus_3_handler: PusService3HkHandler, pub(crate) pus_3_handler: PusService3HkHandler<EcssTcInSharedStoreConverter>,
} }
impl Pus3Wrapper { impl Pus3Wrapper {
pub fn handle_next_packet(&mut self) -> bool { pub fn handle_next_packet(&mut self) -> bool {
match self.pus_3_handler.handle_next_packet() { match self.pus_3_handler.handle_one_tc() {
Ok(result) => match result { Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {} PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {

View File

@ -1,8 +1,7 @@
use crate::tmtc::MpscStoreAndSendError; use crate::tmtc::MpscStoreAndSendError;
use log::warn; use log::warn;
use satrs_core::pool::StoreAddr;
use satrs_core::pus::verification::{FailParams, StdVerifReporterWithSender}; use satrs_core::pus::verification::{FailParams, StdVerifReporterWithSender};
use satrs_core::pus::{PusPacketHandlerResult, TcAddrWithToken}; use satrs_core::pus::{EcssTcAndToken, PusPacketHandlerResult, TcInMemory};
use satrs_core::spacepackets::ecss::tc::PusTcReader; use satrs_core::spacepackets::ecss::tc::PusTcReader;
use satrs_core::spacepackets::ecss::PusServiceId; use satrs_core::spacepackets::ecss::PusServiceId;
use satrs_core::spacepackets::time::cds::TimeProvider; use satrs_core::spacepackets::time::cds::TimeProvider;
@ -17,11 +16,11 @@ pub mod scheduler;
pub mod test; pub mod test;
pub struct PusTcMpscRouter { pub struct PusTcMpscRouter {
pub test_service_receiver: Sender<TcAddrWithToken>, pub test_service_receiver: Sender<EcssTcAndToken>,
pub event_service_receiver: Sender<TcAddrWithToken>, pub event_service_receiver: Sender<EcssTcAndToken>,
pub sched_service_receiver: Sender<TcAddrWithToken>, pub sched_service_receiver: Sender<EcssTcAndToken>,
pub hk_service_receiver: Sender<TcAddrWithToken>, pub hk_service_receiver: Sender<EcssTcAndToken>,
pub action_service_receiver: Sender<TcAddrWithToken>, pub action_service_receiver: Sender<EcssTcAndToken>,
} }
pub struct PusReceiver { pub struct PusReceiver {
@ -70,7 +69,7 @@ impl PusReceiver {
impl PusReceiver { impl PusReceiver {
pub fn handle_tc_packet( pub fn handle_tc_packet(
&mut self, &mut self,
store_addr: StoreAddr, tc_in_memory: TcInMemory,
service: u8, service: u8,
pus_tc: &PusTcReader, pus_tc: &PusTcReader,
) -> Result<PusPacketHandlerResult, MpscStoreAndSendError> { ) -> Result<PusPacketHandlerResult, MpscStoreAndSendError> {
@ -84,22 +83,33 @@ impl PusReceiver {
match service { match service {
Ok(standard_service) => match standard_service { Ok(standard_service) => match standard_service {
PusServiceId::Test => { PusServiceId::Test => {
self.pus_router self.pus_router.test_service_receiver.send(EcssTcAndToken {
.test_service_receiver tc_in_memory,
.send((store_addr, accepted_token.into()))?; token: Some(accepted_token.into()),
})?
}
PusServiceId::Housekeeping => {
self.pus_router.hk_service_receiver.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?
}
PusServiceId::Event => {
self.pus_router
.event_service_receiver
.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?
}
PusServiceId::Scheduling => {
self.pus_router
.sched_service_receiver
.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?
} }
PusServiceId::Housekeeping => self
.pus_router
.hk_service_receiver
.send((store_addr, accepted_token.into()))?,
PusServiceId::Event => self
.pus_router
.event_service_receiver
.send((store_addr, accepted_token.into()))?,
PusServiceId::Scheduling => self
.pus_router
.sched_service_receiver
.send((store_addr, accepted_token.into()))?,
_ => { _ => {
let result = self.verif_reporter.start_failure( let result = self.verif_reporter.start_failure(
accepted_token, accepted_token,
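The router fields above switch from a `(StoreAddr, token)` tuple to the new `EcssTcAndToken` struct, which makes the TC's location explicit through the `TcInMemory` enum. A self-contained toy of the channel contract (hypothetical simplified types; the real token is a typed verification token, not a `u32`):

use std::sync::mpsc;

#[derive(Debug)]
enum TcInMemory {
    StoreAddr(u64),
    Vec(Vec<u8>),
}

#[derive(Debug)]
struct EcssTcAndToken {
    tc_in_memory: TcInMemory,
    token: Option<u32>,
}

fn main() {
    let (tx, rx) = mpsc::channel::<EcssTcAndToken>();
    // The receiving service no longer needs pool access to interpret a message:
    // a TC can arrive as a shared-pool address or as an owned byte vector.
    tx.send(EcssTcAndToken {
        tc_in_memory: TcInMemory::StoreAddr(42),
        token: Some(7),
    })
    .unwrap();
    tx.send(EcssTcAndToken {
        tc_in_memory: TcInMemory::Vec(vec![0x17, 0x01]),
        token: None,
    })
    .unwrap();
    drop(tx);
    for msg in rx {
        println!("{msg:?}");
    }
}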

View File

@ -1,50 +1,54 @@
use crate::tmtc::PusTcSource; use crate::tmtc::PusTcSource;
use log::{error, info, warn}; use log::{error, info, warn};
use satrs_core::pus::scheduler::TcInfo; use satrs_core::pool::{PoolProviderMemInPlace, StaticMemoryPool};
use satrs_core::pus::scheduler::{PusScheduler, TcInfo};
use satrs_core::pus::scheduler_srv::PusService11SchedHandler; use satrs_core::pus::scheduler_srv::PusService11SchedHandler;
use satrs_core::pus::{PusPacketHandlerResult, PusServiceHandler}; use satrs_core::pus::{EcssTcInSharedStoreConverter, PusPacketHandlerResult};
pub struct Pus11Wrapper { pub struct Pus11Wrapper {
pub pus_11_handler: PusService11SchedHandler, pub pus_11_handler: PusService11SchedHandler<EcssTcInSharedStoreConverter, PusScheduler>,
pub sched_tc_pool: StaticMemoryPool,
pub tc_source_wrapper: PusTcSource, pub tc_source_wrapper: PusTcSource,
} }
impl Pus11Wrapper { impl Pus11Wrapper {
pub fn release_tcs(&mut self) { pub fn release_tcs(&mut self) {
let releaser = |enabled: bool, info: &TcInfo| -> bool { let releaser = |enabled: bool, _info: &TcInfo, tc: &[u8]| -> bool {
if enabled { if enabled {
// Transfer TC from scheduler TC pool to shared TC pool.
let released_tc_addr = self
.tc_source_wrapper
.tc_store
.pool
.write()
.expect("locking pool failed")
.add(tc)
.expect("adding TC to shared pool failed");
self.tc_source_wrapper self.tc_source_wrapper
.tc_source .tc_source
.send(info.addr()) .send(released_tc_addr)
.expect("sending TC to TC source failed"); .expect("sending TC to TC source failed");
} }
true true
}; };
let mut pool = self
.tc_source_wrapper
.tc_store
.pool
.write()
.expect("error locking pool");
self.pus_11_handler self.pus_11_handler
.scheduler_mut() .scheduler_mut()
.update_time_from_now() .update_time_from_now()
.unwrap(); .unwrap();
if let Ok(released_tcs) = self let released_tcs = self
.pus_11_handler .pus_11_handler
.scheduler_mut() .scheduler_mut()
.release_telecommands(releaser, pool.as_mut()) .release_telecommands(releaser, &mut self.sched_tc_pool)
{ .expect("releasing TCs failed");
if released_tcs > 0 { if released_tcs > 0 {
info!("{released_tcs} TC(s) released from scheduler"); info!("{released_tcs} TC(s) released from scheduler");
}
} }
} }
pub fn handle_next_packet(&mut self) -> bool { pub fn handle_next_packet(&mut self) -> bool {
match self.pus_11_handler.handle_next_packet() { match self.pus_11_handler.handle_one_tc(&mut self.sched_tc_pool) {
Ok(result) => match result { Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {} PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
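With the scheduler now owning its own `sched_tc_pool`, the releaser closure gains a third parameter carrying the raw TC bytes, so the caller can copy a released TC into the shared pool and forward its new address. A toy sketch of that contract (hypothetical simplified pools, not the satrs-core types):

// Releases all due TCs; the closure returns true if the entry should be
// removed from the scheduler's own pool after it was handled.
fn release_telecommands<F: FnMut(bool, &[u8]) -> bool>(
    sched_tc_pool: &mut Vec<Vec<u8>>,
    enabled: bool,
    mut releaser: F,
) -> usize {
    let before = sched_tc_pool.len();
    sched_tc_pool.retain(|tc| !releaser(enabled, tc));
    before - sched_tc_pool.len()
}

fn main() {
    let mut sched_tc_pool = vec![vec![0x17, 0x01], vec![0x11, 0x04]];
    let mut shared_tc_pool: Vec<Vec<u8>> = Vec::new();
    let released = release_telecommands(&mut sched_tc_pool, true, |enabled, tc| {
        if enabled {
            // Transfer from the scheduler pool to the shared TC pool.
            shared_tc_pool.push(tc.to_vec());
        }
        true
    });
    println!("{released} TC(s) released, {} forwarded", shared_tc_pool.len());
}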

View File

@ -1,24 +1,24 @@
use log::{info, warn}; use log::{info, warn};
use satrs_core::events::EventU32;
use satrs_core::params::Params; use satrs_core::params::Params;
use satrs_core::pus::test::PusService17TestHandler; use satrs_core::pus::test::PusService17TestHandler;
use satrs_core::pus::verification::FailParams; use satrs_core::pus::verification::FailParams;
use satrs_core::pus::{PusPacketHandlerResult, PusServiceHandler}; use satrs_core::pus::{EcssTcInMemConverter, PusPacketHandlerResult};
use satrs_core::spacepackets::ecss::tc::PusTcReader; use satrs_core::spacepackets::ecss::tc::PusTcReader;
use satrs_core::spacepackets::ecss::PusPacket; use satrs_core::spacepackets::ecss::PusPacket;
use satrs_core::spacepackets::time::cds::TimeProvider; use satrs_core::spacepackets::time::cds::TimeProvider;
use satrs_core::spacepackets::time::TimeWriter; use satrs_core::spacepackets::time::TimeWriter;
use satrs_core::{events::EventU32, pus::EcssTcInSharedStoreConverter};
use satrs_example::{tmtc_err, TEST_EVENT}; use satrs_example::{tmtc_err, TEST_EVENT};
use std::sync::mpsc::Sender; use std::sync::mpsc::Sender;
pub struct Service17CustomWrapper { pub struct Service17CustomWrapper {
pub pus17_handler: PusService17TestHandler, pub pus17_handler: PusService17TestHandler<EcssTcInSharedStoreConverter>,
pub test_srv_event_sender: Sender<(EventU32, Option<Params>)>, pub test_srv_event_sender: Sender<(EventU32, Option<Params>)>,
} }
impl Service17CustomWrapper { impl Service17CustomWrapper {
pub fn handle_next_packet(&mut self) -> bool { pub fn handle_next_packet(&mut self) -> bool {
let res = self.pus17_handler.handle_next_packet(); let res = self.pus17_handler.handle_one_tc();
if res.is_err() { if res.is_err() {
warn!("PUS17 handler failed with error {:?}", res.unwrap_err()); warn!("PUS17 handler failed with error {:?}", res.unwrap_err());
return true; return true;
@ -38,9 +38,13 @@ impl Service17CustomWrapper {
warn!("PUS17: Subservice {subservice} not implemented") warn!("PUS17: Subservice {subservice} not implemented")
} }
PusPacketHandlerResult::CustomSubservice(subservice, token) => { PusPacketHandlerResult::CustomSubservice(subservice, token) => {
let psb_mut = self.pus17_handler.psb_mut(); let (tc, _) = PusTcReader::new(
let buf = psb_mut.pus_buf; self.pus17_handler
let (tc, _) = PusTcReader::new(&buf).unwrap(); .service_helper
.tc_in_mem_converter
.tc_slice_raw(),
)
.unwrap();
let time_stamper = TimeProvider::from_now_with_u16_days().unwrap(); let time_stamper = TimeProvider::from_now_with_u16_days().unwrap();
let mut stamp_buf: [u8; 7] = [0; 7]; let mut stamp_buf: [u8; 7] = [0; 7];
time_stamper.write_to_bytes(&mut stamp_buf).unwrap(); time_stamper.write_to_bytes(&mut stamp_buf).unwrap();
@ -49,12 +53,17 @@ impl Service17CustomWrapper {
self.test_srv_event_sender self.test_srv_event_sender
.send((TEST_EVENT.into(), None)) .send((TEST_EVENT.into(), None))
.expect("Sending test event failed"); .expect("Sending test event failed");
let start_token = psb_mut let start_token = self
.pus17_handler
.service_helper
.common
.verification_handler .verification_handler
.get_mut() .get_mut()
.start_success(token, Some(&stamp_buf)) .start_success(token, Some(&stamp_buf))
.expect("Error sending start success"); .expect("Error sending start success");
psb_mut self.pus17_handler
.service_helper
.common
.verification_handler .verification_handler
.get_mut() .get_mut()
.completion_success(start_token, Some(&stamp_buf)) .completion_success(start_token, Some(&stamp_buf))
@ -62,7 +71,8 @@ impl Service17CustomWrapper {
} else { } else {
let fail_data = [tc.subservice()]; let fail_data = [tc.subservice()];
self.pus17_handler self.pus17_handler
.psb_mut() .service_helper
.common
.verification_handler .verification_handler
.get_mut() .get_mut()
.start_failure( .start_failure(

View File

@ -1,12 +1,11 @@
use log::warn; use log::warn;
use satrs_core::pus::ReceivesEcssPusTc; use satrs_core::pus::{EcssTcAndToken, ReceivesEcssPusTc};
use satrs_core::spacepackets::SpHeader; use satrs_core::spacepackets::SpHeader;
use std::sync::mpsc::{Receiver, SendError, Sender, TryRecvError}; use std::sync::mpsc::{Receiver, SendError, Sender, TryRecvError};
use thiserror::Error; use thiserror::Error;
use crate::pus::PusReceiver; use crate::pus::PusReceiver;
use satrs_core::pool::{SharedPool, StoreAddr, StoreError}; use satrs_core::pool::{PoolProviderMemInPlace, SharedStaticMemoryPool, StoreAddr, StoreError};
use satrs_core::pus::TcAddrWithToken;
use satrs_core::spacepackets::ecss::tc::PusTcReader; use satrs_core::spacepackets::ecss::tc::PusTcReader;
use satrs_core::spacepackets::ecss::PusPacket; use satrs_core::spacepackets::ecss::PusPacket;
use satrs_core::tmtc::tm_helper::SharedTmStore; use satrs_core::tmtc::tm_helper::SharedTmStore;
@ -35,14 +34,14 @@ pub enum MpscStoreAndSendError {
#[error("Store error: {0}")] #[error("Store error: {0}")]
Store(#[from] StoreError), Store(#[from] StoreError),
#[error("TC send error: {0}")] #[error("TC send error: {0}")]
TcSend(#[from] SendError<TcAddrWithToken>), TcSend(#[from] SendError<EcssTcAndToken>),
#[error("TMTC send error: {0}")] #[error("TMTC send error: {0}")]
TmTcSend(#[from] SendError<StoreAddr>), TmTcSend(#[from] SendError<StoreAddr>),
} }
#[derive(Clone)] #[derive(Clone)]
pub struct TcStore { pub struct TcStore {
pub pool: SharedPool, pub pool: SharedStaticMemoryPool,
} }
impl TcStore { impl TcStore {
@ -103,7 +102,6 @@ impl TmtcTask {
} }
pub fn periodic_operation(&mut self) { pub fn periodic_operation(&mut self) {
//while self.poll_tc() {}
self.poll_tc(); self.poll_tc();
} }
@ -123,7 +121,11 @@ impl TmtcTask {
match PusTcReader::new(&self.tc_buf) { match PusTcReader::new(&self.tc_buf) {
Ok((pus_tc, _)) => { Ok((pus_tc, _)) => {
self.pus_receiver self.pus_receiver
.handle_tc_packet(addr, pus_tc.service(), &pus_tc) .handle_tc_packet(
satrs_core::pus::TcInMemory::StoreAddr(addr),
pus_tc.service(),
&pus_tc,
)
.ok(); .ok();
true true
} }

View File

@@ -3,7 +3,7 @@ use std::{net::SocketAddr, sync::mpsc::Receiver};
 use log::{info, warn};
 use satrs_core::{
     hal::std::udp_server::{ReceiveResult, UdpTcServer},
-    pool::{SharedPool, StoreAddr},
+    pool::{PoolProviderMemInPlaceWithGuards, SharedStaticMemoryPool, StoreAddr},
     tmtc::CcsdsError,
 };
 
@@ -12,7 +12,7 @@ use crate::tmtc::MpscStoreAndSendError;
 pub struct UdpTmtcServer {
     pub udp_tc_server: UdpTcServer<CcsdsError<MpscStoreAndSendError>>,
     pub tm_rx: Receiver<StoreAddr>,
-    pub tm_store: SharedPool,
+    pub tm_store: SharedStaticMemoryPool,
 }
 
 impl UdpTmtcServer {
     pub fn periodic_operation(&mut self) {