this works!

2024-02-02 14:00:31 +01:00
parent 0a21fcf23a
commit c45eb1495c
4 changed files with 64 additions and 67 deletions

View File

@@ -610,16 +610,17 @@ pub mod alloc_mod {
         /// Utility method which calls [Self::telecommands_to_release] and then calls a releaser
         /// closure for each telecommand which should be released. This function will also delete
-        /// the telecommands from the holding store after calling the release closure, if the scheduler
-        /// is disabled.
+        /// the telecommands from the holding store after calling the release closure if the user
+        /// returns [true] from the release closure.
         ///
         /// # Arguments
         ///
         /// * `releaser` - Closure where the first argument is whether the scheduler is enabled and
-        ///     the second argument is the telecommand information also containing the store address.
-        ///     This closure should return whether the command should be deleted if the scheduler is
-        ///     disabled to prevent memory leaks.
-        /// * `store` - The holding store of the telecommands.
+        ///     the second argument is the telecommand information also containing the store
+        ///     address. This closure should return whether the command should be deleted. Please
+        ///     note that returning false might lead to memory leaks if the TC is not cleared from
+        ///     the store in some other way.
+        /// * `tc_store` - The holding store of the telecommands.
         pub fn release_telecommands<R: FnMut(bool, &TcInfo, &[u8]) -> bool>(
             &mut self,
             mut releaser: R,
@@ -633,7 +634,7 @@ pub mod alloc_mod {
                     let tc = tc_store.read(&info.addr).map_err(|e| (released_tcs, e))?;
                     let should_delete = releaser(self.enabled, info, tc);
                     released_tcs += 1;
-                    if should_delete && !self.is_enabled() {
+                    if should_delete {
                         let res = tc_store.delete(info.addr);
                         if res.is_err() {
                             store_error = res;
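
With this change, whether a telecommand is deleted from the holding store is decided solely by the closure's return value; the previous coupling to the scheduler's enabled state is gone. A minimal usage sketch of the new semantics follows (the `scheduler` and `tc_pool` bindings and the `forward_tc` dispatch helper are illustrative assumptions, not part of this commit):

```rust
// Sketch only: `scheduler` is a scheduler offering the API shown above,
// `tc_pool` is a TC store implementing PoolProviderMemInPlace, and
// `forward_tc` is a hypothetical function dispatching the raw TC bytes.
let released_count = scheduler
    .release_telecommands(
        |enabled, _info, tc_bytes| {
            if enabled {
                forward_tc(tc_bytes); // hypothetical dispatch helper
            }
            // Returning true requests deletion from the holding store,
            // regardless of whether the scheduler is enabled.
            true
        },
        &mut tc_pool,
    )
    .expect("releasing scheduled TCs failed");
```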
@@ -647,6 +648,32 @@ pub mod alloc_mod {
                 .map_err(|e| (released_tcs, e))
         }
 
+        /// This utility method is similar to [Self::release_telecommands] but will not perform
+        /// store deletions and thus does not require a mutable reference to the TC store.
+        ///
+        /// It will return a [Vec] of [TcInfo]s to transfer the list of released
+        /// telecommands to the user. The user should take care of deleting those telecommands
+        /// from the holding store to prevent memory leaks.
+        pub fn release_telecommands_no_deletion<R: FnMut(bool, &TcInfo, &[u8])>(
+            &mut self,
+            mut releaser: R,
+            tc_store: &(impl PoolProviderMemInPlace + ?Sized),
+        ) -> Result<Vec<TcInfo>, (Vec<TcInfo>, StoreError)> {
+            let tcs_to_release = self.telecommands_to_release();
+            let mut released_tcs = Vec::new();
+            for tc in tcs_to_release {
+                for info in tc.1 {
+                    let tc = tc_store
+                        .read(&info.addr)
+                        .map_err(|e| (released_tcs.clone(), e))?;
+                    releaser(self.is_enabled(), info, tc);
+                    released_tcs.push(*info);
+                }
+            }
+            self.tc_map.retain(|k, _| k > &self.current_time);
+            Ok(released_tcs)
+        }
+
         /// Retrieve all telecommands which should be released based on the current time.
         pub fn telecommands_to_release(&self) -> Range<'_, UnixTimestamp, Vec<TcInfo>> {
             self.tc_map.range(..=self.current_time)
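
The no-deletion variant shifts cleanup responsibility to the caller. A sketch of the intended pattern (same assumed `scheduler` and `tc_pool` bindings as above; the `addr()` accessor on `TcInfo` is assumed to expose the store address):

```rust
// Sketch only: release TCs through an immutable pool borrow, then delete
// them in a second step to avoid leaking entries in the holding store.
let released_infos = scheduler
    .release_telecommands_no_deletion(
        |_enabled, _info, _tc_bytes| {
            // Inspect or dispatch the raw TC bytes here.
        },
        &tc_pool, // note: an immutable borrow is sufficient
    )
    .expect("releasing scheduled TCs failed");
// Manual cleanup; skipping this step would leak the released TCs.
for info in &released_infos {
    tc_pool.delete(info.addr()).expect("deleting released TC failed");
}
```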

View File

@@ -15,28 +15,18 @@ use spacepackets::time::cds::TimeProvider;
 /// telecommands when applicable.
 pub struct PusService11SchedHandler<
     TcInMemConverter: EcssTcInMemConverter,
-    MemPool: PoolProviderMemInPlace,
     Scheduler: PusSchedulerInterface,
 > {
     pub service_helper: PusServiceHelper<TcInMemConverter>,
-    pub sched_tc_pool: MemPool,
     scheduler: Scheduler,
 }
 
-impl<
-        TcInMemConverter: EcssTcInMemConverter,
-        MemPool: PoolProviderMemInPlace,
-        Scheduler: PusSchedulerInterface,
-    > PusService11SchedHandler<TcInMemConverter, MemPool, Scheduler>
+impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerInterface>
+    PusService11SchedHandler<TcInMemConverter, Scheduler>
 {
-    pub fn new(
-        service_helper: PusServiceHelper<TcInMemConverter>,
-        sched_tc_pool: MemPool,
-        scheduler: Scheduler,
-    ) -> Self {
+    pub fn new(service_helper: PusServiceHelper<TcInMemConverter>, scheduler: Scheduler) -> Self {
         Self {
             service_helper,
-            sched_tc_pool,
             scheduler,
         }
     }
@@ -49,7 +39,10 @@ impl<
         &self.scheduler
     }
 
-    pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+    pub fn handle_one_tc(
+        &mut self,
+        sched_tc_pool: &mut (impl PoolProviderMemInPlace + ?Sized),
+    ) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
         let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
         if possible_packet.is_none() {
             return Ok(PusPacketHandlerResult::Empty);
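
After this refactoring the handler no longer owns the scheduler TC pool; the caller keeps it and lends it out per call. A sketch of the resulting call site (`service_helper` and `scheduler` are assumed to be constructed elsewhere, and the `StaticMemoryPool`/`StaticPoolConfig` constructor shape is assumed to match the sat-rs version of this commit):

```rust
// Sketch only: the caller now owns the scheduler TC pool.
let mut sched_tc_pool =
    StaticMemoryPool::new(StaticPoolConfig::new(vec![(16, 64), (8, 256)]));
let mut handler = PusService11SchedHandler::new(service_helper, scheduler);

// Drain all pending telecommands, lending the pool to each call.
loop {
    match handler.handle_one_tc(&mut sched_tc_pool) {
        Ok(PusPacketHandlerResult::Empty) => break, // queue drained
        Ok(_) => continue,
        Err(e) => {
            eprintln!("PUS 11 handler error: {e}");
            break;
        }
    }
}
```

A side effect of this design is that the pool only needs to be borrowed for the duration of a call, so one pool can be handed to several users sequentially instead of being locked inside a single handler.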
@@ -124,7 +117,7 @@ impl<
                 // let mut pool = self.shared_tc_store.write().expect("Locking pool failed");
                 self.scheduler
-                    .reset(&mut self.sched_tc_pool)
+                    .reset(sched_tc_pool)
                     .expect("Error resetting TC Pool");
                 self.service_helper
@@ -145,7 +138,7 @@ impl<
                 // let mut pool = self.sched_tc_pool.write().expect("locking pool failed");
                 self.scheduler
-                    .insert_wrapped_tc::<TimeProvider>(&tc, &mut self.sched_tc_pool)
+                    .insert_wrapped_tc::<TimeProvider>(&tc, sched_tc_pool)
                     .expect("insertion of activity into pool failed");
                 self.service_helper
@@ -175,7 +168,6 @@ impl<
 #[cfg(test)]
 mod tests {
     use crate::pool::StaticMemoryPool;
-    use crate::pus::scheduler::RequestId as RequestIdSched;
     use crate::{
         events::EventU32,
         pus::{