scheduler service basic tests done

Robin Müller 2024-02-03 00:02:00 +01:00
parent 195a7513ec
commit 475a587f78
Signed by: muellerr
GPG Key ID: A649FB78196E3849


@@ -2,6 +2,7 @@ use super::scheduler::PusSchedulerInterface;
use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
use crate::pool::PoolProviderMemInPlace;
use crate::pus::{PusPacketHandlerResult, PusPacketHandlingError};
use alloc::string::ToString;
use spacepackets::ecss::{scheduling, PusPacket};
use spacepackets::time::cds::TimeProvider;
@@ -53,8 +54,8 @@ impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerInterface>
.tc_in_mem_converter
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
let subservice = PusPacket::subservice(&tc);
let std_service = scheduling::Subservice::try_from(subservice);
if std_service.is_err() {
let standard_subservice = scheduling::Subservice::try_from(subservice);
if standard_subservice.is_err() {
return Ok(PusPacketHandlerResult::CustomSubservice(
subservice,
ecss_tc_and_token.token,
@@ -62,7 +63,7 @@ impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerInterface>
}
let mut partial_error = None;
let time_stamp = PusServiceBase::get_current_timestamp(&mut partial_error);
match std_service.unwrap() {
match standard_subservice.unwrap() {
scheduling::Subservice::TcEnableScheduling => {
let start_token = self
.service_helper
@@ -81,7 +82,9 @@ impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerInterface>
.completion_success(start_token, Some(&time_stamp))
.expect("Error sending completion success");
} else {
panic!("Failed to enable scheduler");
return Err(PusPacketHandlingError::Other(
"failed to enabled scheduler".to_string(),
));
}
}
scheduling::Subservice::TcDisableScheduling => {
@@ -102,7 +105,9 @@ impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerInterface>
.completion_success(start_token, Some(&time_stamp))
.expect("Error sending completion success");
} else {
panic!("Failed to disable scheduler");
return Err(PusPacketHandlingError::Other(
"failed to disable scheduler".to_string(),
));
}
}
scheduling::Subservice::TcResetScheduling => {
@@ -114,8 +119,6 @@ impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerInterface>
.start_success(ecss_tc_and_token.token, Some(&time_stamp))
.expect("Error sending start success");
// let mut pool = self.shared_tc_store.write().expect("Locking pool failed");
self.scheduler
.reset(sched_tc_pool)
.expect("Error resetting TC Pool");
@@ -167,24 +170,20 @@ impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerInterface>
#[cfg(test)]
mod tests {
use core::time::Duration;
use crate::pool::{StaticMemoryPool, StaticPoolConfig};
use crate::pus::tests::TEST_APID;
use crate::{
events::EventU32,
pus::{
scheduler::{PusScheduler, PusSchedulerInterface, TcInfo},
tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness},
verification::{RequestId, TcStateAccepted, VerificationToken},
EcssTcInSharedStoreConverter, PusPacketHandlerResult, PusPacketHandlingError,
},
use crate::pus::{
scheduler::{self, PusSchedulerInterface, TcInfo},
tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness},
verification::{RequestId, TcStateAccepted, VerificationToken},
EcssTcInSharedStoreConverter,
};
use alloc::collections::VecDeque;
use delegate::delegate;
use spacepackets::ecss::scheduling::Subservice;
use spacepackets::ecss::tc::PusTcSecondaryHeader;
use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use spacepackets::ecss::WritablePusPacket;
use spacepackets::time::TimeWriter;
use spacepackets::SpHeader;
use spacepackets::{
ecss::{tc::PusTcCreator, tm::PusTmReader},
@@ -193,22 +192,21 @@ mod tests {
use super::PusService11SchedHandler;
const TEST_EVENT_0: EventU32 = EventU32::const_new(crate::events::Severity::INFO, 5, 25);
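/// Test harness wrapping the generic shared-store service handler setup and adding a
/// dedicated static memory pool used as the scheduler TC store.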
struct Pus11HandlerWithStoreTester {
common: PusServiceHandlerWithSharedStoreCommon,
handler: PusService11SchedHandler<EcssTcInSharedStoreConverter, PusScheduler>,
handler: PusService11SchedHandler<EcssTcInSharedStoreConverter, TestScheduler>,
sched_tc_pool: StaticMemoryPool,
}
impl Pus11HandlerWithStoreTester {
pub fn new(scheduler: PusScheduler) -> Self {
pub fn new() -> Self {
let test_scheduler = TestScheduler::default();
let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)]);
let sched_tc_pool = StaticMemoryPool::new(pool_cfg.clone());
let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
Self {
common,
handler: PusService11SchedHandler::new(srv_handler, scheduler),
handler: PusService11SchedHandler::new(srv_handler, test_scheduler),
sched_tc_pool,
}
}
@@ -225,13 +223,13 @@
}
}
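/// Scheduler mock which only tracks how often it was enabled, disabled and reset and which
/// TCs were inserted, so the tests can verify that the handler drives the scheduler correctly.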
#[derive(Default)]
pub struct TestScheduler {
reset_count: u32,
enabled: bool,
enabled_count: u32,
disabled_count: u32,
inserted_tcs: VecDeque<TcInfo>,
current_addr: u64,
}
impl PusSchedulerInterface for TestScheduler {
@@ -269,15 +267,89 @@
}
}
#[test]
fn test_scheduling_tc() {
let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
.expect("Error creating scheduler");
let mut test_harness = Pus11HandlerWithStoreTester::new(scheduler);
fn generic_subservice_send(
test_harness: &mut Pus11HandlerWithStoreTester,
subservice: Subservice,
) {
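// Send a PUS TC[11, subservice] to the handler, process it once, and expect the full
// verification sequence: acceptance (TM[1,1]), start (TM[1,3]) and completion (TM[1,7]).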
let mut reply_header = SpHeader::tm_unseg(TEST_APID, 0, 0).unwrap();
let tc_header = PusTcSecondaryHeader::new_simple(11, Subservice::TcEnableSubschedule as u8);
let enable_scheduling = PusTcCreator::new(&mut reply_header, tc_header, &[], true);
let tc_header = PusTcSecondaryHeader::new_simple(11, subservice as u8);
let enable_scheduling = PusTcCreator::new(&mut reply_header, tc_header, &[0; 7], true);
let token = test_harness.send_tc(&enable_scheduling);
let request_id = token.req_id();
test_harness
.handler
.handle_one_tc(&mut test_harness.sched_tc_pool)
.unwrap();
test_harness.check_next_verification_tm(1, request_id);
test_harness.check_next_verification_tm(3, request_id);
test_harness.check_next_verification_tm(7, request_id);
}
#[test]
fn test_scheduling_enabling_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
test_harness.handler.scheduler_mut().disable();
assert!(!test_harness.handler.scheduler().is_enabled());
generic_subservice_send(&mut test_harness, Subservice::TcEnableScheduling);
assert!(test_harness.handler.scheduler().is_enabled());
assert_eq!(test_harness.handler.scheduler().enabled_count, 1);
}
#[test]
fn test_scheduling_disabling_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
test_harness.handler.scheduler_mut().enable();
assert!(test_harness.handler.scheduler().is_enabled());
generic_subservice_send(&mut test_harness, Subservice::TcDisableScheduling);
assert!(!test_harness.handler.scheduler().is_enabled());
assert_eq!(test_harness.handler.scheduler().disabled_count, 1);
}
#[test]
fn test_reset_scheduler_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
generic_subservice_send(&mut test_harness, Subservice::TcResetScheduling);
assert_eq!(test_harness.handler.scheduler().reset_count, 1);
}
#[test]
fn test_insert_activity_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
let mut reply_header = SpHeader::tm_unseg(TEST_APID, 0, 0).unwrap();
let mut sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new(&mut reply_header, sec_header, &[], true);
let req_id_ping_tc = scheduler::RequestId::from_tc(&ping_tc);
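// The application data of the insert activity TC consists of the absolute CDS release
// timestamp followed by the raw TC which should be released at that time.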
let stamper = cds::TimeProvider::from_now_with_u16_days().expect("time provider failed");
let mut sched_app_data: [u8; 64] = [0; 64];
let mut written_len = stamper.write_to_bytes(&mut sched_app_data).unwrap();
let ping_raw = ping_tc.to_vec().expect("generating raw tc failed");
sched_app_data[written_len..written_len + ping_raw.len()].copy_from_slice(&ping_raw);
written_len += ping_raw.len();
reply_header = SpHeader::tm_unseg(TEST_APID, 1, 0).unwrap();
sec_header = PusTcSecondaryHeader::new_simple(11, Subservice::TcInsertActivity as u8);
let enable_scheduling = PusTcCreator::new(
&mut reply_header,
sec_header,
&sched_app_data[..written_len],
true,
);
let token = test_harness.send_tc(&enable_scheduling);
let request_id = token.req_id();
test_harness
.handler
.handle_one_tc(&mut test_harness.sched_tc_pool)
.unwrap();
test_harness.check_next_verification_tm(1, request_id);
test_harness.check_next_verification_tm(3, request_id);
test_harness.check_next_verification_tm(7, request_id);
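// The scheduled TC should have been inserted into the mock scheduler with the
// request ID of the ping TC.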
let tc_info = test_harness
.handler
.scheduler_mut()
.inserted_tcs
.pop_front()
.unwrap();
assert_eq!(tc_info.request_id(), req_id_ping_tc);
}
}