Implementation of scheduler in PUS and TMTC handler #29

Merged
muellerr merged 13 commits from pus_schedule_implementation into main 2023-02-01 13:40:49 +01:00
3 changed files with 108 additions and 0 deletions
Showing only changes of commit dce29035a2

View File

@@ -7,6 +7,18 @@ use std::time::SystemTimeError;
use std::vec;
use std::vec::Vec;
// TODO: Move to spacepackets
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum ScheduleSubservice {
lkoester marked this conversation as resolved (Outdated)

should be removed (or put behind the std feature and `#[allow(unused_imports)]`)

EnableScheduling = 1,
DisableScheduling = 2,
ResetScheduling = 3,
InsertActivity = 4,
DeleteActivity = 5,
}
#[derive(Debug)]
lkoester marked this conversation as resolved (Outdated)

TC -> Tc for consistency

Sounds good
pub struct PusScheduler {
tc_map: BTreeMap<UnixTimestamp, Vec<StoreAddr>>,
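
The review note above suggests gating the std imports instead of removing them outright. A minimal sketch of what that could look like, assuming the crate exposes an `std` feature (the feature name is an assumption here):

```rust
// Sketch only, following the review comment above: compile the std-only imports
// just for the (assumed) `std` feature and silence the lint while they are
// still unused.
#[cfg(feature = "std")]
#[allow(unused_imports)]
use std::vec;
#[cfg(feature = "std")]
#[allow(unused_imports)]
use std::vec::Vec;
```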

View File

@@ -19,6 +19,9 @@ use satrs_core::{
use satrs_example::{hk_err, tmtc_err};
use std::collections::HashMap;
use std::sync::mpsc::Sender;
use std::time::Duration;
use satrs_core::pus::scheduling::{PusScheduler, ScheduleSubservice};
use satrs_core::spacepackets::time::{CcsdsTimeProvider, UnixTimestamp};
pub struct PusReceiver {
pub tm_helper: PusTmWithCdsShortHelper,
@@ -31,6 +34,7 @@ pub struct PusReceiver {
request_map: HashMap<u32, Sender<RequestWithToken>>,
stamper: TimeProvider,
time_stamp: [u8; 7],
scheduler: PusScheduler,
}
impl PusReceiver {
@@ -43,6 +47,7 @@ impl PusReceiver {
event_request_tx: Sender<EventRequestWithToken>,
request_map: HashMap<u32, Sender<RequestWithToken>>,
) -> Self {
let scheduler = PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
Self {
tm_helper: PusTmWithCdsShortHelper::new(apid),
tm_tx,
@@ -53,6 +58,7 @@ impl PusReceiver {
request_map,
stamper: TimeProvider::new_with_u16_days(0, 0),
time_stamp: [0; 7],
scheduler
}
}
}
@@ -78,6 +84,8 @@ impl PusServiceProvider for PusReceiver {
self.handle_event_request(pus_tc, accepted_token);
} else if service == 3 {
self.handle_hk_request(pus_tc, accepted_token);
} else if service == 11 {
self.handle_scheduled_tc(pus_tc, accepted_token);
} else {
self.update_time_stamp();
self.verif_reporter
@@ -201,6 +209,7 @@ impl PusReceiver {
));
}
}
fn handle_event_request(&mut self, pus_tc: &PusTc, token: VerificationToken<TcStateAccepted>) {
let send_start_failure = |verif_reporter: &mut StdVerifReporterWithSender,
timestamp: &[u8; 7],
@@ -273,4 +282,86 @@ impl PusReceiver {
}
}
}
fn handle_scheduled_tc(&mut self, pus_tc: &PusTc, token: VerificationToken<TcStateAccepted>) {
if pus_tc.user_data().is_none() {
self.update_time_stamp();
self.verif_reporter
.start_failure(
token,
FailParams::new(Some(&self.time_stamp), &tmtc_err::NOT_ENOUGH_APP_DATA, None),
)
.expect("Sending start failure TM failed");
return;
}
self.update_time_stamp();
match pus_tc.subservice() {
1 => {
let start_token = self
.verif_reporter
.start_success(token, Some(&self.time_stamp))
.expect("Error sending start success");
self.scheduler.enable();
if self.scheduler.is_enabled() {
self.verif_reporter
.completion_success(start_token, Some(&self.time_stamp))
.expect("Error sending completion success");
} else {
// TODO: ???
//self.verif_reporter
// .completion_failure(start_token, &tmtc_err::NOT_ENOUGH_APP_DATA, none)
}
},
2 => {
let start_token = self
.verif_reporter
.start_success(token, Some(&self.time_stamp))
.expect("Error sending start success");
self.scheduler.disable();
if !self.scheduler.is_enabled() {
self.verif_reporter
.completion_success(start_token, Some(&self.time_stamp))
.expect("Error sending completion success");
} else {
// TODO: ???
//self.verif_reporter
// .completion_failure(start_token, &tmtc_err::NOT_ENOUGH_APP_DATA, none)
}
},
3 => {
let start_token = self
.verif_reporter
.start_success(token, Some(&self.time_stamp))
.expect("Error sending start success");
self.scheduler.reset();
if !self.scheduler.is_enabled() && self.scheduler.num_scheduled_telecommands() == 0 {
self.verif_reporter
.completion_success(start_token, Some(&self.time_stamp))
.expect("Error sending completion success");
} else {
// TODO: ???
//self.verif_reporter
// .completion_failure(start_token, &tmtc_err::NOT_ENOUGH_APP_DATA, none)
}
},
4 => {
let unix_time = UnixTimestamp::new_only_seconds(self.stamper.unix_seconds());
// TODO: insert the TC via self.scheduler.insert_tc(unix_time, ..) once the
// store address of the telecommand to be scheduled is available here, and
// verify the result.
},
_ => {
self.verif_reporter
.start_failure(
token,
FailParams::new(Some(&self.time_stamp), &tmtc_err::NOT_ENOUGH_APP_DATA, None),
)
.expect("Sending start failure TM failed");
return;
}
}
}
}
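
The `TODO: ???` markers in the subservice 1 to 3 arms leave open how to report the failure case. A rough sketch of that branch, assuming `completion_failure` takes the started token plus `FailParams` analogous to `start_failure`, and reusing `NOT_ENOUGH_APP_DATA` purely as a placeholder error code:

```rust
// Sketch only, not part of this commit: report a completion failure when the
// scheduler did not reach the expected state. Assumes completion_failure takes
// FailParams like start_failure does; the error code below is a placeholder.
self.verif_reporter
    .completion_failure(
        start_token,
        FailParams::new(Some(&self.time_stamp), &tmtc_err::NOT_ENOUGH_APP_DATA, None),
    )
    .expect("Error sending completion failure");
```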

View File

@@ -14,6 +14,7 @@ use crate::pus::PusReceiver;
use crate::requests::RequestWithToken;
use satrs_core::pool::{SharedPool, StoreAddr, StoreError};
use satrs_core::pus::event_man::EventRequestWithToken;
use satrs_core::pus::scheduling::PusScheduler;
use satrs_core::pus::verification::StdVerifReporterWithSender;
use satrs_core::spacepackets::{ecss::PusPacket, tc::PusTc, tm::PusTm, SpHeader};
use satrs_core::tmtc::{
@@ -240,6 +241,10 @@ fn poll_tc_server(udp_tmtc_server: &mut UdpTmtcServer) -> bool {
}
}
fn poll_tc_scheduler(scheduler: &mut PusScheduler) {
// TODO: release due telecommands here via the scheduler's release_telecommands
// method once the release logic is in place.
}
lkoester marked this conversation as resolved (Outdated)

can be deleted

fn core_tm_handling(udp_tmtc_server: &mut UdpTmtcServer, recv_addr: &SocketAddr) {
while let Ok(addr) = udp_tmtc_server.tm_rx.try_recv() {
let mut store_lock = udp_tmtc_server