merged main

This commit is contained in:
lkoester
2023-01-26 10:58:44 +01:00
parent dce29035a2
commit 8df56ca63a
4 changed files with 329 additions and 114 deletions

View File

@@ -1,3 +1,5 @@
extern crate core;
mod ccsds;
mod hk;
mod pus;

View File

@@ -2,7 +2,7 @@ use crate::hk::{CollectionIntervalFactor, HkRequest};
use crate::requests::{Request, RequestWithToken};
use crate::tmtc::{PusTcSource, TmStore};
use satrs_core::events::EventU32;
use satrs_core::pool::StoreAddr;
use satrs_core::pool::{StoreAddr, StoreError};
use satrs_core::pus::event::Subservices;
use satrs_core::pus::event_man::{EventRequest, EventRequestWithToken};
use satrs_core::pus::hk;
@@ -13,15 +13,18 @@ use satrs_core::res_code::ResultU16;
use satrs_core::tmtc::tm_helper::PusTmWithCdsShortHelper;
use satrs_core::tmtc::{AddressableId, PusServiceProvider};
use satrs_core::{
spacepackets::ecss::PusPacket, spacepackets::tc::PusTc, spacepackets::time::cds::TimeProvider,
spacepackets::time::TimeWriter, spacepackets::SpHeader,
spacepackets, spacepackets::ecss::PusPacket, spacepackets::tc::PusTc,
spacepackets::time::cds::TimeProvider, spacepackets::time::TimeWriter, spacepackets::SpHeader,
};
use satrs_example::{hk_err, tmtc_err};
use std::collections::HashMap;
use std::rc::Rc;
use std::sync::mpsc::Sender;
use std::time::Duration;
use satrs_core::pus::scheduling::{PusScheduler, ScheduleSubservice};
use satrs_core::spacepackets::time::{CcsdsTimeProvider, UnixTimestamp};
use std::sync::{Arc, LockResult, Mutex};
pub struct PusReceiver {
pub tm_helper: PusTmWithCdsShortHelper,
@@ -34,7 +37,7 @@ pub struct PusReceiver {
request_map: HashMap<u32, Sender<RequestWithToken>>,
stamper: TimeProvider,
time_stamp: [u8; 7],
scheduler: PusScheduler,
scheduler: Arc<Mutex<PusScheduler>>,
}
impl PusReceiver {
@@ -46,8 +49,8 @@ impl PusReceiver {
tc_source: PusTcSource,
event_request_tx: Sender<EventRequestWithToken>,
request_map: HashMap<u32, Sender<RequestWithToken>>,
scheduler: Arc<Mutex<PusScheduler>>,
) -> Self {
let scheduler = PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
Self {
tm_helper: PusTmWithCdsShortHelper::new(apid),
tm_tx,
@@ -58,7 +61,7 @@ impl PusReceiver {
request_map,
stamper: TimeProvider::new_with_u16_days(0, 0),
time_stamp: [0; 7],
scheduler
scheduler,
}
}
}
@@ -299,69 +302,95 @@ impl PusReceiver {
match pus_tc.subservice() {
1 => {
let mut scheduler = self.scheduler.lock().expect("Lock of scheduler failed");
let start_token = self
.verif_reporter
.start_success(token, Some(&self.time_stamp))
.expect("Error sending start success");
self.scheduler.enable();
if self.scheduler.is_enabled() {
scheduler.enable();
if scheduler.is_enabled() {
self.verif_reporter
.completion_success(start_token, Some(&self.time_stamp))
.expect("Error sending completion success");
} else {
// TODO: ???
//self.verif_reporter
// .completion_failure(start_token, &tmtc_err::NOT_ENOUGH_APP_DATA, none)
panic!("Failed to enable scheduler");
}
},
}
2 => {
let start_token = self
.verif_reporter
.start_success(token, Some(&self.time_stamp))
.expect("Error sending start success");
self.scheduler.disable();
if ! self.scheduler.is_enabled() {
self.verif_reporter
.completion_success(start_token, Some(&self.time_stamp))
.expect("Error sending completion success");
} else {
// TODO: ???
//self.verif_reporter
// .completion_failure(start_token, &tmtc_err::NOT_ENOUGH_APP_DATA, none)
}
},
3 => {
let mut scheduler = self.scheduler.lock().expect("Lock of scheduler failed");
let start_token = self
.verif_reporter
.start_success(token, Some(&self.time_stamp))
.expect("Error sending start success");
self.scheduler.reset();
if !self.scheduler.is_enabled() && self.scheduler.num_scheduled_telecommands() == 0 {
scheduler.disable();
if !scheduler.is_enabled() {
self.verif_reporter
.completion_success(start_token, Some(&self.time_stamp))
.expect("Error sending completion success");
} else {
// TODO: ???
//self.verif_reporter
// .completion_failure(start_token, &tmtc_err::NOT_ENOUGH_APP_DATA, none)
panic!("Failed to disable scheduler");
}
},
}
3 => {
let mut scheduler = self.scheduler.lock().expect("Lock of scheduler failed");
let start_token = self
.verif_reporter
.start_success(token, Some(&self.time_stamp))
.expect("Error sending start success");
match self.tc_source.tc_store.pool.write() {
Ok(mut pool) => {
match scheduler.reset(pool.as_mut()) {
Ok(_) => {
self.verif_reporter
.completion_success(start_token, Some(&self.time_stamp))
.expect("Error sending completion success");
}
Err(_) => {
// TODO
}
}
}
Err(_) => {}
}
}
4 => {
self.update_time_stamp();
let unix_time = UnixTimestamp::new_only_seconds(self.stamper.unix_seconds());
let worked = self.scheduler.insert_tc(unix_time, );
},
let mut scheduler = self.scheduler.lock().expect("Lock of scheduler failed");
let start_token = self
.verif_reporter
.start_success(token, Some(&self.time_stamp))
.expect("Error sending start success");
match self.tc_source.tc_store.pool.write() {
Ok(mut pool) => {
scheduler.insert_wrapped_tc::<spacepackets::time::cds::TimeProvider>(
pus_tc,
pool.as_mut(),
);
}
Err(_) => {}
}
//let addr = self.tc_source.tc_store.add_pus_tc().unwrap();
//let unix_time = UnixTimestamp::new_only_seconds(self.stamper.unix_seconds());
//let worked = self.scheduler.insert_tc(unix_time, );
}
_ => {
self.verif_reporter
.start_failure(
token,
FailParams::new(Some(&self.time_stamp), &tmtc_err::NOT_ENOUGH_APP_DATA, None),
)
.expect("Sending start failure TM failed");
.start_failure(
token,
FailParams::new(
Some(&self.time_stamp),
&tmtc_err::NOT_ENOUGH_APP_DATA,
None,
),
)
.expect("Sending start failure TM failed");
return;
}
}
}
}

View File

@@ -5,7 +5,9 @@ use std::collections::HashMap;
use std::error::Error;
use std::fmt::{Display, Formatter};
use std::net::SocketAddr;
use std::rc::Rc;
use std::sync::mpsc::{Receiver, SendError, Sender, TryRecvError};
use std::sync::{Arc, LockResult, Mutex};
use std::thread;
use std::time::Duration;
@@ -42,6 +44,12 @@ pub struct TcArgs {
pub tc_receiver: Receiver<StoreAddr>,
}
impl TcArgs {
    /// Consumes the argument bundle, handing back the telecommand source and
    /// the telecommand receiver channel as separate values.
    fn split(self) -> (PusTcSource, Receiver<StoreAddr>) {
        let source = self.tc_source;
        let receiver = self.tc_receiver;
        (source, receiver)
    }
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MpscStoreAndSendError {
StoreError(StoreError),
@@ -153,6 +161,10 @@ impl ReceivesCcsdsTc for PusTcSource {
}
}
pub fn core_tmtc_task(args: OtherArgs, mut tc_args: TcArgs, tm_args: TmArgs) {
let mut scheduler = Arc::new(Mutex::new(
PusScheduler::new_with_current_init_time(Duration::from_secs(5)).unwrap(),
));
let mut sched_clone = scheduler.clone();
let mut pus_receiver = PusReceiver::new(
PUS_APID,
tm_args.tm_sink_sender,
@@ -161,6 +173,7 @@ pub fn core_tmtc_task(args: OtherArgs, mut tc_args: TcArgs, tm_args: TmArgs) {
tc_args.tc_source.clone(),
args.event_request_tx,
args.request_map,
sched_clone,
);
let ccsds_receiver = CcsdsReceiver {
tc_source: tc_args.tc_source.clone(),
@@ -174,22 +187,48 @@ pub fn core_tmtc_task(args: OtherArgs, mut tc_args: TcArgs, tm_args: TmArgs) {
tm_rx: tm_args.tm_server_rx,
tm_store: tm_args.tm_store.pool.clone(),
};
let mut test_closure = |boolvar: bool, store_addr: &StoreAddr| true;
let (mut tc_source, mut tc_receiver) = tc_args.split();
loop {
core_tmtc_loop(&mut udp_tmtc_server, &mut tc_args, &mut pus_receiver);
let mut tmtc_sched = scheduler.clone();
core_tmtc_loop(
&mut udp_tmtc_server,
&mut tc_source,
&mut tc_receiver,
&mut pus_receiver,
tmtc_sched,
);
thread::sleep(Duration::from_millis(400));
}
}
fn core_tmtc_loop(
udp_tmtc_server: &mut UdpTmtcServer,
tc_args: &mut TcArgs,
tc_source: &mut PusTcSource,
tc_receiver: &mut Receiver<StoreAddr>,
pus_receiver: &mut PusReceiver,
scheduler: Arc<Mutex<PusScheduler>>,
) {
let releaser = |enabled: bool, addr: &StoreAddr| {
tc_source.tc_source.send(*addr);
true
};
let mut scheduler = scheduler.lock().expect("Lock of scheduler failed");
match tc_source.tc_store.pool.write() {
Ok(mut pool) => match scheduler.release_telecommands(releaser, pool.as_mut()) {
Ok(_) => {}
Err(_) => {}
},
Err(_) => {}
}
while poll_tc_server(udp_tmtc_server) {}
match tc_args.tc_receiver.try_recv() {
match tc_receiver.try_recv() {
Ok(addr) => {
let pool = tc_args
.tc_source
let pool = tc_source
.tc_store
.pool
.read()
@@ -241,9 +280,6 @@ fn poll_tc_server(udp_tmtc_server: &mut UdpTmtcServer) -> bool {
}
}
// NOTE(review): dead/incomplete stub — the `match` expression has no arms and
// no braces, so this does not compile as written. The diff context suggests it
// is the deleted side of this commit: scheduler release handling now happens
// inside `core_tmtc_loop` via `scheduler.release_telecommands(releaser, pool.as_mut())`.
// Presumably safe to remove entirely — confirm no remaining callers.
fn poll_tc_scheduler(scheduler: &mut PusScheduler) {
match scheduler.release_telecommands()
}
fn core_tm_handling(udp_tmtc_server: &mut UdpTmtcServer, recv_addr: &SocketAddr) {
while let Ok(addr) = udp_tmtc_server.tm_rx.try_recv() {