Merge remote-tracking branch 'origin/main' into TargetIdWithApid

2024-01-29 23:38:32 +01:00
25 changed files with 596 additions and 318 deletions

satrs-example/src/ccsds.rs

@@ -3,6 +3,7 @@ use satrs_core::spacepackets::{CcsdsPacket, SpHeader};
use satrs_core::tmtc::{CcsdsPacketHandler, ReceivesCcsdsTc};
use satrs_example::PUS_APID;
#[derive(Clone)]
pub struct CcsdsReceiver {
pub tc_source: PusTcSource,
}
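
CcsdsReceiver now derives Clone because main.rs (see the hunks below) needs two copies of it: one for the UDP and one for the TCP packet distributor. A minimal sketch of that pattern, assuming the crate context of this example; the make_distributors helper is illustrative only, not part of the commit.

use satrs_core::tmtc::CcsdsDistributor;

use crate::ccsds::CcsdsReceiver;
use crate::tmtc::{MpscStoreAndSendError, PusTcSource};

// Illustrative helper: one receiver backs both the UDP and the TCP distributor.
fn make_distributors(
    tc_source: PusTcSource,
) -> (
    CcsdsDistributor<MpscStoreAndSendError>,
    CcsdsDistributor<MpscStoreAndSendError>,
) {
    let ccsds_receiver = CcsdsReceiver { tc_source };
    let udp_ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver.clone()));
    let tcp_ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver));
    (udp_ccsds_distributor, tcp_ccsds_distributor)
}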

satrs-example/src/main.rs

@@ -3,12 +3,16 @@ mod hk;
mod logging;
mod pus;
mod requests;
mod tcp;
mod tmtc;
//mod can;
//mod can_ids;
mod udp;
use log::{info, warn};
use satrs_core::hal::std::tcp_server::ServerConfig;
use satrs_core::hal::std::udp_server::UdpTcServer;
use crate::ccsds::CcsdsReceiver;
use crate::hk::AcsHkIds;
use crate::hk::{AcsHkIds, HkUniqueId};
use crate::logging::setup_logger;
use crate::pus::action::{Pus8Wrapper, PusService8ActionHandler};
@@ -16,9 +20,11 @@ use crate::pus::event::Pus5Wrapper;
use crate::pus::hk::{Pus3Wrapper, PusService3HkHandler};
use crate::pus::scheduler::Pus11Wrapper;
use crate::pus::test::Service17CustomWrapper;
use crate::pus::PusTcMpscRouter;
use crate::pus::{PusReceiver, PusTcMpscRouter};
use crate::requests::{Request, RequestWithToken};
use crate::tmtc::{core_tmtc_task, PusTcSource, TcArgs, TcStore, TmArgs, TmFunnel};
use crate::tcp::{SyncTcpTmSource, TcpTask};
use crate::tmtc::{PusTcSource, TcArgs, TcStore, TmArgs, TmFunnel, TmtcTask};
use crate::udp::UdpTmtcServer;
use satrs_core::event_man::{
EventManagerWithMpscQueue, MpscEventReceiver, MpscEventU32SendProvider, SendEventProvider,
};
@@ -143,7 +149,7 @@ fn main() {
let tm_args = TmArgs {
tm_store: shared_tm_store.clone(),
tm_sink_sender: tm_funnel_tx.clone(),
tm_server_rx,
tm_udp_server_rx: tm_server_rx,
};
let aocs_tm_funnel = tm_funnel_tx.clone();
@@ -270,11 +276,50 @@ fn main() {
);
let mut pus_3_wrapper = Pus3Wrapper { pus_3_handler };
info!("Starting TMTC task");
let jh0 = thread::Builder::new()
.name("TMTC".to_string())
let ccsds_receiver = CcsdsReceiver {
tc_source: tc_args.tc_source.clone(),
};
let mut tmtc_task = TmtcTask::new(tc_args, PusReceiver::new(verif_reporter, pus_router));
let udp_ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver.clone()));
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(udp_ccsds_distributor))
.expect("creating UDP TMTC server failed");
let mut udp_tmtc_server = UdpTmtcServer {
udp_tc_server,
tm_rx: tm_args.tm_udp_server_rx,
tm_store: tm_args.tm_store.clone_backing_pool(),
};
info!("Starting TMTC and UDP task");
let jh_udp_tmtc = thread::Builder::new()
.name("TMTC and UDP".to_string())
.spawn(move || {
core_tmtc_task(sock_addr, tc_args, tm_args, verif_reporter, pus_router);
info!("Running UDP server on port {SERVER_PORT}");
loop {
udp_tmtc_server.periodic_operation();
tmtc_task.periodic_operation();
thread::sleep(Duration::from_millis(400));
}
})
.unwrap();
let tcp_ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver));
let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
let mut sync_tm_tcp_source = SyncTcpTmSource::new(200);
let mut tcp_server = TcpTask::new(
tcp_server_cfg,
sync_tm_tcp_source.clone(),
tcp_ccsds_distributor,
)
.expect("tcp server creation failed");
info!("Starting TCP task");
let jh_tcp = thread::Builder::new()
.name("TCP".to_string())
.spawn(move || {
info!("Running TCP server on port {SERVER_PORT}");
loop {
tcp_server.periodic_operation();
}
})
.unwrap();
@@ -315,6 +360,7 @@ fn main() {
.tm_server_tx
.send(addr)
.expect("Sending TM to server failed");
sync_tm_tcp_source.add_tm(tm_raw);
}
}
})
@@ -386,6 +432,7 @@ fn main() {
let mut timestamp: [u8; 7] = [0; 7];
let mut time_provider = TimeProvider::new_with_u16_days(0, 0);
loop {
// TODO: Move this into a separate function/task/module..
match acs_thread_rx.try_recv() {
Ok(request) => {
info!(
@@ -488,7 +535,12 @@ fn main() {
thread::sleep(Duration::from_millis(200));
})
.unwrap();
jh0.join().expect("Joining UDP TMTC server thread failed");
jh_udp_tmtc
.join()
.expect("Joining UDP TMTC server thread failed");
jh_tcp
.join()
.expect("Joining TCP TMTC server thread failed");
jh1.join().expect("Joining TM Funnel thread failed");
jh2.join().expect("Joining Event Manager thread failed");
jh3.join().expect("Joining AOCS thread failed");

satrs-example/src/tcp.rs (new file, 115 lines)

@@ -0,0 +1,115 @@
use std::{
collections::VecDeque,
sync::{Arc, Mutex},
};
use log::{info, warn};
use satrs_core::{
hal::std::tcp_server::{ServerConfig, TcpSpacepacketsServer},
spacepackets::PacketId,
tmtc::{CcsdsDistributor, CcsdsError, TmPacketSourceCore},
};
use satrs_example::PUS_APID;
use crate::tmtc::MpscStoreAndSendError;
pub const PACKET_ID_LOOKUP: &[PacketId] = &[PacketId::const_tc(true, PUS_APID)];
#[derive(Default, Clone)]
pub struct SyncTcpTmSource {
tm_queue: Arc<Mutex<VecDeque<Vec<u8>>>>,
max_packets_stored: usize,
pub silent_packet_overwrite: bool,
}
impl SyncTcpTmSource {
pub fn new(max_packets_stored: usize) -> Self {
Self {
tm_queue: Arc::default(),
max_packets_stored,
silent_packet_overwrite: true,
}
}
pub fn add_tm(&mut self, tm: &[u8]) {
let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
if tm_queue.len() > self.max_packets_stored {
if !self.silent_packet_overwrite {
warn!("TPC TM source is full, deleting oldest packet");
}
tm_queue.pop_front();
}
tm_queue.push_back(tm.to_vec());
}
}
impl TmPacketSourceCore for SyncTcpTmSource {
type Error = ();
fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
if !tm_queue.is_empty() {
let next_vec = tm_queue.front().unwrap();
if buffer.len() < next_vec.len() {
panic!(
"provided buffer too small, must be at least {} bytes",
next_vec.len()
);
}
let next_vec = tm_queue.pop_front().unwrap();
buffer[0..next_vec.len()].copy_from_slice(&next_vec);
if next_vec.len() > 9 {
let service = next_vec[7];
let subservice = next_vec[8];
info!("Sending PUS TM[{service},{subservice}]")
} else {
info!("Sending PUS TM");
}
return Ok(next_vec.len());
}
Ok(0)
}
}
pub struct TcpTask {
server: TcpSpacepacketsServer<
(),
CcsdsError<MpscStoreAndSendError>,
SyncTcpTmSource,
CcsdsDistributor<MpscStoreAndSendError>,
>,
}
impl TcpTask {
pub fn new(
cfg: ServerConfig,
tm_source: SyncTcpTmSource,
tc_receiver: CcsdsDistributor<MpscStoreAndSendError>,
) -> Result<Self, std::io::Error> {
Ok(Self {
server: TcpSpacepacketsServer::new(
cfg,
tm_source,
tc_receiver,
Box::new(PACKET_ID_LOOKUP),
)?,
})
}
pub fn periodic_operation(&mut self) {
loop {
let result = self.server.handle_next_connection();
match result {
Ok(conn_result) => {
info!(
"Served {} TMs and {} TCs for client {:?}",
conn_result.num_sent_tms, conn_result.num_received_tcs, conn_result.addr
);
}
Err(e) => {
warn!("TCP server error: {e:?}");
}
}
}
}
}
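
The main.rs hunk above shows how these new types are wired up. For reference, a condensed sketch of that wiring, assuming the crate context of this example; the spawn_tcp_task helper and its signature are illustrative only, not part of the commit.

use std::{net::SocketAddr, thread, time::Duration};

use satrs_core::{hal::std::tcp_server::ServerConfig, tmtc::CcsdsDistributor};

use crate::ccsds::CcsdsReceiver;
use crate::tcp::{SyncTcpTmSource, TcpTask};
use crate::tmtc::PusTcSource;

// Illustrative helper: build the TCP server task and spawn its thread.
// TM producers keep the returned SyncTcpTmSource and feed it via add_tm().
fn spawn_tcp_task(
    sock_addr: SocketAddr,
    tc_source: PusTcSource,
) -> (SyncTcpTmSource, thread::JoinHandle<()>) {
    let tcp_ccsds_distributor = CcsdsDistributor::new(Box::new(CcsdsReceiver { tc_source }));
    // Same configuration values as in the main.rs hunk above.
    let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
    let sync_tm_tcp_source = SyncTcpTmSource::new(200);
    let mut tcp_server = TcpTask::new(
        tcp_server_cfg,
        sync_tm_tcp_source.clone(),
        tcp_ccsds_distributor,
    )
    .expect("tcp server creation failed");
    let jh_tcp = thread::Builder::new()
        .name("TCP".to_string())
        .spawn(move || loop {
            // Serves incoming client connections, exchanging TCs and TMs.
            tcp_server.periodic_operation();
        })
        .unwrap();
    (sync_tm_tcp_source, jh_tcp)
}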

satrs-example/src/tmtc.rs

@@ -1,26 +1,21 @@
use log::{info, warn};
use satrs_core::hal::std::udp_server::{ReceiveResult, UdpTcServer};
use std::net::SocketAddr;
use log::warn;
use satrs_core::pus::ReceivesEcssPusTc;
use satrs_core::spacepackets::SpHeader;
use std::sync::mpsc::{Receiver, SendError, Sender, TryRecvError};
use std::thread;
use std::time::Duration;
use thiserror::Error;
use crate::ccsds::CcsdsReceiver;
use crate::pus::{PusReceiver, PusTcMpscRouter};
use crate::pus::PusReceiver;
use satrs_core::pool::{SharedPool, StoreAddr, StoreError};
use satrs_core::pus::verification::StdVerifReporterWithSender;
use satrs_core::pus::{ReceivesEcssPusTc, TcAddrWithToken};
use satrs_core::pus::TcAddrWithToken;
use satrs_core::spacepackets::ecss::tc::PusTcReader;
use satrs_core::spacepackets::ecss::PusPacket;
use satrs_core::spacepackets::SpHeader;
use satrs_core::tmtc::tm_helper::SharedTmStore;
use satrs_core::tmtc::{CcsdsDistributor, CcsdsError, ReceivesCcsdsTc};
use satrs_core::tmtc::ReceivesCcsdsTc;
pub struct TmArgs {
pub tm_store: SharedTmStore,
pub tm_sink_sender: Sender<StoreAddr>,
pub tm_server_rx: Receiver<StoreAddr>,
pub tm_udp_server_rx: Receiver<StoreAddr>,
}
pub struct TcArgs {
@@ -64,12 +59,6 @@ pub struct TmFunnel {
pub tm_server_tx: Sender<StoreAddr>,
}
pub struct UdpTmtcServer {
udp_tc_server: UdpTcServer<CcsdsError<MpscStoreAndSendError>>,
tm_rx: Receiver<StoreAddr>,
tm_store: SharedPool,
}
#[derive(Clone)]
pub struct PusTcSource {
pub tc_source: Sender<StoreAddr>,
@@ -98,131 +87,60 @@ impl ReceivesCcsdsTc for PusTcSource {
}
}
pub fn core_tmtc_task(
socket_addr: SocketAddr,
mut tc_args: TcArgs,
tm_args: TmArgs,
verif_reporter: StdVerifReporterWithSender,
pus_router: PusTcMpscRouter,
) {
let mut pus_receiver = PusReceiver::new(verif_reporter, pus_router);
let ccsds_receiver = CcsdsReceiver {
tc_source: tc_args.tc_source.clone(),
};
let ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver));
let udp_tc_server = UdpTcServer::new(socket_addr, 2048, Box::new(ccsds_distributor))
.expect("creating UDP TMTC server failed");
let mut udp_tmtc_server = UdpTmtcServer {
udp_tc_server,
tm_rx: tm_args.tm_server_rx,
tm_store: tm_args.tm_store.clone_backing_pool(),
};
let mut tc_buf: [u8; 4096] = [0; 4096];
loop {
core_tmtc_loop(
&mut udp_tmtc_server,
&mut tc_args,
&mut tc_buf,
&mut pus_receiver,
);
thread::sleep(Duration::from_millis(400));
}
pub struct TmtcTask {
tc_args: TcArgs,
tc_buf: [u8; 4096],
pus_receiver: PusReceiver,
}
fn core_tmtc_loop(
udp_tmtc_server: &mut UdpTmtcServer,
tc_args: &mut TcArgs,
tc_buf: &mut [u8],
pus_receiver: &mut PusReceiver,
) {
while poll_tc_server(udp_tmtc_server) {}
match tc_args.tc_receiver.try_recv() {
Ok(addr) => {
let pool = tc_args
.tc_source
.tc_store
.pool
.read()
.expect("locking tc pool failed");
let data = pool.read(&addr).expect("reading pool failed");
tc_buf[0..data.len()].copy_from_slice(data);
drop(pool);
match PusTcReader::new(tc_buf) {
Ok((pus_tc, _)) => {
pus_receiver
.handle_tc_packet(addr, pus_tc.service(), &pus_tc)
.ok();
}
Err(e) => {
warn!("error creating PUS TC from raw data: {e}");
warn!("raw data: {tc_buf:x?}");
}
}
}
Err(e) => {
if let TryRecvError::Disconnected = e {
warn!("tmtc thread: sender disconnected")
}
impl TmtcTask {
pub fn new(tc_args: TcArgs, pus_receiver: PusReceiver) -> Self {
Self {
tc_args,
tc_buf: [0; 4096],
pus_receiver,
}
}
if let Some(recv_addr) = udp_tmtc_server.udp_tc_server.last_sender() {
core_tm_handling(udp_tmtc_server, &recv_addr);
}
}
fn poll_tc_server(udp_tmtc_server: &mut UdpTmtcServer) -> bool {
match udp_tmtc_server.udp_tc_server.try_recv_tc() {
Ok(_) => true,
Err(e) => match e {
ReceiveResult::ReceiverError(e) => match e {
CcsdsError::ByteConversionError(e) => {
warn!("packet error: {e:?}");
true
pub fn periodic_operation(&mut self) {
//while self.poll_tc() {}
self.poll_tc();
}
pub fn poll_tc(&mut self) -> bool {
match self.tc_args.tc_receiver.try_recv() {
Ok(addr) => {
let pool = self
.tc_args
.tc_source
.tc_store
.pool
.read()
.expect("locking tc pool failed");
let data = pool.read(&addr).expect("reading pool failed");
self.tc_buf[0..data.len()].copy_from_slice(data);
drop(pool);
match PusTcReader::new(&self.tc_buf) {
Ok((pus_tc, _)) => {
self.pus_receiver
.handle_tc_packet(addr, pus_tc.service(), &pus_tc)
.ok();
true
}
Err(e) => {
warn!("error creating PUS TC from raw data: {e}");
warn!("raw data: {:x?}", self.tc_buf);
true
}
}
CcsdsError::CustomError(e) => {
warn!("mpsc store and send error {e:?}");
true
}
Err(e) => match e {
TryRecvError::Empty => false,
TryRecvError::Disconnected => {
warn!("tmtc thread: sender disconnected");
false
}
},
ReceiveResult::IoError(e) => {
warn!("IO error {e}");
false
}
ReceiveResult::NothingReceived => false,
},
}
}
fn core_tm_handling(udp_tmtc_server: &mut UdpTmtcServer, recv_addr: &SocketAddr) {
while let Ok(addr) = udp_tmtc_server.tm_rx.try_recv() {
let store_lock = udp_tmtc_server.tm_store.write();
if store_lock.is_err() {
warn!("Locking TM store failed");
continue;
}
let mut store_lock = store_lock.unwrap();
let pg = store_lock.read_with_guard(addr);
let read_res = pg.read();
if read_res.is_err() {
warn!("Error reading TM pool data");
continue;
}
let buf = read_res.unwrap();
if buf.len() > 9 {
let service = buf[7];
let subservice = buf[8];
info!("Sending PUS TM[{service},{subservice}]")
} else {
info!("Sending PUS TM");
}
let result = udp_tmtc_server.udp_tc_server.socket.send_to(buf, recv_addr);
if let Err(e) = result {
warn!("Sending TM with UDP socket failed: {e}")
}
}
}
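
The old core_tmtc_task free function, which built the UDP server, the PusReceiver and the polling loop in one place, is split up here: the TC-polling half becomes TmtcTask and the UDP half moves to the new udp module. A minimal sketch of driving the new type, assuming the crate context of this example; in the commit the same thread also services the UDP server, and the run_tc_polling helper is illustrative only.

use std::{thread, time::Duration};

use satrs_core::pus::verification::StdVerifReporterWithSender;

use crate::pus::{PusReceiver, PusTcMpscRouter};
use crate::tmtc::{TcArgs, TmtcTask};

// Illustrative helper: poll TCs from the shared TC store and route them to the PUS services.
fn run_tc_polling(
    tc_args: TcArgs,
    verif_reporter: StdVerifReporterWithSender,
    pus_router: PusTcMpscRouter,
) {
    let mut tmtc_task = TmtcTask::new(tc_args, PusReceiver::new(verif_reporter, pus_router));
    loop {
        // Reads the next TC from the pool (if any) and hands it to the PusReceiver.
        tmtc_task.periodic_operation();
        thread::sleep(Duration::from_millis(400));
    }
}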

satrs-example/src/udp.rs (new file, 76 lines)

@@ -0,0 +1,76 @@
use std::{net::SocketAddr, sync::mpsc::Receiver};
use log::{info, warn};
use satrs_core::{
hal::std::udp_server::{ReceiveResult, UdpTcServer},
pool::{SharedPool, StoreAddr},
tmtc::CcsdsError,
};
use crate::tmtc::MpscStoreAndSendError;
pub struct UdpTmtcServer {
pub udp_tc_server: UdpTcServer<CcsdsError<MpscStoreAndSendError>>,
pub tm_rx: Receiver<StoreAddr>,
pub tm_store: SharedPool,
}
impl UdpTmtcServer {
pub fn periodic_operation(&mut self) {
while self.poll_tc_server() {}
if let Some(recv_addr) = self.udp_tc_server.last_sender() {
self.send_tm_to_udp_client(&recv_addr);
}
}
fn poll_tc_server(&mut self) -> bool {
match self.udp_tc_server.try_recv_tc() {
Ok(_) => true,
Err(e) => match e {
ReceiveResult::ReceiverError(e) => match e {
CcsdsError::ByteConversionError(e) => {
warn!("packet error: {e:?}");
true
}
CcsdsError::CustomError(e) => {
warn!("mpsc store and send error {e:?}");
true
}
},
ReceiveResult::IoError(e) => {
warn!("IO error {e}");
false
}
ReceiveResult::NothingReceived => false,
},
}
}
fn send_tm_to_udp_client(&mut self, recv_addr: &SocketAddr) {
while let Ok(addr) = self.tm_rx.try_recv() {
let store_lock = self.tm_store.write();
if store_lock.is_err() {
warn!("Locking TM store failed");
continue;
}
let mut store_lock = store_lock.unwrap();
let pg = store_lock.read_with_guard(addr);
let read_res = pg.read();
if read_res.is_err() {
warn!("Error reading TM pool data");
continue;
}
let buf = read_res.unwrap();
if buf.len() > 9 {
let service = buf[7];
let subservice = buf[8];
info!("Sending PUS TM[{service},{subservice}]")
} else {
info!("Sending PUS TM");
}
let result = self.udp_tc_server.socket.send_to(buf, recv_addr);
if let Err(e) = result {
warn!("Sending TM with UDP socket failed: {e}")
}
}
}
}
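
This new module takes over the UDP handling that tmtc.rs previously did inline. A minimal sketch of how it is assembled from the pieces visible in the main.rs hunk, assuming the crate context of this example; the build_udp_server helper is illustrative only.

use std::net::SocketAddr;
use std::sync::mpsc::Receiver;

use satrs_core::hal::std::udp_server::UdpTcServer;
use satrs_core::pool::StoreAddr;
use satrs_core::tmtc::tm_helper::SharedTmStore;
use satrs_core::tmtc::CcsdsDistributor;

use crate::ccsds::CcsdsReceiver;
use crate::udp::UdpTmtcServer;

// Illustrative helper: bundle the UDP TC server with the TM downlink channel and TM store.
fn build_udp_server(
    sock_addr: SocketAddr,
    ccsds_receiver: CcsdsReceiver,
    tm_store: SharedTmStore,
    tm_udp_server_rx: Receiver<StoreAddr>,
) -> UdpTmtcServer {
    let udp_ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver));
    let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(udp_ccsds_distributor))
        .expect("creating UDP TMTC server failed");
    UdpTmtcServer {
        udp_tc_server,
        tm_rx: tm_udp_server_rx,
        tm_store: tm_store.clone_backing_pool(),
    }
}

In the commit, periodic_operation() is then called every 400 ms from the shared "TMTC and UDP" thread shown in the main.rs hunk.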