More re-working

Robin Müller 2024-04-15 11:11:34 +02:00
parent 6c7f9d10fb
commit 2292b1118a
Signed by: muellerr
GPG Key ID: A649FB78196E3849
31 changed files with 715 additions and 1074 deletions


@@ -132,6 +132,7 @@ pub mod components {
         GenericPus = 2,
         Acs = 3,
         Cfdp = 4,
+        Tmtc = 5,
     }

     // Component IDs for components with the PUS APID.
@@ -150,6 +151,12 @@ pub mod components {
         Mgm0 = 0,
     }

+    #[derive(Copy, Clone, PartialEq, Eq)]
+    pub enum TmtcId {
+        UdpServer = 0,
+        TcpServer = 1,
+    }
+
     pub const PUS_ACTION_SERVICE: UniqueApidTargetId =
         UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusAction as u32);
     pub const PUS_EVENT_MANAGEMENT: UniqueApidTargetId =
@@ -166,6 +173,10 @@ pub mod components {
         UniqueApidTargetId::new(Apid::Sched as u16, 0);
     pub const MGM_HANDLER_0: UniqueApidTargetId =
         UniqueApidTargetId::new(Apid::Acs as u16, AcsId::Mgm0 as u32);
+    pub const UDP_SERVER: UniqueApidTargetId =
+        UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::UdpServer as u32);
+    pub const TCP_SERVER: UniqueApidTargetId =
+        UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::TcpServer as u32);
 }

 pub mod pool {
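
Note: the new `Tmtc` APID and `TmtcId` enum give the UDP and TCP servers their own component IDs, following the module's existing scheme of combining an APID with a per-APID unique ID. A self-contained toy sketch of that scheme (the real type is `satrs`'s `UniqueApidTargetId`; packing both values into a `u64` is an assumption made for illustration):

```rust
// Toy illustration of the APID + unique ID scheme used above. The real
// UniqueApidTargetId lives in satrs; the u64 packing here is assumed.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct UniqueApidTargetId {
    apid: u16,
    unique_id: u32,
}

impl UniqueApidTargetId {
    const fn new(apid: u16, unique_id: u32) -> Self {
        Self { apid, unique_id }
    }

    // Collapse the pair into one numeric component ID (assumed layout).
    const fn id(&self) -> u64 {
        ((self.apid as u64) << 32) | self.unique_id as u64
    }
}

#[derive(Copy, Clone)]
enum Apid {
    Tmtc = 5,
}

#[derive(Copy, Clone)]
enum TmtcId {
    UdpServer = 0,
    TcpServer = 1,
}

const UDP_SERVER: UniqueApidTargetId =
    UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::UdpServer as u32);
const TCP_SERVER: UniqueApidTargetId =
    UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::TcpServer as u32);

fn main() {
    // Both servers share the TMTC APID but remain distinguishable.
    assert_eq!(UDP_SERVER.apid, TCP_SERVER.apid);
    assert_ne!(UDP_SERVER.id(), TCP_SERVER.id());
}
```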


@@ -9,7 +9,7 @@ use log::{info, warn};
 use satrs::{
     hal::std::tcp_server::{HandledConnectionHandler, ServerConfig, TcpSpacepacketsServer},
     spacepackets::PacketId,
-    tmtc::{PacketSenderRaw, TmPacketSource},
+    tmtc::{PacketSenderRaw, PacketSource},
 };

 #[derive(Default)]
@@ -52,7 +52,7 @@ impl SyncTcpTmSource {
     }
 }

-impl TmPacketSource for SyncTcpTmSource {
+impl PacketSource for SyncTcpTmSource {
     type Error = ();

     fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
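
Note: `TmPacketSource` is renamed to `PacketSource`; the trait shape itself is unchanged by this hunk. A stand-alone toy implementation of that shape (the trait is re-declared here so the snippet compiles on its own; the real one is `satrs::tmtc::PacketSource`):

```rust
use std::collections::VecDeque;

// Re-declaration of the renamed trait's shape, as visible in the hunk.
trait PacketSource {
    type Error;
    fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error>;
}

// Toy source backed by a queue of ready-made packets.
#[derive(Default)]
struct QueuePacketSource {
    packets: VecDeque<Vec<u8>>,
}

impl PacketSource for QueuePacketSource {
    type Error = ();

    fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
        match self.packets.pop_front() {
            Some(packet) => {
                // Toy assumption: the caller's buffer is always large enough.
                buffer[..packet.len()].copy_from_slice(&packet);
                Ok(packet.len())
            }
            // No packet pending: report zero bytes retrieved.
            None => Ok(0),
        }
    }
}

fn main() {
    let mut source = QueuePacketSource::default();
    source.packets.push_back(vec![0x17, 0x2a]);
    let mut buf = [0u8; 64];
    assert_eq!(source.retrieve_packet(&mut buf).unwrap(), 2);
}
```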


@@ -3,7 +3,7 @@ use std::net::{SocketAddr, UdpSocket};
 use std::sync::mpsc;

 use log::{info, warn};
-use satrs::pus::{PusTmAsVec, PusTmInPool};
+use satrs::pus::{PacketAsVec, PacketInPool};
 use satrs::tmtc::PacketSenderRaw;
 use satrs::{
     hal::std::udp_server::{ReceiveResult, UdpTcServer},
@@ -17,7 +17,7 @@ pub trait UdpTmHandler {
 }

 pub struct StaticUdpTmHandler {
-    pub tm_rx: mpsc::Receiver<PusTmInPool>,
+    pub tm_rx: mpsc::Receiver<PacketInPool>,
     pub tm_store: SharedStaticMemoryPool,
 }

@@ -46,7 +46,7 @@ impl UdpTmHandler for StaticUdpTmHandler {
 }

 pub struct DynamicUdpTmHandler {
-    pub tm_rx: mpsc::Receiver<PusTmAsVec>,
+    pub tm_rx: mpsc::Receiver<PacketAsVec>,
 }

 impl UdpTmHandler for DynamicUdpTmHandler {
@@ -128,21 +128,25 @@ mod tests {
             SpHeader,
         },
         tmtc::PacketSenderRaw,
+        ComponentId,
     };
     use satrs_example::config::{components, OBSW_SERVER_ADDR};

     use super::*;

+    const UDP_SERVER_ID: ComponentId = 0x05;
+
     #[derive(Default, Debug)]
     pub struct TestSender {
-        tc_vec: RefCell<VecDeque<Vec<u8>>>,
+        tc_vec: RefCell<VecDeque<PacketAsVec>>,
     }

     impl PacketSenderRaw for TestSender {
         type Error = ();

-        fn send_raw_tc(&self, tc_raw: &[u8]) -> Result<(), Self::Error> {
+        fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
             let mut mut_queue = self.tc_vec.borrow_mut();
-            mut_queue.push_back(tc_raw.to_vec());
+            mut_queue.push_back(PacketAsVec::new(sender_id, tc_raw.to_vec()));
             Ok(())
         }
     }
@@ -163,7 +167,8 @@ mod tests {
         let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0);
         let test_receiver = TestSender::default();
         // let tc_queue = test_receiver.tc_vec.clone();
-        let udp_tc_server = UdpTcServer::new(sock_addr, 2048, test_receiver).unwrap();
+        let udp_tc_server =
+            UdpTcServer::new(UDP_SERVER_ID, sock_addr, 2048, test_receiver).unwrap();
         let tm_handler = TestTmHandler::default();
         let tm_handler_calls = tm_handler.addrs_to_send_to.clone();
         let mut udp_dyn_server = UdpTmtcServer {
@@ -181,7 +186,8 @@ mod tests {
         let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0);
         let test_receiver = TestSender::default();
         // let tc_queue = test_receiver.tc_vec.clone();
-        let udp_tc_server = UdpTcServer::new(sock_addr, 2048, test_receiver).unwrap();
+        let udp_tc_server =
+            UdpTcServer::new(UDP_SERVER_ID, sock_addr, 2048, test_receiver).unwrap();
         let server_addr = udp_tc_server.socket.local_addr().unwrap();
         let tm_handler = TestTmHandler::default();
         let tm_handler_calls = tm_handler.addrs_to_send_to.clone();
@@ -201,7 +207,9 @@ mod tests {
         {
             let mut queue = udp_dyn_server.udp_tc_server.tc_sender.tc_vec.borrow_mut();
             assert!(!queue.is_empty());
-            assert_eq!(queue.pop_front().unwrap(), ping_tc);
+            let packet_with_sender = queue.pop_front().unwrap();
+            assert_eq!(packet_with_sender.packet, ping_tc);
+            assert_eq!(packet_with_sender.sender_id, UDP_SERVER_ID);
         }
         {
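
Note: the updated test shows the pattern this whole commit threads through the code base: `PacketSenderRaw::send_packet` now takes the sending component's ID, and receivers keep it next to the bytes as a `PacketAsVec`. A stand-alone toy with the same shapes (`ComponentId`, `PacketAsVec` and the trait are re-declared so the snippet compiles on its own; in satrs they live in the crate root, `satrs::pus` and `satrs::tmtc` respectively):

```rust
use core::cell::RefCell;
use std::collections::VecDeque;

type ComponentId = u64;

// Mirrors satrs::pus::PacketAsVec as used in the updated test.
struct PacketAsVec {
    sender_id: ComponentId,
    packet: Vec<u8>,
}

impl PacketAsVec {
    fn new(sender_id: ComponentId, packet: Vec<u8>) -> Self {
        Self { sender_id, packet }
    }
}

// Mirrors satrs::tmtc::PacketSenderRaw after this commit.
trait PacketSenderRaw {
    type Error;
    fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error>;
}

#[derive(Default)]
struct TestSender {
    tc_vec: RefCell<VecDeque<PacketAsVec>>,
}

impl PacketSenderRaw for TestSender {
    type Error = ();

    fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
        self.tc_vec
            .borrow_mut()
            .push_back(PacketAsVec::new(sender_id, tc_raw.to_vec()));
        Ok(())
    }
}

fn main() {
    const UDP_SERVER_ID: ComponentId = 0x05;
    let sender = TestSender::default();
    sender.send_packet(UDP_SERVER_ID, &[0x18, 0x2b]).unwrap();
    let received = sender.tc_vec.borrow_mut().pop_front().unwrap();
    // The receiver can now tell which component the packet came from.
    assert_eq!(received.sender_id, UDP_SERVER_ID);
    assert_eq!(received.packet, vec![0x18, 0x2b]);
}
```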


@@ -17,7 +17,7 @@ use pus::test::create_test_service_dynamic;
 use satrs::hal::std::tcp_server::ServerConfig;
 use satrs::hal::std::udp_server::UdpTcServer;
 use satrs::request::GenericMessage;
-use satrs::tmtc::tc_helper::{PacketSenderSharedPool, SharedPacketPool};
+use satrs::tmtc::{PacketSenderWithSharedPool, SharedPacketPool};
 use satrs_example::config::pool::{create_sched_tc_pool, create_static_pools};
 use satrs_example::config::tasks::{
     FREQ_MS_AOCS, FREQ_MS_EVENT_HANDLING, FREQ_MS_PUS_STACK, FREQ_MS_UDP_TMTC,
@@ -38,9 +38,8 @@ use crate::pus::{PusTcDistributor, PusTcMpscRouter};
 use crate::requests::{CompositeRequest, GenericRequestRouter};
 use satrs::mode::ModeRequest;
 use satrs::pus::event_man::EventRequestWithToken;
-use satrs::pus::TmInSharedPoolSender;
 use satrs::spacepackets::{time::cds::CdsTime, time::TimeWriter};
-use satrs_example::config::components::MGM_HANDLER_0;
+use satrs_example::config::components::{MGM_HANDLER_0, TCP_SERVER, UDP_SERVER};
 use std::net::{IpAddr, SocketAddr};
 use std::sync::mpsc;
 use std::sync::{Arc, RwLock};
@@ -50,14 +49,16 @@ use std::time::Duration;
 #[allow(dead_code)]
 fn static_tmtc_pool_main() {
     let (tm_pool, tc_pool) = create_static_pools();
-    let shared_tm_pool = (Arc::new(RwLock::new(tm_pool)));
-    let shared_tc_pool = (Arc::new(RwLock::new(tc_pool)));
+    let shared_tm_pool = Arc::new(RwLock::new(tm_pool));
+    let shared_tc_pool = Arc::new(RwLock::new(tc_pool));
+    let shared_tm_pool_wrapper = SharedPacketPool::new(&shared_tm_pool);
+    let shared_tc_pool_wrapper = SharedPacketPool::new(&shared_tc_pool);
     let (tc_source_tx, tc_source_rx) = mpsc::sync_channel(50);
     let (tm_funnel_tx, tm_funnel_rx) = mpsc::sync_channel(50);
     let (tm_server_tx, tm_server_rx) = mpsc::sync_channel(50);
     let tm_funnel_tx_sender =
-        TmInSharedPoolSender::new(shared_tm_pool.clone(), tm_funnel_tx.clone());
+        PacketSenderWithSharedPool::new(tm_funnel_tx.clone(), shared_tm_pool_wrapper.clone());

     let (mgm_handler_composite_tx, mgm_handler_composite_rx) =
         mpsc::channel::<GenericMessage<CompositeRequest>>();
@@ -74,7 +75,7 @@ fn static_tmtc_pool_main() {
     // This helper structure is used by all telecommand providers which need to send telecommands
     // to the TC source.
-    let tc_source = PacketSenderSharedPool::new(tc_source_tx, shared_tc_pool.clone());
+    let tc_source = PacketSenderWithSharedPool::new(tc_source_tx, shared_tc_pool_wrapper.clone());

     // Create event handling components
     // These sender handles are used to send event requests, for example to enable or disable
@@ -106,7 +107,7 @@ fn static_tmtc_pool_main() {
     };
     let pus_test_service = create_test_service_static(
         tm_funnel_tx_sender.clone(),
-        shared_tc_pool.0.clone(),
+        shared_tc_pool.clone(),
         event_handler.clone_event_sender(),
         pus_test_rx,
     );
@@ -118,27 +119,27 @@ fn static_tmtc_pool_main() {
     );
     let pus_event_service = create_event_service_static(
         tm_funnel_tx_sender.clone(),
-        shared_tc_pool.0.clone(),
+        shared_tc_pool.clone(),
         pus_event_rx,
         event_request_tx,
     );
     let pus_action_service = create_action_service_static(
         tm_funnel_tx_sender.clone(),
-        shared_tc_pool.0.clone(),
+        shared_tc_pool.clone(),
         pus_action_rx,
         request_map.clone(),
         pus_action_reply_rx,
     );
     let pus_hk_service = create_hk_service_static(
         tm_funnel_tx_sender.clone(),
-        shared_tc_pool.0.clone(),
+        shared_tc_pool.clone(),
         pus_hk_rx,
         request_map.clone(),
         pus_hk_reply_rx,
     );
     let pus_mode_service = create_mode_service_static(
         tm_funnel_tx_sender.clone(),
-        shared_tc_pool.0.clone(),
+        shared_tc_pool.clone(),
         pus_mode_rx,
         request_map,
         pus_mode_reply_rx,
@@ -153,23 +154,29 @@ fn static_tmtc_pool_main() {
     );
     let mut tmtc_task = TcSourceTaskStatic::new(
-        shared_tc_pool.clone(),
+        shared_tc_pool_wrapper.clone(),
         tc_source_rx,
         PusTcDistributor::new(tm_funnel_tx_sender, pus_router),
     );
     let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
-    let udp_tc_server = UdpTcServer::new(sock_addr, 2048, tc_source.clone())
+    let udp_tc_server = UdpTcServer::new(UDP_SERVER.id(), sock_addr, 2048, tc_source.clone())
         .expect("creating UDP TMTC server failed");
     let mut udp_tmtc_server = UdpTmtcServer {
         udp_tc_server,
         tm_handler: StaticUdpTmHandler {
             tm_rx: tm_server_rx,
-            tm_store: shared_tm_pool.clone_backing_pool(),
+            tm_store: shared_tm_pool.clone(),
         },
     };
-    let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
+    let tcp_server_cfg = ServerConfig::new(
+        TCP_SERVER.id(),
+        sock_addr,
+        Duration::from_millis(400),
+        4096,
+        8192,
+    );
     let sync_tm_tcp_source = SyncTcpTmSource::new(200);
     let mut tcp_server = TcpTask::new(
         tcp_server_cfg,
@@ -180,7 +187,7 @@ fn static_tmtc_pool_main() {
         .expect("tcp server creation failed");

     let mut tm_funnel = TmFunnelStatic::new(
-        shared_tm_pool,
+        shared_tm_pool_wrapper,
         sync_tm_tcp_source,
         tm_funnel_rx,
         tm_server_tx,
@@ -379,7 +386,7 @@ fn dyn_tmtc_pool_main() {
     );

     let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
-    let udp_tc_server = UdpTcServer::new(sock_addr, 2048, tc_source_tx.clone())
+    let udp_tc_server = UdpTcServer::new(UDP_SERVER.id(), sock_addr, 2048, tc_source_tx.clone())
         .expect("creating UDP TMTC server failed");
     let mut udp_tmtc_server = UdpTmtcServer {
         udp_tc_server,
@@ -388,7 +395,13 @@ fn dyn_tmtc_pool_main() {
         },
     };
-    let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
+    let tcp_server_cfg = ServerConfig::new(
+        TCP_SERVER.id(),
+        sock_addr,
+        Duration::from_millis(400),
+        4096,
+        8192,
+    );
     let sync_tm_tcp_source = SyncTcpTmSource::new(200);
     let mut tcp_server = TcpTask::new(
         tcp_server_cfg,
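
Note: in the static-pool variant above, packet bytes never travel through the channels. The `PacketSenderWithSharedPool` writes them into the shared pool and only a small handle (sender ID plus store address, a `PacketInPool`) is sent. A toy model of that design (pool, handle and sender types are simplified stand-ins for the satrs originals):

```rust
use std::sync::{mpsc, Arc, RwLock};

type ComponentId = u64;
type StoreAddr = usize;

// Simplified stand-in for satrs::pus::PacketInPool.
#[derive(Debug)]
struct PacketInPool {
    sender_id: ComponentId,
    store_addr: StoreAddr,
}

// Toy pool: the real StaticMemoryPool hands out addresses into
// preallocated buckets instead of growing a Vec.
#[derive(Default)]
struct Pool(Vec<Vec<u8>>);

impl Pool {
    fn add(&mut self, data: &[u8]) -> StoreAddr {
        self.0.push(data.to_vec());
        self.0.len() - 1
    }
}

// Simplified stand-in for satrs::tmtc::PacketSenderWithSharedPool.
struct PacketSenderWithSharedPool {
    sender: mpsc::SyncSender<PacketInPool>,
    shared_pool: Arc<RwLock<Pool>>,
}

impl PacketSenderWithSharedPool {
    fn send_packet(&self, sender_id: ComponentId, packet: &[u8]) {
        // Store the bytes, then send only the lightweight handle.
        let store_addr = self.shared_pool.write().unwrap().add(packet);
        self.sender
            .send(PacketInPool {
                sender_id,
                store_addr,
            })
            .unwrap();
    }
}

fn main() {
    let shared_pool = Arc::new(RwLock::new(Pool::default()));
    let (tx, rx) = mpsc::sync_channel(50);
    let tm_sender = PacketSenderWithSharedPool {
        sender: tx,
        shared_pool: shared_pool.clone(),
    };
    tm_sender.send_packet(0x05, &[0x08, 0x01]);

    // Receiver side (e.g. the TM funnel): resolve the handle to bytes.
    let handle = rx.recv().unwrap();
    let bytes = shared_pool.read().unwrap().0[handle.store_addr].clone();
    println!("packet from {:#x}: {:x?}", handle.sender_id, bytes);
}
```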


@@ -12,12 +12,13 @@ use satrs::pus::verification::{
 use satrs::pus::{
     ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
     EcssTcInVecConverter, EcssTmSender, EcssTmtcError, GenericConversionError, MpscTcReceiver,
-    MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, PusReplyHandler,
-    PusServiceHelper, PusTcToRequestConverter, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
+    MpscTmAsVecSender, PacketAsVec, PusPacketHandlerResult, PusReplyHandler, PusServiceHelper,
+    PusTcToRequestConverter,
 };
 use satrs::request::{GenericMessage, UniqueApidTargetId};
 use satrs::spacepackets::ecss::tc::PusTcReader;
 use satrs::spacepackets::ecss::{EcssEnumU16, PusPacket};
+use satrs::tmtc::PacketSenderWithSharedPool;
 use satrs_example::config::components::PUS_ACTION_SERVICE;
 use satrs_example::config::tmtc_err;
 use std::sync::mpsc;
@@ -195,12 +196,12 @@ impl PusTcToRequestConverter<ActivePusActionRequestStd, ActionRequest> for Actio
 }

 pub fn create_action_service_static(
-    tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
+    tm_sender: PacketSenderWithSharedPool,
     tc_pool: SharedStaticMemoryPool,
     pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
     action_router: GenericRequestRouter,
     reply_receiver: mpsc::Receiver<GenericMessage<ActionReplyPus>>,
-) -> ActionServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
+) -> ActionServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
     let action_request_handler = PusTargetedRequestService::new(
         PusServiceHelper::new(
             PUS_ACTION_SERVICE.id(),
@@ -223,7 +224,7 @@ pub fn create_action_service_static(
 }

 pub fn create_action_service_dynamic(
-    tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
+    tm_funnel_tx: mpsc::Sender<PacketAsVec>,
     pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
     action_router: GenericRequestRouter,
     reply_receiver: mpsc::Receiver<GenericMessage<ActionReplyPus>>,


@@ -8,19 +8,20 @@ use satrs::pus::event_srv::PusEventServiceHandler;
 use satrs::pus::verification::VerificationReporter;
 use satrs::pus::{
     EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
-    EcssTmSender, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded,
-    PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
+    EcssTmSender, MpscTcReceiver, MpscTmAsVecSender, PacketAsVec, PusPacketHandlerResult,
+    PusServiceHelper,
 };
+use satrs::tmtc::PacketSenderWithSharedPool;
 use satrs_example::config::components::PUS_EVENT_MANAGEMENT;

 use super::HandlingStatus;

 pub fn create_event_service_static(
-    tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
+    tm_sender: PacketSenderWithSharedPool,
     tc_pool: SharedStaticMemoryPool,
     pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
     event_request_tx: mpsc::Sender<EventRequestWithToken>,
-) -> EventServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
+) -> EventServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
     let pus_5_handler = PusEventServiceHandler::new(
         PusServiceHelper::new(
             PUS_EVENT_MANAGEMENT.id(),
@@ -37,7 +38,7 @@ pub fn create_event_service_static(
 }

 pub fn create_event_service_dynamic(
-    tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
+    tm_funnel_tx: mpsc::Sender<PacketAsVec>,
     pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
     event_request_tx: mpsc::Sender<EventRequestWithToken>,
 ) -> EventServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {


@@ -9,13 +9,13 @@ use satrs::pus::verification::{
 use satrs::pus::{
     ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap, EcssTcAndToken,
     EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSender,
-    EcssTmtcError, GenericConversionError, MpscTcReceiver, MpscTmAsVecSender,
-    MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, PusReplyHandler, PusServiceHelper,
-    PusTcToRequestConverter, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
+    EcssTmtcError, GenericConversionError, MpscTcReceiver, MpscTmAsVecSender, PacketAsVec,
+    PusPacketHandlerResult, PusReplyHandler, PusServiceHelper, PusTcToRequestConverter,
 };
 use satrs::request::{GenericMessage, UniqueApidTargetId};
 use satrs::spacepackets::ecss::tc::PusTcReader;
 use satrs::spacepackets::ecss::{hk, PusPacket};
+use satrs::tmtc::PacketSenderWithSharedPool;
 use satrs_example::config::components::PUS_HK_SERVICE;
 use satrs_example::config::{hk_err, tmtc_err};
 use std::sync::mpsc;
@@ -232,12 +232,12 @@ impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConver
 }

 pub fn create_hk_service_static(
-    tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
+    tm_sender: PacketSenderWithSharedPool,
     tc_pool: SharedStaticMemoryPool,
     pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
     request_router: GenericRequestRouter,
     reply_receiver: mpsc::Receiver<GenericMessage<HkReply>>,
-) -> HkServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
+) -> HkServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
     let pus_3_handler = PusTargetedRequestService::new(
         PusServiceHelper::new(
             PUS_HK_SERVICE.id(),
@@ -258,7 +258,7 @@ pub fn create_hk_service_static(
 }

 pub fn create_hk_service_dynamic(
-    tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
+    tm_funnel_tx: mpsc::Sender<PacketAsVec>,
     pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
     request_router: GenericRequestRouter,
     reply_receiver: mpsc::Receiver<GenericMessage<HkReply>>,


@@ -6,7 +6,7 @@ use satrs::pus::verification::{
 };
 use satrs::pus::{
     ActiveRequestMapProvider, ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter,
-    EcssTcReceiverCore, EcssTmSender, EcssTmtcError, GenericConversionError, GenericRoutingError,
+    EcssTcReceiver, EcssTmSender, EcssTmtcError, GenericConversionError, GenericRoutingError,
     PusPacketHandlerResult, PusPacketHandlingError, PusReplyHandler, PusRequestRouter,
     PusServiceHelper, PusTcToRequestConverter, TcInMemory,
 };
@@ -185,7 +185,7 @@ pub trait TargetedPusService {
 /// 2. [Self::poll_and_check_next_reply] which tries to poll and handle one reply, covering step 6.
 /// 3. [Self::check_for_request_timeouts] which checks for request timeouts, covering step 7.
 pub struct PusTargetedRequestService<
-    TcReceiver: EcssTcReceiverCore,
+    TcReceiver: EcssTcReceiver,
     TmSender: EcssTmSender,
     TcInMemConverter: EcssTcInMemConverter,
     VerificationReporter: VerificationReportingProvider,
@@ -207,7 +207,7 @@ pub struct PusTargetedRequestService<
 }

 impl<
-        TcReceiver: EcssTcReceiverCore,
+        TcReceiver: EcssTcReceiver,
         TmSender: EcssTmSender,
         TcInMemConverter: EcssTcInMemConverter,
         VerificationReporter: VerificationReportingProvider,
@@ -461,7 +461,7 @@ pub(crate) mod tests {
     use std::time::Duration;

     use satrs::pus::test_util::TEST_COMPONENT_ID_0;
-    use satrs::pus::{MpscTmAsVecSender, PusTmAsVec, PusTmVariant};
+    use satrs::pus::{MpscTmAsVecSender, PacketAsVec, PusTmVariant};
     use satrs::request::RequestId;
     use satrs::{
         pus::{
@@ -491,7 +491,7 @@ pub(crate) mod tests {
         pub id: ComponentId,
         pub verif_reporter: TestVerificationReporter,
         pub reply_handler: ReplyHandler,
-        pub tm_receiver: mpsc::Receiver<PusTmAsVec>,
+        pub tm_receiver: mpsc::Receiver<PacketAsVec>,
         pub default_timeout: Duration,
         tm_sender: MpscTmAsVecSender,
         phantom: std::marker::PhantomData<(ActiveRequestInfo, Reply)>,
@@ -698,7 +698,7 @@ pub(crate) mod tests {
             ReplyType,
         >,
         pub request_id: Option<RequestId>,
-        pub tm_funnel_rx: mpsc::Receiver<PusTmAsVec>,
+        pub tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
         pub pus_packet_tx: mpsc::Sender<EcssTcAndToken>,
         pub reply_tx: mpsc::Sender<GenericMessage<ReplyType>>,
         pub request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,


@@ -1,5 +1,6 @@
 use derive_new::new;
 use log::{error, warn};
+use satrs::tmtc::PacketSenderWithSharedPool;
 use std::sync::mpsc;
 use std::time::Duration;
@@ -8,8 +9,8 @@ use satrs::pool::SharedStaticMemoryPool;
 use satrs::pus::verification::VerificationReporter;
 use satrs::pus::{
     DefaultActiveRequestMap, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
-    EcssTcInVecConverter, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded,
-    PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
+    EcssTcInVecConverter, MpscTcReceiver, MpscTmAsVecSender, PacketAsVec, PusPacketHandlerResult,
+    PusServiceHelper,
 };
 use satrs::request::GenericMessage;
 use satrs::{
@@ -203,12 +204,12 @@ impl PusTcToRequestConverter<ActivePusRequestStd, ModeRequest> for ModeRequestCo
 }

 pub fn create_mode_service_static(
-    tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
+    tm_sender: PacketSenderWithSharedPool,
     tc_pool: SharedStaticMemoryPool,
     pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
     mode_router: GenericRequestRouter,
     reply_receiver: mpsc::Receiver<GenericMessage<ModeReply>>,
-) -> ModeServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
+) -> ModeServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
     let mode_request_handler = PusTargetedRequestService::new(
         PusServiceHelper::new(
             PUS_MODE_SERVICE.id(),
@@ -229,7 +230,7 @@ pub fn create_mode_service_static(
 }

 pub fn create_mode_service_dynamic(
-    tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
+    tm_funnel_tx: mpsc::Sender<PacketAsVec>,
     pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
     mode_router: GenericRequestRouter,
     reply_receiver: mpsc::Receiver<GenericMessage<ModeReply>>,


@@ -9,20 +9,27 @@ use satrs::pus::scheduler_srv::PusSchedServiceHandler;
 use satrs::pus::verification::VerificationReporter;
 use satrs::pus::{
     EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
-    EcssTmSender, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded,
-    PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
+    EcssTmSender, MpscTcReceiver, MpscTmAsVecSender, PacketAsVec, PacketInPool,
+    PusPacketHandlerResult, PusServiceHelper,
 };
-use satrs::tmtc::tc_helper::{PacketSenderSharedPool, SharedPacketPool};
+use satrs::tmtc::PacketSenderWithSharedPool;
+use satrs::ComponentId;
 use satrs_example::config::components::PUS_SCHED_SERVICE;

 use super::HandlingStatus;

 pub trait TcReleaser {
-    fn release(&mut self, enabled: bool, info: &TcInfo, tc: &[u8]) -> bool;
+    fn release(&mut self, sender_id: ComponentId, enabled: bool, info: &TcInfo, tc: &[u8]) -> bool;
 }

-impl TcReleaser for PacketSenderSharedPool<SharedPacketPool> {
-    fn release(&mut self, enabled: bool, _info: &TcInfo, tc: &[u8]) -> bool {
+impl TcReleaser for PacketSenderWithSharedPool {
+    fn release(
+        &mut self,
+        sender_id: ComponentId,
+        enabled: bool,
+        _info: &TcInfo,
+        tc: &[u8],
+    ) -> bool {
         if enabled {
             let shared_pool = self.shared_pool.get_mut();
             // Transfer TC from scheduler TC pool to shared TC pool.
@@ -32,19 +39,25 @@ impl TcReleaser for PacketSenderSharedPool<SharedPacketPool> {
                 .expect("locking pool failed")
                 .add(tc)
                 .expect("adding TC to shared pool failed");
-            self.tc_source
-                .send(released_tc_addr)
+            self.sender
+                .send(PacketInPool::new(sender_id, released_tc_addr))
                 .expect("sending TC to TC source failed");
         }
         true
     }
 }

-impl TcReleaser for mpsc::Sender<Vec<u8>> {
-    fn release(&mut self, enabled: bool, _info: &TcInfo, tc: &[u8]) -> bool {
+impl TcReleaser for mpsc::Sender<PacketAsVec> {
+    fn release(
+        &mut self,
+        sender_id: ComponentId,
+        enabled: bool,
+        _info: &TcInfo,
+        tc: &[u8],
+    ) -> bool {
         if enabled {
             // Send released TC to centralized TC source.
-            self.send(tc.to_vec())
+            self.send(PacketAsVec::new(sender_id, tc.to_vec()))
                 .expect("sending TC to TC source failed");
         }
         true
@@ -69,8 +82,9 @@ impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
     SchedulingServiceWrapper<TmSender, TcInMemConverter>
 {
     pub fn release_tcs(&mut self) {
+        let id = self.pus_11_handler.service_helper.id();
         let releaser = |enabled: bool, info: &TcInfo, tc: &[u8]| -> bool {
-            self.tc_releaser.release(enabled, info, tc)
+            self.tc_releaser.release(id, enabled, info, tc)
         };

         self.pus_11_handler
@@ -118,11 +132,11 @@ impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
 }

 pub fn create_scheduler_service_static(
-    tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
-    tc_releaser: PacketSenderSharedPool<SharedPacketPool>,
+    tm_sender: PacketSenderWithSharedPool,
+    tc_releaser: PacketSenderWithSharedPool,
     pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
     sched_tc_pool: StaticMemoryPool,
-) -> SchedulingServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
+) -> SchedulingServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
     let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
         .expect("Creating PUS Scheduler failed");
     let pus_11_handler = PusSchedServiceHandler::new(
@@ -144,8 +158,8 @@ pub fn create_scheduler_service_static(
 }

 pub fn create_scheduler_service_dynamic(
-    tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
-    tc_source_sender: mpsc::Sender<Vec<u8>>,
+    tm_funnel_tx: mpsc::Sender<PacketAsVec>,
+    tc_source_sender: mpsc::Sender<PacketAsVec>,
     pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
     sched_tc_pool: StaticMemoryPool,
 ) -> SchedulingServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
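
Note: `TcReleaser::release` now also carries the sender ID, and `release_tcs` looks the wrapper's own component ID up once before capturing it in the release closure. A reduced sketch of that capture pattern (toy trait with the `TcInfo` argument omitted):

```rust
type ComponentId = u64;

// Reduced version of the TcReleaser trait above (TcInfo omitted).
trait TcReleaser {
    fn release(&mut self, sender_id: ComponentId, enabled: bool, tc: &[u8]) -> bool;
}

struct LoggingReleaser;

impl TcReleaser for LoggingReleaser {
    fn release(&mut self, sender_id: ComponentId, enabled: bool, tc: &[u8]) -> bool {
        if enabled {
            // The real impls forward to a shared pool or an mpsc channel here.
            println!("TC released by {:#x}: {:x?}", sender_id, tc);
        }
        true
    }
}

fn main() {
    // As in release_tcs(): look the ID up once, then capture it in the
    // closure handed to the PUS 11 scheduler.
    let id: ComponentId = 0x0000_0005_0000_0000;
    let mut tc_releaser = LoggingReleaser;
    let mut releaser =
        |enabled: bool, tc: &[u8]| -> bool { tc_releaser.release(id, enabled, tc) };
    assert!(releaser(true, &[0x17, 0x2a]));
}
```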


@@ -7,13 +7,13 @@ use satrs::pus::verification::{FailParams, VerificationReporter, VerificationRep
 use satrs::pus::EcssTcInSharedStoreConverter;
 use satrs::pus::{
     EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter, EcssTmSender, MpscTcReceiver,
-    MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, PusServiceHelper,
-    PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
+    MpscTmAsVecSender, PacketAsVec, PusPacketHandlerResult, PusServiceHelper,
 };
 use satrs::spacepackets::ecss::tc::PusTcReader;
 use satrs::spacepackets::ecss::PusPacket;
 use satrs::spacepackets::time::cds::CdsTime;
 use satrs::spacepackets::time::TimeWriter;
+use satrs::tmtc::PacketSenderWithSharedPool;
 use satrs_example::config::components::PUS_TEST_SERVICE;
 use satrs_example::config::{tmtc_err, TEST_EVENT};
 use std::sync::mpsc;
@@ -21,11 +21,11 @@ use std::sync::mpsc;
 use super::HandlingStatus;

 pub fn create_test_service_static(
-    tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
+    tm_sender: PacketSenderWithSharedPool,
     tc_pool: SharedStaticMemoryPool,
     event_sender: mpsc::Sender<EventMessageU32>,
     pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
-) -> TestCustomServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
+) -> TestCustomServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
     let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
         PUS_TEST_SERVICE.id(),
         pus_test_rx,
@@ -40,7 +40,7 @@ pub fn create_test_service_static(
 }

 pub fn create_test_service_dynamic(
-    tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
+    tm_funnel_tx: mpsc::Sender<PacketAsVec>,
     event_sender: mpsc::Sender<EventMessageU32>,
     pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
 ) -> TestCustomServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {


@@ -1,9 +1,12 @@
-use satrs::{pool::PoolProvider, tmtc::tc_helper::SharedPacketPool};
+use satrs::{
+    pool::PoolProvider,
+    pus::{PacketAsVec, PacketInPool},
+    tmtc::{PacketSenderWithSharedPool, SharedPacketPool},
+};
 use std::sync::mpsc::{self, TryRecvError};

 use satrs::{
-    pool::StoreAddr,
-    pus::{MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded},
+    pus::MpscTmAsVecSender,
     spacepackets::ecss::{tc::PusTcReader, PusPacket},
 };
@@ -12,16 +15,16 @@ use crate::pus::PusTcDistributor;
 // TC source components where static pools are the backing memory of the received telecommands.
 pub struct TcSourceTaskStatic {
     shared_tc_pool: SharedPacketPool,
-    tc_receiver: mpsc::Receiver<StoreAddr>,
+    tc_receiver: mpsc::Receiver<PacketInPool>,
     tc_buf: [u8; 4096],
-    pus_receiver: PusTcDistributor<MpscTmInSharedPoolSenderBounded>,
+    pus_receiver: PusTcDistributor<PacketSenderWithSharedPool>,
 }

 impl TcSourceTaskStatic {
     pub fn new(
         shared_tc_pool: SharedPacketPool,
-        tc_receiver: mpsc::Receiver<StoreAddr>,
-        pus_receiver: PusTcDistributor<MpscTmInSharedPoolSenderBounded>,
+        tc_receiver: mpsc::Receiver<PacketInPool>,
+        pus_receiver: PusTcDistributor<PacketSenderWithSharedPool>,
     ) -> Self {
         Self {
             shared_tc_pool,
@@ -37,20 +40,20 @@ impl TcSourceTaskStatic {
     pub fn poll_tc(&mut self) -> bool {
         match self.tc_receiver.try_recv() {
-            Ok(addr) => {
+            Ok(packet_in_pool) => {
                 let pool = self
                     .shared_tc_pool
                     .0
                     .read()
                     .expect("locking tc pool failed");
-                pool.read(&addr, &mut self.tc_buf)
+                pool.read(&packet_in_pool.store_addr, &mut self.tc_buf)
                     .expect("reading pool failed");
                 drop(pool);
                 match PusTcReader::new(&self.tc_buf) {
                     Ok((pus_tc, _)) => {
                         self.pus_receiver
                             .handle_tc_packet(
-                                satrs::pus::TcInMemory::StoreAddr(addr),
+                                satrs::pus::TcInMemory::StoreAddr(packet_in_pool.store_addr),
                                 pus_tc.service(),
                                 &pus_tc,
                             )
@@ -77,13 +80,13 @@ impl TcSourceTaskStatic {
 // TC source components where the heap is the backing memory of the received telecommands.
 pub struct TcSourceTaskDynamic {
-    pub tc_receiver: mpsc::Receiver<Vec<u8>>,
+    pub tc_receiver: mpsc::Receiver<PacketAsVec>,
     pus_receiver: PusTcDistributor<MpscTmAsVecSender>,
 }

 impl TcSourceTaskDynamic {
     pub fn new(
-        tc_receiver: mpsc::Receiver<Vec<u8>>,
+        tc_receiver: mpsc::Receiver<PacketAsVec>,
         pus_receiver: PusTcDistributor<MpscTmAsVecSender>,
     ) -> Self {
         Self {
@@ -99,11 +102,11 @@ impl TcSourceTaskDynamic {
     pub fn poll_tc(&mut self) -> bool {
         // Right now, we only expect PUS packets.
         match self.tc_receiver.try_recv() {
-            Ok(tc) => match PusTcReader::new(&tc) {
+            Ok(packet_as_vec) => match PusTcReader::new(&packet_as_vec.packet) {
                 Ok((pus_tc, _)) => {
                     self.pus_receiver
                         .handle_tc_packet(
-                            satrs::pus::TcInMemory::Vec(tc.clone()),
+                            satrs::pus::TcInMemory::Vec(packet_as_vec.packet.clone()),
                             pus_tc.service(),
                             &pus_tc,
                         )
@@ -112,7 +115,7 @@ impl TcSourceTaskDynamic {
                 }
                 Err(e) => {
                     log::warn!("error creating PUS TC from raw data: {e}");
-                    log::warn!("raw data: {:x?}", tc);
+                    log::warn!("raw data: {:x?}", packet_as_vec.packet);
                     true
                 }
             },
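
Note: both TC source tasks keep their non-blocking polling shape; only the channel payload changed from raw `Vec<u8>`/`StoreAddr` to the ID-carrying wrappers. A toy of the dynamic task's polling loop (stand-in `PacketAsVec`; the real task parses the bytes with `PusTcReader` and forwards them to the `PusTcDistributor`):

```rust
use std::sync::mpsc::{self, TryRecvError};

type ComponentId = u64;

struct PacketAsVec {
    sender_id: ComponentId,
    packet: Vec<u8>,
}

// Toy of TcSourceTaskDynamic::poll_tc: drain one packet without blocking
// and report whether anything was handled.
fn poll_tc(tc_receiver: &mpsc::Receiver<PacketAsVec>) -> bool {
    match tc_receiver.try_recv() {
        Ok(packet_as_vec) => {
            // Stand-in for the PusTcReader + PusTcDistributor handling.
            println!(
                "TC from {:#x}: {:x?}",
                packet_as_vec.sender_id, packet_as_vec.packet
            );
            true
        }
        Err(TryRecvError::Empty) => false,
        Err(TryRecvError::Disconnected) => {
            eprintln!("all TC senders disconnected");
            false
        }
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    tx.send(PacketAsVec {
        sender_id: 0x05,
        packet: vec![0x17, 0x2a],
    })
    .unwrap();
    assert!(poll_tc(&rx));
    assert!(!poll_tc(&rx));
}
```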


@@ -4,7 +4,6 @@ use std::{
 };

 use log::info;
-use satrs::pus::{PusTmAsVec, PusTmInPool};
 use satrs::{
     pool::PoolProvider,
     seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore},
@@ -13,7 +12,10 @@ use satrs::{
         time::cds::MIN_CDS_FIELD_LEN,
         CcsdsPacket,
     },
-    tmtc::tm_helper::SharedTmPool,
+};
+use satrs::{
+    pus::{PacketAsVec, PacketInPool},
+    tmtc::SharedPacketPool,
 };

 use crate::interface::tcp::SyncTcpTmSource;
@@ -77,17 +79,17 @@ impl TmFunnelCommon {
 pub struct TmFunnelStatic {
     common: TmFunnelCommon,
-    shared_tm_store: SharedTmPool,
-    tm_funnel_rx: mpsc::Receiver<PusTmInPool>,
-    tm_server_tx: mpsc::SyncSender<PusTmInPool>,
+    shared_tm_store: SharedPacketPool,
+    tm_funnel_rx: mpsc::Receiver<PacketInPool>,
+    tm_server_tx: mpsc::SyncSender<PacketInPool>,
 }

 impl TmFunnelStatic {
     pub fn new(
-        shared_tm_store: SharedTmPool,
+        shared_tm_store: SharedPacketPool,
         sync_tm_tcp_source: SyncTcpTmSource,
-        tm_funnel_rx: mpsc::Receiver<PusTmInPool>,
-        tm_server_tx: mpsc::SyncSender<PusTmInPool>,
+        tm_funnel_rx: mpsc::Receiver<PacketInPool>,
+        tm_server_tx: mpsc::SyncSender<PacketInPool>,
     ) -> Self {
         Self {
             common: TmFunnelCommon::new(sync_tm_tcp_source),
@@ -101,7 +103,7 @@ impl TmFunnelStatic {
         if let Ok(pus_tm_in_pool) = self.tm_funnel_rx.recv() {
             // Read the TM, set sequence counter and message counter, and finally update
             // the CRC.
-            let shared_pool = self.shared_tm_store.clone_backing_pool();
+            let shared_pool = self.shared_tm_store.0.clone();
             let mut pool_guard = shared_pool.write().expect("Locking TM pool failed");
             let mut tm_copy = Vec::new();
             pool_guard
@@ -124,15 +126,15 @@ impl TmFunnelStatic {
 pub struct TmFunnelDynamic {
     common: TmFunnelCommon,
-    tm_funnel_rx: mpsc::Receiver<PusTmAsVec>,
-    tm_server_tx: mpsc::Sender<PusTmAsVec>,
+    tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
+    tm_server_tx: mpsc::Sender<PacketAsVec>,
 }

 impl TmFunnelDynamic {
     pub fn new(
         sync_tm_tcp_source: SyncTcpTmSource,
-        tm_funnel_rx: mpsc::Receiver<PusTmAsVec>,
-        tm_server_tx: mpsc::Sender<PusTmAsVec>,
+        tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
+        tm_server_tx: mpsc::Sender<PacketAsVec>,
     ) -> Self {
         Self {
             common: TmFunnelCommon::new(sync_tm_tcp_source),


@@ -1,4 +1,4 @@
-use crate::{tmtc::PacketSenderRaw, ValidatorU16Id};
+use crate::{tmtc::PacketSenderRaw, ComponentId, ValidatorU16Id};

 /// This function parses a given buffer for tightly packed CCSDS space packets. It uses the
 /// [spacepackets::PacketId] field of the CCSDS packets to detect the start of a CCSDS space packet
@@ -10,11 +10,12 @@ use crate::{tmtc::PacketSenderRaw, ValidatorU16Id};
 /// index for future write operations will be written to the `next_write_idx` argument.
 ///
 /// The parser will forward all packets which were decoded successfully to the given
-/// `packet_sender` and return the number of packets found. If the [PacketSenderRaw::send_raw_tc]
+/// `packet_sender` and return the number of packets found. If the [PacketSenderRaw::send_packet]
 /// calls fails, the error will be returned.
 pub fn parse_buffer_for_ccsds_space_packets<SendError>(
     buf: &mut [u8],
     packet_id_validator: &(impl ValidatorU16Id + ?Sized),
+    sender_id: ComponentId,
     packet_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized),
     next_write_idx: &mut usize,
 ) -> Result<u32, SendError> {
@@ -32,7 +33,10 @@ pub fn parse_buffer_for_ccsds_space_packets<SendError>(
                 u16::from_be_bytes(buf[current_idx + 4..current_idx + 6].try_into().unwrap());
             let packet_size = length_field + 7;
             if (current_idx + packet_size as usize) <= buf_len {
-                packet_sender.send_raw_tc(&buf[current_idx..current_idx + packet_size as usize])?;
+                packet_sender.send_packet(
+                    sender_id,
+                    &buf[current_idx..current_idx + packet_size as usize],
+                )?;
                 packets_found += 1;
             } else {
                 // Move packet to start of buffer if applicable.
@@ -56,10 +60,11 @@ mod tests {
         PacketId, SpHeader,
     };

-    use crate::encoding::tests::TcCacher;
+    use crate::{encoding::tests::TcCacher, ComponentId};

     use super::parse_buffer_for_ccsds_space_packets;

+    const PARSER_ID: ComponentId = 0x05;
     const TEST_APID_0: u16 = 0x02;
     const TEST_APID_1: u16 = 0x10;
     const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0);
@@ -79,6 +84,7 @@ mod tests {
         let parse_result = parse_buffer_for_ccsds_space_packets(
             &mut buffer,
             valid_packet_ids.as_slice(),
+            PARSER_ID,
             &tc_cacher,
             &mut next_write_idx,
         );
@@ -87,7 +93,9 @@ mod tests {
         assert_eq!(parsed_packets, 1);
         let mut queue = tc_cacher.tc_queue.borrow_mut();
         assert_eq!(queue.len(), 1);
-        assert_eq!(queue.pop_front().unwrap(), buffer[..packet_len]);
+        let packet_with_sender = queue.pop_front().unwrap();
+        assert_eq!(packet_with_sender.packet, buffer[..packet_len]);
+        assert_eq!(packet_with_sender.sender_id, PARSER_ID);
     }

     #[test]
@@ -108,6 +116,7 @@ mod tests {
         let parse_result = parse_buffer_for_ccsds_space_packets(
             &mut buffer,
             valid_packet_ids.as_slice(),
+            PARSER_ID,
             &tc_cacher,
             &mut next_write_idx,
         );
@@ -116,9 +125,13 @@ mod tests {
         assert_eq!(parsed_packets, 2);
         let mut queue = tc_cacher.tc_queue.borrow_mut();
         assert_eq!(queue.len(), 2);
-        assert_eq!(queue.pop_front().unwrap(), buffer[..packet_len_ping]);
+        let packet_with_addr = queue.pop_front().unwrap();
+        assert_eq!(packet_with_addr.packet, buffer[..packet_len_ping]);
+        assert_eq!(packet_with_addr.sender_id, PARSER_ID);
+        let packet_with_addr = queue.pop_front().unwrap();
+        assert_eq!(packet_with_addr.sender_id, PARSER_ID);
         assert_eq!(
-            queue.pop_front().unwrap(),
+            packet_with_addr.packet,
             buffer[packet_len_ping..packet_len_ping + packet_len_action]
         );
     }
@@ -142,6 +155,7 @@ mod tests {
         let parse_result = parse_buffer_for_ccsds_space_packets(
             &mut buffer,
             valid_packet_ids.as_slice(),
+            PARSER_ID,
             &tc_cacher,
             &mut next_write_idx,
         );
@@ -150,9 +164,11 @@ mod tests {
         assert_eq!(parsed_packets, 2);
         let mut queue = tc_cacher.tc_queue.borrow_mut();
         assert_eq!(queue.len(), 2);
-        assert_eq!(queue.pop_front().unwrap(), buffer[..packet_len_ping]);
+        let packet_with_addr = queue.pop_front().unwrap();
+        assert_eq!(packet_with_addr.packet, buffer[..packet_len_ping]);
+        let packet_with_addr = queue.pop_front().unwrap();
         assert_eq!(
-            queue.pop_front().unwrap(),
+            packet_with_addr.packet,
             buffer[packet_len_ping..packet_len_ping + packet_len_action]
         );
     }
@@ -176,6 +192,7 @@ mod tests {
         let parse_result = parse_buffer_for_ccsds_space_packets(
             &mut buffer[..packet_len_ping + packet_len_action - 4],
             valid_packet_ids.as_slice(),
+            PARSER_ID,
             &tc_cacher,
             &mut next_write_idx,
         );
@@ -204,6 +221,7 @@ mod tests {
         let parse_result = parse_buffer_for_ccsds_space_packets(
             &mut buffer[..packet_len_ping - 4],
             valid_packet_ids.as_slice(),
+            PARSER_ID,
             &tc_cacher,
             &mut next_write_idx,
         );
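
Note: for reference, the length arithmetic visible in the parser hunk: the CCSDS packet length field in bytes 4..6 of the primary header stores the total packet length minus 7, hence `packet_size = length_field + 7`. A tiny runnable check:

```rust
// The parser reads the length field exactly like the hunk above does and
// derives the full packet size from it.
fn ccsds_packet_size(header: &[u8; 6]) -> usize {
    let length_field = u16::from_be_bytes(header[4..6].try_into().unwrap());
    length_field as usize + 7
}

fn main() {
    // A 13-byte PUS ping TC carries a length field of 6 (13 - 7).
    let primary_header = [0x18, 0x02, 0xc0, 0x00, 0x00, 0x06];
    assert_eq!(ccsds_packet_size(&primary_header), 13);
}
```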


@ -1,4 +1,4 @@
use crate::tmtc::PacketSenderRaw; use crate::{tmtc::PacketSenderRaw, ComponentId};
use cobs::{decode_in_place, encode, max_encoding_length}; use cobs::{decode_in_place, encode, max_encoding_length};
/// This function encodes the given packet with COBS and also wraps the encoded packet with /// This function encodes the given packet with COBS and also wraps the encoded packet with
@ -57,6 +57,7 @@ pub fn encode_packet_with_cobs(
/// The parser will write all packets which were decoded successfully to the given `tc_receiver`. /// The parser will write all packets which were decoded successfully to the given `tc_receiver`.
pub fn parse_buffer_for_cobs_encoded_packets<SendError>( pub fn parse_buffer_for_cobs_encoded_packets<SendError>(
buf: &mut [u8], buf: &mut [u8],
sender_id: ComponentId,
packet_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized), packet_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized),
next_write_idx: &mut usize, next_write_idx: &mut usize,
) -> Result<u32, SendError> { ) -> Result<u32, SendError> {
@ -78,8 +79,10 @@ pub fn parse_buffer_for_cobs_encoded_packets<SendError>(
let decode_result = decode_in_place(&mut buf[start_index_packet..i]); let decode_result = decode_in_place(&mut buf[start_index_packet..i]);
if let Ok(packet_len) = decode_result { if let Ok(packet_len) = decode_result {
packets_found += 1; packets_found += 1;
packet_sender packet_sender.send_packet(
.send_raw_tc(&buf[start_index_packet..start_index_packet + packet_len])?; sender_id,
&buf[start_index_packet..start_index_packet + packet_len],
)?;
} }
start_found = false; start_found = false;
} else { } else {
@ -100,10 +103,15 @@ pub fn parse_buffer_for_cobs_encoded_packets<SendError>(
pub(crate) mod tests { pub(crate) mod tests {
use cobs::encode; use cobs::encode;
use crate::encoding::tests::{encode_simple_packet, TcCacher, INVERTED_PACKET, SIMPLE_PACKET}; use crate::{
encoding::tests::{encode_simple_packet, TcCacher, INVERTED_PACKET, SIMPLE_PACKET},
ComponentId,
};
use super::parse_buffer_for_cobs_encoded_packets; use super::parse_buffer_for_cobs_encoded_packets;
const PARSER_ID: ComponentId = 0x05;
#[test] #[test]
fn test_parsing_simple_packet() { fn test_parsing_simple_packet() {
let test_sender = TcCacher::default(); let test_sender = TcCacher::default();
@ -113,6 +121,7 @@ pub(crate) mod tests {
let mut next_read_idx = 0; let mut next_read_idx = 0;
let packets = parse_buffer_for_cobs_encoded_packets( let packets = parse_buffer_for_cobs_encoded_packets(
&mut encoded_buf[0..current_idx], &mut encoded_buf[0..current_idx],
PARSER_ID,
&test_sender, &test_sender,
&mut next_read_idx, &mut next_read_idx,
) )
@ -121,7 +130,7 @@ pub(crate) mod tests {
let queue = test_sender.tc_queue.borrow(); let queue = test_sender.tc_queue.borrow();
assert_eq!(queue.len(), 1); assert_eq!(queue.len(), 1);
let packet = &queue[0]; let packet = &queue[0];
assert_eq!(packet, &SIMPLE_PACKET); assert_eq!(packet.packet, &SIMPLE_PACKET);
} }
#[test] #[test]
@ -140,6 +149,7 @@ pub(crate) mod tests {
let mut next_read_idx = 0; let mut next_read_idx = 0;
let packets = parse_buffer_for_cobs_encoded_packets( let packets = parse_buffer_for_cobs_encoded_packets(
&mut encoded_buf[0..current_idx], &mut encoded_buf[0..current_idx],
PARSER_ID,
&test_sender, &test_sender,
&mut next_read_idx, &mut next_read_idx,
) )
@ -148,9 +158,9 @@ pub(crate) mod tests {
let queue = test_sender.tc_queue.borrow(); let queue = test_sender.tc_queue.borrow();
assert_eq!(queue.len(), 2); assert_eq!(queue.len(), 2);
let packet0 = &queue[0]; let packet0 = &queue[0];
assert_eq!(packet0, &SIMPLE_PACKET); assert_eq!(packet0.packet, &SIMPLE_PACKET);
let packet1 = &queue[1]; let packet1 = &queue[1];
assert_eq!(packet1, &INVERTED_PACKET); assert_eq!(packet1.packet, &INVERTED_PACKET);
} }
#[test] #[test]
@ -163,6 +173,7 @@ pub(crate) mod tests {
let packets = parse_buffer_for_cobs_encoded_packets( let packets = parse_buffer_for_cobs_encoded_packets(
// Cut off the sentinel byte at the end. // Cut off the sentinel byte at the end.
&mut encoded_buf[0..current_idx - 1], &mut encoded_buf[0..current_idx - 1],
PARSER_ID,
&test_sender, &test_sender,
&mut next_read_idx, &mut next_read_idx,
) )
@ -196,6 +207,7 @@ pub(crate) mod tests {
let packets = parse_buffer_for_cobs_encoded_packets( let packets = parse_buffer_for_cobs_encoded_packets(
// Cut off the sentinel byte at the end. // Cut off the sentinel byte at the end.
&mut encoded_buf[0..current_idx - cut_off], &mut encoded_buf[0..current_idx - cut_off],
PARSER_ID,
&test_sender, &test_sender,
&mut next_write_idx, &mut next_write_idx,
) )
@ -203,7 +215,7 @@ pub(crate) mod tests {
assert_eq!(packets, 1); assert_eq!(packets, 1);
let queue = test_sender.tc_queue.borrow(); let queue = test_sender.tc_queue.borrow();
assert_eq!(queue.len(), 1); assert_eq!(queue.len(), 1);
assert_eq!(&queue[0], &SIMPLE_PACKET); assert_eq!(&queue[0].packet, &SIMPLE_PACKET);
assert_eq!(next_write_idx, next_expected_write_idx); assert_eq!(next_write_idx, next_expected_write_idx);
assert_eq!(encoded_buf[..next_expected_write_idx], expected_at_start); assert_eq!(encoded_buf[..next_expected_write_idx], expected_at_start);
} }
@ -237,6 +249,7 @@ pub(crate) mod tests {
let packets = parse_buffer_for_cobs_encoded_packets( let packets = parse_buffer_for_cobs_encoded_packets(
// Cut off the sentinel byte at the end. // Cut off the sentinel byte at the end.
&mut encoded_buf[0..current_idx], &mut encoded_buf[0..current_idx],
PARSER_ID,
&test_sender, &test_sender,
&mut next_write_idx, &mut next_write_idx,
) )
@ -244,7 +257,7 @@ pub(crate) mod tests {
assert_eq!(packets, 1); assert_eq!(packets, 1);
let queue = test_sender.tc_queue.borrow_mut(); let queue = test_sender.tc_queue.borrow_mut();
assert_eq!(queue.len(), 1); assert_eq!(queue.len(), 1);
assert_eq!(&queue[0], &SIMPLE_PACKET); assert_eq!(&queue[0].packet, &SIMPLE_PACKET);
assert_eq!(next_write_idx, 1); assert_eq!(next_write_idx, 1);
assert_eq!(encoded_buf[0], 0); assert_eq!(encoded_buf[0], 0);
} }
@ -257,6 +270,7 @@ pub(crate) mod tests {
let packets = parse_buffer_for_cobs_encoded_packets( let packets = parse_buffer_for_cobs_encoded_packets(
// Cut off the sentinel byte at the end. // Cut off the sentinel byte at the end.
&mut all_zeroes, &mut all_zeroes,
PARSER_ID,
&test_sender, &test_sender,
&mut next_write_idx, &mut next_write_idx,
) )
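
Taken together, the parser now threads a component ID through to the packet sender, and every parsed packet arrives wrapped together with that ID. A minimal roundtrip sketch of the new call shape, assuming the mpsc sender implementation and the PacketAsVec wrapper from this commit (PARSER_ID and the payload bytes are made up):

use std::sync::mpsc;

use satrs::encoding::parse_buffer_for_cobs_encoded_packets;
use satrs::pus::PacketAsVec;
use satrs::ComponentId;

// Hypothetical ID of the component doing the parsing.
const PARSER_ID: ComponentId = 0x09;

#[test]
fn cobs_parsing_roundtrip_sketch() {
    // 0-sentinel framed COBS encoding of the payload [1, 2, 3].
    let mut encoded_buf: [u8; 6] = [0, 4, 1, 2, 3, 0];
    let (tc_sender, tc_receiver) = mpsc::channel::<PacketAsVec>();
    let mut next_read_idx = 0;
    let num_packets = parse_buffer_for_cobs_encoded_packets(
        &mut encoded_buf,
        PARSER_ID, // attached as the sender ID of every parsed packet
        &tc_sender,
        &mut next_read_idx,
    )
    .expect("COBS parsing failed");
    assert_eq!(num_packets, 1);
    let packet_with_sender = tc_receiver.try_recv().expect("no packet received");
    assert_eq!(packet_with_sender.sender_id, PARSER_ID);
    assert_eq!(packet_with_sender.packet, &[1, 2, 3]);
}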


@ -8,9 +8,9 @@ pub use crate::encoding::cobs::{encode_packet_with_cobs, parse_buffer_for_cobs_e
pub(crate) mod tests { pub(crate) mod tests {
use core::cell::RefCell; use core::cell::RefCell;
use alloc::{collections::VecDeque, vec::Vec}; use alloc::collections::VecDeque;
use crate::tmtc::PacketSenderRaw; use crate::{pus::PacketAsVec, tmtc::PacketSenderRaw, ComponentId};
use super::cobs::encode_packet_with_cobs; use super::cobs::encode_packet_with_cobs;
@ -19,15 +19,15 @@ pub(crate) mod tests {
#[derive(Default)] #[derive(Default)]
pub(crate) struct TcCacher { pub(crate) struct TcCacher {
pub(crate) tc_queue: RefCell<VecDeque<Vec<u8>>>, pub(crate) tc_queue: RefCell<VecDeque<PacketAsVec>>,
} }
impl PacketSenderRaw for TcCacher { impl PacketSenderRaw for TcCacher {
type Error = (); type Error = ();
fn send_raw_tc(&self, tc_raw: &[u8]) -> Result<(), Self::Error> { fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
let mut mut_queue = self.tc_queue.borrow_mut(); let mut mut_queue = self.tc_queue.borrow_mut();
mut_queue.push_back(tc_raw.to_vec()); mut_queue.push_back(PacketAsVec::new(sender_id, tc_raw.to_vec()));
Ok(()) Ok(())
} }
} }
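
For illustration, the reworked trait can be exercised directly. A short sketch using the TcCacher above (it relies on the module imports shown in this hunk; the sender ID is an arbitrary placeholder):

#[test]
fn tc_cacher_records_sender_id() {
    // Hypothetical ID of the sending component.
    const SENDER_ID: ComponentId = 0x04;
    let cacher = TcCacher::default();
    cacher
        .send_packet(SENDER_ID, &[0x17, 0x2A])
        .expect("sending failed");
    let queue = cacher.tc_queue.borrow();
    // Each cached entry now records which component sent the raw packet.
    assert_eq!(queue[0].sender_id, SENDER_ID);
    assert_eq!(queue[0].packet, &[0x17, 0x2A]);
}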


@ -11,11 +11,12 @@ use std::vec::Vec;
use crate::encoding::parse_buffer_for_cobs_encoded_packets; use crate::encoding::parse_buffer_for_cobs_encoded_packets;
use crate::tmtc::PacketSenderRaw; use crate::tmtc::PacketSenderRaw;
use crate::tmtc::TmPacketSource; use crate::tmtc::PacketSource;
use crate::hal::std::tcp_server::{ use crate::hal::std::tcp_server::{
ConnectionResult, ServerConfig, TcpTcParser, TcpTmSender, TcpTmtcError, TcpTmtcGenericServer, ConnectionResult, ServerConfig, TcpTcParser, TcpTmSender, TcpTmtcError, TcpTmtcGenericServer,
}; };
use crate::ComponentId;
use super::tcp_server::HandledConnectionHandler; use super::tcp_server::HandledConnectionHandler;
use super::tcp_server::HandledConnectionInfo; use super::tcp_server::HandledConnectionInfo;
@ -28,6 +29,7 @@ impl<TmError, TcError: 'static> TcpTcParser<TmError, TcError> for CobsTcParser {
fn handle_tc_parsing( fn handle_tc_parsing(
&mut self, &mut self,
tc_buffer: &mut [u8], tc_buffer: &mut [u8],
sender_id: ComponentId,
tc_sender: &(impl PacketSenderRaw<Error = TcError> + ?Sized), tc_sender: &(impl PacketSenderRaw<Error = TcError> + ?Sized),
conn_result: &mut HandledConnectionInfo, conn_result: &mut HandledConnectionInfo,
current_write_idx: usize, current_write_idx: usize,
@ -35,6 +37,7 @@ impl<TmError, TcError: 'static> TcpTcParser<TmError, TcError> for CobsTcParser {
) -> Result<(), TcpTmtcError<TmError, TcError>> { ) -> Result<(), TcpTmtcError<TmError, TcError>> {
conn_result.num_received_tcs += parse_buffer_for_cobs_encoded_packets( conn_result.num_received_tcs += parse_buffer_for_cobs_encoded_packets(
&mut tc_buffer[..current_write_idx], &mut tc_buffer[..current_write_idx],
sender_id,
tc_sender, tc_sender,
next_write_idx, next_write_idx,
) )
@ -62,7 +65,7 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
fn handle_tm_sending( fn handle_tm_sending(
&mut self, &mut self,
tm_buffer: &mut [u8], tm_buffer: &mut [u8],
tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized), tm_source: &mut (impl PacketSource<Error = TmError> + ?Sized),
conn_result: &mut HandledConnectionInfo, conn_result: &mut HandledConnectionInfo,
stream: &mut TcpStream, stream: &mut TcpStream,
) -> Result<bool, TcpTmtcError<TmError, TcError>> { ) -> Result<bool, TcpTmtcError<TmError, TcError>> {
@ -101,7 +104,7 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
/// Telemetry will be encoded with the COBS protocol using [cobs::encode] and additionally /// Telemetry will be encoded with the COBS protocol using [cobs::encode] and additionally
/// wrapped with the sentinel value 0 as the packet delimiter before being sent back to /// wrapped with the sentinel value 0 as the packet delimiter before being sent back to
/// the client. Please note that the server will send as much data as it can retrieve from the /// the client. Please note that the server will send as much data as it can retrieve from the
/// [TmPacketSource] in its current implementation. /// [PacketSource] in its current implementation.
/// ///
/// Using a framing protocol like COBS imposes minimal restrictions on the type of TMTC data /// Using a framing protocol like COBS imposes minimal restrictions on the type of TMTC data
/// exchanged while also allowing packets with flexible size and a reliable way to reconstruct full /// exchanged while also allowing packets with flexible size and a reliable way to reconstruct full
@ -115,7 +118,7 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
/// The [TCP integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/tcp_servers.rs) /// The [TCP integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/tcp_servers.rs)
/// test also serves as the example application for this module. /// test also serves as the example application for this module.
pub struct TcpTmtcInCobsServer< pub struct TcpTmtcInCobsServer<
TmSource: TmPacketSource<Error = TmError>, TmSource: PacketSource<Error = TmError>,
TcSender: PacketSenderRaw<Error = SendError>, TcSender: PacketSenderRaw<Error = SendError>,
HandledConnection: HandledConnectionHandler, HandledConnection: HandledConnectionHandler,
TmError, TmError,
@ -133,7 +136,7 @@ pub struct TcpTmtcInCobsServer<
} }
impl< impl<
TmSource: TmPacketSource<Error = TmError>, TmSource: PacketSource<Error = TmError>,
TcReceiver: PacketSenderRaw<Error = TcError>, TcReceiver: PacketSenderRaw<Error = TcError>,
HandledConnection: HandledConnectionHandler, HandledConnection: HandledConnectionHandler,
TmError: 'static, TmError: 'static,
@ -208,13 +211,17 @@ mod tests {
tests::{ConnectionFinishedHandler, SyncTmSource}, tests::{ConnectionFinishedHandler, SyncTmSource},
ConnectionResult, ServerConfig, ConnectionResult, ServerConfig,
}, },
pus::PacketAsVec,
queue::GenericSendError, queue::GenericSendError,
ComponentId,
}; };
use alloc::{sync::Arc, vec::Vec}; use alloc::sync::Arc;
use cobs::encode; use cobs::encode;
use super::TcpTmtcInCobsServer; use super::TcpTmtcInCobsServer;
const TCP_SERVER_ID: ComponentId = 0x05;
fn encode_simple_packet(encoded_buf: &mut [u8], current_idx: &mut usize) { fn encode_simple_packet(encoded_buf: &mut [u8], current_idx: &mut usize) {
encode_packet(&SIMPLE_PACKET, encoded_buf, current_idx) encode_packet(&SIMPLE_PACKET, encoded_buf, current_idx)
} }
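
The framing used throughout these tests follows the scheme described in the module documentation: an opening 0 sentinel, the COBS-encoded payload, and a closing 0 sentinel. A hand-rolled sketch of what the elided encode_packet helper presumably does:

use cobs::encode;

// Sketch: frame a packet as 0-sentinel | COBS-encoded payload | 0-sentinel.
fn frame_with_cobs(packet: &[u8], encoded_buf: &mut [u8], current_idx: &mut usize) {
    encoded_buf[*current_idx] = 0; // opening delimiter
    *current_idx += 1;
    *current_idx += encode(packet, &mut encoded_buf[*current_idx..]);
    encoded_buf[*current_idx] = 0; // closing delimiter
    *current_idx += 1;
}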
@ -233,18 +240,18 @@ mod tests {
fn generic_tmtc_server( fn generic_tmtc_server(
addr: &SocketAddr, addr: &SocketAddr,
tc_sender: mpsc::Sender<Vec<u8>>, tc_sender: mpsc::Sender<PacketAsVec>,
tm_source: SyncTmSource, tm_source: SyncTmSource,
stop_signal: Option<Arc<AtomicBool>>, stop_signal: Option<Arc<AtomicBool>>,
) -> TcpTmtcInCobsServer< ) -> TcpTmtcInCobsServer<
SyncTmSource, SyncTmSource,
mpsc::Sender<Vec<u8>>, mpsc::Sender<PacketAsVec>,
ConnectionFinishedHandler, ConnectionFinishedHandler,
(), (),
GenericSendError, GenericSendError,
> { > {
TcpTmtcInCobsServer::new( TcpTmtcInCobsServer::new(
ServerConfig::new(*addr, Duration::from_millis(2), 1024, 1024), ServerConfig::new(TCP_SERVER_ID, *addr, Duration::from_millis(2), 1024, 1024),
tm_source, tm_source,
tc_sender, tc_sender,
ConnectionFinishedHandler::default(), ConnectionFinishedHandler::default(),
@ -302,8 +309,8 @@ mod tests {
panic!("connection was not handled properly"); panic!("connection was not handled properly");
} }
// Check that the packet was received and decoded successfully. // Check that the packet was received and decoded successfully.
let packet = tc_receiver.recv().expect("receiving TC failed"); let packet_with_sender = tc_receiver.recv().expect("receiving TC failed");
assert_eq!(packet, &SIMPLE_PACKET); assert_eq!(packet_with_sender.packet, &SIMPLE_PACKET);
assert!(matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty))); assert!(matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)));
} }
@ -410,9 +417,11 @@ mod tests {
panic!("connection was not handled properly"); panic!("connection was not handled properly");
} }
// Check that the packet was received and decoded successfully. // Check that the packet was received and decoded successfully.
let packet = tc_receiver.recv().expect("receiving TC failed"); let packet_with_sender = tc_receiver.recv().expect("receiving TC failed");
let packet = &packet_with_sender.packet;
assert_eq!(packet, &SIMPLE_PACKET); assert_eq!(packet, &SIMPLE_PACKET);
let packet = tc_receiver.recv().expect("receiving TC failed"); let packet_with_sender = tc_receiver.recv().expect("receiving TC failed");
let packet = &packet_with_sender.packet;
assert_eq!(packet, &INVERTED_PACKET); assert_eq!(packet, &INVERTED_PACKET);
assert!(matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty))); assert!(matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)));
} }


@ -13,7 +13,8 @@ use std::net::SocketAddr;
// use std::net::{SocketAddr, TcpStream}; // use std::net::{SocketAddr, TcpStream};
use std::thread; use std::thread;
use crate::tmtc::{PacketSenderRaw, TmPacketSource}; use crate::tmtc::{PacketSenderRaw, PacketSource};
use crate::ComponentId;
use thiserror::Error; use thiserror::Error;
// Re-export the TMTC in COBS server. // Re-export the TMTC in COBS server.
@ -30,7 +31,7 @@ pub use crate::hal::std::tcp_spacepackets_server::{
/// * `inner_loop_delay` - If a client connects for a longer period, but no TC is received or /// * `inner_loop_delay` - If a client connects for a longer period, but no TC is received or
/// no TM needs to be sent, the TCP server will delay for the specified amount of time /// no TM needs to be sent, the TCP server will delay for the specified amount of time
/// to reduce CPU load. /// to reduce CPU load.
/// * `tm_buffer_size` - Size of the TM buffer used to read TM from the [TmPacketSource] and /// * `tm_buffer_size` - Size of the TM buffer used to read TM from the [PacketSource] and
/// for encoding that data. This buffer should be large enough to hold the maximum expected /// for encoding that data. This buffer should be large enough to hold the maximum expected
/// TM size read from the packet source. /// TM size read from the packet source.
/// * `tc_buffer_size` - Size of the TC buffer used to read encoded telecommands sent from /// * `tc_buffer_size` - Size of the TC buffer used to read encoded telecommands sent from
@ -46,6 +47,7 @@ pub use crate::hal::std::tcp_spacepackets_server::{
/// default. /// default.
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub struct ServerConfig { pub struct ServerConfig {
pub id: ComponentId,
pub addr: SocketAddr, pub addr: SocketAddr,
pub inner_loop_delay: Duration, pub inner_loop_delay: Duration,
pub tm_buffer_size: usize, pub tm_buffer_size: usize,
@ -56,12 +58,14 @@ pub struct ServerConfig {
impl ServerConfig { impl ServerConfig {
pub fn new( pub fn new(
id: ComponentId,
addr: SocketAddr, addr: SocketAddr,
inner_loop_delay: Duration, inner_loop_delay: Duration,
tm_buffer_size: usize, tm_buffer_size: usize,
tc_buffer_size: usize, tc_buffer_size: usize,
) -> Self { ) -> Self {
Self { Self {
id,
addr, addr,
inner_loop_delay, inner_loop_delay,
tm_buffer_size, tm_buffer_size,
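
With the mandatory id field now leading the parameter list, building a configuration looks as follows (a sketch; the component ID, port and buffer sizes are placeholder values):

use core::time::Duration;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

use satrs::hal::std::tcp_server::ServerConfig;
use satrs::ComponentId;

// Hypothetical component ID for the server.
const TCP_SERVER_ID: ComponentId = 0x05;

fn build_server_config() -> ServerConfig {
    let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7301);
    // Order: component ID, bind address, inner loop delay, TM and TC buffer sizes.
    ServerConfig::new(TCP_SERVER_ID, addr, Duration::from_millis(2), 1024, 1024)
}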
@ -122,6 +126,7 @@ pub trait TcpTcParser<TmError, SendError> {
fn handle_tc_parsing( fn handle_tc_parsing(
&mut self, &mut self,
tc_buffer: &mut [u8], tc_buffer: &mut [u8],
sender_id: ComponentId,
tc_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized), tc_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized),
conn_result: &mut HandledConnectionInfo, conn_result: &mut HandledConnectionInfo,
current_write_idx: usize, current_write_idx: usize,
@ -130,14 +135,14 @@ pub trait TcpTcParser<TmError, SendError> {
} }
/// Generic sender abstraction for an object which can pull telemetry from a given TM source /// Generic sender abstraction for an object which can pull telemetry from a given TM source
/// using a [TmPacketSource] and then send it back to a client using a given [TcpStream]. /// using a [PacketSource] and then send it back to a client using a given [TcpStream].
/// The concrete implementation can also perform any encoding steps which are necessary before /// The concrete implementation can also perform any encoding steps which are necessary before
/// sending back the data to a client. /// sending back the data to a client.
pub trait TcpTmSender<TmError, TcError> { pub trait TcpTmSender<TmError, TcError> {
fn handle_tm_sending( fn handle_tm_sending(
&mut self, &mut self,
tm_buffer: &mut [u8], tm_buffer: &mut [u8],
tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized), tm_source: &mut (impl PacketSource<Error = TmError> + ?Sized),
conn_result: &mut HandledConnectionInfo, conn_result: &mut HandledConnectionInfo,
stream: &mut TcpStream, stream: &mut TcpStream,
) -> Result<bool, TcpTmtcError<TmError, TcError>>; ) -> Result<bool, TcpTmtcError<TmError, TcError>>;
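
To make the abstraction concrete, here is a sketch of a custom sender which forwards packets to the client without additional framing. It assumes the TmError and Io variants of TcpTmtcError and the num_sent_tms counter on HandledConnectionInfo, neither of which is shown in this diff; the RawTmSender name is made up.

use std::io::Write;

struct RawTmSender;

impl<TmError, TcError> TcpTmSender<TmError, TcError> for RawTmSender {
    fn handle_tm_sending(
        &mut self,
        tm_buffer: &mut [u8],
        tm_source: &mut (impl PacketSource<Error = TmError> + ?Sized),
        conn_result: &mut HandledConnectionInfo,
        stream: &mut TcpStream,
    ) -> Result<bool, TcpTmtcError<TmError, TcError>> {
        let mut tm_was_sent = false;
        loop {
            // Pull the next TM packet; a length of 0 means the source is empty.
            let read_len = tm_source
                .retrieve_packet(tm_buffer)
                .map_err(TcpTmtcError::TmError)?;
            if read_len == 0 {
                return Ok(tm_was_sent);
            }
            // Assumes an Io variant with a From<std::io::Error> conversion.
            stream.write_all(&tm_buffer[..read_len])?;
            conn_result.num_sent_tms += 1;
            tm_was_sent = true;
        }
    }
}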
@ -152,7 +157,7 @@ pub trait TcpTmSender<TmError, TcError> {
/// 1. [TcpTcParser] to parse for telecommands from the raw bytestream received from a client. /// 1. [TcpTcParser] to parse for telecommands from the raw bytestream received from a client.
/// 2. Parsed telecommands will be sent using the [PacketSenderRaw] object. /// 2. Parsed telecommands will be sent using the [PacketSenderRaw] object.
/// 3. [TcpTmSender] to send telemetry pulled from a TM source back to the client. /// 3. [TcpTmSender] to send telemetry pulled from a TM source back to the client.
/// 4. [TmPacketSource] as a generic TM source used by the [TcpTmSender]. /// 4. [PacketSource] as a generic TM source used by the [TcpTmSender].
/// ///
/// It is possible to specify custom abstractions to build a dedicated TCP TMTC server without /// It is possible to specify custom abstractions to build a dedicated TCP TMTC server without
/// having to re-implement common logic. /// having to re-implement common logic.
@ -161,7 +166,7 @@ pub trait TcpTmSender<TmError, TcError> {
/// ///
/// 1. [TcpTmtcInCobsServer] to exchange TMTC wrapped inside the COBS framing protocol. /// 1. [TcpTmtcInCobsServer] to exchange TMTC wrapped inside the COBS framing protocol.
pub struct TcpTmtcGenericServer< pub struct TcpTmtcGenericServer<
TmSource: TmPacketSource<Error = TmError>, TmSource: PacketSource<Error = TmError>,
TcSender: PacketSenderRaw<Error = TcSendError>, TcSender: PacketSenderRaw<Error = TcSendError>,
TmSender: TcpTmSender<TmError, TcSendError>, TmSender: TcpTmSender<TmError, TcSendError>,
TcParser: TcpTcParser<TmError, TcSendError>, TcParser: TcpTcParser<TmError, TcSendError>,
@ -169,12 +174,13 @@ pub struct TcpTmtcGenericServer<
TmError, TmError,
TcSendError, TcSendError,
> { > {
pub id: ComponentId,
pub finished_handler: HandledConnection, pub finished_handler: HandledConnection,
pub(crate) listener: TcpListener, pub(crate) listener: TcpListener,
pub(crate) inner_loop_delay: Duration, pub(crate) inner_loop_delay: Duration,
pub(crate) tm_source: TmSource, pub(crate) tm_source: TmSource,
pub(crate) tm_buffer: Vec<u8>, pub(crate) tm_buffer: Vec<u8>,
pub(crate) tc_receiver: TcSender, pub(crate) tc_sender: TcSender,
pub(crate) tc_buffer: Vec<u8>, pub(crate) tc_buffer: Vec<u8>,
poll: Poll, poll: Poll,
events: Events, events: Events,
@ -184,7 +190,7 @@ pub struct TcpTmtcGenericServer<
} }
impl< impl<
TmSource: TmPacketSource<Error = TmError>, TmSource: PacketSource<Error = TmError>,
TcSender: PacketSenderRaw<Error = TcSendError>, TcSender: PacketSenderRaw<Error = TcSendError>,
TmSender: TcpTmSender<TmError, TcSendError>, TmSender: TcpTmSender<TmError, TcSendError>,
TcParser: TcpTcParser<TmError, TcSendError>, TcParser: TcpTcParser<TmError, TcSendError>,
@ -248,6 +254,7 @@ impl<
.register(&mut mio_listener, Token(0), Interest::READABLE)?; .register(&mut mio_listener, Token(0), Interest::READABLE)?;
Ok(Self { Ok(Self {
id: cfg.id,
tc_handler: tc_parser, tc_handler: tc_parser,
tm_handler: tm_sender, tm_handler: tm_sender,
poll, poll,
@ -256,7 +263,7 @@ impl<
inner_loop_delay: cfg.inner_loop_delay, inner_loop_delay: cfg.inner_loop_delay,
tm_source, tm_source,
tm_buffer: vec![0; cfg.tm_buffer_size], tm_buffer: vec![0; cfg.tm_buffer_size],
tc_receiver, tc_sender: tc_receiver,
tc_buffer: vec![0; cfg.tc_buffer_size], tc_buffer: vec![0; cfg.tc_buffer_size],
stop_signal, stop_signal,
finished_handler, finished_handler,
@ -343,7 +350,8 @@ impl<
if current_write_idx > 0 { if current_write_idx > 0 {
self.tc_handler.handle_tc_parsing( self.tc_handler.handle_tc_parsing(
&mut self.tc_buffer, &mut self.tc_buffer,
&self.tc_receiver, self.id,
&self.tc_sender,
&mut connection_result, &mut connection_result,
current_write_idx, current_write_idx,
&mut next_write_idx, &mut next_write_idx,
@ -357,7 +365,8 @@ impl<
if current_write_idx == self.tc_buffer.capacity() { if current_write_idx == self.tc_buffer.capacity() {
self.tc_handler.handle_tc_parsing( self.tc_handler.handle_tc_parsing(
&mut self.tc_buffer, &mut self.tc_buffer,
&self.tc_receiver, self.id,
&self.tc_sender,
&mut connection_result, &mut connection_result,
current_write_idx, current_write_idx,
&mut next_write_idx, &mut next_write_idx,
@ -371,7 +380,8 @@ impl<
std::io::ErrorKind::WouldBlock | std::io::ErrorKind::TimedOut => { std::io::ErrorKind::WouldBlock | std::io::ErrorKind::TimedOut => {
self.tc_handler.handle_tc_parsing( self.tc_handler.handle_tc_parsing(
&mut self.tc_buffer, &mut self.tc_buffer,
&self.tc_receiver, self.id,
&self.tc_sender,
&mut connection_result, &mut connection_result,
current_write_idx, current_write_idx,
&mut next_write_idx, &mut next_write_idx,
@ -424,7 +434,7 @@ pub(crate) mod tests {
use alloc::{collections::VecDeque, sync::Arc, vec::Vec}; use alloc::{collections::VecDeque, sync::Arc, vec::Vec};
use crate::tmtc::TmPacketSource; use crate::tmtc::PacketSource;
use super::*; use super::*;
@ -440,7 +450,7 @@ pub(crate) mod tests {
} }
} }
impl TmPacketSource for SyncTmSource { impl PacketSource for SyncTmSource {
type Error = (); type Error = ();
fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> { fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {


@ -6,8 +6,8 @@ use std::{io::Write, net::SocketAddr};
use crate::{ use crate::{
encoding::parse_buffer_for_ccsds_space_packets, encoding::parse_buffer_for_ccsds_space_packets,
tmtc::{PacketSenderRaw, TmPacketSource}, tmtc::{PacketSenderRaw, PacketSource},
ValidatorU16Id, ComponentId, ValidatorU16Id,
}; };
use super::tcp_server::{ use super::tcp_server::{
@ -32,6 +32,7 @@ impl<PacketIdChecker: ValidatorU16Id, TmError, TcError: 'static> TcpTcParser<TmE
fn handle_tc_parsing( fn handle_tc_parsing(
&mut self, &mut self,
tc_buffer: &mut [u8], tc_buffer: &mut [u8],
sender_id: ComponentId,
tc_sender: &(impl PacketSenderRaw<Error = TcError> + ?Sized), tc_sender: &(impl PacketSenderRaw<Error = TcError> + ?Sized),
conn_result: &mut HandledConnectionInfo, conn_result: &mut HandledConnectionInfo,
current_write_idx: usize, current_write_idx: usize,
@ -41,6 +42,7 @@ impl<PacketIdChecker: ValidatorU16Id, TmError, TcError: 'static> TcpTcParser<TmE
conn_result.num_received_tcs += parse_buffer_for_ccsds_space_packets( conn_result.num_received_tcs += parse_buffer_for_ccsds_space_packets(
&mut tc_buffer[..current_write_idx], &mut tc_buffer[..current_write_idx],
&self.packet_id_lookup, &self.packet_id_lookup,
sender_id,
tc_sender, tc_sender,
next_write_idx, next_write_idx,
) )
@ -57,7 +59,7 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for SpacepacketsTmSender {
fn handle_tm_sending( fn handle_tm_sending(
&mut self, &mut self,
tm_buffer: &mut [u8], tm_buffer: &mut [u8],
tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized), tm_source: &mut (impl PacketSource<Error = TmError> + ?Sized),
conn_result: &mut HandledConnectionInfo, conn_result: &mut HandledConnectionInfo,
stream: &mut TcpStream, stream: &mut TcpStream,
) -> Result<bool, TcpTmtcError<TmError, TcError>> { ) -> Result<bool, TcpTmtcError<TmError, TcError>> {
@ -93,7 +95,7 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for SpacepacketsTmSender {
/// The [TCP server integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/tcp_servers.rs) /// The [TCP server integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/tcp_servers.rs)
/// also serves as the example application for this module. /// also serves as the example application for this module.
pub struct TcpSpacepacketsServer< pub struct TcpSpacepacketsServer<
TmSource: TmPacketSource<Error = TmError>, TmSource: PacketSource<Error = TmError>,
TcSender: PacketSenderRaw<Error = SendError>, TcSender: PacketSenderRaw<Error = SendError>,
PacketIdChecker: ValidatorU16Id, PacketIdChecker: ValidatorU16Id,
HandledConnection: HandledConnectionHandler, HandledConnection: HandledConnectionHandler,
@ -112,7 +114,7 @@ pub struct TcpSpacepacketsServer<
} }
impl< impl<
TmSource: TmPacketSource<Error = TmError>, TmSource: PacketSource<Error = TmError>,
TcReceiver: PacketSenderRaw<Error = TcError>, TcReceiver: PacketSenderRaw<Error = TcError>,
PacketIdChecker: ValidatorU16Id, PacketIdChecker: ValidatorU16Id,
HandledConnection: HandledConnectionHandler, HandledConnection: HandledConnectionHandler,
@ -191,7 +193,7 @@ mod tests {
thread, thread,
}; };
use alloc::{sync::Arc, vec::Vec}; use alloc::sync::Arc;
use hashbrown::HashSet; use hashbrown::HashSet;
use spacepackets::{ use spacepackets::{
ecss::{tc::PusTcCreator, WritablePusPacket}, ecss::{tc::PusTcCreator, WritablePusPacket},
@ -203,11 +205,14 @@ mod tests {
tests::{ConnectionFinishedHandler, SyncTmSource}, tests::{ConnectionFinishedHandler, SyncTmSource},
ConnectionResult, ServerConfig, ConnectionResult, ServerConfig,
}, },
pus::PacketAsVec,
queue::GenericSendError, queue::GenericSendError,
ComponentId,
}; };
use super::TcpSpacepacketsServer; use super::TcpSpacepacketsServer;
const TCP_SERVER_ID: ComponentId = 0x05;
const TEST_APID_0: u16 = 0x02; const TEST_APID_0: u16 = 0x02;
const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0); const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0);
const TEST_APID_1: u16 = 0x10; const TEST_APID_1: u16 = 0x10;
@ -215,22 +220,22 @@ mod tests {
fn generic_tmtc_server( fn generic_tmtc_server(
addr: &SocketAddr, addr: &SocketAddr,
tc_receiver: mpsc::Sender<Vec<u8>>, tc_sender: mpsc::Sender<PacketAsVec>,
tm_source: SyncTmSource, tm_source: SyncTmSource,
packet_id_lookup: HashSet<PacketId>, packet_id_lookup: HashSet<PacketId>,
stop_signal: Option<Arc<AtomicBool>>, stop_signal: Option<Arc<AtomicBool>>,
) -> TcpSpacepacketsServer< ) -> TcpSpacepacketsServer<
SyncTmSource, SyncTmSource,
mpsc::Sender<Vec<u8>>, mpsc::Sender<PacketAsVec>,
HashSet<PacketId>, HashSet<PacketId>,
ConnectionFinishedHandler, ConnectionFinishedHandler,
(), (),
GenericSendError, GenericSendError,
> { > {
TcpSpacepacketsServer::new( TcpSpacepacketsServer::new(
ServerConfig::new(*addr, Duration::from_millis(2), 1024, 1024), ServerConfig::new(TCP_SERVER_ID, *addr, Duration::from_millis(2), 1024, 1024),
tm_source, tm_source,
tc_receiver, tc_sender,
packet_id_lookup, packet_id_lookup,
ConnectionFinishedHandler::default(), ConnectionFinishedHandler::default(),
stop_signal, stop_signal,
@ -294,7 +299,7 @@ mod tests {
panic!("connection was not handled properly"); panic!("connection was not handled properly");
} }
let packet = tc_receiver.try_recv().expect("receiving TC failed"); let packet = tc_receiver.try_recv().expect("receiving TC failed");
assert_eq!(packet, tc_0); assert_eq!(packet.packet, tc_0);
assert!(matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty))); assert!(matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)));
} }
@ -401,9 +406,9 @@ mod tests {
} }
// Check that TC has arrived. // Check that TC has arrived.
let packet_0 = tc_receiver.try_recv().expect("receiving TC failed"); let packet_0 = tc_receiver.try_recv().expect("receiving TC failed");
assert_eq!(packet_0, tc_0); assert_eq!(packet_0.packet, tc_0);
let packet_1 = tc_receiver.try_recv().expect("receiving TC failed"); let packet_1 = tc_receiver.try_recv().expect("receiving TC failed");
assert_eq!(packet_1, tc_1); assert_eq!(packet_1.packet, tc_1);
assert!(matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty))); assert!(matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)));
} }
} }
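
The underlying parser can also be driven without a full server. A sketch of the new call shape, reusing the HashSet<PacketId> validator from these tests (PARSER_ID is a hypothetical component ID, and the return value is assumed to be a Result over the packet count, as in the COBS case):

use std::sync::mpsc;

use hashbrown::HashSet;
use satrs::encoding::parse_buffer_for_ccsds_space_packets;
use satrs::pus::PacketAsVec;
use satrs::ComponentId;
use spacepackets::ecss::{tc::PusTcCreator, WritablePusPacket};
use spacepackets::{PacketId, SpHeader};

// Hypothetical ID of the component doing the parsing.
const PARSER_ID: ComponentId = 0x06;

#[test]
fn ccsds_parsing_sketch() {
    let ping_tc = PusTcCreator::new_simple(SpHeader::new_from_apid(0x02), 17, 1, &[], true);
    let mut buf = [0u8; 64];
    let packet_len = ping_tc.write_to_bytes(&mut buf).expect("writing TC failed");

    // Only packets whose packet ID passes the validator are forwarded.
    let mut packet_id_lookup = HashSet::new();
    packet_id_lookup.insert(PacketId::new_for_tc(true, 0x02));

    let (tc_sender, tc_receiver) = mpsc::channel::<PacketAsVec>();
    let mut next_write_idx = 0;
    let num_packets = parse_buffer_for_ccsds_space_packets(
        &mut buf[..packet_len],
        &packet_id_lookup,
        PARSER_ID,
        &tc_sender,
        &mut next_write_idx,
    )
    .expect("parsing failed");
    assert_eq!(num_packets, 1);
    let packet = tc_receiver.try_recv().expect("no packet received");
    assert_eq!(packet.sender_id, PARSER_ID);
}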


@ -1,5 +1,6 @@
//! Generic UDP TC server. //! Generic UDP TC server.
use crate::tmtc::PacketSenderRaw; use crate::tmtc::PacketSenderRaw;
use crate::ComponentId;
use core::fmt::Debug; use core::fmt::Debug;
use std::io::{self, ErrorKind}; use std::io::{self, ErrorKind};
use std::net::{SocketAddr, ToSocketAddrs, UdpSocket}; use std::net::{SocketAddr, ToSocketAddrs, UdpSocket};
@ -22,13 +23,16 @@ use std::vec::Vec;
/// use std::sync::mpsc; /// use std::sync::mpsc;
/// use spacepackets::ecss::WritablePusPacket; /// use spacepackets::ecss::WritablePusPacket;
/// use satrs::hal::std::udp_server::UdpTcServer; /// use satrs::hal::std::udp_server::UdpTcServer;
/// use satrs::ComponentId;
/// use satrs::tmtc::PacketSenderRaw; /// use satrs::tmtc::PacketSenderRaw;
/// use spacepackets::SpHeader; /// use spacepackets::SpHeader;
/// use spacepackets::ecss::tc::PusTcCreator; /// use spacepackets::ecss::tc::PusTcCreator;
/// ///
/// const UDP_SERVER_ID: ComponentId = 0x05;
///
/// let (packet_sender, packet_receiver) = mpsc::channel(); /// let (packet_sender, packet_receiver) = mpsc::channel();
/// let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7777); /// let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7777);
/// let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, packet_sender) /// let mut udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, dest_addr, 2048, packet_sender)
/// .expect("Creating UDP TMTC server failed"); /// .expect("Creating UDP TMTC server failed");
/// let sph = SpHeader::new_from_apid(0x02); /// let sph = SpHeader::new_from_apid(0x02);
/// let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true); /// let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
@ -43,8 +47,9 @@ use std::vec::Vec;
/// let recv_result = udp_tc_server.try_recv_tc(); /// let recv_result = udp_tc_server.try_recv_tc();
/// assert!(recv_result.is_ok()); /// assert!(recv_result.is_ok());
/// // The packet is received by the UDP TC server and sent via the mpsc channel. /// // The packet is received by the UDP TC server and sent via the mpsc channel.
/// let sent_packet = packet_receiver.try_recv().expect("expected telecommand"); /// let sent_packet_with_sender = packet_receiver.try_recv().expect("expected telecommand");
/// assert_eq!(sent_packet, ping_tc_raw); /// assert_eq!(sent_packet_with_sender.packet, ping_tc_raw);
/// assert_eq!(sent_packet_with_sender.sender_id, UDP_SERVER_ID);
/// // No more packets received. /// // No more packets received.
/// assert!(matches!(packet_receiver.try_recv(), Err(mpsc::TryRecvError::Empty))); /// assert!(matches!(packet_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)));
/// ``` /// ```
@ -55,6 +60,7 @@ use std::vec::Vec;
/// on how to use this TC server. It uses the server to receive PUS telecommands on a specific port /// on how to use this TC server. It uses the server to receive PUS telecommands on a specific port
/// and then forwards them to a generic CCSDS packet receiver. /// and then forwards them to a generic CCSDS packet receiver.
pub struct UdpTcServer<TcSender: PacketSenderRaw<Error = SendError>, SendError> { pub struct UdpTcServer<TcSender: PacketSenderRaw<Error = SendError>, SendError> {
pub id: ComponentId,
pub socket: UdpSocket, pub socket: UdpSocket,
recv_buf: Vec<u8>, recv_buf: Vec<u8>,
sender_addr: Option<SocketAddr>, sender_addr: Option<SocketAddr>,
@ -75,11 +81,13 @@ impl<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>
UdpTcServer<TcSender, SendError> UdpTcServer<TcSender, SendError>
{ {
pub fn new<A: ToSocketAddrs>( pub fn new<A: ToSocketAddrs>(
id: ComponentId,
addr: A, addr: A,
max_recv_size: usize, max_recv_size: usize,
tc_sender: TcSender, tc_sender: TcSender,
) -> Result<Self, io::Error> { ) -> Result<Self, io::Error> {
let server = Self { let server = Self {
id,
socket: UdpSocket::bind(addr)?, socket: UdpSocket::bind(addr)?,
recv_buf: vec![0; max_recv_size], recv_buf: vec![0; max_recv_size],
sender_addr: None, sender_addr: None,
@ -103,7 +111,7 @@ impl<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>
let (num_bytes, from) = res; let (num_bytes, from) = res;
self.sender_addr = Some(from); self.sender_addr = Some(from);
self.tc_sender self.tc_sender
.send_raw_tc(&self.recv_buf[0..num_bytes]) .send_packet(self.id, &self.recv_buf[0..num_bytes])
.map_err(ReceiveResult::Send)?; .map_err(ReceiveResult::Send)?;
Ok(res) Ok(res)
} }
@ -118,6 +126,7 @@ mod tests {
use crate::hal::std::udp_server::{ReceiveResult, UdpTcServer}; use crate::hal::std::udp_server::{ReceiveResult, UdpTcServer};
use crate::queue::GenericSendError; use crate::queue::GenericSendError;
use crate::tmtc::PacketSenderRaw; use crate::tmtc::PacketSenderRaw;
use crate::ComponentId;
use core::cell::RefCell; use core::cell::RefCell;
use spacepackets::ecss::tc::PusTcCreator; use spacepackets::ecss::tc::PusTcCreator;
use spacepackets::ecss::WritablePusPacket; use spacepackets::ecss::WritablePusPacket;
@ -128,6 +137,8 @@ mod tests {
fn is_send<T: Send>(_: &T) {} fn is_send<T: Send>(_: &T) {}
const UDP_SERVER_ID: ComponentId = 0x05;
#[derive(Default)] #[derive(Default)]
struct PingReceiver { struct PingReceiver {
pub sent_cmds: RefCell<VecDeque<Vec<u8>>>, pub sent_cmds: RefCell<VecDeque<Vec<u8>>>,
@ -136,7 +147,8 @@ mod tests {
impl PacketSenderRaw for PingReceiver { impl PacketSenderRaw for PingReceiver {
type Error = GenericSendError; type Error = GenericSendError;
fn send_raw_tc(&self, tc_raw: &[u8]) -> Result<(), Self::Error> { fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
assert_eq!(sender_id, UDP_SERVER_ID);
let mut sent_data = Vec::new(); let mut sent_data = Vec::new();
sent_data.extend_from_slice(tc_raw); sent_data.extend_from_slice(tc_raw);
let mut queue = self.sent_cmds.borrow_mut(); let mut queue = self.sent_cmds.borrow_mut();
@ -151,7 +163,7 @@ mod tests {
let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7777); let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7777);
let ping_receiver = PingReceiver::default(); let ping_receiver = PingReceiver::default();
is_send(&ping_receiver); is_send(&ping_receiver);
let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, ping_receiver) let mut udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, dest_addr, 2048, ping_receiver)
.expect("Creating UDP TMTC server failed"); .expect("Creating UDP TMTC server failed");
is_send(&udp_tc_server); is_send(&udp_tc_server);
let sph = SpHeader::new_from_apid(0x02); let sph = SpHeader::new_from_apid(0x02);
@ -182,7 +194,7 @@ mod tests {
fn test_nothing_received() { fn test_nothing_received() {
let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7779); let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7779);
let ping_receiver = PingReceiver::default(); let ping_receiver = PingReceiver::default();
let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, ping_receiver) let mut udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, dest_addr, 2048, ping_receiver)
.expect("Creating UDP TMTC server failed"); .expect("Creating UDP TMTC server failed");
let res = udp_tc_server.try_recv_tc(); let res = udp_tc_server.try_recv_tc();
assert!(res.is_err()); assert!(res.is_err());


@ -195,610 +195,7 @@ pub mod std_mod {
mpsc::SyncSender<GenericMessage<ActionRequest>>, mpsc::SyncSender<GenericMessage<ActionRequest>>,
mpsc::Receiver<GenericMessage<ActionReplyPus>>, mpsc::Receiver<GenericMessage<ActionReplyPus>>,
>; >;
/*
pub type ModeRequestorAndHandlerMpsc = ModeInterface<
mpsc::Sender<GenericMessage<ModeRequest>>,
mpsc::Receiver<GenericMessage<ModeReply>>,
mpsc::Sender<GenericMessage<ModeReply>>,
mpsc::Receiver<GenericMessage<ModeRequest>>,
>;
pub type ModeRequestorAndHandlerMpscBounded = ModeInterface<
mpsc::SyncSender<GenericMessage<ModeRequest>>,
mpsc::Receiver<GenericMessage<ModeReply>>,
mpsc::SyncSender<GenericMessage<ModeReply>>,
mpsc::Receiver<GenericMessage<ModeRequest>>,
>;
*/
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {}
/*
use core::{cell::RefCell, time::Duration};
use std::{sync::mpsc, time::SystemTimeError};
use alloc::{collections::VecDeque, vec::Vec};
use delegate::delegate;
use spacepackets::{
ecss::{
tc::{PusTcCreator, PusTcReader},
tm::PusTmReader,
PusPacket,
},
time::{cds, TimeWriter},
CcsdsPacket,
};
use crate::{
action::ActionRequestVariant,
params::{self, ParamsRaw, WritableToBeBytes},
pus::{
tests::{
PusServiceHandlerWithVecCommon, PusTestHarness, SimplePusPacketHandler,
TestConverter, TestRouter, APP_DATA_TOO_SHORT,
},
verification::{
self,
tests::{SharedVerificationMap, TestVerificationReporter, VerificationStatus},
FailParams, TcStateAccepted, TcStateNone, TcStateStarted,
VerificationReportingProvider,
},
EcssTcInMemConverter, EcssTcInVecConverter, EcssTmtcError, GenericRoutingError,
MpscTcReceiver, PusPacketHandlerResult, PusPacketHandlingError, PusRequestRouter,
PusServiceHelper, PusTcToRequestConverter, TmAsVecSenderWithMpsc,
},
};
use super::*;
impl<Request> PusRequestRouter<Request> for TestRouter<Request> {
type Error = GenericRoutingError;
fn route(
&self,
target_id: TargetId,
request: Request,
_token: VerificationToken<TcStateAccepted>,
) -> Result<(), Self::Error> {
self.routing_requests
.borrow_mut()
.push_back((target_id, request));
self.check_for_injected_error()
}
fn handle_error(
&self,
target_id: TargetId,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
error: Self::Error,
time_stamp: &[u8],
verif_reporter: &impl VerificationReportingProvider,
) {
self.routing_errors
.borrow_mut()
.push_back((target_id, error));
}
}
impl PusTcToRequestConverter<ActionRequest> for TestConverter<8> {
type Error = PusPacketHandlingError;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
time_stamp: &[u8],
verif_reporter: &impl VerificationReportingProvider,
) -> Result<(TargetId, ActionRequest), Self::Error> {
self.conversion_request.push_back(tc.raw_data().to_vec());
self.check_service(tc)?;
let target_id = tc.apid();
if tc.user_data().len() < 4 {
verif_reporter
.start_failure(
token,
FailParams::new(
time_stamp,
&APP_DATA_TOO_SHORT,
(tc.user_data().len() as u32).to_be_bytes().as_ref(),
),
)
.expect("start success failure");
return Err(PusPacketHandlingError::NotEnoughAppData {
expected: 4,
found: tc.user_data().len(),
});
}
if tc.subservice() == 1 {
verif_reporter
.start_success(token, time_stamp)
.expect("start success failure");
return Ok((
target_id.into(),
ActionRequest {
action_id: u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap()),
variant: ActionRequestVariant::VecData(tc.user_data()[4..].to_vec()),
},
));
}
Err(PusPacketHandlingError::InvalidAppData(
"unexpected app data".into(),
))
}
}
pub struct PusDynRequestHandler<const SERVICE: u8, Request> {
srv_helper: PusServiceHelper<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter,
TestVerificationReporter,
>,
request_converter: TestConverter<SERVICE>,
request_router: TestRouter<Request>,
}
struct Pus8RequestTestbenchWithVec {
common: PusServiceHandlerWithVecCommon<TestVerificationReporter>,
handler: PusDynRequestHandler<8, ActionRequest>,
}
impl Pus8RequestTestbenchWithVec {
pub fn new() -> Self {
let (common, srv_helper) = PusServiceHandlerWithVecCommon::new_with_test_verif_sender();
Self {
common,
handler: PusDynRequestHandler {
srv_helper,
request_converter: TestConverter::default(),
request_router: TestRouter::default(),
},
}
}
delegate! {
to self.handler.request_converter {
pub fn check_next_conversion(&mut self, tc: &PusTcCreator);
}
}
delegate! {
to self.handler.request_router {
pub fn retrieve_next_request(&mut self) -> (TargetId, ActionRequest);
}
}
delegate! {
to self.handler.request_router {
pub fn retrieve_next_routing_error(&mut self) -> (TargetId, GenericRoutingError);
}
}
}
impl PusTestHarness for Pus8RequestTestbenchWithVec {
delegate! {
to self.common {
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
fn read_next_tm(&mut self) -> PusTmReader<'_>;
fn check_no_tm_available(&self) -> bool;
fn check_next_verification_tm(
&self,
subservice: u8,
expected_request_id: verification::RequestId,
);
}
}
}
impl SimplePusPacketHandler for Pus8RequestTestbenchWithVec {
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
let possible_packet = self.handler.srv_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(PusPacketHandlerResult::Empty);
}
let ecss_tc_and_token = possible_packet.unwrap();
let tc = self
.handler
.srv_helper
.tc_in_mem_converter
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
let time_stamp = cds::TimeProvider::from_now_with_u16_days()
.expect("timestamp generation failed")
.to_vec()
.unwrap();
let (target_id, action_request) = self.handler.request_converter.convert(
ecss_tc_and_token.token,
&tc,
&time_stamp,
&self.handler.srv_helper.common.verification_handler,
)?;
if let Err(e) = self.handler.request_router.route(
target_id,
action_request,
ecss_tc_and_token.token,
) {
self.handler.request_router.handle_error(
target_id,
ecss_tc_and_token.token,
&tc,
e.clone(),
&time_stamp,
&self.handler.srv_helper.common.verification_handler,
);
return Err(e.into());
}
Ok(PusPacketHandlerResult::RequestHandled)
}
}
const TIMEOUT_ERROR_CODE: ResultU16 = ResultU16::new(1, 2);
const COMPLETION_ERROR_CODE: ResultU16 = ResultU16::new(2, 0);
const COMPLETION_ERROR_CODE_STEP: ResultU16 = ResultU16::new(2, 1);
#[derive(Default)]
pub struct TestReplyHandlerHook {
pub unexpected_replies: VecDeque<GenericActionReplyPus>,
pub timeouts: RefCell<VecDeque<ActivePusActionRequest>>,
}
impl ReplyHandlerHook<ActivePusActionRequest, ActionReplyPusWithActionId> for TestReplyHandlerHook {
fn handle_unexpected_reply(&mut self, reply: &GenericActionReplyPus) {
self.unexpected_replies.push_back(reply.clone());
}
fn timeout_callback(&self, active_request: &ActivePusActionRequest) {
self.timeouts.borrow_mut().push_back(active_request.clone());
}
fn timeout_error_code(&self) -> ResultU16 {
TIMEOUT_ERROR_CODE
}
}
pub struct Pus8ReplyTestbench {
verif_reporter: TestVerificationReporter,
#[allow(dead_code)]
ecss_tm_receiver: mpsc::Receiver<Vec<u8>>,
handler: PusService8ReplyHandler<
TestVerificationReporter,
DefaultActiveActionRequestMap,
TestReplyHandlerHook,
mpsc::Sender<Vec<u8>>,
>,
}
impl Pus8ReplyTestbench {
pub fn new(normal_ctor: bool) -> Self {
let reply_handler_hook = TestReplyHandlerHook::default();
let shared_verif_map = SharedVerificationMap::default();
let test_verif_reporter = TestVerificationReporter::new(shared_verif_map.clone());
let (ecss_tm_sender, ecss_tm_receiver) = mpsc::channel();
let reply_handler = if normal_ctor {
PusService8ReplyHandler::new_from_now_with_default_map(
test_verif_reporter.clone(),
128,
reply_handler_hook,
ecss_tm_sender,
)
.expect("creating reply handler failed")
} else {
PusService8ReplyHandler::new_from_now(
test_verif_reporter.clone(),
DefaultActiveActionRequestMap::default(),
128,
reply_handler_hook,
ecss_tm_sender,
)
.expect("creating reply handler failed")
};
Self {
verif_reporter: test_verif_reporter,
ecss_tm_receiver,
handler: reply_handler,
}
}
pub fn init_handling_for_request(
&mut self,
request_id: RequestId,
_action_id: ActionId,
) -> VerificationToken<TcStateStarted> {
assert!(!self.handler.request_active(request_id));
// let action_req = ActionRequest::new(action_id, ActionRequestVariant::NoData);
let token = self.add_tc_with_req_id(request_id.into());
let token = self
.verif_reporter
.acceptance_success(token, &[])
.expect("acceptance success failure");
let token = self
.verif_reporter
.start_success(token, &[])
.expect("start success failure");
let verif_info = self
.verif_reporter
.verification_info(&verification::RequestId::from(request_id))
.expect("no verification info found");
assert!(verif_info.started.expect("request was not started"));
assert!(verif_info.accepted.expect("request was not accepted"));
token
}
pub fn next_unrequested_reply(&self) -> Option<GenericActionReplyPus> {
self.handler.user_hook.unexpected_replies.front().cloned()
}
pub fn assert_request_completion_success(&self, step: Option<u16>, request_id: RequestId) {
let verif_info = self
.verif_reporter
.verification_info(&verification::RequestId::from(request_id))
.expect("no verification info found");
self.assert_request_completion_common(request_id, &verif_info, step, true);
}
pub fn assert_request_completion_failure(
&self,
step: Option<u16>,
request_id: RequestId,
fail_enum: ResultU16,
fail_data: &[u8],
) {
let verif_info = self
.verif_reporter
.verification_info(&verification::RequestId::from(request_id))
.expect("no verification info found");
self.assert_request_completion_common(request_id, &verif_info, step, false);
assert_eq!(verif_info.fail_enum.unwrap(), fail_enum.raw() as u64);
assert_eq!(verif_info.failure_data.unwrap(), fail_data);
}
pub fn assert_request_completion_common(
&self,
request_id: RequestId,
verif_info: &VerificationStatus,
step: Option<u16>,
completion_success: bool,
) {
if let Some(step) = step {
assert!(verif_info.step_status.is_some());
assert!(verif_info.step_status.unwrap());
assert_eq!(step, verif_info.step);
}
assert_eq!(
verif_info.completed.expect("request is not completed"),
completion_success
);
assert!(!self.handler.request_active(request_id));
}
pub fn assert_request_step_failure(&self, step: u16, request_id: RequestId) {
let verif_info = self
.verif_reporter
.verification_info(&verification::RequestId::from(request_id))
.expect("no verification info found");
assert!(verif_info.step_status.is_some());
assert!(!verif_info.step_status.unwrap());
assert_eq!(step, verif_info.step);
}
pub fn add_routed_request(
&mut self,
request_id: verification::RequestId,
target_id: TargetId,
action_id: ActionId,
token: VerificationToken<TcStateStarted>,
timeout: Duration,
) {
if self.handler.request_active(request_id.into()) {
panic!("request already present");
}
self.handler
.add_routed_action_request(request_id, target_id, action_id, token, timeout);
if !self.handler.request_active(request_id.into()) {
panic!("request should be active now");
}
}
delegate! {
to self.handler {
pub fn request_active(&self, request_id: RequestId) -> bool;
pub fn handle_action_reply(
&mut self,
action_reply_with_ids: GenericMessage<ActionReplyPusWithActionId>,
time_stamp: &[u8]
) -> Result<(), EcssTmtcError>;
pub fn update_time_from_now(&mut self) -> Result<(), SystemTimeError>;
pub fn check_for_timeouts(&mut self, time_stamp: &[u8]) -> Result<(), EcssTmtcError>;
}
to self.verif_reporter {
fn add_tc_with_req_id(&mut self, req_id: verification::RequestId) -> VerificationToken<TcStateNone>;
}
}
}
#[test]
fn test_reply_handler_completion_success() {
let mut reply_testbench = Pus8ReplyTestbench::new(true);
let sender_id = 0x06;
let request_id = 0x02;
let target_id = 0x05;
let action_id = 0x03;
let token = reply_testbench.init_handling_for_request(request_id, action_id);
reply_testbench.add_routed_request(
request_id.into(),
target_id,
action_id,
token,
Duration::from_millis(1),
);
assert!(reply_testbench.request_active(request_id));
let action_reply = GenericMessage::new(
request_id,
sender_id,
ActionReplyPusWithActionId {
action_id,
variant: ActionReplyPus::Completed,
},
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
reply_testbench.assert_request_completion_success(None, request_id);
}
#[test]
fn test_reply_handler_step_success() {
let mut reply_testbench = Pus8ReplyTestbench::new(false);
let request_id = 0x02;
let target_id = 0x05;
let action_id = 0x03;
let token = reply_testbench.init_handling_for_request(request_id, action_id);
reply_testbench.add_routed_request(
request_id.into(),
target_id,
action_id,
token,
Duration::from_millis(1),
);
let action_reply = GenericActionReplyPus::new_action_reply(
request_id,
action_id,
action_id,
ActionReplyPus::StepSuccess { step: 1 },
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
let action_reply = GenericActionReplyPus::new_action_reply(
request_id,
action_id,
action_id,
ActionReplyPus::Completed,
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
reply_testbench.assert_request_completion_success(Some(1), request_id);
}
#[test]
fn test_reply_handler_completion_failure() {
let mut reply_testbench = Pus8ReplyTestbench::new(true);
let sender_id = 0x01;
let request_id = 0x02;
let target_id = 0x05;
let action_id = 0x03;
let token = reply_testbench.init_handling_for_request(request_id, action_id);
reply_testbench.add_routed_request(
request_id.into(),
target_id,
action_id,
token,
Duration::from_millis(1),
);
let params_raw = ParamsRaw::U32(params::U32(5));
let action_reply = GenericActionReplyPus::new_action_reply(
request_id,
sender_id,
action_id,
ActionReplyPus::CompletionFailed {
error_code: COMPLETION_ERROR_CODE,
params: params_raw.into(),
},
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
reply_testbench.assert_request_completion_failure(
None,
request_id,
COMPLETION_ERROR_CODE,
&params_raw.to_vec().unwrap(),
);
}
#[test]
fn test_reply_handler_step_failure() {
let mut reply_testbench = Pus8ReplyTestbench::new(false);
let sender_id = 0x01;
let request_id = 0x02;
let target_id = 0x05;
let action_id = 0x03;
let token = reply_testbench.init_handling_for_request(request_id, action_id);
reply_testbench.add_routed_request(
request_id.into(),
target_id,
action_id,
token,
Duration::from_millis(1),
);
let action_reply = GenericActionReplyPus::new_action_reply(
request_id,
sender_id,
action_id,
ActionReplyPus::StepFailed {
error_code: COMPLETION_ERROR_CODE_STEP,
step: 2,
params: ParamsRaw::U32(crate::params::U32(5)).into(),
},
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
reply_testbench.assert_request_step_failure(2, request_id);
}
#[test]
fn test_reply_handler_timeout_handling() {
let mut reply_testbench = Pus8ReplyTestbench::new(true);
let request_id = 0x02;
let target_id = 0x06;
let action_id = 0x03;
let token = reply_testbench.init_handling_for_request(request_id, action_id);
reply_testbench.add_routed_request(
request_id.into(),
target_id,
action_id,
token,
Duration::from_millis(1),
);
let timeout_param = Duration::from_millis(1).as_millis() as u64;
let timeout_param_raw = timeout_param.to_be_bytes();
std::thread::sleep(Duration::from_millis(2));
reply_testbench
.update_time_from_now()
.expect("time update failure");
reply_testbench.check_for_timeouts(&[]).unwrap();
reply_testbench.assert_request_completion_failure(
None,
request_id,
TIMEOUT_ERROR_CODE,
&timeout_param_raw,
);
}
#[test]
fn test_unrequested_reply() {
let mut reply_testbench = Pus8ReplyTestbench::new(true);
let sender_id = 0x01;
let request_id = 0x02;
let action_id = 0x03;
let action_reply = GenericActionReplyPus::new_action_reply(
request_id,
sender_id,
action_id,
ActionReplyPus::Completed,
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
let reply = reply_testbench.next_unrequested_reply();
assert!(reply.is_some());
let reply = reply.unwrap();
assert_eq!(reply.message.action_id, action_id);
assert_eq!(reply.request_id, request_id);
assert_eq!(reply.message.variant, ActionReplyPus::Completed);
}
*/
}


@ -258,7 +258,7 @@ pub mod alloc_mod {
mod tests { mod tests {
use super::*; use super::*;
use crate::events::SeverityInfo; use crate::events::SeverityInfo;
use crate::pus::PusTmAsVec; use crate::pus::PacketAsVec;
use crate::request::UniqueApidTargetId; use crate::request::UniqueApidTargetId;
use std::sync::mpsc::{self, TryRecvError}; use std::sync::mpsc::{self, TryRecvError};
@ -284,7 +284,7 @@ mod tests {
#[test] #[test]
fn test_basic() { fn test_basic() {
let event_man = create_basic_man_1(); let event_man = create_basic_man_1();
let (event_tx, event_rx) = mpsc::channel::<PusTmAsVec>(); let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
let event_sent = event_man let event_sent = event_man
.generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None) .generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None)
.expect("Sending info event failed"); .expect("Sending info event failed");
@ -297,7 +297,7 @@ mod tests {
#[test] #[test]
fn test_disable_event() { fn test_disable_event() {
let mut event_man = create_basic_man_2(); let mut event_man = create_basic_man_2();
let (event_tx, event_rx) = mpsc::channel::<PusTmAsVec>(); let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
// let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx); // let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx);
let res = event_man.disable_tm_for_event(&LOW_SEV_EVENT); let res = event_man.disable_tm_for_event(&LOW_SEV_EVENT);
assert!(res.is_ok()); assert!(res.is_ok());
@ -320,7 +320,7 @@ mod tests {
#[test] #[test]
fn test_reenable_event() { fn test_reenable_event() {
let mut event_man = create_basic_man_1(); let mut event_man = create_basic_man_1();
let (event_tx, event_rx) = mpsc::channel::<PusTmAsVec>(); let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
let mut res = event_man.disable_tm_for_event_with_sev(&INFO_EVENT); let mut res = event_man.disable_tm_for_event_with_sev(&INFO_EVENT);
assert!(res.is_ok()); assert!(res.is_ok());
assert!(res.unwrap()); assert!(res.unwrap());


@ -9,12 +9,12 @@ use std::sync::mpsc::Sender;
use super::verification::VerificationReportingProvider; use super::verification::VerificationReportingProvider;
use super::{ use super::{
EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSender, GenericConversionError, EcssTcInMemConverter, EcssTcReceiver, EcssTmSender, GenericConversionError,
GenericRoutingError, PusServiceHelper, GenericRoutingError, PusServiceHelper,
}; };
pub struct PusEventServiceHandler< pub struct PusEventServiceHandler<
TcReceiver: EcssTcReceiverCore, TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender, TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConverter, TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider, VerificationReporter: VerificationReportingProvider,
@ -25,7 +25,7 @@ pub struct PusEventServiceHandler<
} }
impl< impl<
TcReceiver: EcssTcReceiverCore, TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender, TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConverter, TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider, VerificationReporter: VerificationReportingProvider,
@ -167,7 +167,8 @@ mod tests {
use crate::pus::verification::{ use crate::pus::verification::{
RequestId, VerificationReporter, VerificationReportingProvider, RequestId, VerificationReporter, VerificationReportingProvider,
}; };
use crate::pus::{GenericConversionError, MpscTcReceiver, MpscTmInSharedPoolSenderBounded}; use crate::pus::{GenericConversionError, MpscTcReceiver};
use crate::tmtc::PacketSenderWithSharedPool;
use crate::{ use crate::{
events::EventU32, events::EventU32,
pus::{ pus::{
@ -186,7 +187,7 @@ mod tests {
common: PusServiceHandlerWithSharedStoreCommon, common: PusServiceHandlerWithSharedStoreCommon,
handler: PusEventServiceHandler< handler: PusEventServiceHandler<
MpscTcReceiver, MpscTcReceiver,
MpscTmInSharedPoolSenderBounded, PacketSenderWithSharedPool,
EcssTcInSharedStoreConverter, EcssTcInSharedStoreConverter,
VerificationReporter, VerificationReporter,
>, >,


@ -154,7 +154,7 @@ pub trait ChannelWithId: Send {
/// ///
/// This sender object is responsible for sending PUS telemetry to a TM sink. /// This sender object is responsible for sending PUS telemetry to a TM sink.
pub trait EcssTmSender: Send { pub trait EcssTmSender: Send {
fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError>; fn send_tm(&self, sender_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError>;
} }
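
Implementing the trait only requires the single send_tm method shown above. A minimal sketch of a custom sink which merely counts sent telemetry, handy in tests (the type name is made up):

use core::sync::atomic::{AtomicU32, Ordering};

use satrs::pus::{EcssTmSender, EcssTmtcError, PusTmVariant};
use satrs::ComponentId;

// Sketch: a TM sink which only counts how many packets were sent.
#[derive(Default)]
struct CountingTmSink {
    sent: AtomicU32,
}

impl EcssTmSender for CountingTmSink {
    fn send_tm(&self, _sender_id: ComponentId, _tm: PusTmVariant) -> Result<(), EcssTmtcError> {
        self.sent.fetch_add(1, Ordering::Relaxed);
        Ok(())
    }
}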
/// Generic trait for a user supplied sender object. /// Generic trait for a user supplied sender object.
@ -269,14 +269,19 @@ impl From<StoreError> for TryRecvTmtcError {
} }
/// Generic trait for a user supplied receiver object. /// Generic trait for a user supplied receiver object.
pub trait EcssTcReceiverCore { pub trait EcssTcReceiver {
fn recv_tc(&self) -> Result<EcssTcAndToken, TryRecvTmtcError>; fn recv_tc(&self) -> Result<EcssTcAndToken, TryRecvTmtcError>;
} }
/// Generic trait for objects which can receive ECSS PUS telecommands. /// Generic trait for objects which can send ECSS PUS telecommands.
pub trait ReceivesEcssPusTc: Send { pub trait PacketSenderPusTc: Send {
type Error; type Error;
fn pass_pus_tc(&mut self, header: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error>; fn send_pus_tc(
&self,
sender_id: ComponentId,
header: &SpHeader,
pus_tc: &PusTcReader,
) -> Result<(), Self::Error>;
} }
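
A sketch of what an implementation of the renamed trait might look like: forward the telecommand's raw bytes through an mpsc channel, tagged with the sender ID (the forwarder type is hypothetical; raw_data and PacketAsVec::new are used as shown elsewhere in this commit):

use std::sync::mpsc;

use satrs::pus::{PacketAsVec, PacketSenderPusTc};
use satrs::queue::GenericSendError;
use satrs::ComponentId;
use spacepackets::ecss::tc::PusTcReader;
use spacepackets::SpHeader;

struct MpscPusTcForwarder {
    tx: mpsc::Sender<PacketAsVec>,
}

impl PacketSenderPusTc for MpscPusTcForwarder {
    type Error = GenericSendError;

    fn send_pus_tc(
        &self,
        sender_id: ComponentId,
        _header: &SpHeader,
        pus_tc: &PusTcReader,
    ) -> Result<(), Self::Error> {
        // Tag the raw TC bytes with the sending component's ID.
        self.tx
            .send(PacketAsVec::new(sender_id, pus_tc.raw_data().to_vec()))
            .map_err(|_| GenericSendError::RxDisconnected)
    }
}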
pub trait ActiveRequestMapProvider<V>: Sized { pub trait ActiveRequestMapProvider<V>: Sized {
@ -349,7 +354,7 @@ pub mod alloc_mod {
use super::*; use super::*;
/// Extension trait for [EcssTmSenderCore]. /// Extension trait for [EcssTmSender].
/// ///
/// It provides additional functionality, for example by implementing the [Downcast] trait /// It provides additional functionality, for example by implementing the [Downcast] trait
/// and the [DynClone] trait. /// and the [DynClone] trait.
@ -370,7 +375,7 @@ pub mod alloc_mod {
fn upcast_mut(&mut self) -> &mut dyn EcssTmSender; fn upcast_mut(&mut self) -> &mut dyn EcssTmSender;
} }
/// Blanket implementation for all types which implement [EcssTmSenderCore] and are clonable. /// Blanket implementation for all types which implement [EcssTmSender] and are clonable.
impl<T> EcssTmSenderExt for T impl<T> EcssTmSenderExt for T
where where
T: EcssTmSender + Clone + 'static, T: EcssTmSender + Clone + 'static,
@ -390,7 +395,7 @@ pub mod alloc_mod {
dyn_clone::clone_trait_object!(EcssTmSenderExt); dyn_clone::clone_trait_object!(EcssTmSenderExt);
impl_downcast!(EcssTmSenderExt); impl_downcast!(EcssTmSenderExt);
/// Extension trait for [EcssTcSenderCore]. /// Extension trait for [EcssTcSender].
/// ///
/// It provides additional functionality, for example by implementing the [Downcast] trait /// It provides additional functionality, for example by implementing the [Downcast] trait
/// and the [DynClone] trait. /// and the [DynClone] trait.
@ -404,13 +409,13 @@ pub mod alloc_mod {
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub trait EcssTcSenderExt: EcssTcSender + Downcast + DynClone {} pub trait EcssTcSenderExt: EcssTcSender + Downcast + DynClone {}
/// Blanket implementation for all types which implement [EcssTcSenderCore] and are clonable. /// Blanket implementation for all types which implement [EcssTcSender] and are clonable.
impl<T> EcssTcSenderExt for T where T: EcssTcSender + Clone + 'static {} impl<T> EcssTcSenderExt for T where T: EcssTcSender + Clone + 'static {}
dyn_clone::clone_trait_object!(EcssTcSenderExt); dyn_clone::clone_trait_object!(EcssTcSenderExt);
impl_downcast!(EcssTcSenderExt); impl_downcast!(EcssTcSenderExt);
/// Extension trait for [EcssTcReceiverCore]. /// Extension trait for [EcssTcReceiver].
/// ///
/// It provides additional functionality, for example by implementing the [Downcast] trait /// It provides additional functionality, for example by implementing the [Downcast] trait
/// and the [DynClone] trait. /// and the [DynClone] trait.
@ -422,12 +427,12 @@ pub mod alloc_mod {
/// [Clone]. /// [Clone].
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub trait EcssTcReceiver: EcssTcReceiverCore + Downcast {} pub trait EcssTcReceiverExt: EcssTcReceiver + Downcast {}
/// Blanket implementation for all types which implement [EcssTcReceiverCore] and are clonable. /// Blanket implementation for all types which implement [EcssTcReceiver].
impl<T> EcssTcReceiver for T where T: EcssTcReceiverCore + 'static {} impl<T> EcssTcReceiverExt for T where T: EcssTcReceiver + 'static {}
impl_downcast!(EcssTcReceiver); impl_downcast!(EcssTcReceiverExt);
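// Illustrative sketch, not part of this patch: with impl_downcast in place, the
// concrete type behind a boxed extension trait object can be recovered, which is
// mostly useful for test code. MpscTmAsVecSender is assumed to be in scope here.
fn downcast_tm_sender(sender: alloc::boxed::Box<dyn EcssTmSenderExt>) {
    if let Some(concrete) = sender.downcast_ref::<MpscTmAsVecSender>() {
        // Concrete mpsc sender recovered from the trait object.
        let _ = concrete;
    }
}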
/// This trait is an abstraction for the conversion of a PUS telecommand into a generic request /// This trait is an abstraction for the conversion of a PUS telecommand into a generic request
/// type. /// type.
@ -652,15 +657,14 @@ pub mod std_mod {
}; };
use crate::pus::verification::{TcStateAccepted, VerificationToken}; use crate::pus::verification::{TcStateAccepted, VerificationToken};
use crate::pus::{ use crate::pus::{
EcssTcAndToken, EcssTcReceiverCore, EcssTmSender, EcssTmtcError, GenericReceiveError, EcssTcAndToken, EcssTcReceiver, EcssTmSender, EcssTmtcError, GenericReceiveError,
GenericSendError, PusTmVariant, TryRecvTmtcError, GenericSendError, PusTmVariant, TryRecvTmtcError,
}; };
use crate::tmtc::{PacketSenderSharedPool, PusTmPool, SharedPacketPool}; use crate::tmtc::PacketSenderWithSharedPool;
use crate::ComponentId; use crate::ComponentId;
use alloc::vec::Vec; use alloc::vec::Vec;
use core::time::Duration; use core::time::Duration;
use spacepackets::ecss::tc::PusTcReader; use spacepackets::ecss::tc::PusTcReader;
use spacepackets::ecss::tm::PusTmCreator;
use spacepackets::ecss::WritablePusPacket; use spacepackets::ecss::WritablePusPacket;
use spacepackets::time::StdTimestampError; use spacepackets::time::StdTimestampError;
use spacepackets::ByteConversionError; use spacepackets::ByteConversionError;
@ -676,23 +680,32 @@ pub mod std_mod {
use super::{AcceptedEcssTcAndToken, ActiveRequestProvider, TcInMemory}; use super::{AcceptedEcssTcAndToken, ActiveRequestProvider, TcInMemory};
#[derive(Debug)] #[derive(Debug)]
pub struct PusTmInPool { pub struct PacketInPool {
pub source_id: ComponentId, pub sender_id: ComponentId,
pub store_addr: StoreAddr, pub store_addr: StoreAddr,
} }
impl PacketInPool {
pub fn new(sender_id: ComponentId, store_addr: StoreAddr) -> Self {
Self {
sender_id,
store_addr,
}
}
}
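// Illustrative sketch, not part of this patch (assumes the std feature and the
// pool API from crate::pool): a PacketInPool message only carries the sender ID
// and the store address of a packet which was previously added to a shared pool.
fn packet_in_pool_roundtrip() {
    use crate::pool::{PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig};
    use std::sync::{mpsc, RwLock};

    let pool_cfg = StaticPoolConfig::new(vec![(4, 16)], false);
    let shared_pool = SharedStaticMemoryPool::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
    // Add the packet to the pool first, then send only its store address.
    let store_addr = shared_pool.write().unwrap().add(&[1, 2, 3]).unwrap();
    let (tx, rx) = mpsc::channel::<PacketInPool>();
    tx.send(PacketInPool::new(5, store_addr)).unwrap();
    assert_eq!(rx.recv().unwrap().sender_id, 5);
}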
impl From<mpsc::SendError<StoreAddr>> for EcssTmtcError { impl From<mpsc::SendError<StoreAddr>> for EcssTmtcError {
fn from(_: mpsc::SendError<StoreAddr>) -> Self { fn from(_: mpsc::SendError<StoreAddr>) -> Self {
Self::Send(GenericSendError::RxDisconnected) Self::Send(GenericSendError::RxDisconnected)
} }
} }
impl EcssTmSender for mpsc::Sender<PusTmInPool> { impl EcssTmSender for mpsc::Sender<PacketInPool> {
fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> {
match tm { match tm {
PusTmVariant::InStore(store_addr) => self PusTmVariant::InStore(store_addr) => self
.send(PusTmInPool { .send(PacketInPool {
source_id, sender_id: source_id,
store_addr, store_addr,
}) })
.map_err(|_| GenericSendError::RxDisconnected)?, .map_err(|_| GenericSendError::RxDisconnected)?,
@ -702,12 +715,12 @@ pub mod std_mod {
} }
} }
impl EcssTmSender for mpsc::SyncSender<PusTmInPool> { impl EcssTmSender for mpsc::SyncSender<PacketInPool> {
fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> {
match tm { match tm {
PusTmVariant::InStore(store_addr) => self PusTmVariant::InStore(store_addr) => self
.try_send(PusTmInPool { .try_send(PacketInPool {
source_id, sender_id: source_id,
store_addr, store_addr,
}) })
.map_err(|e| EcssTmtcError::Send(e.into()))?, .map_err(|e| EcssTmtcError::Send(e.into()))?,
@ -718,20 +731,26 @@ pub mod std_mod {
} }
#[derive(Debug)] #[derive(Debug)]
pub struct PusTmAsVec { pub struct PacketAsVec {
pub source_id: ComponentId, pub sender_id: ComponentId,
pub packet: Vec<u8>, pub packet: Vec<u8>,
} }
pub type MpscTmAsVecSender = mpsc::Sender<PusTmAsVec>; impl PacketAsVec {
pub fn new(sender_id: ComponentId, packet: Vec<u8>) -> Self {
Self { sender_id, packet }
}
}
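// Illustrative sketch, not part of this patch: PacketAsVec pairs a sender ID
// with an owned byte buffer, so it can travel through any std channel as-is.
fn packet_as_vec_roundtrip() {
    use std::sync::mpsc;

    let (tx, rx) = mpsc::channel::<PacketAsVec>();
    tx.send(PacketAsVec::new(10, vec![0x17, 0x2A])).unwrap();
    let packet = rx.recv().unwrap();
    assert_eq!(packet.sender_id, 10);
    assert_eq!(packet.packet, [0x17, 0x2A]);
}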
pub type MpscTmAsVecSender = mpsc::Sender<PacketAsVec>;
impl EcssTmSender for MpscTmAsVecSender { impl EcssTmSender for MpscTmAsVecSender {
fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> {
match tm { match tm {
PusTmVariant::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), PusTmVariant::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)),
PusTmVariant::Direct(tm) => self PusTmVariant::Direct(tm) => self
.send(PusTmAsVec { .send(PacketAsVec {
source_id, sender_id: source_id,
packet: tm.to_vec()?, packet: tm.to_vec()?,
}) })
.map_err(|e| EcssTmtcError::Send(e.into()))?, .map_err(|e| EcssTmtcError::Send(e.into()))?,
@ -740,15 +759,15 @@ pub mod std_mod {
} }
} }
pub type MpscTmAsVecSenderBounded = mpsc::SyncSender<PusTmAsVec>; pub type MpscTmAsVecSenderBounded = mpsc::SyncSender<PacketAsVec>;
impl EcssTmSender for MpscTmAsVecSenderBounded { impl EcssTmSender for MpscTmAsVecSenderBounded {
fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> {
match tm { match tm {
PusTmVariant::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), PusTmVariant::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)),
PusTmVariant::Direct(tm) => self PusTmVariant::Direct(tm) => self
.send(PusTmAsVec { .send(PacketAsVec {
source_id, sender_id: source_id,
packet: tm.to_vec()?, packet: tm.to_vec()?,
}) })
.map_err(|e| EcssTmtcError::Send(e.into()))?, .map_err(|e| EcssTmtcError::Send(e.into()))?,
@ -757,50 +776,9 @@ pub mod std_mod {
} }
} }
// TODO: This is a duplication of an existing shared store packet sender. Try to remove it..
/*
#[derive(Clone)]
pub struct TmInSharedPoolSender<Sender: EcssTmSenderCore> {
shared_tm_store: SharedPacketPool,
sender: Sender,
}
impl<Sender: EcssTmSenderCore> TmInSharedPoolSender<Sender> {
pub fn send_direct_tm(
&self,
source_id: ComponentId,
tm: PusTmCreator,
) -> Result<(), EcssTmtcError> {
let addr = self.shared_tm_store.add_pus_tm_from_creator(&tm)?;
self.sender.send_tm(source_id, PusTmVariant::InStore(addr))
}
}
impl<Sender: EcssTmSenderCore> EcssTmSenderCore for TmInSharedPoolSender<Sender> {
fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> {
if let PusTmVariant::Direct(tm) = tm {
return self.send_direct_tm(source_id, tm);
}
self.sender.send_tm(source_id, tm)
}
}
impl<Sender: EcssTmSenderCore> TmInSharedPoolSender<Sender> {
pub fn new(shared_tm_store: SharedPacketPool, sender: Sender) -> Self {
Self {
shared_tm_store,
sender,
}
}
}
pub type MpscTmInSharedPoolSender = TmInSharedPoolSender<mpsc::Sender<PusTmInPool>>;
pub type MpscTmInSharedPoolSenderBounded = TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>;
*/
pub type MpscTcReceiver = mpsc::Receiver<EcssTcAndToken>; pub type MpscTcReceiver = mpsc::Receiver<EcssTcAndToken>;
impl EcssTcReceiverCore for MpscTcReceiver { impl EcssTcReceiver for MpscTcReceiver {
fn recv_tc(&self) -> Result<EcssTcAndToken, TryRecvTmtcError> { fn recv_tc(&self) -> Result<EcssTcAndToken, TryRecvTmtcError> {
self.try_recv().map_err(|e| match e { self.try_recv().map_err(|e| match e {
TryRecvError::Empty => TryRecvTmtcError::Empty, TryRecvError::Empty => TryRecvTmtcError::Empty,
@ -816,8 +794,6 @@ pub mod std_mod {
use super::*; use super::*;
use crossbeam_channel as cb; use crossbeam_channel as cb;
pub type TmInSharedPoolSenderWithCrossbeam = TmInSharedPoolSender<cb::Sender<PusTmInPool>>;
impl From<cb::SendError<StoreAddr>> for EcssTmtcError { impl From<cb::SendError<StoreAddr>> for EcssTmtcError {
fn from(_: cb::SendError<StoreAddr>) -> Self { fn from(_: cb::SendError<StoreAddr>) -> Self {
Self::Send(GenericSendError::RxDisconnected) Self::Send(GenericSendError::RxDisconnected)
@ -835,37 +811,31 @@ pub mod std_mod {
} }
} }
impl EcssTmSender for cb::Sender<PusTmInPool> { impl EcssTmSender for cb::Sender<PacketInPool> {
fn send_tm( fn send_tm(
&self, &self,
source_id: ComponentId, sender_id: ComponentId,
tm: PusTmVariant, tm: PusTmVariant,
) -> Result<(), EcssTmtcError> { ) -> Result<(), EcssTmtcError> {
match tm { match tm {
PusTmVariant::InStore(addr) => self PusTmVariant::InStore(addr) => self
.try_send(PusTmInPool { .try_send(PacketInPool::new(sender_id, addr))
source_id,
store_addr: addr,
})
.map_err(|e| EcssTmtcError::Send(e.into()))?, .map_err(|e| EcssTmtcError::Send(e.into()))?,
PusTmVariant::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm), PusTmVariant::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm),
}; };
Ok(()) Ok(())
} }
} }
impl EcssTmSender for cb::Sender<PusTmAsVec> { impl EcssTmSender for cb::Sender<PacketAsVec> {
fn send_tm( fn send_tm(
&self, &self,
source_id: ComponentId, sender_id: ComponentId,
tm: PusTmVariant, tm: PusTmVariant,
) -> Result<(), EcssTmtcError> { ) -> Result<(), EcssTmtcError> {
match tm { match tm {
PusTmVariant::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), PusTmVariant::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)),
PusTmVariant::Direct(tm) => self PusTmVariant::Direct(tm) => self
.send(PusTmAsVec { .send(PacketAsVec::new(sender_id, tm.to_vec()?))
source_id,
packet: tm.to_vec()?,
})
.map_err(|e| EcssTmtcError::Send(e.into()))?, .map_err(|e| EcssTmtcError::Send(e.into()))?,
}; };
Ok(()) Ok(())
@ -1112,7 +1082,7 @@ pub mod std_mod {
} }
pub struct PusServiceBase< pub struct PusServiceBase<
TcReceiver: EcssTcReceiverCore, TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender, TmSender: EcssTmSender,
VerificationReporter: VerificationReportingProvider, VerificationReporter: VerificationReportingProvider,
> { > {
@ -1132,7 +1102,7 @@ pub mod std_mod {
/// by using the [EcssTcInMemConverter] abstraction. This object provides some convenience /// by using the [EcssTcInMemConverter] abstraction. This object provides some convenience
/// methods to make the generic parts of TC handling easier. /// methods to make the generic parts of TC handling easier.
pub struct PusServiceHelper< pub struct PusServiceHelper<
TcReceiver: EcssTcReceiverCore, TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender, TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConverter, TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider, VerificationReporter: VerificationReportingProvider,
@ -1142,7 +1112,7 @@ pub mod std_mod {
} }
impl< impl<
TcReceiver: EcssTcReceiverCore, TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender, TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConverter, TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider, VerificationReporter: VerificationReportingProvider,
@ -1174,7 +1144,7 @@ pub mod std_mod {
&self.common.tm_sender &self.common.tm_sender
} }
/// This function can be used to poll the internal [EcssTcReceiverCore] object for the next /// This function can be used to poll the internal [EcssTcReceiver] object for the next
/// telecommand packet. It will return `Ok(None)` if there are no packets available. /// telecommand packet. It will return `Ok(None)` if there are no packets available.
/// In any other case, it will perform the acceptance of the ECSS TC packet using the /// In any other case, it will perform the acceptance of the ECSS TC packet using the
/// internal [VerificationReportingProvider] object. It will then return the telecommand /// internal [VerificationReportingProvider] object. It will then return the telecommand
@ -1233,14 +1203,14 @@ pub mod std_mod {
pub type PusServiceHelperStaticWithMpsc<TcInMemConverter, VerificationReporter> = pub type PusServiceHelperStaticWithMpsc<TcInMemConverter, VerificationReporter> =
PusServiceHelper< PusServiceHelper<
MpscTcReceiver, MpscTcReceiver,
PacketSenderSharedPool, PacketSenderWithSharedPool,
TcInMemConverter, TcInMemConverter,
VerificationReporter, VerificationReporter,
>; >;
pub type PusServiceHelperStaticWithBoundedMpsc<TcInMemConverter, VerificationReporter> = pub type PusServiceHelperStaticWithBoundedMpsc<TcInMemConverter, VerificationReporter> =
PusServiceHelper< PusServiceHelper<
MpscTcReceiver, MpscTcReceiver,
PacketSenderSharedPool, PacketSenderWithSharedPool,
TcInMemConverter, TcInMemConverter,
VerificationReporter, VerificationReporter,
>; >;
@ -1310,7 +1280,7 @@ pub mod tests {
use crate::pool::{PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig}; use crate::pool::{PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig};
use crate::pus::verification::{RequestId, VerificationReporter}; use crate::pus::verification::{RequestId, VerificationReporter};
use crate::tmtc::SharedPacketPool; use crate::tmtc::{PacketSenderWithSharedPool, SharedPacketPool};
use crate::ComponentId; use crate::ComponentId;
use super::test_util::{TEST_APID, TEST_COMPONENT_ID_0}; use super::test_util::{TEST_APID, TEST_COMPONENT_ID_0};
@ -1369,12 +1339,12 @@ pub mod tests {
tc_pool: SharedStaticMemoryPool, tc_pool: SharedStaticMemoryPool,
tm_pool: SharedPacketPool, tm_pool: SharedPacketPool,
tc_sender: mpsc::SyncSender<EcssTcAndToken>, tc_sender: mpsc::SyncSender<EcssTcAndToken>,
tm_receiver: mpsc::Receiver<PusTmInPool>, tm_receiver: mpsc::Receiver<PacketInPool>,
} }
pub type PusServiceHelperStatic = PusServiceHelper< pub type PusServiceHelperStatic = PusServiceHelper<
MpscTcReceiver, MpscTcReceiver,
MpscTmInSharedPoolSenderBounded, PacketSenderWithSharedPool,
EcssTcInSharedStoreConverter, EcssTcInSharedStoreConverter,
VerificationReporter, VerificationReporter,
>; >;
@ -1390,13 +1360,15 @@ pub mod tests {
let tm_pool = StaticMemoryPool::new(pool_cfg); let tm_pool = StaticMemoryPool::new(pool_cfg);
let shared_tc_pool = SharedStaticMemoryPool::new(RwLock::new(tc_pool)); let shared_tc_pool = SharedStaticMemoryPool::new(RwLock::new(tc_pool));
let shared_tm_pool = SharedStaticMemoryPool::new(RwLock::new(tm_pool)); let shared_tm_pool = SharedStaticMemoryPool::new(RwLock::new(tm_pool));
let shared_tm_pool_wrapper = SharedPacketPool::new(&shared_tm_pool);
let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::sync_channel(10); let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::sync_channel(10);
let (tm_tx, tm_rx) = mpsc::sync_channel(10); let (tm_tx, tm_rx) = mpsc::sync_channel(10);
let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
let verification_handler = let verification_handler =
VerificationReporter::new(TEST_COMPONENT_ID_0.id(), &verif_cfg); VerificationReporter::new(TEST_COMPONENT_ID_0.id(), &verif_cfg);
let test_srv_tm_sender = TmInSharedPoolSender::new(shared_tm_pool.clone(), tm_tx); let test_srv_tm_sender =
PacketSenderWithSharedPool::new(tm_tx, shared_tm_pool_wrapper.clone());
let in_store_converter = let in_store_converter =
EcssTcInSharedStoreConverter::new(shared_tc_pool.clone(), 2048); EcssTcInSharedStoreConverter::new(shared_tc_pool.clone(), 2048);
( (
@ -1404,7 +1376,7 @@ pub mod tests {
pus_buf: RefCell::new([0; 2048]), pus_buf: RefCell::new([0; 2048]),
tm_buf: [0; 2048], tm_buf: [0; 2048],
tc_pool: shared_tc_pool, tc_pool: shared_tc_pool,
tm_pool: shared_tm_pool, tm_pool: shared_tm_pool_wrapper,
tc_sender: test_srv_tc_tx, tc_sender: test_srv_tc_tx,
tm_receiver: tm_rx, tm_receiver: tm_rx,
}, },
@ -1466,7 +1438,7 @@ pub mod tests {
pub struct PusServiceHandlerWithVecCommon { pub struct PusServiceHandlerWithVecCommon {
current_tm: Option<Vec<u8>>, current_tm: Option<Vec<u8>>,
tc_sender: mpsc::Sender<EcssTcAndToken>, tc_sender: mpsc::Sender<EcssTcAndToken>,
tm_receiver: mpsc::Receiver<PusTmAsVec>, tm_receiver: mpsc::Receiver<PacketAsVec>,
} }
pub type PusServiceHelperDynamic = PusServiceHelper< pub type PusServiceHelperDynamic = PusServiceHelper<
MpscTcReceiver, MpscTcReceiver,


@ -1,12 +1,12 @@
use super::scheduler::PusSchedulerProvider; use super::scheduler::PusSchedulerProvider;
use super::verification::{VerificationReporter, VerificationReportingProvider}; use super::verification::{VerificationReporter, VerificationReportingProvider};
use super::{ use super::{
EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTcReceiverCore, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTcReceiver,
EcssTmSender, MpscTcReceiver, PusServiceHelper, PusTmAsVec, EcssTmSender, MpscTcReceiver, PacketAsVec, PusServiceHelper,
}; };
use crate::pool::PoolProvider; use crate::pool::PoolProvider;
use crate::pus::{PusPacketHandlerResult, PusPacketHandlingError}; use crate::pus::{PusPacketHandlerResult, PusPacketHandlingError};
use crate::tmtc::PacketSenderSharedPool; use crate::tmtc::PacketSenderWithSharedPool;
use alloc::string::ToString; use alloc::string::ToString;
use spacepackets::ecss::{scheduling, PusPacket}; use spacepackets::ecss::{scheduling, PusPacket};
use spacepackets::time::cds::CdsTime; use spacepackets::time::cds::CdsTime;
@ -21,7 +21,7 @@ use std::sync::mpsc;
/// [Self::scheduler] and [Self::scheduler_mut] functions and then use the scheduler API to release /// [Self::scheduler] and [Self::scheduler_mut] functions and then use the scheduler API to release
/// telecommands when applicable. /// telecommands when applicable.
pub struct PusSchedServiceHandler< pub struct PusSchedServiceHandler<
TcReceiver: EcssTcReceiverCore, TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender, TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConverter, TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider, VerificationReporter: VerificationReportingProvider,
@ -33,7 +33,7 @@ pub struct PusSchedServiceHandler<
} }
impl< impl<
TcReceiver: EcssTcReceiverCore, TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender, TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConverter, TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider, VerificationReporter: VerificationReportingProvider,
@ -212,7 +212,7 @@ impl<
/// mpsc queues. /// mpsc queues.
pub type PusService11SchedHandlerDynWithMpsc<PusScheduler> = PusSchedServiceHandler< pub type PusService11SchedHandlerDynWithMpsc<PusScheduler> = PusSchedServiceHandler<
MpscTcReceiver, MpscTcReceiver,
mpsc::Sender<PusTmAsVec>, mpsc::Sender<PacketAsVec>,
EcssTcInVecConverter, EcssTcInVecConverter,
VerificationReporter, VerificationReporter,
PusScheduler, PusScheduler,
@ -221,7 +221,7 @@ pub type PusService11SchedHandlerDynWithMpsc<PusScheduler> = PusSchedServiceHand
/// queues. /// queues.
pub type PusService11SchedHandlerDynWithBoundedMpsc<PusScheduler> = PusSchedServiceHandler< pub type PusService11SchedHandlerDynWithBoundedMpsc<PusScheduler> = PusSchedServiceHandler<
MpscTcReceiver, MpscTcReceiver,
mpsc::SyncSender<PusTmAsVec>, mpsc::SyncSender<PacketAsVec>,
EcssTcInVecConverter, EcssTcInVecConverter,
VerificationReporter, VerificationReporter,
PusScheduler, PusScheduler,
@ -230,7 +230,7 @@ pub type PusService11SchedHandlerDynWithBoundedMpsc<PusScheduler> = PusSchedServ
/// mpsc queues. /// mpsc queues.
pub type PusService11SchedHandlerStaticWithMpsc<PusScheduler> = PusSchedServiceHandler< pub type PusService11SchedHandlerStaticWithMpsc<PusScheduler> = PusSchedServiceHandler<
MpscTcReceiver, MpscTcReceiver,
PacketSenderSharedPool, PacketSenderWithSharedPool,
EcssTcInSharedStoreConverter, EcssTcInSharedStoreConverter,
VerificationReporter, VerificationReporter,
PusScheduler, PusScheduler,
@ -239,7 +239,7 @@ pub type PusService11SchedHandlerStaticWithMpsc<PusScheduler> = PusSchedServiceH
/// mpsc queues. /// mpsc queues.
pub type PusService11SchedHandlerStaticWithBoundedMpsc<PusScheduler> = PusSchedServiceHandler< pub type PusService11SchedHandlerStaticWithBoundedMpsc<PusScheduler> = PusSchedServiceHandler<
MpscTcReceiver, MpscTcReceiver,
PacketSenderSharedPool, PacketSenderWithSharedPool,
EcssTcInSharedStoreConverter, EcssTcInSharedStoreConverter,
VerificationReporter, VerificationReporter,
PusScheduler, PusScheduler,
@ -249,7 +249,7 @@ pub type PusService11SchedHandlerStaticWithBoundedMpsc<PusScheduler> = PusSchedS
mod tests { mod tests {
use crate::pool::{StaticMemoryPool, StaticPoolConfig}; use crate::pool::{StaticMemoryPool, StaticPoolConfig};
use crate::pus::test_util::{PusTestHarness, TEST_APID}; use crate::pus::test_util::{PusTestHarness, TEST_APID};
use crate::pus::verification::VerificationReporter; use crate::pus::verification::{VerificationReporter, VerificationReportingProvider};
use crate::pus::{ use crate::pus::{
scheduler::{self, PusSchedulerProvider, TcInfo}, scheduler::{self, PusSchedulerProvider, TcInfo},
@ -258,7 +258,7 @@ mod tests {
EcssTcInSharedStoreConverter, EcssTcInSharedStoreConverter,
}; };
use crate::pus::{MpscTcReceiver, PusPacketHandlerResult, PusPacketHandlingError}; use crate::pus::{MpscTcReceiver, PusPacketHandlerResult, PusPacketHandlingError};
use crate::tmtc::PacketSenderSharedPool; use crate::tmtc::PacketSenderWithSharedPool;
use alloc::collections::VecDeque; use alloc::collections::VecDeque;
use delegate::delegate; use delegate::delegate;
use spacepackets::ecss::scheduling::Subservice; use spacepackets::ecss::scheduling::Subservice;
@ -277,7 +277,7 @@ mod tests {
common: PusServiceHandlerWithSharedStoreCommon, common: PusServiceHandlerWithSharedStoreCommon,
handler: PusSchedServiceHandler< handler: PusSchedServiceHandler<
MpscTcReceiver, MpscTcReceiver,
PacketSenderSharedPool, PacketSenderWithSharedPool,
EcssTcInSharedStoreConverter, EcssTcInSharedStoreConverter,
VerificationReporter, VerificationReporter,
TestScheduler, TestScheduler,


@ -1,7 +1,8 @@
use crate::pus::{ use crate::pus::{
PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, PusTmAsVec, PacketAsVec, PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError,
PusTmInPool, PusTmVariant, PusTmVariant,
}; };
use crate::tmtc::PacketSenderWithSharedPool;
use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader}; use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use spacepackets::ecss::PusPacket; use spacepackets::ecss::PusPacket;
use spacepackets::SpHeader; use spacepackets::SpHeader;
@ -9,15 +10,14 @@ use std::sync::mpsc;
use super::verification::{VerificationReporter, VerificationReportingProvider}; use super::verification::{VerificationReporter, VerificationReportingProvider};
use super::{ use super::{
EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTcReceiverCore, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTcReceiver,
EcssTmSender, GenericConversionError, MpscTcReceiver, MpscTmInSharedPoolSender, EcssTmSender, GenericConversionError, MpscTcReceiver, PusServiceHelper,
MpscTmInSharedPoolSenderBounded, PusServiceHelper,
}; };
/// This is a helper class for [std] environments to handle generic PUS 17 (test service) packets. /// This is a helper class for [std] environments to handle generic PUS 17 (test service) packets.
/// This handler only processes ping requests and generates a ping reply for them accordingly. /// This handler only processes ping requests and generates a ping reply for them accordingly.
pub struct PusService17TestHandler< pub struct PusService17TestHandler<
TcReceiver: EcssTcReceiverCore, TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender, TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConverter, TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider, VerificationReporter: VerificationReportingProvider,
@ -27,7 +27,7 @@ pub struct PusService17TestHandler<
} }
impl< impl<
TcReceiver: EcssTcReceiverCore, TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender, TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConverter, TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider, VerificationReporter: VerificationReportingProvider,
@ -127,7 +127,7 @@ impl<
/// mpsc queues. /// mpsc queues.
pub type PusService17TestHandlerDynWithMpsc = PusService17TestHandler< pub type PusService17TestHandlerDynWithMpsc = PusService17TestHandler<
MpscTcReceiver, MpscTcReceiver,
mpsc::Sender<PusTmAsVec>, mpsc::Sender<PacketAsVec>,
EcssTcInVecConverter, EcssTcInVecConverter,
VerificationReporter, VerificationReporter,
>; >;
@ -135,23 +135,15 @@ pub type PusService17TestHandlerDynWithMpsc = PusService17TestHandler<
/// queues. /// queues.
pub type PusService17TestHandlerDynWithBoundedMpsc = PusService17TestHandler< pub type PusService17TestHandlerDynWithBoundedMpsc = PusService17TestHandler<
MpscTcReceiver, MpscTcReceiver,
mpsc::SyncSender<PusTmInPool>, mpsc::SyncSender<PacketAsVec>,
EcssTcInVecConverter, EcssTcInVecConverter,
VerificationReporter, VerificationReporter,
>; >;
/// Helper type definition for a PUS 17 handler with a shared store TMTC memory backend and regular
/// mpsc queues.
pub type PusService17TestHandlerStaticWithMpsc = PusService17TestHandler<
MpscTcReceiver,
MpscTmInSharedPoolSender,
EcssTcInSharedStoreConverter,
VerificationReporter,
>;
/// Helper type definition for a PUS 17 handler with a shared store TMTC memory backend and bounded /// Helper type definition for a PUS 17 handler with a shared store TMTC memory backend and bounded
/// mpsc queues. /// mpsc queues.
pub type PusService17TestHandlerStaticWithBoundedMpsc = PusService17TestHandler< pub type PusService17TestHandlerStaticWithBoundedMpsc = PusService17TestHandler<
MpscTcReceiver, MpscTcReceiver,
MpscTmInSharedPoolSenderBounded, PacketSenderWithSharedPool,
EcssTcInSharedStoreConverter, EcssTcInSharedStoreConverter,
VerificationReporter, VerificationReporter,
>; >;
@ -168,9 +160,9 @@ mod tests {
use crate::pus::verification::{TcStateAccepted, VerificationToken}; use crate::pus::verification::{TcStateAccepted, VerificationToken};
use crate::pus::{ use crate::pus::{
EcssTcInSharedStoreConverter, EcssTcInVecConverter, GenericConversionError, MpscTcReceiver, EcssTcInSharedStoreConverter, EcssTcInVecConverter, GenericConversionError, MpscTcReceiver,
MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, MpscTmAsVecSender, PusPacketHandlerResult, PusPacketHandlingError,
PusPacketHandlingError,
}; };
use crate::tmtc::PacketSenderWithSharedPool;
use crate::ComponentId; use crate::ComponentId;
use delegate::delegate; use delegate::delegate;
use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader}; use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
@ -185,7 +177,7 @@ mod tests {
common: PusServiceHandlerWithSharedStoreCommon, common: PusServiceHandlerWithSharedStoreCommon,
handler: PusService17TestHandler< handler: PusService17TestHandler<
MpscTcReceiver, MpscTcReceiver,
MpscTmInSharedPoolSenderBounded, PacketSenderWithSharedPool,
EcssTcInSharedStoreConverter, EcssTcInSharedStoreConverter,
VerificationReporter, VerificationReporter,
>, >,


@ -19,10 +19,9 @@
//! use satrs::pus::verification::{ //! use satrs::pus::verification::{
//! VerificationReportingProvider, VerificationReporterCfg, VerificationReporter //! VerificationReportingProvider, VerificationReporterCfg, VerificationReporter
//! }; //! };
//! use satrs::tmtc::{SharedStaticMemoryPool, PacketSenderWithSharedPool};
//! use satrs::seq_count::SeqCountProviderSimple; //! use satrs::seq_count::SeqCountProviderSimple;
//! use satrs::request::UniqueApidTargetId; //! use satrs::request::UniqueApidTargetId;
//! use satrs::pus::MpscTmInSharedPoolSender;
//! use satrs::tmtc::tm_helper::SharedTmPool;
//! use spacepackets::ecss::PusPacket; //! use spacepackets::ecss::PusPacket;
//! use spacepackets::SpHeader; //! use spacepackets::SpHeader;
//! use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader}; //! use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
@ -34,10 +33,9 @@
//! //!
//! let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false); //! let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false);
//! let tm_pool = StaticMemoryPool::new(pool_cfg.clone()); //! let tm_pool = StaticMemoryPool::new(pool_cfg.clone());
//! let shared_tm_store = SharedTmPool::new(tm_pool); //! let shared_tm_pool = SharedStaticMemoryPool::new(RwLock::new(tm_pool));
//! let tm_store = shared_tm_store.clone_backing_pool(); //! let (verif_tx, verif_rx) = mpsc::sync_channel(10);
//! let (verif_tx, verif_rx) = mpsc::channel(); //! let sender = PacketSenderWithSharedPool::new_with_shared_packet_pool(verif_tx, &shared_tm_pool);
//! let sender = MpscTmInSharedPoolSender::new(shared_tm_store, verif_tx);
//! let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); //! let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
//! let mut reporter = VerificationReporter::new(TEST_COMPONENT_ID.id(), &cfg); //! let mut reporter = VerificationReporter::new(TEST_COMPONENT_ID.id(), &cfg);
//! //!
@ -61,7 +59,7 @@
//! let tm_in_store = verif_rx.recv_timeout(Duration::from_millis(10)).unwrap(); //! let tm_in_store = verif_rx.recv_timeout(Duration::from_millis(10)).unwrap();
//! let tm_len; //! let tm_len;
//! { //! {
//! let mut rg = tm_store.write().expect("Error locking shared pool"); //! let mut rg = shared_tm_pool.write().expect("Error locking shared pool");
//! let store_guard = rg.read_with_guard(tm_in_store.store_addr); //! let store_guard = rg.read_with_guard(tm_in_store.store_addr);
//! tm_len = store_guard.read(&mut tm_buf).expect("Error reading TM slice"); //! tm_len = store_guard.read(&mut tm_buf).expect("Error reading TM slice");
//! } //! }
@ -1636,17 +1634,17 @@ pub mod test_util {
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use crate::pool::{StaticMemoryPool, StaticPoolConfig}; use crate::pool::{SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig};
use crate::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0}; use crate::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0};
use crate::pus::tests::CommonTmInfo; use crate::pus::tests::CommonTmInfo;
use crate::pus::verification::{ use crate::pus::verification::{
EcssTmSender, EcssTmtcError, FailParams, FailParamsWithStep, RequestId, TcStateNone, EcssTmSender, EcssTmtcError, FailParams, FailParamsWithStep, RequestId, TcStateNone,
VerificationReporter, VerificationReporterCfg, VerificationToken, VerificationReporter, VerificationReporterCfg, VerificationToken,
}; };
use crate::pus::{ChannelWithId, MpscTmInSharedPoolSender, PusTmVariant}; use crate::pus::{ChannelWithId, PusTmVariant};
use crate::request::MessageMetadata; use crate::request::MessageMetadata;
use crate::seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore}; use crate::seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore};
use crate::tmtc::tm_helper::SharedTmPool; use crate::tmtc::{PacketSenderWithSharedPool, SharedPacketPool};
use crate::ComponentId; use crate::ComponentId;
use alloc::format; use alloc::format;
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader}; use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader};
@ -1658,7 +1656,7 @@ pub mod tests {
use spacepackets::{ByteConversionError, SpHeader}; use spacepackets::{ByteConversionError, SpHeader};
use std::cell::RefCell; use std::cell::RefCell;
use std::collections::VecDeque; use std::collections::VecDeque;
use std::sync::mpsc; use std::sync::{mpsc, RwLock};
use std::vec; use std::vec;
use std::vec::Vec; use std::vec::Vec;
@ -2128,9 +2126,10 @@ pub mod tests {
#[test] #[test]
fn test_mpsc_verif_send() { fn test_mpsc_verif_send() {
let pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(8, 8)], false)); let pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(8, 8)], false));
let shared_tm_store = SharedTmPool::new(pool); let shared_tm_store =
let (tx, _) = mpsc::channel(); SharedPacketPool::new(&SharedStaticMemoryPool::new(RwLock::new(pool)));
let mpsc_verif_sender = MpscTmInSharedPoolSender::new(shared_tm_store, tx); let (tx, _) = mpsc::sync_channel(10);
let mpsc_verif_sender = PacketSenderWithSharedPool::new(tx, shared_tm_store);
is_send(&mpsc_verif_sender); is_send(&mpsc_verif_sender);
} }


@ -7,9 +7,13 @@
//! all received telecommands are sent to a special handler object called TC source. Using //! all received telecommands are sent to a special handler object called TC source. Using
//! a design like this makes it simpler to add new TC packet sources or new telemetry generators: //! a design like this makes it simpler to add new TC packet sources or new telemetry generators:
//! They only need to send the received and generated data to these objects. //! They only need to send the received and generated data to these objects.
use crate::pool::{PoolProvider, SharedStaticMemoryPool, StoreAddr, StoreError};
#[cfg(feature = "std")] #[cfg(feature = "std")]
use crate::queue::GenericSendError; use crate::queue::GenericSendError;
use crate::{
pool::{PoolProvider, StoreAddr, StoreError},
pus::PacketAsVec,
ComponentId,
};
use core::cell::RefCell; use core::cell::RefCell;
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
use downcast_rs::{impl_downcast, Downcast}; use downcast_rs::{impl_downcast, Downcast};
@ -28,32 +32,30 @@ pub use std_mod::*;
pub mod tm_helper; pub mod tm_helper;
/// Generic trait for object which can receive any telecommands in form of a raw bytestream, with /// Generic trait for objects which can send any packet in the form of a raw bytestream, with
/// no assumptions about the received protocol. /// no assumptions about the underlying protocol.
///
/// This trait can also be implemented for sender components which forward the packet.
/// It is implemented for common types like [mpsc::Sender] and [mpsc::SyncSender].
pub trait PacketSenderRaw: Send { pub trait PacketSenderRaw: Send {
type Error; type Error;
fn send_raw_tc(&self, tc_raw: &[u8]) -> Result<(), Self::Error>; fn send_packet(&self, sender_id: ComponentId, packet: &[u8]) -> Result<(), Self::Error>;
} }
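// Illustrative sketch, not part of this patch: PacketSenderRaw only requires
// handing off a byte slice together with the sending component ID, so even a
// simple byte counter satisfies it.
use core::sync::atomic::{AtomicUsize, Ordering};

#[derive(Default)]
struct CountingPacketSink(AtomicUsize);

impl PacketSenderRaw for CountingPacketSink {
    type Error = ();

    fn send_packet(&self, _sender_id: ComponentId, packet: &[u8]) -> Result<(), Self::Error> {
        // A real implementor would forward the packet; this sketch only counts bytes.
        self.0.fetch_add(packet.len(), Ordering::Relaxed);
        Ok(())
    }
}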
#[cfg(feature = "std")] #[cfg(feature = "std")]
impl PacketSenderRaw for mpsc::Sender<alloc::vec::Vec<u8>> { impl PacketSenderRaw for mpsc::Sender<PacketAsVec> {
type Error = GenericSendError; type Error = GenericSendError;
fn send_raw_tc(&self, tc_raw: &[u8]) -> Result<(), Self::Error> { fn send_packet(&self, sender_id: ComponentId, packet: &[u8]) -> Result<(), Self::Error> {
self.send(tc_raw.to_vec()) self.send(PacketAsVec::new(sender_id, packet.to_vec()))
.map_err(|_| GenericSendError::RxDisconnected) .map_err(|_| GenericSendError::RxDisconnected)
} }
} }
#[cfg(feature = "std")] #[cfg(feature = "std")]
impl PacketSenderRaw for mpsc::SyncSender<alloc::vec::Vec<u8>> { impl PacketSenderRaw for mpsc::SyncSender<PacketAsVec> {
type Error = GenericSendError; type Error = GenericSendError;
fn send_raw_tc(&self, tc_raw: &[u8]) -> Result<(), Self::Error> { fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
self.try_send(tc_raw.to_vec()).map_err(|e| match e { self.try_send(PacketAsVec::new(sender_id, tc_raw.to_vec()))
.map_err(|e| match e {
mpsc::TrySendError::Full(_) => GenericSendError::QueueFull(None), mpsc::TrySendError::Full(_) => GenericSendError::QueueFull(None),
mpsc::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected, mpsc::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected,
}) })
@ -93,75 +95,90 @@ where
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
impl_downcast!(PacketSenderRawExt assoc Error); impl_downcast!(PacketSenderRawExt assoc Error);
/// Generic trait for object which can receive CCSDS space packets, for example ECSS PUS packets /// Generic trait for objects which can send CCSDS space packets, for example ECSS PUS packets
/// for CCSDS File Delivery Protocol (CFDP) packets. /// or CCSDS File Delivery Protocol (CFDP) packets wrapped in space packets.
/// pub trait PacketSenderCcsds: Send {
/// This trait can also be implemented for sender components which forward the packet.
/// It is implemented for common types like [mpsc::Sender] and [mpsc::SyncSender].
pub trait ReceivesCcsdsTc: Send {
type Error; type Error;
fn pass_ccsds(&mut self, header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error>; fn send_ccsds(
&self,
sender_id: ComponentId,
header: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error>;
} }
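// Illustrative sketch, not part of this patch: a small helper which forwards a
// CCSDS packet through any PacketSenderCcsds implementor, for example one of the
// channel-backed implementations below, which ignore the header and forward the
// raw bytes.
fn forward_ccsds_packet<Sender: PacketSenderCcsds>(
    sender: &Sender,
    sender_id: ComponentId,
    header: &SpHeader,
    packet_raw: &[u8],
) -> Result<(), Sender::Error> {
    sender.send_ccsds(sender_id, header, packet_raw)
}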
#[cfg(feature = "std")] #[cfg(feature = "std")]
impl ReceivesCcsdsTc for mpsc::Sender<alloc::vec::Vec<u8>> { impl PacketSenderCcsds for mpsc::Sender<PacketAsVec> {
type Error = GenericSendError; type Error = GenericSendError;
fn pass_ccsds(&mut self, _: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> { fn send_ccsds(
self.send(tc_raw.to_vec()) &self,
sender_id: ComponentId,
_: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error> {
self.send(PacketAsVec::new(sender_id, tc_raw.to_vec()))
.map_err(|_| GenericSendError::RxDisconnected) .map_err(|_| GenericSendError::RxDisconnected)
} }
} }
#[cfg(feature = "std")] #[cfg(feature = "std")]
impl ReceivesCcsdsTc for mpsc::SyncSender<alloc::vec::Vec<u8>> { impl PacketSenderCcsds for mpsc::SyncSender<PacketAsVec> {
type Error = GenericSendError; type Error = GenericSendError;
fn pass_ccsds(&mut self, _: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> { fn send_ccsds(
self.try_send(tc_raw.to_vec()).map_err(|e| match e { &self,
sender_id: ComponentId,
_: &SpHeader,
packet_raw: &[u8],
) -> Result<(), Self::Error> {
self.try_send(PacketAsVec::new(sender_id, packet_raw.to_vec()))
.map_err(|e| match e {
mpsc::TrySendError::Full(_) => GenericSendError::QueueFull(None), mpsc::TrySendError::Full(_) => GenericSendError::QueueFull(None),
mpsc::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected, mpsc::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected,
}) })
} }
} }
/// Generic trait for a TM packet source, with no restrictions on the type of TM. /// Generic trait for a packet source, with no restrictions on the type of packet.
/// Implementors write the telemetry into the provided buffer and return the size of the telemetry. /// Implementors write the packet into the provided buffer and return the written packet size.
pub trait TmPacketSource: Send { pub trait PacketSource: Send {
type Error; type Error;
fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error>; fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error>;
} }
/// Extension trait of [TmPacketSource] which allows downcasting by implementing [Downcast]. /// Extension trait of [PacketSource] which allows downcasting by implementing [Downcast].
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
pub trait TmPacketSourceExt: TmPacketSource + Downcast { pub trait PacketSourceExt: PacketSource + Downcast {
// Remove this once trait upcasting coercion has been implemented. // Remove this once trait upcasting coercion has been implemented.
// Tracking issue: https://github.com/rust-lang/rust/issues/65991 // Tracking issue: https://github.com/rust-lang/rust/issues/65991
fn upcast(&self) -> &dyn TmPacketSource<Error = Self::Error>; fn upcast(&self) -> &dyn PacketSource<Error = Self::Error>;
// Remove this once trait upcasting coercion has been implemented. // Remove this once trait upcasting coercion has been implemented.
// Tracking issue: https://github.com/rust-lang/rust/issues/65991 // Tracking issue: https://github.com/rust-lang/rust/issues/65991
fn upcast_mut(&mut self) -> &mut dyn TmPacketSource<Error = Self::Error>; fn upcast_mut(&mut self) -> &mut dyn PacketSource<Error = Self::Error>;
} }
/// Blanket implementation to automatically implement [TmPacketSourceExt] when the [alloc] feature /// Blanket implementation to automatically implement [PacketSourceExt] when the [alloc] feature
/// is enabled. /// is enabled.
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
impl<T> TmPacketSourceExt for T impl<T> PacketSourceExt for T
where where
T: TmPacketSource + 'static, T: PacketSource + 'static,
{ {
// Remove this once trait upcasting coercion has been implemented. // Remove this once trait upcasting coercion has been implemented.
// Tracking issue: https://github.com/rust-lang/rust/issues/65991 // Tracking issue: https://github.com/rust-lang/rust/issues/65991
fn upcast(&self) -> &dyn TmPacketSource<Error = Self::Error> { fn upcast(&self) -> &dyn PacketSource<Error = Self::Error> {
self self
} }
// Remove this once trait upcasting coercion has been implemented. // Remove this once trait upcasting coercion has been implemented.
// Tracking issue: https://github.com/rust-lang/rust/issues/65991 // Tracking issue: https://github.com/rust-lang/rust/issues/65991
fn upcast_mut(&mut self) -> &mut dyn TmPacketSource<Error = Self::Error> { fn upcast_mut(&mut self) -> &mut dyn PacketSource<Error = Self::Error> {
self self
} }
} }
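// Illustrative sketch, not part of this patch (assumes the alloc feature): a
// PacketSource backed by a simple queue of owned packets. The caller must supply
// a buffer large enough for the next queued packet.
#[derive(Default)]
struct QueuedPacketSource {
    packets: alloc::collections::VecDeque<alloc::vec::Vec<u8>>,
}

impl PacketSource for QueuedPacketSource {
    type Error = ();

    fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
        if let Some(packet) = self.packets.pop_front() {
            if buffer.len() < packet.len() {
                // Buffer too small: push the packet back and report an error.
                self.packets.push_front(packet);
                return Err(());
            }
            buffer[..packet.len()].copy_from_slice(&packet);
            return Ok(packet.len());
        }
        // An empty queue yields a packet length of 0.
        Ok(0)
    }
}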
/// Newtype wrapper around the [SharedStaticMemoryPool] to enable extension helper traits on
/// top of the regular shared memory pool API.
#[derive(Clone)] #[derive(Clone)]
pub struct SharedPacketPool(pub SharedStaticMemoryPool); pub struct SharedPacketPool(pub SharedStaticMemoryPool);
@ -171,6 +188,7 @@ impl SharedPacketPool {
} }
} }
/// Helper trait for any generic (static) store which allows storing raw or CCSDS packets.
pub trait CcsdsPacketPool { pub trait CcsdsPacketPool {
fn add_ccsds_tc(&mut self, _: &SpHeader, tc_raw: &[u8]) -> Result<StoreAddr, StoreError> { fn add_ccsds_tc(&mut self, _: &SpHeader, tc_raw: &[u8]) -> Result<StoreAddr, StoreError> {
self.add_raw_tc(tc_raw) self.add_raw_tc(tc_raw)
@ -179,10 +197,12 @@ pub trait CcsdsPacketPool {
fn add_raw_tc(&mut self, tc_raw: &[u8]) -> Result<StoreAddr, StoreError>; fn add_raw_tc(&mut self, tc_raw: &[u8]) -> Result<StoreAddr, StoreError>;
} }
/// Helper trait for any generic (static) store which allows storing ECSS PUS Telecommand packets.
pub trait PusTcPool { pub trait PusTcPool {
fn add_pus_tc(&mut self, pus_tc: &PusTcReader) -> Result<StoreAddr, StoreError>; fn add_pus_tc(&mut self, pus_tc: &PusTcReader) -> Result<StoreAddr, StoreError>;
} }
/// Helper trait for any generic (static) store which allows storing ECSS PUS Telemetry packets.
pub trait PusTmPool { pub trait PusTmPool {
fn add_pus_tm_from_reader(&mut self, pus_tm: &PusTmReader) -> Result<StoreAddr, StoreError>; fn add_pus_tm_from_reader(&mut self, pus_tm: &PusTmReader) -> Result<StoreAddr, StoreError>;
fn add_pus_tm_from_creator(&mut self, pus_tm: &PusTmCreator) -> Result<StoreAddr, StoreError>; fn add_pus_tm_from_creator(&mut self, pus_tm: &PusTmCreator) -> Result<StoreAddr, StoreError>;
@ -228,13 +248,23 @@ impl CcsdsPacketPool for SharedPacketPool {
} }
} }
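// Illustrative sketch, not part of this patch (assumes the std feature and the
// guard-based read API from crate::pool): storing a raw TC through the
// CcsdsPacketPool helper trait and reading it back through the wrapped pool.
fn store_and_read_back() {
    use crate::pool::{PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};
    use std::sync::RwLock;

    let pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(4, 16)], false));
    let mut shared_pool = SharedPacketPool::new(&SharedStaticMemoryPool::new(RwLock::new(pool)));
    let addr = shared_pool.add_raw_tc(&[1, 2, 3, 4]).expect("storing TC failed");
    let mut pool_guard = shared_pool.0.write().unwrap();
    let read_guard = pool_guard.read_with_guard(addr);
    assert_eq!(read_guard.read_as_vec().unwrap(), [1, 2, 3, 4]);
}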
/// Generic trait for any sender component able to send packets stored inside a pool structure.
pub trait PacketInPoolSender: Send {
fn send_packet(
&self,
sender_id: ComponentId,
store_addr: StoreAddr,
) -> Result<(), GenericSendError>;
}
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub mod std_mod { pub mod std_mod {
use std::sync::mpsc; #[cfg(feature = "crossbeam")]
use crossbeam_channel as cb;
use thiserror::Error; use thiserror::Error;
use crate::pus::{EcssTmSender, ReceivesEcssPusTc}; use crate::pus::{EcssTmSender, EcssTmtcError, PacketInPool, PacketSenderPusTc};
use super::*; use super::*;
@ -246,77 +276,164 @@ pub mod std_mod {
Send(#[from] GenericSendError), Send(#[from] GenericSendError),
} }
#[derive(Clone)] pub use crate::pool::SharedStaticMemoryPool;
pub struct PacketSenderSharedPool<PacketStore: CcsdsPacketPool = SharedPacketPool> {
pub tc_source: mpsc::SyncSender<StoreAddr>, impl PacketInPoolSender for mpsc::Sender<PacketInPool> {
pub shared_pool: RefCell<PacketStore>, fn send_packet(
&self,
sender_id: ComponentId,
store_addr: StoreAddr,
) -> Result<(), GenericSendError> {
self.send(PacketInPool::new(sender_id, store_addr))
.map_err(|_| GenericSendError::RxDisconnected)
}
} }
impl<PacketStore: CcsdsPacketPool> PacketSenderSharedPool<PacketStore> { impl PacketInPoolSender for mpsc::SyncSender<PacketInPool> {
pub fn new(tc_sender: mpsc::SyncSender<StoreAddr>, shared_pool: PacketStore) -> Self { fn send_packet(
&self,
sender_id: ComponentId,
store_addr: StoreAddr,
) -> Result<(), GenericSendError> {
self.try_send(PacketInPool::new(sender_id, store_addr))
.map_err(|e| match e {
mpsc::TrySendError::Full(_) => GenericSendError::QueueFull(None),
mpsc::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected,
})
}
}
#[cfg(feature = "crossbeam")]
impl PacketInPoolSender for cb::Sender<PacketInPool> {
fn send_packet(
&self,
sender_id: ComponentId,
store_addr: StoreAddr,
) -> Result<(), GenericSendError> {
self.try_send(PacketInPool::new(sender_id, store_addr))
.map_err(|e| match e {
cb::TrySendError::Full(_) => GenericSendError::QueueFull(None),
cb::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected,
})
}
}
/// This is the primary structure used to send packets which are stored inside a dedicated
/// memory pool.
#[derive(Clone)]
pub struct PacketSenderWithSharedPool<
Sender: PacketInPoolSender = mpsc::SyncSender<PacketInPool>,
PacketPool: CcsdsPacketPool = SharedPacketPool,
> {
pub sender: Sender,
pub shared_pool: RefCell<PacketPool>,
}
impl<Sender: PacketInPoolSender> PacketSenderWithSharedPool<Sender, SharedPacketPool> {
pub fn new_with_shared_packet_pool(
packet_sender: Sender,
shared_pool: &SharedStaticMemoryPool,
) -> Self {
Self { Self {
tc_source: tc_sender, sender: packet_sender,
shared_pool: RefCell::new(SharedPacketPool::new(shared_pool)),
}
}
}
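// Illustrative usage sketch, not part of this patch (assumes the std feature):
// wire the pool-backed sender to a bounded mpsc channel. The raw packet bytes
// are stored inside the shared pool and only the store address travels through
// the channel.
fn pool_backed_sender_example() {
    use crate::pool::{StaticMemoryPool, StaticPoolConfig};
    use std::sync::{mpsc, RwLock};

    let pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(16, 64)], false));
    let shared_pool = SharedStaticMemoryPool::new(RwLock::new(pool));
    let (tx, rx) = mpsc::sync_channel::<PacketInPool>(16);
    let sender = PacketSenderWithSharedPool::new_with_shared_packet_pool(tx, &shared_pool);
    // PacketSenderRaw impl: store the packet, then send its address.
    sender.send_packet(0, &[1, 2, 3, 4]).expect("send failed");
    assert_eq!(rx.try_recv().unwrap().sender_id, 0);
}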
impl<Sender: PacketInPoolSender, PacketStore: CcsdsPacketPool>
PacketSenderWithSharedPool<Sender, PacketStore>
{
pub fn new(packet_sender: Sender, shared_pool: PacketStore) -> Self {
Self {
sender: packet_sender,
shared_pool: RefCell::new(shared_pool), shared_pool: RefCell::new(shared_pool),
} }
} }
} }
impl<PacketStore: CcsdsPacketPool + Clone> PacketSenderSharedPool<PacketStore> { impl<Sender: PacketInPoolSender, PacketStore: CcsdsPacketPool + Clone>
PacketSenderWithSharedPool<Sender, PacketStore>
{
pub fn shared_packet_store(&self) -> PacketStore { pub fn shared_packet_store(&self) -> PacketStore {
let pool = self.shared_pool.borrow(); let pool = self.shared_pool.borrow();
pool.clone() pool.clone()
} }
} }
impl<PacketStore: CcsdsPacketPool + Send> PacketSenderRaw for PacketSenderSharedPool<PacketStore> { impl<Sender: PacketInPoolSender, PacketStore: CcsdsPacketPool + Send> PacketSenderRaw
type Error = StoreAndSendError; for PacketSenderWithSharedPool<Sender, PacketStore>
fn send_raw_tc(&self, tc_raw: &[u8]) -> Result<(), Self::Error> {
let mut shared_pool = self.shared_pool.borrow_mut();
let addr = shared_pool.add_raw_tc(tc_raw)?;
drop(shared_pool);
self.tc_source.try_send(addr).map_err(|e| match e {
mpsc::TrySendError::Full(_) => GenericSendError::QueueFull(None),
mpsc::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected,
})?;
Ok(())
}
}
impl<PacketStore: CcsdsPacketPool + PusTcPool + Send> ReceivesEcssPusTc
for PacketSenderSharedPool<PacketStore>
{ {
type Error = StoreAndSendError; type Error = StoreAndSendError;
fn pass_pus_tc(&mut self, _: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error> { fn send_packet(&self, sender_id: ComponentId, packet: &[u8]) -> Result<(), Self::Error> {
let mut shared_pool = self.shared_pool.borrow_mut(); let mut shared_pool = self.shared_pool.borrow_mut();
let addr = shared_pool.add_raw_tc(pus_tc.raw_data())?; let store_addr = shared_pool.add_raw_tc(packet)?;
drop(shared_pool); drop(shared_pool);
self.tc_source.try_send(addr).map_err(|e| match e { self.sender
mpsc::TrySendError::Full(_) => GenericSendError::QueueFull(None), .send_packet(sender_id, store_addr)
mpsc::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected, .map_err(StoreAndSendError::Send)?;
})?;
Ok(()) Ok(())
} }
} }
impl<PacketStore: CcsdsPacketPool + Send> ReceivesCcsdsTc for PacketSenderSharedPool<PacketStore> { impl<Sender: PacketInPoolSender, PacketStore: CcsdsPacketPool + PusTcPool + Send>
PacketSenderPusTc for PacketSenderWithSharedPool<Sender, PacketStore>
{
type Error = StoreAndSendError; type Error = StoreAndSendError;
fn pass_ccsds(&mut self, _sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> { fn send_pus_tc(
self.send_raw_tc(tc_raw) &self,
sender_id: ComponentId,
_: &SpHeader,
pus_tc: &PusTcReader,
) -> Result<(), Self::Error> {
let mut shared_pool = self.shared_pool.borrow_mut();
let store_addr = shared_pool.add_raw_tc(pus_tc.raw_data())?;
drop(shared_pool);
self.sender
.send_packet(sender_id, store_addr)
.map_err(StoreAndSendError::Send)?;
Ok(())
} }
} }
impl<PacketStore: CcsdsPacketPool + PusTmPool + Send> EcssTmSender impl<Sender: PacketInPoolSender, PacketStore: CcsdsPacketPool + Send> PacketSenderCcsds
for PacketSenderSharedPool<PacketStore> for PacketSenderWithSharedPool<Sender, PacketStore>
{
type Error = StoreAndSendError;
fn send_ccsds(
&self,
sender_id: ComponentId,
_sp_header: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error> {
self.send_packet(sender_id, tc_raw)
}
}
impl<Sender: PacketInPoolSender, PacketStore: CcsdsPacketPool + PusTmPool + Send> EcssTmSender
for PacketSenderWithSharedPool<Sender, PacketStore>
{ {
fn send_tm( fn send_tm(
&self, &self,
source_id: crate::ComponentId, sender_id: crate::ComponentId,
tm: crate::pus::PusTmVariant, tm: crate::pus::PusTmVariant,
) -> Result<(), crate::pus::EcssTmtcError> { ) -> Result<(), crate::pus::EcssTmtcError> {
todo!() let send_addr = |store_addr: StoreAddr| {
self.sender
.send_packet(sender_id, store_addr)
.map_err(EcssTmtcError::Send)
};
match tm {
crate::pus::PusTmVariant::InStore(store_addr) => send_addr(store_addr),
crate::pus::PusTmVariant::Direct(tm_creator) => {
let mut pool = self.shared_pool.borrow_mut();
let store_addr = pool.add_pus_tm_from_creator(&tm_creator)?;
send_addr(store_addr)
}
}
} }
} }
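// Illustrative sketch, not part of this patch (assumes the std feature and the
// guard-based read API from crate::pool): the receiving end of a pool-backed TM
// channel only ever sees PacketInPool messages, regardless of whether the TM was
// sent as a store address or as a direct PusTmCreator.
fn drain_tm_channel(
    tm_rx: &mpsc::Receiver<PacketInPool>,
    shared_tm_pool: &SharedStaticMemoryPool,
) {
    use crate::pool::PoolProviderWithGuards;

    while let Ok(tm_in_pool) = tm_rx.try_recv() {
        let mut pool_guard = shared_tm_pool.write().expect("locking TM pool failed");
        let read_guard = pool_guard.read_with_guard(tm_in_pool.store_addr);
        // Process the raw TM bytes, e.g. hand them to a downlink handler.
        let _raw_tm = read_guard.read_as_vec().expect("reading TM failed");
    }
}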
} }
@ -333,19 +450,21 @@ pub(crate) mod tests {
use std::sync::mpsc; use std::sync::mpsc;
pub(crate) fn send_with_sender<SendError>( pub(crate) fn send_with_sender<SendError>(
sender_id: ComponentId,
packet_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized), packet_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized),
packet: &[u8], packet: &[u8],
) -> Result<(), SendError> { ) -> Result<(), SendError> {
packet_sender.send_raw_tc(packet) packet_sender.send_packet(sender_id, packet)
} }
#[test] #[test]
fn test_basic_mpsc_channel_sender() { fn test_basic_mpsc_channel_sender_unbounded() {
let (tx, rx) = mpsc::channel(); let (tx, rx) = mpsc::channel();
let some_packet = vec![1, 2, 3, 4, 5]; let some_packet = vec![1, 2, 3, 4, 5];
send_with_sender(&tx, &some_packet).expect("failed to send packet"); send_with_sender(1, &tx, &some_packet).expect("failed to send packet");
let rx_packet = rx.try_recv().unwrap(); let rx_packet = rx.try_recv().unwrap();
assert_eq!(some_packet, rx_packet); assert_eq!(some_packet, rx_packet.packet);
assert_eq!(1, rx_packet.sender_id);
} }
#[test] #[test]
@ -353,7 +472,7 @@ pub(crate) mod tests {
let (tx, rx) = mpsc::channel(); let (tx, rx) = mpsc::channel();
let some_packet = vec![1, 2, 3, 4, 5]; let some_packet = vec![1, 2, 3, 4, 5];
drop(rx); drop(rx);
let result = send_with_sender(&tx, &some_packet); let result = send_with_sender(2, &tx, &some_packet);
assert!(result.is_err()); assert!(result.is_err());
matches!(result.unwrap_err(), GenericSendError::RxDisconnected); matches!(result.unwrap_err(), GenericSendError::RxDisconnected);
} }
@ -362,9 +481,10 @@ pub(crate) mod tests {
fn test_basic_mpsc_sync_sender() { fn test_basic_mpsc_sync_sender() {
let (tx, rx) = mpsc::sync_channel(3); let (tx, rx) = mpsc::sync_channel(3);
let some_packet = vec![1, 2, 3, 4, 5]; let some_packet = vec![1, 2, 3, 4, 5];
send_with_sender(&tx, &some_packet).expect("failed to send packet"); send_with_sender(3, &tx, &some_packet).expect("failed to send packet");
let rx_packet = rx.try_recv().unwrap(); let rx_packet = rx.try_recv().unwrap();
assert_eq!(some_packet, rx_packet); assert_eq!(some_packet, rx_packet.packet);
assert_eq!(3, rx_packet.sender_id);
} }
#[test] #[test]
@ -372,7 +492,7 @@ pub(crate) mod tests {
let (tx, rx) = mpsc::sync_channel(3); let (tx, rx) = mpsc::sync_channel(3);
let some_packet = vec![1, 2, 3, 4, 5]; let some_packet = vec![1, 2, 3, 4, 5];
drop(rx); drop(rx);
let result = send_with_sender(&tx, &some_packet); let result = send_with_sender(0, &tx, &some_packet);
assert!(result.is_err()); assert!(result.is_err());
matches!(result.unwrap_err(), GenericSendError::RxDisconnected); matches!(result.unwrap_err(), GenericSendError::RxDisconnected);
} }
@ -381,13 +501,31 @@ pub(crate) mod tests {
fn test_basic_mpsc_sync_sender_queue_full() { fn test_basic_mpsc_sync_sender_queue_full() {
let (tx, rx) = mpsc::sync_channel(1); let (tx, rx) = mpsc::sync_channel(1);
let some_packet = vec![1, 2, 3, 4, 5]; let some_packet = vec![1, 2, 3, 4, 5];
send_with_sender(&tx, &some_packet).expect("failed to send packet"); send_with_sender(0, &tx, &some_packet).expect("failed to send packet");
let result = send_with_sender(&tx, &some_packet); let result = send_with_sender(1, &tx, &some_packet);
assert!(result.is_err()); assert!(result.is_err());
matches!(result.unwrap_err(), GenericSendError::QueueFull(None)); matches!(result.unwrap_err(), GenericSendError::QueueFull(None));
let rx_packet = rx.try_recv().unwrap(); let rx_packet = rx.try_recv().unwrap();
assert_eq!(some_packet, rx_packet); assert_eq!(some_packet, rx_packet.packet);
} }
#[test]
fn test_basic_shared_store_sender_unbounded() {
let (tc_tx, tc_rx) = mpsc::channel();
let pool_cfg = StaticPoolConfig::new(vec![(2, 8)], true);
let shared_pool = SharedPacketPool::new(&SharedStaticMemoryPool::new(RwLock::new(
StaticMemoryPool::new(pool_cfg),
)));
let some_packet = vec![1, 2, 3, 4, 5];
let tc_sender = PacketSenderWithSharedPool::new(tc_tx, shared_pool.clone());
send_with_sender(5, &tc_sender, &some_packet).expect("failed to send packet");
let packet_in_pool = tc_rx.try_recv().unwrap();
let mut pool = shared_pool.0.write().unwrap();
let read_guard = pool.read_with_guard(packet_in_pool.store_addr);
assert_eq!(read_guard.read_as_vec().unwrap(), some_packet);
assert_eq!(packet_in_pool.sender_id, 5);
}
#[test] #[test]
fn test_basic_shared_store_sender() { fn test_basic_shared_store_sender() {
let (tc_tx, tc_rx) = mpsc::sync_channel(10); let (tc_tx, tc_rx) = mpsc::sync_channel(10);
@ -396,12 +534,13 @@ pub(crate) mod tests {
StaticMemoryPool::new(pool_cfg), StaticMemoryPool::new(pool_cfg),
))); )));
let some_packet = vec![1, 2, 3, 4, 5]; let some_packet = vec![1, 2, 3, 4, 5];
let tc_sender = PacketSenderSharedPool::new(tc_tx, shared_pool.clone()); let tc_sender = PacketSenderWithSharedPool::new(tc_tx, shared_pool.clone());
send_with_sender(&tc_sender, &some_packet).expect("failed to send packet"); send_with_sender(5, &tc_sender, &some_packet).expect("failed to send packet");
let tc_tx_addr = tc_rx.try_recv().unwrap(); let packet_in_pool = tc_rx.try_recv().unwrap();
let mut pool = shared_pool.0.write().unwrap(); let mut pool = shared_pool.0.write().unwrap();
let read_guard = pool.read_with_guard(tc_tx_addr); let read_guard = pool.read_with_guard(packet_in_pool.store_addr);
assert_eq!(read_guard.read_as_vec().unwrap(), some_packet); assert_eq!(read_guard.read_as_vec().unwrap(), some_packet);
assert_eq!(packet_in_pool.sender_id, 5);
} }
#[test] #[test]
@ -413,8 +552,8 @@ pub(crate) mod tests {
))); )));
let some_packet = vec![1, 2, 3, 4, 5]; let some_packet = vec![1, 2, 3, 4, 5];
drop(tc_rx); drop(tc_rx);
let tc_sender = PacketSenderSharedPool::new(tc_tx, shared_pool.clone()); let tc_sender = PacketSenderWithSharedPool::new(tc_tx, shared_pool.clone());
let result = send_with_sender(&tc_sender, &some_packet); let result = send_with_sender(2, &tc_sender, &some_packet);
assert!(result.is_err()); assert!(result.is_err());
matches!( matches!(
result.unwrap_err(), result.unwrap_err(),
@ -430,18 +569,19 @@ pub(crate) mod tests {
StaticMemoryPool::new(pool_cfg), StaticMemoryPool::new(pool_cfg),
))); )));
let some_packet = vec![1, 2, 3, 4, 5]; let some_packet = vec![1, 2, 3, 4, 5];
let tc_sender = PacketSenderSharedPool::new(tc_tx, shared_pool.clone()); let tc_sender = PacketSenderWithSharedPool::new(tc_tx, shared_pool.clone());
send_with_sender(&tc_sender, &some_packet).expect("failed to send packet"); send_with_sender(3, &tc_sender, &some_packet).expect("failed to send packet");
let result = send_with_sender(&tc_sender, &some_packet); let result = send_with_sender(3, &tc_sender, &some_packet);
assert!(result.is_err()); assert!(result.is_err());
matches!( matches!(
result.unwrap_err(), result.unwrap_err(),
StoreAndSendError::Send(GenericSendError::RxDisconnected) StoreAndSendError::Send(GenericSendError::RxDisconnected)
); );
let tc_tx_addr = tc_rx.try_recv().unwrap(); let packet_in_pool = tc_rx.try_recv().unwrap();
let mut pool = shared_pool.0.write().unwrap(); let mut pool = shared_pool.0.write().unwrap();
let read_guard = pool.read_with_guard(tc_tx_addr); let read_guard = pool.read_with_guard(packet_in_pool.store_addr);
assert_eq!(read_guard.read_as_vec().unwrap(), some_packet); assert_eq!(read_guard.read_as_vec().unwrap(), some_packet);
assert_eq!(packet_in_pool.sender_id, 3);
} }
#[test] #[test]
@ -452,17 +592,18 @@ pub(crate) mod tests {
StaticMemoryPool::new(pool_cfg), StaticMemoryPool::new(pool_cfg),
))); )));
let some_packet = vec![1, 2, 3, 4, 5]; let some_packet = vec![1, 2, 3, 4, 5];
let tc_sender = PacketSenderSharedPool::new(tc_tx, shared_pool.clone()); let tc_sender = PacketSenderWithSharedPool::new(tc_tx, shared_pool.clone());
send_with_sender(&tc_sender, &some_packet).expect("failed to send packet"); send_with_sender(4, &tc_sender, &some_packet).expect("failed to send packet");
let result = send_with_sender(&tc_sender, &some_packet); let result = send_with_sender(4, &tc_sender, &some_packet);
assert!(result.is_err()); assert!(result.is_err());
matches!( matches!(
result.unwrap_err(), result.unwrap_err(),
StoreAndSendError::Store(StoreError::StoreFull(..)) StoreAndSendError::Store(StoreError::StoreFull(..))
); );
let tc_tx_addr = tc_rx.try_recv().unwrap(); let packet_in_pool = tc_rx.try_recv().unwrap();
let mut pool = shared_pool.0.write().unwrap(); let mut pool = shared_pool.0.write().unwrap();
let read_guard = pool.read_with_guard(tc_tx_addr); let read_guard = pool.read_with_guard(packet_in_pool.store_addr);
assert_eq!(read_guard.read_as_vec().unwrap(), some_packet); assert_eq!(read_guard.read_as_vec().unwrap(), some_packet);
assert_eq!(packet_in_pool.sender_id, 4);
} }
} }
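For orientation, a minimal usage sketch of the reworked sender path, not part of the commit: the send_packet(sender_id, packet) method is an assumption inferred from the send_with_sender test helper above, the demo sender ID is hypothetical, and the import paths are assumed from the crate layout visible in this diff.

use std::sync::{mpsc, RwLock};

use satrs::pool::{PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};
use satrs::tmtc::{
    PacketSenderRaw, PacketSenderWithSharedPool, SharedPacketPool, SharedStaticMemoryPool,
};
use satrs::ComponentId;

const DEMO_SENDER_ID: ComponentId = 42; // hypothetical ID, not from the commit

fn demo() {
    let (tc_tx, tc_rx) = mpsc::sync_channel(10);
    let shared_pool = SharedPacketPool::new(&SharedStaticMemoryPool::new(RwLock::new(
        StaticMemoryPool::new(StaticPoolConfig::new(vec![(8, 64)], true)),
    )));
    let tc_sender = PacketSenderWithSharedPool::new(tc_tx, shared_pool.clone());
    // Stores the packet in the pool and forwards a PacketInPool message which
    // carries both the store address and the ID of the sending component.
    tc_sender
        .send_packet(DEMO_SENDER_ID, &[1, 2, 3, 4, 5])
        .expect("sending TC failed");
    let packet_in_pool = tc_rx.try_recv().unwrap();
    assert_eq!(packet_in_pool.sender_id, DEMO_SENDER_ID);
    let mut pool = shared_pool.0.write().unwrap();
    let read_guard = pool.read_with_guard(packet_in_pool.store_addr);
    assert_eq!(read_guard.read_as_vec().unwrap(), vec![1, 2, 3, 4, 5]);
}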

View File

@@ -7,7 +7,7 @@ use satrs::params::U32Pair;
 use satrs::params::{Params, ParamsHeapless, WritableToBeBytes};
 use satrs::pus::event_man::{DefaultPusEventMgmtBackend, EventReporter, PusEventDispatcher};
 use satrs::pus::test_util::TEST_COMPONENT_ID_0;
-use satrs::pus::PusTmAsVec;
+use satrs::pus::PacketAsVec;
 use satrs::request::UniqueApidTargetId;
 use spacepackets::ecss::tm::PusTmReader;
 use spacepackets::ecss::{PusError, PusPacket};
@@ -37,7 +37,7 @@ fn test_threaded_usage() {
     let pus_event_man_send_provider = EventU32SenderMpsc::new(1, pus_event_man_tx);
     event_man.subscribe_all(pus_event_man_send_provider.target_id());
     event_man.add_sender(pus_event_man_send_provider);
-    let (event_tx, event_rx) = mpsc::channel::<PusTmAsVec>();
+    let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
     let reporter =
         EventReporter::new(TEST_ID.raw(), 0x02, 0, 128).expect("Creating event reporter failed");
     let pus_event_man = PusEventDispatcher::new(reporter, DefaultPusEventMgmtBackend::default());
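The rename from PusTmAsVec to PacketAsVec reflects that the type is no longer TM-specific. A hedged sketch of a consumer loop on the receiving end: the packet and sender_id fields are taken from this diff, while the tuple-returning PusTmReader::new constructor and the 7-byte CDS timestamp length are assumptions about the spacepackets version in use.

use std::sync::mpsc;

use satrs::pus::PacketAsVec;
use spacepackets::ecss::tm::PusTmReader;
use spacepackets::ecss::PusPacket;

// Sketch only: drain PacketAsVec telemetry from a channel and parse it as PUS TM.
fn drain_event_tm(event_rx: &mpsc::Receiver<PacketAsVec>) {
    while let Ok(tm_with_sender) = event_rx.try_recv() {
        // Assumed signature: PusTmReader::new(raw, timestamp_len) -> (reader, parsed_len).
        let (tm, _parsed_len) =
            PusTmReader::new(&tm_with_sender.packet, 7).expect("PUS TM parsing failed");
        println!(
            "event TM from component {}: service {}, subservice {}",
            tm_with_sender.sender_id,
            tm.service(),
            tm.subservice()
        );
    }
}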

View File

@@ -7,13 +7,12 @@ pub mod crossbeam_test {
         FailParams, RequestId, VerificationReporter, VerificationReporterCfg,
         VerificationReportingProvider,
     };
-    use satrs::pus::TmInSharedPoolSenderWithCrossbeam;
-    use satrs::tmtc::tm_helper::SharedTmPool;
+    use satrs::tmtc::{PacketSenderWithSharedPool, SharedStaticMemoryPool};
     use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader};
     use spacepackets::ecss::tm::PusTmReader;
     use spacepackets::ecss::{EcssEnumU16, EcssEnumU8, PusPacket, WritablePusPacket};
     use spacepackets::SpHeader;
-    use std::sync::{Arc, RwLock};
+    use std::sync::RwLock;
     use std::thread;
     use std::time::Duration;
@@ -36,12 +35,15 @@ pub mod crossbeam_test {
         // Shared pool object to store the verification PUS telemetry
         let pool_cfg =
             StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false);
-        let shared_tm_pool = SharedTmPool::new(StaticMemoryPool::new(pool_cfg.clone()));
-        let shared_tc_pool_0 = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
-        let shared_tc_pool_1 = shared_tc_pool_0.clone();
+        let shared_tm_pool =
+            SharedStaticMemoryPool::new(RwLock::new(StaticMemoryPool::new(pool_cfg.clone())));
+        let shared_tc_pool =
+            SharedStaticMemoryPool::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
+        let shared_tc_pool_1 = shared_tc_pool.clone();
         let (tx, rx) = crossbeam_channel::bounded(10);
-        let sender_0 = TmInSharedPoolSenderWithCrossbeam::new(shared_tm_pool.clone(), tx.clone());
-        let sender_1 = sender_0.clone();
+        let sender =
+            PacketSenderWithSharedPool::new_with_shared_packet_pool(tx.clone(), &shared_tm_pool);
+        let sender_1 = sender.clone();
         let mut reporter_with_sender_0 = VerificationReporter::new(TEST_COMPONENT_ID_0.id(), &cfg);
         let mut reporter_with_sender_1 = reporter_with_sender_0.clone();
         // For test purposes, we retrieve the request ID from the TCs and pass them to the receiver
@@ -52,7 +54,7 @@ pub mod crossbeam_test {
         let (tx_tc_0, rx_tc_0) = crossbeam_channel::bounded(3);
         let (tx_tc_1, rx_tc_1) = crossbeam_channel::bounded(3);
         {
-            let mut tc_guard = shared_tc_pool_0.write().unwrap();
+            let mut tc_guard = shared_tc_pool.write().unwrap();
             let sph = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
             let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
             let pus_tc_0 = PusTcCreator::new_no_app_data(sph, tc_header, true);
@@ -81,7 +83,7 @@ pub mod crossbeam_test {
                 .expect("Receive timeout");
             let tc_len;
             {
-                let mut tc_guard = shared_tc_pool_0.write().unwrap();
+                let mut tc_guard = shared_tc_pool.write().unwrap();
                 let pg = tc_guard.read_with_guard(tc_addr);
                 tc_len = pg.read(&mut tc_buf).unwrap();
             }
@@ -89,24 +91,24 @@ pub mod crossbeam_test {
             let token = reporter_with_sender_0.add_tc_with_req_id(req_id_0);
             let accepted_token = reporter_with_sender_0
-                .acceptance_success(&sender_0, token, &FIXED_STAMP)
+                .acceptance_success(&sender, token, &FIXED_STAMP)
                 .expect("Acceptance success failed");
             // Do some start handling here
             let started_token = reporter_with_sender_0
-                .start_success(&sender_0, accepted_token, &FIXED_STAMP)
+                .start_success(&sender, accepted_token, &FIXED_STAMP)
                 .expect("Start success failed");
             // Do some step handling here
             reporter_with_sender_0
-                .step_success(&sender_0, &started_token, &FIXED_STAMP, EcssEnumU8::new(0))
+                .step_success(&sender, &started_token, &FIXED_STAMP, EcssEnumU8::new(0))
                 .expect("Start success failed");
             // Finish up
             reporter_with_sender_0
-                .step_success(&sender_0, &started_token, &FIXED_STAMP, EcssEnumU8::new(1))
+                .step_success(&sender, &started_token, &FIXED_STAMP, EcssEnumU8::new(1))
                 .expect("Start success failed");
             reporter_with_sender_0
-                .completion_success(&sender_0, started_token, &FIXED_STAMP)
+                .completion_success(&sender, started_token, &FIXED_STAMP)
                 .expect("Completion success failed");
         });
@@ -145,9 +147,8 @@ pub mod crossbeam_test {
             .recv_timeout(Duration::from_millis(50))
             .expect("Packet reception timeout");
         let tm_len;
-        let shared_tm_store = shared_tm_pool.clone_backing_pool();
         {
-            let mut rg = shared_tm_store.write().expect("Error locking shared pool");
+            let mut rg = shared_tm_pool.write().expect("Error locking shared pool");
             let store_guard = rg.read_with_guard(tm_in_pool.store_addr);
             tm_len = store_guard
                 .read(&mut tm_buf)
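One visible effect of the rework: the dedicated TmInSharedPoolSenderWithCrossbeam type is gone, and the same PacketSenderWithSharedPool works over a crossbeam channel. A sketch of the construction pattern, with the constructor name taken from this diff and everything else a plausible assumption rather than library-confirmed code:

use std::sync::RwLock;

use satrs::pool::{StaticMemoryPool, StaticPoolConfig};
use satrs::tmtc::{PacketSenderWithSharedPool, SharedStaticMemoryPool};

// Sketch only: one pool-backed sender shared between reporter threads.
fn make_verification_senders() {
    let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128)], false);
    let shared_tm_pool =
        SharedStaticMemoryPool::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
    let (tx, _rx) = crossbeam_channel::bounded(10);
    // new_with_shared_packet_pool wraps the raw pool handle internally.
    let sender = PacketSenderWithSharedPool::new_with_shared_packet_pool(tx, &shared_tm_pool);
    // Clones share the channel and the backing pool, so each verification
    // reporter thread can own its own copy.
    let _sender_for_second_thread = sender.clone();
}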

View File

@@ -28,7 +28,8 @@ use satrs::{
         ConnectionResult, HandledConnectionHandler, HandledConnectionInfo, ServerConfig,
         TcpSpacepacketsServer, TcpTmtcInCobsServer,
     },
-    tmtc::TmPacketSource,
+    tmtc::PacketSource,
+    ComponentId,
 };
 use spacepackets::{
     ecss::{tc::PusTcCreator, WritablePusPacket},
@@ -74,7 +75,7 @@ impl SyncTmSource {
     }
 }

-impl TmPacketSource for SyncTmSource {
+impl PacketSource for SyncTmSource {
     type Error = ();

     fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
@@ -96,6 +97,7 @@ impl TmPacketSource for SyncTmSource {
     }
 }

+const TCP_SERVER_ID: ComponentId = 0x05;
 const SIMPLE_PACKET: [u8; 5] = [1, 2, 3, 4, 5];
 const INVERTED_PACKET: [u8; 5] = [5, 4, 3, 4, 1];
 const AUTO_PORT_ADDR: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
@@ -107,7 +109,13 @@ fn test_cobs_server() {
     // Insert a telemetry packet which will be read back by the client at a later stage.
     tm_source.add_tm(&INVERTED_PACKET);
     let mut tcp_server = TcpTmtcInCobsServer::new(
-        ServerConfig::new(AUTO_PORT_ADDR, Duration::from_millis(2), 1024, 1024),
+        ServerConfig::new(
+            TCP_SERVER_ID,
+            AUTO_PORT_ADDR,
+            Duration::from_millis(2),
+            1024,
+            1024,
+        ),
         tm_source,
         tc_sender.clone(),
         ConnectionFinishedHandler::default(),
@@ -175,8 +183,9 @@ fn test_cobs_server() {
         panic!("connection was not handled properly");
     }
     // Check that the packet was received and decoded successfully.
-    let tc = tc_receiver.try_recv().expect("no TC received");
-    assert_eq!(tc, SIMPLE_PACKET);
+    let tc_with_sender = tc_receiver.try_recv().expect("no TC received");
+    assert_eq!(tc_with_sender.packet, SIMPLE_PACKET);
+    assert_eq!(tc_with_sender.sender_id, TCP_SERVER_ID);
     matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty));
 }
@@ -194,7 +203,13 @@ fn test_ccsds_server() {
     let mut packet_id_lookup = HashSet::new();
     packet_id_lookup.insert(TEST_PACKET_ID_0);
     let mut tcp_server = TcpSpacepacketsServer::new(
-        ServerConfig::new(AUTO_PORT_ADDR, Duration::from_millis(2), 1024, 1024),
+        ServerConfig::new(
+            TCP_SERVER_ID,
+            AUTO_PORT_ADDR,
+            Duration::from_millis(2),
+            1024,
+            1024,
+        ),
         tm_source,
         tc_sender,
         packet_id_lookup,
@@ -263,7 +278,8 @@ fn test_ccsds_server() {
         panic!("connection was not handled properly");
     }
     // Check that TC has arrived.
-    let tc = tc_receiver.try_recv().expect("no TC received");
-    assert_eq!(tc, tc_0);
+    let tc_with_sender = tc_receiver.try_recv().expect("no TC received");
+    assert_eq!(tc_with_sender.packet, tc_0);
+    assert_eq!(tc_with_sender.sender_id, TCP_SERVER_ID);
     matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty));
 }
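Both TCP servers now receive the server's component ID through ServerConfig and stamp it on every forwarded TC as the sender_id. A hedged sketch of the new constructor call: the argument order comes from the tests above, while the parameter meanings in the comments are inferences from the positional values, not documented names.

use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::time::Duration;

use satrs::hal::std::tcp_server::ServerConfig;
use satrs::ComponentId;

const TCP_SERVER_ID: ComponentId = 0x05;
const AUTO_PORT_ADDR: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);

// Sketch only: trailing parameter meanings are assumptions.
fn make_server_config() -> ServerConfig {
    ServerConfig::new(
        TCP_SERVER_ID,            // ID stamped on received TCs as sender_id
        AUTO_PORT_ADDR,           // bind address; port 0 lets the OS pick one
        Duration::from_millis(2), // inner loop delay (assumed)
        1024,                     // TM buffer size (assumed)
        1024,                     // TC buffer size (assumed)
    )
}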