// #[cfg(feature = "crossbeam")]
pub mod crossbeam_test {
    use hashbrown::HashMap;
    use satrs::pool::{PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};
    use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0};
    use satrs::pus::verification::{
        FailParams, RequestId, VerificationReporter, VerificationReporterCfg,
        VerificationReportingProvider,
    };
    use satrs::pus::TmInSharedPoolSenderWithCrossbeam;
    use satrs::tmtc::tm_helper::SharedTmPool;
    use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader};
    use spacepackets::ecss::tm::PusTmReader;
    use spacepackets::ecss::{EcssEnumU16, EcssEnumU8, PusPacket, WritablePusPacket};
    use spacepackets::SpHeader;
    use std::sync::{Arc, RwLock};
    use std::thread;
    use std::time::Duration;

    const FIXED_STAMP: [u8; 7] = [0; 7];
    const PACKETS_SENT: u8 = 8;

    /// This test also shows how the verification reporter can be used in a multi-threaded
    /// context by cloning it and passing the clones to two separate threads.
    ///
    /// - The first thread generates an acceptance, a start, two step and one completion report
    /// - The second one generates an acceptance and start success report and a completion
    ///   failure
    /// - The third thread is the verification receiver. In the test case, it verifies that the
    ///   other two threads have sent the expected verification reports
    #[test]
    fn test_shared_reporter() {
        // We use a synced sequence count provider here because both verification reporters have
        // the same APID. If they had distinct APIDs, the more correct approach would be to give
        // each reporter its own sequence count provider.
        let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
        // Shared pool object to store the verification PUS telemetry.
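        // Four subpools with 10 free blocks each and block sizes of 32, 64, 128 and 1024 bytes.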
        let pool_cfg =
            StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false);
        let shared_tm_pool = SharedTmPool::new(StaticMemoryPool::new(pool_cfg.clone()));
        let shared_tc_pool_0 = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
        let shared_tc_pool_1 = shared_tc_pool_0.clone();
        let (tx, rx) = crossbeam_channel::bounded(10);
        let sender_0 = TmInSharedPoolSenderWithCrossbeam::new(shared_tm_pool.clone(), tx.clone());
        let sender_1 = sender_0.clone();
        let mut reporter_with_sender_0 = VerificationReporter::new(TEST_COMPONENT_ID_0.id(), &cfg);
        let mut reporter_with_sender_1 = reporter_with_sender_0.clone();
        // For test purposes, we retrieve the request IDs from the TCs and pass them to the
        // receiver thread.
        let req_id_0;
        let req_id_1;

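        // Store two example TCs in the shared TC pool and hand their pool addresses to the
        // sender threads via dedicated channels.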
        let (tx_tc_0, rx_tc_0) = crossbeam_channel::bounded(3);
        let (tx_tc_1, rx_tc_1) = crossbeam_channel::bounded(3);
        {
            let mut tc_guard = shared_tc_pool_0.write().unwrap();
            let sph = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
            let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
            let pus_tc_0 = PusTcCreator::new_no_app_data(sph, tc_header, true);
            req_id_0 = RequestId::new(&pus_tc_0);
            let addr = tc_guard
                .free_element(pus_tc_0.len_written(), |buf| {
                    pus_tc_0.write_to_bytes(buf).unwrap();
                })
                .unwrap();
            tx_tc_0.send(addr).unwrap();
            let sph = SpHeader::new_for_unseg_tc(TEST_APID, 1, 0);
            let tc_header = PusTcSecondaryHeader::new_simple(5, 1);
            let pus_tc_1 = PusTcCreator::new_no_app_data(sph, tc_header, true);
            req_id_1 = RequestId::new(&pus_tc_1);
            let addr = tc_guard
                .free_element(pus_tc_1.len_written(), |buf| {
                    pus_tc_1.write_to_bytes(buf).unwrap();
                })
                .unwrap();
            tx_tc_1.send(addr).unwrap();
        }

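        // First sender thread: reads its TC from the shared pool and generates an acceptance
        // success, a start success, two step success and one completion success report for it.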
        let verif_sender_0 = thread::spawn(move || {
            let mut tc_buf: [u8; 1024] = [0; 1024];
            let tc_addr = rx_tc_0
                .recv_timeout(Duration::from_millis(20))
                .expect("Receive timeout");
            let tc_len;
            {
                let mut tc_guard = shared_tc_pool_0.write().unwrap();
                let pg = tc_guard.read_with_guard(tc_addr);
                tc_len = pg.read(&mut tc_buf).unwrap();
            }
            let (_tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();

            let token = reporter_with_sender_0.add_tc_with_req_id(req_id_0);
            let accepted_token = reporter_with_sender_0
                .acceptance_success(&sender_0, token, &FIXED_STAMP)
                .expect("Acceptance success failed");

            // Do some start handling here
            let started_token = reporter_with_sender_0
                .start_success(&sender_0, accepted_token, &FIXED_STAMP)
                .expect("Start success failed");
            // Do some step handling here
            reporter_with_sender_0
                .step_success(&sender_0, &started_token, &FIXED_STAMP, EcssEnumU8::new(0))
                .expect("Step success failed");

            // Finish up
            reporter_with_sender_0
                .step_success(&sender_0, &started_token, &FIXED_STAMP, EcssEnumU8::new(1))
                .expect("Step success failed");
            reporter_with_sender_0
                .completion_success(&sender_0, started_token, &FIXED_STAMP)
                .expect("Completion success failed");
        });

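        // Second sender thread: reads its TC from the shared pool, generates acceptance and
        // start success reports, and then reports a completion failure with failure code 2.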
        let verif_sender_1 = thread::spawn(move || {
            let mut tc_buf: [u8; 1024] = [0; 1024];
            let tc_addr = rx_tc_1
                .recv_timeout(Duration::from_millis(20))
                .expect("Receive timeout");
            let tc_len;
            {
                let mut tc_guard = shared_tc_pool_1.write().unwrap();
                let pg = tc_guard.read_with_guard(tc_addr);
                tc_len = pg.read(&mut tc_buf).unwrap();
            }
            let (tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
            let token = reporter_with_sender_1.add_tc(&tc);
            let accepted_token = reporter_with_sender_1
                .acceptance_success(&sender_1, token, &FIXED_STAMP)
                .expect("Acceptance success failed");
            let started_token = reporter_with_sender_1
                .start_success(&sender_1, accepted_token, &FIXED_STAMP)
                .expect("Start success failed");
            let fail_code = EcssEnumU16::new(2);
            let params = FailParams::new_no_fail_data(&FIXED_STAMP, &fail_code);
            reporter_with_sender_1
                .completion_failure(&sender_1, started_token, params)
                .expect("Completion failure failed");
        });

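        // Receiver thread: collects all PACKETS_SENT verification TMs (5 from the first sender
        // thread, 3 from the second one) and groups the received subservices by request ID.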
        let verif_receiver = thread::spawn(move || {
            let mut packet_counter = 0;
            let mut tm_buf: [u8; 1024] = [0; 1024];
            let mut verif_map = HashMap::new();
            while packet_counter < PACKETS_SENT {
                let tm_in_pool = rx
                    .recv_timeout(Duration::from_millis(50))
                    .expect("Packet reception timeout");
                let tm_len;
                let shared_tm_store = shared_tm_pool.clone_backing_pool();
                {
                    let mut rg = shared_tm_store.write().expect("Error locking shared pool");
                    let store_guard = rg.read_with_guard(tm_in_pool.store_addr);
                    tm_len = store_guard
                        .read(&mut tm_buf)
                        .expect("Error reading TM slice");
                }
                let (pus_tm, _) =
                    PusTmReader::new(&tm_buf[0..tm_len], 7).expect("Error reading verification TM");
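                // The verification report source data starts with the request ID of the TC
                // that is being verified.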
                let req_id =
                    RequestId::from_bytes(&pus_tm.source_data()[0..RequestId::SIZE_AS_BYTES])
                        .expect("reading request ID from PUS TM source data failed");
                if !verif_map.contains_key(&req_id) {
                    let content = vec![pus_tm.subservice()];
                    verif_map.insert(req_id, content);
                } else {
                    let content = verif_map.get_mut(&req_id).unwrap();
                    content.push(pus_tm.subservice());
                }
                packet_counter += 1;
            }
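            // PUS service 1 subservice IDs: 1 = acceptance success, 3 = start success,
            // 5 = step success, 7 = completion success, 8 = completion failure.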
            for (req_id, content) in verif_map {
                if req_id == req_id_1 {
                    assert_eq!(content[0], 1);
                    assert_eq!(content[1], 3);
                    assert_eq!(content[2], 8);
                } else if req_id == req_id_0 {
                    assert_eq!(content[0], 1);
                    assert_eq!(content[1], 3);
                    assert_eq!(content[2], 5);
                    assert_eq!(content[3], 5);
                    assert_eq!(content[4], 7);
                } else {
                    panic!("Unexpected request ID {:?}", req_id);
                }
            }
        });

        verif_sender_0.join().expect("Joining thread 0 failed");
        verif_sender_1.join().expect("Joining thread 1 failed");
        verif_receiver.join().expect("Joining thread 2 failed");
    }
}