Compare commits

...

4 Commits

SHA1 Message Date
e2668e68e1 spacepackets update, clippy fixes 2025-05-10 16:06:16 +02:00
9ab36c0362 Merge pull request 'update docs' (#226) from update-docs into main (Reviewed-on: #226) 2025-05-06 16:21:17 +02:00
b95769c177 Merge branch 'main' into update-docs 2025-05-06 16:21:10 +02:00
bd6488e87b update docs 2025-04-01 20:58:18 +02:00
28 changed files with 195 additions and 169 deletions

View File

@@ -3,20 +3,20 @@
Software for space systems oftentimes has different requirements than the software for host
systems or servers. Currently, most space systems are considered embedded systems.
For these systems, the computation power and the available heap are the most important resources
which are constrained. This might make completely heap based memory management schemes which
For these systems, the computation power and the available heap are important resources
which are also constrained. This might make completely heap based memory management schemes which
are oftentimes used on host and server based systems unfeasible. Still, completely forbidding
heap allocations might make software development unnecessarily difficult, especially at a
time when the OBSW might be running on Linux based systems with hundreds of MBs of RAM.
A useful pattern used commonly in space systems is to limit heap allocations to program
A useful pattern commonly used in space systems is to limit heap allocations to program
initialization time and avoid frequent run-time allocations. This prevents issues like
running out of memory (something even Rust cannot protect against) or heap fragmentation on systems
without an MMU.
# Using pre-allocated pool structures
A huge candidate for heap allocations is TMTC handling. TCs, TMs and IPC data are all
A candidate for heap allocations is TMTC handling. TCs, TMs and IPC data are all
candidates where the data size might vary greatly. The regular solution for host systems
might be to send around this data as a `Vec<u8>` until it is dropped. `sat-rs` provides
another solution to avoid run-time allocations by offering pre-allocated static
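
The pattern described in this chapter can be illustrated without any sat-rs specific types: size all working buffers once during initialization and only re-use them at run-time. A minimal sketch (the type name and buffer size below are made up for illustration and are not part of sat-rs):

```rust
// Illustrative only: "allocate at initialization, re-use at run-time".
const MAX_TM_SIZE: usize = 2048;

struct TmHandler {
    // Allocated exactly once when the handler is constructed.
    tm_buf: Vec<u8>,
}

impl TmHandler {
    fn new() -> Self {
        Self {
            tm_buf: vec![0; MAX_TM_SIZE],
        }
    }

    // Called periodically at run-time: the pre-allocated buffer is re-used
    // for every packet, so no further heap allocations take place here.
    fn handle_tm(&mut self, raw_tm: &[u8]) {
        let len = raw_tm.len().min(self.tm_buf.len());
        self.tm_buf[..len].copy_from_slice(&raw_tm[..len]);
        // ... parse and forward the packet from &self.tm_buf[..len] ...
    }
}
```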

View File

@@ -29,7 +29,7 @@ fn main() {
let res = client.recv(&mut buf);
match res {
Ok(_len) => {
let (pus_tm, size) = PusTmReader::new(&buf, 7).expect("Parsing PUS TM failed");
let pus_tm = PusTmReader::new(&buf, 7).expect("Parsing PUS TM failed");
if pus_tm.service() == 17 && pus_tm.subservice() == 2 {
println!("Received PUS Ping Reply TM[17,2]")
} else if pus_tm.service() == 1 {
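
This hunk reflects the spacepackets v0.14 API change that appears throughout this comparison: `PusTmReader::new` (and `PusTcReader::new`) now return the reader directly instead of a `(reader, size)` tuple. Where the parsed length is still needed, the scheduler test changes further down switch to `total_len()` on the reader; the sketch below assumes `PusTmReader` offers the same accessor.

```rust
use spacepackets::ecss::tm::PusTmReader;

fn parse_tm(buf: &[u8]) {
    // spacepackets v0.13: let (pus_tm, size) = PusTmReader::new(buf, 7).unwrap();
    // spacepackets v0.14: only the reader is returned.
    let pus_tm = PusTmReader::new(buf, 7).expect("Parsing PUS TM failed");
    // The parsed packet length, previously the second tuple element, is
    // assumed to be available via total_len(), as shown for PusTcReader in
    // the scheduler tests below.
    let _size = pus_tm.total_len();
}
```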

View File

@@ -281,9 +281,7 @@ mod tests {
.try_recv()
.expect("failed to receive TM packet");
assert_eq!(tm_packet.sender_id, PUS_EVENT_MANAGEMENT.id());
let tm_reader = PusTmReader::new(&tm_packet.packet, 7)
.expect("failed to create TM reader")
.0;
let tm_reader = PusTmReader::new(&tm_packet.packet, 7).expect("failed to create TM reader");
assert_eq!(tm_reader.apid(), TEST_CREATOR_ID.apid);
assert_eq!(tm_reader.user_data().len(), 4);
let event_read_back = EventU32::from_be_bytes(tm_reader.user_data().try_into().unwrap());

View File

@@ -367,7 +367,7 @@ mod tests {
if let Err(mpsc::TryRecvError::Empty) = packet {
} else {
let tm = packet.unwrap();
let unexpected_tm = PusTmReader::new(&tm.packet, 7).unwrap().0;
let unexpected_tm = PusTmReader::new(&tm.packet, 7).unwrap();
panic!("unexpected TM packet {unexpected_tm:?}");
}
}
@@ -410,7 +410,11 @@ mod tests {
pub fn add_tc(&mut self, tc: &PusTcCreator) {
self.request_id = Some(verification::RequestId::new(tc).into());
let token = self.service.service_helper.verif_reporter_mut().add_tc(tc);
let token = self
.service
.service_helper
.verif_reporter_mut()
.start_verification(tc);
let accepted_token = self
.service
.service_helper

View File

@@ -109,8 +109,8 @@ impl PusTcDistributor {
// TODO: Shouldn't this be an error?
return Ok(HandlingStatus::HandledOne);
}
let pus_tc = pus_tc_result.unwrap().0;
let init_token = self.verif_reporter.add_tc(&pus_tc);
let pus_tc = pus_tc_result.unwrap();
let init_token = self.verif_reporter.start_verification(&pus_tc);
self.stamp_helper.update_from_now();
let accepted_token = self
.verif_reporter
@@ -599,7 +599,7 @@ pub(crate) mod tests {
) -> (verification::RequestId, ActivePusRequestStd) {
let sp_header = SpHeader::new_from_apid(apid);
let sec_header_dummy = PusTcSecondaryHeader::new_simple(0, 0);
let init = self.verif_reporter.add_tc(&PusTcCreator::new(
let init = self.verif_reporter.start_verification(&PusTcCreator::new(
sp_header,
sec_header_dummy,
&[],
@@ -706,7 +706,7 @@ pub(crate) mod tests {
}
pub fn add_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let token = self.verif_reporter.add_tc(tc);
let token = self.verif_reporter.start_verification(tc);
self.current_request_id = Some(verification::RequestId::new(tc));
self.current_packet = Some(tc.to_vec().unwrap());
self.verif_reporter
@@ -734,7 +734,7 @@ pub(crate) mod tests {
let tc_reader = PusTcReader::new(&current_packet).unwrap();
let (active_info, request) = self.converter.convert(
token,
&tc_reader.0,
&tc_reader,
&self.dummy_sender,
&self.verif_reporter,
time_stamp,

View File

@@ -90,7 +90,7 @@ impl DirectPusService for TestCustomServiceWrapper {
);
}
DirectPusPacketHandlerResult::CustomSubservice(subservice, token) => {
let (tc, _) = PusTcReader::new(
let tc = PusTcReader::new(
self.handler
.service_helper
.tc_in_mem_converter

View File

@@ -24,6 +24,7 @@ optional = true
[dependencies.satrs-shared]
version = ">=0.1.3, <=0.2"
path = "../satrs-shared"
features = ["serde"]
[dependencies.satrs-mib-codegen]

View File

@@ -28,6 +28,7 @@ features = ["full"]
trybuild = { version = "1", features = ["diff"] }
[dev-dependencies.satrs-shared]
path = "../../satrs-shared"
version = ">=0.1.3, <=0.2"
[dev-dependencies.satrs-mib]

View File

@@ -8,6 +8,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.2.2] 2025-05-10
- Bump to `spacepackets` v0.14
# [v0.2.1] 2024-11-15
Increased allowed spacepackets to v0.13
@@ -41,3 +45,6 @@ Allow `spacepackets` range starting with v0.10 and v0.11.
# [v0.1.0] 2024-02-12
Initial release.
[unreleased]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-shared-v0.2.2...HEAD
[v0.2.2]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-shared-v0.2.1...satrs-shared-v0.2.2

View File

@@ -1,7 +1,7 @@
[package]
name = "satrs-shared"
description = "Components shared by multiple sat-rs crates"
version = "0.2.1"
version = "0.2.2"
edition = "2021"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
homepage = "https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/"
@@ -18,11 +18,11 @@ default-features = false
optional = true
[dependencies.defmt]
version = "0.3"
version = "1"
optional = true
[dependencies.spacepackets]
version = ">0.9, <=0.13"
version = "0.14"
default-features = false
[features]

View File

@@ -13,16 +13,13 @@ keywords = ["no-std", "space", "aerospace"]
categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-support", "embedded"]
[dependencies]
satrs-shared = ">=0.1.3, <=0.2"
satrs-shared = { version = ">=0.1.3, <=0.2", path = "../satrs-shared" }
delegate = ">0.7, <=0.13"
paste = "1"
derive-new = ">=0.6, <=0.7"
smallvec = "1"
crc = "3"
num_enum = { version = ">0.5, <=0.7", default-features = false }
spacepackets = { version = "0.13", default-features = false }
cobs = { version = "0.3", default-features = false }
num-traits = { version = "0.2", default-features = false }
spacepackets = { version = "0.14", default-features = false }
cobs = { version = "0.4", default-features = false, git = "https://github.com/jamesmunns/cobs.rs.git", branch = "main" }
thiserror = { version = "2", default-features = false }
hashbrown = { version = ">=0.14, <=0.15", optional = true }

View File

@@ -162,7 +162,7 @@ pub trait SenderMapProvider<
/// * `ListenerMap`: [ListenerMapProvider] which maps listener keys to channel IDs.
/// * `EventSender`: [EventSendProvider] contained within the sender map which sends the events.
/// * `Event`: The event type. This type must implement the [GenericEvent]. Currently only [EventU32]
/// and [EventU16] are supported.
/// and [EventU16] are supported.
/// * `ParamProvider`: Auxiliary data which is sent with the event to provide optional context
/// information
pub struct EventManager<

View File

@@ -312,11 +312,11 @@ impl EventU32 {
/// # Parameter
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 2 bits of the raw event ID
/// be stored inside the uppermost 2 bits of the raw event ID
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 14 bits after the severity. Therefore, the size is limited by dec 16383 hex 0x3FFF.
/// next 14 bits after the severity. Therefore, the size is limited by dec 16383 hex 0x3FFF.
/// * `unique_id`: Each event has a unique 16 bit ID occupying the last 16 bits of the
/// raw event ID
/// raw event ID
pub fn new_checked(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
@@ -486,11 +486,11 @@ impl EventU16 {
/// # Parameter
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 2 bits of the raw event ID
/// be stored inside the uppermost 2 bits of the raw event ID
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 6 bits after the severity. Therefore, the size is limited by dec 63 hex 0x3F.
/// next 6 bits after the severity. Therefore, the size is limited by dec 63 hex 0x3F.
/// * `unique_id`: Each event has a unique 8 bit ID occupying the last 8 bits of the
/// raw event ID
/// raw event ID
pub fn new_checked(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
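
The doc comments above spell out the raw event ID layout: for `EventU32`, the 2 severity bits occupy the top of the word, followed by a 14 bit group ID and a 16 bit unique ID (for `EventU16` it is 2, 6 and 8 bits respectively). A small illustrative helper, not part of the crate, which packs the `EventU32` layout accordingly:

```rust
// Illustrative only: 2 bit severity | 14 bit group ID | 16 bit unique ID.
// The crate's new_checked constructors additionally reject group IDs above
// 0x3FFF (EventU32) or 0x3F (EventU16).
fn raw_event_u32(severity: u32, group_id: u32, unique_id: u32) -> u32 {
    debug_assert!(severity <= 0b11);
    debug_assert!(group_id <= 0x3FFF);
    debug_assert!(unique_id <= 0xFFFF);
    (severity << 30) | (group_id << 16) | unique_id
}
```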

View File

@@ -39,9 +39,9 @@ pub trait ExecutableWithType: Executable {
///
/// * `executable`: Executable task
/// * `task_freq`: Optional frequency of task. Required for periodic and fixed cycle tasks.
/// If [None] is passed, no sleeping will be performed.
/// If [None] is passed, no sleeping will be performed.
/// * `op_code`: Operation code which is passed to the executable task
/// [operation call][Executable::periodic_op]
/// [operation call][Executable::periodic_op]
/// * `termination`: Optional termination handler which can cancel threads with a broadcast
pub fn exec_sched_single<
T: ExecutableWithType<Error = E> + Send + 'static + ?Sized,

View File

@@ -150,9 +150,9 @@ impl<
///
/// * `cfg` - Configuration of the server.
/// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
/// then sent back to the client.
/// then sent back to the client.
/// * `tc_receiver` - Any received telecommands which were decoded successfully will be
/// forwarded to this TC receiver.
/// forwarded to this TC receiver.
pub fn new(
cfg: ServerConfig,
tm_source: TmSource,
@@ -377,13 +377,13 @@ mod tests {
current_idx += 1;
let mut dec_report = cobs::decode_in_place_report(&mut read_buf[current_idx..])
.expect("COBS decoding failed");
assert_eq!(dec_report.dst_used, 5);
assert_eq!(dec_report.frame_size(), 5);
// Skip first sentinel byte.
assert_eq!(
&read_buf[current_idx..current_idx + INVERTED_PACKET.len()],
&INVERTED_PACKET
);
current_idx += dec_report.src_used;
current_idx += dec_report.parsed_size();
// End sentinel.
assert_eq!(read_buf[current_idx], 0, "invalid sentinel end byte");
current_idx += 1;
@@ -393,13 +393,13 @@ mod tests {
current_idx += 1;
dec_report = cobs::decode_in_place_report(&mut read_buf[current_idx..])
.expect("COBS decoding failed");
assert_eq!(dec_report.dst_used, 5);
assert_eq!(dec_report.frame_size(), 5);
// Skip first sentinel byte.
assert_eq!(
&read_buf[current_idx..current_idx + SIMPLE_PACKET.len()],
&SIMPLE_PACKET
);
current_idx += dec_report.src_used;
current_idx += dec_report.parsed_size();
// End sentinel.
assert_eq!(read_buf[current_idx], 0);
break;
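
The assertions above also document the cobs 0.4 report API (pinned to a git branch in the satrs Cargo.toml further down): `frame_size()` replaces the old `dst_used` field and gives the decoded payload length, while `parsed_size()` replaces `src_used` and gives the number of consumed source bytes. A condensed sketch of the decode step exercised by this test:

```rust
// Decodes one zero-delimited COBS frame starting at `idx` and returns the
// index right after its end sentinel. Mirrors the test logic above.
fn decode_one_frame(buf: &mut [u8], mut idx: usize) -> usize {
    assert_eq!(buf[idx], 0, "expected start sentinel");
    idx += 1;
    let report =
        cobs::decode_in_place_report(&mut buf[idx..]).expect("COBS decoding failed");
    // The decoded payload now sits in place, starting at `idx`.
    let _payload = &buf[idx..idx + report.frame_size()];
    // Skip over the encoded bytes that were consumed.
    idx += report.parsed_size();
    assert_eq!(buf[idx], 0, "invalid sentinel end byte");
    idx + 1
}
```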

View File

@@ -25,22 +25,22 @@ pub use crate::hal::std::tcp_spacepackets_server::{SpacepacketsTmSender, TcpSpac
///
/// * `addr` - Address of the TCP server.
/// * `inner_loop_delay` - If a client connects for a longer period, but no TC is received or
/// no TM needs to be sent, the TCP server will delay for the specified amount of time
/// to reduce CPU load.
/// no TM needs to be sent, the TCP server will delay for the specified amount of time
/// to reduce CPU load.
/// * `tm_buffer_size` - Size of the TM buffer used to read TM from the [PacketSource] and
/// encode that data. This buffer should be large enough to hold the maximum expected
/// TM size read from the packet source.
/// encode that data. This buffer should be large enough to hold the maximum expected
/// TM size read from the packet source.
/// * `tc_buffer_size` - Size of the TC buffer used to read encoded telecommands sent from
/// the client. It is recommended to make this buffer larger to allow reading multiple
/// consecutive packets as well, for example by using common buffer sizes like 4096 or 8192
/// bytes. The buffer should at the very least be large enough to hold the maximum expected
/// telecommand size.
/// the client. It is recommended to make this buffer larger to allow reading multiple
/// consecutive packets as well, for example by using common buffer sizes like 4096 or 8192
/// bytes. The buffer should at the very least be large enough to hold the maximum expected
/// telecommand size.
/// * `reuse_addr` - Can be used to set the `SO_REUSEADDR` option on the raw socket. This is
/// especially useful if the address and port are static for the server. Set to false by
/// default.
/// especially useful if the address and port are static for the server. Set to false by
/// default.
/// * `reuse_port` - Can be used to set the `SO_REUSEPORT` option on the raw socket. This is
/// especially useful if the address and port are static for the server. Set to false by
/// default.
/// especially useful if the address and port are static for the server. Set to false by
/// default.
#[derive(Debug, Copy, Clone)]
pub struct ServerConfig {
pub id: ComponentId,
@@ -211,12 +211,12 @@ impl<
///
/// * `cfg` - Configuration of the server.
/// * `tc_parser` - Parser which extracts telecommands from the raw bytestream received from
/// the client.
/// the client.
/// * `tm_sender` - Sends back telemetry to the client using the specified TM source.
/// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
/// then sent back to the client.
/// then sent back to the client.
/// * `tc_sender` - Any received telecommand which was decoded successfully will be forwarded
/// using this TC sender.
/// using this TC sender.
/// * `stop_signal` - Can be used to stop the server even if a connection is ongoing.
pub fn new(
cfg: ServerConfig,

View File

@@ -120,15 +120,15 @@ impl<
///
/// * `cfg` - Configuration of the server.
/// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
/// then sent back to the client.
/// then sent back to the client.
/// * `tc_sender` - Any received telecommands which were decoded successfully will be
/// forwarded using this [PacketSenderRaw].
/// forwarded using this [PacketSenderRaw].
/// * `validator` - Used to determine the space packets relevant for further processing and
/// to detect broken space packets.
/// to detect broken space packets.
/// * `handled_connection_hook` - Called to notify the user about a successfully handled
/// connection.
/// connection.
/// * `stop_signal` - Can be used to shut down the TCP server even for longer running
/// connections.
/// connections.
pub fn new(
cfg: ServerConfig,
tm_source: TmSource,

View File

@@ -770,11 +770,11 @@ mod alloc_mod {
/// # Parameters
///
/// * `cfg` - Vector of tuples which represent a subpool. The first entry in the tuple specifies
/// the number of memory blocks in the subpool, the second entry the size of the blocks
/// the number of memory blocks in the subpool, the second entry the size of the blocks
/// * `spill_to_higher_subpools` - Specifies whether data will be spilled to higher subpools
/// if the next fitting subpool is full. This is useful to ensure the pool remains useful
/// for all data sizes as long as possible. However, an undesirable side-effect might be
/// the choking of larger subpools by underdimensioned smaller subpools.
/// if the next fitting subpool is full. This is useful to ensure the pool remains useful
/// for all data sizes as long as possible. However, an undesirable side-effect might be
/// the choking of larger subpools by underdimensioned smaller subpools.
#[derive(Debug, Clone)]
pub struct StaticPoolConfig {
cfg: Vec<SubpoolConfig>,
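
Tying this back to the book chapter at the top of this comparison, a static pool is typically configured once at initialization and then used for run-time storage. A minimal sketch based on the parameters documented above; the constructor and type names are assumptions and should be checked against the `satrs::pool` documentation:

```rust
// Names and signatures below (StaticPoolConfig::new, StaticMemoryPool::new,
// module path satrs::pool) are assumed for illustration.
use satrs::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};

fn init_tc_pool() -> StaticMemoryPool {
    // Three subpools: 32 blocks of 32 bytes, 16 blocks of 128 bytes and
    // 8 blocks of 1024 bytes. Spilling to higher subpools is disabled.
    StaticMemoryPool::new(StaticPoolConfig::new(
        vec![(32, 32), (16, 128), (8, 1024)],
        false,
    ))
}

fn store_and_read_back(pool: &mut StaticMemoryPool) {
    // Run-time usage without heap growth: add a packet, then read it back
    // into a stack buffer. The add/read calls match the PoolProvider usage
    // in the scheduler code changed elsewhere in this comparison.
    let addr = pool.add(&[0x17, 0x01, 0x02]).expect("adding TC failed");
    let mut read_buf: [u8; 16] = [0; 16];
    pool.read(&addr, &mut read_buf).expect("reading TC failed");
}
```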

View File

@@ -409,7 +409,7 @@ mod tests {
assert!(res.event_was_enabled);
assert!(res.params_were_propagated);
let event_tm = event_rx.try_recv().expect("no event received");
let (tm, _) = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
let tm = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
assert_eq!(tm.service(), 5);
assert_eq!(tm.subservice(), Subservice::TmInfoReport as u8);
assert_eq!(tm.user_data().len(), 4 + param_data.len());
@@ -437,7 +437,7 @@ mod tests {
assert!(res.event_was_enabled);
assert!(res.params_were_propagated);
let event_tm = event_rx.try_recv().expect("no event received");
let (tm, _) = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
let tm = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
assert_eq!(tm.service(), 5);
assert_eq!(tm.subservice(), Subservice::TmInfoReport as u8);
assert_eq!(tm.user_data().len(), 4 + param_data.len());

View File

@@ -200,7 +200,11 @@ mod tests {
impl PusTestHarness for Pus5HandlerWithStoreTester {
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc);
let init_token = self
.handler
.service_helper
.verif_reporter_mut()
.start_verification(tc);
self.handler
.service_helper
.verif_reporter()

View File

@@ -959,15 +959,11 @@ pub mod std_mod {
possible_packet: &TcInMemory,
) -> Result<PusTcReader<'_>, PusTcFromMemError> {
self.cache(possible_packet)?;
Ok(PusTcReader::new(self.tc_slice_raw())
.map_err(EcssTmtcError::Pus)?
.0)
Ok(PusTcReader::new(self.tc_slice_raw()).map_err(EcssTmtcError::Pus)?)
}
fn convert(&self) -> Result<PusTcReader<'_>, PusTcFromMemError> {
Ok(PusTcReader::new(self.tc_slice_raw())
.map_err(EcssTmtcError::Pus)?
.0)
Ok(PusTcReader::new(self.tc_slice_raw()).map_err(EcssTmtcError::Pus)?)
}
}
@@ -1459,7 +1455,7 @@ pub mod tests {
let tm_pool = self.tm_pool.0.read().unwrap();
let tm_raw = tm_pool.read_as_vec(&tm_in_pool.store_addr).unwrap();
self.tm_buf[0..tm_raw.len()].copy_from_slice(&tm_raw);
PusTmReader::new(&self.tm_buf, 7).unwrap().0
PusTmReader::new(&self.tm_buf, 7).unwrap()
}
pub fn check_no_tm_available(&self) -> bool {
@@ -1476,7 +1472,7 @@ pub mod tests {
let tm_in_pool = next_msg.unwrap();
let tm_pool = self.tm_pool.0.read().unwrap();
let tm_raw = tm_pool.read_as_vec(&tm_in_pool.store_addr).unwrap();
let tm = PusTmReader::new(&tm_raw, 7).unwrap().0;
let tm = PusTmReader::new(&tm_raw, 7).unwrap();
assert_eq!(PusPacket::service(&tm), 1);
assert_eq!(PusPacket::subservice(&tm), subservice);
assert_eq!(tm.apid(), TEST_APID);
@@ -1584,9 +1580,7 @@ pub mod tests {
let next_msg = self.tm_receiver.try_recv();
assert!(next_msg.is_ok());
self.current_tm = Some(next_msg.unwrap().packet);
PusTmReader::new(self.current_tm.as_ref().unwrap(), 7)
.unwrap()
.0
PusTmReader::new(self.current_tm.as_ref().unwrap(), 7).unwrap()
}
pub fn check_no_tm_available(&self) -> bool {
@@ -1601,7 +1595,7 @@ pub mod tests {
let next_msg = self.tm_receiver.try_recv();
assert!(next_msg.is_ok());
let next_msg = next_msg.unwrap();
let tm = PusTmReader::new(next_msg.packet.as_slice(), 7).unwrap().0;
let tm = PusTmReader::new(next_msg.packet.as_slice(), 7).unwrap();
assert_eq!(PusPacket::service(&tm), 1);
assert_eq!(PusPacket::subservice(&tm), subservice);
assert_eq!(tm.apid(), TEST_APID);

View File

@@ -292,10 +292,10 @@ pub trait PusSchedulerProvider {
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<TcInfo, ScheduleError> {
let check_tc = PusTcReader::new(tc)?;
if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 {
if PusPacket::service(&check_tc) == 11 && PusPacket::subservice(&check_tc) == 4 {
return Err(ScheduleError::NestedScheduledTc);
}
let req_id = RequestId::from_tc(&check_tc.0);
let req_id = RequestId::from_tc(&check_tc);
match pool.add(tc) {
Ok(addr) => {
@@ -411,10 +411,10 @@ pub mod alloc_mod {
///
/// * `init_current_time` - The time to initialize the scheduler with.
/// * `time_margin` - This time margin is used when inserting new telecommands into the
/// schedule. If the release time of a new telecommand is earlier than the time margin
/// added to the current time, it will not be inserted into the schedule.
/// schedule. If the release time of a new telecommand is earlier than the time margin
/// added to the current time, it will not be inserted into the schedule.
/// * `tc_buf_size` - Buffer for temporary storage of telecommand packets. This buffer
/// should be large enough to accommodate the largest expected TC packets.
/// should be large enough to accommodate the largest expected TC packets.
pub fn new(init_current_time: UnixTime, time_margin: Duration) -> Self {
PusScheduler {
tc_map: Default::default(),
@@ -480,10 +480,10 @@ pub mod alloc_mod {
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<TcInfo, ScheduleError> {
let check_tc = PusTcReader::new(tc)?;
if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 {
if PusPacket::service(&check_tc) == 11 && PusPacket::subservice(&check_tc) == 4 {
return Err(ScheduleError::NestedScheduledTc);
}
let req_id = RequestId::from_tc(&check_tc.0);
let req_id = RequestId::from_tc(&check_tc);
match pool.add(tc) {
Ok(addr) => {
@@ -683,10 +683,10 @@ pub mod alloc_mod {
/// # Arguments
///
/// * `releaser` - Closure where the first argument is whether the scheduler is enabled and
/// the second argument is the telecommand information also containing the store
/// address. This closure should return whether the command should be deleted. Please
/// note that returning false might lead to memory leaks if the TC is not cleared from
/// the store in some other way.
/// the second argument is the telecommand information also containing the store
/// address. This closure should return whether the command should be deleted. Please
/// note that returning false might lead to memory leaks if the TC is not cleared from
/// the store in some other way.
/// * `tc_store` - The holding store of the telecommands.
/// * `tc_buf` - Buffer to hold each telecommand being released.
pub fn release_telecommands_with_buffer<R: FnMut(bool, &TcInfo, &[u8]) -> bool>(
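
The closure signature in the definition above, `FnMut(bool, &TcInfo, &[u8]) -> bool`, matches the releaser described in the doc comment. A sketch of such a releaser (the `TcInfo` import path is an assumption; forwarding of the raw TC is only indicated):

```rust
// Matches the releaser signature FnMut(bool, &TcInfo, &[u8]) -> bool from the
// definition above. The module path of TcInfo is an assumption.
use satrs::pus::scheduler::TcInfo;

fn release_tc(scheduler_enabled: bool, _info: &TcInfo, tc_raw: &[u8]) -> bool {
    if scheduler_enabled {
        // Forward the raw telecommand for execution here, for example by
        // sending tc_raw.to_vec() over an mpsc channel.
        let _ = tc_raw;
    }
    // Returning true deletes the TC from the store. Returning false without
    // clearing it elsewhere can leak pool memory, as the doc comment warns.
    true
}
```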
@@ -1313,7 +1313,7 @@ mod tests {
let mut read_buf: [u8; 64] = [0; 64];
pool.read(&tc_info_0.addr(), &mut read_buf).unwrap();
let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
assert_eq!(check_tc, base_ping_tc_simple_ctor(0, &[]));
assert_eq!(scheduler.num_scheduled_telecommands(), 1);
@@ -1335,8 +1335,8 @@ mod tests {
let read_len = pool.read(&addr_vec[0], &mut read_buf).unwrap();
let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
assert_eq!(read_len, check_tc.total_len());
assert_eq!(check_tc, base_ping_tc_simple_ctor(0, &[]));
}
#[test]
@@ -1362,8 +1362,8 @@ mod tests {
let read_len = pool.read(&info.addr, &mut buf).unwrap();
let check_tc = PusTcReader::new(&buf).expect("incorrect Pus tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
assert_eq!(read_len, check_tc.total_len());
assert_eq!(check_tc, base_ping_tc_simple_ctor(0, &[]));
assert_eq!(scheduler.num_scheduled_telecommands(), 1);
@@ -1387,8 +1387,8 @@ mod tests {
let read_len = pool.read(&addr_vec[0], &mut buf).unwrap();
let check_tc = PusTcReader::new(&buf).expect("incorrect PUS tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
assert_eq!(read_len, check_tc.total_len());
assert_eq!(check_tc, base_ping_tc_simple_ctor(0, &[]));
}
#[test]
@@ -2031,7 +2031,7 @@ mod tests {
assert_eq!(n, 1);
let time_reader = cds::CdsTime::from_bytes_with_u16_days(&buf[2..2 + 7]).unwrap();
assert_eq!(time_reader, time_writer);
let pus_tc_reader = PusTcReader::new(&buf[9..]).unwrap().0;
let pus_tc_reader = PusTcReader::new(&buf[9..]).unwrap();
assert_eq!(pus_tc_reader, ping_tc);
}

View File

@@ -310,7 +310,11 @@ mod tests {
impl PusTestHarness for Pus11HandlerWithStoreTester {
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc);
let init_token = self
.handler
.service_helper
.verif_reporter_mut()
.start_verification(tc);
self.handler
.service_helper
.verif_reporter()

View File

@@ -181,7 +181,11 @@ mod tests {
impl PusTestHarness for Pus17HandlerWithStoreTester {
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc);
let init_token = self
.handler
.service_helper
.verif_reporter_mut()
.start_verification(tc);
self.handler
.service_helper
.verif_reporter()
@@ -239,7 +243,11 @@ mod tests {
impl PusTestHarness for Pus17HandlerWithVecTester {
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc);
let init_token = self
.handler
.service_helper
.verif_reporter_mut()
.start_verification(tc);
self.handler
.service_helper
.verif_reporter()

View File

@@ -228,7 +228,7 @@ pub struct VerificationToken<STATE> {
}
impl<STATE> VerificationToken<STATE> {
fn new(req_id: RequestId) -> VerificationToken<TcStateNone> {
pub fn new(req_id: RequestId) -> VerificationToken<TcStateNone> {
VerificationToken {
state: PhantomData,
request_id: req_id,
@@ -408,14 +408,10 @@ pub trait VerificationReportingProvider {
fn set_apid(&mut self, apid: Apid);
fn apid(&self) -> Apid;
fn add_tc(
&mut self,
fn start_verification(
&self,
pus_tc: &(impl CcsdsPacket + IsPusTelecommand),
) -> VerificationToken<TcStateNone> {
self.add_tc_with_req_id(RequestId::new(pus_tc))
}
fn add_tc_with_req_id(&mut self, req_id: RequestId) -> VerificationToken<TcStateNone>;
) -> VerificationToken<TcStateNone>;
fn acceptance_success(
&self,
@@ -482,7 +478,7 @@ pub trait VerificationReportingProvider {
/// the buffer passed to the API exposes by this struct will be used to serialize the source data.
/// This buffer may not be re-used to serialize the whole telemetry because that would overwrite
/// the source data itself.
#[derive(Clone)]
#[derive(Debug, Clone)]
pub struct VerificationReportCreator {
pub dest_id: u16,
apid: u16,
@@ -518,17 +514,8 @@ impl VerificationReportCreator {
/// Initialize verification handling by passing a TC reference. This returns a token required
/// to call the acceptance functions
pub fn add_tc(
&mut self,
pus_tc: &(impl CcsdsPacket + IsPusTelecommand),
) -> VerificationToken<TcStateNone> {
self.add_tc_with_req_id(RequestId::new(pus_tc))
}
/// Same as [Self::add_tc] but pass a request ID instead of the direct telecommand.
/// This can be useful if the executing thread does not have full access to the telecommand.
pub fn add_tc_with_req_id(&mut self, req_id: RequestId) -> VerificationToken<TcStateNone> {
VerificationToken::<TcStateNone>::new(req_id)
pub fn read_request_id_from_tc(pus_tc: &(impl CcsdsPacket + IsPusTelecommand)) -> RequestId {
RequestId::new(pus_tc)
}
fn success_verification_no_step<'time, 'src_data>(
@@ -962,12 +949,26 @@ pub mod alloc_mod {
}
}
pub fn start_verification(
&self,
pus_tc: &(impl CcsdsPacket + IsPusTelecommand),
) -> VerificationToken<TcStateNone> {
VerificationToken::<TcStateNone>::new(
VerificationReportCreator::read_request_id_from_tc(pus_tc),
)
}
pub fn start_verification_with_req_id(
&self,
request_id: RequestId,
) -> VerificationToken<TcStateNone> {
VerificationToken::<TcStateNone>::new(request_id)
}
delegate!(
to self.reporter_creator {
pub fn set_apid(&mut self, apid: u16) -> bool;
pub fn apid(&self) -> u16;
pub fn add_tc(&mut self, pus_tc: &(impl CcsdsPacket + IsPusTelecommand)) -> VerificationToken<TcStateNone>;
pub fn add_tc_with_req_id(&mut self, req_id: RequestId) -> VerificationToken<TcStateNone>;
pub fn dest_id(&self) -> u16;
pub fn set_dest_id(&mut self, dest_id: u16);
}
@@ -985,11 +986,16 @@ pub mod alloc_mod {
to self.reporter_creator {
fn set_apid(&mut self, apid: Apid);
fn apid(&self) -> Apid;
fn add_tc(&mut self, pus_tc: &(impl CcsdsPacket + IsPusTelecommand)) -> VerificationToken<TcStateNone>;
fn add_tc_with_req_id(&mut self, req_id: RequestId) -> VerificationToken<TcStateNone>;
}
);
fn start_verification(
&self,
pus_tc: &(impl CcsdsPacket + IsPusTelecommand),
) -> VerificationToken<TcStateNone> {
VerificationToken::<TcStateNone>::new(RequestId::new(pus_tc))
}
fn owner_id(&self) -> ComponentId {
self.owner_id
}
@@ -1351,22 +1357,23 @@ pub mod test_util {
}
impl VerificationReportingProvider for TestVerificationReporter {
fn start_verification(
&self,
pus_tc: &(impl CcsdsPacket + IsPusTelecommand),
) -> VerificationToken<TcStateNone> {
let request_id = RequestId::new(pus_tc);
self.report_queue
.borrow_mut()
.push_back((request_id, VerificationReportInfo::Added));
VerificationToken::<TcStateNone>::new(RequestId::new(pus_tc))
}
fn set_apid(&mut self, _apid: Apid) {}
fn apid(&self) -> Apid {
0
}
fn add_tc_with_req_id(&mut self, req_id: RequestId) -> VerificationToken<TcStateNone> {
self.report_queue
.borrow_mut()
.push_back((req_id, VerificationReportInfo::Added));
VerificationToken {
state: PhantomData,
request_id: req_id,
}
}
fn acceptance_success(
&self,
_sender: &(impl EcssTmSender + ?Sized),
@@ -1823,15 +1830,16 @@ pub mod tests {
}
}
pub fn init_verification(&self) -> VerificationToken<TcStateNone> {
let tc_reader = PusTcReader::new(&self.tc).unwrap();
self.reporter.start_verification(&tc_reader)
}
#[allow(dead_code)]
fn set_dest_id(&mut self, dest_id: u16) {
self.reporter.set_dest_id(dest_id);
}
fn init(&mut self) -> VerificationToken<TcStateNone> {
self.reporter.add_tc(&PusTcReader::new(&self.tc).unwrap().0)
}
fn acceptance_success(
&self,
token: VerificationToken<TcStateNone>,
@@ -1909,7 +1917,7 @@ pub mod tests {
additional_data: None,
};
let mut service_queue = self.sender.service_queue.borrow_mut();
assert!(service_queue.len() >= 1);
assert!(!service_queue.is_empty());
let info = service_queue.pop_front().unwrap();
assert_eq!(info, cmp_info);
}
@@ -2081,8 +2089,8 @@ pub mod tests {
#[test]
fn test_basic_acceptance_success() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let token = testbench.init();
let testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let token = testbench.init_verification();
testbench
.acceptance_success(token, &EMPTY_STAMP)
.expect("sending acceptance success failed");
@@ -2092,7 +2100,7 @@ pub mod tests {
#[test]
fn test_basic_acceptance_failure() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let init_token = testbench.init();
let init_token = testbench.init_verification();
let timestamp = [1, 2, 3, 4, 5, 6, 7];
let fail_code = EcssEnumU16::new(2);
let fail_params = FailParams::new_no_fail_data(timestamp.as_slice(), &fail_code);
@@ -2105,7 +2113,7 @@ pub mod tests {
#[test]
fn test_basic_acceptance_failure_with_helper() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let init_token = testbench.init();
let init_token = testbench.init_verification();
let timestamp = [1, 2, 3, 4, 5, 6, 7];
let fail_code = EcssEnumU16::new(2);
let fail_params = FailParams::new_no_fail_data(timestamp.as_slice(), &fail_code);
@@ -2117,8 +2125,8 @@ pub mod tests {
#[test]
fn test_acceptance_fail_data_too_large() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 8);
let init_token = testbench.init();
let testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 8);
let init_token = testbench.init_verification();
let stamp_buf = [1, 2, 3, 4, 5, 6, 7];
let fail_code = EcssEnumU16::new(2);
let fail_data: [u8; 16] = [0; 16];
@@ -2149,13 +2157,13 @@ pub mod tests {
#[test]
fn test_basic_acceptance_failure_with_fail_data() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let fail_code = EcssEnumU8::new(10);
let fail_data = EcssEnumU32::new(12);
let mut fail_data_raw = [0; 4];
fail_data.write_to_be_bytes(&mut fail_data_raw).unwrap();
let fail_params = FailParams::new(&EMPTY_STAMP, &fail_code, fail_data_raw.as_slice());
let init_token = testbench.init();
let init_token = testbench.init_verification();
testbench
.acceptance_failure(init_token, fail_params)
.expect("sending acceptance failure failed");
@@ -2173,7 +2181,7 @@ pub mod tests {
#[test]
fn test_start_failure() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let init_token = testbench.init();
let init_token = testbench.init_verification();
let fail_code = EcssEnumU8::new(22);
let fail_data: i32 = -12;
let mut fail_data_raw = [0; 4];
@@ -2192,7 +2200,7 @@ pub mod tests {
#[test]
fn test_start_failure_with_helper() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let token = testbench.init();
let token = testbench.init_verification();
let fail_code = EcssEnumU8::new(22);
let fail_data: i32 = -12;
let mut fail_data_raw = [0; 4];
@@ -2211,7 +2219,7 @@ pub mod tests {
#[test]
fn test_steps_success() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let token = testbench.init();
let token = testbench.init_verification();
let accepted_token = testbench
.acceptance_success(token, &EMPTY_STAMP)
.expect("acceptance failed");
@@ -2234,7 +2242,7 @@ pub mod tests {
#[test]
fn test_step_failure() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let token = testbench.init();
let token = testbench.init_verification();
let fail_code = EcssEnumU32::new(0x1020);
let fail_data: f32 = -22.3232;
let mut fail_data_raw = [0; 4];
@@ -2268,7 +2276,7 @@ pub mod tests {
#[test]
fn test_completion_failure() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let token = testbench.init();
let token = testbench.init_verification();
let fail_code = EcssEnumU32::new(0x1020);
let fail_params = FailParams::new_no_fail_data(&EMPTY_STAMP, &fail_code);
@@ -2291,7 +2299,7 @@ pub mod tests {
fn test_complete_success_sequence() {
let mut testbench =
VerificationReporterTestbench::new(TEST_COMPONENT_ID_0.id(), create_generic_ping(), 16);
let token = testbench.init();
let token = testbench.init_verification();
let accepted_token = testbench
.acceptance_success(token, &EMPTY_STAMP)
.expect("Sending acceptance success failed");
@@ -2313,7 +2321,7 @@ pub mod tests {
create_generic_ping(),
SequenceCounterHook::default(),
);
let token = testbench.init();
let token = testbench.init_verification();
let accepted_token = testbench
.acceptance_success(token, &EMPTY_STAMP)
.expect("Sending acceptance success failed");
@@ -2331,7 +2339,7 @@ pub mod tests {
#[test]
fn test_completion_failure_helper_string_param() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 32);
let token = testbench.init();
let token = testbench.init_verification();
let accepted_token = testbench
.acceptance_success(token, &EMPTY_STAMP)
.expect("Sending acceptance success failed");
@@ -2358,7 +2366,7 @@ pub mod tests {
#[test]
fn test_step_failure_helper_string_param() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 32);
let token = testbench.init();
let token = testbench.init_verification();
let accepted_token = testbench
.acceptance_success(token, &EMPTY_STAMP)
.expect("Sending acceptance success failed");

View File

@@ -136,7 +136,7 @@ impl SequenceExecutionHelper {
/// with [Self::load]
/// * `sender` - The sender to send mode requests to the components
/// * `children_mode_store` - The mode store vector to keep track of the mode states of
/// child components
/// child components
pub fn run(
&mut self,
table: &SequenceModeTables,

View File

@@ -107,9 +107,9 @@ fn test_threaded_usage() {
Ok(event_tm) => {
let tm = PusTmReader::new(event_tm.packet.as_slice(), 7)
.expect("Deserializing TM failed");
assert_eq!(tm.0.service(), 5);
assert_eq!(tm.0.subservice(), 1);
let src_data = tm.0.source_data();
assert_eq!(tm.service(), 5);
assert_eq!(tm.subservice(), 1);
let src_data = tm.source_data();
assert!(!src_data.is_empty());
assert_eq!(src_data.len(), 4);
let event =
@@ -137,9 +137,9 @@ fn test_threaded_usage() {
Ok(event_tm) => {
let tm = PusTmReader::new(event_tm.packet.as_slice(), 7)
.expect("Deserializing TM failed");
assert_eq!(tm.0.service(), 5);
assert_eq!(tm.0.subservice(), 2);
let src_data = tm.0.source_data();
assert_eq!(tm.service(), 5);
assert_eq!(tm.subservice(), 2);
let src_data = tm.source_data();
assert!(!src_data.is_empty());
assert_eq!(src_data.len(), 12);
let event =

View File

@@ -89,9 +89,9 @@ pub mod crossbeam_test {
let pg = tc_guard.read_with_guard(tc_addr);
tc_len = pg.read(&mut tc_buf).unwrap();
}
let (_tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
let _tc = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
let token = reporter_with_sender_0.add_tc_with_req_id(req_id_0);
let token = reporter_with_sender_0.start_verification_with_req_id(req_id_0);
let accepted_token = reporter_with_sender_0
.acceptance_success(&sender, token, &FIXED_STAMP)
.expect("Acceptance success failed");
@@ -125,8 +125,8 @@ pub mod crossbeam_test {
let pg = tc_guard.read_with_guard(tc_addr);
tc_len = pg.read(&mut tc_buf).unwrap();
}
let (tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
let token = reporter_with_sender_1.add_tc(&tc);
let tc = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
let token = reporter_with_sender_1.start_verification(&tc);
let accepted_token = reporter_with_sender_1
.acceptance_success(&sender_1, token, &FIXED_STAMP)
.expect("Acceptance success failed");
@@ -156,7 +156,7 @@ pub mod crossbeam_test {
.read(&mut tm_buf)
.expect("Error reading TM slice");
}
let (pus_tm, _) =
let pus_tm =
PusTmReader::new(&tm_buf[0..tm_len], 7).expect("Error reading verification TM");
let req_id =
RequestId::from_bytes(&pus_tm.source_data()[0..RequestId::SIZE_AS_BYTES])