Compare commits


26 Commits

SHA1 | Message | Date | CI status
282f799203 | Merge pull request 'prep v0.2.0' (#184) from prep_v0.2.0 into main (Reviewed-on: #184) | 2024-05-02 14:57:14 +02:00 | checks successful
46dbb4309b | new clippy check | 2024-05-02 14:44:22 +02:00 | checks pending
42d1257e83 | prepare next release v0.2.0 | 2024-05-02 14:39:30 +02:00 | checks pending
583f6ce4d2 | Merge pull request 'small robustness fix' (#183) from robustness-fix into main (Reviewed-on: #183) | 2024-05-02 13:41:55 +02:00 | checks successful
408803fe86 | small robustness fix | 2024-05-02 13:41:27 +02:00 | checks pending
9ffe4d0ae0 | Merge pull request 'smaller improvements' (#182) from smaller-improvements into main (Reviewed-on: #182) | 2024-05-02 12:28:39 +02:00 | checks successful
e37061dcf0 | smaller improvements | 2024-05-02 12:28:09 +02:00
3a2ac11407 | Merge pull request 'bounded the PUS stack hot loop' (#181) from pus-hot-loop-bounding into main (Reviewed-on: #181) | 2024-05-02 12:02:02 +02:00 | checks successful
23327a7786 | bounded the PUS stack hot loop | 2024-05-02 12:01:24 +02:00 | checks pending
89d5a1022f | Merge pull request 'optimize PUS stack code' (#180) from optimize-pus-stack-code into main (Reviewed-on: #180) | 2024-05-02 11:59:26 +02:00 | checks successful
a00c843698 | optimize PUS stack code | 2024-05-02 11:58:46 +02:00 | checks pending
c586fd7fef | Merge pull request 'try unifying some direct PUS handler code' (#179) from unify-some-example-code into main (Reviewed-on: #179) | 2024-05-02 11:29:11 +02:00 | checks successful
7e78e70a17 | try unifying some direct PUS handler code | 2024-05-02 11:14:05 +02:00 | checks successful
424dfc439c | Merge pull request 'simplified PUS stack' (#178) from simplify-pus-stack into main (Reviewed-on: #178) | 2024-05-02 10:01:16 +02:00 | checks successful
45eb2f1343 | cargo fmt | 2024-05-01 21:16:26 +02:00 | checks successful
736eb74e66 | simplified PUS stack | 2024-05-01 21:13:08 +02:00 | checks pending
29f71c2a57 | Merge pull request 'Reworked generic parameter handling for PUS service 1 and 5' (#175) from rework-generic-params-for-pus into main (Reviewed-on: #175) | 2024-04-30 15:42:05 +02:00 | checks successful
f0d08b65a4 | Merge branch 'main' into rework-generic-params-for-pus | 2024-04-30 13:35:08 +02:00 | checks successful
c7a74a844c | Merge pull request 'renamed thread name' (#176) from small-tweak into main (Reviewed-on: #176) | 2024-04-30 13:31:46 +02:00 | checks successful
9c60427f89 | Reworked generic parameter handling for PUS service 1 and 5 | 2024-04-30 13:29:55 +02:00 | checks pending
958ab9bab6 | renamed thread name | 2024-04-25 11:11:31 +02:00 | checks successful
312849bddb | Merge pull request 'More improvements for Event API' (#173) from improve-event-api into main (Reviewed-on: #173) | 2024-04-24 19:34:33 +02:00 | checks successful
b0159a3ba7 | prep next release candidate | 2024-04-24 19:18:45 +02:00 | checks successful
c477739f6d | more improvements for API, tests for example event module | 2024-04-24 18:50:08 +02:00 | checks successful
b7ce039406 | add optional defmt support for events | 2024-04-24 18:36:00 +02:00 | checks successful
4736d40997 | Merge pull request 'simplified event management' (#172) from simplify-event-management into main (Reviewed-on: #172) | 2024-04-24 15:58:00 +02:00 | checks successful
35 changed files with 1515 additions and 1060 deletions

.github/workflows/ci.yml (new file)

@@ -0,0 +1,64 @@
name: ci
on: [push, pull_request]
jobs:
check:
name: Check build
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- run: cargo check --release
test:
name: Run Tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Install nextest
uses: taiki-e/install-action@nextest
- run: cargo nextest run --all-features
- run: cargo test --doc
cross-check:
name: Check Cross-Compilation
runs-on: ubuntu-latest
strategy:
matrix:
target:
- armv7-unknown-linux-gnueabihf
- thumbv7em-none-eabihf
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
with:
targets: "armv7-unknown-linux-gnueabihf, thumbv7em-none-eabihf"
- run: cargo check -p satrs --release --target=${{matrix.target}} --no-default-features
fmt:
name: Check formatting
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- run: cargo fmt --all -- --check
docs:
name: Check Documentation Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@nightly
- run: cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docs_rs"]'
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- run: cargo clippy -- -D warnings


@@ -33,6 +33,7 @@ pipeline {
stage('Test') {
steps {
sh 'cargo nextest r --all-features'
sh 'cargo test --doc'
}
}
stage('Check with all features') {


@@ -38,8 +38,7 @@ pub enum GroupId {
pub const OBSW_SERVER_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED;
pub const SERVER_PORT: u16 = 7301;
pub const TEST_EVENT: EventU32TypedSev<SeverityInfo> =
EventU32TypedSev::<SeverityInfo>::const_new(0, 0);
pub const TEST_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<SeverityInfo>::new(0, 0);
lazy_static! {
pub static ref PACKET_ID_VALIDATOR: HashSet<PacketId> = {


@@ -2,7 +2,6 @@ use std::sync::mpsc::{self};
use crate::pus::create_verification_reporter;
use satrs::event_man::{EventMessageU32, EventRoutingError};
use satrs::params::WritableToBeBytes;
use satrs::pus::event::EventTmHookProvider;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::EcssTmSender;
@@ -42,6 +41,7 @@ pub struct PusEventHandler<TmSender: EcssTmSender> {
tm_sender: TmSender,
time_provider: CdsTime,
timestamp: [u8; 7],
small_data_buf: [u8; 64],
verif_handler: VerificationReporter,
}
@@ -82,6 +82,7 @@ impl<TmSender: EcssTmSender> PusEventHandler<TmSender> {
pus_event_man_rx,
time_provider: CdsTime::new_with_u16_days(0, 0),
timestamp: [0; 7],
small_data_buf: [0; 64],
verif_handler,
tm_sender,
}
@@ -132,21 +133,26 @@ impl<TmSender: EcssTmSender> PusEventHandler<TmSender> {
// Perform the generation of PUS event packets
match self.pus_event_man_rx.try_recv() {
Ok(event_msg) => {
update_time(&mut self.time_provider, &mut self.timestamp);
let param_vec = event_msg.params().map_or(Vec::new(), |param| {
param.to_vec().expect("failed to convert params to vec")
});
// We use the TM modification hook to set the sender APID for each event.
self.pus_event_tm_creator.reporter.tm_hook.next_apid =
UniqueApidTargetId::from(event_msg.sender_id()).apid;
self.pus_event_tm_creator
.generate_pus_event_tm_generic(
update_time(&mut self.time_provider, &mut self.timestamp);
let generation_result = self
.pus_event_tm_creator
.generate_pus_event_tm_generic_with_generic_params(
&self.tm_sender,
&self.timestamp,
event_msg.event(),
Some(&param_vec),
&mut self.small_data_buf,
event_msg.params(),
)
.expect("Sending TM as event failed");
if !generation_result.params_were_propagated {
log::warn!(
"Event TM parameters were not propagated: {:?}",
event_msg.params()
);
}
}
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
@@ -210,4 +216,82 @@ impl<TmSender: EcssTmSender> EventHandler<TmSender> {
}
#[cfg(test)]
mod tests {}
mod tests {
use satrs::{
events::EventU32,
pus::verification::VerificationReporterCfg,
spacepackets::{
ecss::{tm::PusTmReader, PusPacket},
CcsdsPacket,
},
tmtc::PacketAsVec,
};
use super::*;
const TEST_CREATOR_ID: UniqueApidTargetId = UniqueApidTargetId::new(1, 2);
const TEST_EVENT: EventU32 = EventU32::new(satrs::events::Severity::Info, 1, 1);
pub struct EventManagementTestbench {
pub event_tx: mpsc::SyncSender<EventMessageU32>,
pub event_manager: EventManagerWithBoundedMpsc,
pub tm_receiver: mpsc::Receiver<PacketAsVec>,
pub pus_event_handler: PusEventHandler<mpsc::Sender<PacketAsVec>>,
}
impl EventManagementTestbench {
pub fn new() -> Self {
let (event_tx, event_rx) = mpsc::sync_channel(10);
let (_event_req_tx, event_req_rx) = mpsc::sync_channel(10);
let (tm_sender, tm_receiver) = mpsc::channel();
let verif_reporter_cfg = VerificationReporterCfg::new(0x05, 2, 2, 128).unwrap();
let verif_reporter =
VerificationReporter::new(PUS_EVENT_MANAGEMENT.id(), &verif_reporter_cfg);
let mut event_manager = EventManagerWithBoundedMpsc::new(event_rx);
let pus_event_handler = PusEventHandler::<mpsc::Sender<PacketAsVec>>::new(
tm_sender,
verif_reporter,
&mut event_manager,
event_req_rx,
);
Self {
event_tx,
tm_receiver,
event_manager,
pus_event_handler,
}
}
}
#[test]
fn test_basic_event_generation() {
let mut testbench = EventManagementTestbench::new();
testbench
.event_tx
.send(EventMessageU32::new(
TEST_CREATOR_ID.id(),
EventU32::new(satrs::events::Severity::Info, 1, 1),
))
.expect("failed to send event");
testbench.pus_event_handler.handle_event_requests();
testbench.event_manager.try_event_handling(|_, _| {});
testbench.pus_event_handler.generate_pus_event_tm();
let tm_packet = testbench
.tm_receiver
.try_recv()
.expect("failed to receive TM packet");
assert_eq!(tm_packet.sender_id, PUS_EVENT_MANAGEMENT.id());
let tm_reader = PusTmReader::new(&tm_packet.packet, 7)
.expect("failed to create TM reader")
.0;
assert_eq!(tm_reader.apid(), TEST_CREATOR_ID.apid);
assert_eq!(tm_reader.user_data().len(), 4);
let event_read_back = EventU32::from_be_bytes(tm_reader.user_data().try_into().unwrap());
assert_eq!(event_read_back, TEST_EVENT);
}
#[test]
fn test_basic_event_disabled() {
// TODO: Add test.
}
}


@@ -3,14 +3,13 @@ use std::net::{SocketAddr, UdpSocket};
use std::sync::mpsc;
use log::{info, warn};
use satrs::pus::HandlingStatus;
use satrs::tmtc::{PacketAsVec, PacketInPool, PacketSenderRaw};
use satrs::{
hal::std::udp_server::{ReceiveResult, UdpTcServer},
pool::{PoolProviderWithGuards, SharedStaticMemoryPool},
};
use crate::pus::HandlingStatus;
pub trait UdpTmHandler {
fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, recv_addr: &SocketAddr);
}


@@ -458,7 +458,7 @@ fn dyn_tmtc_pool_main() {
info!("Starting TM funnel task");
let jh_tm_funnel = thread::Builder::new()
.name("sat-rs tm-funnel".to_string())
.name("sat-rs tm-sink".to_string())
.spawn(move || loop {
tm_funnel.operation();
})


@@ -1,23 +1,23 @@
use log::{error, warn};
use log::warn;
use satrs::action::{ActionRequest, ActionRequestVariant};
use satrs::params::WritableToBeBytes;
use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::action::{
ActionReplyPus, ActionReplyVariant, ActivePusActionRequestStd, DefaultActiveActionRequestMap,
};
use satrs::pus::verification::{
FailParams, FailParamsWithStep, TcStateAccepted, TcStateStarted, VerificationReporter,
handle_completion_failure_with_generic_params, handle_step_failure_with_generic_params,
FailParamHelper, FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
EcssTcInVecConverter, EcssTmSender, EcssTmtcError, GenericConversionError, MpscTcReceiver,
MpscTmAsVecSender, PusPacketHandlerResult, PusReplyHandler, PusServiceHelper,
MpscTmAsVecSender, PusPacketHandlingError, PusReplyHandler, PusServiceHelper,
PusTcToRequestConverter,
};
use satrs::request::{GenericMessage, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{EcssEnumU16, PusPacket};
use satrs::spacepackets::ecss::{EcssEnumU16, PusPacket, PusServiceId};
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use satrs_example::config::components::PUS_ACTION_SERVICE;
use satrs_example::config::tmtc_err;
@@ -61,7 +61,7 @@ impl PusReplyHandler<ActivePusActionRequestStd, ActionReplyPus> for ActionReplyH
active_request: &ActivePusActionRequestStd,
tm_sender: &(impl EcssTmSender + ?Sized),
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
timestamp: &[u8],
) -> Result<bool, Self::Error> {
let verif_token: VerificationToken<TcStateStarted> = active_request
.token()
@@ -69,15 +69,23 @@ impl PusReplyHandler<ActivePusActionRequestStd, ActionReplyPus> for ActionReplyH
.expect("invalid token state");
let remove_entry = match &reply.message.variant {
ActionReplyVariant::CompletionFailed { error_code, params } => {
let mut fail_data_len = 0;
if let Some(params) = params {
fail_data_len = params.write_to_be_bytes(&mut self.fail_data_buf)?;
}
verification_handler.completion_failure(
let error_propagated = handle_completion_failure_with_generic_params(
tm_sender,
verif_token,
FailParams::new(time_stamp, error_code, &self.fail_data_buf[..fail_data_len]),
verification_handler,
FailParamHelper {
error_code,
params: params.as_ref(),
timestamp,
small_data_buf: &mut self.fail_data_buf,
},
)?;
if !error_propagated {
log::warn!(
"error params for completion failure were not propated: {:?}",
params.as_ref()
);
}
true
}
ActionReplyVariant::StepFailed {
@@ -85,31 +93,35 @@ impl PusReplyHandler<ActivePusActionRequestStd, ActionReplyPus> for ActionReplyH
step,
params,
} => {
let mut fail_data_len = 0;
if let Some(params) = params {
fail_data_len = params.write_to_be_bytes(&mut self.fail_data_buf)?;
}
verification_handler.step_failure(
let error_propagated = handle_step_failure_with_generic_params(
tm_sender,
verif_token,
FailParamsWithStep::new(
time_stamp,
&EcssEnumU16::new(*step),
verification_handler,
FailParamHelper {
error_code,
&self.fail_data_buf[..fail_data_len],
),
params: params.as_ref(),
timestamp,
small_data_buf: &mut self.fail_data_buf,
},
&EcssEnumU16::new(*step),
)?;
if !error_propagated {
log::warn!(
"error params for completion failure were not propated: {:?}",
params.as_ref()
);
}
true
}
ActionReplyVariant::Completed => {
verification_handler.completion_success(tm_sender, verif_token, time_stamp)?;
verification_handler.completion_success(tm_sender, verif_token, timestamp)?;
true
}
ActionReplyVariant::StepSuccess { step } => {
verification_handler.step_success(
tm_sender,
&verif_token,
time_stamp,
timestamp,
EcssEnumU16::new(*step),
)?;
false
@@ -266,43 +278,23 @@ pub struct ActionServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTc
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
for ActionServiceWrapper<TmSender, TcInMemConverter>
{
/// Returns [true] if the packet handling is finished.
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
match self.service.poll_and_handle_next_tc(time_stamp) {
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS 8 partial packet handling success: {e:?}")
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS 8 invalid subservice {invalid}");
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS 8 subservice {subservice} not implemented");
}
PusPacketHandlerResult::Empty => return HandlingStatus::Empty,
},
Err(error) => {
error!("PUS packet handling error: {error:?}");
// To avoid permanent loops on error cases.
return HandlingStatus::Empty;
}
const SERVICE_ID: u8 = PusServiceId::Action as u8;
const SERVICE_STR: &'static str = "action";
delegate::delegate! {
to self.service {
fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, PusPacketHandlingError>;
fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError>;
fn check_for_request_timeouts(&mut self);
}
HandlingStatus::HandledOne
}
fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus {
// This only fails if all senders disconnected. Treat it like an empty queue.
self.service
.poll_and_check_next_reply(time_stamp)
.unwrap_or_else(|e| {
warn!("PUS 8: Handling reply failed with error {e:?}");
HandlingStatus::Empty
})
}
fn check_for_request_timeouts(&mut self) {
self.service.check_for_request_timeouts();
}
}
@@ -417,7 +409,7 @@ mod tests {
}
let result = result.unwrap();
match result {
PusPacketHandlerResult::RequestHandled => (),
HandlingStatus::HandledOne => (),
_ => panic!("unexpected result {result:?}"),
}
}
@@ -429,19 +421,19 @@ mod tests {
}
let result = result.unwrap();
match result {
PusPacketHandlerResult::Empty => (),
HandlingStatus::Empty => (),
_ => panic!("unexpected result {result:?}"),
}
}
pub fn verify_next_reply_is_handled_properly(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_check_next_reply(time_stamp);
let result = self.service.poll_and_handle_next_reply(time_stamp);
assert!(result.is_ok());
assert_eq!(result.unwrap(), HandlingStatus::HandledOne);
}
pub fn verify_all_replies_handled(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_check_next_reply(time_stamp);
let result = self.service.poll_and_handle_next_reply(time_stamp);
assert!(result.is_ok());
assert_eq!(result.unwrap(), HandlingStatus::Empty);
}


@@ -1,19 +1,20 @@
use std::sync::mpsc;
use crate::pus::create_verification_reporter;
use log::{error, warn};
use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::event_man::EventRequestWithToken;
use satrs::pus::event_srv::PusEventServiceHandler;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
EcssTmSender, MpscTcReceiver, MpscTmAsVecSender, PusPacketHandlerResult, PusServiceHelper,
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter,
EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSender, MpscTcReceiver,
MpscTmAsVecSender, PartialPusHandlingError, PusServiceHelper,
};
use satrs::spacepackets::ecss::PusServiceId;
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use satrs_example::config::components::PUS_EVENT_MANAGEMENT;
use super::HandlingStatus;
use super::{DirectPusService, HandlingStatus};
pub fn create_event_service_static(
tm_sender: PacketSenderWithSharedPool,
@@ -61,26 +62,52 @@ pub struct EventServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcI
PusEventServiceHandler<MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
}
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
EventServiceWrapper<TmSender, TcInMemConverter>
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> DirectPusService
for EventServiceWrapper<TmSender, TcInMemConverter>
{
pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
match self.handler.poll_and_handle_next_tc(time_stamp) {
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS 5 partial packet handling success: {e:?}")
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS 5 invalid subservice {invalid}");
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS 5 subservice {subservice} not implemented");
}
PusPacketHandlerResult::Empty => return HandlingStatus::Empty,
},
Err(error) => {
error!("PUS packet handling error: {error:?}")
const SERVICE_ID: u8 = PusServiceId::Event as u8;
const SERVICE_STR: &'static str = "events";
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
let error_handler = |partial_error: &PartialPusHandlingError| {
log::warn!(
"PUS {}({}) partial error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
partial_error
);
};
let result = self
.handler
.poll_and_handle_next_tc(error_handler, time_stamp);
if let Err(e) = result {
log::warn!(
"PUS {}({}) error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
e
);
// To avoid permanent loops on continuous errors.
return HandlingStatus::Empty;
}
match result.unwrap() {
DirectPusPacketHandlerResult::Handled(handling_status) => return handling_status,
DirectPusPacketHandlerResult::CustomSubservice(subservice, _) => {
log::warn!(
"PUS {}({}) subservice {} not implemented",
Self::SERVICE_ID,
Self::SERVICE_STR,
subservice
);
}
DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
log::warn!(
"PUS {}({}) subservice {} not implemented",
Self::SERVICE_ID,
Self::SERVICE_STR,
subservice
);
}
}
HandlingStatus::HandledOne


@@ -1,5 +1,4 @@
use derive_new::new;
use log::{error, warn};
use satrs::hk::{CollectionIntervalFactor, HkRequest, HkRequestVariant, UniqueId};
use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::verification::{
@@ -10,11 +9,11 @@ use satrs::pus::{
ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap, EcssTcAndToken,
EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSender,
EcssTmtcError, GenericConversionError, MpscTcReceiver, MpscTmAsVecSender,
PusPacketHandlerResult, PusReplyHandler, PusServiceHelper, PusTcToRequestConverter,
PusPacketHandlingError, PusReplyHandler, PusServiceHelper, PusTcToRequestConverter,
};
use satrs::request::{GenericMessage, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{hk, PusPacket};
use satrs::spacepackets::ecss::{hk, PusPacket, PusServiceId};
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use satrs_example::config::components::PUS_HK_SERVICE;
use satrs_example::config::{hk_err, tmtc_err};
@@ -24,7 +23,7 @@ use std::time::Duration;
use crate::pus::{create_verification_reporter, generic_pus_request_timeout_handler};
use crate::requests::GenericRequestRouter;
use super::{HandlingStatus, PusTargetedRequestService};
use super::{HandlingStatus, PusTargetedRequestService, TargetedPusService};
#[derive(Clone, PartialEq, Debug, new)]
pub struct HkReply {
@@ -297,45 +296,26 @@ pub struct HkServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMe
>,
}
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
HkServiceWrapper<TmSender, TcInMemConverter>
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
for HkServiceWrapper<TmSender, TcInMemConverter>
{
pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
match self.service.poll_and_handle_next_tc(time_stamp) {
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS 3 partial packet handling success: {e:?}")
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS 3 invalid subservice {invalid}");
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS 3 subservice {subservice} not implemented");
}
PusPacketHandlerResult::Empty => return HandlingStatus::Empty,
},
Err(error) => {
error!("PUS packet handling error: {error:?}");
// To avoid permanent loops on error cases.
return HandlingStatus::Empty;
}
const SERVICE_ID: u8 = PusServiceId::Housekeeping as u8;
const SERVICE_STR: &'static str = "housekeeping";
delegate::delegate! {
to self.service {
fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, PusPacketHandlingError>;
fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError>;
fn check_for_request_timeouts(&mut self);
}
HandlingStatus::HandledOne
}
pub fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus {
// This only fails if all senders disconnected. Treat it like an empty queue.
self.service
.poll_and_check_next_reply(time_stamp)
.unwrap_or_else(|e| {
warn!("PUS 3: Handling reply failed with error {e:?}");
HandlingStatus::Empty
})
}
pub fn check_for_request_timeouts(&mut self) {
self.service.check_for_request_timeouts();
}
}


@@ -8,8 +8,8 @@ use satrs::pus::verification::{
use satrs::pus::{
ActiveRequestMapProvider, ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter,
EcssTcReceiver, EcssTmSender, EcssTmtcError, GenericConversionError, GenericRoutingError,
PusPacketHandlerResult, PusPacketHandlingError, PusReplyHandler, PusRequestRouter,
PusServiceHelper, PusTcToRequestConverter, TcInMemory,
HandlingStatus, PusPacketHandlingError, PusReplyHandler, PusRequestRouter, PusServiceHelper,
PusTcToRequestConverter, TcInMemory,
};
use satrs::queue::{GenericReceiveError, GenericSendError};
use satrs::request::{Apid, GenericMessage, MessageMetadata};
@@ -31,12 +31,6 @@ pub mod scheduler;
pub mod stack;
pub mod test;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum HandlingStatus {
Empty,
HandledOne,
}
pub fn create_verification_reporter(owner_id: ComponentId, apid: Apid) -> VerificationReporter {
let verif_cfg = VerificationReporterCfg::new(apid, 1, 2, 8).unwrap();
// Every software component which needs to generate verification telemetry, gets a cloned
@@ -79,7 +73,7 @@ impl<TmSender: EcssTmSender> PusTcDistributor<TmSender> {
pub fn handle_tc_packet_vec(
&mut self,
packet_as_vec: PacketAsVec,
) -> Result<PusPacketHandlerResult, GenericSendError> {
) -> Result<HandlingStatus, GenericSendError> {
self.handle_tc_generic(packet_as_vec.sender_id, None, &packet_as_vec.packet)
}
@@ -87,7 +81,7 @@ impl<TmSender: EcssTmSender> PusTcDistributor<TmSender> {
&mut self,
packet_in_pool: PacketInPool,
pus_tc_copy: &[u8],
) -> Result<PusPacketHandlerResult, GenericSendError> {
) -> Result<HandlingStatus, GenericSendError> {
self.handle_tc_generic(
packet_in_pool.sender_id,
Some(packet_in_pool.store_addr),
@@ -100,7 +94,7 @@ impl<TmSender: EcssTmSender> PusTcDistributor<TmSender> {
sender_id: ComponentId,
addr_opt: Option<PoolAddr>,
raw_tc: &[u8],
) -> Result<PusPacketHandlerResult, GenericSendError> {
) -> Result<HandlingStatus, GenericSendError> {
let pus_tc_result = PusTcReader::new(raw_tc);
if pus_tc_result.is_err() {
log::warn!(
@@ -109,7 +103,8 @@ impl<TmSender: EcssTmSender> PusTcDistributor<TmSender> {
pus_tc_result.unwrap_err()
);
log::warn!("raw data: {:x?}", raw_tc);
return Ok(PusPacketHandlerResult::RequestHandled);
// TODO: Shouldn't this be an error?
return Ok(HandlingStatus::HandledOne);
}
let pus_tc = pus_tc_result.unwrap().0;
let init_token = self.verif_reporter.add_tc(&pus_tc);
@@ -189,17 +184,65 @@ impl<TmSender: EcssTmSender> PusTcDistributor<TmSender> {
}
}
}
Ok(PusPacketHandlerResult::RequestHandled)
Ok(HandlingStatus::HandledOne)
}
}
pub trait TargetedPusService {
/// Returns [true] if the packet handling is finished.
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus;
fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus;
const SERVICE_ID: u8;
const SERVICE_STR: &'static str;
fn poll_and_handle_next_tc_default_handler(&mut self, time_stamp: &[u8]) -> HandlingStatus {
let result = self.poll_and_handle_next_tc(time_stamp);
if let Err(e) = result {
log::error!(
"PUS service {}({})packet handling error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
e
);
// To avoid permanent loops on error cases.
return HandlingStatus::Empty;
}
result.unwrap()
}
fn poll_and_handle_next_reply_default_handler(&mut self, time_stamp: &[u8]) -> HandlingStatus {
// This only fails if all senders disconnected. Treat it like an empty queue.
self.poll_and_handle_next_reply(time_stamp)
.unwrap_or_else(|e| {
warn!(
"PUS servce {}({}): Handling reply failed with error {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
e
);
HandlingStatus::Empty
})
}
fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, PusPacketHandlingError>;
fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError>;
fn check_for_request_timeouts(&mut self);
}
/// Generic trait for services which handle packets directly. Kept minimal for now because
/// of the difficulty of allowing flexible user code for these services.
pub trait DirectPusService {
const SERVICE_ID: u8;
const SERVICE_STR: &'static str;
fn poll_and_handle_next_tc(&mut self, timestamp: &[u8]) -> HandlingStatus;
}
/// This is a generic handler class for all PUS services where a PUS telecommand is converted
/// to a targeted request.
///
@@ -297,10 +340,10 @@ where
pub fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
) -> Result<HandlingStatus, PusPacketHandlingError> {
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(PusPacketHandlerResult::Empty);
return Ok(HandlingStatus::Empty);
}
let ecss_tc_and_token = possible_packet.unwrap();
self.service_helper
@@ -356,7 +399,7 @@ where
return Err(e.into());
}
}
Ok(PusPacketHandlerResult::RequestHandled)
Ok(HandlingStatus::HandledOne)
}
fn handle_conversion_to_request_error(
@@ -409,7 +452,7 @@ where
}
}
pub fn poll_and_check_next_reply(
pub fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError> {
@@ -439,20 +482,17 @@ where
return Ok(());
}
let active_request = active_req_opt.unwrap();
let request_finished = self
.reply_handler
.handle_reply(
reply,
active_request,
&self.service_helper.common.tm_sender,
&self.service_helper.common.verif_reporter,
time_stamp,
)
.unwrap_or(false);
if request_finished {
let result = self.reply_handler.handle_reply(
reply,
active_request,
&self.service_helper.common.tm_sender,
&self.service_helper.common.verif_reporter,
time_stamp,
);
if result.is_err() || (result.is_ok() && *result.as_ref().unwrap()) {
self.active_request_map.remove(reply.request_id());
}
Ok(())
result.map(|_| ())
}
pub fn check_for_request_timeouts(&mut self) {
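
The module above introduces the TargetedPusService and DirectPusService traits which replace the old PusPacketHandlerResult based handling. As orientation only, a minimal, self-contained sketch of the DirectPusService shape follows; HandlingStatus and the trait are re-declared locally as stand-ins, and DummyTestService is a hypothetical service, so this mirrors the shape shown in the diff rather than the actual satrs API.

// Minimal stand-in sketch of the DirectPusService shape introduced above.
// HandlingStatus and the trait are local re-declarations, and DummyTestService
// is a hypothetical service used only for illustration.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum HandlingStatus {
    Empty,
    HandledOne,
}

pub trait DirectPusService {
    const SERVICE_ID: u8;
    const SERVICE_STR: &'static str;

    fn poll_and_handle_next_tc(&mut self, timestamp: &[u8]) -> HandlingStatus;
}

struct DummyTestService {
    pending_packets: usize,
}

impl DirectPusService for DummyTestService {
    const SERVICE_ID: u8 = 17;
    const SERVICE_STR: &'static str = "test";

    fn poll_and_handle_next_tc(&mut self, _timestamp: &[u8]) -> HandlingStatus {
        if self.pending_packets == 0 {
            return HandlingStatus::Empty;
        }
        self.pending_packets -= 1;
        HandlingStatus::HandledOne
    }
}

fn main() {
    let mut srv = DummyTestService { pending_packets: 1 };
    let timestamp = [0u8; 7];
    assert_eq!(srv.poll_and_handle_next_tc(&timestamp), HandlingStatus::HandledOne);
    assert_eq!(srv.poll_and_handle_next_tc(&timestamp), HandlingStatus::Empty);
}

In the example application, the test, scheduling and event service wrappers implement this trait and are polled by the PUS stack until they report Empty.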


@@ -1,5 +1,4 @@
use derive_new::new;
use log::{error, warn};
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use std::sync::mpsc;
use std::time::Duration;
@@ -9,7 +8,7 @@ use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
DefaultActiveRequestMap, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
EcssTcInVecConverter, MpscTcReceiver, MpscTmAsVecSender, PusPacketHandlerResult,
EcssTcInVecConverter, MpscTcReceiver, MpscTmAsVecSender, PusPacketHandlingError,
PusServiceHelper,
};
use satrs::request::GenericMessage;
@@ -36,7 +35,7 @@ use satrs::{
ComponentId,
};
use satrs_example::config::components::PUS_MODE_SERVICE;
use satrs_example::config::{mode_err, tmtc_err};
use satrs_example::config::{mode_err, tmtc_err, CustomPusServiceId};
use super::{
create_verification_reporter, generic_pus_request_timeout_handler, HandlingStatus,
@@ -272,44 +271,26 @@ pub struct ModeServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcIn
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
for ModeServiceWrapper<TmSender, TcInMemConverter>
{
/// Returns [true] if the packet handling is finished.
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
match self.service.poll_and_handle_next_tc(time_stamp) {
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS mode service: partial packet handling success: {e:?}")
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS mode service: invalid subservice {invalid}");
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS mode service: {subservice} not implemented");
}
PusPacketHandlerResult::Empty => return HandlingStatus::Empty,
},
Err(error) => {
error!("PUS mode service: packet handling error: {error:?}");
// To avoid permanent loops on error cases.
return HandlingStatus::Empty;
}
const SERVICE_ID: u8 = CustomPusServiceId::Mode as u8;
const SERVICE_STR: &'static str = "mode";
delegate::delegate! {
to self.service {
fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, PusPacketHandlingError>;
fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError>;
fn check_for_request_timeouts(&mut self);
}
HandlingStatus::HandledOne
}
fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus {
self.service
.poll_and_check_next_reply(time_stamp)
.unwrap_or_else(|e| {
warn!("PUS action service: Handling reply failed with error {e:?}");
HandlingStatus::HandledOne
})
}
fn check_for_request_timeouts(&mut self) {
self.service.check_for_request_timeouts();
}
}
#[cfg(test)]
mod tests {
use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0, TEST_UNIQUE_ID_0};


@@ -2,20 +2,22 @@ use std::sync::mpsc;
use std::time::Duration;
use crate::pus::create_verification_reporter;
use log::{error, info, warn};
use log::info;
use satrs::pool::{PoolProvider, StaticMemoryPool};
use satrs::pus::scheduler::{PusScheduler, TcInfo};
use satrs::pus::scheduler_srv::PusSchedServiceHandler;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
EcssTmSender, MpscTcReceiver, MpscTmAsVecSender, PusPacketHandlerResult, PusServiceHelper,
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter,
EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSender, MpscTcReceiver,
MpscTmAsVecSender, PartialPusHandlingError, PusServiceHelper,
};
use satrs::spacepackets::ecss::PusServiceId;
use satrs::tmtc::{PacketAsVec, PacketInPool, PacketSenderWithSharedPool};
use satrs::ComponentId;
use satrs_example::config::components::PUS_SCHED_SERVICE;
use super::HandlingStatus;
use super::{DirectPusService, HandlingStatus};
pub trait TcReleaser {
fn release(&mut self, sender_id: ComponentId, enabled: bool, info: &TcInfo, tc: &[u8]) -> bool;
@@ -77,6 +79,61 @@ pub struct SchedulingServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: Ec
pub tc_releaser: Box<dyn TcReleaser + Send>,
}
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> DirectPusService
for SchedulingServiceWrapper<TmSender, TcInMemConverter>
{
const SERVICE_ID: u8 = PusServiceId::Scheduling as u8;
const SERVICE_STR: &'static str = "scheduling";
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
let error_handler = |partial_error: &PartialPusHandlingError| {
log::warn!(
"PUS {}({}) partial error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
partial_error
);
};
let result = self.pus_11_handler.poll_and_handle_next_tc(
error_handler,
time_stamp,
&mut self.sched_tc_pool,
);
if let Err(e) = result {
log::warn!(
"PUS {}({}) error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
e
);
// To avoid permanent loops on continuous errors.
return HandlingStatus::Empty;
}
match result.unwrap() {
DirectPusPacketHandlerResult::Handled(handling_status) => return handling_status,
DirectPusPacketHandlerResult::CustomSubservice(subservice, _) => {
log::warn!(
"PUS {}({}) subservice {} not implemented",
Self::SERVICE_ID,
Self::SERVICE_STR,
subservice
);
}
DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
log::warn!(
"PUS {}({}) subservice {} not implemented",
Self::SERVICE_ID,
Self::SERVICE_STR,
subservice
);
}
}
HandlingStatus::HandledOne
}
}
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
SchedulingServiceWrapper<TmSender, TcInMemConverter>
{
@@ -103,31 +160,6 @@ impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
info!("{released_tcs} TC(s) released from scheduler");
}
}
pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
match self
.pus_11_handler
.poll_and_handle_next_tc(time_stamp, &mut self.sched_tc_pool)
{
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS11 partial packet handling success: {e:?}")
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS11 invalid subservice {invalid}");
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS11: Subservice {subservice} not implemented");
}
PusPacketHandlerResult::Empty => return HandlingStatus::Empty,
},
Err(error) => {
error!("PUS packet handling error: {error:?}")
}
}
HandlingStatus::HandledOne
}
}
pub fn create_scheduler_service_static(


@@ -7,10 +7,12 @@ use satrs::{
use super::{
action::ActionServiceWrapper, event::EventServiceWrapper, hk::HkServiceWrapper,
scheduler::SchedulingServiceWrapper, test::TestCustomServiceWrapper, HandlingStatus,
TargetedPusService,
scheduler::SchedulingServiceWrapper, test::TestCustomServiceWrapper, DirectPusService,
HandlingStatus, TargetedPusService,
};
// TODO: For better extensibility, we could create two vectors: one for direct PUS services and one
// for targeted services.
#[derive(new)]
pub struct PusStack<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
test_srv: TestCustomServiceWrapper<TmSender, TcInMemConverter>,
@@ -28,52 +30,28 @@ impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
// Release all telecommands which reached their release time before calling the service
// handlers.
self.schedule_srv.release_tcs();
let time_stamp = cds::CdsTime::now_with_u16_days()
let timestamp = cds::CdsTime::now_with_u16_days()
.expect("time stamp generation error")
.to_vec()
.unwrap();
let mut loop_count = 0_u32;
// Hot loop which will run continuously until all request and reply handling is done.
loop {
let mut nothing_to_do = true;
let mut is_srv_finished =
|_srv_id: u8,
tc_handling_status: HandlingStatus,
reply_handling_status: Option<HandlingStatus>| {
if tc_handling_status == HandlingStatus::HandledOne
|| (reply_handling_status.is_some()
&& reply_handling_status.unwrap() == HandlingStatus::HandledOne)
{
nothing_to_do = false;
}
};
is_srv_finished(
17,
self.test_srv.poll_and_handle_next_packet(&time_stamp),
None,
Self::direct_service_checker(&mut self.test_srv, &timestamp, &mut nothing_to_do);
Self::direct_service_checker(&mut self.schedule_srv, &timestamp, &mut nothing_to_do);
Self::direct_service_checker(&mut self.event_srv, &timestamp, &mut nothing_to_do);
Self::targeted_service_checker(
&mut self.action_srv_wrapper,
&timestamp,
&mut nothing_to_do,
);
is_srv_finished(
11,
self.schedule_srv.poll_and_handle_next_tc(&time_stamp),
None,
);
is_srv_finished(5, self.event_srv.poll_and_handle_next_tc(&time_stamp), None);
is_srv_finished(
8,
self.action_srv_wrapper.poll_and_handle_next_tc(&time_stamp),
Some(
self.action_srv_wrapper
.poll_and_handle_next_reply(&time_stamp),
),
);
is_srv_finished(
3,
self.hk_srv_wrapper.poll_and_handle_next_tc(&time_stamp),
Some(self.hk_srv_wrapper.poll_and_handle_next_reply(&time_stamp)),
);
is_srv_finished(
200,
self.mode_srv.poll_and_handle_next_tc(&time_stamp),
Some(self.mode_srv.poll_and_handle_next_reply(&time_stamp)),
Self::targeted_service_checker(
&mut self.hk_srv_wrapper,
&timestamp,
&mut nothing_to_do,
);
Self::targeted_service_checker(&mut self.mode_srv, &timestamp, &mut nothing_to_do);
if nothing_to_do {
// Timeout checking is only done once.
self.action_srv_wrapper.check_for_request_timeouts();
@@ -81,6 +59,37 @@ impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
self.mode_srv.check_for_request_timeouts();
break;
}
// Safety mechanism to avoid infinite loops.
loop_count += 1;
if loop_count >= 500 {
log::warn!("reached PUS stack loop count 500, breaking");
break;
}
}
}
pub fn direct_service_checker<S: DirectPusService>(
service: &mut S,
timestamp: &[u8],
nothing_to_do: &mut bool,
) {
let handling_status = service.poll_and_handle_next_tc(timestamp);
if handling_status == HandlingStatus::HandledOne {
*nothing_to_do = false;
}
}
pub fn targeted_service_checker<S: TargetedPusService>(
service: &mut S,
timestamp: &[u8],
nothing_to_do: &mut bool,
) {
let request_handling = service.poll_and_handle_next_tc_default_handler(timestamp);
let reply_handling = service.poll_and_handle_next_reply_default_handler(timestamp);
if request_handling == HandlingStatus::HandledOne
|| reply_handling == HandlingStatus::HandledOne
{
*nothing_to_do = false;
}
}
}
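
The reworked PusStack above polls every service wrapper in a hot loop until no service reports HandledOne, and the new loop_count guard caps that loop at 500 iterations. A condensed, self-contained sketch of this control flow, with a stand-in closure in place of the real service wrappers, might look as follows:

#[derive(Debug, PartialEq, Eq, Copy, Clone)]
enum HandlingStatus {
    Empty,
    HandledOne,
}

// Stand-in for the per-service poll calls performed by the real PusStack.
fn run_bounded_hot_loop(mut poll_services: impl FnMut() -> HandlingStatus) {
    let mut loop_count = 0_u32;
    loop {
        let mut nothing_to_do = true;
        if poll_services() == HandlingStatus::HandledOne {
            nothing_to_do = false;
        }
        if nothing_to_do {
            // The real stack performs its one-time request timeout checks here.
            break;
        }
        // Safety mechanism to avoid infinite loops.
        loop_count += 1;
        if loop_count >= 500 {
            eprintln!("reached PUS stack loop count 500, breaking");
            break;
        }
    }
}

fn main() {
    // Simulate three queued packets followed by an empty queue.
    let mut remaining = 3;
    run_bounded_hot_loop(|| {
        if remaining > 0 {
            remaining -= 1;
            HandlingStatus::HandledOne
        } else {
            HandlingStatus::Empty
        }
    });
}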


@@ -1,24 +1,22 @@
use crate::pus::create_verification_reporter;
use log::{info, warn};
use log::info;
use satrs::event_man::{EventMessage, EventMessageU32};
use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::test::PusService17TestHandler;
use satrs::pus::verification::{FailParams, VerificationReporter, VerificationReportingProvider};
use satrs::pus::EcssTcInSharedStoreConverter;
use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter, EcssTmSender, MpscTcReceiver,
MpscTmAsVecSender, PusPacketHandlerResult, PusServiceHelper,
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter,
EcssTmSender, MpscTcReceiver, MpscTmAsVecSender, PusServiceHelper,
};
use satrs::pus::{EcssTcInSharedStoreConverter, PartialPusHandlingError};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusPacket;
use satrs::spacepackets::time::cds::CdsTime;
use satrs::spacepackets::time::TimeWriter;
use satrs::spacepackets::ecss::{PusPacket, PusServiceId};
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use satrs_example::config::components::PUS_TEST_SERVICE;
use satrs_example::config::{tmtc_err, TEST_EVENT};
use std::sync::mpsc;
use super::HandlingStatus;
use super::{DirectPusService, HandlingStatus};
pub fn create_test_service_static(
tm_sender: PacketSenderWithSharedPool,
@@ -35,7 +33,7 @@ pub fn create_test_service_static(
));
TestCustomServiceWrapper {
handler: pus17_handler,
test_srv_event_sender: event_sender,
event_tx: event_sender,
}
}
@@ -53,7 +51,7 @@ pub fn create_test_service_dynamic(
));
TestCustomServiceWrapper {
handler: pus17_handler,
test_srv_event_sender: event_sender,
event_tx: event_sender,
}
}
@@ -61,33 +59,55 @@ pub struct TestCustomServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: Ec
{
pub handler:
PusService17TestHandler<MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
pub test_srv_event_sender: mpsc::SyncSender<EventMessageU32>,
pub event_tx: mpsc::SyncSender<EventMessageU32>,
}
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
TestCustomServiceWrapper<TmSender, TcInMemConverter>
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> DirectPusService
for TestCustomServiceWrapper<TmSender, TcInMemConverter>
{
pub fn poll_and_handle_next_packet(&mut self, time_stamp: &[u8]) -> HandlingStatus {
let res = self.handler.poll_and_handle_next_tc(time_stamp);
if res.is_err() {
warn!("PUS17 handler failed with error {:?}", res.unwrap_err());
return HandlingStatus::HandledOne;
const SERVICE_ID: u8 = PusServiceId::Test as u8;
const SERVICE_STR: &'static str = "test";
fn poll_and_handle_next_tc(&mut self, timestamp: &[u8]) -> HandlingStatus {
let error_handler = |partial_error: &PartialPusHandlingError| {
log::warn!(
"PUS {}({}) partial error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
partial_error
);
};
let res = self
.handler
.poll_and_handle_next_tc(error_handler, timestamp);
if let Err(e) = res {
log::warn!(
"PUS {}({}) error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
e
);
// To avoid permanent loops on continuous errors.
return HandlingStatus::Empty;
}
match res.unwrap() {
PusPacketHandlerResult::RequestHandled => {
info!("Received PUS ping command TC[17,1]");
info!("Sent ping reply PUS TM[17,2]");
DirectPusPacketHandlerResult::Handled(handling_status) => {
if handling_status == HandlingStatus::HandledOne {
info!("Received PUS ping command TC[17,1]");
info!("Sent ping reply PUS TM[17,2]");
}
return handling_status;
}
PusPacketHandlerResult::RequestHandledPartialSuccess(partial_err) => {
warn!(
"Handled PUS ping command with partial success: {:?}",
partial_err
DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
log::warn!(
"PUS {}({}) subservice {} not implemented",
Self::SERVICE_ID,
Self::SERVICE_STR,
subservice
);
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS17: Subservice {subservice} not implemented")
}
PusPacketHandlerResult::CustomSubservice(subservice, token) => {
DirectPusPacketHandlerResult::CustomSubservice(subservice, token) => {
let (tc, _) = PusTcReader::new(
self.handler
.service_helper
@@ -95,29 +115,34 @@ impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
.tc_slice_raw(),
)
.unwrap();
let time_stamper = CdsTime::now_with_u16_days().unwrap();
let mut stamp_buf: [u8; 7] = [0; 7];
time_stamper.write_to_bytes(&mut stamp_buf).unwrap();
if subservice == 128 {
info!("Generating test event");
self.test_srv_event_sender
info!("generating test event");
self.event_tx
.send(EventMessage::new(PUS_TEST_SERVICE.id(), TEST_EVENT.into()))
.expect("Sending test event failed");
let start_token = self
.handler
.service_helper
.verif_reporter()
.start_success(self.handler.service_helper.tm_sender(), token, &stamp_buf)
.expect("Error sending start success");
self.handler
.service_helper
.verif_reporter()
.completion_success(
self.handler.service_helper.tm_sender(),
start_token,
&stamp_buf,
)
.expect("Error sending completion success");
match self.handler.service_helper.verif_reporter().start_success(
self.handler.service_helper.tm_sender(),
token,
timestamp,
) {
Ok(started_token) => {
if let Err(e) = self
.handler
.service_helper
.verif_reporter()
.completion_success(
self.handler.service_helper.tm_sender(),
started_token,
timestamp,
)
{
error_handler(&PartialPusHandlingError::Verification(e));
}
}
Err(e) => {
error_handler(&PartialPusHandlingError::Verification(e));
}
}
} else {
let fail_data = [tc.subservice()];
self.handler
@@ -127,7 +152,7 @@ impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
self.handler.service_helper.tm_sender(),
token,
FailParams::new(
&stamp_buf,
timestamp,
&tmtc_err::INVALID_PUS_SUBSERVICE,
&fail_data,
),
@@ -135,7 +160,6 @@ impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
.expect("Sending start failure verification failed");
}
}
PusPacketHandlerResult::Empty => return HandlingStatus::Empty,
}
HandlingStatus::HandledOne
}


@@ -1,12 +1,13 @@
use satrs::{
pool::PoolProvider,
pus::HandlingStatus,
tmtc::{PacketAsVec, PacketInPool, PacketSenderWithSharedPool, SharedPacketPool},
};
use std::sync::mpsc::{self, TryRecvError};
use satrs::pus::MpscTmAsVecSender;
use crate::pus::{HandlingStatus, PusTcDistributor};
use crate::pus::PusTcDistributor;
// TC source components where static pools are the backing memory of the received telecommands.
pub struct TcSourceTaskStatic {


@@ -8,6 +8,15 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.1.4] 2024-04-24
## Added
- `ResultU16::from_be_bytes`
- `From<u16>` impl for `ResultU16`.
- Optional `defmt` support: `defmt::Format` impl on `ResultU16` if the `defmt` feature is
activated.
# [v0.1.3] 2024-04-16
Allow `spacepackets` range starting with v0.10 and v0.11.


@@ -1,7 +1,7 @@
[package]
name = "satrs-shared"
description = "Components shared by multiple sat-rs crates"
version = "0.1.3"
version = "0.1.4"
edition = "2021"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
homepage = "https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/"
@@ -17,12 +17,17 @@ version = "1"
default-features = false
optional = true
[dependencies.defmt]
version = "0.3"
optional = true
[dependencies.spacepackets]
version = ">0.9, <=0.11"
default-features = false
[features]
serde = ["dep:serde", "spacepackets/serde"]
spacepackets = ["dep:defmt", "spacepackets/defmt"]
[package.metadata.docs.rs]
rustdoc-args = ["--cfg", "doc_cfg", "--generate-link-to-definition"]
rustdoc-args = ["--cfg", "docs_rs", "--generate-link-to-definition"]


@@ -1,3 +1,4 @@
//! This crate contains modules shared among other sat-rs framework crates.
#![no_std]
#![cfg_attr(docs_rs, feature(doc_auto_cfg))]
pub mod res_code;


@@ -7,6 +7,7 @@ use spacepackets::ByteConversionError;
/// Simple [u16] based result code type which also allows grouping related result codes.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct ResultU16 {
group_id: u8,
unique_id: u8,
@@ -19,15 +20,28 @@ impl ResultU16 {
unique_id,
}
}
pub fn raw(&self) -> u16 {
((self.group_id as u16) << 8) | self.unique_id as u16
}
pub fn group_id(&self) -> u8 {
self.group_id
}
pub fn unique_id(&self) -> u8 {
self.unique_id
}
pub fn from_be_bytes(bytes: [u8; 2]) -> Self {
Self::from(u16::from_be_bytes(bytes))
}
}
impl From<u16> for ResultU16 {
fn from(value: u16) -> Self {
Self::new(((value >> 8) & 0xff) as u8, (value & 0xff) as u8)
}
}
impl From<ResultU16> for EcssEnumU16 {
@@ -84,5 +98,14 @@ mod tests {
assert_eq!(written, 2);
assert_eq!(buf[0], 1);
assert_eq!(buf[1], 1);
let read_back = ResultU16::from_be_bytes(buf);
assert_eq!(read_back, result_code);
}
#[test]
fn test_from_u16() {
let result_code = ResultU16::new(1, 1);
let result_code_2 = ResultU16::from(result_code.raw());
assert_eq!(result_code, result_code_2);
}
}
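
The From<u16> impl and the from_be_bytes constructor added here round-trip with the existing raw getter, as the new tests show. A small usage sketch, assuming satrs-shared 0.1.4 as a dependency and the satrs_shared::res_code module path from this diff:

use satrs_shared::res_code::ResultU16;

fn main() {
    let code = ResultU16::new(1, 1);

    // Raw u16 representation: group ID in the high byte, unique ID in the low byte.
    let raw = code.raw();
    assert_eq!(raw, 0x0101);

    // New in v0.1.4: conversion back from the raw value and from big-endian bytes.
    assert_eq!(ResultU16::from(raw), code);
    assert_eq!(ResultU16::from_be_bytes(raw.to_be_bytes()), code);

    println!("group {}, unique {}", code.group_id(), code.unique_id());
}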


@@ -8,7 +8,21 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.2.0-rc.5] 2024-04-23
# [v0.2.0] 2024-05-02
## Changed
- Various improvements for the PUS stack components.
## Added
- Added `HandlingStatus` enumeration.
# [v0.2.0-rc.5] 2024-04-24
## Added
- Optional `defmt::Format` support for the event types, if the `defmt` feature is activated.
## Changed
@@ -17,6 +31,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
- Renamed `PusEventDispatcher` to `PusEventTmCreatorWithMap`.
- Renamed `DefaultPusEventU32Dispatcher` to `DefaultPusEventU32EventCreator`.
- Renamed `PusEventMgmtBackendProvider` to `PusEventReportingMap`.
- Renamed Event `const_new` methods to `new` and the former `new` methods to `new_checked`.
# [v0.2.0-rc.4] 2024-04-23


@@ -1,6 +1,6 @@
[package]
name = "satrs"
version = "0.2.0-rc.4"
version = "0.2.0"
edition = "2021"
rust-version = "1.71.1"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
@@ -15,6 +15,7 @@ categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-sup
[dependencies]
delegate = ">0.7, <=0.10"
paste = "1"
derive-new = "0.6"
smallvec = "1"
crc = "3"
@@ -84,10 +85,14 @@ version = "0.8"
features = ["os-poll", "net"]
optional = true
[dependencies.defmt]
version = "0.3"
optional = true
[dev-dependencies]
serde = "1"
zerocopy = "0.7"
once_cell = "1.13"
once_cell = "1"
serde_json = "1"
rand = "0.8"
tempfile = "3"
@@ -120,7 +125,7 @@ alloc = [
serde = ["dep:serde", "spacepackets/serde", "satrs-shared/serde"]
crossbeam = ["crossbeam-channel"]
heapless = ["dep:heapless"]
defmt = ["spacepackets/defmt"]
defmt = ["dep:defmt", "spacepackets/defmt"]
test_util = []
doc-images = []


@@ -295,7 +295,7 @@ impl<
for id in ids {
if let Some(sender) = self.sender_map.get_send_event_provider(id) {
if let Err(e) = sender.send(EventMessage::new_generic(
*id,
event_msg.sender_id,
event_msg.event,
event_msg.params.as_ref(),
)) {
@@ -597,7 +597,7 @@ mod tests {
use std::format;
use std::sync::mpsc::{self};
const TEST_EVENT: EventU32 = EventU32::const_new(Severity::INFO, 0, 5);
const TEST_EVENT: EventU32 = EventU32::new(Severity::Info, 0, 5);
fn check_next_event(
expected: EventU32,
@@ -614,6 +614,7 @@ mod tests {
res: EventRoutingResult<EventU32, Params>,
expected: EventU32,
expected_num_sent: u32,
expected_sender_id: ComponentId,
) {
assert!(matches!(res, EventRoutingResult::Handled { .. }));
if let EventRoutingResult::Handled {
@@ -622,6 +623,7 @@ mod tests {
} = res
{
assert_eq!(event_msg.event, expected);
assert_eq!(event_msg.sender_id, expected_sender_id);
assert_eq!(num_recipients, expected_num_sent);
}
}
@@ -634,8 +636,8 @@ mod tests {
#[test]
fn test_basic() {
let (event_sender, mut event_man) = generic_event_man();
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap();
let event_grp_1_0 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
let event_grp_0 = EventU32::new(Severity::Info, 0, 0);
let event_grp_1_0 = EventU32::new(Severity::High, 1, 0);
let (single_event_sender, single_event_receiver) = mpsc::channel();
let single_event_listener = EventSenderMpsc::new(0, single_event_sender);
event_man.subscribe_single(&event_grp_0, single_event_listener.target_id());
@@ -653,8 +655,7 @@ mod tests {
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_grp_0))
.expect("Sending single error failed");
let res = event_man.try_event_handling(&error_handler);
// assert!(res.is_ok());
check_handled_event(res, event_grp_0, 1);
check_handled_event(res, event_grp_0, 1, TEST_COMPONENT_ID_0.id());
check_next_event(event_grp_0, &single_event_receiver);
// Test event which is sent to all group listeners
@@ -662,7 +663,7 @@ mod tests {
.send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_grp_1_0))
.expect("Sending group error failed");
let res = event_man.try_event_handling(&error_handler);
check_handled_event(res, event_grp_1_0, 1);
check_handled_event(res, event_grp_1_0, 1, TEST_COMPONENT_ID_1.id());
check_next_event(event_grp_1_0, &group_event_receiver_0);
}
@@ -672,7 +673,7 @@ mod tests {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
let (event_sender, mut event_man) = generic_event_man();
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap();
let event_grp_0 = EventU32::new(Severity::Info, 0, 0);
let (single_event_sender, single_event_receiver) = mpsc::channel();
let single_event_listener = EventSenderMpsc::new(0, single_event_sender);
event_man.subscribe_single(&event_grp_0, single_event_listener.target_id());
@@ -685,7 +686,7 @@ mod tests {
))
.expect("Sending group error failed");
let res = event_man.try_event_handling(&error_handler);
check_handled_event(res, event_grp_0, 1);
check_handled_event(res, event_grp_0, 1, TEST_COMPONENT_ID_0.id());
let aux = check_next_event(event_grp_0, &single_event_receiver);
assert!(aux.is_some());
let aux = aux.unwrap();
@@ -707,8 +708,8 @@ mod tests {
let res = event_man.try_event_handling(error_handler);
assert!(matches!(res, EventRoutingResult::Empty));
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap();
let event_grp_1_0 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
let event_grp_0 = EventU32::new(Severity::Info, 0, 0);
let event_grp_1_0 = EventU32::new(Severity::High, 1, 0);
let (event_grp_0_sender, event_grp_0_receiver) = mpsc::channel();
let event_grp_0_and_1_listener = EventU32SenderMpsc::new(0, event_grp_0_sender);
event_man.subscribe_group(
@@ -728,9 +729,9 @@ mod tests {
.send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_grp_1_0))
.expect("Sendign Event Group 1 failed");
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_grp_0, 1);
check_handled_event(res, event_grp_0, 1, TEST_COMPONENT_ID_0.id());
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_grp_1_0, 1);
check_handled_event(res, event_grp_1_0, 1, TEST_COMPONENT_ID_1.id());
check_next_event(event_grp_0, &event_grp_0_receiver);
check_next_event(event_grp_1_0, &event_grp_0_receiver);
@@ -744,8 +745,8 @@ mod tests {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
let (event_sender, mut event_man) = generic_event_man();
let event_0 = EventU32::new(Severity::INFO, 0, 5).unwrap();
let event_1 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
let event_0 = EventU32::new(Severity::Info, 0, 5);
let event_1 = EventU32::new(Severity::High, 1, 0);
let (event_0_tx_0, event_0_rx_0) = mpsc::channel();
let (event_0_tx_1, event_0_rx_1) = mpsc::channel();
let event_listener_0 = EventU32SenderMpsc::new(0, event_0_tx_0);
@@ -760,7 +761,7 @@ mod tests {
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_0))
.expect("Triggering Event 0 failed");
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_0, 2);
check_handled_event(res, event_0, 2, TEST_COMPONENT_ID_0.id());
check_next_event(event_0, &event_0_rx_0);
check_next_event(event_0, &event_0_rx_1);
event_man.subscribe_group(event_1.group_id(), event_listener_0_sender_id);
@@ -773,9 +774,9 @@ mod tests {
// 3 event messages will be sent now
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_0, 2);
check_handled_event(res, event_0, 2, TEST_COMPONENT_ID_0.id());
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_1, 1);
check_handled_event(res, event_1, 1, TEST_COMPONENT_ID_1.id());
// Both the single event and the group event should arrive now
check_next_event(event_0, &event_0_rx_0);
check_next_event(event_1, &event_0_rx_0);
@@ -787,7 +788,7 @@ mod tests {
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_1))
.expect("Triggering Event 1 failed");
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_1, 1);
check_handled_event(res, event_1, 1, TEST_COMPONENT_ID_0.id());
}
#[test]
@@ -797,8 +798,8 @@ mod tests {
};
let (event_sender, event_receiver) = mpsc::channel();
let mut event_man = EventManagerWithMpsc::new(event_receiver);
let event_0 = EventU32::new(Severity::INFO, 0, 5).unwrap();
let event_1 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
let event_0 = EventU32::new(Severity::Info, 0, 5);
let event_1 = EventU32::new(Severity::High, 1, 0);
let (event_0_tx_0, all_events_rx) = mpsc::channel();
let all_events_listener = EventU32SenderMpsc::new(0, event_0_tx_0);
event_man.subscribe_all(all_events_listener.target_id());
@@ -810,9 +811,9 @@ mod tests {
.send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_1))
.expect("Triggering event 1 failed");
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_0, 1);
check_handled_event(res, event_0, 1, TEST_COMPONENT_ID_0.id());
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_1, 1);
check_handled_event(res, event_1, 1, TEST_COMPONENT_ID_1.id());
check_next_event(event_0, &all_events_rx);
check_next_event(event_1, &all_events_rx);
}

View File

@@ -20,12 +20,12 @@
//! ```
//! use satrs::events::{EventU16, EventU32, EventU32TypedSev, Severity, SeverityHigh, SeverityInfo};
//!
//! const MSG_RECVD: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::const_new(1, 0);
//! const MSG_FAILED: EventU32 = EventU32::const_new(Severity::LOW, 1, 1);
//! const MSG_RECVD: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(1, 0);
//! const MSG_FAILED: EventU32 = EventU32::new(Severity::Low, 1, 1);
//!
//! const TEMPERATURE_HIGH: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::const_new(2, 0);
//! const TEMPERATURE_HIGH: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::new(2, 0);
//!
//! let small_event = EventU16::new(Severity::INFO, 3, 0);
//! let small_event = EventU16::new(Severity::Info, 3, 0);
//! ```
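// Illustrative sketch (not part of the diff): the renamed constructors in action. This assumes
// MAX_GROUP_ID_U32_EVENT, the `raw` getter and the `From<u32>` conversion are exported as shown
// further down in this file.
// The const constructor panics on an oversized group ID, which suits compile-time event IDs.
const TEMPERATURE_LIMIT_BREACHED: EventU32 = EventU32::new(Severity::High, 2, 1);
fn constructor_demo() {
    // The checked variant returns None instead of panicking.
    assert!(EventU32::new_checked(Severity::High, MAX_GROUP_ID_U32_EVENT + 1, 0).is_none());
    // Round trip through the raw u32 representation.
    assert_eq!(EventU32::from(TEMPERATURE_LIMIT_BREACHED.raw()), TEMPERATURE_LIMIT_BREACHED);
}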
use core::fmt::Debug;
use core::hash::Hash;
@@ -40,12 +40,17 @@ pub type LargestEventRaw = u32;
/// Using a type definition allows changing this to u32 more easily in the future
pub type LargestGroupIdRaw = u16;
pub const MAX_GROUP_ID_U32_EVENT: u16 = 2_u16.pow(14) - 1;
pub const MAX_GROUP_ID_U16_EVENT: u16 = 2_u16.pow(6) - 1;
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Severity {
INFO = 0,
LOW = 1,
MEDIUM = 2,
HIGH = 3,
Info = 0,
Low = 1,
Medium = 2,
High = 3,
}
pub trait HasSeverity: Debug + PartialEq + Eq + Copy + Clone {
@@ -56,28 +61,28 @@ pub trait HasSeverity: Debug + PartialEq + Eq + Copy + Clone {
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityInfo {}
impl HasSeverity for SeverityInfo {
const SEVERITY: Severity = Severity::INFO;
const SEVERITY: Severity = Severity::Info;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityLow {}
impl HasSeverity for SeverityLow {
const SEVERITY: Severity = Severity::LOW;
const SEVERITY: Severity = Severity::Low;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityMedium {}
impl HasSeverity for SeverityMedium {
const SEVERITY: Severity = Severity::MEDIUM;
const SEVERITY: Severity = Severity::Medium;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityHigh {}
impl HasSeverity for SeverityHigh {
const SEVERITY: Severity = Severity::HIGH;
const SEVERITY: Severity = Severity::High;
}
pub trait GenericEvent: EcssEnumeration + Copy + Clone {
@@ -99,27 +104,29 @@ impl TryFrom<u8> for Severity {
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
x if x == Severity::INFO as u8 => Ok(Severity::INFO),
x if x == Severity::LOW as u8 => Ok(Severity::LOW),
x if x == Severity::MEDIUM as u8 => Ok(Severity::MEDIUM),
x if x == Severity::HIGH as u8 => Ok(Severity::HIGH),
x if x == Severity::Info as u8 => Ok(Severity::Info),
x if x == Severity::Low as u8 => Ok(Severity::Low),
x if x == Severity::Medium as u8 => Ok(Severity::Medium),
x if x == Severity::High as u8 => Ok(Severity::High),
_ => Err(()),
}
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct EventBase<RAW, GID, UID> {
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
struct EventBase<Raw, GroupId, UniqueId> {
severity: Severity,
group_id: GID,
unique_id: UID,
phantom: PhantomData<RAW>,
group_id: GroupId,
unique_id: UniqueId,
phantom: PhantomData<Raw>,
}
impl<RAW: ToBeBytes, GID, UID> EventBase<RAW, GID, UID> {
impl<Raw: ToBeBytes, GroupId, UniqueId> EventBase<Raw, GroupId, UniqueId> {
fn write_to_bytes(
&self,
raw: RAW,
raw: Raw,
buf: &mut [u8],
width: usize,
) -> Result<usize, ByteConversionError> {
@@ -267,6 +274,7 @@ macro_rules! const_from_fn {
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct EventU32 {
base: EventBase<u32, u16, u16>,
}
@@ -309,12 +317,12 @@ impl EventU32 {
/// next 14 bits after the severity. Therefore, the value is limited to 16383 (0x3FFF).
/// * `unique_id`: Each event has a unique 16 bit ID occupying the last 16 bits of the
/// raw event ID
pub fn new(
pub fn new_checked(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
if group_id > (2u16.pow(14) - 1) {
if group_id > MAX_GROUP_ID_U32_EVENT {
return None;
}
Some(Self {
@@ -326,12 +334,14 @@ impl EventU32 {
},
})
}
pub const fn const_new(
/// This constructor will panic if the passed group ID is larger than [MAX_GROUP_ID_U32_EVENT].
pub const fn new(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
if group_id > (2u16.pow(14) - 1) {
if group_id > MAX_GROUP_ID_U32_EVENT {
panic!("Group ID too large");
}
Self {
@@ -344,50 +354,16 @@ impl EventU32 {
}
}
pub fn from_be_bytes(bytes: [u8; 4]) -> Self {
Self::from(u32::from_be_bytes(bytes))
}
const_from_fn!(const_from_info, EventU32TypedSev, SeverityInfo);
const_from_fn!(const_from_low, EventU32TypedSev, SeverityLow);
const_from_fn!(const_from_medium, EventU32TypedSev, SeverityMedium);
const_from_fn!(const_from_high, EventU32TypedSev, SeverityHigh);
}
impl<SEVERITY: HasSeverity> EventU32TypedSev<SEVERITY> {
/// This is similar to [EventU32::new] but the severity is a type generic, which allows to
/// have distinct types for events with different severities
pub fn new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
let event = EventU32::new(SEVERITY::SEVERITY, group_id, unique_id)?;
Some(Self {
event,
phantom: PhantomData,
})
}
/// Const version of [Self::new], but panics on invalid group ID input values.
pub const fn const_new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
let event = EventU32::const_new(SEVERITY::SEVERITY, group_id, unique_id);
Self {
event,
phantom: PhantomData,
}
}
fn try_from_generic(expected: Severity, raw: u32) -> Result<Self, Severity> {
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
if severity != expected {
return Err(severity);
}
Ok(Self::const_new(
((raw >> 16) & 0x3FFF) as u16,
(raw & 0xFFFF) as u16,
))
}
}
impl From<u32> for EventU32 {
fn from(raw: u32) -> Self {
// Severity conversion from u8 should never fail
@@ -395,15 +371,10 @@ impl From<u32> for EventU32 {
let group_id = ((raw >> 16) & 0x3FFF) as u16;
let unique_id = (raw & 0xFFFF) as u16;
// Sanitized input, should never fail
Self::const_new(severity, group_id, unique_id)
Self::new(severity, group_id, unique_id)
}
}
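// Worked example (illustrative, not part of the diff) of the decoding performed above:
// the severity sits in bits 31..30, the group ID in bits 29..16 and the unique ID in bits 15..0,
// so 0xC001_0002 decodes to severity High (0b11), group ID 1, unique ID 2.
fn raw_layout_demo() {
    let event = EventU32::from(0xC001_0002_u32);
    assert_eq!(event.severity(), Severity::High);
    assert_eq!(event.group_id(), 1);
    assert_eq!(event.unique_id(), 2);
}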
try_from_impls!(SeverityInfo, Severity::INFO, u32, EventU32TypedSev);
try_from_impls!(SeverityLow, Severity::LOW, u32, EventU32TypedSev);
try_from_impls!(SeverityMedium, Severity::MEDIUM, u32, EventU32TypedSev);
try_from_impls!(SeverityHigh, Severity::HIGH, u32, EventU32TypedSev);
impl UnsignedEnum for EventU32 {
fn size(&self) -> usize {
core::mem::size_of::<u32>()
@@ -424,6 +395,49 @@ impl EcssEnumeration for EventU32 {
}
}
impl<SEVERITY: HasSeverity> EventU32TypedSev<SEVERITY> {
/// This is similar to [EventU32::new] but the severity is a type generic, which allows to
/// have distinct types for events with different severities
pub fn new_checked(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
let event = EventU32::new_checked(SEVERITY::SEVERITY, group_id, unique_id)?;
Some(Self {
event,
phantom: PhantomData,
})
}
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U32_EVENT].
pub const fn new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
let event = EventU32::new(SEVERITY::SEVERITY, group_id, unique_id);
Self {
event,
phantom: PhantomData,
}
}
fn try_from_generic(expected: Severity, raw: u32) -> Result<Self, Severity> {
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
if severity != expected {
return Err(severity);
}
Ok(Self::new(
((raw >> 16) & 0x3FFF) as u16,
(raw & 0xFFFF) as u16,
))
}
}
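// Sketch (not part of the diff): with the typed variant the severity lives in the type, so the
// constructors only take the group and unique IDs, and events of different severities cannot be
// mixed up accidentally. Reduction to the untyped form stays available via `Into`.
const TC_ACCEPTED: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(3, 0);
fn typed_severity_demo() {
    assert_eq!(TC_ACCEPTED.severity(), Severity::Info);
    let untyped: EventU32 = TC_ACCEPTED.into();
    assert_eq!(untyped.group_id(), 3);
}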
try_from_impls!(SeverityInfo, Severity::Info, u32, EventU32TypedSev);
try_from_impls!(SeverityLow, Severity::Low, u32, EventU32TypedSev);
try_from_impls!(SeverityMedium, Severity::Medium, u32, EventU32TypedSev);
try_from_impls!(SeverityHigh, Severity::High, u32, EventU32TypedSev);
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU32TypedSev<SEVERITY> {
delegate!(to self.event {
@@ -441,6 +455,8 @@ impl<SEVERITY: HasSeverity> EcssEnumeration for EventU32TypedSev<SEVERITY> {
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct EventU16 {
base: EventBase<u16, u8, u8>,
}
@@ -475,7 +491,7 @@ impl EventU16 {
/// next 6 bits after the severity. Therefore, the value is limited to 63 (0x3F).
/// * `unique_id`: Each event has a unique 8 bit ID occupying the last 8 bits of the
/// raw event ID
pub fn new(
pub fn new_checked(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
@@ -493,8 +509,8 @@ impl EventU16 {
})
}
/// Const version of [Self::new], but panics on invalid group ID input values.
pub const fn const_new(
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U16_EVENT].
pub const fn new(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
@@ -511,52 +527,26 @@ impl EventU16 {
},
}
}
pub fn from_be_bytes(bytes: [u8; 2]) -> Self {
Self::from(u16::from_be_bytes(bytes))
}
const_from_fn!(const_from_info, EventU16TypedSev, SeverityInfo);
const_from_fn!(const_from_low, EventU16TypedSev, SeverityLow);
const_from_fn!(const_from_medium, EventU16TypedSev, SeverityMedium);
const_from_fn!(const_from_high, EventU16TypedSev, SeverityHigh);
}
impl<SEVERITY: HasSeverity> EventU16TypedSev<SEVERITY> {
/// This is similar to [EventU16::new] but the severity is a type generic, which allows to
/// have distinct types for events with different severities
pub fn new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
let event = EventU16::new(SEVERITY::SEVERITY, group_id, unique_id)?;
Some(Self {
event,
phantom: PhantomData,
})
}
/// Const version of [Self::new], but panics on invalid group ID input values.
pub const fn const_new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
let event = EventU16::const_new(SEVERITY::SEVERITY, group_id, unique_id);
Self {
event,
phantom: PhantomData,
}
}
fn try_from_generic(expected: Severity, raw: u16) -> Result<Self, Severity> {
impl From<u16> for EventU16 {
fn from(raw: <Self as GenericEvent>::Raw) -> Self {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
if severity != expected {
return Err(severity);
}
Ok(Self::const_new(
((raw >> 8) & 0x3F) as u8,
(raw & 0xFF) as u8,
))
let group_id = ((raw >> 8) & 0x3F) as u8;
let unique_id = (raw & 0xFF) as u8;
// Sanitized input, new call should never fail
Self::new(severity, group_id, unique_id)
}
}
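// Same layout idea for the smaller event type (illustrative, not part of the diff): severity in
// bits 15..14, group ID in bits 13..8, unique ID in bits 7..0, so 0xC102 decodes to High/1/2.
fn small_raw_layout_demo() {
    let event = EventU16::from(0xC102_u16);
    assert_eq!(event.severity(), Severity::High);
    assert_eq!(event.group_id(), 1);
    assert_eq!(event.unique_id(), 2);
}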
impl_event_provider!(EventU16, EventU16TypedSev, u16, u8, u8);
impl UnsignedEnum for EventU16 {
fn size(&self) -> usize {
core::mem::size_of::<u16>()
@@ -577,6 +567,43 @@ impl EcssEnumeration for EventU16 {
}
}
impl<SEVERITY: HasSeverity> EventU16TypedSev<SEVERITY> {
/// This is similar to [EventU16::new] but the severity is a type generic, which allows to
/// have distinct types for events with different severities
pub fn new_checked(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
let event = EventU16::new_checked(SEVERITY::SEVERITY, group_id, unique_id)?;
Some(Self {
event,
phantom: PhantomData,
})
}
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U16_EVENT].
pub const fn new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
let event = EventU16::new(SEVERITY::SEVERITY, group_id, unique_id);
Self {
event,
phantom: PhantomData,
}
}
fn try_from_generic(expected: Severity, raw: u16) -> Result<Self, Severity> {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
if severity != expected {
return Err(severity);
}
Ok(Self::new(((raw >> 8) & 0x3F) as u8, (raw & 0xFF) as u8))
}
}
impl_event_provider!(EventU16, EventU16TypedSev, u16, u8, u8);
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU16TypedSev<SEVERITY> {
delegate!(to self.event {
@@ -593,20 +620,10 @@ impl<SEVERITY: HasSeverity> EcssEnumeration for EventU16TypedSev<SEVERITY> {
});
}
impl From<u16> for EventU16 {
fn from(raw: <Self as GenericEvent>::Raw) -> Self {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
let group_id = ((raw >> 8) & 0x3F) as u8;
let unique_id = (raw & 0xFF) as u8;
// Sanitized input, new call should never fail
Self::const_new(severity, group_id, unique_id)
}
}
try_from_impls!(SeverityInfo, Severity::INFO, u16, EventU16TypedSev);
try_from_impls!(SeverityLow, Severity::LOW, u16, EventU16TypedSev);
try_from_impls!(SeverityMedium, Severity::MEDIUM, u16, EventU16TypedSev);
try_from_impls!(SeverityHigh, Severity::HIGH, u16, EventU16TypedSev);
try_from_impls!(SeverityInfo, Severity::Info, u16, EventU16TypedSev);
try_from_impls!(SeverityLow, Severity::Low, u16, EventU16TypedSev);
try_from_impls!(SeverityMedium, Severity::Medium, u16, EventU16TypedSev);
try_from_impls!(SeverityHigh, Severity::High, u16, EventU16TypedSev);
impl<Severity: HasSeverity> PartialEq<EventU32> for EventU32TypedSev<Severity> {
#[inline]
@@ -647,12 +664,10 @@ mod tests {
assert_eq!(size_of::<T>(), val);
}
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::const_new(0, 0);
const INFO_EVENT_SMALL: EventU16TypedSev<SeverityInfo> = EventU16TypedSev::const_new(0, 0);
const HIGH_SEV_EVENT: EventU32TypedSev<SeverityHigh> =
EventU32TypedSev::const_new(0x3FFF, 0xFFFF);
const HIGH_SEV_EVENT_SMALL: EventU16TypedSev<SeverityHigh> =
EventU16TypedSev::const_new(0x3F, 0xff);
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(0, 0);
const INFO_EVENT_SMALL: EventU16TypedSev<SeverityInfo> = EventU16TypedSev::new(0, 0);
const HIGH_SEV_EVENT: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::new(0x3FFF, 0xFFFF);
const HIGH_SEV_EVENT_SMALL: EventU16TypedSev<SeverityHigh> = EventU16TypedSev::new(0x3F, 0xff);
/// The fact that this compiles is a test in itself.
const INFO_REDUCED: EventU32 = EventU32::const_from_info(INFO_EVENT);
@@ -683,7 +698,7 @@ mod tests {
#[test]
fn test_normal_event_getters() {
assert_eq!(INFO_EVENT.severity(), Severity::INFO);
assert_eq!(INFO_EVENT.severity(), Severity::Info);
assert_eq!(INFO_EVENT.unique_id(), 0);
assert_eq!(INFO_EVENT.group_id(), 0);
let raw_event = INFO_EVENT.raw();
@@ -692,7 +707,7 @@ mod tests {
#[test]
fn test_small_event_getters() {
assert_eq!(INFO_EVENT_SMALL.severity(), Severity::INFO);
assert_eq!(INFO_EVENT_SMALL.severity(), Severity::Info);
assert_eq!(INFO_EVENT_SMALL.unique_id(), 0);
assert_eq!(INFO_EVENT_SMALL.group_id(), 0);
let raw_event = INFO_EVENT_SMALL.raw();
@@ -701,7 +716,7 @@ mod tests {
#[test]
fn all_ones_event_regular() {
assert_eq!(HIGH_SEV_EVENT.severity(), Severity::HIGH);
assert_eq!(HIGH_SEV_EVENT.severity(), Severity::High);
assert_eq!(HIGH_SEV_EVENT.group_id(), 0x3FFF);
assert_eq!(HIGH_SEV_EVENT.unique_id(), 0xFFFF);
let raw_event = HIGH_SEV_EVENT.raw();
@@ -710,7 +725,7 @@ mod tests {
#[test]
fn all_ones_event_small() {
assert_eq!(HIGH_SEV_EVENT_SMALL.severity(), Severity::HIGH);
assert_eq!(HIGH_SEV_EVENT_SMALL.severity(), Severity::High);
assert_eq!(HIGH_SEV_EVENT_SMALL.group_id(), 0x3F);
assert_eq!(HIGH_SEV_EVENT_SMALL.unique_id(), 0xFF);
let raw_event = HIGH_SEV_EVENT_SMALL.raw();
@@ -719,18 +734,19 @@ mod tests {
#[test]
fn invalid_group_id_normal() {
assert!(EventU32TypedSev::<SeverityMedium>::new(2_u16.pow(14), 0).is_none());
assert!(EventU32TypedSev::<SeverityMedium>::new_checked(2_u16.pow(14), 0).is_none());
}
#[test]
fn invalid_group_id_small() {
assert!(EventU16TypedSev::<SeverityMedium>::new(2_u8.pow(6), 0).is_none());
assert!(EventU16TypedSev::<SeverityMedium>::new_checked(2_u8.pow(6), 0).is_none());
}
#[test]
fn regular_new() {
assert_eq!(
EventU32TypedSev::<SeverityInfo>::new(0, 0).expect("Creating regular event failed"),
EventU32TypedSev::<SeverityInfo>::new_checked(0, 0)
.expect("Creating regular event failed"),
INFO_EVENT
);
}
@@ -738,7 +754,8 @@ mod tests {
#[test]
fn small_new() {
assert_eq!(
EventU16TypedSev::<SeverityInfo>::new(0, 0).expect("Creating regular event failed"),
EventU16TypedSev::<SeverityInfo>::new_checked(0, 0)
.expect("Creating regular event failed"),
INFO_EVENT_SMALL
);
}
@@ -777,6 +794,8 @@ mod tests {
assert!(HIGH_SEV_EVENT.write_to_be_bytes(&mut buf).is_ok());
let val_from_raw = u32::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFFFFFF);
let event_read_back = EventU32::from_be_bytes(buf);
assert_eq!(event_read_back, HIGH_SEV_EVENT);
}
#[test]
@@ -785,6 +804,8 @@ mod tests {
assert!(HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf).is_ok());
let val_from_raw = u16::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFF);
let event_read_back = EventU16::from_be_bytes(buf);
assert_eq!(event_read_back, HIGH_SEV_EVENT_SMALL);
}
#[test]
@@ -815,13 +836,13 @@ mod tests {
fn severity_from_invalid_raw_val() {
let invalid = 0xFF;
assert!(Severity::try_from(invalid).is_err());
let invalid = Severity::HIGH as u8 + 1;
let invalid = Severity::High as u8 + 1;
assert!(Severity::try_from(invalid).is_err());
}
#[test]
fn reduction() {
let event = EventU32TypedSev::<SeverityInfo>::const_new(1, 1);
let event = EventU32TypedSev::<SeverityInfo>::new(1, 1);
let raw = event.raw();
let reduced: EventU32 = event.into();
assert_eq!(reduced.group_id(), 1);

View File

@@ -643,52 +643,18 @@ impl From<&str> for Params {
}
}
/// Please note while [WritableToBeBytes] is implemented for [Params], the default implementation
/// will not be able to process the [Params::Store] parameter variant.
impl WritableToBeBytes for Params {
impl WritableToBeBytes for ParamsHeapless {
fn written_len(&self) -> usize {
match self {
Params::Heapless(p) => match p {
ParamsHeapless::Raw(raw) => raw.written_len(),
ParamsHeapless::EcssEnum(enumeration) => enumeration.written_len(),
},
Params::Store(_) => 0,
#[cfg(feature = "alloc")]
Params::Vec(vec) => vec.len(),
#[cfg(feature = "alloc")]
Params::String(string) => string.len(),
ParamsHeapless::Raw(raw) => raw.written_len(),
ParamsHeapless::EcssEnum(ecss_enum) => ecss_enum.written_len(),
}
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
match self {
Params::Heapless(p) => match p {
ParamsHeapless::Raw(raw) => raw.write_to_be_bytes(buf),
ParamsHeapless::EcssEnum(enumeration) => enumeration.write_to_be_bytes(buf),
},
Params::Store(_) => Ok(0),
#[cfg(feature = "alloc")]
Params::Vec(vec) => {
if buf.len() < vec.len() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: vec.len(),
});
}
buf[0..vec.len()].copy_from_slice(vec);
Ok(vec.len())
}
#[cfg(feature = "alloc")]
Params::String(string) => {
if buf.len() < string.len() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: string.len(),
});
}
buf[0..string.len()].copy_from_slice(string.as_bytes());
Ok(string.len())
}
ParamsHeapless::Raw(raw) => raw.write_to_be_bytes(buf),
ParamsHeapless::EcssEnum(ecss_enum) => ecss_enum.write_to_be_bytes(buf),
}
}
}
@@ -837,10 +803,9 @@ mod tests {
#[test]
fn test_params_written_len_raw() {
let param_raw = ParamsRaw::from((500_u32, 1000_u32));
let param: Params = Params::Heapless(param_raw.into());
assert_eq!(param.written_len(), 8);
assert_eq!(param_raw.written_len(), 8);
let mut buf: [u8; 8] = [0; 8];
param
param_raw
.write_to_be_bytes(&mut buf)
.expect("writing to buffer failed");
assert_eq!(u32::from_be_bytes(buf[0..4].try_into().unwrap()), 500);
@@ -848,21 +813,28 @@ mod tests {
}
#[test]
fn test_params_written_string() {
let string = "Test String".to_string();
let param = Params::String(string.clone());
assert_eq!(param.written_len(), string.len());
let vec = param.to_vec().unwrap();
let string_conv_back = String::from_utf8(vec).expect("conversion to string failed");
assert_eq!(string_conv_back, string);
fn test_heapless_param_writable_trait_raw() {
let param_heapless = ParamsHeapless::Raw(ParamsRaw::from((500_u32, 1000_u32)));
assert_eq!(param_heapless.written_len(), 8);
let mut buf: [u8; 8] = [0; 8];
let size = param_heapless
.write_to_be_bytes(&mut buf)
.expect("writing failed");
assert_eq!(size, 8);
assert_eq!(u32::from_be_bytes(buf[0..4].try_into().unwrap()), 500);
assert_eq!(u32::from_be_bytes(buf[4..8].try_into().unwrap()), 1000);
}
#[test]
fn test_params_written_vec() {
let vec: Vec<u8> = alloc::vec![1, 2, 3, 4, 5];
let param = Params::Vec(vec.clone());
assert_eq!(param.written_len(), vec.len());
assert_eq!(param.to_vec().expect("writing vec params failed"), vec);
fn test_heapless_param_writable_trait_ecss_enum() {
let param_heapless = ParamsHeapless::EcssEnum(ParamsEcssEnum::U16(5.into()));
assert_eq!(param_heapless.written_len(), 2);
let mut buf: [u8; 2] = [0; 2];
let size = param_heapless
.write_to_be_bytes(&mut buf)
.expect("writing failed");
assert_eq!(size, 2);
assert_eq!(u16::from_be_bytes(buf[0..2].try_into().unwrap()), 5);
}
#[test]

View File

@@ -54,11 +54,11 @@ pub type GenericActionReplyPus = GenericMessage<ActionReplyPus>;
impl GenericActionReplyPus {
pub fn new_action_reply(
requestor_info: MessageMetadata,
replier_info: MessageMetadata,
action_id: ActionId,
reply: ActionReplyVariant,
) -> Self {
Self::new(requestor_info, ActionReplyPus::new(action_id, reply))
Self::new(replier_info, ActionReplyPus::new(action_id, reply))
}
}

View File

@@ -331,10 +331,10 @@ mod tests {
fn severity_to_subservice(severity: Severity) -> Subservice {
match severity {
Severity::INFO => Subservice::TmInfoReport,
Severity::LOW => Subservice::TmLowSeverityReport,
Severity::MEDIUM => Subservice::TmMediumSeverityReport,
Severity::HIGH => Subservice::TmHighSeverityReport,
Severity::Info => Subservice::TmInfoReport,
Severity::Low => Subservice::TmLowSeverityReport,
Severity::Medium => Subservice::TmMediumSeverityReport,
Severity::High => Subservice::TmHighSeverityReport,
}
}
@@ -347,22 +347,22 @@ mod tests {
aux_data: Option<&[u8]>,
) {
match severity {
Severity::INFO => {
Severity::Info => {
reporter
.event_info(sender, time_stamp, event, aux_data)
.expect("Error reporting info event");
}
Severity::LOW => {
Severity::Low => {
reporter
.event_low_severity(sender, time_stamp, event, aux_data)
.expect("Error reporting low event");
}
Severity::MEDIUM => {
Severity::Medium => {
reporter
.event_medium_severity(sender, time_stamp, event, aux_data)
.expect("Error reporting medium event");
}
Severity::HIGH => {
Severity::High => {
reporter
.event_high_severity(sender, time_stamp, event, aux_data)
.expect("Error reporting high event");
@@ -389,7 +389,7 @@ mod tests {
if let Some(err_data) = error_data {
error_copy.extend_from_slice(err_data);
}
let event = EventU32::new(severity, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
let event = EventU32::new_checked(severity, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
.expect("Error creating example event");
report_basic_event(
&mut reporter,
@@ -407,7 +407,7 @@ mod tests {
severity_to_subservice(severity) as u8
);
assert_eq!(tm_info.common.dest_id, 0);
assert_eq!(tm_info.common.time_stamp, time_stamp_empty);
assert_eq!(tm_info.common.timestamp, time_stamp_empty);
assert_eq!(tm_info.common.msg_counter, 0);
assert_eq!(tm_info.common.apid, EXAMPLE_APID);
assert_eq!(tm_info.event, event);
@@ -417,35 +417,35 @@ mod tests {
#[test]
fn basic_info_event_generation() {
basic_event_test(4, Severity::INFO, None);
basic_event_test(4, Severity::Info, None);
}
#[test]
fn basic_low_severity_event() {
basic_event_test(4, Severity::LOW, None);
basic_event_test(4, Severity::Low, None);
}
#[test]
fn basic_medium_severity_event() {
basic_event_test(4, Severity::MEDIUM, None);
basic_event_test(4, Severity::Medium, None);
}
#[test]
fn basic_high_severity_event() {
basic_event_test(4, Severity::HIGH, None);
basic_event_test(4, Severity::High, None);
}
#[test]
fn event_with_info_string() {
let info_string = "Test Information";
basic_event_test(32, Severity::INFO, Some(info_string.as_bytes()));
basic_event_test(32, Severity::Info, Some(info_string.as_bytes()));
}
#[test]
fn low_severity_with_raw_err_data() {
let raw_err_param: i32 = -1;
let raw_err = raw_err_param.to_be_bytes();
basic_event_test(8, Severity::LOW, Some(&raw_err))
basic_event_test(8, Severity::Low, Some(&raw_err))
}
fn check_buf_too_small(
@@ -454,7 +454,7 @@ mod tests {
expected_found_len: usize,
) {
let time_stamp_empty: [u8; 7] = [0; 7];
let event = EventU32::new(Severity::INFO, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
let event = EventU32::new_checked(Severity::Info, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
.expect("Error creating example event");
let err = reporter.event_info(sender, &time_stamp_empty, event, None);
assert!(err.is_err());

View File

@@ -107,6 +107,7 @@ pub mod alloc_mod {
use crate::{
events::EventU16,
params::{Params, WritableToBeBytes},
pus::event::{DummyEventHook, EventTmHookProvider},
};
@@ -147,6 +148,12 @@ pub mod alloc_mod {
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct EventGenerationResult {
pub event_was_enabled: bool,
pub params_were_propagated: bool,
}
pub struct PusEventTmCreatorWithMap<
ReportingMap: PusEventReportingMapProvider<Event>,
Event: GenericEvent,
@@ -190,28 +197,75 @@ pub mod alloc_mod {
return Ok(false);
}
match event.severity() {
Severity::INFO => self
Severity::Info => self
.reporter
.event_info(sender, time_stamp, event, params)
.map(|_| true)
.map_err(|e| e.into()),
Severity::LOW => self
Severity::Low => self
.reporter
.event_low_severity(sender, time_stamp, event, params)
.map(|_| true)
.map_err(|e| e.into()),
Severity::MEDIUM => self
Severity::Medium => self
.reporter
.event_medium_severity(sender, time_stamp, event, params)
.map(|_| true)
.map_err(|e| e.into()),
Severity::HIGH => self
Severity::High => self
.reporter
.event_high_severity(sender, time_stamp, event, params)
.map(|_| true)
.map_err(|e| e.into()),
}
}
pub fn generate_pus_event_tm_generic_with_generic_params(
&self,
sender: &(impl EcssTmSender + ?Sized),
time_stamp: &[u8],
event: Event,
small_data_buf: &mut [u8],
params: Option<&Params>,
) -> Result<EventGenerationResult, EventManError> {
let mut result = EventGenerationResult {
event_was_enabled: false,
params_were_propagated: true,
};
if params.is_none() {
result.event_was_enabled =
self.generate_pus_event_tm_generic(sender, time_stamp, event, None)?;
return Ok(result);
}
let params = params.unwrap();
result.event_was_enabled = match params {
Params::Heapless(heapless_param) => {
heapless_param
.write_to_be_bytes(&mut small_data_buf[..heapless_param.written_len()])
.map_err(EcssTmtcError::ByteConversion)?;
self.generate_pus_event_tm_generic(
sender,
time_stamp,
event,
Some(small_data_buf),
)?
}
Params::Vec(vec) => {
self.generate_pus_event_tm_generic(sender, time_stamp, event, Some(vec))?
}
Params::String(string) => self.generate_pus_event_tm_generic(
sender,
time_stamp,
event,
Some(string.as_bytes()),
)?,
_ => {
result.params_were_propagated = false;
self.generate_pus_event_tm_generic(sender, time_stamp, event, None)?
}
};
Ok(result)
}
}
impl<Event: GenericEvent + Copy + PartialEq + Eq + Hash, EventTmHook: EventTmHookProvider>
@@ -261,14 +315,19 @@ pub mod alloc_mod {
}
#[cfg(test)]
mod tests {
use alloc::string::{String, ToString};
use alloc::vec;
use spacepackets::ecss::event::Subservice;
use spacepackets::ecss::tm::PusTmReader;
use spacepackets::ecss::PusPacket;
use super::*;
use crate::request::UniqueApidTargetId;
use crate::{events::SeverityInfo, tmtc::PacketAsVec};
use std::sync::mpsc::{self, TryRecvError};
const INFO_EVENT: EventU32TypedSev<SeverityInfo> =
EventU32TypedSev::<SeverityInfo>::const_new(1, 0);
const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5);
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<SeverityInfo>::new(1, 0);
const LOW_SEV_EVENT: EventU32 = EventU32::new(Severity::Low, 1, 5);
const EMPTY_STAMP: [u8; 7] = [0; 7];
const TEST_APID: u16 = 0x02;
const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, 0x05);
@@ -337,4 +396,70 @@ mod tests {
assert!(event_sent);
event_rx.try_recv().expect("No info event received");
}
#[test]
fn test_event_with_generic_string_param() {
let event_man = create_basic_man_1();
let mut small_data_buf = [0; 128];
let param_data = "hello world";
let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
let res = event_man.generate_pus_event_tm_generic_with_generic_params(
&event_tx,
&EMPTY_STAMP,
INFO_EVENT.into(),
&mut small_data_buf,
Some(&param_data.to_string().into()),
);
assert!(res.is_ok());
let res = res.unwrap();
assert!(res.event_was_enabled);
assert!(res.params_were_propagated);
let event_tm = event_rx.try_recv().expect("no event received");
let (tm, _) = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
assert_eq!(tm.service(), 5);
assert_eq!(tm.subservice(), Subservice::TmInfoReport as u8);
assert_eq!(tm.user_data().len(), 4 + param_data.len());
let u32_event = u32::from_be_bytes(tm.user_data()[0..4].try_into().unwrap());
assert_eq!(u32_event, INFO_EVENT.raw());
let string_data = String::from_utf8_lossy(&tm.user_data()[4..]);
assert_eq!(string_data, param_data);
}
#[test]
fn test_event_with_generic_vec_param() {
let event_man = create_basic_man_1();
let mut small_data_buf = [0; 128];
let param_data = vec![1, 2, 3, 4];
let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
let res = event_man.generate_pus_event_tm_generic_with_generic_params(
&event_tx,
&EMPTY_STAMP,
INFO_EVENT.into(),
&mut small_data_buf,
Some(&param_data.clone().into()),
);
assert!(res.is_ok());
let res = res.unwrap();
assert!(res.event_was_enabled);
assert!(res.params_were_propagated);
let event_tm = event_rx.try_recv().expect("no event received");
let (tm, _) = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
assert_eq!(tm.service(), 5);
assert_eq!(tm.subservice(), Subservice::TmInfoReport as u8);
assert_eq!(tm.user_data().len(), 4 + param_data.len());
let u32_event = u32::from_be_bytes(tm.user_data()[0..4].try_into().unwrap());
assert_eq!(u32_event, INFO_EVENT.raw());
let vec_data = tm.user_data()[4..].to_vec();
assert_eq!(vec_data, param_data);
}
#[test]
fn test_event_with_generic_store_param_not_propagated() {
// TODO: Test this.
}
#[test]
fn test_event_with_generic_heapless_param() {
// TODO: Test this.
}
}

View File

@@ -1,7 +1,7 @@
use crate::events::EventU32;
use crate::pus::event_man::{EventRequest, EventRequestWithToken};
use crate::pus::verification::TcStateToken;
use crate::pus::{PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError};
use crate::pus::{DirectPusPacketHandlerResult, PartialPusHandlingError, PusPacketHandlingError};
use crate::queue::GenericSendError;
use spacepackets::ecss::event::Subservice;
use spacepackets::ecss::PusPacket;
@@ -10,7 +10,7 @@ use std::sync::mpsc::Sender;
use super::verification::VerificationReportingProvider;
use super::{
EcssTcInMemConverter, EcssTcReceiver, EcssTmSender, GenericConversionError,
GenericRoutingError, PusServiceHelper,
GenericRoutingError, HandlingStatus, PusServiceHelper,
};
pub struct PusEventServiceHandler<
@@ -46,13 +46,14 @@ impl<
}
}
pub fn poll_and_handle_next_tc(
pub fn poll_and_handle_next_tc<ErrorCb: FnMut(&PartialPusHandlingError)>(
&mut self,
mut error_callback: ErrorCb,
time_stamp: &[u8],
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(PusPacketHandlerResult::Empty);
return Ok(HandlingStatus::Empty.into());
}
let ecss_tc_and_token = possible_packet.unwrap();
self.service_helper
@@ -62,13 +63,13 @@ impl<
let subservice = tc.subservice();
let srv = Subservice::try_from(subservice);
if srv.is_err() {
return Ok(PusPacketHandlerResult::CustomSubservice(
return Ok(DirectPusPacketHandlerResult::CustomSubservice(
tc.subservice(),
ecss_tc_and_token.token,
));
}
let handle_enable_disable_request =
|enable: bool| -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
let mut handle_enable_disable_request =
|enable: bool| -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
if tc.user_data().len() < 4 {
return Err(GenericConversionError::NotEnoughAppData {
expected: 4,
@@ -79,21 +80,20 @@ impl<
let user_data = tc.user_data();
let event_u32 =
EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap()));
let start_token = self
.service_helper
.common
.verif_reporter
.start_success(
&self.service_helper.common.tm_sender,
ecss_tc_and_token.token,
time_stamp,
)
.map_err(|_| PartialPusHandlingError::Verification);
let partial_error = start_token.clone().err();
let mut token: TcStateToken = ecss_tc_and_token.token.into();
if let Ok(start_token) = start_token {
token = start_token.into();
match self.service_helper.common.verif_reporter.start_success(
&self.service_helper.common.tm_sender,
ecss_tc_and_token.token,
time_stamp,
) {
Ok(start_token) => {
token = start_token.into();
}
Err(e) => {
error_callback(&PartialPusHandlingError::Verification(e));
}
}
let event_req_with_token = if enable {
EventRequestWithToken {
request: EventRequest::Enable(event_u32),
@@ -112,12 +112,7 @@ impl<
GenericSendError::RxDisconnected,
))
})?;
if let Some(partial_error) = partial_error {
return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
partial_error,
));
}
Ok(PusPacketHandlerResult::RequestHandled)
Ok(HandlingStatus::HandledOne.into())
};
match srv.unwrap() {
@@ -136,14 +131,14 @@ impl<
handle_enable_disable_request(false)?;
}
Subservice::TcReportDisabledList | Subservice::TmDisabledEventsReport => {
return Ok(PusPacketHandlerResult::SubserviceNotImplemented(
return Ok(DirectPusPacketHandlerResult::SubserviceNotImplemented(
subservice,
ecss_tc_and_token.token,
));
}
}
Ok(PusPacketHandlerResult::RequestHandled)
Ok(HandlingStatus::HandledOne.into())
}
}
@@ -167,7 +162,7 @@ mod tests {
use crate::pus::verification::{
RequestId, VerificationReporter, VerificationReportingProvider,
};
use crate::pus::{GenericConversionError, MpscTcReceiver};
use crate::pus::{GenericConversionError, HandlingStatus, MpscTcReceiver};
use crate::tmtc::PacketSenderWithSharedPool;
use crate::{
events::EventU32,
@@ -175,13 +170,13 @@ mod tests {
event_man::EventRequestWithToken,
tests::PusServiceHandlerWithSharedStoreCommon,
verification::{TcStateAccepted, VerificationToken},
EcssTcInSharedStoreConverter, PusPacketHandlerResult, PusPacketHandlingError,
DirectPusPacketHandlerResult, EcssTcInSharedStoreConverter, PusPacketHandlingError,
},
};
use super::PusEventServiceHandler;
const TEST_EVENT_0: EventU32 = EventU32::const_new(crate::events::Severity::INFO, 5, 25);
const TEST_EVENT_0: EventU32 = EventU32::new(crate::events::Severity::Info, 5, 25);
struct Pus5HandlerWithStoreTester {
common: PusServiceHandlerWithSharedStoreCommon,
@@ -229,9 +224,11 @@ mod tests {
}
impl SimplePusPacketHandler for Pus5HandlerWithStoreTester {
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
fn handle_one_tc(
&mut self,
) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
self.handler.poll_and_handle_next_tc(&time_stamp)
self.handler.poll_and_handle_next_tc(|_| {}, &time_stamp)
}
}
@@ -293,10 +290,13 @@ mod tests {
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
let result = result.unwrap();
if let PusPacketHandlerResult::Empty = result {
} else {
panic!("unexpected result type {result:?}")
}
assert!(
matches!(
result,
DirectPusPacketHandlerResult::Handled(HandlingStatus::Empty)
),
"unexpected result type {result:?}"
)
}
#[test]
@@ -311,7 +311,7 @@ mod tests {
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
let result = result.unwrap();
if let PusPacketHandlerResult::CustomSubservice(subservice, _) = result {
if let DirectPusPacketHandlerResult::CustomSubservice(subservice, _) = result {
assert_eq!(subservice, 200);
} else {
panic!("unexpected result type {result:?}")

View File

@@ -45,6 +45,15 @@ pub use std_mod::*;
use self::verification::VerificationReportingProvider;
/// Generic handling status for an object which is able to continuously handle a queue of
/// requests or replies until the queue is empty.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum HandlingStatus {
HandledOne,
Empty,
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum PusTmVariant<'time, 'src_data> {
InStore(PoolAddr),
@@ -649,14 +658,11 @@ pub mod alloc_mod {
#[cfg(feature = "std")]
pub mod std_mod {
use super::*;
use crate::pool::{
PoolAddr, PoolError, PoolProvider, PoolProviderWithGuards, SharedStaticMemoryPool,
};
use crate::pus::verification::{TcStateAccepted, VerificationToken};
use crate::pus::{
EcssTcAndToken, EcssTcReceiver, EcssTmSender, EcssTmtcError, GenericReceiveError,
GenericSendError, PusTmVariant, TryRecvTmtcError,
};
use crate::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use crate::ComponentId;
use alloc::vec::Vec;
@@ -920,26 +926,24 @@ pub mod std_mod {
#[error("generic timestamp generation error")]
Time(#[from] StdTimestampError),
#[error("error sending telemetry: {0}")]
TmSend(#[from] EcssTmtcError),
TmSend(EcssTmtcError),
#[error("error sending verification message")]
Verification,
Verification(EcssTmtcError),
#[error("invalid verification token")]
NoVerificationToken,
}
/// Generic result type for handlers which can process PUS packets.
#[derive(Debug, Clone)]
pub enum PusPacketHandlerResult {
RequestHandled,
RequestHandledPartialSuccess(PartialPusHandlingError),
pub enum DirectPusPacketHandlerResult {
Handled(HandlingStatus),
SubserviceNotImplemented(u8, VerificationToken<TcStateAccepted>),
CustomSubservice(u8, VerificationToken<TcStateAccepted>),
Empty,
}
impl From<PartialPusHandlingError> for PusPacketHandlerResult {
fn from(value: PartialPusHandlingError) -> Self {
Self::RequestHandledPartialSuccess(value)
impl From<HandlingStatus> for DirectPusPacketHandlerResult {
fn from(value: HandlingStatus) -> Self {
Self::Handled(value)
}
}
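// Illustrative drain loop (a sketch, not part of the diff): a direct PUS handler is polled until
// its queue runs empty, and partial errors are surfaced through the new error callback instead
// of a dedicated result variant. `handler` and `timestamp` are assumed to be set up like in the
// service handlers further down; the fragment also assumes an enclosing function returning a
// Result so that `?` can be used.
loop {
    match handler.poll_and_handle_next_tc(|e| println!("partial error: {e:?}"), &timestamp)? {
        DirectPusPacketHandlerResult::Handled(HandlingStatus::HandledOne) => continue,
        DirectPusPacketHandlerResult::Handled(HandlingStatus::Empty) => break,
        other => println!("unhandled result: {other:?}"),
    }
}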
@@ -1222,7 +1226,7 @@ pub mod test_util {
use super::{
verification::{self, TcStateAccepted, VerificationToken},
PusPacketHandlerResult, PusPacketHandlingError,
DirectPusPacketHandlerResult, PusPacketHandlingError,
};
pub const TEST_APID: u16 = 0x101;
@@ -1246,7 +1250,8 @@ pub mod test_util {
}
pub trait SimplePusPacketHandler {
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
fn handle_one_tc(&mut self)
-> Result<DirectPusPacketHandlerResult, PusPacketHandlingError>;
}
}
@@ -1284,36 +1289,46 @@ pub mod tests {
pub seq_count: u16,
pub msg_counter: u16,
pub dest_id: u16,
pub time_stamp: [u8; 7],
pub timestamp: Vec<u8>,
}
impl CommonTmInfo {
pub fn new_zero_seq_count(
pub fn new(
subservice: u8,
apid: u16,
seq_count: u16,
msg_counter: u16,
dest_id: u16,
time_stamp: [u8; 7],
timestamp: &[u8],
) -> Self {
Self {
subservice,
apid,
seq_count: 0,
msg_counter: 0,
seq_count,
msg_counter,
dest_id,
time_stamp,
timestamp: timestamp.to_vec(),
}
}
pub fn new_zero_seq_count(
subservice: u8,
apid: u16,
dest_id: u16,
timestamp: &[u8],
) -> Self {
Self::new(subservice, apid, 0, 0, dest_id, timestamp)
}
pub fn new_from_tm(tm: &PusTmCreator) -> Self {
let mut time_stamp = [0; 7];
time_stamp.clone_from_slice(&tm.timestamp()[0..7]);
let mut timestamp = [0; 7];
timestamp.clone_from_slice(&tm.timestamp()[0..7]);
Self {
subservice: PusPacket::subservice(tm),
apid: tm.apid(),
seq_count: tm.seq_count(),
msg_counter: tm.msg_counter(),
dest_id: tm.dest_id(),
time_stamp,
timestamp: timestamp.to_vec(),
}
}
}

View File

@@ -1,11 +1,12 @@
use super::scheduler::PusSchedulerProvider;
use super::verification::{VerificationReporter, VerificationReportingProvider};
use super::{
EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTcReceiver,
EcssTmSender, MpscTcReceiver, PusServiceHelper,
DirectPusPacketHandlerResult, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
EcssTcInVecConverter, EcssTcReceiver, EcssTmSender, HandlingStatus, MpscTcReceiver,
PartialPusHandlingError, PusServiceHelper,
};
use crate::pool::PoolProvider;
use crate::pus::{PusPacketHandlerResult, PusPacketHandlingError};
use crate::pus::PusPacketHandlingError;
use crate::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use alloc::string::ToString;
use spacepackets::ecss::{scheduling, PusPacket};
@@ -64,14 +65,15 @@ impl<
&self.scheduler
}
pub fn poll_and_handle_next_tc(
pub fn poll_and_handle_next_tc<ErrorCb: FnMut(&PartialPusHandlingError)>(
&mut self,
mut error_callback: ErrorCb,
time_stamp: &[u8],
sched_tc_pool: &mut (impl PoolProvider + ?Sized),
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(PusPacketHandlerResult::Empty);
return Ok(HandlingStatus::Empty.into());
}
let ecss_tc_and_token = possible_packet.unwrap();
self.service_helper
@@ -81,34 +83,34 @@ impl<
let subservice = PusPacket::subservice(&tc);
let standard_subservice = scheduling::Subservice::try_from(subservice);
if standard_subservice.is_err() {
return Ok(PusPacketHandlerResult::CustomSubservice(
return Ok(DirectPusPacketHandlerResult::CustomSubservice(
subservice,
ecss_tc_and_token.token,
));
}
let partial_error = None;
match standard_subservice.unwrap() {
scheduling::Subservice::TcEnableScheduling => {
let start_token = self
.service_helper
.verif_reporter()
.start_success(
&self.service_helper.common.tm_sender,
ecss_tc_and_token.token,
time_stamp,
)
.expect("Error sending start success");
let opt_started_token = match self.service_helper.verif_reporter().start_success(
&self.service_helper.common.tm_sender,
ecss_tc_and_token.token,
time_stamp,
) {
Ok(started_token) => Some(started_token),
Err(e) => {
error_callback(&PartialPusHandlingError::Verification(e));
None
}
};
self.scheduler.enable();
if self.scheduler.is_enabled() {
self.service_helper
.verif_reporter()
.completion_success(
&self.service_helper.common.tm_sender,
start_token,
time_stamp,
)
.expect("Error sending completion success");
if self.scheduler.is_enabled() && opt_started_token.is_some() {
if let Err(e) = self.service_helper.verif_reporter().completion_success(
&self.service_helper.common.tm_sender,
opt_started_token.unwrap(),
time_stamp,
) {
error_callback(&PartialPusHandlingError::Verification(e));
}
} else {
return Err(PusPacketHandlingError::Other(
"failed to enabled scheduler".to_string(),
@@ -116,26 +118,27 @@ impl<
}
}
scheduling::Subservice::TcDisableScheduling => {
let start_token = self
.service_helper
.verif_reporter()
.start_success(
&self.service_helper.common.tm_sender,
ecss_tc_and_token.token,
time_stamp,
)
.expect("Error sending start success");
let opt_started_token = match self.service_helper.verif_reporter().start_success(
&self.service_helper.common.tm_sender,
ecss_tc_and_token.token,
time_stamp,
) {
Ok(started_token) => Some(started_token),
Err(e) => {
error_callback(&PartialPusHandlingError::Verification(e));
None
}
};
self.scheduler.disable();
if !self.scheduler.is_enabled() {
self.service_helper
.verif_reporter()
.completion_success(
&self.service_helper.common.tm_sender,
start_token,
time_stamp,
)
.expect("Error sending completion success");
if !self.scheduler.is_enabled() && opt_started_token.is_some() {
if let Err(e) = self.service_helper.verif_reporter().completion_success(
&self.service_helper.common.tm_sender,
opt_started_token.unwrap(),
time_stamp,
) {
error_callback(&PartialPusHandlingError::Verification(e));
}
} else {
return Err(PusPacketHandlingError::Other(
"failed to disable scheduler".to_string(),
@@ -194,18 +197,13 @@ impl<
}
_ => {
// Treat unhandled standard subservices as custom subservices for now.
return Ok(PusPacketHandlerResult::CustomSubservice(
return Ok(DirectPusPacketHandlerResult::CustomSubservice(
subservice,
ecss_tc_and_token.token,
));
}
}
if let Some(partial_error) = partial_error {
return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
partial_error,
));
}
Ok(PusPacketHandlerResult::RequestHandled)
Ok(HandlingStatus::HandledOne.into())
}
}
/// Helper type definition for a PUS 11 handler with a dynamic TMTC memory backend and regular
@@ -257,7 +255,7 @@ mod tests {
verification::{RequestId, TcStateAccepted, VerificationToken},
EcssTcInSharedStoreConverter,
};
use crate::pus::{MpscTcReceiver, PusPacketHandlerResult, PusPacketHandlingError};
use crate::pus::{DirectPusPacketHandlerResult, MpscTcReceiver, PusPacketHandlingError};
use crate::tmtc::PacketSenderWithSharedPool;
use alloc::collections::VecDeque;
use delegate::delegate;
@@ -298,10 +296,12 @@ mod tests {
}
}
pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
pub fn handle_one_tc(
&mut self,
) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
self.handler
.poll_and_handle_next_tc(&time_stamp, &mut self.sched_tc_pool)
.poll_and_handle_next_tc(|_| {}, &time_stamp, &mut self.sched_tc_pool)
}
}
@@ -387,7 +387,7 @@ mod tests {
let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
test_harness
.handler
.poll_and_handle_next_tc(&time_stamp, &mut test_harness.sched_tc_pool)
.poll_and_handle_next_tc(|_| {}, &time_stamp, &mut test_harness.sched_tc_pool)
.unwrap();
test_harness.check_next_verification_tm(1, request_id);
test_harness.check_next_verification_tm(3, request_id);

View File

@@ -1,5 +1,5 @@
use crate::pus::{
PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, PusTmVariant,
DirectPusPacketHandlerResult, PartialPusHandlingError, PusPacketHandlingError, PusTmVariant,
};
use crate::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
@@ -10,7 +10,7 @@ use std::sync::mpsc;
use super::verification::{VerificationReporter, VerificationReportingProvider};
use super::{
EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTcReceiver,
EcssTmSender, GenericConversionError, MpscTcReceiver, PusServiceHelper,
EcssTmSender, GenericConversionError, HandlingStatus, MpscTcReceiver, PusServiceHelper,
};
/// This is a helper class for [std] environments to handle generic PUS 17 (test service) packets.
@@ -43,13 +43,14 @@ impl<
Self { service_helper }
}
pub fn poll_and_handle_next_tc(
pub fn poll_and_handle_next_tc<ErrorCb: FnMut(&PartialPusHandlingError)>(
&mut self,
mut error_callback: ErrorCb,
time_stamp: &[u8],
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(PusPacketHandlerResult::Empty);
return Ok(HandlingStatus::Empty.into());
}
let ecss_tc_and_token = possible_packet.unwrap();
self.service_helper
@@ -60,21 +61,16 @@ impl<
return Err(GenericConversionError::WrongService(tc.service()).into());
}
if tc.subservice() == 1 {
let mut partial_error = None;
let result = self
.service_helper
.verif_reporter()
.start_success(
&self.service_helper.common.tm_sender,
ecss_tc_and_token.token,
time_stamp,
)
.map_err(|_| PartialPusHandlingError::Verification);
let start_token = if let Ok(result) = result {
Some(result)
} else {
partial_error = Some(result.unwrap_err());
None
let opt_started_token = match self.service_helper.verif_reporter().start_success(
&self.service_helper.common.tm_sender,
ecss_tc_and_token.token,
time_stamp,
) {
Ok(token) => Some(token),
Err(e) => {
error_callback(&PartialPusHandlingError::Verification(e));
None
}
};
// Sequence count will be handled centrally in TM funnel.
// It is assumed that the verification reporter was built with a valid APID, so we use
@@ -83,42 +79,30 @@ impl<
SpHeader::new_for_unseg_tm(self.service_helper.verif_reporter().apid(), 0, 0);
let tc_header = PusTmSecondaryHeader::new_simple(17, 2, time_stamp);
let ping_reply = PusTmCreator::new(reply_header, tc_header, &[], true);
let result = self
if let Err(e) = self
.service_helper
.common
.tm_sender
.send_tm(self.service_helper.id(), PusTmVariant::Direct(ping_reply))
.map_err(PartialPusHandlingError::TmSend);
if let Err(err) = result {
partial_error = Some(err);
{
error_callback(&PartialPusHandlingError::TmSend(e));
}
if let Some(start_token) = start_token {
if self
.service_helper
.verif_reporter()
.completion_success(
&self.service_helper.common.tm_sender,
start_token,
time_stamp,
)
.is_err()
{
partial_error = Some(PartialPusHandlingError::Verification)
if let Some(start_token) = opt_started_token {
if let Err(e) = self.service_helper.verif_reporter().completion_success(
&self.service_helper.common.tm_sender,
start_token,
time_stamp,
) {
error_callback(&PartialPusHandlingError::Verification(e));
}
}
if let Some(partial_error) = partial_error {
return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
partial_error,
));
};
} else {
return Ok(PusPacketHandlerResult::CustomSubservice(
return Ok(DirectPusPacketHandlerResult::CustomSubservice(
tc.subservice(),
ecss_tc_and_token.token,
));
}
Ok(PusPacketHandlerResult::RequestHandled)
Ok(HandlingStatus::HandledOne.into())
}
}
@@ -158,8 +142,9 @@ mod tests {
};
use crate::pus::verification::{TcStateAccepted, VerificationToken};
use crate::pus::{
EcssTcInSharedStoreConverter, EcssTcInVecConverter, GenericConversionError, MpscTcReceiver,
MpscTmAsVecSender, PusPacketHandlerResult, PusPacketHandlingError,
DirectPusPacketHandlerResult, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
GenericConversionError, HandlingStatus, MpscTcReceiver, MpscTmAsVecSender,
PartialPusHandlingError, PusPacketHandlingError,
};
use crate::tmtc::PacketSenderWithSharedPool;
use crate::ComponentId;
@@ -221,9 +206,12 @@ mod tests {
}
}
impl SimplePusPacketHandler for Pus17HandlerWithStoreTester {
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
fn handle_one_tc(
&mut self,
) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
self.handler.poll_and_handle_next_tc(&time_stamp)
self.handler
.poll_and_handle_next_tc(|_partial_error: &PartialPusHandlingError| {}, &time_stamp)
}
}
@@ -276,9 +264,12 @@ mod tests {
}
}
impl SimplePusPacketHandler for Pus17HandlerWithVecTester {
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
fn handle_one_tc(
&mut self,
) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
self.handler.poll_and_handle_next_tc(&time_stamp)
self.handler
.poll_and_handle_next_tc(|_partial_error: &PartialPusHandlingError| {}, &time_stamp)
}
}
@@ -328,10 +319,11 @@ mod tests {
let mut test_harness = Pus17HandlerWithStoreTester::new(0);
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
let result = result.unwrap();
if let PusPacketHandlerResult::Empty = result {
} else {
panic!("unexpected result type {result:?}")
match result.unwrap() {
DirectPusPacketHandlerResult::Handled(handled) => {
assert_eq!(handled, HandlingStatus::Empty);
}
_ => panic!("unexpected result"),
}
}
@@ -367,7 +359,7 @@ mod tests {
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
let result = result.unwrap();
if let PusPacketHandlerResult::CustomSubservice(subservice, _) = result {
if let DirectPusPacketHandlerResult::CustomSubservice(subservice, _) = result {
assert_eq!(subservice, 200);
} else {
panic!("unexpected result type {result:?}")

View File

@@ -79,6 +79,7 @@
//! The [integration test](https://egit.irs.uni-stuttgart.de/rust/fsrc-launchpad/src/branch/main/fsrc-core/tests/verification_test.rs)
//! for the verification module contains examples of how this module could be used in a more complex
//! context involving multiple threads
use crate::params::{Params, WritableToBeBytes};
use crate::pus::{source_buffer_large_enough, EcssTmSender, EcssTmtcError};
use core::fmt::{Debug, Display, Formatter};
use core::hash::{Hash, Hasher};
@@ -353,7 +354,7 @@ pub struct FailParams<'stamp, 'fargs> {
impl<'stamp, 'fargs> FailParams<'stamp, 'fargs> {
pub fn new(
time_stamp: &'stamp [u8],
failure_code: &'fargs impl EcssEnumeration,
failure_code: &'fargs dyn EcssEnumeration,
failure_data: &'fargs [u8],
) -> Self {
Self {
@@ -381,7 +382,7 @@ impl<'stamp, 'fargs> FailParamsWithStep<'stamp, 'fargs> {
pub fn new(
time_stamp: &'stamp [u8],
step: &'fargs impl EcssEnumeration,
failure_code: &'fargs impl EcssEnumeration,
failure_code: &'fargs dyn EcssEnumeration,
failure_data: &'fargs [u8],
) -> Self {
Self {
@@ -1171,26 +1172,143 @@ pub mod alloc_mod {
}
}
/*
#[cfg(feature = "std")]
pub mod std_mod {
use std::sync::mpsc;
use crate::pool::StoreAddr;
use crate::pus::verification::VerificationReporterWithSender;
use super::alloc_mod::VerificationReporterWithSharedPoolSender;
pub type VerificationReporterWithSharedPoolMpscSender =
VerificationReporterWithSharedPoolSender<mpsc::Sender<StoreAddr>>;
pub type VerificationReporterWithSharedPoolMpscBoundedSender =
VerificationReporterWithSharedPoolSender<mpsc::SyncSender<StoreAddr>>;
pub type VerificationReporterWithVecMpscSender =
VerificationReporterWithSender<mpsc::Sender<alloc::vec::Vec<u8>>>;
pub type VerificationReporterWithVecMpscBoundedSender =
VerificationReporterWithSender<mpsc::SyncSender<alloc::vec::Vec<u8>>>;
pub struct FailParamHelper<'stamp, 'fargs, 'buf, 'params> {
pub timestamp: &'stamp [u8],
pub error_code: &'fargs dyn EcssEnumeration,
pub small_data_buf: &'buf mut [u8],
pub params: Option<&'params Params>,
}
/// This helper function simplifies generating completion failures where the error data has
/// the generic [Params] type.
///
/// A small data buffer needs to be supplied for the [Params::Heapless] type because all data
/// supplied as error data must be held in a slice. Passing a static buffer avoids dynamic memory
/// allocation for this case.
///
/// Please note that this specific function cannot propagate the [Params::Store] variant. It
/// might also be unable to propagate other error variants added in the future. On success, the
/// returned boolean denotes whether the error parameters were propagated properly.
pub fn handle_completion_failure_with_generic_params<TcState: WasAtLeastAccepted + Copy>(
tm_sender: &(impl EcssTmSender + ?Sized),
verif_token: VerificationToken<TcState>,
verif_reporter: &impl VerificationReportingProvider,
helper: FailParamHelper,
) -> Result<bool, EcssTmtcError> {
let mut error_params_propagated = true;
if helper.params.is_none() {
verif_reporter.completion_failure(
tm_sender,
verif_token,
FailParams::new(helper.timestamp, helper.error_code, &[]),
)?;
return Ok(true);
}
let error_params = helper.params.unwrap();
match error_params {
Params::Heapless(heapless_param) => {
heapless_param
.write_to_be_bytes(&mut helper.small_data_buf[..heapless_param.written_len()])?;
verif_reporter.completion_failure(
tm_sender,
verif_token,
FailParams::new(
helper.timestamp,
helper.error_code,
&helper.small_data_buf[..heapless_param.written_len()],
),
)?;
}
#[cfg(feature = "alloc")]
Params::Vec(vec) => {
verif_reporter.completion_failure(
tm_sender,
verif_token,
FailParams::new(helper.timestamp, helper.error_code, vec),
)?;
}
#[cfg(feature = "alloc")]
Params::String(str) => {
verif_reporter.completion_failure(
tm_sender,
verif_token,
FailParams::new(helper.timestamp, helper.error_code, str.as_bytes()),
)?;
}
_ => {
verif_reporter.completion_failure(
tm_sender,
verif_token,
FailParams::new(helper.timestamp, helper.error_code, &[]),
)?;
error_params_propagated = false;
}
}
Ok(error_params_propagated)
}
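
A minimal usage sketch, assuming hypothetical `tm_sender`, `reporter`, `accepted_token` and `timestamp` placeholder values along the lines of the test code further below:

let mut small_data_buf = [0u8; 16];
let error_code = EcssEnumU8::new(1);
let error_params = Params::String("failure reason".to_string());
// Ok(false) means the parameter variant (e.g. Params::Store) could not be propagated.
let propagated = handle_completion_failure_with_generic_params(
    &tm_sender,
    accepted_token,
    &reporter,
    FailParamHelper {
        timestamp: &timestamp,
        error_code: &error_code,
        small_data_buf: &mut small_data_buf,
        params: Some(&error_params),
    },
)
.expect("sending completion failure TM failed");
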
/// This function is similar to [handle_completion_failure_with_generic_params] but handles the
/// step failure case.
pub fn handle_step_failure_with_generic_params(
tm_sender: &(impl EcssTmSender + ?Sized),
verif_token: VerificationToken<TcStateStarted>,
verif_reporter: &impl VerificationReportingProvider,
helper: FailParamHelper,
step: &impl EcssEnumeration,
) -> Result<bool, EcssTmtcError> {
if helper.params.is_none() {
verif_reporter.step_failure(
tm_sender,
verif_token,
FailParamsWithStep::new(helper.timestamp, step, helper.error_code, &[]),
)?;
return Ok(true);
}
let error_params = helper.params.unwrap();
let mut error_params_propagated = true;
match error_params {
Params::Heapless(heapless_param) => {
heapless_param
.write_to_be_bytes(&mut helper.small_data_buf[..heapless_param.written_len()])?;
verif_reporter.step_failure(
tm_sender,
verif_token,
FailParamsWithStep::new(
helper.timestamp,
step,
helper.error_code,
&helper.small_data_buf[..heapless_param.written_len()],
),
)?;
}
#[cfg(feature = "alloc")]
Params::Vec(vec) => {
verif_reporter.step_failure(
tm_sender,
verif_token,
FailParamsWithStep::new(helper.timestamp, step, helper.error_code, vec),
)?;
}
#[cfg(feature = "alloc")]
Params::String(str) => {
verif_reporter.step_failure(
tm_sender,
verif_token,
FailParamsWithStep::new(helper.timestamp, step, helper.error_code, str.as_bytes()),
)?;
}
_ => {
verif_reporter.step_failure(
tm_sender,
verif_token,
FailParamsWithStep::new(helper.timestamp, step, helper.error_code, &[]),
)?;
error_params_propagated = false;
}
}
Ok(error_params_propagated)
}
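
The step failure helper is called the same way, with a started token and an additional step enum (again assuming the placeholder values from the sketch above):

let step = EcssEnumU8::new(2);
let propagated = handle_step_failure_with_generic_params(
    &tm_sender,
    started_token,
    &reporter,
    FailParamHelper {
        timestamp: &timestamp,
        error_code: &error_code,
        small_data_buf: &mut small_data_buf,
        params: Some(&error_params),
    },
    &step,
)
.expect("sending step failure TM failed");
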
*/
#[cfg(any(feature = "test_util", test))]
pub mod test_util {
@@ -1566,73 +1684,19 @@ pub mod test_util {
.pop_front()
.expect("report queue is empty")
}
/*
pub fn verification_info(&self, req_id: &RequestId) -> Option<VerificationStatus> {
let verif_map = self.verification_map.lock().unwrap();
let value = verif_map.borrow().get(req_id).cloned();
value
}
pub fn check_started(&self, req_id: &RequestId) -> bool {
let verif_map = self.verification_map.lock().unwrap();
if let Some(entry) = verif_map.borrow().get(req_id) {
return entry.started.unwrap_or(false);
}
false
}
fn generic_completion_checks(
entry: &VerificationStatus,
step: Option<u16>,
completion_success: bool,
) {
assert!(entry.accepted.unwrap());
assert!(entry.started.unwrap());
if let Some(step) = step {
assert!(entry.step_status.unwrap());
assert_eq!(entry.step, step);
} else {
assert!(entry.step_status.is_none());
}
assert_eq!(entry.completed.unwrap(), completion_success);
}
pub fn assert_completion_failure(
&self,
req_id: &RequestId,
step: Option<u16>,
error_code: u64,
) {
let verif_map = self.verification_map.lock().unwrap();
if let Some(entry) = verif_map.borrow().get(req_id) {
Self::generic_completion_checks(entry, step, false);
assert_eq!(entry.fail_enum.unwrap(), error_code);
return;
}
panic!("request not in verification map");
}
pub fn completion_status(&self, req_id: &RequestId) -> Option<bool> {
let verif_map = self.verification_map.lock().unwrap();
if let Some(entry) = verif_map.borrow().get(req_id) {
return entry.completed;
}
panic!("request not in verification map");
}
*/
}
}
#[cfg(test)]
pub mod tests {
use crate::params::Params;
use crate::pool::{SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig};
use crate::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0};
use crate::pus::tests::CommonTmInfo;
use crate::pus::verification::{
EcssTmSender, EcssTmtcError, FailParams, FailParamsWithStep, RequestId, TcStateNone,
VerificationReporter, VerificationReporterCfg, VerificationToken,
handle_step_failure_with_generic_params, EcssTmSender, EcssTmtcError, FailParams,
FailParamsWithStep, RequestId, TcStateNone, VerificationReporter, VerificationReporterCfg,
VerificationToken,
};
use crate::pus::{ChannelWithId, PusTmVariant};
use crate::request::MessageMetadata;
@@ -1640,6 +1704,7 @@ pub mod tests {
use crate::tmtc::{PacketSenderWithSharedPool, SharedPacketPool};
use crate::ComponentId;
use alloc::format;
use alloc::string::ToString;
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader};
use spacepackets::ecss::{
EcssEnumU16, EcssEnumU32, EcssEnumU8, EcssEnumeration, PusError, PusPacket,
@@ -1654,8 +1719,9 @@ pub mod tests {
use std::vec::Vec;
use super::{
DummyVerificationHook, SeqCountProviderSimple, TcStateAccepted, TcStateStarted,
VerificationHookProvider, VerificationReportingProvider, WasAtLeastAccepted,
handle_completion_failure_with_generic_params, DummyVerificationHook, FailParamHelper,
SeqCountProviderSimple, TcStateAccepted, TcStateStarted, VerificationHookProvider,
VerificationReportingProvider, WasAtLeastAccepted,
};
fn is_send<T: Send>(_: &T) {}
@@ -1663,6 +1729,7 @@ pub mod tests {
fn is_sync<T: Sync>(_: &T) {}
const EMPTY_STAMP: [u8; 7] = [0; 7];
const DUMMY_STAMP: &[u8] = &[0, 1, 0, 1, 0, 1, 0];
#[derive(Debug, Eq, PartialEq, Clone)]
struct TmInfo {
@@ -1740,8 +1807,8 @@ pub mod tests {
tc: Vec<u8>,
}
fn base_reporter(id: ComponentId) -> VerificationReporter {
let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
fn base_reporter(id: ComponentId, max_fail_data_len: usize) -> VerificationReporter {
let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, max_fail_data_len).unwrap();
VerificationReporter::new(id, &cfg)
}
@@ -1844,66 +1911,57 @@ pub mod tests {
.completion_failure(&self.sender, token, params)
}
fn completion_success_check(&mut self, incrementing_couters: bool) {
assert_eq!(self.sender.service_queue.borrow().len(), 3);
let mut current_seq_count = 0;
fn check_acceptance_success(&self, timestamp: &[u8; 7]) {
let cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo {
subservice: 1,
apid: TEST_APID,
seq_count: current_seq_count,
msg_counter: current_seq_count,
dest_id: self.reporter.dest_id(),
time_stamp: EMPTY_STAMP,
},
common: CommonTmInfo::new(1, TEST_APID, 0, 0, self.reporter.dest_id(), timestamp),
additional_data: None,
};
let mut info = self.sender.service_queue.borrow_mut().pop_front().unwrap();
let mut service_queue = self.sender.service_queue.borrow_mut();
assert!(service_queue.len() >= 1);
let info = service_queue.pop_front().unwrap();
assert_eq!(info, cmp_info);
}
if incrementing_couters {
current_seq_count += 1;
}
fn check_start_success(&mut self, seq_count: u16, msg_counter: u16, timestamp: &[u8]) {
let mut srv_queue = self.sender.service_queue.borrow_mut();
let cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo {
subservice: 3,
apid: TEST_APID,
msg_counter: current_seq_count,
seq_count: current_seq_count,
dest_id: self.reporter.dest_id(),
time_stamp: [0, 1, 0, 1, 0, 1, 0],
},
common: CommonTmInfo::new(
3,
TEST_APID,
seq_count,
msg_counter,
self.reporter.dest_id(),
timestamp,
),
additional_data: None,
};
info = self.sender.service_queue.borrow_mut().pop_front().unwrap();
let info = srv_queue.pop_front().unwrap();
assert_eq!(info, cmp_info);
}
if incrementing_couters {
current_seq_count += 1;
}
fn check_completion_success(&mut self, seq_count: u16, msg_counter: u16) {
let cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo {
subservice: 7,
apid: TEST_APID,
msg_counter: current_seq_count,
seq_count: current_seq_count,
dest_id: self.reporter.dest_id(),
time_stamp: EMPTY_STAMP,
},
common: CommonTmInfo::new(
7,
TEST_APID,
seq_count,
msg_counter,
self.reporter.dest_id(),
&EMPTY_STAMP,
),
additional_data: None,
};
info = self.sender.service_queue.borrow_mut().pop_front().unwrap();
let info = self.sender.service_queue.borrow_mut().pop_front().unwrap();
assert_eq!(info, cmp_info);
}
}
impl VerificationReporterTestbench<DummyVerificationHook> {
fn new(id: ComponentId, tc: PusTcCreator) -> Self {
let reporter = base_reporter(id);
fn new(id: ComponentId, tc: PusTcCreator, max_fail_data_len: usize) -> Self {
let reporter = base_reporter(id, max_fail_data_len);
Self {
id,
sender: TestSender::default(),
@@ -1913,36 +1971,10 @@ pub mod tests {
}
}
fn acceptance_check(&self, time_stamp: &[u8; 7]) {
fn check_acceptance_failure(&mut self, timestamp: &[u8; 7]) {
let cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo {
subservice: 1,
apid: TEST_APID,
seq_count: 0,
msg_counter: 0,
dest_id: self.reporter.dest_id(),
time_stamp: *time_stamp,
},
additional_data: None,
};
let mut service_queue = self.sender.service_queue.borrow_mut();
assert_eq!(service_queue.len(), 1);
let info = service_queue.pop_front().unwrap();
assert_eq!(info, cmp_info);
}
fn acceptance_fail_check(&mut self, stamp_buf: [u8; 7]) {
let cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo {
subservice: 2,
seq_count: 0,
apid: TEST_APID,
msg_counter: 0,
dest_id: self.reporter.dest_id(),
time_stamp: stamp_buf,
},
common: CommonTmInfo::new(2, TEST_APID, 0, 0, self.reporter.dest_id(), timestamp),
additional_data: Some([0, 2].to_vec()),
};
let service_queue = self.sender.service_queue.get_mut();
@@ -1951,12 +1983,12 @@ pub mod tests {
assert_eq!(info, cmp_info);
}
fn start_fail_check(&mut self, fail_data_raw: [u8; 4]) {
fn check_start_failure(&mut self, fail_data_raw: [u8; 4]) {
let mut srv_queue = self.sender.service_queue.borrow_mut();
assert_eq!(srv_queue.len(), 2);
let mut cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo::new_zero_seq_count(1, TEST_APID, 0, EMPTY_STAMP),
common: CommonTmInfo::new_zero_seq_count(1, TEST_APID, 0, &EMPTY_STAMP),
additional_data: None,
};
let mut info = srv_queue.pop_front().unwrap();
@@ -1964,148 +1996,67 @@ pub mod tests {
cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo::new_zero_seq_count(4, TEST_APID, 0, EMPTY_STAMP),
common: CommonTmInfo::new_zero_seq_count(4, TEST_APID, 0, &EMPTY_STAMP),
additional_data: Some([&[22], fail_data_raw.as_slice()].concat().to_vec()),
};
info = srv_queue.pop_front().unwrap();
assert_eq!(info, cmp_info);
}
fn step_success_check(&mut self, time_stamp: &[u8; 7]) {
let mut cmp_info = TmInfo {
fn check_step_success(&mut self, step: u8, timestamp: &[u8; 7]) {
let cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo::new_zero_seq_count(1, TEST_APID, 0, *time_stamp),
additional_data: None,
common: CommonTmInfo::new_zero_seq_count(5, TEST_APID, 0, timestamp),
additional_data: Some([step].to_vec()),
};
let mut srv_queue = self.sender.service_queue.borrow_mut();
let mut info = srv_queue.pop_front().unwrap();
assert_eq!(info, cmp_info);
cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo::new_zero_seq_count(3, TEST_APID, 0, *time_stamp),
additional_data: None,
};
info = srv_queue.pop_front().unwrap();
assert_eq!(info, cmp_info);
cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo::new_zero_seq_count(5, TEST_APID, 0, *time_stamp),
additional_data: Some([0].to_vec()),
};
info = srv_queue.pop_front().unwrap();
assert_eq!(info, cmp_info);
cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo::new_zero_seq_count(5, TEST_APID, 0, *time_stamp),
additional_data: Some([1].to_vec()),
};
info = srv_queue.pop_front().unwrap();
let info = srv_queue.pop_front().unwrap();
assert_eq!(info, cmp_info);
}
fn check_step_failure(&mut self, fail_data_raw: [u8; 4]) {
assert_eq!(self.sender.service_queue.borrow().len(), 4);
let mut cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo::new_zero_seq_count(
1,
TEST_APID,
self.reporter.dest_id(),
EMPTY_STAMP,
),
additional_data: None,
};
let mut info = self.sender.service_queue.borrow_mut().pop_front().unwrap();
assert_eq!(info, cmp_info);
cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo::new_zero_seq_count(
3,
TEST_APID,
self.reporter.dest_id(),
[0, 1, 0, 1, 0, 1, 0],
),
additional_data: None,
};
info = self.sender.service_queue.borrow_mut().pop_front().unwrap();
assert_eq!(info, cmp_info);
cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo::new_zero_seq_count(
5,
TEST_APID,
self.reporter.dest_id(),
EMPTY_STAMP,
),
additional_data: Some([0].to_vec()),
};
info = self.sender.service_queue.get_mut().pop_front().unwrap();
assert_eq!(info, cmp_info);
cmp_info = TmInfo {
fn check_step_failure(
&mut self,
step: &impl EcssEnumeration,
error_code: &impl EcssEnumeration,
fail_data: &[u8],
) {
let mut additional_data = Vec::new();
additional_data.extend(step.to_vec());
additional_data.extend(error_code.to_vec());
additional_data.extend(fail_data);
let cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo::new_zero_seq_count(
6,
TEST_APID,
self.reporter.dest_id(),
EMPTY_STAMP,
),
additional_data: Some(
[
[1].as_slice(),
&[0, 0, 0x10, 0x20],
fail_data_raw.as_slice(),
]
.concat()
.to_vec(),
&EMPTY_STAMP,
),
additional_data: Some(additional_data),
};
info = self.sender.service_queue.get_mut().pop_front().unwrap();
let info = self.sender.service_queue.get_mut().pop_front().unwrap();
assert_eq!(info, cmp_info);
}
fn completion_fail_check(&mut self) {
assert_eq!(self.sender.service_queue.borrow().len(), 3);
let mut cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo::new_zero_seq_count(
1,
TEST_APID,
self.reporter.dest_id(),
EMPTY_STAMP,
),
additional_data: None,
};
let mut info = self.sender.service_queue.get_mut().pop_front().unwrap();
assert_eq!(info, cmp_info);
cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo::new_zero_seq_count(
3,
TEST_APID,
self.reporter.dest_id(),
[0, 1, 0, 1, 0, 1, 0],
),
additional_data: None,
};
info = self.sender.service_queue.get_mut().pop_front().unwrap();
assert_eq!(info, cmp_info);
cmp_info = TmInfo {
fn check_completion_failure(
&mut self,
error_code: &impl EcssEnumeration,
fail_data: &[u8],
) {
let mut additional_data = Vec::new();
additional_data.extend(error_code.to_vec());
additional_data.extend(fail_data);
let cmp_info = TmInfo {
requestor: MessageMetadata::new(self.request_id.into(), self.id),
common: CommonTmInfo::new_zero_seq_count(
8,
TEST_APID,
self.reporter.dest_id(),
EMPTY_STAMP,
&EMPTY_STAMP,
),
additional_data: Some([0, 0, 0x10, 0x20].to_vec()),
additional_data: Some(additional_data),
};
info = self.sender.service_queue.get_mut().pop_front().unwrap();
let info = self.sender.service_queue.get_mut().pop_front().unwrap();
assert_eq!(info, cmp_info);
}
}
@@ -2128,7 +2079,7 @@ pub mod tests {
#[test]
fn test_state() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping());
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
assert_eq!(testbench.reporter.apid(), TEST_APID);
testbench.reporter.set_apid(TEST_APID + 1);
assert_eq!(testbench.reporter.apid(), TEST_APID + 1);
@@ -2136,43 +2087,43 @@ pub mod tests {
#[test]
fn test_basic_acceptance_success() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping());
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let token = testbench.init();
testbench
.acceptance_success(token, &EMPTY_STAMP)
.expect("sending acceptance success failed");
testbench.acceptance_check(&EMPTY_STAMP);
testbench.check_acceptance_success(&EMPTY_STAMP);
}
#[test]
fn test_basic_acceptance_failure() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping());
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let init_token = testbench.init();
let stamp_buf = [1, 2, 3, 4, 5, 6, 7];
let timestamp = [1, 2, 3, 4, 5, 6, 7];
let fail_code = EcssEnumU16::new(2);
let fail_params = FailParams::new_no_fail_data(stamp_buf.as_slice(), &fail_code);
let fail_params = FailParams::new_no_fail_data(timestamp.as_slice(), &fail_code);
testbench
.acceptance_failure(init_token, fail_params)
.expect("sending acceptance failure failed");
testbench.acceptance_fail_check(stamp_buf);
testbench.check_acceptance_failure(&timestamp);
}
#[test]
fn test_basic_acceptance_failure_with_helper() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping());
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let init_token = testbench.init();
let stamp_buf = [1, 2, 3, 4, 5, 6, 7];
let timestamp = [1, 2, 3, 4, 5, 6, 7];
let fail_code = EcssEnumU16::new(2);
let fail_params = FailParams::new_no_fail_data(stamp_buf.as_slice(), &fail_code);
let fail_params = FailParams::new_no_fail_data(timestamp.as_slice(), &fail_code);
testbench
.acceptance_failure(init_token, fail_params)
.expect("sending acceptance failure failed");
testbench.acceptance_fail_check(stamp_buf);
testbench.check_acceptance_failure(&timestamp);
}
#[test]
fn test_acceptance_fail_data_too_large() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping());
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 8);
let init_token = testbench.init();
let stamp_buf = [1, 2, 3, 4, 5, 6, 7];
let fail_code = EcssEnumU16::new(2);
@@ -2204,7 +2155,7 @@ pub mod tests {
#[test]
fn test_basic_acceptance_failure_with_fail_data() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping());
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let fail_code = EcssEnumU8::new(10);
let fail_data = EcssEnumU32::new(12);
let mut fail_data_raw = [0; 4];
@@ -2216,7 +2167,7 @@ pub mod tests {
.expect("sending acceptance failure failed");
let cmp_info = TmInfo {
requestor: MessageMetadata::new(testbench.request_id.into(), testbench.id),
common: CommonTmInfo::new_zero_seq_count(2, TEST_APID, 0, EMPTY_STAMP),
common: CommonTmInfo::new_zero_seq_count(2, TEST_APID, 0, &EMPTY_STAMP),
additional_data: Some([10, 0, 0, 0, 12].to_vec()),
};
let mut service_queue = testbench.sender.service_queue.borrow_mut();
@@ -2227,7 +2178,7 @@ pub mod tests {
#[test]
fn test_start_failure() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping());
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let init_token = testbench.init();
let fail_code = EcssEnumU8::new(22);
let fail_data: i32 = -12;
@@ -2241,12 +2192,12 @@ pub mod tests {
testbench
.start_failure(accepted_token, fail_params)
.expect("Start failure failure");
testbench.start_fail_check(fail_data_raw);
testbench.check_start_failure(fail_data_raw);
}
#[test]
fn test_start_failure_with_helper() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping());
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let token = testbench.init();
let fail_code = EcssEnumU8::new(22);
let fail_data: i32 = -12;
@@ -2260,12 +2211,12 @@ pub mod tests {
testbench
.start_failure(accepted_token, fail_params)
.expect("start failure failed");
testbench.start_fail_check(fail_data_raw);
testbench.check_start_failure(fail_data_raw);
}
#[test]
fn test_steps_success() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping());
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let token = testbench.init();
let accepted_token = testbench
.acceptance_success(token, &EMPTY_STAMP)
@@ -2280,12 +2231,15 @@ pub mod tests {
.step_success(&started_token, &EMPTY_STAMP, EcssEnumU8::new(1))
.expect("step 1 failed");
assert_eq!(testbench.sender.service_queue.borrow().len(), 4);
testbench.step_success_check(&EMPTY_STAMP);
testbench.check_acceptance_success(&EMPTY_STAMP);
testbench.check_start_success(0, 0, &EMPTY_STAMP);
testbench.check_step_success(0, &EMPTY_STAMP);
testbench.check_step_success(1, &EMPTY_STAMP);
}
#[test]
fn test_step_failure() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping());
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let token = testbench.init();
let fail_code = EcssEnumU32::new(0x1020);
let fail_data: f32 = -22.3232;
@@ -2303,7 +2257,7 @@ pub mod tests {
.acceptance_success(token, &EMPTY_STAMP)
.expect("Sending acceptance success failed");
let started_token = testbench
.start_success(accepted_token, &[0, 1, 0, 1, 0, 1, 0])
.start_success(accepted_token, DUMMY_STAMP)
.expect("Sending start success failed");
testbench
.step_success(&started_token, &EMPTY_STAMP, EcssEnumU8::new(0))
@@ -2311,12 +2265,15 @@ pub mod tests {
testbench
.step_failure(started_token, fail_params)
.expect("Step failure failed");
testbench.check_step_failure(fail_data_raw);
testbench.check_acceptance_success(&EMPTY_STAMP);
testbench.check_start_success(0, 0, DUMMY_STAMP);
testbench.check_step_success(0, &EMPTY_STAMP);
testbench.check_step_failure(&fail_step, &fail_code, &fail_data_raw);
}
#[test]
fn test_completion_failure() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping());
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 16);
let token = testbench.init();
let fail_code = EcssEnumU32::new(0x1020);
let fail_params = FailParams::new_no_fail_data(&EMPTY_STAMP, &fail_code);
@@ -2325,29 +2282,34 @@ pub mod tests {
.acceptance_success(token, &EMPTY_STAMP)
.expect("Sending acceptance success failed");
let started_token = testbench
.start_success(accepted_token, &[0, 1, 0, 1, 0, 1, 0])
.start_success(accepted_token, DUMMY_STAMP)
.expect("Sending start success failed");
testbench
.completion_failure(started_token, fail_params)
.expect("Completion failure");
testbench.completion_fail_check();
testbench.check_acceptance_success(&EMPTY_STAMP);
testbench.check_start_success(0, 0, DUMMY_STAMP);
testbench.check_completion_failure(&fail_code, &[]);
}
#[test]
fn test_complete_success_sequence() {
let mut testbench =
VerificationReporterTestbench::new(TEST_COMPONENT_ID_0.id(), create_generic_ping());
VerificationReporterTestbench::new(TEST_COMPONENT_ID_0.id(), create_generic_ping(), 16);
let token = testbench.init();
let accepted_token = testbench
.acceptance_success(token, &EMPTY_STAMP)
.expect("Sending acceptance success failed");
let started_token = testbench
.start_success(accepted_token, &[0, 1, 0, 1, 0, 1, 0])
.start_success(accepted_token, DUMMY_STAMP)
.expect("Sending start success failed");
testbench
.completion_success(started_token, &EMPTY_STAMP)
.expect("Sending completion success failed");
testbench.completion_success_check(false);
testbench.check_acceptance_success(&EMPTY_STAMP);
testbench.check_start_success(0, 0, DUMMY_STAMP);
testbench.check_completion_success(0, 0);
}
#[test]
@@ -2367,6 +2329,83 @@ pub mod tests {
testbench
.completion_success(started_token, &EMPTY_STAMP)
.expect("Sending completion success failed");
testbench.completion_success_check(true);
testbench.check_acceptance_success(&EMPTY_STAMP);
testbench.check_start_success(1, 1, DUMMY_STAMP);
testbench.check_completion_success(2, 2);
}
#[test]
fn test_completion_failure_helper_string_param() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 32);
let token = testbench.init();
let accepted_token = testbench
.acceptance_success(token, &EMPTY_STAMP)
.expect("Sending acceptance success failed");
let mut small_data_buf: [u8; 16] = [0; 16];
let fail_code = EcssEnumU8::new(1);
let fail_data = "error 404 oh no".to_string();
let fail_params = Params::String(fail_data.clone());
let result = handle_completion_failure_with_generic_params(
&testbench.sender,
accepted_token,
&testbench.reporter,
FailParamHelper {
timestamp: &EMPTY_STAMP,
error_code: &fail_code,
small_data_buf: &mut small_data_buf,
params: Some(&fail_params),
},
);
assert!(result.unwrap());
testbench.check_acceptance_success(&EMPTY_STAMP);
testbench.check_completion_failure(&fail_code, fail_data.as_bytes());
}
#[test]
fn test_step_failure_helper_string_param() {
let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping(), 32);
let token = testbench.init();
let accepted_token = testbench
.acceptance_success(token, &EMPTY_STAMP)
.expect("Sending acceptance success failed");
let started_token = testbench
.start_success(accepted_token, &EMPTY_STAMP)
.expect("Sending start success failed");
let mut small_data_buf: [u8; 16] = [0; 16];
let step = EcssEnumU8::new(2);
let fail_code = EcssEnumU8::new(1);
let fail_data = "AAAAAAAAAAAHHHHHH".to_string();
let fail_params = Params::String(fail_data.clone());
let result = handle_step_failure_with_generic_params(
&testbench.sender,
started_token,
&testbench.reporter,
FailParamHelper {
timestamp: &EMPTY_STAMP,
error_code: &fail_code,
small_data_buf: &mut small_data_buf,
params: Some(&fail_params),
},
&step,
);
assert!(result.unwrap());
testbench.check_acceptance_success(&EMPTY_STAMP);
testbench.check_start_success(0, 0, &EMPTY_STAMP);
testbench.check_step_failure(&step, &fail_code, fail_data.as_bytes());
}
#[test]
fn test_completion_failure_helper_vec_param() {
// TODO: Test this.
}
#[test]
fn test_completion_failure_helper_raw_param() {
// TODO: Test this.
}
#[test]
fn test_completion_failure_helper_store_param_ignored() {
// TODO: Test this.
}
}


@@ -10,7 +10,7 @@ pub use std_mod::*;
use spacepackets::{
ecss::{tc::IsPusTelecommand, PusPacket},
ByteConversionError, CcsdsPacket,
ByteConversionError,
};
use crate::{queue::GenericTargetedMessagingError, ComponentId};
@@ -47,7 +47,7 @@ impl UniqueApidTargetId {
/// This function attempts to build the ID from a PUS telecommand by extracting the APID
/// and the first four bytes of the application data field as the target field.
pub fn from_pus_tc(
tc: &(impl CcsdsPacket + PusPacket + IsPusTelecommand),
tc: &(impl PusPacket + IsPusTelecommand),
) -> Result<Self, ByteConversionError> {
if tc.user_data().len() < 4 {
return Err(ByteConversionError::FromSliceTooSmall {


@@ -21,7 +21,7 @@ struct EventIntrospection {
}
//#[event(descr="This is some info event")]
const INFO_EVENT_0: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::const_new(0, 0);
const INFO_EVENT_0: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(0, 0);
const INFO_EVENT_0_ERASED: EventU32 = EventU32::const_from_info(INFO_EVENT_0);
// This is ideally auto-generated
@@ -36,7 +36,7 @@ const INFO_EVENT_0_INTROSPECTION: EventIntrospection = EventIntrospection {
};
//#[event(descr="This is some low severity event")]
const SOME_LOW_SEV_EVENT: EventU32TypedSev<SeverityLow> = EventU32TypedSev::const_new(0, 12);
const SOME_LOW_SEV_EVENT: EventU32TypedSev<SeverityLow> = EventU32TypedSev::new(0, 12);
//const EVENT_LIST: [&'static Event; 2] = [&INFO_EVENT_0, &SOME_LOW_SEV_EVENT];
@@ -47,7 +47,7 @@ const TEST_GROUP_NAME_NAME: &str = "TEST_GROUP_NAME";
//#[event(desc="Some medium severity event")]
const MEDIUM_SEV_EVENT_IN_OTHER_GROUP: EventU32TypedSev<SeverityMedium> =
EventU32TypedSev::const_new(TEST_GROUP_NAME, 0);
EventU32TypedSev::new(TEST_GROUP_NAME, 0);
const MEDIUM_SEV_EVENT_IN_OTHER_GROUP_REDUCED: EventU32 =
EventU32::const_from_medium(MEDIUM_SEV_EVENT_IN_OTHER_GROUP);
@@ -62,7 +62,7 @@ const MEDIUM_SEV_EVENT_IN_OTHER_GROUP_INTROSPECTION: EventIntrospection = EventI
info: "Some medium severity event",
};
const CONST_SLICE: &'static [u8] = &[0, 1, 2, 3];
const CONST_SLICE: &[u8] = &[0, 1, 2, 3];
const INTROSPECTION_FOR_TEST_GROUP_0: [&EventIntrospection; 2] =
[&INFO_EVENT_0_INTROSPECTION, &INFO_EVENT_0_INTROSPECTION];


@@ -14,9 +14,8 @@ use spacepackets::ecss::{PusError, PusPacket};
use std::sync::mpsc::{self, SendError, TryRecvError};
use std::thread;
const INFO_EVENT: EventU32TypedSev<SeverityInfo> =
EventU32TypedSev::<SeverityInfo>::const_new(1, 0);
const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5);
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<SeverityInfo>::new(1, 0);
const LOW_SEV_EVENT: EventU32 = EventU32::new(Severity::Low, 1, 5);
const EMPTY_STAMP: [u8; 7] = [0; 7];
const TEST_APID: u16 = 0x02;
const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, 0x05);