5 Commits

- 2756670efe cargo fmt, 2022-12-19 17:01:56 +01:00. Some checks failed (Rust/spacepackets/pipeline/head: There was a failure building this commit).
- eb5ace0658 Merge branch 'main' into all_features, 2022-12-19 16:36:33 +01:00. All checks were successful (Rust/spacepackets/pipeline/head: This commit looks good).
- b9cd08cefe Merge branch 'main' into all_features, 2022-12-19 11:03:46 +01:00. All checks were successful (Rust/spacepackets/pipeline/head: This commit looks good).
- 0073db95f9 Merge branch 'add_cuc_time_impl' into all_features, 2022-12-19 00:01:28 +01:00. All checks were successful (Rust/spacepackets/pipeline/head: This commit looks good).
- 251fcdd6c8 Merge branch 'add_cuc_time_impl' into all_features, 2022-12-18 16:23:25 +01:00. All checks were successful (Rust/spacepackets/pipeline/head: This commit looks good).
34 changed files with 1729 additions and 9412 deletions

View File

@ -20,21 +20,6 @@ jobs:
command: check
args: --release
msrv:
name: Check with MSRV
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: 1.61.0
override: true
profile: minimal
- uses: actions-rs/cargo@v1
with:
command: check
args: --release
cross-check:
name: Check Cross
runs-on: ubuntu-latest
@ -73,21 +58,6 @@ jobs:
command: fmt
args: --all -- --check
check-doc:
name: Check Documentation Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
profile: minimal
- uses: actions-rs/cargo@v1
with:
command: doc
args: --all-features
clippy:
name: Clippy
runs-on: ubuntu-latest

7
.gitignore vendored
View File

@ -1,7 +0,0 @@
# Rust
/target
/Cargo.lock
# CLion
/.idea/*
!/.idea/runConfigurations

View File

@ -1,19 +0,0 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Check" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
<option name="command" value="check --all-features" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="emulateTerminal" value="false" />
<option name="channel" value="DEFAULT" />
<option name="requiredFeatures" value="true" />
<option name="allFeatures" value="true" />
<option name="withSudo" value="false" />
<option name="buildTarget" value="REMOTE" />
<option name="backtrace" value="SHORT" />
<envs />
<option name="isRedirectInput" value="false" />
<option name="redirectInputPath" value="" />
<method v="2">
<option name="CARGO.BUILD_TASK_PROVIDER" enabled="true" />
</method>
</configuration>
</component>

View File

@ -1,19 +0,0 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Test" type="CargoCommandRunConfiguration" factoryName="Cargo Command" nameIsGenerated="true">
<option name="command" value="test" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="emulateTerminal" value="false" />
<option name="channel" value="DEFAULT" />
<option name="requiredFeatures" value="true" />
<option name="allFeatures" value="true" />
<option name="withSudo" value="false" />
<option name="buildTarget" value="REMOTE" />
<option name="backtrace" value="SHORT" />
<envs />
<option name="isRedirectInput" value="false" />
<option name="redirectInputPath" value="" />
<method v="2">
<option name="CARGO.BUILD_TASK_PROVIDER" enabled="true" />
</method>
</configuration>
</component>

View File

@ -8,210 +8,6 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.7.0-beta.0] 2023-08-16
- Moved MSRV from v1.60 to v1.61.
## Changed
- `PusPacket` trait: `user_data` now returns `&[u8]` instead of `Option<&[u8]>`. Empty user data
can simply be an empty slice.
- Moved ECSS TC components from `tc` to `ecss.tc`.
- Moved ECSS TM components from `tm` to `ecss.tm`.
- Converted `PusTc` class to more specialized `PusTcCreator` and `PusTcReader`
classes. The old `PusTc` class is deprecated now.
- Converted `PusTm` class to more specialized `PusTmCreator` and `PusTmReader`
classes. The old `PusTm` class is deprecated now.
- Implement `Display` and `Error` for `StdTimestampError` properly.
- Remove some redundant `Error` suffixes for enum error variants.
- `CommonPduConfig`: `new_with_defaults` replaced by `new_with_byte_fields`.
## Added
- `source_data` and `app_data` API provided for PUS TM and PUS TC reader classes. These simply
call `user_data` but are also in line with the PUS packet standard names for those fields.
- Added new marker trait `IsPusTelemetry` implemented by `PusTmCreator` and `PusTmReader`.
- Added new marker trait `IsPusTelecommand` implemented by `PusTcCreator` and `PusTcReader`.
- `metadata_param` getter method for the `MetadataPdu` object.
- `Default` impl for CFDP `ChecksumType`
- `Default` impl for CFDP `CommonPduConfig`
## Fixed
- All `MetadataGenericParam` fields are now public.
- New setter method `set_source_and_dest_id` for `CommonPduConfig`.
# [v0.6.0] 2023-07-06
## Added
- Added new `util` module which contains the following (new) helper modules:
- `UnsignedEnum` trait as an abstraction for unsigned byte fields with variable lengths. It is
not tied to the ECSS PFC value like the `EcssEnumeration` trait. The method to retrieve
the size of the unsigned enumeration in bytes is now called `size`.
- `GenericUnsignedByteField<TYPE>` and helper typedefs `UnsignedU8`, `UnsignedU16`, `UnsignedU32`
and `UnsignedU64` as helper types implementing `UnsignedEnum`
- `UnsignedByteField` as a type-erased helper.
- Initial CFDP support: Added PDU packet implementation.
- Added `SerializablePusPacket` as a generic abstraction for PUS packets which are
writable.
- Added new `PusTmZeroCopyWriter` class which allows setting fields on a raw TM packet,
which might be more efficient than modifying and re-writing a packet with the
`PusTm` object.
## Changed
- The `EcssEnumeration` trait now requires the `UnsignedEnum` trait and only adds the `pfc` method to it.
- Renamed `byte_width` usages to `size` (part of the new `UnsignedEnum` trait).
- Moved `ecss::CRC_CCITT_FALSE` CRC constant to the root module. This CRC type is not just used by
the PUS standard, but by the CCSDS Telecommand standard and the CFDP standard as well.
# [v0.5.4] 2023-02-12
## Added
- `Clone` trait requirement for `time::cds::ProvidesDaysLen` trait.
- Added `Copy` and `Clone` derives for `DaysLen16Bits` and `DaysLen24Bits`.
# [v0.5.3] 2023-02-05
## Added
- `num_enum` dependency to avoid boilerplate code for primitive to enum conversions, for example
for the PUS subservices.
- `ecss.event` module containing a `Subservice` enum.
- `ecss.verification` module containing a `Subservice` enum.
- `ecss.scheduling` module containing a `Subservice` enum and some other helper enumerations.
- `ecss.hk` module containing a `Subservice` enum.
## Changed
- Added missing Service IDs to `ecss.PusServiceId` and marked it `#[non_exhaustive]`.
## Fixed
- `time.UnixTimestamp`: All constructors and `From` conversions now use the `new` constructor,
which should cause a correct conversion of 0 subsecond milliseconds to a `None` value.
# [v0.5.2] 2023-01-26
## Added
- Added `.gitignore`.
## Fixed
- Correct implementation of the `PartialEq` trait for `PusTc` and `PusTm`. The previous auto-derivations
were incorrect because they also compared fields unrelated to the raw byte representation.
## Changed
- Renamed the `PusTc` `raw` method to `raw_bytes` and added better docs to avoid confusion.
Deprecated `raw` to avoid a breaking change.
- Added `raw_bytes` method to `PusTm`.
# [v0.5.1] 2023-01-22
## Added
- `time::cds::TimeProvider`
- Add `Ord` and `PartialOrd`, use custom `PartialEq` impl to account for precision correctly.
- Add `precision_as_ns` function which converts microsecond and picosecond precision values
into nanoseconds.
- Add conversion trait to convert `cds::TimeProvider<DaysLen16Bits>` into
`cds::TimeProvider<DaysLen24Bits>` and vice-versa.
- `time::UnixTimestamp`
- Add `Ord` and `PartialOrd` implementations.
- Add `Add<Duration>` and `AddAssign<Duration>` implementations.
## Fixed
- `time::cds::TimeProvider`: Fixed a bug where subsecond milliseconds were not accounted for
when the provider has no submillisecond precision.
# [v0.5.0] 2023-01-20
The timestamp of `PusTm` is now optional. See the Added and Changed sections for details.
## Added
- `PusTmSecondaryHeader`: New `new_simple_no_timestamp` API to create secondary header without
timestamp.
- `PusTm`: Add `new_simple_no_timestamp` method to create TM without timestamp
- New `UnixTimestamp` abstraction which contains the unix seconds as an `i64`
and an optional subsecond millisecond counter (`u16`)
- `MS_PER_DAY` constant.
- CUC: Added `from_date_time` and `from_unix_stamp` constructors for time provider.
- CUC: Add `Add<Duration>` and `AddAssign<Duration>` impl for time provider.
### CDS time module
- Implement `Add<Duration>` and `AddAssign<Duration>` for time providers, which allows
easily adding offsets to the providers.
- Implement `TryFrom<DateTime<Utc>>` for time providers.
- `get_dyn_time_provider_from_bytes`: Requires `alloc` support and returns
the correct `TimeProvider` instance wrapped as a boxed trait object
`Box<DynCdsTimeProvider>` by checking the length of days field.
- Added constructor function to create the time provider
from `chrono::DateTime<Utc>` and a generic UNIX timestamp (`i64` seconds
and subsecond milliseconds).
- `MAX_DAYS_24_BITS` which contains maximum value which can be supplied
to the days field of a CDS time provider with 24 bits days field width.
- New `CdsTimestamp` trait which encapsulates common fields for all CDS time providers.
- `from_unix_secs_with_u24_days` and `from_unix_secs_with_u16_days` which create
the time provider from a `UnixTimestamp` reference.
- `from_dt_with_u16_days`, `from_dt_with_u24_days` and their `..._us_precision` and
`..._ps_precision` variants which allow creating time providers from
a `chrono::DateTime<Utc>`.
- Add `from_bytes_with_u24_days` and `from_bytes_with_u16_days` associated methods
## Changed
- (breaking) `unix_epoch_to_ccsds_epoch`: Expect and return `i64` instead of `u64` now.
- (breaking) `ccsds_epoch_to_unix_epoch`: Expect and return `i64` instead of `u64` now.
- (breaking) `PusTmSecondaryHeader`: Timestamp is optional now, which translates to a
timestamp of size 0.
- (breaking): `PusTm`: Renamed `time_stamp` method to `timestamp`; it also returns
`Option<&'src_data [u8]>` now.
- (breaking): `PusTmSecondaryHeader`: Renamed `time_stamp` field to `timestamp` for consistency.
- (breaking): Renamed `from_now_with_u24_days_and_us_prec` to `from_now_with_u24_days_us_precision`.
Also did the same for the `u16` variant.
- (breaking): Renamed `from_now_with_u24_days_and_ps_prec` to `from_now_with_u24_days_ps_precision`.
Also did the same for the `u16` variant.
- `CcsdsTimeProvider` trait (breaking):
- Add new `unix_stamp` method returning the new `UnixTimeStamp` struct.
- Add new `subsecond_millis` method returning counter `Option<u16>`.
- Default impl for `unix_stamp` which re-uses `subsecond_millis` and
existing `unix_seconds` method.
- `TimestampError` (breaking): Add `DateBeforeCcsdsEpoch` error type
because the new CDS API allows supplying invalid date times before the CCSDS epoch.
Marked `TimestampError` with the `#[non_exhaustive]` attribute to prevent
future breakage if new error variants are added.
# [v0.4.2] 14.01.2023
## Fixed
- CDS timestamp: Fixed another small logic error for stamp creation from the current
time with picosecond precision.
PR: https://egit.irs.uni-stuttgart.de/rust/spacepackets/pulls/8
# [v0.4.1] 14.01.2023
## Fixed
- CDS timestamp: The conversion functions from the current time were buggy
when specifying picosecond precision, which could lead to overflowing
multiplications and/or incorrect precision fields.
PR: https://egit.irs.uni-stuttgart.de/rust/spacepackets/pulls/7
# [v0.4.0] 10.01.2023
## Fixed
- Remove `Default` derive on CDS time provider. This can lead to uninitialized preamble fields.
## Changed
- `serde` support is now optional and behind the `serde` feature.
@ -223,18 +19,12 @@ The timestamp of `PusTm` is now optional. See Added and Changed section for deta
The function now returns the remaining slice as well.
- All CDS specific functionality was moved into the `cds` submodule of the `time`
module. `CdsShortTimeProvider` was renamed to `TimeProvider`.
PR: https://egit.irs.uni-stuttgart.de/rust/spacepackets/pulls/3
## Added
- `SpHeader` getter function `sp_header` added for `PusTc`
PR: https://egit.irs.uni-stuttgart.de/rust/spacepackets/pulls/6
- Added PFC enumerations: `ecss::UnsignedPfc` and `ecss::RealPfc`.
PR: https://egit.irs.uni-stuttgart.de/rust/spacepackets/pulls/5
- Added `std::error::Error` implementation for all error enumerations if the `std` feature
is enabled.
- CUC timestamp implementation as specified in CCSDS 301.0-B-4 section 3.2.
PR: https://egit.irs.uni-stuttgart.de/rust/spacepackets/pulls/4/files
- ASCII timestamps as specified in CCSDS 301.0-B-4 section 3.5.
- Added MSRV in `Cargo.toml` with the `rust-version` field set to Rust 1.60.
- `serde` `Serialize` and `Deserialize` added to all types.
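
For orientation, the sketch below illustrates the `util` module helpers described in the v0.6.0 changelog entries above together with the `CommonPduConfig::new_with_byte_fields` constructor mentioned for v0.7.0-beta.0. The `spacepackets::util` path, the `UbfU8` alias, and the `size` method are assumptions derived from the changelog wording and the test code further down in this diff, not verified API.

```rust
// Sketch only: assumes the crate layout described in this diff (crate name `spacepackets`,
// helpers in the `util` module, CFDP PDU configuration in `cfdp::pdu`).
use spacepackets::cfdp::pdu::CommonPduConfig;
use spacepackets::util::{UbfU8, UnsignedEnum};

fn build_common_pdu_conf() -> CommonPduConfig {
    // Type-safe unsigned byte fields for the source/destination entity IDs and the
    // transaction sequence number.
    let source_id = UbfU8::new(1);
    let dest_id = UbfU8::new(2);
    let seq_num = UbfU8::new(3);
    // `size` (formerly `byte_width`) reports the serialized length in bytes.
    assert_eq!(source_id.size(), 1);
    CommonPduConfig::new_with_byte_fields(source_id, dest_id, seq_num)
        .expect("building the common PDU configuration failed")
}
```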

View File

@ -1,8 +1,8 @@
[package]
name = "spacepackets"
version = "0.7.0-beta.0"
version = "0.3.1"
edition = "2021"
rust-version = "1.61"
rust-version = "1.60"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
description = "Generic implementations for various CCSDS and ECSS packet standards"
homepage = "https://egit.irs.uni-stuttgart.de/rust/spacepackets"
@ -14,19 +14,11 @@ categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-sup
[dependencies]
zerocopy = "0.6"
crc = "3"
delegate = ">=0.8, <0.11"
[dependencies.thiserror]
version = "1"
optional = true
[dependencies.num_enum]
version = "0.6"
default-features = false
crc = "3.0"
delegate = "0.8"
[dependencies.serde]
version = "1"
version = "1.0"
optional = true
default-features = false
features = ["derive"]
@ -40,11 +32,11 @@ version = "0.2"
default-features = false
[dev-dependencies.postcard]
version = "1"
version = "1.0"
[features]
default = ["std"]
std = ["chrono/std", "chrono/clock", "alloc", "thiserror"]
std = ["chrono/std", "chrono/clock", "alloc"]
serde = ["dep:serde", "chrono/serde"]
alloc = ["postcard/alloc", "chrono/alloc"]

2
NOTICE
View File

@ -1,3 +1,3 @@
Generic implementations for various CCSDS and ECSS packet standards.
This software contains code developed at the University of Stuttgart's Institute of Space Systems.
This software contains code developed at the University of Stuttgart.

View File

@ -13,15 +13,11 @@ Currently, this includes the following components:
- Space Packet implementation according to
[CCSDS Blue Book 133.0-B-2](https://public.ccsds.org/Pubs/133x0b2e1.pdf)
- CCSDS File Delivery Protocol (CFDP) packet implementations according to
[CCSDS Blue Book 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf)
- PUS Telecommand and PUS Telemetry implementation according to the
[ECSS-E-ST-70-41C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
- CUC (CCSDS Unsegmented Time Code) implementation according to
[CCSDS 301.0-B-4 3.2](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
- CDS (CCSDS Day Segmented Time Code) implementation according to
- CDS Short Time Code implementation according to
[CCSDS 301.0-B-4 3.3](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
- Some helper types to support ASCII timecodes as specified in
- Some helper types to support ASCII timecodes ad specified in
[CCSDS 301.0-B-4 3.5](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
# Features

View File

@ -10,6 +10,6 @@ ARG DEBIAN_FRONTEND=noninteractive
# set CROSS_CONTAINER_IN_CONTAINER to inform `cross` that it is executed from within a container
ENV CROSS_CONTAINER_IN_CONTAINER=true
RUN rustup install nightly && \
rustup target add thumbv7em-none-eabihf armv7-unknown-linux-gnueabihf && \
# TODO: installing cross is problematic, permission issues
RUN rustup target add thumbv7em-none-eabihf armv7-unknown-linux-gnueabihf && \
rustup component add rustfmt clippy

View File

@ -13,29 +13,19 @@ pipeline {
sh 'cargo clippy'
}
}
stage('Docs') {
steps {
sh 'cargo +nightly doc --all-features'
}
}
stage('Rustfmt') {
steps {
sh 'cargo fmt --all --check'
sh 'cargo fmt'
}
}
stage('Test') {
steps {
sh 'cargo test --all-features'
sh 'cargo test'
}
}
stage('Check with all features') {
stage('Check') {
steps {
sh 'cargo check --all-features'
}
}
stage('Check with no features') {
steps {
sh 'cargo check --no-default-features'
sh 'cargo check'
}
}
stage('Check Cross Embedded Bare Metal') {

View File

@ -1,18 +0,0 @@
Checklist for new releases
=======
# Pre-Release
1. Make sure any new modules are sufficiently documented and check the docs with
`cargo doc --all-features --open`.
2. Bump version specifier in `Cargo.toml`.
3. Update `CHANGELOG.md`: Convert `unreleased` section into version section with date and add new
`unreleased` section.
4. Run `cargo test --all-features`.
5. Run `cargo fmt` and `cargo clippy`. Check `cargo msrv` against MSRV in `Cargo.toml`.
6. Wait for CI/CD results for EGit and Github. These also check cross-compilation for bare-metal
targets.
# Post-Release
1. Create a new release on `EGit` based on the release branch.

View File

@ -1,309 +0,0 @@
//! Generic CFDP length-value (LV) abstraction as specified in CFDP 5.1.8.
use crate::cfdp::TlvLvError;
use crate::{ByteConversionError, SizeMissmatch};
use core::str::Utf8Error;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::string::String;
pub const MIN_LV_LEN: usize = 1;
/// Generic CFDP length-value (LV) abstraction as specified in CFDP 5.1.8.
///
/// # Lifetimes
/// * `data`: If the LV is generated from a raw bytestream, this will be the lifetime of
/// the raw bytestream. If the LV is generated from a raw slice or a similar data reference,
/// this will be the lifetime of that data reference.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Lv<'data> {
data: Option<&'data [u8]>,
}
pub(crate) fn generic_len_check_data_serialization(
buf: &[u8],
data_len: usize,
min_overhead: usize,
) -> Result<(), ByteConversionError> {
if buf.len() < data_len + min_overhead {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: data_len + min_overhead,
}));
}
Ok(())
}
pub(crate) fn generic_len_check_deserialization(
buf: &[u8],
min_overhead: usize,
) -> Result<(), ByteConversionError> {
if buf.len() < min_overhead {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: min_overhead,
}));
}
Ok(())
}
impl<'data> Lv<'data> {
pub fn new(data: &[u8]) -> Result<Lv, TlvLvError> {
if data.len() > u8::MAX as usize {
return Err(TlvLvError::DataTooLarge(data.len()));
}
Ok(Lv { data: Some(data) })
}
/// Creates a LV with an empty value field.
pub fn new_empty() -> Lv<'data> {
Lv { data: None }
}
/// Helper function to build a string LV. This is especially useful for the file or directory
/// path LVs
pub fn new_from_str(str_slice: &str) -> Result<Lv, TlvLvError> {
Self::new(str_slice.as_bytes())
}
/// Helper function to build a string LV. This is especially useful for the file or directory
/// path LVs
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub fn new_from_string(string: &'data String) -> Result<Lv<'data>, TlvLvError> {
Self::new(string.as_bytes())
}
/// Returns the length of the value part, not including the length byte.
pub fn len_value(&self) -> usize {
if self.data.is_none() {
return 0;
}
self.data.unwrap().len()
}
/// Returns the full raw length, including the length byte.
pub fn len_full(&self) -> usize {
self.len_value() + 1
}
/// Checks whether the value field is empty.
pub fn is_empty(&self) -> bool {
self.data.is_none()
}
pub fn value(&self) -> Option<&[u8]> {
self.data
}
/// Convenience function to extract the value as a [str]. This is useful if the LV is
/// known to contain a [str], for example a file name.
pub fn value_as_str(&self) -> Option<Result<&'data str, Utf8Error>> {
self.data?;
Some(core::str::from_utf8(self.data.unwrap()))
}
/// Writes the LV to a raw buffer. Please note that the first byte will contain the length
/// of the value, and the value may not exceed a length of [u8::MAX].
pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
generic_len_check_data_serialization(buf, self.len_value(), MIN_LV_LEN)?;
Ok(self.write_to_be_bytes_no_len_check(buf))
}
/// Reads a LV from a raw buffer.
pub fn from_bytes(buf: &'data [u8]) -> Result<Lv<'data>, ByteConversionError> {
generic_len_check_deserialization(buf, MIN_LV_LEN)?;
Self::from_be_bytes_no_len_check(buf)
}
pub(crate) fn write_to_be_bytes_no_len_check(&self, buf: &mut [u8]) -> usize {
if self.data.is_none() {
buf[0] = 0;
return MIN_LV_LEN;
}
let data = self.data.unwrap();
// Length check in constructor ensures the length always has a valid value.
buf[0] = data.len() as u8;
buf[MIN_LV_LEN..data.len() + MIN_LV_LEN].copy_from_slice(data);
MIN_LV_LEN + data.len()
}
pub(crate) fn from_be_bytes_no_len_check(
buf: &'data [u8],
) -> Result<Lv<'data>, ByteConversionError> {
let value_len = buf[0] as usize;
generic_len_check_deserialization(buf, value_len + MIN_LV_LEN)?;
let mut data = None;
if value_len > 0 {
data = Some(&buf[MIN_LV_LEN..MIN_LV_LEN + value_len])
}
Ok(Self { data })
}
}
#[cfg(test)]
pub mod tests {
use crate::cfdp::lv::Lv;
use crate::cfdp::TlvLvError;
use crate::ByteConversionError;
use std::string::String;
#[test]
fn test_basic() {
let lv_data: [u8; 4] = [1, 2, 3, 4];
let lv_res = Lv::new(&lv_data);
assert!(lv_res.is_ok());
let lv = lv_res.unwrap();
assert!(lv.value().is_some());
let val = lv.value().unwrap();
assert_eq!(val[0], 1);
assert_eq!(val[1], 2);
assert_eq!(val[2], 3);
assert_eq!(val[3], 4);
assert!(!lv.is_empty());
assert_eq!(lv.len_full(), 5);
assert_eq!(lv.len_value(), 4);
}
#[test]
fn test_empty() {
let lv_empty = Lv::new_empty();
assert_eq!(lv_empty.len_value(), 0);
assert_eq!(lv_empty.len_full(), 1);
assert!(lv_empty.is_empty());
assert_eq!(lv_empty.value(), None);
let mut buf: [u8; 4] = [0xff; 4];
let res = lv_empty.write_to_be_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, 1);
assert_eq!(buf[0], 0);
}
#[test]
fn test_serialization() {
let lv_data: [u8; 4] = [1, 2, 3, 4];
let lv_res = Lv::new(&lv_data);
assert!(lv_res.is_ok());
let lv = lv_res.unwrap();
let mut buf: [u8; 16] = [0; 16];
let res = lv.write_to_be_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, 5);
assert_eq!(buf[0], 4);
assert_eq!(buf[1], 1);
assert_eq!(buf[2], 2);
assert_eq!(buf[3], 3);
assert_eq!(buf[4], 4);
}
#[test]
fn test_deserialization() {
let mut buf: [u8; 16] = [0; 16];
buf[0] = 4;
buf[1] = 1;
buf[2] = 2;
buf[3] = 3;
buf[4] = 4;
let lv = Lv::from_bytes(&buf);
assert!(lv.is_ok());
let lv = lv.unwrap();
assert!(!lv.is_empty());
assert!(lv.value().is_some());
assert_eq!(lv.len_value(), 4);
assert_eq!(lv.len_full(), 5);
let val = lv.value().unwrap();
assert_eq!(val[0], 1);
assert_eq!(val[1], 2);
assert_eq!(val[2], 3);
assert_eq!(val[3], 4);
}
#[test]
fn test_deserialization_empty() {
let buf: [u8; 2] = [0; 2];
let lv_empty = Lv::from_bytes(&buf);
assert!(lv_empty.is_ok());
let lv_empty = lv_empty.unwrap();
assert!(lv_empty.is_empty());
assert!(lv_empty.value().is_none());
}
#[test]
fn test_data_too_large() {
let data_big: [u8; u8::MAX as usize + 1] = [0; u8::MAX as usize + 1];
let lv = Lv::new(&data_big);
assert!(lv.is_err());
let error = lv.unwrap_err();
if let TlvLvError::DataTooLarge(size) = error {
assert_eq!(size, u8::MAX as usize + 1);
} else {
panic!("invalid exception {:?}", error)
}
}
#[test]
fn test_serialization_buf_too_small() {
let mut buf: [u8; 3] = [0; 3];
let lv_data: [u8; 4] = [1, 2, 3, 4];
let lv = Lv::new(&lv_data).unwrap();
let res = lv.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let error = res.unwrap_err();
if let ByteConversionError::ToSliceTooSmall(missmatch) = error {
assert_eq!(missmatch.expected, 5);
assert_eq!(missmatch.found, 3);
} else {
panic!("invalid error {}", error);
}
}
#[test]
fn test_deserialization_buf_too_small() {
let mut buf: [u8; 3] = [0; 3];
buf[0] = 4;
let res = Lv::from_bytes(&buf);
assert!(res.is_err());
let error = res.unwrap_err();
if let ByteConversionError::FromSliceTooSmall(missmatch) = error {
assert_eq!(missmatch.found, 3);
assert_eq!(missmatch.expected, 5);
} else {
panic!("invalid error {}", error);
}
}
fn verify_test_str_lv(lv: Lv) {
let mut buf: [u8; 16] = [0; 16];
let res = lv.write_to_be_bytes(&mut buf);
assert!(res.is_ok());
let res = res.unwrap();
assert_eq!(res, 8 + 1);
assert_eq!(buf[0], 8);
assert_eq!(buf[1], 't' as u8);
assert_eq!(buf[2], 'e' as u8);
assert_eq!(buf[3], 's' as u8);
assert_eq!(buf[4], 't' as u8);
assert_eq!(buf[5], '.' as u8);
assert_eq!(buf[6], 'b' as u8);
assert_eq!(buf[7], 'i' as u8);
assert_eq!(buf[8], 'n' as u8);
}
#[test]
fn test_str_helper() {
let test_str = "test.bin";
let str_lv = Lv::new_from_str(test_str);
assert!(str_lv.is_ok());
verify_test_str_lv(str_lv.unwrap());
}
#[test]
fn test_string_helper() {
let string = String::from("test.bin");
let str_lv = Lv::new_from_string(&string);
assert!(str_lv.is_ok());
verify_test_str_lv(str_lv.unwrap());
}
}
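
To complement the unit tests above, here is a minimal round-trip sketch for the `Lv` type, using only the API shown in this file; the `spacepackets::cfdp::lv` import path is an assumption based on the crate name in `Cargo.toml` and the `crate::cfdp::lv` path used by the tests.

```rust
use spacepackets::cfdp::lv::Lv;

fn lv_roundtrip_sketch() {
    // Build an LV holding a file name and serialize it: one length byte plus the value bytes.
    let lv = Lv::new_from_str("test.bin").expect("creating LV failed");
    let mut buf: [u8; 16] = [0; 16];
    let written = lv.write_to_be_bytes(&mut buf).expect("writing LV failed");
    assert_eq!(written, lv.len_full());
    assert_eq!(buf[0], 8);

    // Read the LV back from the raw buffer and recover the string value.
    let lv_read_back = Lv::from_bytes(&buf).expect("reading LV failed");
    let value = lv_read_back
        .value_as_str()
        .expect("LV is empty")
        .expect("value is not valid UTF-8");
    assert_eq!(value, "test.bin");
}
```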

View File

@ -1,209 +0,0 @@
//! Low-level CCSDS File Delivery Protocol (CFDP) support according to [CCSDS 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf).
use crate::ByteConversionError;
use core::fmt::{Display, Formatter};
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
pub mod lv;
pub mod pdu;
pub mod tlv;
/// This is the name of the standard this module is based on.
pub const CFDP_VERSION_2_NAME: &str = "CCSDS 727.0-B-5";
/// Currently, only this version is supported.
pub const CFDP_VERSION_2: u8 = 0b001;
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum PduType {
FileDirective = 0,
FileData = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum Direction {
TowardsReceiver = 0,
TowardsSender = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum TransmissionMode {
Acknowledged = 0,
Unacknowledged = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum CrcFlag {
NoCrc = 0,
WithCrc = 1,
}
/// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum SegmentMetadataFlag {
NotPresent = 0,
Present = 1,
}
/// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum SegmentationControl {
NoRecordBoundaryPreservation = 0,
WithRecordBoundaryPreservation = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum FaultHandlerCode {
NoticeOfCancellation = 0b0001,
NoticeOfSuspension = 0b0010,
IgnoreError = 0b0011,
AbandonTransaction = 0b0100,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum LenInBytes {
ZeroOrNone = 0,
OneByte = 1,
TwoBytes = 2,
ThreeBytes = 4,
FourBytes = 8,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum ConditionCode {
/// This is not an error condition for which a fault handler override can be specified
NoError = 0b0000,
PositiveAckLimitReached = 0b0001,
KeepAliveLimitReached = 0b0010,
InvalidTransmissionMode = 0b0011,
FilestoreRejection = 0b0100,
FileChecksumFailure = 0b0101,
FileSizeError = 0b0110,
NakLimitReached = 0b0111,
InactivityDetected = 0b1000,
CheckLimitReached = 0b1001,
UnsupportedChecksumType = 0b1011,
/// Not an actual fault condition for which fault handler overrides can be specified
SuspendRequestReceived = 0b1110,
/// Not an actual fault condition for which fault handler overrides can be specified
CancelRequestReceived = 0b1111,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum LargeFileFlag {
/// 32 bit maximum file size and FSS size
Normal = 0,
/// 64 bit maximum file size and FSS size
Large = 1,
}
/// Checksum types according to the
/// [SANA Checksum Types registry](https://sanaregistry.org/r/checksum_identifiers/)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum ChecksumType {
/// Modular legacy checksum
Modular = 0,
Crc32Proximity1 = 1,
Crc32C = 2,
/// Polynomial: 0x4C11DB7. Preferred checksum for now.
Crc32 = 3,
NullChecksum = 15,
}
impl Default for ChecksumType {
fn default() -> Self {
Self::NullChecksum
}
}
pub const NULL_CHECKSUM_U32: [u8; 4] = [0; 4];
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum TlvLvError {
DataTooLarge(usize),
ByteConversionError(ByteConversionError),
/// First value: Found value. Second value: Expected value if there is one.
InvalidTlvTypeField((u8, Option<u8>)),
/// Logically invalid value length detected. The value length may not exceed 255 bytes.
/// Depending on the concrete TLV type, the value length may also be logically invalid.
InvalidValueLength(usize),
/// Only applies to filestore requests and responses. Second name was missing where one is
/// expected.
SecondNameMissing,
/// Invalid action code for filestore requests or responses.
InvalidFilestoreActionCode(u8),
}
impl From<ByteConversionError> for TlvLvError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value)
}
}
impl Display for TlvLvError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
TlvLvError::DataTooLarge(data_len) => {
write!(
f,
"data with size {} larger than allowed {} bytes",
data_len,
u8::MAX
)
}
TlvLvError::ByteConversionError(e) => {
write!(f, "{}", e)
}
TlvLvError::InvalidTlvTypeField((found, expected)) => {
write!(
f,
"invalid TLV type field, found {found}, possibly expected {expected:?}"
)
}
TlvLvError::InvalidValueLength(len) => {
write!(f, "invalid value length {len} detected")
}
TlvLvError::SecondNameMissing => {
write!(f, "second name missing for filestore request or response")
}
TlvLvError::InvalidFilestoreActionCode(raw) => {
write!(f, "invalid filestore action code with raw value {raw}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for TlvLvError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
TlvLvError::ByteConversionError(e) => Some(e),
_ => None,
}
}
}
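
Because the enums above derive `TryFromPrimitive` and `IntoPrimitive`, raw protocol field values can be converted to and from the typed variants. A short sketch (the `spacepackets::cfdp` import path is assumed from the crate name):

```rust
use spacepackets::cfdp::{ChecksumType, ConditionCode, PduType};

fn raw_field_conversion_sketch() {
    // Enum to raw value via the `IntoPrimitive` derive.
    let raw: u8 = PduType::FileData.into();
    assert_eq!(raw, 1);

    // Raw value to enum via the `TryFromPrimitive` derive; values without a
    // corresponding variant are rejected.
    let condition_code = ConditionCode::try_from(0b0100).expect("invalid condition code");
    assert_eq!(condition_code, ConditionCode::FilestoreRejection);
    assert!(ConditionCode::try_from(0b1010).is_err());

    // The default checksum type is the null checksum.
    assert_eq!(ChecksumType::default(), ChecksumType::NullChecksum);
}
```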

View File

@ -1,206 +0,0 @@
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
FileDirectiveType, PduError, PduHeader,
};
use crate::cfdp::tlv::EntityIdTlv;
use crate::cfdp::{ConditionCode, CrcFlag, LargeFileFlag};
use crate::{ByteConversionError, SizeMissmatch};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// EOF PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.2.2.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct EofPdu {
pdu_header: PduHeader,
condition_code: ConditionCode,
file_checksum: u32,
file_size: u64,
fault_location: Option<EntityIdTlv>,
}
impl EofPdu {
pub fn new_no_error(pdu_header: PduHeader, file_checksum: u32, file_size: u64) -> Self {
let mut eof_pdu = Self {
pdu_header,
condition_code: ConditionCode::NoError,
file_checksum,
file_size,
fault_location: None,
};
eof_pdu.pdu_header.pdu_datafield_len = eof_pdu.calc_pdu_datafield_len() as u16;
eof_pdu
}
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
pub fn written_len(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
pub fn file_checksum(&self) -> u32 {
self.file_checksum
}
pub fn file_size(&self) -> u64 {
self.file_size
}
fn calc_pdu_datafield_len(&self) -> usize {
// One directive type octet, one octet with the 4-bit condition code and 4 spare bits,
// a 4-byte file checksum and a 4-byte file size field (8 bytes for large files).
let mut len = 2 + core::mem::size_of::<u32>() + 4;
if self.pdu_header.pdu_conf.file_flag == LargeFileFlag::Large {
len += 4;
}
if let Some(fault_location) = self.fault_location {
len += fault_location.len_full();
}
len
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.written_len();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: expected_len,
})
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::EofPdu as u8;
current_idx += 1;
buf[current_idx] = (self.condition_code as u8) << 4;
current_idx += 1;
buf[current_idx..current_idx + 4].copy_from_slice(&self.file_checksum.to_be_bytes());
current_idx += 4;
current_idx += write_fss_field(
self.pdu_header.pdu_conf.file_flag,
self.file_size,
&mut buf[current_idx..],
)?;
if let Some(fault_location) = self.fault_location {
current_idx += fault_location.write_to_be_bytes(buf)?;
}
if self.pdu_header.pdu_conf.crc_flag == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
pub fn from_bytes(buf: &[u8]) -> Result<EofPdu, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let is_large_file = pdu_header.pdu_conf.file_flag == LargeFileFlag::Large;
let mut min_expected_len = 2 + 4 + 4;
if is_large_file {
min_expected_len += 4;
}
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType((buf[current_idx], FileDirectiveType::EofPdu))
})?;
if directive_type != FileDirectiveType::EofPdu {
return Err(PduError::WrongDirectiveType((
directive_type,
FileDirectiveType::EofPdu,
)));
}
current_idx += 1;
let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111)
.map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?;
current_idx += 1;
let file_checksum =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
current_idx += 4;
let (fss_field_len, file_size) =
read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx..]);
current_idx += fss_field_len;
let mut fault_location = None;
if condition_code != ConditionCode::NoError && current_idx < full_len_without_crc {
fault_location = Some(EntityIdTlv::from_bytes(&buf[current_idx..])?);
}
Ok(Self {
pdu_header,
condition_code,
file_checksum,
file_size,
fault_location,
})
}
}
#[cfg(test)]
mod tests {
use crate::cfdp::pdu::eof::EofPdu;
use crate::cfdp::pdu::tests::{common_pdu_conf, verify_raw_header};
use crate::cfdp::pdu::{FileDirectiveType, PduHeader};
use crate::cfdp::{ConditionCode, CrcFlag, LargeFileFlag};
#[test]
fn test_basic() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
assert_eq!(eof_pdu.written_len(), pdu_header.header_len() + 2 + 4 + 4);
assert_eq!(eof_pdu.file_checksum(), 0x01020304);
assert_eq!(eof_pdu.file_size(), 12);
assert_eq!(eof_pdu.condition_code(), ConditionCode::NoError);
}
#[test]
fn test_serialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
let res = eof_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, eof_pdu.written_len());
verify_raw_header(eof_pdu.pdu_header(), &buf);
let mut current_idx = eof_pdu.pdu_header().header_len();
buf[current_idx] = FileDirectiveType::EofPdu as u8;
current_idx += 1;
assert_eq!(
(buf[current_idx] >> 4) & 0b1111,
ConditionCode::NoError as u8
);
current_idx += 1;
assert_eq!(
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()),
0x01020304
);
current_idx += 4;
assert_eq!(
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()),
12
);
current_idx += 4;
assert_eq!(current_idx, written);
}
#[test]
fn test_deserialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
eof_pdu.write_to_bytes(&mut buf).unwrap();
let eof_read_back = EofPdu::from_bytes(&buf);
if !eof_read_back.is_ok() {
let e = eof_read_back.unwrap_err();
panic!("deserialization failed with: {e}")
}
let eof_read_back = eof_read_back.unwrap();
assert_eq!(eof_read_back, eof_pdu);
}
}
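
As a usage illustration for the EOF PDU above, a round-trip sketch built only from the constructors visible in this diff (`CommonPduConfig::new_with_byte_fields`, `PduHeader::new_no_file_data`); the import paths mirror the test modules and assume the crate name `spacepackets`.

```rust
use spacepackets::cfdp::pdu::eof::EofPdu;
use spacepackets::cfdp::pdu::{CommonPduConfig, PduHeader};
use spacepackets::util::UbfU8;

fn eof_pdu_roundtrip_sketch() {
    // Source entity ID, destination entity ID and transaction sequence number.
    let pdu_conf =
        CommonPduConfig::new_with_byte_fields(UbfU8::new(1), UbfU8::new(2), UbfU8::new(3))
            .expect("creating common PDU config failed");
    let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);

    // EOF (No error) PDU carrying the file checksum and the transferred file size.
    let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
    let mut buf: [u8; 64] = [0; 64];
    let written = eof_pdu.write_to_bytes(&mut buf).expect("writing EOF PDU failed");
    assert_eq!(written, eof_pdu.written_len());

    // Deserialize the raw bytes again and verify the round trip.
    let read_back = EofPdu::from_bytes(&buf).expect("reading EOF PDU failed");
    assert_eq!(read_back, eof_pdu);
}
```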

View File

@ -1,397 +0,0 @@
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
PduError, PduHeader,
};
use crate::cfdp::{CrcFlag, LargeFileFlag, PduType, SegmentMetadataFlag};
use crate::{ByteConversionError, SizeMissmatch};
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum RecordContinuationState {
NoStartNoEnd = 0b00,
StartWithoutEnd = 0b01,
EndWithoutStart = 0b10,
StartAndEnd = 0b11,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct SegmentMetadata<'seg_meta> {
record_continuation_state: RecordContinuationState,
metadata: Option<&'seg_meta [u8]>,
}
impl<'seg_meta> SegmentMetadata<'seg_meta> {
pub fn new(
record_continuation_state: RecordContinuationState,
metadata: Option<&'seg_meta [u8]>,
) -> Option<Self> {
if let Some(metadata) = metadata {
if metadata.len() > 2_usize.pow(6) - 1 {
return None;
}
}
Some(Self {
record_continuation_state,
metadata,
})
}
pub fn written_len(&self) -> usize {
// Map empty metadata to 0 and slice to its length.
1 + self.metadata.map_or(0, |meta| meta.len())
}
pub(crate) fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.written_len() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: self.written_len(),
}));
}
buf[0] = ((self.record_continuation_state as u8) << 6)
| self.metadata.map_or(0, |meta| meta.len() as u8);
if let Some(metadata) = self.metadata {
buf[1..1 + metadata.len()].copy_from_slice(metadata)
}
Ok(self.written_len())
}
pub(crate) fn from_bytes(buf: &'seg_meta [u8]) -> Result<Self, ByteConversionError> {
if buf.is_empty() {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: 2,
}));
}
let mut metadata = None;
let seg_metadata_len = (buf[0] & 0b111111) as usize;
if seg_metadata_len > 0 {
metadata = Some(&buf[1..1 + seg_metadata_len]);
}
Ok(Self {
// Can't fail, only 2 bits
record_continuation_state: RecordContinuationState::try_from((buf[0] >> 6) & 0b11)
.unwrap(),
metadata,
})
}
}
/// File Data PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.3.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FileDataPdu<'seg_meta, 'file_data> {
pdu_header: PduHeader,
#[cfg_attr(feature = "serde", serde(borrow))]
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data: &'file_data [u8],
}
impl<'seg_meta, 'file_data> FileDataPdu<'seg_meta, 'file_data> {
pub fn new_with_seg_metadata(
pdu_header: PduHeader,
segment_metadata: SegmentMetadata<'seg_meta>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new_generic(pdu_header, Some(segment_metadata), offset, file_data)
}
pub fn new_no_seg_metadata(
pdu_header: PduHeader,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new_generic(pdu_header, None, offset, file_data)
}
pub fn new_generic(
mut pdu_header: PduHeader,
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
pdu_header.pdu_type = PduType::FileData;
if segment_metadata.is_some() {
pdu_header.seg_metadata_flag = SegmentMetadataFlag::Present;
}
let mut pdu = Self {
pdu_header,
segment_metadata,
offset,
file_data,
};
pdu.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
fn calc_pdu_datafield_len(&self) -> usize {
let mut len = core::mem::size_of::<u32>();
if self.pdu_header.pdu_conf.file_flag == LargeFileFlag::Large {
len += 4;
}
if self.segment_metadata.is_some() {
len += self.segment_metadata.as_ref().unwrap().written_len()
}
len += self.file_data.len();
if self.pdu_header.pdu_conf.crc_flag == CrcFlag::WithCrc {
len += 2;
}
len
}
pub fn written_len(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
pub fn offset(&self) -> u64 {
self.offset
}
pub fn file_data(&self) -> &'file_data [u8] {
self.file_data
}
pub fn segment_metadata(&self) -> Option<&SegmentMetadata> {
self.segment_metadata.as_ref()
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
if buf.len() < self.written_len() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: self.written_len(),
})
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
if self.segment_metadata.is_some() {
current_idx += self
.segment_metadata
.as_ref()
.unwrap()
.write_to_bytes(&mut buf[current_idx..])?;
}
current_idx += write_fss_field(
self.pdu_header.common_pdu_conf().file_flag,
self.offset,
&mut buf[current_idx..],
)?;
buf[current_idx..current_idx + self.file_data.len()].copy_from_slice(self.file_data);
current_idx += self.file_data.len();
if self.pdu_header.pdu_conf.crc_flag == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
pub fn from_bytes<'longest: 'seg_meta + 'file_data>(
buf: &'longest [u8],
) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let min_expected_len = current_idx + core::mem::size_of::<u32>();
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let mut segment_metadata = None;
if pdu_header.seg_metadata_flag == SegmentMetadataFlag::Present {
segment_metadata = Some(SegmentMetadata::from_bytes(&buf[current_idx..])?);
current_idx += segment_metadata.as_ref().unwrap().written_len();
}
let (fss, offset) = read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx..]);
current_idx += fss;
if current_idx > full_len_without_crc {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
found: current_idx,
expected: full_len_without_crc,
})
.into());
}
Ok(Self {
pdu_header,
segment_metadata,
offset,
file_data: &buf[current_idx..full_len_without_crc],
})
}
}
#[cfg(test)]
mod tests {
use crate::cfdp::pdu::file_data::{FileDataPdu, RecordContinuationState, SegmentMetadata};
use crate::cfdp::pdu::{CommonPduConfig, PduHeader};
use crate::cfdp::{SegmentMetadataFlag, SegmentationControl};
use crate::util::UbfU8;
#[test]
fn test_basic() {
let src_id = UbfU8::new(1);
let dest_id = UbfU8::new(2);
let transaction_seq_num = UbfU8::new(3);
let common_conf =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_seq_num).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
assert_eq!(fd_pdu.file_data(), file_data);
assert_eq!(fd_pdu.offset(), 10);
assert!(fd_pdu.segment_metadata().is_none());
assert_eq!(
fd_pdu.written_len(),
fd_pdu.pdu_header.header_len() + core::mem::size_of::<u32>() + 4
);
}
#[test]
fn test_serialization() {
let src_id = UbfU8::new(1);
let dest_id = UbfU8::new(2);
let transaction_seq_num = UbfU8::new(3);
let common_conf =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_seq_num).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
let mut buf: [u8; 32] = [0; 32];
let res = fd_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(
written,
fd_pdu.pdu_header.header_len() + core::mem::size_of::<u32>() + 4
);
let mut current_idx = fd_pdu.pdu_header.header_len();
let file_size = u32::from_be_bytes(
buf[fd_pdu.pdu_header.header_len()..fd_pdu.pdu_header.header_len() + 4]
.try_into()
.unwrap(),
);
current_idx += 4;
assert_eq!(file_size, 10);
assert_eq!(buf[current_idx], 1);
current_idx += 1;
assert_eq!(buf[current_idx], 2);
current_idx += 1;
assert_eq!(buf[current_idx], 3);
current_idx += 1;
assert_eq!(buf[current_idx], 4);
}
#[test]
fn test_deserialization() {
let src_id = UbfU8::new(1);
let dest_id = UbfU8::new(2);
let transaction_seq_num = UbfU8::new(3);
let common_conf =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_seq_num).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
let mut buf: [u8; 32] = [0; 32];
fd_pdu.write_to_bytes(&mut buf).unwrap();
let fd_pdu_read_back = FileDataPdu::from_bytes(&buf);
assert!(fd_pdu_read_back.is_ok());
let fd_pdu_read_back = fd_pdu_read_back.unwrap();
assert_eq!(fd_pdu_read_back, fd_pdu);
}
#[test]
fn test_with_seg_metadata_serialization() {
let src_id = UbfU8::new(1);
let dest_id = UbfU8::new(2);
let transaction_seq_num = UbfU8::new(3);
let common_conf =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_seq_num).unwrap();
let pdu_header = PduHeader::new_for_file_data(
common_conf,
0,
SegmentMetadataFlag::Present,
SegmentationControl::WithRecordBoundaryPreservation,
);
let file_data: [u8; 4] = [1, 2, 3, 4];
let seg_metadata: [u8; 4] = [4, 3, 2, 1];
let segment_meta =
SegmentMetadata::new(RecordContinuationState::StartAndEnd, Some(&seg_metadata))
.unwrap();
let fd_pdu = FileDataPdu::new_with_seg_metadata(pdu_header, segment_meta, 10, &file_data);
assert!(fd_pdu.segment_metadata().is_some());
assert_eq!(*fd_pdu.segment_metadata().unwrap(), segment_meta);
assert_eq!(
fd_pdu.written_len(),
fd_pdu.pdu_header.header_len()
+ 1
+ seg_metadata.len()
+ core::mem::size_of::<u32>()
+ 4
);
let mut buf: [u8; 32] = [0; 32];
fd_pdu
.write_to_bytes(&mut buf)
.expect("writing FD PDU failed");
let mut current_idx = fd_pdu.pdu_header.header_len();
assert_eq!(
RecordContinuationState::try_from((buf[current_idx] >> 6) & 0b11).unwrap(),
RecordContinuationState::StartAndEnd
);
assert_eq!((buf[current_idx] & 0b111111) as usize, seg_metadata.len());
current_idx += 1;
assert_eq!(buf[current_idx], 4);
current_idx += 1;
assert_eq!(buf[current_idx], 3);
current_idx += 1;
assert_eq!(buf[current_idx], 2);
current_idx += 1;
assert_eq!(buf[current_idx], 1);
current_idx += 1;
// Still verify that the rest is written correctly.
assert_eq!(
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()),
10
);
current_idx += 4;
assert_eq!(buf[current_idx], 1);
current_idx += 1;
assert_eq!(buf[current_idx], 2);
current_idx += 1;
assert_eq!(buf[current_idx], 3);
current_idx += 1;
assert_eq!(buf[current_idx], 4);
current_idx += 1;
assert_eq!(current_idx, fd_pdu.written_len());
}
#[test]
fn test_with_seg_metadata_deserialization() {
let src_id = UbfU8::new(1);
let dest_id = UbfU8::new(2);
let transaction_seq_num = UbfU8::new(3);
let common_conf =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_seq_num).unwrap();
let pdu_header = PduHeader::new_for_file_data(
common_conf,
0,
SegmentMetadataFlag::Present,
SegmentationControl::WithRecordBoundaryPreservation,
);
let file_data: [u8; 4] = [1, 2, 3, 4];
let seg_metadata: [u8; 4] = [4, 3, 2, 1];
let segment_meta =
SegmentMetadata::new(RecordContinuationState::StartAndEnd, Some(&seg_metadata))
.unwrap();
let fd_pdu = FileDataPdu::new_with_seg_metadata(pdu_header, segment_meta, 10, &file_data);
let mut buf: [u8; 32] = [0; 32];
fd_pdu
.write_to_bytes(&mut buf)
.expect("writing FD PDU failed");
let fd_pdu_read_back = FileDataPdu::from_bytes(&buf);
assert!(fd_pdu_read_back.is_ok());
let fd_pdu_read_back = fd_pdu_read_back.unwrap();
assert_eq!(fd_pdu_read_back, fd_pdu);
}
}
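
The same pattern applies to the file data PDU above; this sketch uses the default file data header constructor referenced by the tests (import paths assumed as before).

```rust
use spacepackets::cfdp::pdu::file_data::FileDataPdu;
use spacepackets::cfdp::pdu::{CommonPduConfig, PduHeader};
use spacepackets::util::UbfU8;

fn file_data_pdu_roundtrip_sketch() {
    let pdu_conf =
        CommonPduConfig::new_with_byte_fields(UbfU8::new(1), UbfU8::new(2), UbfU8::new(3))
            .expect("creating common PDU config failed");
    // File data PDU header without segment metadata.
    let pdu_header = PduHeader::new_for_file_data_default(pdu_conf, 0);

    // Four bytes of file content to be placed at file offset 10.
    let file_chunk: [u8; 4] = [1, 2, 3, 4];
    let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_chunk);

    let mut buf: [u8; 32] = [0; 32];
    let written = fd_pdu.write_to_bytes(&mut buf).expect("writing FD PDU failed");
    assert_eq!(written, fd_pdu.written_len());

    let read_back = FileDataPdu::from_bytes(&buf).expect("reading FD PDU failed");
    assert_eq!(read_back.offset(), 10);
    assert_eq!(read_back.file_data(), file_chunk);
}
```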

View File

@ -1,336 +0,0 @@
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, FileDirectiveType, PduError, PduHeader,
};
use crate::cfdp::tlv::{EntityIdTlv, Tlv, TlvType, TlvTypeField};
use crate::cfdp::{ConditionCode, CrcFlag, PduType, TlvLvError};
use crate::{ByteConversionError, SizeMissmatch};
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum DeliveryCode {
Complete = 0,
Incomplete = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum FileStatus {
DiscardDeliberately = 0b00,
DiscardedFsRejection = 0b01,
Retained = 0b10,
Unreported = 0b11,
}
/// Finished PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.2.3.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FinishedPdu<'fs_responses> {
pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
file_status: FileStatus,
fs_responses: Option<&'fs_responses [u8]>,
fault_location: Option<EntityIdTlv>,
}
impl<'fs_responses> FinishedPdu<'fs_responses> {
/// Default finished PDU: No error (no fault location field) and no filestore responses.
pub fn new_default(
pdu_header: PduHeader,
delivery_code: DeliveryCode,
file_status: FileStatus,
) -> Self {
Self::new_generic(
pdu_header,
ConditionCode::NoError,
delivery_code,
file_status,
None,
None,
)
}
pub fn new_with_error(
pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
file_status: FileStatus,
fault_location: EntityIdTlv,
) -> Self {
Self::new_generic(
pdu_header,
condition_code,
delivery_code,
file_status,
None,
Some(fault_location),
)
}
pub fn new_generic(
mut pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
file_status: FileStatus,
fs_responses: Option<&'fs_responses [u8]>,
fault_location: Option<EntityIdTlv>,
) -> Self {
pdu_header.pdu_type = PduType::FileDirective;
let mut finished_pdu = Self {
pdu_header,
condition_code,
delivery_code,
file_status,
fs_responses,
fault_location,
};
finished_pdu.pdu_header.pdu_datafield_len = finished_pdu.calc_pdu_datafield_len() as u16;
finished_pdu
}
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
pub fn written_len(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
pub fn delivery_code(&self) -> DeliveryCode {
self.delivery_code
}
pub fn file_status(&self) -> FileStatus {
self.file_status
}
pub fn filestore_responses(&self) -> Option<&'fs_responses [u8]> {
self.fs_responses
}
pub fn fault_location(&self) -> Option<EntityIdTlv> {
self.fault_location
}
fn calc_pdu_datafield_len(&self) -> usize {
let mut base_len = 2;
if let Some(fs_responses) = self.fs_responses {
base_len += fs_responses.len();
}
if let Some(fault_location) = self.fault_location {
base_len += fault_location.len_full();
}
base_len
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.written_len();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: expected_len,
})
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::FinishedPdu as u8;
current_idx += 1;
buf[current_idx] = ((self.condition_code as u8) << 4)
| ((self.delivery_code as u8) << 2)
| self.file_status as u8;
current_idx += 1;
if let Some(fs_responses) = self.fs_responses {
buf[current_idx..current_idx + fs_responses.len()].copy_from_slice(fs_responses);
current_idx += fs_responses.len();
}
if let Some(fault_location) = self.fault_location {
current_idx += fault_location.write_to_be_bytes(&mut buf[current_idx..])?;
}
if self.pdu_header.pdu_conf.crc_flag == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
/// Generates [Self] from a raw bytestream.
pub fn from_bytes(buf: &'fs_responses [u8]) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let min_expected_len = current_idx + 2;
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType((buf[current_idx], FileDirectiveType::FinishedPdu))
})?;
if directive_type != FileDirectiveType::FinishedPdu {
return Err(PduError::WrongDirectiveType((
directive_type,
FileDirectiveType::FinishedPdu,
)));
}
current_idx += 1;
let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111)
.map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?;
// Unwrap is okay here for both of the following operations which can not fail.
let delivery_code = DeliveryCode::try_from((buf[current_idx] >> 2) & 0b1).unwrap();
let file_status = FileStatus::try_from(buf[current_idx] & 0b11).unwrap();
current_idx += 1;
let (fs_responses, fault_location) =
Self::parse_tlv_fields(current_idx, full_len_without_crc, buf)?;
Ok(Self {
pdu_header,
condition_code,
delivery_code,
file_status,
fs_responses,
fault_location,
})
}
fn parse_tlv_fields(
mut current_idx: usize,
full_len_without_crc: usize,
buf: &'fs_responses [u8],
) -> Result<(Option<&'fs_responses [u8]>, Option<EntityIdTlv>), PduError> {
let mut fs_responses = None;
let mut fault_location = None;
let start_of_fs_responses = current_idx;
// There are leftover filestore response(s) and/or a fault location field.
while current_idx < full_len_without_crc {
let next_tlv = Tlv::from_bytes(&buf[current_idx..])?;
match next_tlv.tlv_type_field() {
TlvTypeField::Standard(tlv_type) => {
if tlv_type == TlvType::FilestoreResponse {
current_idx += next_tlv.len_full();
if current_idx == full_len_without_crc {
fs_responses = Some(&buf[start_of_fs_responses..current_idx]);
}
} else if tlv_type == TlvType::EntityId {
// At least one FS response was read before the entity ID TLV.
if current_idx > start_of_fs_responses {
fs_responses = Some(&buf[start_of_fs_responses..current_idx]);
}
fault_location = Some(EntityIdTlv::from_bytes(&buf[current_idx..])?);
current_idx += fault_location.as_ref().unwrap().len_full();
// This is considered a configuration error: The entity ID has to be the
// last TLV, everything else would break the whole handling of the packet
// TLVs.
if current_idx != full_len_without_crc {
return Err(PduError::FormatError);
}
} else {
return Err(TlvLvError::InvalidTlvTypeField((tlv_type as u8, None)).into());
}
}
TlvTypeField::Custom(raw) => {
return Err(TlvLvError::InvalidTlvTypeField((raw, None)).into());
}
}
}
Ok((fs_responses, fault_location))
}
}
#[cfg(test)]
mod tests {
use crate::cfdp::pdu::finished::{DeliveryCode, FileStatus, FinishedPdu};
use crate::cfdp::pdu::tests::{common_pdu_conf, verify_raw_header};
use crate::cfdp::pdu::{FileDirectiveType, PduHeader};
use crate::cfdp::{ConditionCode, CrcFlag, LargeFileFlag};
fn generic_finished_pdu(
crc_flag: CrcFlag,
fss: LargeFileFlag,
delivery_code: DeliveryCode,
file_status: FileStatus,
) -> FinishedPdu<'static> {
let pdu_header = PduHeader::new_no_file_data(common_pdu_conf(crc_flag, fss), 0);
FinishedPdu::new_default(pdu_header, delivery_code, file_status)
}
#[test]
fn test_basic() {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
DeliveryCode::Complete,
FileStatus::Retained,
);
assert_eq!(finished_pdu.condition_code(), ConditionCode::NoError);
assert_eq!(finished_pdu.delivery_code(), DeliveryCode::Complete);
assert_eq!(finished_pdu.file_status(), FileStatus::Retained);
assert_eq!(finished_pdu.filestore_responses(), None);
assert_eq!(finished_pdu.fault_location(), None);
assert_eq!(finished_pdu.pdu_header().pdu_datafield_len, 2);
}
fn generic_serialization_test_no_error(delivery_code: DeliveryCode, file_status: FileStatus) {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
delivery_code,
file_status,
);
let mut buf: [u8; 64] = [0; 64];
let written = finished_pdu.write_to_bytes(&mut buf);
assert!(written.is_ok());
let written = written.unwrap();
assert_eq!(written, finished_pdu.written_len());
assert_eq!(written, finished_pdu.pdu_header().header_len() + 2);
verify_raw_header(finished_pdu.pdu_header(), &buf);
let mut current_idx = finished_pdu.pdu_header().header_len();
assert_eq!(buf[current_idx], FileDirectiveType::FinishedPdu as u8);
current_idx += 1;
assert_eq!(
(buf[current_idx] >> 4) & 0b1111,
ConditionCode::NoError as u8
);
assert_eq!((buf[current_idx] >> 2) & 0b1, delivery_code as u8);
assert_eq!(buf[current_idx] & 0b11, file_status as u8);
assert_eq!(current_idx + 1, written);
}
#[test]
fn test_serialization_simple() {
generic_serialization_test_no_error(DeliveryCode::Complete, FileStatus::Retained);
}
#[test]
fn test_serialization_simple_2() {
generic_serialization_test_no_error(
DeliveryCode::Incomplete,
FileStatus::DiscardDeliberately,
);
}
#[test]
fn test_serialization_simple_3() {
generic_serialization_test_no_error(DeliveryCode::Incomplete, FileStatus::Unreported);
}
#[test]
fn test_deserialization_simple() {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
DeliveryCode::Complete,
FileStatus::Retained,
);
let mut buf: [u8; 64] = [0; 64];
finished_pdu.write_to_bytes(&mut buf).unwrap();
let read_back = FinishedPdu::from_bytes(&buf);
assert!(read_back.is_ok());
let read_back = read_back.unwrap();
assert_eq!(finished_pdu, read_back);
}
}

View File

@ -1,531 +0,0 @@
use crate::cfdp::lv::Lv;
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
FileDirectiveType, PduError, PduHeader,
};
use crate::cfdp::tlv::Tlv;
use crate::cfdp::{ChecksumType, CrcFlag, LargeFileFlag, PduType};
use crate::{ByteConversionError, SizeMissmatch};
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct MetadataGenericParams {
pub closure_requested: bool,
pub checksum_type: ChecksumType,
pub file_size: u64,
}
impl MetadataGenericParams {
pub fn new(closure_requested: bool, checksum_type: ChecksumType, file_size: u64) -> Self {
Self {
closure_requested,
checksum_type,
file_size,
}
}
}
pub fn build_metadata_opts_from_slice(
buf: &mut [u8],
tlvs: &[Tlv],
) -> Result<usize, ByteConversionError> {
let mut written = 0;
for tlv in tlvs {
written += tlv.write_to_bytes(&mut buf[written..])?;
}
Ok(written)
}
#[cfg(feature = "alloc")]
pub fn build_metadata_opts_from_vec(
buf: &mut [u8],
tlvs: &Vec<Tlv>,
) -> Result<usize, ByteConversionError> {
build_metadata_opts_from_slice(buf, tlvs.as_slice())
}
/// Helper structure to loop through all options of a metadata PDU. It should be noted that
/// iterators in Rust are not fallible, but the TLV creation can fail, for example if the raw TLV
/// data is invalid for some reason. In that case, the iterator will yield [None] because there
/// is no way to recover from this.
///
/// The user can accumulate the length of all TLVs yielded by the iterator and compare it against
/// the full length of the options to check whether the iterator was able to parse all TLVs
/// successfully.
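///
/// # Example
///
/// A minimal sketch of the length accumulation check described above, mirroring the unit tests
/// at the end of this file. The `spacepackets::...` paths and the file names are illustrative
/// assumptions.
///
/// ```
/// use spacepackets::cfdp::lv::Lv;
/// use spacepackets::cfdp::pdu::metadata::{MetadataGenericParams, MetadataPdu};
/// use spacepackets::cfdp::pdu::{CommonPduConfig, PduHeader};
/// use spacepackets::cfdp::tlv::{Tlv, TlvType};
/// use spacepackets::cfdp::ChecksumType;
/// use spacepackets::util::UnsignedByteFieldU8;
///
/// let pdu_conf = CommonPduConfig::new_with_byte_fields(
///     UnsignedByteFieldU8::new(1),
///     UnsignedByteFieldU8::new(2),
///     UnsignedByteFieldU8::new(3),
/// )
/// .expect("creating common PDU config failed");
/// let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
/// let metadata_params = MetadataGenericParams::new(false, ChecksumType::Crc32, 10);
/// // Arbitrary example file names.
/// let src = Lv::new_from_str("hello.txt").expect("creating source LV failed");
/// let dest = Lv::new_from_str("hello2.txt").expect("creating destination LV failed");
/// // Serialize a single message-to-user TLV into the options buffer.
/// let msg_to_user: [u8; 4] = [1, 2, 3, 4];
/// let tlv = Tlv::new(TlvType::MsgToUser, &msg_to_user).expect("creating TLV failed");
/// let mut opts_buf: [u8; 16] = [0; 16];
/// let opts_len = tlv.write_to_bytes(&mut opts_buf).expect("writing TLV failed");
/// let metadata_pdu = MetadataPdu::new_with_opts(
///     pdu_header,
///     metadata_params,
///     src,
///     dest,
///     &opts_buf[..opts_len],
/// );
/// let mut accumulated_len = 0;
/// for option in metadata_pdu.options_iter().expect("options should be present") {
///     accumulated_len += option.len_full();
/// }
/// // All options were parsed successfully if the accumulated length matches the raw length.
/// assert_eq!(accumulated_len, metadata_pdu.options().unwrap().len());
/// ```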
pub struct OptionsIter<'opts> {
opt_buf: &'opts [u8],
current_idx: usize,
}
impl<'opts> Iterator for OptionsIter<'opts> {
type Item = Tlv<'opts>;
fn next(&mut self) -> Option<Self::Item> {
if self.current_idx == self.opt_buf.len() {
return None;
}
let tlv = Tlv::from_bytes(&self.opt_buf[self.current_idx..]);
        // Rust iterators are not fallible, so the error cannot be propagated here.
if tlv.is_err() {
return None;
}
let tlv = tlv.unwrap();
self.current_idx += tlv.len_full();
Some(tlv)
}
}
/// Metadata PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.2.5.
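///
/// # Example
///
/// A minimal serialization round-trip sketch without options, mirroring the unit tests of this
/// file. The `spacepackets::...` paths and file names are illustrative assumptions.
///
/// ```
/// use spacepackets::cfdp::lv::Lv;
/// use spacepackets::cfdp::pdu::metadata::{MetadataGenericParams, MetadataPdu};
/// use spacepackets::cfdp::pdu::{CommonPduConfig, PduHeader};
/// use spacepackets::cfdp::ChecksumType;
/// use spacepackets::util::UnsignedByteFieldU8;
///
/// let pdu_conf = CommonPduConfig::new_with_byte_fields(
///     UnsignedByteFieldU8::new(1),
///     UnsignedByteFieldU8::new(2),
///     UnsignedByteFieldU8::new(3),
/// )
/// .expect("creating common PDU config failed");
/// let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
/// let params = MetadataGenericParams::new(false, ChecksumType::Crc32, 10);
/// let src = Lv::new_from_str("hello.txt").expect("creating source LV failed");
/// let dest = Lv::new_from_str("hello2.txt").expect("creating destination LV failed");
/// let metadata_pdu = MetadataPdu::new_no_opts(pdu_header, params, src, dest);
/// let mut buf: [u8; 64] = [0; 64];
/// let written = metadata_pdu.write_to_bytes(&mut buf).expect("writing Metadata PDU failed");
/// assert_eq!(written, metadata_pdu.written_len());
/// let read_back = MetadataPdu::from_bytes(&buf).expect("parsing Metadata PDU failed");
/// assert_eq!(read_back, metadata_pdu);
/// ```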
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct MetadataPdu<'src_name, 'dest_name, 'opts> {
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
#[cfg_attr(feature = "serde", serde(borrow))]
src_file_name: Lv<'src_name>,
#[cfg_attr(feature = "serde", serde(borrow))]
dest_file_name: Lv<'dest_name>,
options: Option<&'opts [u8]>,
}
impl<'src_name, 'dest_name, 'opts> MetadataPdu<'src_name, 'dest_name, 'opts> {
pub fn new_no_opts(
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
) -> Self {
Self::new(
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
None,
)
}
pub fn new_with_opts(
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
options: &'opts [u8],
) -> Self {
Self::new(
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
Some(options),
)
}
pub fn new(
mut pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
options: Option<&'opts [u8]>,
) -> Self {
pdu_header.pdu_type = PduType::FileDirective;
let mut pdu = Self {
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
options,
};
pdu.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
pub fn metadata_params(&self) -> &MetadataGenericParams {
&self.metadata_params
}
pub fn src_file_name(&self) -> Lv<'src_name> {
self.src_file_name
}
pub fn dest_file_name(&self) -> Lv<'dest_name> {
self.dest_file_name
}
pub fn options(&self) -> Option<&'opts [u8]> {
self.options
}
/// Yield an iterator which can be used to loop through all options. Returns [None] if the
/// options field is empty.
pub fn options_iter(&self) -> Option<OptionsIter<'opts>> {
self.options?;
Some(OptionsIter {
opt_buf: self.options.unwrap(),
current_idx: 0,
})
}
pub fn written_len(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
fn calc_pdu_datafield_len(&self) -> usize {
        // One directive type octet and one byte of the directive parameter field.
let mut len = 2;
if self.pdu_header.common_pdu_conf().file_flag == LargeFileFlag::Large {
len += 8;
} else {
len += 4;
}
len += self.src_file_name.len_full();
len += self.dest_file_name.len_full();
if let Some(opts) = self.options {
len += opts.len();
}
if self.pdu_header.pdu_conf.crc_flag == CrcFlag::WithCrc {
len += 2;
}
len
}
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.written_len();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: expected_len,
})
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::MetadataPdu as u8;
current_idx += 1;
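        // The closure requested flag occupies the second most significant bit of the directive
        // parameter byte, followed by reserved bits and the 4-bit checksum type. The shift below
        // matches the bit position parsed back in [Self::from_bytes].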
        buf[current_idx] = ((self.metadata_params.closure_requested as u8) << 6)
            | (self.metadata_params.checksum_type as u8);
current_idx += 1;
current_idx += write_fss_field(
self.pdu_header.common_pdu_conf().file_flag,
self.metadata_params.file_size,
&mut buf[current_idx..],
)?;
current_idx += self
.src_file_name
.write_to_be_bytes(&mut buf[current_idx..])?;
current_idx += self
.dest_file_name
.write_to_be_bytes(&mut buf[current_idx..])?;
if let Some(opts) = self.options {
buf[current_idx..current_idx + opts.len()].copy_from_slice(opts);
current_idx += opts.len();
}
if self.pdu_header.pdu_conf.crc_flag == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
pub fn from_bytes<'longest: 'src_name + 'dest_name + 'opts>(
buf: &'longest [u8],
) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let is_large_file = pdu_header.pdu_conf.file_flag == LargeFileFlag::Large;
        // Minimal length: 1 byte + FSS (4 bytes) + 2 empty LVs (1 byte each)
let mut min_expected_len = current_idx + 7;
if is_large_file {
min_expected_len += 4;
}
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType((buf[current_idx], FileDirectiveType::MetadataPdu))
})?;
if directive_type != FileDirectiveType::MetadataPdu {
return Err(PduError::WrongDirectiveType((
directive_type,
FileDirectiveType::MetadataPdu,
)));
}
current_idx += 1;
let (fss_len, file_size) =
read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx + 1..]);
let metadata_params = MetadataGenericParams {
closure_requested: ((buf[current_idx] >> 6) & 0b1) != 0,
checksum_type: ChecksumType::try_from(buf[current_idx] & 0b1111)
.map_err(|_| PduError::InvalidChecksumType(buf[current_idx] & 0b1111))?,
file_size,
};
current_idx += 1 + fss_len;
let src_file_name = Lv::from_bytes(&buf[current_idx..])?;
current_idx += src_file_name.len_full();
let dest_file_name = Lv::from_bytes(&buf[current_idx..])?;
current_idx += dest_file_name.len_full();
// All left-over bytes are options.
let mut options = None;
if current_idx < full_len_without_crc {
options = Some(&buf[current_idx..full_len_without_crc]);
}
Ok(Self {
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
options,
})
}
}
#[cfg(test)]
pub mod tests {
use crate::cfdp::lv::Lv;
use crate::cfdp::pdu::metadata::{
build_metadata_opts_from_slice, build_metadata_opts_from_vec, MetadataGenericParams,
MetadataPdu,
};
use crate::cfdp::pdu::tests::{common_pdu_conf, verify_raw_header};
use crate::cfdp::pdu::{FileDirectiveType, PduHeader};
use crate::cfdp::tlv::{Tlv, TlvType};
use crate::cfdp::{
ChecksumType, CrcFlag, LargeFileFlag, PduType, SegmentMetadataFlag, SegmentationControl,
};
use std::vec;
const SRC_FILENAME: &'static str = "hello-world.txt";
const DEST_FILENAME: &'static str = "hello-world2.txt";
fn generic_metadata_pdu<'opts>(
crc_flag: CrcFlag,
fss: LargeFileFlag,
opts: Option<&'opts [u8]>,
) -> (
Lv<'static>,
Lv<'static>,
MetadataPdu<'static, 'static, 'opts>,
) {
let pdu_header = PduHeader::new_no_file_data(common_pdu_conf(crc_flag, fss), 0);
let metadata_params = MetadataGenericParams::new(false, ChecksumType::Crc32, 0x1010);
let src_filename = Lv::new_from_str(SRC_FILENAME).expect("Generating string LV failed");
let dest_filename =
Lv::new_from_str(DEST_FILENAME).expect("Generating destination LV failed");
(
src_filename,
dest_filename,
MetadataPdu::new(
pdu_header,
metadata_params,
src_filename,
dest_filename,
opts,
),
)
}
#[test]
fn test_basic() {
let (src_filename, dest_filename, metadata_pdu) =
generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Normal, None);
assert_eq!(
metadata_pdu.written_len(),
metadata_pdu.pdu_header().header_len()
+ 1
+ 1
+ 4
+ src_filename.len_full()
+ dest_filename.len_full()
);
assert_eq!(metadata_pdu.src_file_name(), src_filename);
assert_eq!(metadata_pdu.dest_file_name(), dest_filename);
assert_eq!(metadata_pdu.options(), None);
}
#[test]
fn test_serialization() {
let (src_filename, dest_filename, metadata_pdu) =
generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Normal, None);
let mut buf: [u8; 64] = [0; 64];
let res = metadata_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header.header_len()
+ 1
+ 1
+ 4
+ src_filename.len_full()
+ dest_filename.len_full()
);
verify_raw_header(metadata_pdu.pdu_header(), &buf);
assert_eq!(buf[7], FileDirectiveType::MetadataPdu as u8);
assert_eq!(buf[8] >> 6, false as u8);
assert_eq!(buf[8] & 0b1111, ChecksumType::Crc32 as u8);
assert_eq!(u32::from_be_bytes(buf[9..13].try_into().unwrap()), 0x1010);
let mut current_idx = 13;
let src_name_from_raw =
Lv::from_bytes(&buf[current_idx..]).expect("Creating source name LV failed");
assert_eq!(src_name_from_raw, src_filename);
current_idx += src_name_from_raw.len_full();
let dest_name_from_raw =
Lv::from_bytes(&buf[current_idx..]).expect("Creating dest name LV failed");
assert_eq!(dest_name_from_raw, dest_filename);
current_idx += dest_name_from_raw.len_full();
// No options, so no additional data here.
assert_eq!(current_idx, written);
}
#[test]
fn test_deserialization() {
let (_, _, metadata_pdu) =
generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Normal, None);
let mut buf: [u8; 64] = [0; 64];
metadata_pdu.write_to_bytes(&mut buf).unwrap();
let pdu_read_back = MetadataPdu::from_bytes(&buf);
assert!(pdu_read_back.is_ok());
let pdu_read_back = pdu_read_back.unwrap();
assert_eq!(pdu_read_back, metadata_pdu);
}
#[test]
fn test_with_crc_flag() {
let (src_filename, dest_filename, metadata_pdu) =
generic_metadata_pdu(CrcFlag::WithCrc, LargeFileFlag::Normal, None);
let mut buf: [u8; 64] = [0; 64];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header().header_len()
+ 1
+ 1
+ core::mem::size_of::<u32>()
+ src_filename.len_full()
+ dest_filename.len_full()
+ 2
);
let pdu_read_back = MetadataPdu::from_bytes(&buf).unwrap();
assert_eq!(pdu_read_back, metadata_pdu);
}
#[test]
fn test_with_large_file_flag() {
let (src_filename, dest_filename, metadata_pdu) =
generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Large, None);
let mut buf: [u8; 64] = [0; 64];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header().header_len()
+ 1
+ 1
+ core::mem::size_of::<u64>()
+ src_filename.len_full()
+ dest_filename.len_full()
);
let pdu_read_back = MetadataPdu::from_bytes(&buf).unwrap();
assert_eq!(pdu_read_back, metadata_pdu);
}
#[test]
fn test_opts_builders() {
let tlv1 = Tlv::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap();
let tlv_slice = [tlv1, tlv2];
let mut buf: [u8; 32] = [0; 32];
let opts = build_metadata_opts_from_slice(&mut buf, &tlv_slice);
assert!(opts.is_ok());
let opts_len = opts.unwrap();
assert_eq!(opts_len, tlv1.len_full() + tlv2.len_full());
let tlv1_conv_back = Tlv::from_bytes(&buf).unwrap();
assert_eq!(tlv1_conv_back, tlv1);
let tlv2_conv_back = Tlv::from_bytes(&buf[tlv1_conv_back.len_full()..]).unwrap();
assert_eq!(tlv2_conv_back, tlv2);
}
#[test]
fn test_opts_builders_from_vec() {
let tlv1 = Tlv::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap();
let tlv_vec = vec![tlv1, tlv2];
let mut buf: [u8; 32] = [0; 32];
let opts = build_metadata_opts_from_vec(&mut buf, &tlv_vec);
assert!(opts.is_ok());
let opts_len = opts.unwrap();
assert_eq!(opts_len, tlv1.len_full() + tlv2.len_full());
let tlv1_conv_back = Tlv::from_bytes(&buf).unwrap();
assert_eq!(tlv1_conv_back, tlv1);
let tlv2_conv_back = Tlv::from_bytes(&buf[tlv1_conv_back.len_full()..]).unwrap();
assert_eq!(tlv2_conv_back, tlv2);
}
#[test]
fn test_with_opts() {
let tlv1 = Tlv::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap();
let tlv_vec = vec![tlv1, tlv2];
let mut opts_buf: [u8; 32] = [0; 32];
let opts_len = build_metadata_opts_from_vec(&mut opts_buf, &tlv_vec).unwrap();
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
Some(&opts_buf[..opts_len]),
);
let mut buf: [u8; 128] = [0; 128];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header.header_len()
+ 1
+ 1
+ 4
+ src_filename.len_full()
+ dest_filename.len_full()
+ opts_len
);
let pdu_read_back = MetadataPdu::from_bytes(&buf).unwrap();
assert_eq!(pdu_read_back, metadata_pdu);
let opts_iter = pdu_read_back.options_iter();
assert!(opts_iter.is_some());
let opts_iter = opts_iter.unwrap();
let mut accumulated_len = 0;
for (idx, opt) in opts_iter.enumerate() {
assert_eq!(tlv_vec[idx], opt);
accumulated_len += opt.len_full();
}
assert_eq!(accumulated_len, pdu_read_back.options().unwrap().len());
}
#[test]
fn test_corrects_pdu_header() {
let pdu_header = PduHeader::new_for_file_data(
common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
0,
SegmentMetadataFlag::NotPresent,
SegmentationControl::NoRecordBoundaryPreservation,
);
let metadata_params = MetadataGenericParams::new(false, ChecksumType::Crc32, 10);
let src_filename = Lv::new_from_str(SRC_FILENAME).expect("Generating string LV failed");
let dest_filename =
Lv::new_from_str(DEST_FILENAME).expect("Generating destination LV failed");
let metadata_pdu =
MetadataPdu::new_no_opts(pdu_header, metadata_params, src_filename, dest_filename);
assert_eq!(metadata_pdu.pdu_header().pdu_type(), PduType::FileDirective);
}
}

View File

@ -1,976 +0,0 @@
//! CFDP Packet Data Unit (PDU) support.
use crate::cfdp::*;
use crate::util::{UnsignedByteField, UnsignedByteFieldU8, UnsignedEnum};
use crate::CRC_CCITT_FALSE;
use crate::{ByteConversionError, SizeMissmatch};
use core::fmt::{Display, Formatter};
#[cfg(feature = "std")]
use std::error::Error;
pub mod eof;
pub mod file_data;
pub mod finished;
pub mod metadata;
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum FileDirectiveType {
EofPdu = 0x04,
FinishedPdu = 0x05,
AckPdu = 0x06,
MetadataPdu = 0x07,
NakPdu = 0x08,
PromptPdu = 0x09,
KeepAlivePdu = 0x0c,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum PduError {
ByteConversionError(ByteConversionError),
/// Found version ID invalid, not equal to [CFDP_VERSION_2].
CfdpVersionMissmatch(u8),
/// Invalid length for the entity ID detected. Only the values 1, 2, 4 and 8 are supported.
InvalidEntityLen(u8),
    /// Invalid length for the transaction sequence number detected. Only the values 1, 2, 4 and 8 are supported.
InvalidTransactionSeqNumLen(u8),
/// The first entry will be the source entity ID length, the second one the destination entity
/// ID length.
SourceDestIdLenMissmatch((usize, usize)),
    /// The first tuple entry will be the found directive type, the second entry the expected
    /// directive type.
WrongDirectiveType((FileDirectiveType, FileDirectiveType)),
/// The directive type field contained a value not in the range of permitted values.
    /// The first tuple entry will be the found raw number, the second entry the expected
    /// directive type.
InvalidDirectiveType((u8, FileDirectiveType)),
/// Invalid condition code. Contains the raw detected value.
InvalidConditionCode(u8),
/// Invalid checksum type which is not part of the checksums listed in the
/// [SANA Checksum Types registry](https://sanaregistry.org/r/checksum_identifiers/).
InvalidChecksumType(u8),
FileSizeTooLarge(u64),
/// If the CRC flag for a PDU is enabled and the checksum check fails. Contains raw 16-bit CRC.
ChecksumError(u16),
/// Generic error for invalid PDU formats.
FormatError,
/// Error handling a TLV field.
TlvLvError(TlvLvError),
}
impl Display for PduError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
PduError::InvalidEntityLen(raw_id) => {
write!(
f,
"Invalid PDU entity ID length {raw_id}, only [1, 2, 4, 8] are allowed"
)
}
PduError::InvalidTransactionSeqNumLen(raw_id) => {
write!(
f,
"invalid PDUtransaction seq num length {raw_id}, only [1, 2, 4, 8] are allowed"
)
}
PduError::CfdpVersionMissmatch(raw) => {
write!(
f,
"cfdp version missmatch, found {raw}, expected {CFDP_VERSION_2}"
)
}
PduError::SourceDestIdLenMissmatch((src_len, dest_len)) => {
write!(
f,
"missmatch of PDU source length {src_len} and destination length {dest_len}"
)
}
PduError::ByteConversionError(e) => {
write!(f, "{}", e)
}
PduError::FileSizeTooLarge(value) => {
write!(f, "file size value {value} exceeds allowed 32 bit width")
}
PduError::WrongDirectiveType((found, expected)) => {
write!(f, "found directive type {found:?}, expected {expected:?}")
}
PduError::InvalidConditionCode(raw_code) => {
write!(f, "found invalid condition code with raw value {raw_code}")
}
PduError::InvalidDirectiveType((found, expected)) => {
write!(
f,
"invalid directive type value {found}, expected {expected:?} ({})",
*expected as u8
)
}
PduError::InvalidChecksumType(checksum_type) => {
write!(f, "invalid checksum type {checksum_type}")
}
PduError::ChecksumError(checksum) => {
write!(f, "checksum error for CRC {checksum:#04x}")
}
PduError::TlvLvError(error) => {
write!(f, "pdu tlv error: {error}")
}
PduError::FormatError => {
write!(f, "generic PDU format error")
}
}
}
}
#[cfg(feature = "std")]
impl Error for PduError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
PduError::ByteConversionError(e) => Some(e),
PduError::TlvLvError(e) => Some(e),
_ => None,
}
}
}
impl From<ByteConversionError> for PduError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value)
}
}
impl From<TlvLvError> for PduError {
fn from(e: TlvLvError) -> Self {
Self::TlvLvError(e)
}
}
/// Common configuration fields for a PDU.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct CommonPduConfig {
source_entity_id: UnsignedByteField,
dest_entity_id: UnsignedByteField,
pub transaction_seq_num: UnsignedByteField,
pub trans_mode: TransmissionMode,
pub file_flag: LargeFileFlag,
pub crc_flag: CrcFlag,
pub direction: Direction,
}
// TODO: Builder pattern might be applicable here..
impl CommonPduConfig {
pub fn new(
source_id: impl Into<UnsignedByteField>,
dest_id: impl Into<UnsignedByteField>,
transaction_seq_num: impl Into<UnsignedByteField>,
trans_mode: TransmissionMode,
file_flag: LargeFileFlag,
crc_flag: CrcFlag,
direction: Direction,
) -> Result<Self, PduError> {
let (source_id, dest_id) = Self::source_dest_id_check(source_id, dest_id)?;
let transaction_seq_num = transaction_seq_num.into();
if transaction_seq_num.size() != 1
&& transaction_seq_num.size() != 2
&& transaction_seq_num.size() != 4
&& transaction_seq_num.size() != 8
{
return Err(PduError::InvalidTransactionSeqNumLen(
transaction_seq_num.size() as u8,
));
}
Ok(Self {
source_entity_id: source_id,
dest_entity_id: dest_id,
transaction_seq_num,
trans_mode,
file_flag,
crc_flag,
direction,
})
}
pub fn new_with_byte_fields(
source_id: impl Into<UnsignedByteField>,
dest_id: impl Into<UnsignedByteField>,
transaction_seq_num: impl Into<UnsignedByteField>,
) -> Result<Self, PduError> {
Self::new(
source_id,
dest_id,
transaction_seq_num,
TransmissionMode::Acknowledged,
LargeFileFlag::Normal,
CrcFlag::NoCrc,
Direction::TowardsReceiver,
)
}
pub fn source_id(&self) -> UnsignedByteField {
self.source_entity_id
}
fn source_dest_id_check(
source_id: impl Into<UnsignedByteField>,
dest_id: impl Into<UnsignedByteField>,
) -> Result<(UnsignedByteField, UnsignedByteField), PduError> {
let source_id = source_id.into();
let dest_id = dest_id.into();
if source_id.size() != dest_id.size() {
return Err(PduError::SourceDestIdLenMissmatch((
source_id.size(),
dest_id.size(),
)));
}
if source_id.size() != 1
&& source_id.size() != 2
&& source_id.size() != 4
&& source_id.size() != 8
{
return Err(PduError::InvalidEntityLen(source_id.size() as u8));
}
Ok((source_id, dest_id))
}
pub fn set_source_and_dest_id(
&mut self,
source_id: impl Into<UnsignedByteField>,
dest_id: impl Into<UnsignedByteField>,
) -> Result<(), PduError> {
let (source_id, dest_id) = Self::source_dest_id_check(source_id, dest_id)?;
self.source_entity_id = source_id;
self.dest_entity_id = dest_id;
Ok(())
}
pub fn dest_id(&self) -> UnsignedByteField {
self.dest_entity_id
}
}
impl Default for CommonPduConfig {
    /// The default for the source ID, destination ID and the transaction sequence number is the
    /// [UnsignedByteFieldU8] with an initial value of 0.
fn default() -> Self {
// The new function can not fail for these input parameters.
Self::new(
UnsignedByteFieldU8::new(0),
UnsignedByteFieldU8::new(0),
UnsignedByteFieldU8::new(0),
TransmissionMode::Acknowledged,
LargeFileFlag::Normal,
CrcFlag::NoCrc,
Direction::TowardsReceiver,
)
.unwrap()
}
}
pub const FIXED_HEADER_LEN: usize = 4;
/// Abstraction for the PDU header common to all CFDP PDUs.
///
/// For detailed information, refer to chapter 5.1 of the CFDP standard.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PduHeader {
pdu_type: PduType,
pdu_conf: CommonPduConfig,
seg_metadata_flag: SegmentMetadataFlag,
seg_ctrl: SegmentationControl,
pdu_datafield_len: u16,
}
impl PduHeader {
pub fn new_for_file_data(
pdu_conf: CommonPduConfig,
pdu_datafield_len: u16,
seg_metadata_flag: SegmentMetadataFlag,
seg_ctrl: SegmentationControl,
) -> Self {
Self::new_generic(
PduType::FileData,
pdu_conf,
pdu_datafield_len,
seg_metadata_flag,
seg_ctrl,
)
}
pub fn new_for_file_data_default(pdu_conf: CommonPduConfig, pdu_datafield_len: u16) -> Self {
Self::new_generic(
PduType::FileData,
pdu_conf,
pdu_datafield_len,
SegmentMetadataFlag::NotPresent,
SegmentationControl::NoRecordBoundaryPreservation,
)
}
pub fn new_no_file_data(pdu_conf: CommonPduConfig, pdu_datafield_len: u16) -> Self {
Self::new_generic(
PduType::FileDirective,
pdu_conf,
pdu_datafield_len,
SegmentMetadataFlag::NotPresent,
SegmentationControl::NoRecordBoundaryPreservation,
)
}
pub fn new_generic(
pdu_type: PduType,
pdu_conf: CommonPduConfig,
pdu_datafield_len: u16,
seg_metadata_flag: SegmentMetadataFlag,
seg_ctrl: SegmentationControl,
) -> Self {
Self {
pdu_type,
pdu_conf,
seg_metadata_flag,
seg_ctrl,
pdu_datafield_len,
}
}
/// Returns only the length of the PDU header when written to a raw buffer.
pub fn header_len(&self) -> usize {
FIXED_HEADER_LEN
+ self.pdu_conf.source_entity_id.size()
+ self.pdu_conf.transaction_seq_num.size()
+ self.pdu_conf.dest_entity_id.size()
}
/// Returns the full length of the PDU when written to a raw buffer, which is the header length
/// plus the PDU datafield length.
pub fn pdu_len(&self) -> usize {
self.header_len() + self.pdu_datafield_len as usize
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
        // Internal note: There is currently no way to construct a PDU configuration with
        // mismatched source and destination ID lengths, but this check is still kept for
        // defensive programming.
if self.pdu_conf.source_entity_id.size() != self.pdu_conf.dest_entity_id.size() {
return Err(PduError::SourceDestIdLenMissmatch((
self.pdu_conf.source_entity_id.size(),
self.pdu_conf.dest_entity_id.size(),
)));
}
if buf.len()
< FIXED_HEADER_LEN
+ self.pdu_conf.source_entity_id.size()
+ self.pdu_conf.transaction_seq_num.size()
{
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: FIXED_HEADER_LEN,
})
.into());
}
let mut current_idx = 0;
buf[current_idx] = (CFDP_VERSION_2 << 5)
| ((self.pdu_type as u8) << 4)
| ((self.pdu_conf.direction as u8) << 3)
| ((self.pdu_conf.trans_mode as u8) << 2)
| ((self.pdu_conf.crc_flag as u8) << 1)
| (self.pdu_conf.file_flag as u8);
current_idx += 1;
buf[current_idx..current_idx + 2].copy_from_slice(&self.pdu_datafield_len.to_be_bytes());
current_idx += 2;
buf[current_idx] = ((self.seg_ctrl as u8) << 7)
| (((self.pdu_conf.source_entity_id.size() - 1) as u8) << 4)
| ((self.seg_metadata_flag as u8) << 3)
| ((self.pdu_conf.transaction_seq_num.size() - 1) as u8);
current_idx += 1;
self.pdu_conf.source_entity_id.write_to_be_bytes(
&mut buf[current_idx..current_idx + self.pdu_conf.source_entity_id.size()],
)?;
current_idx += self.pdu_conf.source_entity_id.size();
self.pdu_conf.transaction_seq_num.write_to_be_bytes(
&mut buf[current_idx..current_idx + self.pdu_conf.transaction_seq_num.size()],
)?;
current_idx += self.pdu_conf.transaction_seq_num.size();
self.pdu_conf.dest_entity_id.write_to_be_bytes(
&mut buf[current_idx..current_idx + self.pdu_conf.dest_entity_id.size()],
)?;
current_idx += self.pdu_conf.dest_entity_id.size();
Ok(current_idx)
}
/// This function first verifies that the buffer can hold the full length of the PDU parsed from
/// the header. Then, it verifies the checksum as specified in the standard if the CRC flag
/// of the PDU header is set.
///
/// This function will return the PDU length excluding the 2 CRC bytes on success. If the CRC
/// flag is not set, it will simply return the PDU length.
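    ///
    /// # Example
    ///
    /// A minimal sketch using the Metadata PDU of this crate, which appends a CRC when the CRC
    /// flag is set. The `spacepackets::...` paths and file names are illustrative assumptions.
    ///
    /// ```
    /// use spacepackets::cfdp::lv::Lv;
    /// use spacepackets::cfdp::pdu::metadata::{MetadataGenericParams, MetadataPdu};
    /// use spacepackets::cfdp::pdu::{CommonPduConfig, PduHeader};
    /// use spacepackets::cfdp::{ChecksumType, CrcFlag};
    /// use spacepackets::util::UnsignedByteFieldU8;
    ///
    /// let mut pdu_conf = CommonPduConfig::new_with_byte_fields(
    ///     UnsignedByteFieldU8::new(1),
    ///     UnsignedByteFieldU8::new(2),
    ///     UnsignedByteFieldU8::new(3),
    /// )
    /// .expect("creating common PDU config failed");
    /// pdu_conf.crc_flag = CrcFlag::WithCrc;
    /// let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
    /// let params = MetadataGenericParams::new(false, ChecksumType::Crc32, 10);
    /// let src = Lv::new_from_str("hello.txt").expect("creating source LV failed");
    /// let dest = Lv::new_from_str("hello2.txt").expect("creating destination LV failed");
    /// let metadata_pdu = MetadataPdu::new_no_opts(pdu_header, params, src, dest);
    /// let mut buf: [u8; 64] = [0; 64];
    /// metadata_pdu.write_to_bytes(&mut buf).expect("writing Metadata PDU failed");
    /// let (header, _) = PduHeader::from_bytes(&buf).expect("parsing PDU header failed");
    /// // The returned length excludes the two CRC bytes at the end of the PDU.
    /// assert_eq!(
    ///     header.verify_length_and_checksum(&buf).expect("CRC check failed"),
    ///     header.pdu_len() - 2
    /// );
    /// ```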
pub fn verify_length_and_checksum(&self, buf: &[u8]) -> Result<usize, PduError> {
if buf.len() < self.pdu_len() {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: self.pdu_len(),
})
.into());
}
if self.pdu_conf.crc_flag == CrcFlag::WithCrc {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(&buf[..self.pdu_len()]);
if digest.finalize() != 0 {
return Err(PduError::ChecksumError(u16::from_be_bytes(
buf[self.pdu_len() - 2..self.pdu_len()].try_into().unwrap(),
)));
}
return Ok(self.pdu_len() - 2);
}
Ok(self.pdu_len())
}
/// Please note that this function will not verify that the passed buffer can hold the full
/// PDU length. This allows recovering the header portion even if the data field length is
/// invalid. This function will also not do the CRC procedure specified in chapter 4.1.1
/// and 4.1.2 because performing the CRC procedure requires the buffer to be large enough
/// to hold the full PDU.
///
/// Both functions can however be performed with the [Self::verify_length_and_checksum]
/// function.
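    ///
    /// # Example
    ///
    /// A minimal header round-trip sketch, mirroring the unit tests below. The
    /// `spacepackets::...` paths are illustrative assumptions.
    ///
    /// ```
    /// use spacepackets::cfdp::pdu::{CommonPduConfig, PduHeader};
    /// use spacepackets::util::UnsignedByteFieldU8;
    ///
    /// let pdu_conf = CommonPduConfig::new_with_byte_fields(
    ///     UnsignedByteFieldU8::new(1),
    ///     UnsignedByteFieldU8::new(2),
    ///     UnsignedByteFieldU8::new(3),
    /// )
    /// .expect("creating common PDU config failed");
    /// let pdu_header = PduHeader::new_no_file_data(pdu_conf, 5);
    /// let mut buf: [u8; 16] = [0; 16];
    /// let written = pdu_header.write_to_bytes(&mut buf).expect("writing PDU header failed");
    /// let (read_back, read_len) = PduHeader::from_bytes(&buf).expect("parsing PDU header failed");
    /// assert_eq!(read_len, written);
    /// assert_eq!(read_back, pdu_header);
    /// ```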
pub fn from_bytes(buf: &[u8]) -> Result<(Self, usize), PduError> {
if buf.len() < FIXED_HEADER_LEN {
return Err(PduError::ByteConversionError(
ByteConversionError::FromSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: FIXED_HEADER_LEN,
}),
));
}
let cfdp_version_raw = (buf[0] >> 5) & 0b111;
if cfdp_version_raw != CFDP_VERSION_2 {
return Err(PduError::CfdpVersionMissmatch(cfdp_version_raw));
}
// unwrap for single bit fields: This operation will always succeed.
let pdu_type = PduType::try_from((buf[0] >> 4) & 0b1).unwrap();
let direction = Direction::try_from((buf[0] >> 3) & 0b1).unwrap();
let trans_mode = TransmissionMode::try_from((buf[0] >> 2) & 0b1).unwrap();
let crc_flag = CrcFlag::try_from((buf[0] >> 1) & 0b1).unwrap();
let file_flag = LargeFileFlag::try_from(buf[0] & 0b1).unwrap();
let pdu_datafield_len = u16::from_be_bytes(buf[1..3].try_into().unwrap());
let seg_ctrl = SegmentationControl::try_from((buf[3] >> 7) & 0b1).unwrap();
let expected_len_entity_ids = (((buf[3] >> 4) & 0b111) + 1) as usize;
if (expected_len_entity_ids != 1)
&& (expected_len_entity_ids != 2)
&& (expected_len_entity_ids != 4)
&& (expected_len_entity_ids != 8)
{
return Err(PduError::InvalidEntityLen(expected_len_entity_ids as u8));
}
let seg_metadata_flag = SegmentMetadataFlag::try_from((buf[3] >> 3) & 0b1).unwrap();
let expected_len_seq_num = ((buf[3] & 0b111) + 1) as usize;
if (expected_len_seq_num != 1)
&& (expected_len_seq_num != 2)
&& (expected_len_seq_num != 4)
&& (expected_len_seq_num != 8)
{
return Err(PduError::InvalidTransactionSeqNumLen(
expected_len_seq_num as u8,
));
}
if buf.len() < (4 + 2 * expected_len_entity_ids + expected_len_seq_num) {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: 4 + 2 * expected_len_entity_ids + expected_len_seq_num,
})
.into());
}
let mut current_idx = 4;
// It is okay to unwrap here because we checked the validity of the expected length and of
// the remaining buffer length.
let source_id =
UnsignedByteField::new_from_be_bytes(expected_len_entity_ids, &buf[current_idx..])
.unwrap();
current_idx += expected_len_entity_ids;
let transaction_seq_num =
UnsignedByteField::new_from_be_bytes(expected_len_seq_num, &buf[current_idx..])
.unwrap();
current_idx += expected_len_seq_num;
let dest_id =
UnsignedByteField::new_from_be_bytes(expected_len_entity_ids, &buf[current_idx..])
.unwrap();
current_idx += expected_len_entity_ids;
let common_pdu_conf = CommonPduConfig::new(
source_id,
dest_id,
transaction_seq_num,
trans_mode,
file_flag,
crc_flag,
direction,
)
.unwrap();
Ok((
PduHeader {
pdu_type,
pdu_conf: common_pdu_conf,
seg_metadata_flag,
seg_ctrl,
pdu_datafield_len,
},
current_idx,
))
}
pub fn pdu_type(&self) -> PduType {
self.pdu_type
}
pub fn common_pdu_conf(&self) -> &CommonPduConfig {
&self.pdu_conf
}
pub fn seg_metadata_flag(&self) -> SegmentMetadataFlag {
self.seg_metadata_flag
}
pub fn seg_ctrl(&self) -> SegmentationControl {
self.seg_ctrl
}
}
pub(crate) fn write_fss_field(
file_flag: LargeFileFlag,
file_size: u64,
buf: &mut [u8],
) -> Result<usize, PduError> {
Ok(if file_flag == LargeFileFlag::Large {
buf[..core::mem::size_of::<u64>()].copy_from_slice(&file_size.to_be_bytes());
core::mem::size_of::<u64>()
} else {
if file_size > u32::MAX as u64 {
return Err(PduError::FileSizeTooLarge(file_size));
}
buf[..core::mem::size_of::<u32>()].copy_from_slice(&(file_size as u32).to_be_bytes());
core::mem::size_of::<u32>()
})
}
pub(crate) fn read_fss_field(file_flag: LargeFileFlag, buf: &[u8]) -> (usize, u64) {
if file_flag == LargeFileFlag::Large {
(
core::mem::size_of::<u64>(),
u64::from_be_bytes(buf[..core::mem::size_of::<u64>()].try_into().unwrap()),
)
} else {
(
core::mem::size_of::<u32>(),
u32::from_be_bytes(buf[..core::mem::size_of::<u32>()].try_into().unwrap()).into(),
)
}
}
// This is a generic length check applicable to most PDU deserializations. It first checks whether
// a given buffer can hold an expected minimum size, and then it checks whether the PDU datafield
// length is larger than that expected minimum size.
pub(crate) fn generic_length_checks_pdu_deserialization(
buf: &[u8],
min_expected_len: usize,
full_len_without_crc: usize,
) -> Result<(), ByteConversionError> {
// Buffer too short to hold additional expected minimum datasize.
if buf.len() < min_expected_len {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: min_expected_len,
}));
}
// This can happen if the PDU datafield length value is invalid.
if full_len_without_crc < min_expected_len {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
found: full_len_without_crc,
expected: min_expected_len,
}));
}
Ok(())
}
pub(crate) fn add_pdu_crc(buf: &mut [u8], mut current_idx: usize) -> usize {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(&buf[..current_idx]);
buf[current_idx..current_idx + 2].copy_from_slice(&digest.finalize().to_be_bytes());
current_idx += 2;
current_idx
}
#[cfg(test)]
mod tests {
use crate::cfdp::pdu::{CommonPduConfig, PduError, PduHeader, FIXED_HEADER_LEN};
use crate::cfdp::{
CrcFlag, Direction, LargeFileFlag, PduType, SegmentMetadataFlag, SegmentationControl,
TransmissionMode, CFDP_VERSION_2,
};
use crate::util::{
UbfU8, UnsignedByteField, UnsignedByteFieldU16, UnsignedByteFieldU8, UnsignedEnum,
};
use crate::ByteConversionError;
use std::format;
pub(crate) fn common_pdu_conf(crc_flag: CrcFlag, fss: LargeFileFlag) -> CommonPduConfig {
let src_id = UbfU8::new(5);
let dest_id = UbfU8::new(10);
let transaction_seq_num = UbfU8::new(20);
let mut pdu_conf =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_seq_num)
.expect("Generating common PDU config");
pdu_conf.crc_flag = crc_flag;
pdu_conf.file_flag = fss;
pdu_conf
}
pub(crate) fn verify_raw_header(pdu_conf: &PduHeader, buf: &[u8]) {
assert_eq!((buf[0] >> 5) & 0b111, CFDP_VERSION_2);
// File directive
assert_eq!((buf[0] >> 4) & 1, pdu_conf.pdu_type as u8);
// Towards receiver
assert_eq!((buf[0] >> 3) & 1, pdu_conf.pdu_conf.direction as u8);
// Acknowledged
assert_eq!((buf[0] >> 2) & 1, pdu_conf.pdu_conf.trans_mode as u8);
// No CRC
assert_eq!((buf[0] >> 1) & 1, pdu_conf.pdu_conf.crc_flag as u8);
// Regular file size
assert_eq!(buf[0] & 1, pdu_conf.pdu_conf.file_flag as u8);
let pdu_datafield_len = u16::from_be_bytes(buf[1..3].try_into().unwrap());
assert_eq!(pdu_datafield_len, pdu_conf.pdu_datafield_len);
// No record boundary preservation
assert_eq!((buf[3] >> 7) & 1, pdu_conf.seg_ctrl as u8);
// Entity ID length raw value is actual number of octets - 1 => 0
let entity_id_len = pdu_conf.pdu_conf.source_entity_id.size();
assert_eq!((buf[3] >> 4) & 0b111, entity_id_len as u8 - 1);
// No segment metadata
assert_eq!((buf[3] >> 3) & 0b1, pdu_conf.seg_metadata_flag as u8);
// Transaction Sequence ID length raw value is actual number of octets - 1 => 0
let seq_num_len = pdu_conf.pdu_conf.transaction_seq_num.size();
assert_eq!(buf[3] & 0b111, seq_num_len as u8 - 1);
let mut current_idx = 4;
let mut byte_field_check = |field_len: usize, ubf: &UnsignedByteField| {
match field_len {
1 => assert_eq!(buf[current_idx], ubf.value() as u8),
2 => assert_eq!(
u16::from_be_bytes(
buf[current_idx..current_idx + field_len]
.try_into()
.unwrap()
),
ubf.value() as u16
),
4 => assert_eq!(
u32::from_be_bytes(
buf[current_idx..current_idx + field_len]
.try_into()
.unwrap()
),
ubf.value() as u32
),
8 => assert_eq!(
u64::from_be_bytes(
buf[current_idx..current_idx + field_len]
.try_into()
.unwrap()
),
ubf.value() as u64
),
_ => panic!("invalid entity ID length"),
}
current_idx += field_len
};
byte_field_check(entity_id_len, &pdu_conf.pdu_conf.source_entity_id);
byte_field_check(seq_num_len, &pdu_conf.pdu_conf.transaction_seq_num);
byte_field_check(entity_id_len, &pdu_conf.pdu_conf.dest_entity_id);
}
#[test]
fn test_basic_state() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
assert_eq!(pdu_header.pdu_type(), PduType::FileDirective);
let common_conf_ref = pdu_header.common_pdu_conf();
assert_eq!(*common_conf_ref, common_pdu_cfg);
// These should be 0 and ignored for non-filedata PDUs
assert_eq!(
pdu_header.seg_metadata_flag(),
SegmentMetadataFlag::NotPresent
);
assert_eq!(
pdu_header.seg_ctrl(),
SegmentationControl::NoRecordBoundaryPreservation
);
assert_eq!(pdu_header.pdu_datafield_len, 5);
assert_eq!(pdu_header.header_len(), 7);
}
#[test]
fn test_basic_state_default() {
let default_conf = CommonPduConfig::default();
assert_eq!(default_conf.source_id(), UnsignedByteFieldU8::new(0).into());
assert_eq!(default_conf.dest_id(), UnsignedByteFieldU8::new(0).into());
assert_eq!(
default_conf.transaction_seq_num,
UnsignedByteFieldU8::new(0).into()
);
assert_eq!(default_conf.trans_mode, TransmissionMode::Acknowledged);
assert_eq!(default_conf.direction, Direction::TowardsReceiver);
assert_eq!(default_conf.crc_flag, CrcFlag::NoCrc);
assert_eq!(default_conf.file_flag, LargeFileFlag::Normal);
}
    #[test]
    fn test_pdu_header_setter() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let mut common_pdu_cfg =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let other_src_id = UnsignedByteFieldU16::new(5);
let other_dest_id = UnsignedByteFieldU16::new(6);
let set_result = common_pdu_cfg.set_source_and_dest_id(other_src_id, other_dest_id);
assert!(set_result.is_ok());
assert_eq!(common_pdu_cfg.source_id(), other_src_id.into());
assert_eq!(common_pdu_cfg.dest_id(), other_dest_id.into());
}
#[test]
fn test_serialization_1() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
// 4 byte fixed header plus three bytes src, dest ID and transaction ID
assert_eq!(res.unwrap(), 7);
verify_raw_header(&pdu_header, &buf);
}
#[test]
fn test_deserialization_1() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
let deser_res = PduHeader::from_bytes(&buf);
assert!(deser_res.is_ok());
let (header_read_back, read_size) = deser_res.unwrap();
assert_eq!(read_size, 7);
assert_eq!(header_read_back, pdu_header);
}
#[test]
fn test_serialization_2() {
let src_id = UnsignedByteFieldU16::new(0x0001);
let dest_id = UnsignedByteFieldU16::new(0x0203);
let transaction_id = UnsignedByteFieldU16::new(0x0405);
let mut common_pdu_cfg =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
common_pdu_cfg.crc_flag = CrcFlag::WithCrc;
common_pdu_cfg.direction = Direction::TowardsSender;
common_pdu_cfg.trans_mode = TransmissionMode::Unacknowledged;
common_pdu_cfg.file_flag = LargeFileFlag::Large;
let pdu_header = PduHeader::new_for_file_data(
common_pdu_cfg,
5,
SegmentMetadataFlag::Present,
SegmentationControl::WithRecordBoundaryPreservation,
);
assert_eq!(pdu_header.header_len(), 10);
let mut buf: [u8; 16] = [0; 16];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok(), "{}", format!("Result {res:?} not okay"));
// 4 byte fixed header, 6 bytes additional fields
assert_eq!(res.unwrap(), 10);
verify_raw_header(&pdu_header, &buf);
}
#[test]
fn test_deserialization_2() {
let src_id = UnsignedByteFieldU16::new(0x0001);
let dest_id = UnsignedByteFieldU16::new(0x0203);
let transaction_id = UnsignedByteFieldU16::new(0x0405);
let mut common_pdu_cfg =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
common_pdu_cfg.crc_flag = CrcFlag::WithCrc;
common_pdu_cfg.direction = Direction::TowardsSender;
common_pdu_cfg.trans_mode = TransmissionMode::Unacknowledged;
common_pdu_cfg.file_flag = LargeFileFlag::Large;
let pdu_header = PduHeader::new_for_file_data(
common_pdu_cfg,
5,
SegmentMetadataFlag::Present,
SegmentationControl::WithRecordBoundaryPreservation,
);
let mut buf: [u8; 16] = [0; 16];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
let deser_res = PduHeader::from_bytes(&buf);
assert!(deser_res.is_ok());
let (header_read_back, read_size) = deser_res.unwrap();
assert_eq!(read_size, 10);
assert_eq!(header_read_back, pdu_header);
}
#[test]
fn test_invalid_raw_version() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
buf[0] &= !0b1110_0000;
buf[0] |= (CFDP_VERSION_2 + 1) << 5;
let res = PduHeader::from_bytes(&buf);
assert!(res.is_err());
let error = res.unwrap_err();
if let PduError::CfdpVersionMissmatch(raw_version) = error {
assert_eq!(raw_version, CFDP_VERSION_2 + 1);
} else {
panic!("invalid exception: {}", error);
}
}
#[test]
fn test_buf_too_small_1() {
let buf: [u8; 3] = [0; 3];
let res = PduHeader::from_bytes(&buf);
assert!(res.is_err());
let error = res.unwrap_err();
if let PduError::ByteConversionError(ByteConversionError::FromSliceTooSmall(missmatch)) =
error
{
assert_eq!(missmatch.found, 3);
assert_eq!(missmatch.expected, FIXED_HEADER_LEN);
} else {
panic!("invalid exception: {}", error);
}
}
#[test]
fn test_buf_too_small_2() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
let header = PduHeader::from_bytes(&buf[0..6]);
assert!(header.is_err());
let error = header.unwrap_err();
if let PduError::ByteConversionError(ByteConversionError::FromSliceTooSmall(missmatch)) =
error
{
assert_eq!(missmatch.found, 6);
assert_eq!(missmatch.expected, 7);
}
}
#[test]
fn test_invalid_seq_len() {
let src_id = UbfU8::new(1);
let dest_id = UbfU8::new(2);
let transaction_seq_id = UbfU8::new(3);
let invalid_byte_field = UnsignedByteField::new(3, 5);
let pdu_conf_res =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, invalid_byte_field);
assert!(pdu_conf_res.is_err());
let error = pdu_conf_res.unwrap_err();
if let PduError::InvalidTransactionSeqNumLen(len) = error {
assert_eq!(len, 3);
} else {
panic!("Invalid exception: {}", error)
}
let pdu_conf_res = CommonPduConfig::new_with_byte_fields(
invalid_byte_field,
invalid_byte_field,
transaction_seq_id,
);
assert!(pdu_conf_res.is_err());
let error = pdu_conf_res.unwrap_err();
if let PduError::InvalidEntityLen(len) = error {
assert_eq!(len, 3);
} else {
panic!("Invalid exception: {}", error)
}
}
#[test]
fn test_missmatch_src_dest_id() {
let src_id = UnsignedByteField::new(1, 5);
let dest_id = UnsignedByteField::new(2, 5);
let transaction_seq_id = UbfU8::new(3);
let pdu_conf_res =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_seq_id);
assert!(pdu_conf_res.is_err());
let error = pdu_conf_res.unwrap_err();
if let PduError::SourceDestIdLenMissmatch((src_len, dest_len)) = error {
assert_eq!(src_len, 1);
assert_eq!(dest_len, 2);
}
}
#[test]
fn test_invalid_raw_src_id_len() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
buf[3] &= !0b0111_0000;
// Equivalent to the length of three
buf[3] |= 0b10 << 4;
let header_res = PduHeader::from_bytes(&buf);
assert!(header_res.is_err());
let error = header_res.unwrap_err();
if let PduError::InvalidEntityLen(len) = error {
assert_eq!(len, 3);
} else {
panic!("invalid exception {:?}", error)
}
}
#[test]
fn test_invalid_transaction_seq_id_len() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
buf[3] &= !0b0000_0111;
// Equivalent to the length of three
buf[3] |= 0b10;
let header_res = PduHeader::from_bytes(&buf);
assert!(header_res.is_err());
let error = header_res.unwrap_err();
if let PduError::InvalidTransactionSeqNumLen(len) = error {
assert_eq!(len, 3);
} else {
panic!("invalid exception {:?}", error)
}
}
}

View File

@ -1,765 +0,0 @@
//! Generic CFDP type-length-value (TLV) abstraction as specified in CFDP 5.1.9.
use crate::cfdp::lv::{
generic_len_check_data_serialization, generic_len_check_deserialization, Lv, MIN_LV_LEN,
};
use crate::cfdp::TlvLvError;
use crate::util::{UnsignedByteField, UnsignedByteFieldError, UnsignedEnum};
use crate::{ByteConversionError, SizeMissmatch};
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
pub const MIN_TLV_LEN: usize = 2;
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum TlvType {
FilestoreRequest = 0x00,
FilestoreResponse = 0x01,
MsgToUser = 0x02,
FaultHandler = 0x04,
FlowLabel = 0x05,
EntityId = 0x06,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum TlvTypeField {
Standard(TlvType),
Custom(u8),
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum FilestoreActionCode {
CreateFile = 0b0000,
DeleteFile = 0b0001,
RenameFile = 0b0010,
    /// This operation appends one file to another. The first specified file forms the first
    /// part of the new file's content and also provides the name of the new file. This can be
    /// used to get functionality similar to the UNIX cat utility (albeit for only two files).
AppendFile = 0b0011,
/// This operation replaces the content of the first specified file with the content of
    /// the second specified file.
ReplaceFile = 0b0100,
CreateDirectory = 0b0101,
RemoveDirectory = 0b0110,
DenyFile = 0b0111,
DenyDirectory = 0b1000,
}
impl From<u8> for TlvTypeField {
fn from(value: u8) -> Self {
match TlvType::try_from(value) {
Ok(tlv_type) => TlvTypeField::Standard(tlv_type),
Err(_) => TlvTypeField::Custom(value),
}
}
}
impl From<TlvTypeField> for u8 {
fn from(value: TlvTypeField) -> Self {
match value {
TlvTypeField::Standard(std) => std as u8,
TlvTypeField::Custom(custom) => custom,
}
}
}
/// Generic CFDP type-length-value (TLV) abstraction as specified in CFDP 5.1.9.
///
/// # Lifetimes
/// * `data`: If the TLV is generated from a raw bytestream, this will be the lifetime of
/// the raw bytestream. If the TLV is generated from a raw slice or a similar data reference,
/// this will be the lifetime of that data reference.
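///
/// # Example
///
/// A small round-trip sketch; the `spacepackets::...` paths are assumed from this file's module
/// layout.
///
/// ```
/// use spacepackets::cfdp::tlv::{Tlv, TlvType, TlvTypeField};
///
/// let msg_to_user: [u8; 4] = [1, 2, 3, 4];
/// let tlv = Tlv::new(TlvType::MsgToUser, &msg_to_user).expect("creating TLV failed");
/// let mut buf: [u8; 16] = [0; 16];
/// let written = tlv.write_to_bytes(&mut buf).expect("writing TLV failed");
/// assert_eq!(written, tlv.len_full());
/// let read_back = Tlv::from_bytes(&buf).expect("parsing TLV failed");
/// assert_eq!(read_back.tlv_type_field(), TlvTypeField::Standard(TlvType::MsgToUser));
/// assert_eq!(read_back.value().unwrap(), &msg_to_user[..]);
/// ```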
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Tlv<'data> {
tlv_type_field: TlvTypeField,
#[cfg_attr(feature = "serde", serde(borrow))]
lv: Lv<'data>,
}
impl<'data> Tlv<'data> {
pub fn new(tlv_type: TlvType, data: &[u8]) -> Result<Tlv, TlvLvError> {
Ok(Tlv {
tlv_type_field: TlvTypeField::Standard(tlv_type),
lv: Lv::new(data)?,
})
}
/// Creates a TLV with an empty value field.
pub fn new_empty(tlv_type: TlvType) -> Tlv<'data> {
Tlv {
tlv_type_field: TlvTypeField::Standard(tlv_type),
lv: Lv::new_empty(),
}
}
pub fn tlv_type_field(&self) -> TlvTypeField {
self.tlv_type_field
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
generic_len_check_data_serialization(buf, self.len_value(), MIN_TLV_LEN)?;
buf[0] = self.tlv_type_field.into();
self.lv.write_to_be_bytes_no_len_check(&mut buf[1..]);
Ok(self.len_full())
}
pub fn value(&self) -> Option<&[u8]> {
self.lv.value()
}
/// Returns the length of the value part, not including the length byte.
pub fn len_value(&self) -> usize {
self.lv.len_value()
}
/// Returns the full raw length, including the length byte.
pub fn len_full(&self) -> usize {
self.lv.len_full() + 1
}
/// Checks whether the value field is empty.
pub fn is_empty(&self) -> bool {
self.lv.is_empty()
}
    /// Creates a TLV given a raw bytestream. Please note that it is not necessary to pass the
    /// bytestream with the exact size of the expected TLV. This function will take care
/// of parsing the length byte, and the length of the parsed TLV can be retrieved using
/// [Self::len_full].
pub fn from_bytes(buf: &'data [u8]) -> Result<Tlv<'data>, TlvLvError> {
generic_len_check_deserialization(buf, MIN_TLV_LEN)?;
Ok(Self {
tlv_type_field: TlvTypeField::from(buf[0]),
lv: Lv::from_bytes(&buf[MIN_LV_LEN..])?,
})
}
}
pub(crate) fn verify_tlv_type(raw_type: u8, expected_tlv_type: TlvType) -> Result<(), TlvLvError> {
let tlv_type = TlvType::try_from(raw_type)
.map_err(|_| TlvLvError::InvalidTlvTypeField((raw_type, Some(expected_tlv_type as u8))))?;
if tlv_type != expected_tlv_type {
return Err(TlvLvError::InvalidTlvTypeField((
tlv_type as u8,
Some(expected_tlv_type as u8),
)));
}
Ok(())
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct EntityIdTlv {
entity_id: UnsignedByteField,
}
impl EntityIdTlv {
pub fn new(entity_id: UnsignedByteField) -> Self {
Self { entity_id }
}
fn len_check(buf: &[u8]) -> Result<(), ByteConversionError> {
if buf.len() < 2 {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: 2,
}));
}
Ok(())
}
pub fn len_value(&self) -> usize {
self.entity_id.size()
}
pub fn len_full(&self) -> usize {
2 + self.entity_id.size()
}
pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
Self::len_check(buf)?;
buf[0] = TlvType::EntityId as u8;
buf[1] = self.entity_id.size() as u8;
self.entity_id.write_to_be_bytes(&mut buf[2..])
}
pub fn from_bytes(buf: &[u8]) -> Result<Self, TlvLvError> {
Self::len_check(buf)?;
verify_tlv_type(buf[0], TlvType::EntityId)?;
let len = buf[1];
if len != 1 && len != 2 && len != 4 && len != 8 {
return Err(TlvLvError::InvalidValueLength(len as usize));
}
// Okay to unwrap here. The checks before make sure that the deserialization never fails
let entity_id = UnsignedByteField::new_from_be_bytes(len as usize, &buf[2..]).unwrap();
Ok(Self { entity_id })
}
/// Convert to a generic [Tlv], which also erases the programmatic type information.
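    ///
    /// # Example
    ///
    /// A small sketch; the `spacepackets::...` paths are illustrative assumptions.
    ///
    /// ```
    /// use spacepackets::cfdp::tlv::{EntityIdTlv, TlvType, TlvTypeField};
    /// use spacepackets::util::UnsignedByteFieldU8;
    ///
    /// let entity_id_tlv = EntityIdTlv::new(UnsignedByteFieldU8::new(5).into());
    /// // Scratch buffer which holds the value field of the generic TLV.
    /// let mut buf: [u8; 16] = [0; 16];
    /// let tlv = entity_id_tlv.to_tlv(&mut buf).expect("TLV conversion failed");
    /// assert_eq!(tlv.tlv_type_field(), TlvTypeField::Standard(TlvType::EntityId));
    /// assert_eq!(tlv.len_full(), entity_id_tlv.len_full());
    /// ```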
pub fn to_tlv(self, buf: &mut [u8]) -> Result<Tlv, ByteConversionError> {
Self::len_check(buf)?;
self.entity_id
.write_to_be_bytes(&mut buf[2..2 + self.entity_id.size()])?;
Tlv::new(TlvType::EntityId, &buf[2..2 + self.entity_id.size()]).map_err(|e| match e {
TlvLvError::ByteConversionError(e) => e,
// All other errors are impossible.
_ => panic!("unexpected TLV error"),
})
}
}
impl<'data> TryFrom<Tlv<'data>> for EntityIdTlv {
type Error = TlvLvError;
fn try_from(value: Tlv) -> Result<Self, Self::Error> {
match value.tlv_type_field {
TlvTypeField::Standard(tlv_type) => {
if tlv_type != TlvType::EntityId {
return Err(TlvLvError::InvalidTlvTypeField((
tlv_type as u8,
Some(TlvType::EntityId as u8),
)));
}
}
TlvTypeField::Custom(val) => {
return Err(TlvLvError::InvalidTlvTypeField((
val,
Some(TlvType::EntityId as u8),
)));
}
}
if value.len_value() != 1
&& value.len_value() != 2
&& value.len_value() != 4
&& value.len_value() != 8
{
return Err(TlvLvError::InvalidValueLength(value.len_value()));
}
Ok(Self::new(
UnsignedByteField::new_from_be_bytes(value.len_value(), value.value().unwrap())
.map_err(|e| match e {
UnsignedByteFieldError::ByteConversionError(e) => e,
// This can not happen, we checked for the length validity, and the data is always smaller than
// 255 bytes.
_ => panic!("unexpected error"),
})?,
))
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FilestoreRequestTlv<'first_name, 'second_name> {
action_code: FilestoreActionCode,
#[cfg_attr(feature = "serde", serde(borrow))]
first_name: Lv<'first_name>,
#[cfg_attr(feature = "serde", serde(borrow))]
second_name: Option<Lv<'second_name>>,
}
impl<'first_name, 'second_name> FilestoreRequestTlv<'first_name, 'second_name> {
pub fn new_create_file(first_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::CreateFile, first_name, None)
}
pub fn new_delete_file(first_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::DeleteFile, first_name, None)
}
pub fn new_rename_file(
source_name: Lv<'first_name>,
target_name: Lv<'second_name>,
) -> Result<Self, TlvLvError> {
Self::new(
FilestoreActionCode::RenameFile,
source_name,
Some(target_name),
)
}
    /// This operation appends one file to another. The first specified file forms the first
    /// part of the new file's content and also provides the name of the new file. This can be
    /// used to get functionality similar to the UNIX cat utility (albeit for only two files).
pub fn new_append_file(
first_file: Lv<'first_name>,
second_file: Lv<'second_name>,
) -> Result<Self, TlvLvError> {
Self::new(
FilestoreActionCode::AppendFile,
first_file,
Some(second_file),
)
}
/// This operation replaces the content of the first specified file with the content of the
/// second specified file. This function can be used to get similar functionality to the
/// UNIX copy (cp) utility if the target file already exists.
pub fn new_replace_file(
replaced_file: Lv<'first_name>,
new_file: Lv<'second_name>,
) -> Result<Self, TlvLvError> {
Self::new(
FilestoreActionCode::ReplaceFile,
replaced_file,
Some(new_file),
)
}
pub fn new_create_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::CreateDirectory, dir_name, None)
}
pub fn new_remove_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::RemoveDirectory, dir_name, None)
}
pub fn new_deny_file(file_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::DenyFile, file_name, None)
}
pub fn new_deny_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::DenyDirectory, dir_name, None)
}
/// This function returns [TlvLvError::SecondNameMissing] if the respective action code
/// requires two names but only one is passed, and [TlvLvError::InvalidValueLength] if the
/// cumulative length of the first name and the second name exceeds 255 bytes.
///
/// Two names are required for the rename, append and replace filestore requests.
pub fn new(
action_code: FilestoreActionCode,
first_name: Lv<'first_name>,
second_name: Option<Lv<'second_name>>,
) -> Result<Self, TlvLvError> {
let mut base_value_len = first_name.len_full();
if Self::has_second_filename(action_code) {
if second_name.is_none() {
return Err(TlvLvError::SecondNameMissing);
}
base_value_len += second_name.as_ref().unwrap().len_full();
}
if base_value_len > u8::MAX as usize {
return Err(TlvLvError::InvalidValueLength(base_value_len));
}
Ok(Self {
action_code,
first_name,
second_name,
})
}
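// Error handling sketch (made-up file name): passing no second name for an action code which
// requires one fails with the corresponding error variant.
//
//     let first = Lv::new_from_str("old-name.txt").unwrap();
//     let result = FilestoreRequestTlv::new(FilestoreActionCode::RenameFile, first, None);
//     assert!(matches!(result, Err(TlvLvError::SecondNameMissing)));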
pub fn has_second_filename(action_code: FilestoreActionCode) -> bool {
if action_code == FilestoreActionCode::RenameFile
|| action_code == FilestoreActionCode::AppendFile
|| action_code == FilestoreActionCode::ReplaceFile
{
return true;
}
false
}
pub fn action_code(&self) -> FilestoreActionCode {
self.action_code
}
pub fn first_name(&self) -> Lv<'first_name> {
self.first_name
}
pub fn second_name(&self) -> Option<Lv<'second_name>> {
self.second_name
}
pub fn len_value(&self) -> usize {
let mut len = 1 + self.first_name.len_full();
if let Some(second_name) = self.second_name {
len += second_name.len_full();
}
len
}
pub fn len_full(&self) -> usize {
2 + self.len_value()
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.len_full() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: self.len_full(),
}));
}
buf[0] = TlvType::FilestoreRequest as u8;
buf[1] = self.len_value() as u8;
buf[2] = (self.action_code as u8) << 4;
let mut current_idx = 3;
// Length checks were already performed.
self.first_name.write_to_be_bytes_no_len_check(
&mut buf[current_idx..current_idx + self.first_name.len_full()],
);
current_idx += self.first_name.len_full();
if let Some(second_name) = self.second_name {
second_name.write_to_be_bytes_no_len_check(
&mut buf[current_idx..current_idx + second_name.len_full()],
);
current_idx += second_name.len_full();
}
Ok(current_idx)
}
pub fn from_bytes<'longest: 'first_name + 'second_name>(
buf: &'longest [u8],
) -> Result<Self, TlvLvError> {
if buf.len() < 2 {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: 2,
})
.into());
}
verify_tlv_type(buf[0], TlvType::FilestoreRequest)?;
let len = buf[1] as usize;
let mut current_idx = 2;
let action_code = FilestoreActionCode::try_from((buf[2] >> 4) & 0b1111)
.map_err(|_| TlvLvError::InvalidFilestoreActionCode((buf[2] >> 4) & 0b1111))?;
current_idx += 1;
let first_name = Lv::from_bytes(&buf[current_idx..])?;
let mut second_name = None;
current_idx += first_name.len_full();
if Self::has_second_filename(action_code) {
if current_idx >= 2 + len {
return Err(TlvLvError::SecondNameMissing);
}
second_name = Some(Lv::from_bytes(&buf[current_idx..])?);
}
Ok(Self {
action_code,
first_name,
second_name,
})
}
}
#[cfg(test)]
mod tests {
use crate::cfdp::lv::Lv;
use crate::cfdp::tlv::{FilestoreActionCode, FilestoreRequestTlv, Tlv, TlvType, TlvTypeField};
use crate::cfdp::TlvLvError;
use crate::util::{UbfU8, UnsignedEnum};
const TLV_TEST_STR_0: &'static str = "hello.txt";
const TLV_TEST_STR_1: &'static str = "hello2.txt";
#[test]
fn test_basic() {
let entity_id = UbfU8::new(5);
let mut buf: [u8; 4] = [0; 4];
assert!(entity_id.write_to_be_bytes(&mut buf).is_ok());
let tlv_res = Tlv::new(TlvType::EntityId, &buf[0..1]);
assert!(tlv_res.is_ok());
let tlv_res = tlv_res.unwrap();
assert_eq!(
tlv_res.tlv_type_field(),
TlvTypeField::Standard(TlvType::EntityId)
);
assert_eq!(tlv_res.len_full(), 3);
assert_eq!(tlv_res.len_value(), 1);
assert!(!tlv_res.is_empty());
assert!(tlv_res.value().is_some());
assert_eq!(tlv_res.value().unwrap()[0], 5);
}
#[test]
fn test_serialization() {
let entity_id = UbfU8::new(5);
let mut buf: [u8; 4] = [0; 4];
assert!(entity_id.write_to_be_bytes(&mut buf).is_ok());
let tlv_res = Tlv::new(TlvType::EntityId, &buf[0..1]);
assert!(tlv_res.is_ok());
let tlv_res = tlv_res.unwrap();
let mut ser_buf: [u8; 4] = [0; 4];
assert!(tlv_res.write_to_bytes(&mut ser_buf).is_ok());
assert_eq!(ser_buf[0], TlvType::EntityId as u8);
assert_eq!(ser_buf[1], 1);
assert_eq!(ser_buf[2], 5);
}
#[test]
fn test_deserialization() {
let entity_id = UbfU8::new(5);
let mut buf: [u8; 4] = [0; 4];
assert!(entity_id.write_to_be_bytes(&mut buf[2..]).is_ok());
buf[0] = TlvType::EntityId as u8;
buf[1] = 1;
let tlv_from_raw = Tlv::from_bytes(&mut buf);
assert!(tlv_from_raw.is_ok());
let tlv_from_raw = tlv_from_raw.unwrap();
assert_eq!(
tlv_from_raw.tlv_type_field(),
TlvTypeField::Standard(TlvType::EntityId)
);
assert_eq!(tlv_from_raw.len_value(), 1);
assert_eq!(tlv_from_raw.len_full(), 3);
assert!(tlv_from_raw.value().is_some());
assert_eq!(tlv_from_raw.value().unwrap()[0], 5);
}
#[test]
fn test_empty() {
let tlv_empty = Tlv::new_empty(TlvType::MsgToUser);
assert!(tlv_empty.value().is_none());
assert!(tlv_empty.is_empty());
assert_eq!(tlv_empty.len_full(), 2);
assert_eq!(tlv_empty.len_value(), 0);
assert_eq!(
tlv_empty.tlv_type_field(),
TlvTypeField::Standard(TlvType::MsgToUser)
);
}
#[test]
fn test_empty_serialization() {
let tlv_empty = Tlv::new_empty(TlvType::MsgToUser);
let mut buf: [u8; 4] = [0; 4];
assert!(tlv_empty.write_to_bytes(&mut buf).is_ok());
assert_eq!(buf[0], TlvType::MsgToUser as u8);
assert_eq!(buf[1], 0);
}
#[test]
fn test_empty_deserialization() {
let mut buf: [u8; 4] = [0; 4];
buf[0] = TlvType::MsgToUser as u8;
buf[1] = 0;
let tlv_empty = Tlv::from_bytes(&mut buf);
assert!(tlv_empty.is_ok());
let tlv_empty = tlv_empty.unwrap();
assert!(tlv_empty.is_empty());
assert!(tlv_empty.value().is_none());
assert_eq!(
tlv_empty.tlv_type_field(),
TlvTypeField::Standard(TlvType::MsgToUser)
);
assert_eq!(tlv_empty.len_full(), 2);
assert_eq!(tlv_empty.len_value(), 0);
}
#[test]
fn test_buf_too_large() {
let buf_too_large: [u8; u8::MAX as usize + 1] = [0; u8::MAX as usize + 1];
let tlv_res = Tlv::new(TlvType::MsgToUser, &buf_too_large);
assert!(tlv_res.is_err());
let error = tlv_res.unwrap_err();
if let TlvLvError::DataTooLarge(size) = error {
assert_eq!(size, u8::MAX as usize + 1);
} else {
panic!("unexpected error {:?}", error);
}
}
#[test]
fn test_deserialization_custom_tlv_type() {
let mut buf: [u8; 4] = [0; 4];
buf[0] = 3;
buf[1] = 1;
buf[2] = 5;
let tlv = Tlv::from_bytes(&mut buf);
assert!(tlv.is_ok());
let tlv = tlv.unwrap();
assert_eq!(tlv.tlv_type_field(), TlvTypeField::Custom(3));
assert_eq!(tlv.len_value(), 1);
assert_eq!(tlv.len_full(), 3);
}
fn generic_fs_request_test_one_file(
action_code: FilestoreActionCode,
) -> FilestoreRequestTlv<'static, 'static> {
assert!(!FilestoreRequestTlv::has_second_filename(action_code));
let first_name = Lv::new_from_str(TLV_TEST_STR_0).unwrap();
let fs_request = match action_code {
FilestoreActionCode::CreateFile => FilestoreRequestTlv::new_create_file(first_name),
FilestoreActionCode::DeleteFile => FilestoreRequestTlv::new_delete_file(first_name),
FilestoreActionCode::CreateDirectory => {
FilestoreRequestTlv::new_create_directory(first_name)
}
FilestoreActionCode::RemoveDirectory => {
FilestoreRequestTlv::new_remove_directory(first_name)
}
FilestoreActionCode::DenyFile => FilestoreRequestTlv::new_deny_file(first_name),
FilestoreActionCode::DenyDirectory => {
FilestoreRequestTlv::new_deny_directory(first_name)
}
_ => panic!("invalid action code"),
};
assert!(fs_request.is_ok());
let fs_request = fs_request.unwrap();
assert_eq!(fs_request.len_value(), 1 + first_name.len_full());
assert_eq!(fs_request.len_full(), fs_request.len_value() + 2);
assert_eq!(fs_request.action_code(), action_code);
assert_eq!(fs_request.first_name(), first_name);
assert_eq!(fs_request.second_name(), None);
fs_request
}
fn generic_fs_request_test_two_files(
action_code: FilestoreActionCode,
) -> FilestoreRequestTlv<'static, 'static> {
assert!(FilestoreRequestTlv::has_second_filename(action_code));
let first_name = Lv::new_from_str(TLV_TEST_STR_0).unwrap();
let second_name = Lv::new_from_str(TLV_TEST_STR_1).unwrap();
let fs_request = match action_code {
FilestoreActionCode::ReplaceFile => {
FilestoreRequestTlv::new_replace_file(first_name, second_name)
}
FilestoreActionCode::AppendFile => {
FilestoreRequestTlv::new_append_file(first_name, second_name)
}
FilestoreActionCode::RenameFile => {
FilestoreRequestTlv::new_rename_file(first_name, second_name)
}
_ => panic!("invalid action code"),
};
assert!(fs_request.is_ok());
let fs_request = fs_request.unwrap();
assert_eq!(
fs_request.len_value(),
1 + first_name.len_full() + second_name.len_full()
);
assert_eq!(fs_request.len_full(), fs_request.len_value() + 2);
assert_eq!(fs_request.action_code(), action_code);
assert_eq!(fs_request.first_name(), first_name);
assert!(fs_request.second_name().is_some());
assert_eq!(fs_request.second_name().unwrap(), second_name);
fs_request
}
#[test]
fn test_fs_request_basic_create_file() {
generic_fs_request_test_one_file(FilestoreActionCode::CreateFile);
}
#[test]
fn test_fs_request_basic_delete() {
generic_fs_request_test_one_file(FilestoreActionCode::DeleteFile);
}
#[test]
fn test_fs_request_basic_create_dir() {
generic_fs_request_test_one_file(FilestoreActionCode::CreateDirectory);
}
#[test]
fn test_fs_request_basic_remove_dir() {
generic_fs_request_test_one_file(FilestoreActionCode::RemoveDirectory);
}
#[test]
fn test_fs_request_basic_deny_file() {
generic_fs_request_test_one_file(FilestoreActionCode::DenyFile);
}
#[test]
fn test_fs_request_basic_deny_dir() {
generic_fs_request_test_one_file(FilestoreActionCode::DenyDirectory);
}
#[test]
fn test_fs_request_basic_append_file() {
generic_fs_request_test_two_files(FilestoreActionCode::AppendFile);
}
#[test]
fn test_fs_request_basic_rename_file() {
generic_fs_request_test_two_files(FilestoreActionCode::RenameFile);
}
#[test]
fn test_fs_request_basic_replace_file() {
generic_fs_request_test_two_files(FilestoreActionCode::ReplaceFile);
}
fn check_fs_request_first_part(
buf: &[u8],
action_code: FilestoreActionCode,
expected_val_len: u8,
) -> usize {
assert_eq!(buf[0], TlvType::FilestoreRequest as u8);
assert_eq!(buf[1], expected_val_len);
assert_eq!((buf[2] >> 4) & 0b1111, action_code as u8);
let lv = Lv::from_bytes(&buf[3..]);
assert!(lv.is_ok());
let lv = lv.unwrap();
assert_eq!(lv.value_as_str().unwrap().unwrap(), TLV_TEST_STR_0);
3 + lv.len_full()
}
#[test]
fn test_fs_request_serialization_one_file() {
let req = generic_fs_request_test_one_file(FilestoreActionCode::CreateFile);
let mut buf: [u8; 64] = [0; 64];
let res = req.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, 3 + 1 + TLV_TEST_STR_0.len());
assert_eq!(written, req.len_full());
check_fs_request_first_part(
&buf,
FilestoreActionCode::CreateFile,
1 + 1 + TLV_TEST_STR_0.len() as u8,
);
}
#[test]
fn test_fs_request_deserialization_one_file() {
let req = generic_fs_request_test_one_file(FilestoreActionCode::CreateFile);
let mut buf: [u8; 64] = [0; 64];
let res = req.write_to_bytes(&mut buf);
assert!(res.is_ok());
let req_conv_back = FilestoreRequestTlv::from_bytes(&buf);
assert!(req_conv_back.is_ok());
let req_conv_back = req_conv_back.unwrap();
assert_eq!(req_conv_back, req);
}
#[test]
fn test_fs_request_serialization_two_files() {
let req = generic_fs_request_test_two_files(FilestoreActionCode::RenameFile);
let mut buf: [u8; 64] = [0; 64];
let res = req.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, req.len_full());
assert_eq!(
written,
3 + 1 + TLV_TEST_STR_0.len() + 1 + TLV_TEST_STR_1.len()
);
let current_idx = check_fs_request_first_part(
&buf,
FilestoreActionCode::RenameFile,
1 + 1 + TLV_TEST_STR_0.len() as u8 + 1 + TLV_TEST_STR_1.len() as u8,
);
let second_lv = Lv::from_bytes(&buf[current_idx..]);
assert!(second_lv.is_ok());
let second_lv = second_lv.unwrap();
assert_eq!(second_lv.value_as_str().unwrap().unwrap(), TLV_TEST_STR_1);
assert_eq!(current_idx + second_lv.len_full(), req.len_full());
}
#[test]
fn test_fs_request_deserialization_two_files() {
let req = generic_fs_request_test_two_files(FilestoreActionCode::RenameFile);
let mut buf: [u8; 64] = [0; 64];
req.write_to_bytes(&mut buf).unwrap();
let req_conv_back = FilestoreRequestTlv::from_bytes(&buf);
assert!(req_conv_back.is_ok());
let req_conv_back = req_conv_back.unwrap();
assert_eq!(req_conv_back, req);
}
}

View File

@ -1,72 +1,28 @@
//! Common definitions and helpers required to create PUS TMTC packets according to
//! [ECSS-E-ST-70-41C](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/)
//!
//! You can find the PUS telecommand definitions in the [tc] module and the PUS telemetry definitions
//! inside the [tm] module.
use crate::{ByteConversionError, CcsdsPacket, CRC_CCITT_FALSE};
use crate::{ByteConversionError, CcsdsPacket, SizeMissmatch};
use core::fmt::{Debug, Display, Formatter};
use core::mem::size_of;
use num_enum::{IntoPrimitive, TryFromPrimitive};
use crc::{Crc, CRC_16_IBM_3740};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
pub mod event;
pub mod hk;
pub mod scheduling;
pub mod tc;
pub mod tm;
pub mod verification;
pub type CrcType = u16;
/// CRC algorithm used by the PUS standard.
pub const CRC_CCITT_FALSE: Crc<u16> = Crc::<u16>::new(&CRC_16_IBM_3740);
pub const CCSDS_HEADER_LEN: usize = size_of::<crate::zc::SpHeader>();
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
#[non_exhaustive]
pub enum PusServiceId {
/// Service 1
Verification = 1,
/// Service 2
DeviceAccess = 2,
/// Service 3
Housekeeping = 3,
/// Service 4
ParameterStatistics = 4,
/// Service 5
Event = 5,
/// Service 6
MemoryManagement = 6,
/// Service 8
Action = 8,
/// Service 9
TimeManagement = 9,
/// Service 11
Scheduling = 11,
/// Service 12
OnBoardMonitoring = 12,
/// Service 13
LargePacketTransfer = 13,
/// Service 14
RealTimeForwardingControl = 14,
/// Service 15
StorageAndRetrival = 15,
/// Service 17
Test = 17,
/// Service 18
OpsAndProcedures = 18,
/// Service 19
EventAction = 19,
/// Service 20
Parameter = 20,
/// Service 21
RequestSequencing = 21,
/// Service 22
PositionBasedScheduling = 22,
/// Service 23
FileManagement = 23,
}
/// All PUS versions. Only PUS C is supported by this library.
@ -92,7 +48,6 @@ impl TryFrom<u8> for PusVersion {
}
}
/// ECSS Packet Type Codes (PTC)s.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum PacketTypeCodes {
@ -112,7 +67,6 @@ pub enum PacketTypeCodes {
pub type Ptc = PacketTypeCodes;
/// ECSS Packet Field Codes (PFC)s for the unsigned [Ptc].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum UnsignedPfc {
@ -128,7 +82,6 @@ pub enum UnsignedPfc {
ThreeBits = 19,
}
/// ECSS Packet Field Codes (PFC)s for the real (floating point) [Ptc].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum RealPfc {
@ -151,22 +104,23 @@ pub enum PusError {
NoRawData,
/// CRC16 needs to be calculated first
CrcCalculationMissing,
ByteConversion(ByteConversionError),
ByteConversionError(ByteConversionError),
}
impl Display for PusError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
PusError::VersionNotSupported(v) => {
write!(f, "PUS version {v:?} not supported")
write!(f, "PUS version {:?} not supported", v)
}
PusError::IncorrectCrc(crc) => {
write!(f, "crc16 {crc:#04x} is incorrect")
write!(f, "crc16 {:#04x} is incorrect", crc)
}
PusError::RawDataTooShort(size) => {
write!(
f,
"deserialization error, provided raw data with size {size} too short"
"deserialization error, provided raw data with size {} too short",
size
)
}
PusError::NoRawData => {
@ -175,8 +129,8 @@ impl Display for PusError {
PusError::CrcCalculationMissing => {
write!(f, "crc16 was not calculated")
}
PusError::ByteConversion(e) => {
write!(f, "low level byte conversion error: {e}")
PusError::ByteConversionError(e) => {
write!(f, "low level byte conversion error: {}", e)
}
}
}
@ -185,7 +139,7 @@ impl Display for PusError {
#[cfg(feature = "std")]
impl Error for PusError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
if let PusError::ByteConversion(e) = self {
if let PusError::ByteConversionError(e) = self {
return Some(e);
}
None
@ -194,19 +148,18 @@ impl Error for PusError {
impl From<ByteConversionError> for PusError {
fn from(e: ByteConversionError) -> Self {
PusError::ByteConversion(e)
PusError::ByteConversionError(e)
}
}
/// Generic trait to describe common attributes for both PUS Telecommands (TC) and PUS Telemetry
/// (TM) packets. All PUS packets are also a special type of [CcsdsPacket]s.
pub trait PusPacket: CcsdsPacket {
const PUS_VERSION: PusVersion = PusVersion::PusC;
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn user_data(&self) -> &[u8];
fn user_data(&self) -> Option<&[u8]>;
fn crc16(&self) -> Option<u16>;
}
@ -250,29 +203,21 @@ pub(crate) fn user_data_from_raw(
total_len: usize,
raw_data_len: usize,
slice: &[u8],
) -> Result<&[u8], PusError> {
) -> Result<Option<&[u8]>, PusError> {
match current_idx {
_ if current_idx == total_len - 2 => Ok(None),
_ if current_idx > total_len - 2 => Err(PusError::RawDataTooShort(raw_data_len)),
_ => Ok(&slice[current_idx..total_len - 2]),
_ => Ok(Some(&slice[current_idx..total_len - 2])),
}
}
pub(crate) fn verify_crc16_ccitt_false_from_raw_to_pus_error(
raw_data: &[u8],
crc16: u16,
) -> Result<(), PusError> {
verify_crc16_ccitt_false_from_raw(raw_data)
.then(|| ())
.ok_or(PusError::IncorrectCrc(crc16))
}
pub(crate) fn verify_crc16_ccitt_false_from_raw(raw_data: &[u8]) -> bool {
pub(crate) fn verify_crc16_from_raw(raw_data: &[u8], crc16: u16) -> Result<(), PusError> {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(raw_data);
if digest.finalize() == 0 {
return true;
return Ok(());
}
false
Err(PusError::IncorrectCrc(crc16))
}
macro_rules! ccsds_impl {
@ -296,7 +241,6 @@ macro_rules! sp_header_impls {
}
}
use crate::util::{GenericUnsignedByteField, ToBeBytes, UnsignedEnum};
pub(crate) use ccsds_impl;
pub(crate) use sp_header_impls;
@ -304,17 +248,66 @@ pub(crate) use sp_header_impls;
/// and an unsigned value. The trait makes no assumptions about the actual type of the unsigned
/// value and only requires implementors to implement a function which writes the enumeration into
/// a raw byte format.
pub trait EcssEnumeration: UnsignedEnum {
pub trait EcssEnumeration {
/// Packet Format Code, which denotes the number of bits of the enumeration
fn pfc(&self) -> u8;
fn byte_width(&self) -> usize {
(self.pfc() / 8) as usize
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<(), ByteConversionError>;
}
pub trait EcssEnumerationExt: EcssEnumeration + Debug + Copy + Clone + PartialEq + Eq {}
pub trait ToBeBytes {
type ByteArray: AsRef<[u8]>;
fn to_be_bytes(&self) -> Self::ByteArray;
}
impl ToBeBytes for () {
type ByteArray = [u8; 0];
fn to_be_bytes(&self) -> Self::ByteArray {
[]
}
}
impl ToBeBytes for u8 {
type ByteArray = [u8; 1];
fn to_be_bytes(&self) -> Self::ByteArray {
u8::to_be_bytes(*self)
}
}
impl ToBeBytes for u16 {
type ByteArray = [u8; 2];
fn to_be_bytes(&self) -> Self::ByteArray {
u16::to_be_bytes(*self)
}
}
impl ToBeBytes for u32 {
type ByteArray = [u8; 4];
fn to_be_bytes(&self) -> Self::ByteArray {
u32::to_be_bytes(*self)
}
}
impl ToBeBytes for u64 {
type ByteArray = [u8; 8];
fn to_be_bytes(&self) -> Self::ByteArray {
u64::to_be_bytes(*self)
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct GenericEcssEnumWrapper<TYPE> {
field: GenericUnsignedByteField<TYPE>,
val: TYPE,
}
impl<TYPE> GenericEcssEnumWrapper<TYPE> {
@ -323,19 +316,7 @@ impl<TYPE> GenericEcssEnumWrapper<TYPE> {
}
pub fn new(val: TYPE) -> Self {
Self {
field: GenericUnsignedByteField::new(val),
}
}
}
impl<TYPE: ToBeBytes> UnsignedEnum for GenericEcssEnumWrapper<TYPE> {
fn size(&self) -> usize {
(self.pfc() / 8) as usize
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.field.write_to_be_bytes(buf)
Self { val }
}
}
@ -343,6 +324,17 @@ impl<TYPE: ToBeBytes> EcssEnumeration for GenericEcssEnumWrapper<TYPE> {
fn pfc(&self) -> u8 {
size_of::<TYPE>() as u8 * 8_u8
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<(), ByteConversionError> {
if buf.len() < self.byte_width() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: self.byte_width(),
}));
}
buf[0..self.byte_width()].copy_from_slice(self.val.to_be_bytes().as_ref());
Ok(())
}
}
impl<TYPE: Debug + Copy + Clone + PartialEq + Eq + ToBeBytes> EcssEnumerationExt
@ -355,17 +347,9 @@ pub type EcssEnumU16 = GenericEcssEnumWrapper<u16>;
pub type EcssEnumU32 = GenericEcssEnumWrapper<u32>;
pub type EcssEnumU64 = GenericEcssEnumWrapper<u64>;
/// Generic trait for PUS packet abstractions which can be written to a raw slice as their raw
/// byte representation. This is especially useful for generic abstractions which depend only
/// on the serialization of those packets.
pub trait SerializablePusPacket {
fn len_packed(&self) -> usize;
fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError>;
}
#[cfg(test)]
mod tests {
use crate::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU8, UnsignedEnum};
use crate::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU8, EcssEnumeration};
use crate::ByteConversionError;
#[test]

View File

@ -1,43 +0,0 @@
//! PUS Service 5 Events
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum Subservice {
TmInfoReport = 1,
TmLowSeverityReport = 2,
TmMediumSeverityReport = 3,
TmHighSeverityReport = 4,
TcEnableEventGeneration = 5,
TcDisableEventGeneration = 6,
TcReportDisabledList = 7,
TmDisabledEventsReport = 8,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_conv_into_u8() {
let subservice: u8 = Subservice::TmLowSeverityReport.into();
assert_eq!(subservice, 2);
}
#[test]
fn test_conv_from_u8() {
let subservice: Subservice = 2.try_into().unwrap();
assert_eq!(subservice, Subservice::TmLowSeverityReport);
}
#[test]
fn test_conv_fails() {
let conversion = Subservice::try_from(9);
assert!(conversion.is_err());
let err = conversion.unwrap_err();
assert_eq!(err.number, 9);
}
}

View File

@ -1,31 +0,0 @@
//! PUS Service 3 Housekeeping
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum Subservice {
// Regular HK
TcCreateHkReportStructure = 1,
TcDeleteHkReportStructures = 3,
TcEnableHkGeneration = 5,
TcDisableHkGeneration = 6,
TcReportHkReportStructures = 9,
TmHkPacket = 25,
TcGenerateOneShotHk = 27,
TcModifyHkCollectionInterval = 31,
// Diagnostics HK
TcCreateDiagReportStructure = 2,
TcDeleteDiagReportStructures = 4,
TcEnableDiagGeneration = 7,
TcDisableDiagGeneration = 8,
TmHkStructuresReport = 10,
TcReportDiagReportStructures = 11,
TmDiagStructuresReport = 12,
TmDiagPacket = 26,
TcGenerateOneShotDiag = 28,
TcModifyDiagCollectionInterval = 32,
}

View File

@ -1,105 +0,0 @@
//! PUS Service 11 Scheduling
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Debug, PartialEq, Eq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum Subservice {
// Core subservices
TcEnableScheduling = 1,
TcDisableScheduling = 2,
TcResetScheduling = 3,
TcInsertActivity = 4,
TcDeleteActivityByRequestId = 5,
TcDeleteActivitiesByFilter = 6,
// Time shift subservices
TcTimeShiftActivityWithRequestId = 7,
TcTimeShiftActivitiesByFilter = 8,
TcTimeShiftAll = 15,
// Reporting subservices
TcDetailReportByRequestId = 9,
TmDetailReport = 10,
TcDetailReportByFilter = 11,
TcSummaryReportByRequestId = 12,
TmSummaryReport = 13,
TcSummaryReportByFilter = 14,
TcDetailReportAll = 16,
TcSummaryReportAll = 17,
// Subschedule subservices
TcReportSubscheduleStatus = 18,
TmReportSubscheduleStatus = 19,
TcEnableSubschedule = 20,
TcDisableSubschedule = 21,
// Group subservices
TcCreateScheduleGroup = 22,
TcDeleteScheduleGroup = 23,
TcEnableScheduleGroup = 24,
TcDisableScheduleGroup = 25,
TcReportAllGroupsStatus = 26,
TmReportAllGroupsStatus = 27,
}
/// This status applies to sub-schedules and groups as well, as specified in ECSS-E-ST-70-41C 8.11.3
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum SchedStatus {
Disabled = 0,
Enabled = 1,
}
impl From<bool> for SchedStatus {
fn from(value: bool) -> Self {
if value {
SchedStatus::Enabled
} else {
SchedStatus::Disabled
}
}
}
/// Time window types as specified in ECSS-E-ST-70-41C 8.11.3
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum TimeWindowType {
SelectAll = 0,
TimeTagToTimeTag = 1,
FromTimeTag = 2,
ToTimeTag = 3,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_bool_conv_0() {
let enabled = true;
let status: SchedStatus = enabled.into();
assert_eq!(status, SchedStatus::Enabled)
}
#[test]
fn test_bool_conv_1() {
let enabled = false;
let status: SchedStatus = enabled.into();
assert_eq!(status, SchedStatus::Disabled)
}
#[test]
fn test_conv_into_u8() {
let subservice: u8 = Subservice::TcCreateScheduleGroup.into();
assert_eq!(subservice, 22);
}
#[test]
fn test_conv_from_u8() {
let subservice: Subservice = 22u8.try_into().unwrap();
assert_eq!(subservice, Subservice::TcCreateScheduleGroup);
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,35 +0,0 @@
//! PUS Service 1 Verification
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum Subservice {
TmAcceptanceSuccess = 1,
TmAcceptanceFailure = 2,
TmStartSuccess = 3,
TmStartFailure = 4,
TmStepSuccess = 5,
TmStepFailure = 6,
TmCompletionSuccess = 7,
TmCompletionFailure = 8,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_conv_into_u8() {
let subservice: u8 = Subservice::TmCompletionSuccess.into();
assert_eq!(subservice, 7);
}
#[test]
fn test_conv_from_u8() {
let subservice: Subservice = 7.try_into().unwrap();
assert_eq!(subservice, Subservice::TmCompletionSuccess);
}
}

View File

@ -7,15 +7,11 @@
//!
//! - Space Packet implementation according to
//! [CCSDS Blue Book 133.0-B-2](https://public.ccsds.org/Pubs/133x0b2e1.pdf)
//! - CCSDS File Delivery Protocol (CFDP) packet implementations according to
//! [CCSDS Blue Book 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf)
//! - PUS Telecommand and PUS Telemetry implementation according to the
//! [ECSS-E-ST-70-41C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
//! - CUC (CCSDS Unsegmented Time Code) implementation according to
//! [CCSDS 301.0-B-4 3.2](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
//! - CDS (CCSDS Day Segmented Time Code) implementation according to
//! [CCSDS 301.0-B-4 3.3](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
//! - Some helper types to support ASCII timecodes as specified in
//! - CDS Short Time Code implementation according to
//! [CCSDS 301.0-B-4](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
//! - Some helper types and functions to support ASCII timecodes as specified in
//! [CCSDS 301.0-B-4 3.5](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
//!
//! ## Features
@ -62,28 +58,23 @@ extern crate alloc;
extern crate std;
use crate::ecss::CCSDS_HEADER_LEN;
use core::fmt::{Debug, Display, Formatter};
use crc::{Crc, CRC_16_IBM_3740};
use core::fmt::{Display, Formatter};
use delegate::delegate;
#[cfg(feature = "std")]
use std::error::Error;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
pub mod cfdp;
pub mod ecss;
pub mod tc;
pub mod time;
pub mod util;
pub mod tm;
mod private {
pub trait Sealed {}
}
/// CRC algorithm used by the PUS standard, the CCSDS TC standard and the CFDP standard.
pub const CRC_CCITT_FALSE: Crc<u16> = Crc::<u16>::new(&CRC_16_IBM_3740);
pub const MAX_APID: u16 = 2u16.pow(11) - 1;
pub const MAX_SEQ_COUNT: u16 = 2u16.pow(14) - 1;
@ -136,7 +127,6 @@ impl Display for ByteConversionError {
#[cfg(feature = "std")]
impl Error for ByteConversionError {}
/// CCSDS packet type enumeration.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum PacketType {
@ -185,8 +175,6 @@ impl TryFrom<u8> for SequenceFlags {
}
}
/// Abstraction for the CCSDS Packet ID, which forms the last thirteen bits
/// of the first two bytes in the CCSDS primary header.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PacketId {
@ -270,8 +258,6 @@ impl From<u16> for PacketId {
}
}
/// Abstraction for the CCSDS Packet Sequence Control (PSC) field which is the
/// third and the fourth byte in the CCSDS primary header.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PacketSequenceCtrl {

735
src/tc.rs Normal file
View File

@ -0,0 +1,735 @@
//! This module contains all components required to create a ECSS PUS C telecommand packets according
//! to [ECSS-E-ST-70-41C](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
//!
//! # Examples
//!
//! ```rust
//! use spacepackets::{CcsdsPacket, SpHeader};
//! use spacepackets::tc::{PusTc, PusTcSecondaryHeader};
//! use spacepackets::ecss::PusPacket;
//!
//! // Create a ping telecommand with no user application data
//! let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
//! let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
//! let pus_tc = PusTc::new(&mut sph, tc_header, None, true);
//! println!("{:?}", pus_tc);
//! assert_eq!(pus_tc.service(), 17);
//! assert_eq!(pus_tc.subservice(), 1);
//! assert_eq!(pus_tc.apid(), 0x02);
//!
//! // Serialize TC into a raw buffer
//! let mut test_buf: [u8; 32] = [0; 32];
//! let size = pus_tc
//! .write_to_bytes(test_buf.as_mut_slice())
//! .expect("Error writing TC to buffer");
//! assert_eq!(size, 13);
//! println!("{:?}", &test_buf[0..size]);
//!
//! // Deserialize from the raw byte representation
//! let pus_tc_deserialized = PusTc::from_bytes(&test_buf).expect("Deserialization failed");
//! assert_eq!(pus_tc.service(), 17);
//! assert_eq!(pus_tc.subservice(), 1);
//! assert_eq!(pus_tc.apid(), 0x02);
//! ```
use crate::ecss::{
ccsds_impl, crc_from_raw_data, crc_procedure, sp_header_impls, user_data_from_raw,
verify_crc16_from_raw, CrcType, PusError, PusPacket, PusVersion, CRC_CCITT_FALSE,
};
use crate::SpHeader;
use crate::{
ByteConversionError, CcsdsPacket, PacketType, SequenceFlags, SizeMissmatch, CCSDS_HEADER_LEN,
};
use core::mem::size_of;
use delegate::delegate;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use zerocopy::AsBytes;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
/// PUS C secondary header length is fixed
pub const PUC_TC_SECONDARY_HEADER_LEN: usize = size_of::<zc::PusTcSecondaryHeader>();
pub const PUS_TC_MIN_LEN_WITHOUT_APP_DATA: usize =
CCSDS_HEADER_LEN + PUC_TC_SECONDARY_HEADER_LEN + size_of::<CrcType>();
const PUS_VERSION: PusVersion = PusVersion::PusC;
#[derive(Copy, Clone, PartialEq, Debug)]
enum AckOpts {
Acceptance = 0b1000,
Start = 0b0100,
Progress = 0b0010,
Completion = 0b0001,
}
pub const ACK_ALL: u8 = AckOpts::Acceptance as u8
| AckOpts::Start as u8
| AckOpts::Progress as u8
| AckOpts::Completion as u8;
pub trait GenericPusTcSecondaryHeader {
fn pus_version(&self) -> PusVersion;
fn ack_flags(&self) -> u8;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn source_id(&self) -> u16;
}
pub mod zc {
use crate::ecss::{PusError, PusVersion};
use crate::tc::GenericPusTcSecondaryHeader;
use zerocopy::{AsBytes, FromBytes, NetworkEndian, Unaligned, U16};
#[derive(FromBytes, AsBytes, Unaligned)]
#[repr(C)]
pub struct PusTcSecondaryHeader {
version_ack: u8,
service: u8,
subservice: u8,
source_id: U16<NetworkEndian>,
}
impl TryFrom<crate::tc::PusTcSecondaryHeader> for PusTcSecondaryHeader {
type Error = PusError;
fn try_from(value: crate::tc::PusTcSecondaryHeader) -> Result<Self, Self::Error> {
if value.version != PusVersion::PusC {
return Err(PusError::VersionNotSupported(value.version));
}
Ok(PusTcSecondaryHeader {
version_ack: ((value.version as u8) << 4) | value.ack,
service: value.service,
subservice: value.subservice,
source_id: U16::from(value.source_id),
})
}
}
impl GenericPusTcSecondaryHeader for PusTcSecondaryHeader {
fn pus_version(&self) -> PusVersion {
PusVersion::try_from(self.version_ack >> 4 & 0b1111).unwrap_or(PusVersion::Invalid)
}
fn ack_flags(&self) -> u8 {
self.version_ack & 0b1111
}
fn service(&self) -> u8 {
self.service
}
fn subservice(&self) -> u8 {
self.subservice
}
fn source_id(&self) -> u16 {
self.source_id.get()
}
}
impl PusTcSecondaryHeader {
pub fn write_to_bytes(&self, slice: &mut [u8]) -> Option<()> {
self.write_to(slice)
}
pub fn from_bytes(slice: &[u8]) -> Option<Self> {
Self::read_from(slice)
}
}
}
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PusTcSecondaryHeader {
pub service: u8,
pub subservice: u8,
pub source_id: u16,
pub ack: u8,
pub version: PusVersion,
}
impl GenericPusTcSecondaryHeader for PusTcSecondaryHeader {
fn pus_version(&self) -> PusVersion {
self.version
}
fn ack_flags(&self) -> u8 {
self.ack
}
fn service(&self) -> u8 {
self.service
}
fn subservice(&self) -> u8 {
self.subservice
}
fn source_id(&self) -> u16 {
self.source_id
}
}
impl TryFrom<zc::PusTcSecondaryHeader> for PusTcSecondaryHeader {
type Error = ();
fn try_from(value: zc::PusTcSecondaryHeader) -> Result<Self, Self::Error> {
Ok(PusTcSecondaryHeader {
service: value.service(),
subservice: value.subservice(),
source_id: value.source_id(),
ack: value.ack_flags(),
version: PUS_VERSION,
})
}
}
impl PusTcSecondaryHeader {
pub fn new_simple(service: u8, subservice: u8) -> Self {
PusTcSecondaryHeader {
service,
subservice,
ack: ACK_ALL,
source_id: 0,
version: PusVersion::PusC,
}
}
pub fn new(service: u8, subservice: u8, ack: u8, source_id: u16) -> Self {
PusTcSecondaryHeader {
service,
subservice,
ack: ack & 0b1111,
source_id,
version: PusVersion::PusC,
}
}
}
/// This class models a PUS telecommand. It is the primary data structure to generate the raw byte
/// representation of a PUS telecommand or to deserialize one from raw bytes.
///
/// This class also derives the [serde::Serialize] and [serde::Deserialize] traits if the
/// [serde] feature is used, which allows sending TC packets around in a raw byte format using
/// a serde provider like [postcard](https://docs.rs/postcard/latest/postcard/).
///
/// There is no spare bytes support yet.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PusTc<'slice> {
sp_header: SpHeader,
pub sec_header: PusTcSecondaryHeader,
/// If this is set to false, a manual call to [PusTc::calc_own_crc16] or
/// [PusTc::update_packet_fields] is necessary for the serialized or cached CRC16 to be valid.
pub calc_crc_on_serialization: bool,
#[cfg_attr(feature = "serde", serde(skip))]
raw_data: Option<&'slice [u8]>,
app_data: Option<&'slice [u8]>,
crc16: Option<u16>,
}
impl<'slice> PusTc<'slice> {
/// Generates a new struct instance.
///
/// # Arguments
///
/// * `sp_header` - Space packet header information. The correct packet type will be set
/// automatically
/// * `sec_header` - Information contained in the data field header, including the service
/// and subservice type
/// * `app_data` - Custom application data
/// * `set_ccsds_len` - Can be used to automatically update the CCSDS space packet data length
/// field. If this is not set to true, [PusTc::update_ccsds_data_len] can be called to set
/// the correct value for this field manually
pub fn new(
sp_header: &mut SpHeader,
sec_header: PusTcSecondaryHeader,
app_data: Option<&'slice [u8]>,
set_ccsds_len: bool,
) -> Self {
sp_header.set_packet_type(PacketType::Tc);
sp_header.set_sec_header_flag();
let mut pus_tc = PusTc {
sp_header: *sp_header,
raw_data: None,
app_data,
sec_header,
calc_crc_on_serialization: true,
crc16: None,
};
if set_ccsds_len {
pus_tc.update_ccsds_data_len();
}
pus_tc
}
/// Simplified version of the [PusTc::new] function which only requires specifying the service
/// and subservice instead of the full PUS TC secondary header.
pub fn new_simple(
sph: &mut SpHeader,
service: u8,
subservice: u8,
app_data: Option<&'slice [u8]>,
set_ccsds_len: bool,
) -> Self {
Self::new(
sph,
PusTcSecondaryHeader::new(service, subservice, ACK_ALL, 0),
app_data,
set_ccsds_len,
)
}
pub fn len_packed(&self) -> usize {
let mut length = PUS_TC_MIN_LEN_WITHOUT_APP_DATA;
if let Some(app_data) = self.app_data {
length += app_data.len();
}
length
}
pub fn set_ack_field(&mut self, ack: u8) -> bool {
if ack > 0b1111 {
return false;
}
self.sec_header.ack = ack & 0b1111;
true
}
pub fn set_source_id(&mut self, source_id: u16) {
self.sec_header.source_id = source_id;
}
sp_header_impls!();
/// Calculates the CCSDS space packet data length field and sets it.
/// This is done automatically if the `set_ccsds_len` argument in the [PusTc::new] call was
/// set to true.
/// If this was not done or the application data is set or changed after construction,
/// this function needs to be called to ensure that the data length field of the CCSDS header
/// is set correctly.
pub fn update_ccsds_data_len(&mut self) {
self.sp_header.data_len =
self.len_packed() as u16 - size_of::<crate::zc::SpHeader>() as u16 - 1;
}
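// Manual length update sketch, loosely mirroring the `test_update_func` unit test below:
//
//     let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
//     let mut tc = PusTc::new_simple(&mut sph, 17, 1, None, false);
//     assert_eq!(tc.data_len(), 0);
//     tc.update_ccsds_data_len();
//     // 13 byte ping TC: data length field = total length - CCSDS header length - 1 = 6
//     assert_eq!(tc.data_len(), 6);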
/// This function should be called before the TC packet is serialized if
/// [PusTc::calc_crc_on_serialization] is set to false. It will calculate and cache the CRC16.
pub fn calc_own_crc16(&mut self) {
let mut digest = CRC_CCITT_FALSE.digest();
let sph_zc = crate::zc::SpHeader::from(self.sp_header);
digest.update(sph_zc.as_bytes());
let pus_tc_header = zc::PusTcSecondaryHeader::try_from(self.sec_header).unwrap();
digest.update(pus_tc_header.as_bytes());
if let Some(app_data) = self.app_data {
digest.update(app_data);
}
self.crc16 = Some(digest.finalize())
}
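// Manual CRC sketch (mirrors the `test_manual_crc_calculation` unit test below): when
// `calc_crc_on_serialization` is disabled, the CRC16 has to be calculated and cached
// explicitly before serialization.
//
//     let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
//     let mut pus_tc = PusTc::new_simple(&mut sph, 17, 1, None, true);
//     pus_tc.calc_crc_on_serialization = false;
//     pus_tc.calc_own_crc16();
//     let mut buf: [u8; 32] = [0; 32];
//     pus_tc.write_to_bytes(&mut buf).unwrap();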
/// This helper function calls both [PusTc::update_ccsds_data_len] and [PusTc::calc_own_crc16].
pub fn update_packet_fields(&mut self) {
self.update_ccsds_data_len();
self.calc_own_crc16();
}
/// Write the raw PUS byte representation to a provided buffer.
pub fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError> {
let mut curr_idx = 0;
let tc_header_len = size_of::<zc::PusTcSecondaryHeader>();
let total_size = self.len_packed();
if total_size > slice.len() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: slice.len(),
expected: total_size,
})
.into());
}
self.sp_header.write_to_be_bytes(slice)?;
curr_idx += CCSDS_HEADER_LEN;
let sec_header = zc::PusTcSecondaryHeader::try_from(self.sec_header).unwrap();
sec_header
.write_to_bytes(&mut slice[curr_idx..curr_idx + tc_header_len])
.ok_or(ByteConversionError::ZeroCopyToError)?;
curr_idx += tc_header_len;
if let Some(app_data) = self.app_data {
slice[curr_idx..curr_idx + app_data.len()].copy_from_slice(app_data);
curr_idx += app_data.len();
}
let crc16 = crc_procedure(
self.calc_crc_on_serialization,
&self.crc16,
0,
curr_idx,
slice,
)?;
slice[curr_idx..curr_idx + 2].copy_from_slice(crc16.to_be_bytes().as_slice());
curr_idx += 2;
Ok(curr_idx)
}
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn append_to_vec(&self, vec: &mut Vec<u8>) -> Result<usize, PusError> {
let sph_zc = crate::zc::SpHeader::from(self.sp_header);
let mut appended_len = PUS_TC_MIN_LEN_WITHOUT_APP_DATA;
if let Some(app_data) = self.app_data {
appended_len += app_data.len();
};
let start_idx = vec.len();
let mut ser_len = 0;
vec.extend_from_slice(sph_zc.as_bytes());
ser_len += sph_zc.as_bytes().len();
// The PUS version is hardcoded to PUS C
let pus_tc_header = zc::PusTcSecondaryHeader::try_from(self.sec_header).unwrap();
vec.extend_from_slice(pus_tc_header.as_bytes());
ser_len += pus_tc_header.as_bytes().len();
if let Some(app_data) = self.app_data {
vec.extend_from_slice(app_data);
ser_len += app_data.len();
}
let crc16 = crc_procedure(
self.calc_crc_on_serialization,
&self.crc16,
start_idx,
ser_len,
&vec[start_idx..ser_len],
)?;
vec.extend_from_slice(crc16.to_be_bytes().as_slice());
Ok(appended_len)
}
/// Create a [PusTc] instance from a raw slice. On success, it returns a tuple containing
/// the instance and the found byte length of the packet.
pub fn from_bytes(slice: &'slice [u8]) -> Result<(Self, usize), PusError> {
let raw_data_len = slice.len();
if raw_data_len < PUS_TC_MIN_LEN_WITHOUT_APP_DATA {
return Err(PusError::RawDataTooShort(raw_data_len));
}
let mut current_idx = 0;
let (sp_header, _) = SpHeader::from_be_bytes(&slice[0..CCSDS_HEADER_LEN])?;
current_idx += CCSDS_HEADER_LEN;
let total_len = sp_header.total_len();
if raw_data_len < total_len || total_len < PUS_TC_MIN_LEN_WITHOUT_APP_DATA {
return Err(PusError::RawDataTooShort(raw_data_len));
}
let sec_header = zc::PusTcSecondaryHeader::from_bytes(
&slice[current_idx..current_idx + PUC_TC_SECONDARY_HEADER_LEN],
)
.ok_or(ByteConversionError::ZeroCopyFromError)?;
current_idx += PUC_TC_SECONDARY_HEADER_LEN;
let raw_data = &slice[0..total_len];
let pus_tc = PusTc {
sp_header,
sec_header: PusTcSecondaryHeader::try_from(sec_header).unwrap(),
raw_data: Some(raw_data),
app_data: user_data_from_raw(current_idx, total_len, raw_data_len, slice)?,
calc_crc_on_serialization: false,
crc16: Some(crc_from_raw_data(raw_data)?),
};
verify_crc16_from_raw(raw_data, pus_tc.crc16.expect("CRC16 invalid"))?;
Ok((pus_tc, total_len))
}
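// Deserialization sketch: `buf` is assumed to hold a full PUS TC, for example the 13 byte
// ping TC from the module level example. The returned length is derived from the CCSDS
// header and is useful when parsing packets out of a larger buffer.
//
//     let (tc, packet_len) = PusTc::from_bytes(&buf).unwrap();
//     assert_eq!(packet_len, 13);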
pub fn raw(&self) -> Option<&'slice [u8]> {
self.raw_data
}
}
//noinspection RsTraitImplementation
impl CcsdsPacket for PusTc<'_> {
ccsds_impl!();
}
//noinspection RsTraitImplementation
impl PusPacket for PusTc<'_> {
delegate!(to self.sec_header {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
});
fn user_data(&self) -> Option<&[u8]> {
self.app_data
}
fn crc16(&self) -> Option<u16> {
self.crc16
}
}
//noinspection RsTraitImplementation
impl GenericPusTcSecondaryHeader for PusTc<'_> {
delegate!(to self.sec_header {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn source_id(&self) -> u16;
fn ack_flags(&self) -> u8;
});
}
#[cfg(all(test, feature = "std"))]
mod tests {
use crate::ecss::PusVersion::PusC;
use crate::ecss::{PusError, PusPacket};
use crate::tc::ACK_ALL;
use crate::tc::{GenericPusTcSecondaryHeader, PusTc, PusTcSecondaryHeader};
use crate::{ByteConversionError, SpHeader};
use crate::{CcsdsPacket, SequenceFlags};
use alloc::vec::Vec;
fn base_ping_tc_full_ctor() -> PusTc<'static> {
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
PusTc::new(&mut sph, tc_header, None, true)
}
fn base_ping_tc_simple_ctor() -> PusTc<'static> {
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
PusTc::new_simple(&mut sph, 17, 1, None, true)
}
fn base_ping_tc_simple_ctor_with_app_data(app_data: &'static [u8]) -> PusTc<'static> {
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
PusTc::new_simple(&mut sph, 17, 1, Some(app_data), true)
}
#[test]
fn test_tc_fields() {
let pus_tc = base_ping_tc_full_ctor();
assert_eq!(pus_tc.crc16(), None);
verify_test_tc(&pus_tc, false, 13);
}
#[test]
fn test_serialization() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_buf: [u8; 32] = [0; 32];
let size = pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(size, 13);
}
#[test]
fn test_deserialization() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_buf: [u8; 32] = [0; 32];
let size = pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(size, 13);
let (tc_from_raw, size) =
PusTc::from_bytes(&test_buf).expect("Creating PUS TC struct from raw buffer failed");
assert_eq!(size, 13);
verify_test_tc(&tc_from_raw, false, 13);
assert!(tc_from_raw.user_data().is_none());
verify_test_tc_raw(&test_buf);
verify_crc_no_app_data(&test_buf);
}
#[test]
fn test_update_func() {
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
let mut tc = PusTc::new_simple(&mut sph, 17, 1, None, false);
tc.calc_crc_on_serialization = false;
assert_eq!(tc.data_len(), 0);
tc.update_packet_fields();
assert_eq!(tc.data_len(), 6);
}
#[test]
fn test_deserialization_with_app_data() {
let pus_tc = base_ping_tc_simple_ctor_with_app_data(&[1, 2, 3]);
let mut test_buf: [u8; 32] = [0; 32];
let size = pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(size, 16);
let (tc_from_raw, size) =
PusTc::from_bytes(&test_buf).expect("Creating PUS TC struct from raw buffer failed");
assert_eq!(size, 16);
verify_test_tc(&tc_from_raw, true, 16);
let user_data = tc_from_raw.user_data().unwrap();
assert_eq!(user_data[0], 1);
assert_eq!(user_data[1], 2);
assert_eq!(user_data[2], 3);
}
#[test]
fn test_vec_ser_deser() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_vec = Vec::new();
let size = pus_tc
.append_to_vec(&mut test_vec)
.expect("Error writing TC to vector");
assert_eq!(size, 13);
verify_test_tc_raw(&test_vec.as_slice());
verify_crc_no_app_data(&test_vec.as_slice());
}
#[test]
fn test_incorrect_crc() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_buf: [u8; 32] = [0; 32];
pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
test_buf[12] = 0;
let res = PusTc::from_bytes(&test_buf);
assert!(res.is_err());
let err = res.unwrap_err();
assert!(matches!(err, PusError::IncorrectCrc { .. }));
}
#[test]
fn test_manual_crc_calculation() {
let mut pus_tc = base_ping_tc_simple_ctor();
pus_tc.calc_crc_on_serialization = false;
let mut test_buf: [u8; 32] = [0; 32];
pus_tc.calc_own_crc16();
pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
verify_test_tc_raw(&test_buf);
verify_crc_no_app_data(&test_buf);
}
#[test]
fn test_manual_crc_calculation_no_calc_call() {
let mut pus_tc = base_ping_tc_simple_ctor();
pus_tc.calc_crc_on_serialization = false;
let mut test_buf: [u8; 32] = [0; 32];
let res = pus_tc.write_to_bytes(test_buf.as_mut_slice());
assert!(res.is_err());
let err = res.unwrap_err();
assert!(matches!(err, PusError::CrcCalculationMissing { .. }));
}
#[test]
fn test_with_application_data_vec() {
let pus_tc = base_ping_tc_simple_ctor_with_app_data(&[1, 2, 3]);
verify_test_tc(&pus_tc, true, 16);
let mut test_vec = Vec::new();
let size = pus_tc
.append_to_vec(&mut test_vec)
.expect("Error writing TC to vector");
assert_eq!(test_vec[11], 1);
assert_eq!(test_vec[12], 2);
assert_eq!(test_vec[13], 3);
assert_eq!(size, 16);
}
#[test]
fn test_write_buf_too_small() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_buf = [0; 12];
let res = pus_tc.write_to_bytes(test_buf.as_mut_slice());
assert!(res.is_err());
let err = res.unwrap_err();
match err {
PusError::ByteConversionError(err) => match err {
ByteConversionError::ToSliceTooSmall(missmatch) => {
assert_eq!(missmatch.expected, pus_tc.len_packed());
assert_eq!(missmatch.found, 12);
}
_ => panic!("Unexpected error"),
},
_ => panic!("Unexpected error"),
}
}
#[test]
fn test_with_application_data_buf() {
let pus_tc = base_ping_tc_simple_ctor_with_app_data(&[1, 2, 3]);
verify_test_tc(&pus_tc, true, 16);
let mut test_buf: [u8; 32] = [0; 32];
let size = pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(test_buf[11], 1);
assert_eq!(test_buf[12], 2);
assert_eq!(test_buf[13], 3);
assert_eq!(size, 16);
}
#[test]
fn test_custom_setters() {
let mut pus_tc = base_ping_tc_simple_ctor();
let mut test_buf: [u8; 32] = [0; 32];
pus_tc.set_apid(0x7ff);
pus_tc.set_seq_count(0x3fff);
pus_tc.set_ack_field(0b11);
pus_tc.set_source_id(0xffff);
pus_tc.set_seq_flags(SequenceFlags::Unsegmented);
assert_eq!(pus_tc.source_id(), 0xffff);
assert_eq!(pus_tc.seq_count(), 0x3fff);
assert_eq!(pus_tc.ack_flags(), 0b11);
assert_eq!(pus_tc.apid(), 0x7ff);
assert_eq!(pus_tc.sequence_flags(), SequenceFlags::Unsegmented);
pus_tc.calc_own_crc16();
pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(test_buf[0], 0x1f);
assert_eq!(test_buf[1], 0xff);
assert_eq!(test_buf[2], 0xff);
assert_eq!(test_buf[3], 0xff);
assert_eq!(test_buf[6], 0x23);
// Source ID 0xffff
assert_eq!(test_buf[9], 0xff);
assert_eq!(test_buf[10], 0xff);
}
fn verify_test_tc(tc: &PusTc, has_user_data: bool, exp_full_len: usize) {
assert_eq!(PusPacket::service(tc), 17);
assert_eq!(PusPacket::subservice(tc), 1);
assert!(tc.sec_header_flag());
assert_eq!(PusPacket::pus_version(tc), PusC);
if !has_user_data {
assert_eq!(tc.user_data(), None);
}
assert_eq!(tc.seq_count(), 0x34);
assert_eq!(tc.source_id(), 0);
assert_eq!(tc.apid(), 0x02);
assert_eq!(tc.ack_flags(), ACK_ALL);
assert_eq!(tc.len_packed(), exp_full_len);
let mut comp_header = SpHeader::tc_unseg(0x02, 0x34, exp_full_len as u16 - 7).unwrap();
comp_header.set_sec_header_flag();
assert_eq!(tc.sp_header, comp_header);
}
fn verify_test_tc_raw(slice: &impl AsRef<[u8]>) {
// Reference comparison implementation:
// https://github.com/us-irs/py-spacepackets/blob/v0.13.0/tests/ecss/test_pus_tc.py
let slice = slice.as_ref();
// First two bytes are 0x1802: TC packet type, secondary header flag set, APID 0x02
assert_eq!(slice[0], 0x18);
assert_eq!(slice[1], 0x02);
// Unsegmented packets
assert_eq!(slice[2], 0xc0);
// Sequence count 0x34
assert_eq!(slice[3], 0x34);
assert_eq!(slice[4], 0x00);
// Space data length of 6 equals total packet length of 13
assert_eq!(slice[5], 0x06);
// PUS Version C 0b0010 and ACK flags 0b1111
assert_eq!(slice[6], 0x2f);
// Service 17
assert_eq!(slice[7], 0x11);
// Subservice 1
assert_eq!(slice[8], 0x01);
// Source ID 0
assert_eq!(slice[9], 0x00);
assert_eq!(slice[10], 0x00);
}
fn verify_crc_no_app_data(slice: &impl AsRef<[u8]>) {
// Reference comparison implementation:
// https://github.com/us-irs/py-spacepackets/blob/v0.13.0/tests/ecss/test_pus_tc.py
let slice = slice.as_ref();
assert_eq!(slice[11], 0xee);
assert_eq!(slice[12], 0x63);
}
}

View File

@ -3,10 +3,8 @@
//! See [chrono::DateTime::format] for a usage example of the generated
//! [chrono::format::DelayedFormat] structs.
#[cfg(feature = "alloc")]
use chrono::{
format::{DelayedFormat, StrftimeItems},
DateTime, Utc,
};
use chrono::format::{DelayedFormat, StrftimeItems};
use chrono::{DateTime, Utc};
/// Tuple of format string and formatted size for time code A.
///

File diff suppressed because it is too large

View File

@ -3,15 +3,9 @@
//!
//! The core data structure to do this is the [TimeProviderCcsdsEpoch] struct.
use super::*;
use chrono::Datelike;
use core::fmt::Debug;
use core::ops::{Add, AddAssign};
use core::time::Duration;
const MIN_CUC_LEN: usize = 2;
/// Base value for the preamble field for a time field parser to determine the time field type.
pub const P_FIELD_BASE: u8 = (CcsdsTimeCodes::CucCcsdsEpoch as u8) << 4;
/// Maximum length if the preamble field is not extended.
pub const MAX_CUC_LEN_SMALL_PREAMBLE: usize = 8;
@ -45,14 +39,12 @@ impl TryFrom<u8> for FractionalResolution {
/// Please note that this function will panic if the fractional value is not smaller than
/// the maximum number of fractions allowed for the particular resolution.
/// (e.g. passing 270 when the resolution only allows 255 values).
#[inline]
pub fn convert_fractional_part_to_ns(fractional_part: FractionalPart) -> u64 {
let div = fractional_res_to_div(fractional_part.0);
assert!(fractional_part.1 < div);
10_u64.pow(9) * fractional_part.1 as u64 / div as u64
}
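// Worked example: with a one byte fractional field there are 2^8 - 1 = 255 fraction steps per
// second, so a fractional value of 128 corresponds to
// 10^9 * 128 / 255 = 501_960_784 ns, i.e. roughly half a second.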
#[inline(always)]
pub const fn fractional_res_to_div(res: FractionalResolution) -> u32 {
2_u32.pow(8 * res as u32) - 1
}
@ -72,20 +64,10 @@ pub fn fractional_part_from_subsec_ns(
if ns > sec_as_ns {
panic!("passed nanosecond value larger than 1 second");
}
let resolution = fractional_res_to_div(res) as u64;
// Use integer division because this can reduce code size on really small systems.
// First determine the nanoseconds for the smallest segment given the resolution.
// Then divide by that to find out the fractional part. For the calculation of the smallest
// fraction, we perform a ceiling division. If we used the default flooring division instead,
// we would divide by a smaller value, which would allow the calculation to produce invalid
// fractional parts which are too large. For the division of the nanoseconds by the smallest
// fraction, a flooring division is correct.
// The multiplication with 100000 is necessary to avoid precision loss during integer division.
// TODO: Floating point division might actually be the faster option, but it requires
// additional code on small embedded systems.
let fractional_part = ns * 100000 / ((sec_as_ns * 100000 + resolution) / resolution);
// Floating point division.
//let fractional_part = (ns as f64 / ((sec_as_ns as f64) / resolution as f64)).floor() as u32;
// Then divide by that to find out the fractional part. An integer division floors
// which is what we want here.
let fractional_part = ns / (sec_as_ns / fractional_res_to_div(res) as u64);
Some(FractionalPart(res, fractional_part as u32))
}
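// Worked example for the ceiling-division variant above: with a one byte fractional field
// (255 steps per second), 500 ms = 5 * 10^8 ns yields
// 5 * 10^13 / ((10^14 + 255) / 255) = 127, so half a second maps to fraction 127 of 255.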
@ -94,7 +76,6 @@ pub fn fractional_part_from_subsec_ns(
pub enum CucError {
InvalidCounterWidth(u8),
InvalidFractionResolution(FractionalResolution),
/// Invalid counter supplied.
InvalidCounter(u8, u64),
InvalidFractions(FractionalResolution, u64),
}
@ -103,16 +84,16 @@ impl Display for CucError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
CucError::InvalidCounterWidth(w) => {
write!(f, "invalid cuc counter byte width {w}")
write!(f, "invalid cuc counter byte width {}", w)
}
CucError::InvalidFractionResolution(w) => {
write!(f, "invalid cuc fractional part byte width {w:?}")
write!(f, "invalid cuc fractional part byte width {:?}", w)
}
CucError::InvalidCounter(w, c) => {
write!(f, "invalid cuc counter {c} for width {w}")
write!(f, "invalid cuc counter {} for width {}", c, w)
}
CucError::InvalidFractions(w, c) => {
write!(f, "invalid cuc fractional part {c} for width {w:?}")
write!(f, "invalid cuc fractional part {} for width {:?}", c, w)
}
}
}
@ -134,8 +115,8 @@ pub struct FractionalPart(FractionalResolution, u32);
/// It has the capability to generate and read timestamps as specified in the CCSDS 301.0-B-4
/// section 3.2 . The preamble field only has one byte, which allows a time code representation
/// through the year 2094. The time is represented as a simple binary counter starting from the
/// fixed CCSDS epoch (1958-01-01T00:00:00+00:00). It is possible to provide subsecond accuracy
/// using the fractional field with various available [resolutions][FractionalResolution].
/// fixed CCSDS epoch (1958-01-01 00:00:00). It is possible to provide subsecond accuracy using the
/// fractional field with various available [resolutions][FractionalResolution].
///
/// Having a preamble field of one byte limits the width of the counter
/// type (generally seconds) to 4 bytes and the width of the fractions type to 3 bytes. This limits
@ -234,26 +215,24 @@ impl TimeProviderCcsdsEpoch {
/// The counter width will always be set to 4 bytes because the normal CCSDS epoch will overflow
/// when using less than that.
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub fn from_now(fraction_resolution: FractionalResolution) -> Result<Self, StdTimestampError> {
let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?;
let ccsds_epoch = unix_epoch_to_ccsds_epoch(now.as_secs() as i64);
let ccsds_epoch = unix_epoch_to_ccsds_epoch(now.as_secs());
if fraction_resolution == FractionalResolution::Seconds {
return Ok(Self::new(ccsds_epoch as u32));
}
let fractions =
fractional_part_from_subsec_ns(fraction_resolution, now.subsec_nanos() as u64);
Self::new_with_fractions(ccsds_epoch as u32, fractions.unwrap())
.map_err(|e| StdTimestampError::Timestamp(e.into()))
.map_err(|e| StdTimestampError::TimestampError(e.into()))
}
/// Updates the current time stamp from the current time. The fractional field width remains
/// the same and will be updated accordingly.
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub fn update_from_now(&mut self) -> Result<(), StdTimestampError> {
let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?;
self.counter.1 = unix_epoch_to_ccsds_epoch(now.as_secs() as i64) as u32;
self.counter.1 = unix_epoch_to_ccsds_epoch(now.as_secs()) as u32;
if self.fractions.is_some() {
self.fractions = fractional_part_from_subsec_ns(
self.fractions.unwrap().0,
@ -263,42 +242,6 @@ impl TimeProviderCcsdsEpoch {
Ok(())
}
pub fn from_date_time(
dt: &DateTime<Utc>,
res: FractionalResolution,
) -> Result<Self, TimestampError> {
// Year before CCSDS epoch is invalid.
if dt.year() < 1958 {
return Err(TimestampError::DateBeforeCcsdsEpoch(*dt));
}
Self::new_generic(
WidthCounterPair(4, dt.timestamp() as u32),
fractional_part_from_subsec_ns(res, dt.timestamp_subsec_nanos() as u64),
)
.map_err(|e| e.into())
}
pub fn from_unix_stamp(
unix_stamp: &UnixTimestamp,
res: FractionalResolution,
) -> Result<Self, TimestampError> {
let ccsds_epoch = unix_epoch_to_ccsds_epoch(unix_stamp.unix_seconds);
// Negative CCSDS epoch is invalid.
if ccsds_epoch < 0 {
return Err(TimestampError::DateBeforeCcsdsEpoch(
unix_stamp.as_date_time().unwrap(),
));
}
if ccsds_epoch > u32::MAX as i64 {
return Err(CucError::InvalidCounter(4, ccsds_epoch as u64).into());
}
let mut fractions = None;
if let Some(subsec_millis) = unix_stamp.subsecond_millis {
fractions = fractional_part_from_subsec_ns(res, subsec_millis as u64 * 10_u64.pow(6));
}
Self::new_generic(WidthCounterPair(4, ccsds_epoch as u32), fractions).map_err(|e| e.into())
}
pub fn new_u16_counter(counter: u16) -> Self {
// These values are definitely valid, so it is okay to unwrap here.
Self::new_generic(WidthCounterPair(2, counter as u32), None).unwrap()
@ -357,7 +300,7 @@ impl TimeProviderCcsdsEpoch {
}
fn build_p_field(counter_width: u8, fractions_width: Option<FractionalResolution>) -> u8 {
let mut pfield = P_FIELD_BASE;
let mut pfield = (CcsdsTimeCodes::CucCcsdsEpoch as u8) << 4;
if !(1..=4).contains(&counter_width) {
// Okay to panic here, this function is private and all input values should
// have been sanitized
@ -395,11 +338,6 @@ impl TimeProviderCcsdsEpoch {
pfield & 0b11
}
#[inline]
fn unix_seconds(&self) -> i64 {
ccsds_epoch_to_unix_epoch(self.counter.1 as i64)
}
/// This returns the length of the individual components of the CUC timestamp in addition
/// to the total size.
///
@ -453,7 +391,7 @@ impl TimeReader for TimeProviderCcsdsEpoch {
Self: Sized,
{
if buf.len() < MIN_CUC_LEN {
return Err(TimestampError::ByteConversion(
return Err(TimestampError::ByteConversionError(
ByteConversionError::FromSliceTooSmall(SizeMissmatch {
expected: MIN_CUC_LEN,
found: buf.len(),
@ -479,7 +417,7 @@ impl TimeReader for TimeProviderCcsdsEpoch {
let (cntr_len, fractions_len, total_len) =
Self::len_components_and_total_from_pfield(buf[0]);
if buf.len() < total_len {
return Err(TimestampError::ByteConversion(
return Err(TimestampError::ByteConversionError(
ByteConversionError::FromSliceTooSmall(SizeMissmatch {
expected: total_len,
found: buf.len(),
@ -535,7 +473,7 @@ impl TimeWriter for TimeProviderCcsdsEpoch {
fn write_to_bytes(&self, bytes: &mut [u8]) -> Result<usize, TimestampError> {
// Cross check the sizes of the counters against byte widths in the ctor
if bytes.len() < self.len_as_bytes() {
return Err(TimestampError::ByteConversion(
return Err(TimestampError::ByteConversionError(
ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: bytes.len(),
expected: self.len_as_bytes(),
@ -592,19 +530,10 @@ impl CcsdsTimeProvider for TimeProviderCcsdsEpoch {
CcsdsTimeCodes::CucCcsdsEpoch
}
/// Please note that this function only works as intended if the time counter resolution
/// is one second.
fn unix_seconds(&self) -> i64 {
self.unix_seconds()
}
fn subsecond_millis(&self) -> Option<u16> {
if let Some(fractions) = self.fractions {
if fractions.0 == FractionalResolution::Seconds {
return None;
}
// Rounding down here is the correct approach.
return Some((convert_fractional_part_to_ns(fractions) / 10_u32.pow(6) as u64) as u16);
}
None
ccsds_epoch_to_unix_epoch(self.counter.1 as u64) as i64
}
fn date_time(&self) -> Option<DateTime<Utc>> {
@ -621,95 +550,6 @@ impl CcsdsTimeProvider for TimeProviderCcsdsEpoch {
}
}
fn get_provider_values_after_duration_addition(
provider: &TimeProviderCcsdsEpoch,
duration: Duration,
) -> (u32, Option<FractionalPart>) {
let mut new_counter = provider.counter.1;
let subsec_nanos = duration.subsec_nanos();
let mut increment_counter = |amount: u32| {
let mut sum: u64 = 0;
let mut counter_inc_handler = |max_val: u64| {
sum = new_counter as u64 + amount as u64;
if sum >= max_val {
new_counter = (sum % max_val) as u32;
return;
}
new_counter = sum as u32;
};
match provider.counter.0 {
1 => counter_inc_handler(u8::MAX as u64),
2 => counter_inc_handler(u16::MAX as u64),
3 => counter_inc_handler((2_u32.pow(24) - 1) as u64),
4 => counter_inc_handler(u32::MAX as u64),
_ => {
// Should never happen
panic!("invalid counter width")
}
}
};
let fractional_part = if let Some(fractional_part) = &provider.fractions {
let fractional_increment =
fractional_part_from_subsec_ns(fractional_part.0, subsec_nanos as u64).unwrap();
let mut increment_fractions = |resolution| {
let mut new_fractions = fractional_part.1 + fractional_increment.1;
let max_fractions = fractional_res_to_div(resolution);
if new_fractions > max_fractions {
increment_counter(1);
new_fractions -= max_fractions;
}
Some(FractionalPart(resolution, new_fractions))
};
match fractional_increment.0 {
FractionalResolution::Seconds => None,
_ => increment_fractions(fractional_increment.0),
}
} else {
None
};
increment_counter(duration.as_secs() as u32);
(new_counter, fractional_part)
}
impl AddAssign<Duration> for TimeProviderCcsdsEpoch {
fn add_assign(&mut self, duration: Duration) {
let (new_counter, new_fractional_part) =
get_provider_values_after_duration_addition(self, duration);
self.counter.1 = new_counter;
if self.fractions.is_some() {
self.fractions = new_fractional_part;
}
}
}
impl Add<Duration> for TimeProviderCcsdsEpoch {
type Output = Self;
fn add(self, duration: Duration) -> Self::Output {
let (new_counter, new_fractional_part) =
get_provider_values_after_duration_addition(&self, duration);
if let Some(fractional_part) = new_fractional_part {
// The generated fractional part should always be valid, so it's okay to unwrap here.
return Self::new_with_fractions(new_counter, fractional_part).unwrap();
}
Self::new(new_counter)
}
}
impl Add<Duration> for &TimeProviderCcsdsEpoch {
type Output = TimeProviderCcsdsEpoch;
fn add(self, duration: Duration) -> Self::Output {
let (new_counter, new_fractional_part) =
get_provider_values_after_duration_addition(self, duration);
if let Some(fractional_part) = new_fractional_part {
// The generated fractional part should always be valid, so it's okay to unwrap here.
return Self::Output::new_with_fractions(new_counter, fractional_part).unwrap();
}
Self::Output::new(new_counter)
}
}
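// Hypothetical usage sketch (not part of the original code), assuming the constructor, the
// resolution setter and the Duration arithmetic impls shown above are available: build a
// stamp, advance it, serialize it and read it back.
fn cuc_usage_sketch() {
    let mut stamp = TimeProviderCcsdsEpoch::new(200);
    stamp.set_fractional_resolution(FractionalResolution::FifteenUs);
    stamp += Duration::from_millis(2500);
    let mut buf = [0u8; 16];
    let written = stamp.write_to_bytes(&mut buf).expect("writing timestamp failed");
    assert_eq!(written, stamp.len_as_bytes());
    let read_back = TimeProviderCcsdsEpoch::from_bytes(&buf).expect("reading timestamp failed");
    assert_eq!(read_back.width_counter_pair().1, stamp.width_counter_pair().1);
}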
#[cfg(test)]
mod tests {
use super::*;
@ -746,7 +586,6 @@ mod tests {
let zero_cuc = zero_cuc.unwrap();
let res = zero_cuc.write_to_bytes(&mut buf);
assert!(res.is_ok());
assert!(zero_cuc.subsecond_millis().is_none());
assert_eq!(zero_cuc.len_as_bytes(), 5);
assert_eq!(pfield_len(buf[0]), 1);
let written = res.unwrap();
@ -797,7 +636,9 @@ mod tests {
let res = TimeProviderCcsdsEpoch::from_bytes(&buf[0..i]);
assert!(res.is_err());
let err = res.unwrap_err();
if let TimestampError::ByteConversion(ByteConversionError::FromSliceTooSmall(e)) = err {
if let TimestampError::ByteConversionError(ByteConversionError::FromSliceTooSmall(e)) =
err
{
assert_eq!(e.found, i);
assert_eq!(e.expected, 2);
}
@ -808,7 +649,9 @@ mod tests {
let res = TimeProviderCcsdsEpoch::from_bytes(&buf[0..i]);
assert!(res.is_err());
let err = res.unwrap_err();
if let TimestampError::ByteConversion(ByteConversionError::FromSliceTooSmall(e)) = err {
if let TimestampError::ByteConversionError(ByteConversionError::FromSliceTooSmall(e)) =
err
{
assert_eq!(e.found, i);
assert_eq!(e.expected, large_stamp.len_as_bytes());
}
@ -882,7 +725,9 @@ mod tests {
let err = cuc.write_to_bytes(&mut buf[0..i]);
assert!(err.is_err());
let err = err.unwrap_err();
if let TimestampError::ByteConversion(ByteConversionError::ToSliceTooSmall(e)) = err {
if let TimestampError::ByteConversionError(ByteConversionError::ToSliceTooSmall(e)) =
err
{
assert_eq!(e.expected, cuc.len_as_bytes());
assert_eq!(e.found, i);
} else {
@ -1022,39 +867,29 @@ mod tests {
#[test]
fn fractional_part_formula() {
let fractional_part =
fractional_part_from_subsec_ns(FractionalResolution::FourMs, 7843138).unwrap();
assert_eq!(fractional_part.1, 2);
7843137 / (10_u64.pow(9) / fractional_res_to_div(FractionalResolution::FourMs) as u64);
assert_eq!(fractional_part, 2);
}
#[test]
fn fractional_part_formula_2() {
let fractional_part =
fractional_part_from_subsec_ns(FractionalResolution::FourMs, 12000000).unwrap();
assert_eq!(fractional_part.1, 3);
12000000 / (10_u64.pow(9) / fractional_res_to_div(FractionalResolution::FourMs) as u64);
assert_eq!(fractional_part, 3);
}
#[test]
fn fractional_part_formula_3() {
let one_fraction_with_width_two_in_ns =
10_u64.pow(9) as f64 / (2_u32.pow(8 * 2) - 1) as f64;
assert_eq!(one_fraction_with_width_two_in_ns.ceil(), 15260.0);
let hundred_fractions_and_some =
(100.0 * one_fraction_with_width_two_in_ns).floor() as u64 + 7000;
let fractional_part = fractional_part_from_subsec_ns(
FractionalResolution::FifteenUs,
hundred_fractions_and_some,
)
.unwrap();
assert_eq!(fractional_part.1, 100);
// Using exactly 101.0 can yield values which will later be rounded down to 100
let hundred_and_one_fractions =
(101.001 * one_fraction_with_width_two_in_ns).floor() as u64;
let fractional_part = fractional_part_from_subsec_ns(
FractionalResolution::FifteenUs,
hundred_and_one_fractions,
)
.unwrap();
assert_eq!(fractional_part.1, 101);
let one_fraction_with_width_two_in_ns = 10_u64.pow(9) / (2_u32.pow(8 * 2) - 1) as u64;
assert_eq!(one_fraction_with_width_two_in_ns, 15259);
let hundred_fractions_and_some = 100 * one_fraction_with_width_two_in_ns + 7000;
let fractional_part = hundred_fractions_and_some
/ (10_u64.pow(9) / fractional_res_to_div(FractionalResolution::FifteenUs) as u64);
assert_eq!(fractional_part, 100);
let hundred_and_one_fractions = 101 * one_fraction_with_width_two_in_ns;
let fractional_part = hundred_and_one_fractions
/ (10_u64.pow(9) / fractional_res_to_div(FractionalResolution::FifteenUs) as u64);
assert_eq!(fractional_part, 101);
}
#[test]
@ -1079,41 +914,4 @@ mod tests {
let res = stamp.update_from_now();
assert!(res.is_ok());
}
#[test]
fn assert_largest_fractions() {
let fractions =
fractional_part_from_subsec_ns(FractionalResolution::SixtyNs, 10u64.pow(9) - 1)
.unwrap();
// The value cannot be larger than what is representable by 3 bytes
// Assert that the maximum resolution can be reached
assert_eq!(fractions.1, 2_u32.pow(3 * 8) - 2);
}
#[test]
fn add_duration_basic() {
let mut cuc_stamp = TimeProviderCcsdsEpoch::new(200);
cuc_stamp.set_fractional_resolution(FractionalResolution::FifteenUs);
let duration = Duration::from_millis(2500);
cuc_stamp += duration;
assert_eq!(cuc_stamp.width_counter_pair().1, 202);
let fractions = cuc_stamp.width_fractions_pair().unwrap().1;
let expected_val =
(0.5 * fractional_res_to_div(FractionalResolution::FifteenUs) as f64).floor() as u32;
assert_eq!(fractions, expected_val);
let cuc_stamp2 = cuc_stamp + Duration::from_millis(501);
// What I would roughly expect
assert_eq!(cuc_stamp2.counter.1, 203);
assert!(cuc_stamp2.fractions.unwrap().1 < 100);
assert!(cuc_stamp2.subsecond_millis().unwrap() <= 1);
}
#[test]
fn add_duration_overflow() {
let mut cuc_stamp =
TimeProviderCcsdsEpoch::new_generic(WidthCounterPair(1, 255), None).unwrap();
let duration = Duration::from_secs(10);
cuc_stamp += duration;
assert_eq!(cuc_stamp.counter.1, 10);
}
}


@ -1,10 +1,7 @@
//! CCSDS Time Code Formats according to [CCSDS 301.0-B-4](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
use crate::{ByteConversionError, SizeMissmatch};
use chrono::{DateTime, LocalResult, TimeZone, Utc};
use core::cmp::Ordering;
use core::fmt::{Display, Formatter};
use core::ops::{Add, AddAssign};
use core::time::Duration;
#[allow(unused_imports)]
#[cfg(not(feature = "std"))]
@ -16,8 +13,6 @@ use serde::{Deserialize, Serialize};
use std::error::Error;
#[cfg(feature = "std")]
use std::time::{SystemTime, SystemTimeError};
#[cfg(feature = "std")]
pub use std_mod::*;
pub mod ascii;
pub mod cds;
@ -25,7 +20,6 @@ pub mod cuc;
pub const DAYS_CCSDS_TO_UNIX: i32 = -4383;
pub const SECONDS_PER_DAY: u32 = 86400;
pub const MS_PER_DAY: u32 = SECONDS_PER_DAY * 1000;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
@ -61,38 +55,68 @@ pub fn ccsds_time_code_from_p_field(pfield: u8) -> Result<CcsdsTimeCodes, u8> {
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[non_exhaustive]
pub enum TimestampError {
/// Contains tuple where first value is the expected time code and the second
/// value is the found raw value
InvalidTimeCode(CcsdsTimeCodes, u8),
ByteConversion(ByteConversionError),
Cds(cds::CdsError),
Cuc(cuc::CucError),
DateBeforeCcsdsEpoch(DateTime<Utc>),
ByteConversionError(ByteConversionError),
CdsError(cds::CdsError),
CucError(cuc::CucError),
CustomEpochNotSupported,
}
impl From<cds::CdsError> for TimestampError {
fn from(e: cds::CdsError) -> Self {
TimestampError::CdsError(e)
}
}
impl From<cuc::CucError> for TimestampError {
fn from(e: cuc::CucError) -> Self {
TimestampError::CucError(e)
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[derive(Debug, Clone)]
pub enum StdTimestampError {
SystemTimeError(SystemTimeError),
TimestampError(TimestampError),
}
#[cfg(feature = "std")]
impl From<TimestampError> for StdTimestampError {
fn from(v: TimestampError) -> Self {
Self::TimestampError(v)
}
}
#[cfg(feature = "std")]
impl From<SystemTimeError> for StdTimestampError {
fn from(v: SystemTimeError) -> Self {
Self::SystemTimeError(v)
}
}
impl Display for TimestampError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
TimestampError::InvalidTimeCode(time_code, raw_val) => {
write!(
f,
"invalid raw time code value {raw_val} for time code {time_code:?}"
"invalid raw time code value {} for time code {:?}",
raw_val, time_code
)
}
TimestampError::Cds(e) => {
write!(f, "cds error {e}")
TimestampError::CdsError(e) => {
write!(f, "cds error {}", e)
}
TimestampError::Cuc(e) => {
write!(f, "cuc error {e}")
TimestampError::CucError(e) => {
write!(f, "cuc error {}", e)
}
TimestampError::ByteConversion(e) => {
write!(f, "byte conversion error {e}")
}
TimestampError::DateBeforeCcsdsEpoch(e) => {
write!(f, "datetime with date before ccsds epoch: {e}")
TimestampError::ByteConversionError(e) => {
write!(f, "byte conversion error {}", e)
}
TimestampError::CustomEpochNotSupported => {
write!(f, "custom epochs are not supported")
@ -105,40 +129,13 @@ impl Display for TimestampError {
impl Error for TimestampError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
TimestampError::ByteConversion(e) => Some(e),
TimestampError::Cds(e) => Some(e),
TimestampError::Cuc(e) => Some(e),
TimestampError::ByteConversionError(e) => Some(e),
TimestampError::CdsError(e) => Some(e),
TimestampError::CucError(e) => Some(e),
_ => None,
}
}
}
impl From<cds::CdsError> for TimestampError {
fn from(e: cds::CdsError) -> Self {
TimestampError::Cds(e)
}
}
impl From<cuc::CucError> for TimestampError {
fn from(e: cuc::CucError) -> Self {
TimestampError::Cuc(e)
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod std_mod {
use crate::time::TimestampError;
use std::time::SystemTimeError;
use thiserror::Error;
#[derive(Debug, Clone, Error)]
pub enum StdTimestampError {
#[error("system time error: {0}")]
SystemTime(#[from] SystemTimeError),
#[error("timestamp error: {0}")]
Timestamp(#[from] TimestampError),
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
@ -151,28 +148,28 @@ pub fn seconds_since_epoch() -> f64 {
/// Convert UNIX days to CCSDS days
///
/// - CCSDS epoch: 1958-01-01T00:00:00+00:00
/// - UNIX Epoch: 1970-01-01T00:00:00+00:00
/// - CCSDS epoch: 1958 January 1
/// - UNIX Epoch: 1970 January 1
pub const fn unix_to_ccsds_days(unix_days: i64) -> i64 {
unix_days - DAYS_CCSDS_TO_UNIX as i64
}
/// Convert CCSDS days to UNIX days
///
/// - CCSDS epoch: 1958-01-01T00:00:00+00:00
/// - UNIX Epoch: 1970-01-01T00:00:00+00:00
/// - CCSDS epoch: 1958 January 1
/// - UNIX Epoch: 1970 January 1
pub const fn ccsds_to_unix_days(ccsds_days: i64) -> i64 {
ccsds_days + DAYS_CCSDS_TO_UNIX as i64
}
/// Similar to [unix_to_ccsds_days] but converts the epoch instead, which is the number of elapsed
/// seconds since the CCSDS and UNIX epoch times.
pub const fn unix_epoch_to_ccsds_epoch(unix_epoch: i64) -> i64 {
unix_epoch - (DAYS_CCSDS_TO_UNIX as i64 * SECONDS_PER_DAY as i64)
pub const fn unix_epoch_to_ccsds_epoch(unix_epoch: u64) -> u64 {
(unix_epoch as i64 - (DAYS_CCSDS_TO_UNIX as i64 * SECONDS_PER_DAY as i64)) as u64
}
pub const fn ccsds_epoch_to_unix_epoch(ccsds_epoch: i64) -> i64 {
ccsds_epoch + (DAYS_CCSDS_TO_UNIX as i64 * SECONDS_PER_DAY as i64)
pub const fn ccsds_epoch_to_unix_epoch(ccsds_epoch: u64) -> u64 {
(ccsds_epoch as i64 + (DAYS_CCSDS_TO_UNIX as i64 * SECONDS_PER_DAY as i64)) as u64
}
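// Hypothetical worked example (not part of the original code): the CCSDS epoch lies 4383 days
// before the UNIX epoch, so the offset between the two second counts is
// 4383 * 86400 = 378_691_200 seconds.
fn epoch_conversion_sketch() {
    assert_eq!(unix_to_ccsds_days(0), 4383);
    assert_eq!(unix_epoch_to_ccsds_epoch(0), 378_691_200);
    assert_eq!(ccsds_epoch_to_unix_epoch(378_691_200), 0);
}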
#[cfg(feature = "std")]
@ -202,9 +199,6 @@ pub trait TimeReader {
}
/// Trait for generic CCSDS time providers.
///
/// The UNIX helper methods and the [Self::date_time] method are not strictly necessary but extremely
/// practical because they provide a very common and simple exchange format for time information.
pub trait CcsdsTimeProvider {
fn len_as_bytes(&self) -> usize;
@ -214,203 +208,10 @@ pub trait CcsdsTimeProvider {
/// in big endian format.
fn p_field(&self) -> (usize, [u8; 2]);
fn ccdsd_time_code(&self) -> CcsdsTimeCodes;
fn unix_seconds(&self) -> i64;
fn subsecond_millis(&self) -> Option<u16>;
fn unix_stamp(&self) -> UnixTimestamp {
if self.subsecond_millis().is_none() {
return UnixTimestamp::new_only_seconds(self.unix_seconds());
}
UnixTimestamp::const_new(self.unix_seconds(), self.subsecond_millis().unwrap())
}
fn date_time(&self) -> Option<DateTime<Utc>>;
}
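// Hypothetical helper (not part of the original code) showing how the trait above can be used
// generically: any provider exposes its UNIX time and an optional chrono date-time without the
// caller knowing the concrete CCSDS time code.
fn unix_view(provider: &impl CcsdsTimeProvider) -> (UnixTimestamp, Option<DateTime<Utc>>) {
    (provider.unix_stamp(), provider.date_time())
}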
/// UNIX timestamp: Elapsed seconds since 1970-01-01T00:00:00+00:00.
///
/// Also can optionally include subsecond millisecond for greater accuracy. Please note that a
/// subsecond millisecond value of 0 gets converted to [None].
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct UnixTimestamp {
pub unix_seconds: i64,
subsecond_millis: Option<u16>,
}
impl UnixTimestamp {
/// Returns [None] if the subsecond millisecond value is larger than 999. 0 is converted to
/// a [None] value.
pub fn new(unix_seconds: i64, subsec_millis: u16) -> Option<Self> {
if subsec_millis > 999 {
return None;
}
Some(Self::const_new(unix_seconds, subsec_millis))
}
/// Like [Self::new] but const. Panics if the subsecond value is larger than 999.
pub const fn const_new(unix_seconds: i64, subsec_millis: u16) -> Self {
if subsec_millis > 999 {
panic!("subsec milliseconds exceeds 999");
}
let subsecond_millis = if subsec_millis == 0 {
None
} else {
Some(subsec_millis)
};
Self {
unix_seconds,
subsecond_millis,
}
}
pub fn new_only_seconds(unix_seconds: i64) -> Self {
Self {
unix_seconds,
subsecond_millis: None,
}
}
pub fn subsecond_millis(&self) -> Option<u16> {
self.subsecond_millis
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub fn from_now() -> Result<Self, SystemTimeError> {
let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?;
let epoch = now.as_secs();
Ok(Self::const_new(epoch as i64, now.subsec_millis() as u16))
}
#[inline]
pub fn unix_seconds_f64(&self) -> f64 {
let mut secs = self.unix_seconds as f64;
if let Some(subsec_millis) = self.subsecond_millis {
secs += subsec_millis as f64 / 1000.0;
}
secs
}
pub fn as_date_time(&self) -> LocalResult<DateTime<Utc>> {
Utc.timestamp_opt(
self.unix_seconds,
self.subsecond_millis.unwrap_or(0) as u32 * 10_u32.pow(6),
)
}
}
impl From<DateTime<Utc>> for UnixTimestamp {
fn from(value: DateTime<Utc>) -> Self {
Self::const_new(value.timestamp(), value.timestamp_subsec_millis() as u16)
}
}
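// Hypothetical construction sketch (not part of the original code), mirroring the unit tests
// at the end of this file: a sub-second value of 0 is normalized to None and values above
// 999 ms are rejected by the fallible constructor.
fn unix_timestamp_construction_sketch() {
    let stamp = UnixTimestamp::new(500, 600).unwrap();
    assert_eq!(stamp.subsecond_millis(), Some(600));
    assert!((stamp.unix_seconds_f64() - 500.6).abs() < 0.0001);
    assert!(UnixTimestamp::new(500, 0).unwrap().subsecond_millis().is_none());
    assert!(UnixTimestamp::new(500, 1000).is_none());
}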
impl PartialOrd for UnixTimestamp {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
if self == other {
return Some(Ordering::Equal);
}
match self.unix_seconds.cmp(&other.unix_seconds) {
Ordering::Less => return Some(Ordering::Less),
Ordering::Greater => return Some(Ordering::Greater),
_ => (),
}
match self
.subsecond_millis()
.unwrap_or(0)
.cmp(&other.subsecond_millis().unwrap_or(0))
{
Ordering::Less => {
return if self.unix_seconds < 0 {
Some(Ordering::Greater)
} else {
Some(Ordering::Less)
}
}
Ordering::Greater => {
return if self.unix_seconds < 0 {
Some(Ordering::Less)
} else {
Some(Ordering::Greater)
}
}
Ordering::Equal => (),
}
Some(Ordering::Equal)
}
}
impl Ord for UnixTimestamp {
fn cmp(&self, other: &Self) -> Ordering {
PartialOrd::partial_cmp(self, other).unwrap()
}
}
fn get_new_stamp_after_addition(
current_stamp: &UnixTimestamp,
duration: Duration,
) -> UnixTimestamp {
let mut new_subsec_millis =
current_stamp.subsecond_millis().unwrap_or(0) + duration.subsec_millis() as u16;
let mut new_unix_seconds = current_stamp.unix_seconds;
let mut increment_seconds = |value: u32| {
if new_unix_seconds < 0 {
new_unix_seconds = new_unix_seconds
.checked_sub(value.into())
.expect("new unix seconds would exceed i64::MIN");
} else {
new_unix_seconds = new_unix_seconds
.checked_add(value.into())
.expect("new unix seconds would exceed i64::MAX");
}
};
if new_subsec_millis >= 1000 {
new_subsec_millis -= 1000;
increment_seconds(1);
}
increment_seconds(
duration
.as_secs()
.try_into()
.expect("duration seconds exceeds u32::MAX"),
);
UnixTimestamp::const_new(new_unix_seconds, new_subsec_millis)
}
/// Please note that this operation will panic on the following conditions:
///
/// - Unix seconds after subtraction for stamps before the unix epoch exceeds [i64::MIN].
/// - Unix seconds after addition exceeds [i64::MAX].
/// - Seconds from duration to add exceeds [u32::MAX].
impl AddAssign<Duration> for UnixTimestamp {
fn add_assign(&mut self, duration: Duration) {
*self = get_new_stamp_after_addition(self, duration);
}
}
/// Please note that this operation will panic for the following conditions:
///
/// - Unix seconds after subtraction for stamps before the unix epoch exceeds [i64::MIN].
/// - Unix seconds after addition exceeds [i64::MAX].
/// - Seconds from duration to add exceeds [u32::MAX].
impl Add<Duration> for UnixTimestamp {
type Output = Self;
fn add(self, duration: Duration) -> Self::Output {
get_new_stamp_after_addition(&self, duration)
}
}
impl Add<Duration> for &UnixTimestamp {
type Output = UnixTimestamp;
fn add(self, duration: Duration) -> Self::Output {
get_new_stamp_after_addition(self, duration)
}
}
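// Hypothetical addition sketch (not part of the original code), mirroring test_addition_spillover
// below: overflowing sub-second milliseconds carry over into the seconds field, and a resulting
// sub-second value of 0 collapses back to None.
fn unix_timestamp_addition_sketch() {
    let mut stamp = UnixTimestamp::new(1, 900).unwrap();
    stamp += Duration::from_millis(100);
    assert_eq!(stamp.unix_seconds, 2);
    assert!(stamp.subsecond_millis().is_none());
    let later = stamp + Duration::from_millis(1100);
    assert_eq!(later.unix_seconds, 3);
    assert_eq!(later.subsecond_millis(), Some(100));
}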
#[cfg(all(test, feature = "std"))]
mod tests {
use super::*;
@ -433,116 +234,10 @@ mod tests {
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap();
let unix_epoch = now.as_secs();
let ccsds_epoch = unix_epoch_to_ccsds_epoch(now.as_secs() as i64) as u64;
let ccsds_epoch = unix_epoch_to_ccsds_epoch(now.as_secs());
assert!(ccsds_epoch > unix_epoch);
assert_eq!((ccsds_epoch - unix_epoch) % SECONDS_PER_DAY as u64, 0);
let days_diff = (ccsds_epoch - unix_epoch) / SECONDS_PER_DAY as u64;
assert_eq!(days_diff, -DAYS_CCSDS_TO_UNIX as u64);
}
#[test]
fn basic_unix_stamp_test() {
let stamp = UnixTimestamp::new_only_seconds(-200);
assert_eq!(stamp.unix_seconds, -200);
assert!(stamp.subsecond_millis().is_none());
let stamp = UnixTimestamp::new_only_seconds(250);
assert_eq!(stamp.unix_seconds, 250);
assert!(stamp.subsecond_millis().is_none());
}
#[test]
fn basic_float_unix_stamp_test() {
let stamp = UnixTimestamp::new(500, 600).unwrap();
assert!(stamp.subsecond_millis.is_some());
assert_eq!(stamp.unix_seconds, 500);
let subsec_millis = stamp.subsecond_millis().unwrap();
assert_eq!(subsec_millis, 600);
assert!((500.6 - stamp.unix_seconds_f64()).abs() < 0.0001);
}
#[test]
fn test_ord_larger() {
let stamp0 = UnixTimestamp::new_only_seconds(5);
let stamp1 = UnixTimestamp::new(5, 500).unwrap();
let stamp2 = UnixTimestamp::new_only_seconds(6);
assert!(stamp1 > stamp0);
assert!(stamp2 > stamp0);
assert!(stamp2 > stamp1);
}
#[test]
fn test_ord_smaller() {
let stamp0 = UnixTimestamp::new_only_seconds(5);
let stamp1 = UnixTimestamp::new(5, 500).unwrap();
let stamp2 = UnixTimestamp::new_only_seconds(6);
assert!(stamp0 < stamp1);
assert!(stamp0 < stamp2);
assert!(stamp1 < stamp2);
}
#[test]
fn test_ord_larger_neg_numbers() {
let stamp0 = UnixTimestamp::new_only_seconds(-5);
let stamp1 = UnixTimestamp::new(-5, 500).unwrap();
let stamp2 = UnixTimestamp::new_only_seconds(-6);
assert!(stamp0 > stamp1);
assert!(stamp0 > stamp2);
assert!(stamp1 > stamp2);
assert!(stamp1 >= stamp2);
assert!(stamp0 >= stamp1);
}
#[test]
fn test_ord_smaller_neg_numbers() {
let stamp0 = UnixTimestamp::new_only_seconds(-5);
let stamp1 = UnixTimestamp::new(-5, 500).unwrap();
let stamp2 = UnixTimestamp::new_only_seconds(-6);
assert!(stamp2 < stamp1);
assert!(stamp2 < stamp0);
assert!(stamp1 < stamp0);
assert!(stamp1 <= stamp0);
assert!(stamp2 <= stamp1);
}
#[test]
fn test_eq() {
let stamp0 = UnixTimestamp::new(5, 0).unwrap();
let stamp1 = UnixTimestamp::new_only_seconds(5);
assert_eq!(stamp0, stamp1);
assert!(stamp0 <= stamp1);
assert!(stamp0 >= stamp1);
assert!(!(stamp0 < stamp1));
assert!(!(stamp0 > stamp1));
}
#[test]
fn test_addition() {
let mut stamp0 = UnixTimestamp::new_only_seconds(1);
stamp0 += Duration::from_secs(5);
assert_eq!(stamp0.unix_seconds, 6);
assert!(stamp0.subsecond_millis().is_none());
let stamp1 = stamp0 + Duration::from_millis(500);
assert_eq!(stamp1.unix_seconds, 6);
assert!(stamp1.subsecond_millis().is_some());
assert_eq!(stamp1.subsecond_millis().unwrap(), 500);
}
#[test]
fn test_addition_on_ref() {
let stamp0 = &UnixTimestamp::new(20, 500).unwrap();
let stamp1 = stamp0 + Duration::from_millis(2500);
assert_eq!(stamp1.unix_seconds, 23);
assert!(stamp1.subsecond_millis().is_none());
}
#[test]
fn test_addition_spillover() {
let mut stamp0 = UnixTimestamp::new(1, 900).unwrap();
stamp0 += Duration::from_millis(100);
assert_eq!(stamp0.unix_seconds, 2);
assert!(stamp0.subsecond_millis().is_none());
stamp0 += Duration::from_millis(1100);
assert_eq!(stamp0.unix_seconds, 3);
assert_eq!(stamp0.subsecond_millis().unwrap(), 100);
}
}

668
src/tm.rs Normal file

@ -0,0 +1,668 @@
//! This module contains all components required to create ECSS PUS C telemetry packets according
//! to [ECSS-E-ST-70-41C](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
use crate::ecss::{
ccsds_impl, crc_from_raw_data, crc_procedure, sp_header_impls, user_data_from_raw,
verify_crc16_from_raw, CrcType, PusError, PusPacket, PusVersion, CRC_CCITT_FALSE,
};
use crate::{
ByteConversionError, CcsdsPacket, PacketType, SequenceFlags, SizeMissmatch, SpHeader,
CCSDS_HEADER_LEN,
};
use core::mem::size_of;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use zerocopy::AsBytes;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use delegate::delegate;
/// Length without timestamp
pub const PUC_TM_MIN_SEC_HEADER_LEN: usize = 7;
pub const PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA: usize =
CCSDS_HEADER_LEN + PUC_TM_MIN_SEC_HEADER_LEN + size_of::<CrcType>();
pub trait GenericPusTmSecondaryHeader {
fn pus_version(&self) -> PusVersion;
fn sc_time_ref_status(&self) -> u8;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn msg_counter(&self) -> u16;
fn dest_id(&self) -> u16;
}
pub mod zc {
use super::GenericPusTmSecondaryHeader;
use crate::ecss::{PusError, PusVersion};
use zerocopy::{AsBytes, FromBytes, NetworkEndian, Unaligned, U16};
#[derive(FromBytes, AsBytes, Unaligned)]
#[repr(C)]
pub struct PusTmSecHeaderWithoutTimestamp {
pus_version_and_sc_time_ref_status: u8,
service: u8,
subservice: u8,
msg_counter: U16<NetworkEndian>,
dest_id: U16<NetworkEndian>,
}
pub struct PusTmSecHeader<'slice> {
pub(crate) zc_header: PusTmSecHeaderWithoutTimestamp,
pub(crate) timestamp: &'slice [u8],
}
impl TryFrom<crate::tm::PusTmSecondaryHeader<'_>> for PusTmSecHeaderWithoutTimestamp {
type Error = PusError;
fn try_from(header: crate::tm::PusTmSecondaryHeader) -> Result<Self, Self::Error> {
if header.pus_version != PusVersion::PusC {
return Err(PusError::VersionNotSupported(header.pus_version));
}
Ok(PusTmSecHeaderWithoutTimestamp {
pus_version_and_sc_time_ref_status: ((header.pus_version as u8) << 4)
| header.sc_time_ref_status,
service: header.service,
subservice: header.subservice,
msg_counter: U16::from(header.msg_counter),
dest_id: U16::from(header.dest_id),
})
}
}
impl PusTmSecHeaderWithoutTimestamp {
pub fn write_to_bytes(&self, slice: &mut [u8]) -> Option<()> {
self.write_to(slice)
}
pub fn from_bytes(slice: &[u8]) -> Option<Self> {
Self::read_from(slice)
}
}
impl GenericPusTmSecondaryHeader for PusTmSecHeaderWithoutTimestamp {
fn pus_version(&self) -> PusVersion {
PusVersion::try_from(self.pus_version_and_sc_time_ref_status >> 4 & 0b1111)
.unwrap_or(PusVersion::Invalid)
}
fn sc_time_ref_status(&self) -> u8 {
self.pus_version_and_sc_time_ref_status & 0b1111
}
fn service(&self) -> u8 {
self.service
}
fn subservice(&self) -> u8 {
self.subservice
}
fn msg_counter(&self) -> u16 {
self.msg_counter.get()
}
fn dest_id(&self) -> u16 {
self.dest_id.get()
}
}
}
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PusTmSecondaryHeader<'slice> {
pus_version: PusVersion,
pub sc_time_ref_status: u8,
pub service: u8,
pub subservice: u8,
pub msg_counter: u16,
pub dest_id: u16,
pub time_stamp: &'slice [u8],
}
impl<'slice> PusTmSecondaryHeader<'slice> {
pub fn new_simple(service: u8, subservice: u8, time_stamp: &'slice [u8]) -> Self {
PusTmSecondaryHeader {
pus_version: PusVersion::PusC,
sc_time_ref_status: 0,
service,
subservice,
msg_counter: 0,
dest_id: 0,
time_stamp,
}
}
pub fn new(
service: u8,
subservice: u8,
msg_counter: u16,
dest_id: u16,
time_stamp: &'slice [u8],
) -> Self {
PusTmSecondaryHeader {
pus_version: PusVersion::PusC,
sc_time_ref_status: 0,
service,
subservice,
msg_counter,
dest_id,
time_stamp,
}
}
}
impl GenericPusTmSecondaryHeader for PusTmSecondaryHeader<'_> {
fn pus_version(&self) -> PusVersion {
self.pus_version
}
fn sc_time_ref_status(&self) -> u8 {
self.sc_time_ref_status
}
fn service(&self) -> u8 {
self.service
}
fn subservice(&self) -> u8 {
self.subservice
}
fn msg_counter(&self) -> u16 {
self.msg_counter
}
fn dest_id(&self) -> u16 {
self.dest_id
}
}
impl<'slice> TryFrom<zc::PusTmSecHeader<'slice>> for PusTmSecondaryHeader<'slice> {
type Error = ();
fn try_from(sec_header: zc::PusTmSecHeader<'slice>) -> Result<Self, Self::Error> {
Ok(PusTmSecondaryHeader {
pus_version: sec_header.zc_header.pus_version(),
sc_time_ref_status: sec_header.zc_header.sc_time_ref_status(),
service: sec_header.zc_header.service(),
subservice: sec_header.zc_header.subservice(),
msg_counter: sec_header.zc_header.msg_counter(),
dest_id: sec_header.zc_header.dest_id(),
time_stamp: sec_header.timestamp,
})
}
}
/// This class models a PUS C telemetry packet. It is the primary data
/// structure to generate the raw byte representation of PUS telemetry or to
/// deserialize one from raw bytes.
///
/// This class also derives the [serde::Serialize] and [serde::Deserialize] trait if the [serde]
/// feature is used, which allows sending TM packets around in a raw byte format using a serde
/// provider like [postcard](https://docs.rs/postcard/latest/postcard/).
///
/// There is no spare bytes support yet.
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PusTm<'slice> {
pub sp_header: SpHeader,
pub sec_header: PusTmSecondaryHeader<'slice>,
/// If this is set to false, a manual call to [PusTm::calc_own_crc16] or
/// [PusTm::update_packet_fields] is necessary for the serialized or cached CRC16 to be valid.
pub calc_crc_on_serialization: bool,
#[cfg_attr(feature = "serde", serde(skip))]
raw_data: Option<&'slice [u8]>,
source_data: Option<&'slice [u8]>,
crc16: Option<u16>,
}
impl<'slice> PusTm<'slice> {
/// Generates a new struct instance.
///
/// # Arguments
///
/// * `sp_header` - Space packet header information. The correct packet type will be set
/// automatically
/// * `sec_header` - Information contained in the secondary header, including the service
/// and subservice type
/// * `source_data` - Custom source data
/// * `set_ccsds_len` - Can be used to automatically update the CCSDS space packet data length
/// field. If this is not set to true, [PusTm::update_ccsds_data_len] can be called to set
/// the correct value to this field manually
pub fn new(
sp_header: &mut SpHeader,
sec_header: PusTmSecondaryHeader<'slice>,
source_data: Option<&'slice [u8]>,
set_ccsds_len: bool,
) -> Self {
sp_header.set_packet_type(PacketType::Tm);
sp_header.set_sec_header_flag();
let mut pus_tm = PusTm {
sp_header: *sp_header,
raw_data: None,
source_data,
sec_header,
calc_crc_on_serialization: true,
crc16: None,
};
if set_ccsds_len {
pus_tm.update_ccsds_data_len();
}
pus_tm
}
pub fn len_packed(&self) -> usize {
let mut length = PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA;
length += self.sec_header.time_stamp.len();
if let Some(src_data) = self.source_data {
length += src_data.len();
}
length
}
pub fn time_stamp(&self) -> &'slice [u8] {
self.sec_header.time_stamp
}
pub fn source_data(&self) -> Option<&'slice [u8]> {
self.source_data
}
pub fn set_dest_id(&mut self, dest_id: u16) {
self.sec_header.dest_id = dest_id;
}
pub fn set_msg_counter(&mut self, msg_counter: u16) {
self.sec_header.msg_counter = msg_counter
}
pub fn set_sc_time_ref_status(&mut self, sc_time_ref_status: u8) {
self.sec_header.sc_time_ref_status = sc_time_ref_status & 0b1111;
}
sp_header_impls!();
/// This is called automatically if the `set_ccsds_len` argument in the [PusTm::new] call was
/// set to true.
/// If this was not done, or if the time stamp or source data is set or changed after construction,
/// this function needs to be called to ensure that the data length field of the CCSDS header
/// is set correctly.
pub fn update_ccsds_data_len(&mut self) {
self.sp_header.data_len =
self.len_packed() as u16 - size_of::<crate::zc::SpHeader>() as u16 - 1;
}
/// This function should be called before the TM packet is serialized if
/// [PusTm.calc_crc_on_serialization] is set to false. It will calculate and cache the CRC16.
pub fn calc_own_crc16(&mut self) {
let mut digest = CRC_CCITT_FALSE.digest();
let sph_zc = crate::zc::SpHeader::from(self.sp_header);
digest.update(sph_zc.as_bytes());
let pus_tc_header = zc::PusTmSecHeaderWithoutTimestamp::try_from(self.sec_header).unwrap();
digest.update(pus_tc_header.as_bytes());
digest.update(self.sec_header.time_stamp);
if let Some(src_data) = self.source_data {
digest.update(src_data);
}
self.crc16 = Some(digest.finalize())
}
/// This helper function calls both [PusTm.update_ccsds_data_len] and [PusTm.calc_own_crc16]
pub fn update_packet_fields(&mut self) {
self.update_ccsds_data_len();
self.calc_own_crc16();
}
/// Write the raw PUS byte representation to a provided buffer.
pub fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError> {
let mut curr_idx = 0;
let total_size = self.len_packed();
if total_size > slice.len() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: slice.len(),
expected: total_size,
})
.into());
}
self.sp_header
.write_to_be_bytes(&mut slice[0..CCSDS_HEADER_LEN])?;
curr_idx += CCSDS_HEADER_LEN;
let sec_header_len = size_of::<zc::PusTmSecHeaderWithoutTimestamp>();
let sec_header = zc::PusTmSecHeaderWithoutTimestamp::try_from(self.sec_header).unwrap();
sec_header
.write_to_bytes(&mut slice[curr_idx..curr_idx + sec_header_len])
.ok_or(ByteConversionError::ZeroCopyToError)?;
curr_idx += sec_header_len;
let timestamp_len = self.sec_header.time_stamp.len();
slice[curr_idx..curr_idx + timestamp_len].copy_from_slice(self.sec_header.time_stamp);
curr_idx += timestamp_len;
if let Some(src_data) = self.source_data {
slice[curr_idx..curr_idx + src_data.len()].copy_from_slice(src_data);
curr_idx += src_data.len();
}
let crc16 = crc_procedure(
self.calc_crc_on_serialization,
&self.crc16,
0,
curr_idx,
slice,
)?;
slice[curr_idx..curr_idx + 2].copy_from_slice(crc16.to_be_bytes().as_slice());
curr_idx += 2;
Ok(curr_idx)
}
/// Append the raw PUS byte representation to a provided [alloc::vec::Vec]
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn append_to_vec(&self, vec: &mut Vec<u8>) -> Result<usize, PusError> {
let sph_zc = crate::zc::SpHeader::from(self.sp_header);
let mut appended_len =
PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA + self.sec_header.time_stamp.len();
if let Some(src_data) = self.source_data {
appended_len += src_data.len();
};
let start_idx = vec.len();
let mut ser_len = 0;
vec.extend_from_slice(sph_zc.as_bytes());
ser_len += sph_zc.as_bytes().len();
// The PUS version is hardcoded to PUS C
let sec_header = zc::PusTmSecHeaderWithoutTimestamp::try_from(self.sec_header).unwrap();
vec.extend_from_slice(sec_header.as_bytes());
ser_len += sec_header.as_bytes().len();
vec.extend_from_slice(self.sec_header.time_stamp);
ser_len += self.sec_header.time_stamp.len();
if let Some(src_data) = self.source_data {
vec.extend_from_slice(src_data);
ser_len += src_data.len();
}
let crc16 = crc_procedure(
self.calc_crc_on_serialization,
&self.crc16,
start_idx,
ser_len,
&vec[start_idx..start_idx + ser_len],
)?;
vec.extend_from_slice(crc16.to_be_bytes().as_slice());
Ok(appended_len)
}
/// Create a [PusTm] instance from a raw slice. On success, it returns a tuple containing
/// the instance and the found byte length of the packet. The timestamp length needs to be
/// known beforehand.
pub fn from_bytes(
slice: &'slice [u8],
timestamp_len: usize,
) -> Result<(Self, usize), PusError> {
let raw_data_len = slice.len();
if raw_data_len < PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA {
return Err(PusError::RawDataTooShort(raw_data_len));
}
let mut current_idx = 0;
let (sp_header, _) = SpHeader::from_be_bytes(&slice[0..CCSDS_HEADER_LEN])?;
current_idx += 6;
let total_len = sp_header.total_len();
if raw_data_len < total_len || total_len < PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA {
return Err(PusError::RawDataTooShort(raw_data_len));
}
let sec_header_zc = zc::PusTmSecHeaderWithoutTimestamp::from_bytes(
&slice[current_idx..current_idx + PUC_TM_MIN_SEC_HEADER_LEN],
)
.ok_or(ByteConversionError::ZeroCopyFromError)?;
current_idx += PUC_TM_MIN_SEC_HEADER_LEN;
let zc_sec_header_wrapper = zc::PusTmSecHeader {
zc_header: sec_header_zc,
timestamp: &slice[current_idx..current_idx + timestamp_len],
};
current_idx += timestamp_len;
let raw_data = &slice[0..total_len];
let pus_tm = PusTm {
sp_header,
sec_header: PusTmSecondaryHeader::try_from(zc_sec_header_wrapper).unwrap(),
raw_data: Some(&slice[0..total_len]),
source_data: user_data_from_raw(current_idx, total_len, raw_data_len, slice)?,
calc_crc_on_serialization: false,
crc16: Some(crc_from_raw_data(raw_data)?),
};
verify_crc16_from_raw(raw_data, pus_tm.crc16.expect("CRC16 invalid"))?;
Ok((pus_tm, total_len))
}
}
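// Hypothetical round-trip sketch (not part of the original code), mirroring the unit tests
// below: build a PUS TM[17,2] ping reply, serialize it and parse it back. The 7-byte dummy
// timestamp length must be known to the reader.
fn pus_tm_round_trip_sketch() {
    let time_stamp = [0u8; 7];
    let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
    let sec_header = PusTmSecondaryHeader::new_simple(17, 2, &time_stamp);
    let ping_reply = PusTm::new(&mut sph, sec_header, None, true);
    let mut buf = [0u8; 32];
    let written = ping_reply.write_to_bytes(&mut buf).expect("serialization failed");
    let (parsed, read_len) = PusTm::from_bytes(&buf, time_stamp.len()).expect("deserialization failed");
    assert_eq!(written, read_len);
    assert_eq!(PusPacket::service(&parsed), 17);
    assert_eq!(PusPacket::subservice(&parsed), 2);
}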
//noinspection RsTraitImplementation
impl CcsdsPacket for PusTm<'_> {
ccsds_impl!();
}
//noinspection RsTraitImplementation
impl PusPacket for PusTm<'_> {
delegate!(to self.sec_header {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
});
fn user_data(&self) -> Option<&[u8]> {
self.source_data
}
fn crc16(&self) -> Option<u16> {
self.crc16
}
}
//noinspection RsTraitImplementation
impl GenericPusTmSecondaryHeader for PusTm<'_> {
delegate!(to self.sec_header {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn dest_id(&self) -> u16;
fn msg_counter(&self) -> u16;
fn sc_time_ref_status(&self) -> u8;
});
}
#[cfg(test)]
mod tests {
use super::*;
use crate::ecss::PusVersion::PusC;
use crate::SpHeader;
fn base_ping_reply_full_ctor(time_stamp: &[u8]) -> PusTm {
let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
let tc_header = PusTmSecondaryHeader::new_simple(17, 2, &time_stamp);
PusTm::new(&mut sph, tc_header, None, true)
}
fn base_hk_reply<'a>(time_stamp: &'a [u8], src_data: &'a [u8]) -> PusTm<'a> {
let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
let tc_header = PusTmSecondaryHeader::new_simple(3, 5, &time_stamp);
PusTm::new(&mut sph, tc_header, Some(src_data), true)
}
fn dummy_time_stamp() -> &'static [u8] {
return &[0, 1, 2, 3, 4, 5, 6];
}
#[test]
fn test_basic() {
let time_stamp = dummy_time_stamp();
let pus_tm = base_ping_reply_full_ctor(&time_stamp);
verify_ping_reply(&pus_tm, false, 22, dummy_time_stamp());
}
#[test]
fn test_serialization_no_source_data() {
let time_stamp = dummy_time_stamp();
let pus_tm = base_ping_reply_full_ctor(&time_stamp);
let mut buf: [u8; 32] = [0; 32];
let ser_len = pus_tm
.write_to_bytes(&mut buf)
.expect("Serialization failed");
assert_eq!(ser_len, 22);
verify_raw_ping_reply(&buf);
}
#[test]
fn test_serialization_with_source_data() {
let src_data = [1, 2, 3];
let hk_reply = base_hk_reply(dummy_time_stamp(), &src_data);
let mut buf: [u8; 32] = [0; 32];
let ser_len = hk_reply
.write_to_bytes(&mut buf)
.expect("Serialization failed");
assert_eq!(ser_len, 25);
assert_eq!(buf[20], 1);
assert_eq!(buf[21], 2);
assert_eq!(buf[22], 3);
}
#[test]
fn test_setters() {
let time_stamp = dummy_time_stamp();
let mut pus_tm = base_ping_reply_full_ctor(&time_stamp);
pus_tm.set_sc_time_ref_status(0b1010);
pus_tm.set_dest_id(0x7fff);
pus_tm.set_msg_counter(0x1f1f);
assert_eq!(pus_tm.sc_time_ref_status(), 0b1010);
assert_eq!(pus_tm.dest_id(), 0x7fff);
assert_eq!(pus_tm.msg_counter(), 0x1f1f);
assert!(pus_tm.set_apid(0x7ff));
assert_eq!(pus_tm.apid(), 0x7ff);
}
#[test]
fn test_deserialization_no_source_data() {
let time_stamp = dummy_time_stamp();
let pus_tm = base_ping_reply_full_ctor(&time_stamp);
let mut buf: [u8; 32] = [0; 32];
let ser_len = pus_tm
.write_to_bytes(&mut buf)
.expect("Serialization failed");
assert_eq!(ser_len, 22);
let (tm_deserialized, size) = PusTm::from_bytes(&buf, 7).expect("Deserialization failed");
assert_eq!(ser_len, size);
verify_ping_reply(&tm_deserialized, false, 22, dummy_time_stamp());
}
#[test]
fn test_manual_field_update() {
let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
let tc_header = PusTmSecondaryHeader::new_simple(17, 2, dummy_time_stamp());
let mut tm = PusTm::new(&mut sph, tc_header, None, false);
tm.calc_crc_on_serialization = false;
assert_eq!(tm.data_len(), 0x00);
let mut buf: [u8; 32] = [0; 32];
let res = tm.write_to_bytes(&mut buf);
assert!(res.is_err());
assert!(matches!(res.unwrap_err(), PusError::CrcCalculationMissing));
tm.update_ccsds_data_len();
assert_eq!(tm.data_len(), 15);
tm.calc_own_crc16();
let res = tm.write_to_bytes(&mut buf);
assert!(res.is_ok());
tm.sp_header.data_len = 0;
tm.update_packet_fields();
assert_eq!(tm.data_len(), 15);
}
#[test]
fn test_target_buf_too_small() {
let time_stamp = dummy_time_stamp();
let pus_tm = base_ping_reply_full_ctor(&time_stamp);
let mut buf: [u8; 16] = [0; 16];
let res = pus_tm.write_to_bytes(&mut buf);
assert!(res.is_err());
let error = res.unwrap_err();
assert!(matches!(error, PusError::ByteConversionError { .. }));
match error {
PusError::ByteConversionError(err) => match err {
ByteConversionError::ToSliceTooSmall(size_missmatch) => {
assert_eq!(size_missmatch.expected, 22);
assert_eq!(size_missmatch.found, 16);
}
_ => panic!("Invalid PUS error {:?}", err),
},
_ => {
panic!("Invalid error {:?}", error);
}
}
}
#[test]
#[cfg(feature = "alloc")]
fn test_append_to_vec() {
let time_stamp = dummy_time_stamp();
let pus_tm = base_ping_reply_full_ctor(&time_stamp);
let mut vec = Vec::new();
let res = pus_tm.append_to_vec(&mut vec);
assert!(res.is_ok());
assert_eq!(res.unwrap(), 22);
verify_raw_ping_reply(vec.as_slice());
}
#[test]
#[cfg(feature = "alloc")]
fn test_append_to_vec_with_src_data() {
let src_data = [1, 2, 3];
let hk_reply = base_hk_reply(dummy_time_stamp(), &src_data);
let mut vec = Vec::new();
vec.push(4);
let res = hk_reply.append_to_vec(&mut vec);
assert!(res.is_ok());
assert_eq!(res.unwrap(), 25);
assert_eq!(vec.len(), 26);
}
fn verify_raw_ping_reply(buf: &[u8]) {
// Secondary header is set -> 0b0000_1001, APID occupies last bit of first byte
assert_eq!(buf[0], 0x09);
// Rest of APID 0x123
assert_eq!(buf[1], 0x23);
// Unsegmented is the default, and first byte of 0x234 occupies this byte as well
assert_eq!(buf[2], 0xc2);
assert_eq!(buf[3], 0x34);
assert_eq!(((buf[4] as u16) << 8) | buf[5] as u16, 15);
// SC time ref status is 0
assert_eq!(buf[6], (PusC as u8) << 4);
assert_eq!(buf[7], 17);
assert_eq!(buf[8], 2);
// MSG counter 0
assert_eq!(buf[9], 0x00);
assert_eq!(buf[10], 0x00);
// Destination ID
assert_eq!(buf[11], 0x00);
assert_eq!(buf[12], 0x00);
// Timestamp
assert_eq!(&buf[13..20], dummy_time_stamp());
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(&buf[0..20]);
let crc16 = digest.finalize();
assert_eq!(((crc16 >> 8) & 0xff) as u8, buf[20]);
assert_eq!((crc16 & 0xff) as u8, buf[21]);
}
fn verify_ping_reply(
tm: &PusTm,
has_user_data: bool,
exp_full_len: usize,
exp_time_stamp: &[u8],
) {
assert!(tm.is_tm());
assert_eq!(PusPacket::service(tm), 17);
assert_eq!(PusPacket::subservice(tm), 2);
assert!(tm.sec_header_flag());
assert_eq!(tm.len_packed(), exp_full_len);
assert_eq!(tm.time_stamp(), exp_time_stamp);
if has_user_data {
assert!(!tm.user_data().is_none());
}
assert_eq!(PusPacket::pus_version(tm), PusC);
assert_eq!(tm.apid(), 0x123);
assert_eq!(tm.seq_count(), 0x234);
assert_eq!(tm.data_len(), exp_full_len as u16 - 7);
assert_eq!(tm.dest_id(), 0x0000);
assert_eq!(tm.msg_counter(), 0x0000);
assert_eq!(tm.sc_time_ref_status(), 0b0000);
}
}


@ -1,642 +0,0 @@
use crate::{ByteConversionError, SizeMissmatch};
use core::fmt::{Debug, Display, Formatter};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
pub trait ToBeBytes {
type ByteArray: AsRef<[u8]>;
/// Length when written to big endian bytes.
fn written_len(&self) -> usize;
fn to_be_bytes(&self) -> Self::ByteArray;
}
impl ToBeBytes for () {
type ByteArray = [u8; 0];
fn written_len(&self) -> usize {
0
}
fn to_be_bytes(&self) -> Self::ByteArray {
[]
}
}
impl ToBeBytes for u8 {
type ByteArray = [u8; 1];
fn written_len(&self) -> usize {
1
}
fn to_be_bytes(&self) -> Self::ByteArray {
u8::to_be_bytes(*self)
}
}
impl ToBeBytes for u16 {
type ByteArray = [u8; 2];
fn written_len(&self) -> usize {
2
}
fn to_be_bytes(&self) -> Self::ByteArray {
u16::to_be_bytes(*self)
}
}
impl ToBeBytes for u32 {
type ByteArray = [u8; 4];
fn written_len(&self) -> usize {
4
}
fn to_be_bytes(&self) -> Self::ByteArray {
u32::to_be_bytes(*self)
}
}
impl ToBeBytes for u64 {
type ByteArray = [u8; 8];
fn written_len(&self) -> usize {
8
}
fn to_be_bytes(&self) -> Self::ByteArray {
u64::to_be_bytes(*self)
}
}
pub trait UnsignedEnum {
/// Size of the unsigned enumeration in bytes.
fn size(&self) -> usize;
/// Write the unsigned enumeration to a raw buffer. Returns the written size on success.
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
}
pub trait UnsignedEnumExt: UnsignedEnum + Debug + Copy + Clone + PartialEq + Eq {}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum UnsignedByteFieldError {
/// Value is too large for specified width of byte field. The first value contains the width,
/// the second value contains the detected value.
ValueTooLargeForWidth((usize, u64)),
/// Only 1, 2, 4 and 8 are allowed width values. Optionally contains the expected width if
/// applicable, for example for conversions.
InvalidWidth(usize, Option<usize>),
ByteConversionError(ByteConversionError),
}
impl From<ByteConversionError> for UnsignedByteFieldError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value)
}
}
impl Display for UnsignedByteFieldError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
Self::ByteConversionError(e) => {
write!(f, "low level byte conversion error: {e}")
}
Self::InvalidWidth(val, _) => {
write!(f, "invalid width {val}, only 1, 2, 4 and 8 are allowed.")
}
Self::ValueTooLargeForWidth((width, value)) => {
write!(f, "value {value} too large for width {width}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for UnsignedByteFieldError {}
/// Type erased variant.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct UnsignedByteField {
width: usize,
value: u64,
}
impl UnsignedByteField {
pub fn new(width: usize, value: u64) -> Self {
Self { width, value }
}
pub fn value(&self) -> u64 {
self.value
}
pub fn new_from_be_bytes(width: usize, buf: &[u8]) -> Result<Self, UnsignedByteFieldError> {
if width > buf.len() {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
expected: width,
found: buf.len(),
})
.into());
}
match width {
0 => Ok(Self::new(width, 0)),
1 => Ok(Self::new(width, buf[0] as u64)),
2 => Ok(Self::new(
width,
u16::from_be_bytes(buf[0..2].try_into().unwrap()) as u64,
)),
4 => Ok(Self::new(
width,
u32::from_be_bytes(buf[0..4].try_into().unwrap()) as u64,
)),
8 => Ok(Self::new(
width,
u64::from_be_bytes(buf[0..8].try_into().unwrap()),
)),
_ => Err(UnsignedByteFieldError::InvalidWidth(width, None)),
}
}
}
impl UnsignedEnum for UnsignedByteField {
fn size(&self) -> usize {
self.width
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.size() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
expected: self.size(),
found: buf.len(),
}));
}
match self.size() {
0 => Ok(0),
1 => {
let u8 = UnsignedByteFieldU8::try_from(*self).unwrap();
u8.write_to_be_bytes(buf)
}
2 => {
let u16 = UnsignedByteFieldU16::try_from(*self).unwrap();
u16.write_to_be_bytes(buf)
}
4 => {
let u32 = UnsignedByteFieldU32::try_from(*self).unwrap();
u32.write_to_be_bytes(buf)
}
8 => {
let u64 = UnsignedByteFieldU64::try_from(*self).unwrap();
u64.write_to_be_bytes(buf)
}
_ => {
// The API does not allow this.
panic!("unexpected written length");
}
}
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct GenericUnsignedByteField<TYPE> {
value: TYPE,
}
impl<TYPE> GenericUnsignedByteField<TYPE> {
pub fn new(val: TYPE) -> Self {
Self { value: val }
}
}
impl<TYPE: ToBeBytes> UnsignedEnum for GenericUnsignedByteField<TYPE> {
fn size(&self) -> usize {
self.value.written_len()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.size() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: self.size(),
}));
}
buf[0..self.size()].copy_from_slice(self.value.to_be_bytes().as_ref());
Ok(self.value.written_len())
}
}
pub type UnsignedByteFieldEmpty = GenericUnsignedByteField<()>;
pub type UnsignedByteFieldU8 = GenericUnsignedByteField<u8>;
pub type UnsignedByteFieldU16 = GenericUnsignedByteField<u16>;
pub type UnsignedByteFieldU32 = GenericUnsignedByteField<u32>;
pub type UnsignedByteFieldU64 = GenericUnsignedByteField<u64>;
pub type UbfU8 = UnsignedByteFieldU8;
pub type UbfU16 = UnsignedByteFieldU16;
pub type UbfU32 = UnsignedByteFieldU32;
pub type UbfU64 = UnsignedByteFieldU64;
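// Hypothetical usage sketch (not part of the original code), mirroring the tests at the end of
// this file: the typed alias and the type-erased variant both write the value in big endian
// for a matching width.
fn unsigned_byte_field_sketch() {
    let typed = UbfU16::new(3823);
    let erased = UnsignedByteField::new(2, 3823);
    let mut buf = [0u8; 2];
    assert_eq!(typed.write_to_be_bytes(&mut buf).unwrap(), 2);
    assert_eq!(u16::from_be_bytes(buf), 3823);
    buf = [0; 2];
    erased.write_to_be_bytes(&mut buf).unwrap();
    assert_eq!(u16::from_be_bytes(buf), 3823);
}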
impl From<UnsignedByteFieldU8> for UnsignedByteField {
fn from(value: UnsignedByteFieldU8) -> Self {
Self::new(1, value.value as u64)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU8 {
type Error = UnsignedByteFieldError;
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 1 {
return Err(UnsignedByteFieldError::InvalidWidth(value.width, Some(1)));
}
Ok(Self::new(value.value as u8))
}
}
impl From<UnsignedByteFieldU16> for UnsignedByteField {
fn from(value: UnsignedByteFieldU16) -> Self {
Self::new(2, value.value as u64)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU16 {
type Error = UnsignedByteFieldError;
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 2 {
return Err(UnsignedByteFieldError::InvalidWidth(value.width, Some(2)));
}
Ok(Self::new(value.value as u16))
}
}
impl From<UnsignedByteFieldU32> for UnsignedByteField {
fn from(value: UnsignedByteFieldU32) -> Self {
Self::new(4, value.value as u64)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU32 {
type Error = UnsignedByteFieldError;
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 4 {
return Err(UnsignedByteFieldError::InvalidWidth(value.width, Some(4)));
}
Ok(Self::new(value.value as u32))
}
}
impl From<UnsignedByteFieldU64> for UnsignedByteField {
fn from(value: UnsignedByteFieldU64) -> Self {
Self::new(8, value.value)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU64 {
type Error = UnsignedByteFieldError;
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 8 {
return Err(UnsignedByteFieldError::InvalidWidth(value.width, Some(8)));
}
Ok(Self::new(value.value))
}
}
#[cfg(test)]
pub mod tests {
use crate::util::{
UnsignedByteField, UnsignedByteFieldError, UnsignedByteFieldU16, UnsignedByteFieldU32,
UnsignedByteFieldU64, UnsignedByteFieldU8, UnsignedEnum,
};
use crate::ByteConversionError;
use std::format;
#[test]
fn test_simple_u8() {
let u8 = UnsignedByteFieldU8::new(5);
assert_eq!(u8.size(), 1);
let mut buf: [u8; 8] = [0; 8];
let len = u8
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 1);
assert_eq!(buf[0], 5);
for i in 1..8 {
assert_eq!(buf[i], 0);
}
}
#[test]
fn test_simple_u16() {
let u16 = UnsignedByteFieldU16::new(3823);
assert_eq!(u16.size(), 2);
let mut buf: [u8; 8] = [0; 8];
let len = u16
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 2);
let raw_val = u16::from_be_bytes(buf[0..2].try_into().unwrap());
assert_eq!(raw_val, 3823);
for i in 2..8 {
assert_eq!(buf[i], 0);
}
}
#[test]
fn test_simple_u32() {
let u32 = UnsignedByteFieldU32::new(80932);
assert_eq!(u32.size(), 4);
let mut buf: [u8; 8] = [0; 8];
let len = u32
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 4);
let raw_val = u32::from_be_bytes(buf[0..4].try_into().unwrap());
assert_eq!(raw_val, 80932);
for i in 4..8 {
assert_eq!(buf[i], 0);
}
}
#[test]
fn test_simple_u64() {
let u64 = UnsignedByteFieldU64::new(5999999);
assert_eq!(u64.size(), 8);
let mut buf: [u8; 8] = [0; 8];
let len = u64
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 8);
let raw_val = u64::from_be_bytes(buf[0..8].try_into().unwrap());
assert_eq!(raw_val, 5999999);
}
#[test]
fn conversions_u8() {
let u8 = UnsignedByteFieldU8::new(5);
let u8_type_erased = UnsignedByteField::from(u8);
assert_eq!(u8_type_erased.width, 1);
assert_eq!(u8_type_erased.value, 5);
let u8_conv_back =
UnsignedByteFieldU8::try_from(u8_type_erased).expect("conversion failed for u8");
assert_eq!(u8, u8_conv_back);
assert_eq!(u8_conv_back.value, 5);
}
#[test]
fn conversion_u8_fails() {
let field = UnsignedByteField::new(2, 60000);
let conv_fails = UnsignedByteFieldU8::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth(width, Some(expected)) => {
assert_eq!(width, 2);
assert_eq!(expected, 1);
}
_ => {
panic!("{}", format!("invalid error {err}"))
}
}
}
#[test]
fn conversions_u16() {
let u16 = UnsignedByteFieldU16::new(64444);
let u16_type_erased = UnsignedByteField::from(u16);
assert_eq!(u16_type_erased.width, 2);
assert_eq!(u16_type_erased.value, 64444);
let u16_conv_back =
UnsignedByteFieldU16::try_from(u16_type_erased).expect("conversion failed for u16");
assert_eq!(u16, u16_conv_back);
assert_eq!(u16_conv_back.value, 64444);
}
#[test]
fn conversion_u16_fails() {
let field = UnsignedByteField::new(4, 75000);
let conv_fails = UnsignedByteFieldU16::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth(width, Some(expected)) => {
assert_eq!(width, 4);
assert_eq!(expected, 2);
}
_ => {
panic!("{}", format!("invalid error {err}"))
}
}
}
#[test]
fn conversions_u32() {
let u32 = UnsignedByteFieldU32::new(75000);
let u32_type_erased = UnsignedByteField::from(u32);
assert_eq!(u32_type_erased.width, 4);
assert_eq!(u32_type_erased.value, 75000);
let u32_conv_back =
UnsignedByteFieldU32::try_from(u32_type_erased).expect("conversion failed for u32");
assert_eq!(u32, u32_conv_back);
assert_eq!(u32_conv_back.value, 75000);
}
#[test]
fn conversion_u32_fails() {
let field = UnsignedByteField::new(8, 75000);
let conv_fails = UnsignedByteFieldU32::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth(width, Some(expected)) => {
assert_eq!(width, 8);
assert_eq!(expected, 4);
}
_ => {
panic!("{}", format!("invalid error {err}"))
}
}
}
#[test]
fn conversions_u64() {
let u64 = UnsignedByteFieldU64::new(5999999);
let u64_type_erased = UnsignedByteField::from(u64);
assert_eq!(u64_type_erased.width, 8);
assert_eq!(u64_type_erased.value, 5999999);
let u64_conv_back =
UnsignedByteFieldU64::try_from(u64_type_erased).expect("conversion failed for u64");
assert_eq!(u64, u64_conv_back);
assert_eq!(u64_conv_back.value, 5999999);
}
#[test]
fn conversion_u64_fails() {
let field = UnsignedByteField::new(4, 60000);
let conv_fails = UnsignedByteFieldU64::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth(width, Some(expected)) => {
assert_eq!(width, 4);
assert_eq!(expected, 8);
}
_ => {
panic!("{}", format!("invalid error {err}"))
}
}
}
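// Serialization directly through the type-erased UnsignedByteField.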
#[test]
fn type_erased_u8_write() {
let u8 = UnsignedByteField::new(1, 5);
assert_eq!(u8.size(), 1);
let mut buf: [u8; 8] = [0; 8];
u8.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(buf[0], 5);
for i in 1..8 {
assert_eq!(buf[i], 0);
}
}
#[test]
fn type_erased_u16_write() {
let u16 = UnsignedByteField::new(2, 3823);
assert_eq!(u16.size(), 2);
let mut buf: [u8; 8] = [0; 8];
u16.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
let raw_val = u16::from_be_bytes(buf[0..2].try_into().unwrap());
assert_eq!(raw_val, 3823);
for i in 2..8 {
assert_eq!(buf[i], 0);
}
}
#[test]
fn type_erased_u32_write() {
let u32 = UnsignedByteField::new(4, 80932);
assert_eq!(u32.size(), 4);
let mut buf: [u8; 8] = [0; 8];
u32.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
let raw_val = u32::from_be_bytes(buf[0..4].try_into().unwrap());
assert_eq!(raw_val, 80932);
for i in 4..8 {
assert_eq!(buf[i], 0);
}
}
#[test]
fn type_erased_u64_write() {
let u64 = UnsignedByteField::new(8, 5999999);
assert_eq!(u64.size(), 8);
let mut buf: [u8; 8] = [0; 8];
u64.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
let raw_val = u64::from_be_bytes(buf[0..8].try_into().unwrap());
assert_eq!(raw_val, 5999999);
}
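// Construction of type-erased fields from big-endian byte slices.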
#[test]
fn type_erased_u8_construction() {
let buf: [u8; 2] = [5, 10];
let u8 = UnsignedByteField::new_from_be_bytes(1, &buf).expect("construction failed");
assert_eq!(u8.width, 1);
assert_eq!(u8.value, 5);
}
#[test]
fn type_erased_u16_construction() {
let buf: [u8; 2] = [0x10, 0x15];
let u16 = UnsignedByteField::new_from_be_bytes(2, &buf).expect("construction failed");
assert_eq!(u16.width, 2);
assert_eq!(u16.value, 0x1015);
}
#[test]
fn type_erased_u32_construction() {
let buf: [u8; 4] = [0x01, 0x02, 0x03, 0x04];
let u32 = UnsignedByteField::new_from_be_bytes(4, &buf).expect("construction failed");
assert_eq!(u32.width, 4);
assert_eq!(u32.value, 0x01020304);
}
#[test]
fn type_erased_u64_construction() {
let buf: [u8; 8] = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08];
let u64 = UnsignedByteField::new_from_be_bytes(8, &buf).expect("construction failed");
assert_eq!(u64.width, 8);
assert_eq!(u64.value, 0x0102030405060708);
}
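// Error handling for target or source buffers that are too small for the
// requested width.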
#[test]
fn type_u16_target_buf_too_small() {
let u16 = UnsignedByteFieldU16::new(500);
let mut buf: [u8; 1] = [0; 1];
let res = u16.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let err = res.unwrap_err();
match err {
ByteConversionError::ToSliceTooSmall(mismatch) => {
assert_eq!(mismatch.found, 1);
assert_eq!(mismatch.expected, 2);
}
_ => {
panic!("unexpected error {err}")
}
}
}
#[test]
fn type_erased_u16_target_buf_too_small() {
let u16 = UnsignedByteField::new(2, 500);
let mut buf: [u8; 1] = [0; 1];
let res = u16.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let err = res.unwrap_err();
match err {
ByteConversionError::ToSliceTooSmall(mismatch) => {
assert_eq!(mismatch.found, 1);
assert_eq!(mismatch.expected, 2);
}
_ => {
panic!("unexpected error {err}")
}
}
let u16 = UnsignedByteField::new_from_be_bytes(2, &buf);
assert!(u16.is_err());
let err = u16.unwrap_err();
if let UnsignedByteFieldError::ByteConversionError(
ByteConversionError::FromSliceTooSmall(mismatch),
) = err
{
assert_eq!(mismatch.expected, 2);
assert_eq!(mismatch.found, 1);
} else {
panic!("unexpected error {err}");
}
}
#[test]
fn type_u32_target_buf_too_small() {
let u32 = UnsignedByteFieldU32::new(500);
let mut buf: [u8; 3] = [0; 3];
let res = u32.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let err = res.unwrap_err();
match err {
ByteConversionError::ToSliceTooSmall(mismatch) => {
assert_eq!(mismatch.found, 3);
assert_eq!(mismatch.expected, 4);
}
_ => {
panic!("unexpected error {err}")
}
}
}
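// Round-trip sketch combining the write and construction paths exercised
// above: serialize a type-erased field and rebuild it from the same buffer.
#[test]
fn type_erased_u32_write_and_read_back() {
let field = UnsignedByteField::new(4, 0xdeadbeef);
let mut buf: [u8; 4] = [0; 4];
field
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
let read_back = UnsignedByteField::new_from_be_bytes(4, &buf).expect("construction failed");
assert_eq!(read_back.width, 4);
assert_eq!(read_back.value, 0xdeadbeef);
}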
}