7 Commits

SHA1        Message                                                   CI (Rust/spacepackets/pipeline/head)  Date
db471e313c  Merge branch 'improve_cds_short_impl' into all_features  passed                                2022-12-20 16:29:21 +01:00
6154ce6299  Merge branch 'add_cuc_time_impl' into all_features       (not shown)                           2022-12-20 16:28:31 +01:00
2756670efe  cargo fmt                                                 failed                                2022-12-19 17:01:56 +01:00
eb5ace0658  Merge branch 'main' into all_features                     passed                                2022-12-19 16:36:33 +01:00
b9cd08cefe  Merge branch 'main' into all_features                     passed                                2022-12-19 11:03:46 +01:00
0073db95f9  Merge branch 'add_cuc_time_impl' into all_features        passed                                2022-12-19 00:01:28 +01:00
251fcdd6c8  Merge branch 'add_cuc_time_impl' into all_features        passed                                2022-12-18 16:23:25 +01:00
43 changed files with 2878 additions and 16095 deletions


@ -1,39 +1,27 @@
on: [push]
name: ci
on: [push, pull_request]
jobs:
check:
name: Check build
name: Check
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- run: cargo check --release
test:
name: Run Tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Install nextest
uses: taiki-e/install-action@nextest
- run: cargo nextest run --all-features
- run: cargo test --doc
msrv:
name: Check MSRV
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.70.0
- run: cargo check --release
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: actions-rs/cargo@v1
with:
command: check
args: --release
cross-check:
name: Check Cross-Compilation
name: Check Cross
runs-on: ubuntu-latest
strategy:
matrix:
@ -41,32 +29,55 @@ jobs:
- armv7-unknown-linux-gnueabihf
- thumbv7em-none-eabihf
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
targets: "armv7-unknown-linux-gnueabihf, thumbv7em-none-eabihf"
- run: cargo check --release --target=${{matrix.target}} --no-default-features
profile: minimal
toolchain: stable
target: ${{ matrix.target }}
override: true
- uses: actions-rs/cargo@v1
with:
use-cross: true
command: check
args: --release --target=${{ matrix.target }} --no-default-features
fmt:
name: Check formatting
name: Rustfmt
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- run: cargo fmt --all -- --check
docs:
name: Check Documentation Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@nightly
- run: RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options" cargo +nightly doc --all-features
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- run: rustup component add rustfmt
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- run: cargo clippy -- -D warnings
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- run: rustup component add clippy
- uses: actions-rs/cargo@v1
with:
command: clippy
args: -- -D warnings
ci:
if: ${{ success() }}
# all new jobs must be added to this list
needs: [check, fmt, clippy]
runs-on: ubuntu-latest
steps:
- name: CI succeeded
run: exit 0

.gitignore (vendored): 7 changed lines

@ -1,7 +0,0 @@
# Rust
/target
/Cargo.lock
# CLion
/.idea/*
!/.idea/runConfigurations


@ -1,19 +0,0 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Check" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
<option name="command" value="check --all-features" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="emulateTerminal" value="false" />
<option name="channel" value="DEFAULT" />
<option name="requiredFeatures" value="true" />
<option name="allFeatures" value="true" />
<option name="withSudo" value="false" />
<option name="buildTarget" value="REMOTE" />
<option name="backtrace" value="SHORT" />
<envs />
<option name="isRedirectInput" value="false" />
<option name="redirectInputPath" value="" />
<method v="2">
<option name="CARGO.BUILD_TASK_PROVIDER" enabled="true" />
</method>
</configuration>
</component>


@ -1,19 +0,0 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Test" type="CargoCommandRunConfiguration" factoryName="Cargo Command" nameIsGenerated="true">
<option name="command" value="test" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="emulateTerminal" value="false" />
<option name="channel" value="DEFAULT" />
<option name="requiredFeatures" value="true" />
<option name="allFeatures" value="true" />
<option name="withSudo" value="false" />
<option name="buildTarget" value="REMOTE" />
<option name="backtrace" value="SHORT" />
<envs />
<option name="isRedirectInput" value="false" />
<option name="redirectInputPath" value="" />
<method v="2">
<option name="CARGO.BUILD_TASK_PROVIDER" enabled="true" />
</method>
</configuration>
</component>


@ -8,514 +8,6 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.14.0] 2025-05-10
## Changed
- Moved CRC constants/implementations to dedicated `crc` module.
- `crc::CRC_CCITT_FALSE_NO_TABLE` and `crc::CRC_CCITT_FALSE_BIG_TABLE` variants.
- Renamed `PusPacket::crc16` to `PusPacket::opt_crc16`.
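The relocated constants behave like any other `crc::Crc<u16>` instance. A minimal sketch, assuming the table-based constant keeps its historical `CRC_CCITT_FALSE` name inside the new `crc` module:
```rust
// Hedged sketch: module path and constant name are taken from the changelog entry
// above and may differ in detail.
use spacepackets::crc::CRC_CCITT_FALSE;

fn checksum_of(data: &[u8]) -> u16 {
    // `CRC_CCITT_FALSE` is a `crc::Crc<u16>`, so `checksum` comes from the `crc` crate.
    CRC_CCITT_FALSE.checksum(data)
}
```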
## Added
- `WritablePusPacket::write_to_bytes_crc_no_table` and `WritablePusPacket::write_to_bytes_no_crc`
variants.
- `PusTmReader::new_crc_no_table` and `PusTcReader::new_crc_no_table` variants.
- `crc16` methods for PUS TM and PUS TC reader.
- The PUS TM and PUS TC readers now return the reader instance directly instead of a tuple of the reader
  and the read size. The read length can be retrieved via the instance's `total_len` method, as sketched below.
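A minimal sketch of the reworked reader API, using only the names listed above; exact signatures and error types are assumptions:
```rust
use spacepackets::ecss::tc::PusTcReader;

fn parsed_len(raw: &[u8]) -> Option<usize> {
    // The constructor now returns the reader directly instead of a (reader, size) tuple.
    let reader = PusTcReader::new(raw).ok()?;
    // The number of bytes read from `raw` is retrieved from the instance.
    Some(reader.total_len())
}
```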
# [v0.13.1] 2025-03-21
- Bugfix for an operator precedence issue in `PusTcSecondaryHeader::pus_version`,
`PusTcSecondaryHeaderWithoutTimestamp::pus_version`, `CdsTime::from_bytes_with_u16_days` and
`CdsTime::from_bytes_with_u24_days`
# [v0.13.0] 2024-11-08
- Bumped MSRV to 1.81.0
- Bump `zerocopy` to v0.8.0
- Bump `thiserror` to v2.0.0
## Changed
- Migrated all Error implementations to thiserror, improved some naming and error handling in
general
# [v0.12.0] 2024-09-10
- Bumped MSRV to 1.70.0
## Added
- Added new `cfdp::tlv::TlvOwned` type which erases the lifetime and is clonable.
- Dedicated `cfdp::tlv::TlvLvDataTooLarge` error struct for APIs where this is the only possible
API error.
- Added a File Data PDU API which takes the expected file data size and then exposes the unwritten
  file data field as a mutable slice. This allows reading data from the virtual file system
  API into the file data buffer without an intermediate buffer.
- Generic `EofPdu::new` constructor.
- Added generic sequence counter module.
- Added `MsgToUserTlv::to_tlv` converter which converts the specialized type into a generic `Tlv`.
- Implemented `From<MsgToUserTlv> for Tlv` converter trait.
- Added CFDP maximum file segment length calculator method `calculate_max_file_seg_len_for_max_packet_len_and_pdu_header`
## Added and Changed
- Added new `ReadableTlv` to avoid some boilerplate code and have a common abstraction implemented
for both `Tlv` and `TlvOwned` to read the raw TLV data field and its length.
- Replaced `cfdp::tlv::TlvLvError` by `cfdp::tlv::TlvLvDataTooLarge` where applicable.
## Fixed
- Fixed an error in the EOF writer which wrote the fault location to the wrong buffer position.
- cfdp `ConditionCode::CheckLimitReached` previous had the wrong numerical value of `0b1001` (9)
and now has the correct value of `0b1010` (10).
## Changed
- Minor documentation build updates.
- Increased delegate version range to v0.13
# [v0.11.2] 2024-05-19
- Bumped MSRV to 1.68.2
## Fixed
- Removed `defmt::Format` impl for `MetadataPduCreator` which seems to be problematic.
# [v0.11.1] 2024-04-22
## Fixed
- The default data length for `SpHeader` constructors where the data field length is not
  specified is now 0.
- `SpHeader::new_from_fields` is now public.
## Added
- `SpHeader::to_vec` method.
# [v0.11.0] 2024-04-16
## Changed
- Moved `CCSDS_HEADER_LEN` constant to the crate root.
## Added
- Added `SpacePacketHeader` type alias for `SpHeader` type.
# [v0.11.0-rc.2] 2024-04-04
## Changed
- Renamed `PacketId` and `PacketSequenceCtrl` `new` method to `new_checked` and former
`new_const` method to `new`.
- Renamed `tc`, `tm`, `tc_unseg` and `tm_unseg` variants for `PacketId` and `SpHeader`
to `new_for_tc_checked`, `new_for_tm_checked`, `new_for_unseg_tc_checked` and
`new_for_unseg_tm_checked`.
- `PusTmCreator` and `PusTcCreator` now expect a regular instance of `SpHeader` instead of
a mutable reference.
## Added
- `SpHeader::new_from_apid` and `SpHeader::new_from_apid_checked` constructor.
- `#[inline]` attribute for a lot of small functions.
# [v0.11.0-rc.1] 2024-04-03
Major API changes for the time API. If you are using the time API, it is strongly recommended
to check all the API changes in the **Changed** chapter.
## Fixed
- CUC timestamp was fixed to include leap second corrections because it is based on the TAI
  time reference. The default CUC time object does not implement `CcsdsTimeProvider` anymore
  because the trait methods require cached leap second information. This task is now performed
  by `cuc::CucTimeWithLeapSecs`, which implements the trait.
## Added
- `From<$EcssEnum$TY> for $TY` for the ECSS enum type definitions.
- Added basic conversion support for the `time` library. Introduced new `chrono` and `timelib`
  feature gates.
- Added `CcsdsTimeProvider::timelib_date_time`.
- Optional support for `defmt` by adding optional `defmt::Format` derives for common types.
## Changed
- `PusTcCreator::new_simple` now expects a valid slice for the source data instead of an optional
slice. For telecommands without application data, `&[]` can be passed.
- `PusTmSecondaryHeader` constructors now expect a valid slice for the time stamp instead of an
  optional slice.
- Renamed `CcsdsTimeProvider::date_time` to `CcsdsTimeProvider::chrono_date_time`
- Renamed `CcsdsTimeCodes` to `CcsdsTimeCode`
- Renamed `cds::TimeProvider` to `cds::CdsTime`
- Renamed `cuc::TimeProviderCcsdsEpoch` to `cuc::CucTime`
- `UnixTimestamp` renamed to `UnixTime`
- `UnixTime` seconds are now private and can be retrieved using the `secs` member method.
- `UnixTime::new` renamed to `UnixTime::new_checked`.
- `UnixTime::secs` renamed to `UnixTime::as_secs`.
- `UnixTime` now has a nanosecond subsecond precision. The `new` constructor now expects
nanoseconds as the second argument.
- Added new `UnixTime::new_subsec_millis` and `UnixTime::new_subsec_millis_checked` API
  to still allow creating a timestamp with only millisecond subsecond resolution (see the sketch
  after this list).
- `CcsdsTimeProvider` now has a new `subsec_nanos` method in addition to a default
implementation for the `subsec_millis` method.
- `CcsdsTimeProvider::date_time` renamed to `CcsdsTimeProvider::chrono_date_time`.
- Added `UnixTime::MIN`, `UnixTime::MAX` and `UnixTime::EPOCH`.
- Added `UnixTime::timelib_date_time`.
- Error handling for ECSS and time module is more granular now, with a new
`DateBeforeCcsdsEpochError` error and a `DateBeforeCcsdsEpoch` enum variant for both
`CdsError` and `CucError`.
- `PusTmCreator` now has two lifetimes: One for the raw source data buffer and one for the
raw timestamp.
- Time API `from_now*` API renamed to `now*`.
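A minimal sketch of the renamed `UnixTime` API, built only from the items in the list above; the `Option` return types and exact signatures are assumptions:
```rust
use spacepackets::time::UnixTime;

fn build_stamps() -> Option<()> {
    // `new_checked` now takes the subsecond part as nanoseconds.
    let fine = UnixTime::new_checked(1_700_000_000, 500_000_000)?;
    // Millisecond-only resolution stays available through a dedicated constructor.
    let coarse = UnixTime::new_subsec_millis_checked(1_700_000_000, 500)?;
    assert_eq!(fine.as_secs(), coarse.as_secs());
    Some(())
}
```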
## Removed
- Legacy `PusTm` and `PusTc` objects.
# [v0.11.0-rc.0] 2024-03-04
## Added
- `From<$TY>` for the `EcssEnum$TY` ECSS enum type definitions.
- `Sub` implementation for `UnixTimestamp` to calculate the duration between two timestamps.
## Changed
- `CcsdsTimeProvider` `subsecond_millis` function now returns `u16` instead of `Option<u16>`.
- `UnixTimestamp` `subsecond_millis` function now returns `u16` instead of `Option<u16>`.
# [v0.10.0] 2024-02-17
## Added
- Added `value` and `to_vec` methods for the `UnsignedEnum` trait. The value is returned as
  a `u64`. Renamed the former `value` method on `GenericUnsignedByteField` to `value_typed`.
- Added `value_const` const function for `UnsignedByteField` type.
- Added `value_typed` const functions for `GenericUnsignedByteField` and `GenericEcssEnumWrapper`.
# [v0.9.0] 2024-02-07
## Added
- `CcsdsPacket`, `PusPacket` and `GenericPusTmSecondaryHeader` implementation for
`PusTmZeroCopyWriter`.
- Additional length checks for `PusTmZeroCopyWriter`.
## Changed
- `PusTmZeroCopyWriter`: Added additional timestamp length argument for `new` constructor.
## Fixed
- Typo: `PUC_TM_MIN_HEADER_LEN` -> `PUS_TM_MIN_HEADER_LEN`
# [v0.8.1] 2024-02-05
## Fixed
- Added `pub` visibility for `PacketSequenceCtrl::const_new`.
# [v0.8.0] 2024-02-05
## Added
- Added `len_written` and `to_vec` methods to the `TimeWriter` trait.
# [v0.7.0] 2024-02-01
# [v0.7.0-beta.4] 2024-01-23
## Fixed
- `MetadataPduCreator`: The serialization function shifted the closure requested information
to the wrong position (first reserved bit) inside the raw content field.
# [v0.7.0-beta.3] 2023-12-06
## Added
- Add `WritablePduPacket` trait which is a common trait of all CFDP PDU implementations.
- Add `CfdpPdu` trait which exposes fields and attributes common to all CFDP PDUs.
- Add `GenericTlv` and `WritableTlv` trait as abstractions for the various TLV types.
## Fixed
- Set the direction field inside the PDU header field correctly and explicitly for all CFDP PDU
  packets.
## Changed
- Split up `FinishedPdu` into `FinishedPduCreator` and `FinishedPduReader` to expose specialized
  APIs.
- Split up `MetadataPdu` into `MetadataPduCreator` and `MetadataPduReader` to expose specialized
  APIs.
- Cleaned up CUC time implementation. Added `width` and `counter` getter methods.
- Renamed `SerializablePusPacket` to `WritablePusPacket`.
- Renamed `UnsignedPfc` to `PfcUnsigned` and `RealPfc` to `PfcReal`.
- Renamed `WritablePduPacket.written_len` and `SerializablePusPacket.len_packed` to `len_written`.
- Introduce custom implementation of `PartialEq` for `CommonPduConfig` which only compares the
values for the source entity ID, destination entity ID and transaction sequence number field to
allow those fields to have different widths.
- Removed the `PusError::RawDataTooShort` variant which is already covered by
`PusError::ByteConversionError` variant.
- Renamed `TlvLvError::ByteConversionError` to `TlvLvError::ByteConversion`.
- Renamed `PusError::IncorrectCrc` to `PusError::ChecksumFailure`.
- Some more struct variant changes for error enumerations.
## Removed
- `PusError::NoRawData` variant.
- `cfdp::LenInBytes` which was not used.
# [v0.7.0-beta.2] 2023-09-26
## Added
- `PacketId` trait impls: `Ord`, `PartialOrd` and `Hash`
- `SerializablePusPacket` trait: Add `to_vec` method with default implementation.
# [v0.7.0-beta.1] 2023-08-28
- Bump `zerocopy` dependency to v0.7.0
## Changed
- The `Tlv` and `Lv` API return `&[u8]` instead of `Option<&[u8]>`.
- `ByteConversionError` error variants `ToSliceTooSmall` and `FromSliceTooSmall` are struct
variants now. `SizeMissmatch` was removed appropriately.
- `UnsignedByteFieldError` error variants `ValueTooLargeForWidth` and `InvalidWidth` are struct
variants now.
- `TimestampError` error variant `InvalidTimeCode` is struct variant now.
## Added
- Added `raw_data` API for `Tlv` and `Lv` to retrieve the whole `Lv`/`Tlv` slice if the object
was created from a raw bytestream.
- Added `MsgToUserTlv` helper class which wraps a regular `Tlv` and adds some useful functionality.
- `UnsignedByteField` and `GenericUnsignedByteField` `new` methods are `const` now.
- `PduError` variants which contained a tuple variant with multiple fields were converted to a
struct variant.
## Added
- Added `pdu_datafield_len` getter function for `PduHeader`
## Removed
- `SizeMissmatch` because it is not required for the `ByteConversionError` error enumeration
anymore.
# [v0.7.0-beta.0] 2023-08-16
- Moved MSRV from v1.60 to v1.61.
## Changed
- `PusPacket` trait: `user_data` now returns `&[u8]` instead of `Option<&[u8]>`. Empty user data
can simply be an empty slice.
- Moved ECSS TC components from `tc` to `ecss.tc`.
- Moved ECSS TM components from `tm` to `ecss.tm`.
- Converted `PusTc` class to more specialized `PusTcCreator` and `PusTcReader`
classes. The old `PusTc` class is deprecated now.
- Converted `PusTm` class to more specialized `PusTmCreator` and `PusTmReader`
classes. The old `PusTm` class is deprecated now.
- Implement `Display` and `Error` for `StdTimestampError` properly.
- Remove some redundant `Error` suffixes for enum error variants.
- `CommonPduConfig`: `new_with_defaults` replaced by `new_with_byte_fields`.
## Added
- `source_data` and `app_data` API provided for PUS TM and PUS TC reader classes. These simply
call `user_data` but are also in line with the PUS packet standard names for those fields.
- Added new marker trait `IsPusTelemetry` implemented by `PusTmCreator` and `PusTmReader`.
- Added new marker trait `IsPusTelecommand` implemented by `PusTcCreator` and `PusTcReader`.
- `metadata_param` getter method for the `MetadataPdu` object.
- `Default` impl for CFDP `ChecksumType`
- `Default` impl for CFDP `CommonPduConfig`
## Fixed
- All `MetadataGenericParam` fields are now public.
- New setter method `set_source_and_dest_id` for `CommonPduConfig`.
# [v0.6.0] 2023-07-06
## Added
- Added new `util` module which contains the following (new) helper modules:
- `UnsignedEnum` trait as an abstraction for unsigned byte fields with variable lengths. It is
not tied to the ECSS PFC value like the `EcssEnumeration` trait. The method to retrieve
the size of the unsigned enumeration in bytes is now called `size`.
- `GenericUnsignedByteField<TYPE>` and helper typedefs `UnsignedU8`, `UnsignedU16`, `UnsignedU32`
and `UnsignedU64` as helper types implementing `UnsignedEnum`
- `UnsignedByteField` as a type-erased helper.
- Initial CFDP support: Added PDU packet implementation.
- Added `SerializablePusPacket` as a generic abstraction for PUS packets which are
writable.
- Added new `PusTmZeroCopyWriter` class which allows setting fields on a raw TM packet,
  which might be more efficient than modifying and re-writing a packet with the
  `PusTm` object.
## Changed
- The `EcssEnumeration` now requires the `UnsignedEnum` trait and only adds the `pfc` method to it.
- Renamed `byte_width` usages to `size` (part of new `UnsignedEnum` trait)
- Moved `ecss::CRC_CCITT_FALSE` CRC constant to the root module. This CRC type is not just used by
the PUS standard, but by the CCSDS Telecommand standard and the CFDP standard as well.
# [v0.5.4] 2023-02-12
## Added
- `Clone` trait requirement for `time::cds::ProvidesDaysLen` trait.
- Added `Copy` and `Clone` derives for `DaysLen16Bits` and `DaysLen24Bits`.
# [v0.5.3] 2023-02-05
## Added
- `num_enum` dependency to avoid boilerplate code for primitive to enum conversions, for example
  for the PUS subservices.
- `ecss.event` module containing a `Subservice` enum.
- `ecss.verification` module containing a `Subservice` enum.
- `ecss.scheduling` module containing a `Subservice` enum and some other helper enumerations.
- `ecss.hk` module containing a `Subservice` enum.
## Changed
- Added missing Service IDs to `ecss.PusServiceId` and marked it `#[non_exhaustive]`.
## Fixed
- `time.UnixTimestamp`: All constructors and `From` conversions now use the `new` constructor,
which should cause a correct conversion of 0 subsecond milliseconds to a `None` value.
# [v0.5.2] 2023-01-26
## Added
- Added `.gitignore`.
## Fixed
- Correct implementation of trait `PartialEq` for `PusTc` and `PusTm`. The previous auto-derivations
  were incorrect because they also compared fields unrelated to the raw byte representation.
## Changed
- Renamed the `PusTc` `raw` method to `raw_bytes` and added better docs to avoid confusion.
  Deprecated `raw` to avoid a breaking change.
- Added `raw_bytes` method to `PusTm`.
# [v0.5.1] 2023-01-22
## Added
- `time::cds::TimeProvider`
- Add `Ord` and `PartialOrd`, use custom `PartialEq` impl to account for precision correctly.
- Add `precision_as_ns` function which converts microsecond and picosecond precision values
into nanoseconds.
- Add conversion trait to convert `cds::TimeProvider<DaysLen16Bits>` into
`cds::TimeProvider<DaysLen24Bits>` and vice-versa.
- `time::UnixTimestamp`
- Add `Ord` and `PartialOrd` implementations.
- Add `Add<Duration>` and `AddAssign<Duration>` implementations.
## Fixed
- `time::cds::TimeProvider`: Fixed a bug where subsecond milliseconds were not accounted for
when the provider has no submillisecond precision.
# [v0.5.0] 2023-01-20
The timestamp of `PusTm` is now optional. See Added and Changed section for details.
## Added
- `PusTmSecondaryHeader`: New `new_simple_no_timestamp` API to create secondary header without
timestamp.
- `PusTm`: Add `new_simple_no_timestamp` method to create TM without timestamp
- New `UnixTimestamp` abstraction which contains the unix seconds as an `i64`
and an optional subsecond millisecond counter (`u16`)
- `MS_PER_DAY` constant.
- CUC: Added `from_date_time` and `from_unix_stamp` constructors for time provider.
- CUC: Add `Add<Duration>` and `AddAssign<Duration>` impl for time provider.
### CDS time module
- Implement `Add<Duration>` and `AddAssign<Duration>` for time providers, which allows
easily adding offsets to the providers.
- Implement `TryFrom<DateTime<Utc>>` for time providers.
- `get_dyn_time_provider_from_bytes`: Requires `alloc` support and returns
the correct `TimeProvider` instance wrapped as a boxed trait object
`Box<DynCdsTimeProvider>` by checking the length of days field.
- Added constructor function to create the time provider
from `chrono::DateTime<Utc>` and a generic UNIX timestamp (`i64` seconds
and subsecond milliseconds).
- `MAX_DAYS_24_BITS`, which contains the maximum value that can be supplied
  to the days field of a CDS time provider with a 24-bit days field width.
- New `CdsTimestamp` trait which encapsulates common fields for all CDS time providers.
- `from_unix_secs_with_u24_days` and `from_unix_secs_with_u16_days` which create
the time provider from a `UnixTimestamp` reference.
- `from_dt_with_u16_days`, `from_dt_with_u24_days` and their `..._us_precision` and
  `..._ps_precision` variants, which allow creating time providers from
  a `chrono::DateTime<Utc>`.
- Add `from_bytes_with_u24_days` and `from_bytes_with_u16_days` associated methods
## Changed
- (breaking) `unix_epoch_to_ccsds_epoch`: Expect and return `i64` instead of `u64` now.
- (breaking) `ccsds_epoch_to_unix_epoch`: Expect and return `i64` instead of `u64` now.
- (breaking) `PusTmSecondaryHeader`: Timestamp is optional now, which translates to a
timestamp of size 0.
- (breaking): `PusTm`: Renamed `time_stamp` method to `timestamp`; it also returns
  `Option<&'src_data [u8]>` now.
- (breaking): `PusTmSecondaryHeader`: Renamed `time_stamp` field to `timestamp` for consistency.
- (breaking): Renamed `from_now_with_u24_days_and_us_prec` to `from_now_with_u24_days_us_precision`.
Also did the same for the `u16` variant.
- (breaking): Renamed `from_now_with_u24_days_and_ps_prec` to `from_now_with_u24_days_ps_precision`.
Also did the same for the `u16` variant.
- `CcsdsTimeProvider` trait (breaking):
- Add new `unix_stamp` method returning the new `UnixTimeStamp` struct.
- Add new `subsecond_millis` method returning counter `Option<u16>`.
- Default impl for `unix_stamp` which re-uses `subsecond_millis` and
existing `unix_seconds` method.
- `TimestampError` (breaking): Add `DateBeforeCcsdsEpoch` error type
  because the new CDS API allows supplying invalid date times before the CCSDS epoch.
  Marked `TimestampError` with the `#[non_exhaustive]` attribute to prevent
  future breakages if new error variants are added.
# [v0.4.2] 14.01.2023
## Fixed
- CDS timestamp: Fixed another small logic error for stamp creation from the current
time with picosecond precision.
PR: https://egit.irs.uni-stuttgart.de/rust/spacepackets/pulls/8
# [v0.4.1] 14.01.2023
## Fixed
- CDS timestamp: The conversion functions from the current time were buggy
  when specifying picosecond precision, which could lead to overflowing
  multiplications and/or incorrect precision fields.
PR: https://egit.irs.uni-stuttgart.de/rust/spacepackets/pulls/7
# [v0.4.0] 10.01.2023
## Fixed
- Remove `Default` derive on CDS time provider. This can lead to uninitialized preamble fields.
## Changed
- `serde` support is now optional and behind the `serde` feature.
@ -531,14 +23,13 @@ The timestamp of `PusTm` is now optional. See Added and Changed section for deta
## Added
- `SpHeader` getter function `sp_header` added for `PusTc`
PR: https://egit.irs.uni-stuttgart.de/rust/spacepackets/pulls/6
CUC PR: https://egit.irs.uni-stuttgart.de/rust/spacepackets/pulls/4/files
- Added PFC enumerations: `ecss::UnsignedPfc` and `ecss::RealPfc`.
PR: https://egit.irs.uni-stuttgart.de/rust/spacepackets/pulls/5
- Added `std::error::Error` implementation for all error enumerations if the `std` feature
is enabled.
- CUC timestamp implementation as specified in CCSDS 301.0-B-4 section 3.2.
PR: https://egit.irs.uni-stuttgart.de/rust/spacepackets/pulls/4/files
- ASCII timestamps as specified in CCSDS 301.0-B-4 section 3.5.
- Added MSRV in `Cargo.toml` with the `rust-version` field set to Rust 1.60.
- `serde` `Serialize` and `Deserialize` added to all types.
@ -584,9 +75,3 @@ The timestamp of `PusTm` is now optional. See Added and Changed section for deta
Initial release with CCSDS Space Packet Primary Header implementation and basic PUS TC and TM
implementations.
[unreleased]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.14.0...HEAD
[v0.14.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.13.1...v0.14.0
[v0.13.1]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.13.0...v0.13.1
[v0.13.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.12.0...v0.13.0
[v0.12.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.11.2...v0.12.0


@ -1,8 +1,8 @@
[package]
name = "spacepackets"
version = "0.14.0"
version = "0.3.1"
edition = "2021"
rust-version = "1.70.0"
rust-version = "1.60"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
description = "Generic implementations for various CCSDS and ECSS packet standards"
homepage = "https://egit.irs.uni-stuttgart.de/rust/spacepackets"
@ -13,30 +13,33 @@ categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-sup
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
crc = "3.3"
delegate = ">=0.8, <=0.13"
paste = "1"
zerocopy = { version = "0.8", features = ["derive"] }
thiserror = { version = "2", default-features = false }
num_enum = { version = ">0.5, <=0.7", default-features = false }
num-traits = { version = "0.2", default-features = false }
serde = { version = "1", optional = true, default-features = false, features = ["derive"] }
zerocopy = "0.6"
crc = "3.0"
delegate = "0.8"
time = { version = "0.3", default-features = false, optional = true }
chrono = { version = "0.4", default-features = false, optional = true }
defmt = { version = "1", default-features = false, optional = true }
[dependencies.serde]
version = "1.0"
optional = true
default-features = false
features = ["derive"]
[dependencies.chrono]
version = "0.4"
default-features = false
[dependencies.num-traits]
version = "0.2"
default-features = false
[dev-dependencies.postcard]
version = "1.0"
[features]
default = ["std"]
std = ["alloc", "chrono/std", "chrono/clock", "thiserror/std"]
serde = ["dep:serde", "chrono?/serde"]
alloc = ["chrono?/alloc", "defmt?/alloc", "serde?/alloc"]
timelib = ["dep:time"]
[dev-dependencies]
postcard = { version = "1", features = ["alloc"] }
chrono = "0.4"
std = ["chrono/std", "chrono/clock", "alloc"]
serde = ["dep:serde", "chrono/serde"]
alloc = ["postcard/alloc", "chrono/alloc"]
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--generate-link-to-definition"]
rustdoc-args = ["--cfg", "doc_cfg"]


@ -1 +0,0 @@
github: robamu

NOTICE: 2 changed lines

@ -1,3 +1,3 @@
Generic implementations for various CCSDS and ECSS packet standards.
This software contains code developed at the University of Stuttgart's Institute of Space Systems.
This software contains code developed at the University of Stuttgart.


@ -13,21 +13,21 @@ Currently, this includes the following components:
- Space Packet implementation according to
[CCSDS Blue Book 133.0-B-2](https://public.ccsds.org/Pubs/133x0b2e1.pdf)
- CCSDS File Delivery Protocol (CFDP) packet implementations according to
[CCSDS Blue Book 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf)
- PUS Telecommand and PUS Telemetry implementation according to the
[ECSS-E-ST-70-41C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
- CUC (CCSDS Unsegmented Time Code) implementation according to
[CCSDS 301.0-B-4 3.2](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
- CDS (CCSDS Day Segmented Time Code) implementation according to
- CDS Short Time Code implementation according to
[CCSDS 301.0-B-4 3.3](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
- Some helper types to support ASCII timecodes as specified in
- Some helper types to support ASCII timecodes ad specified in
[CCSDS 301.0-B-4 3.5](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
# Features
`spacepackets` supports various runtime environments and is also suitable for `no_std` environments.
It also offers optional support for [`serde`](https://serde.rs/). This allows serializing and
deserializing the provided packet types with an appropriate `serde` provider like
[`postcard`](https://github.com/jamesmunns/postcard), as the sketch below shows.
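A minimal round-trip sketch, assuming the `serde` feature is enabled and that `SpHeader` derives `Serialize`, `Deserialize` and `PartialEq`; the `new_from_apid` constructor name is taken from the changelog and details may differ:
```rust
use spacepackets::SpHeader;

fn serde_roundtrip() -> Result<(), postcard::Error> {
    let header = SpHeader::new_from_apid(0x02);
    // postcard handles the wire format; any other serde provider works the same way.
    let raw = postcard::to_allocvec(&header)?;
    let readback: SpHeader = postcard::from_bytes(&raw)?;
    assert_eq!(header, readback);
    Ok(())
}
```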
## Default features
- [`std`](https://doc.rust-lang.org/std/): Enables functionality relying on the standard library.
@ -38,35 +38,8 @@ Currently, this includes the following components:
## Optional Features
- [`serde`](https://serde.rs/): Adds `serde` support for most types by adding `Serialize` and `Deserialize` `derive`s
- [`chrono`](https://crates.io/crates/chrono): Add basic support for the `chrono` time library.
- [`timelib`](https://crates.io/crates/time): Add basic support for the `time` time library.
- [`defmt`](https://defmt.ferrous-systems.com/): Add support for `defmt` by adding the
  [`defmt::Format`](https://defmt.ferrous-systems.com/format) derive on many types.
# Examples
You can check the [documentation](https://docs.rs/spacepackets) of individual modules for various
usage examples.
# Coverage
Coverage was generated using [`grcov`](https://github.com/mozilla/grcov). If you have not done so
already, install the `llvm-tools-preview`:
```sh
rustup component add llvm-tools-preview
cargo install grcov --locked
```
After that, you can simply run `coverage.py` to test the project with coverage. You can optionally
supply the `--open` flag to open the coverage report in your web browser.
# Miri
You can run the [`miri`](https://github.com/rust-lang/miri) tool on this library to check for
undefined behaviour (UB). This library does not use any `unsafe` code blocks, but `miri` could
still catch UB from used libraries.
```sh
cargo +nightly miri nextest run --all-features
```


@ -6,23 +6,10 @@ RUN apt-get update
RUN apt-get --yes upgrade
# tzdata is a dependency, won't install otherwise
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get --yes install rsync curl
# set CROSS_CONTAINER_IN_CONTAINER to inform `cross` that it is executed from within a container
ENV CROSS_CONTAINER_IN_CONTAINER=true
RUN rustup install nightly && \
rustup target add thumbv7em-none-eabihf armv7-unknown-linux-gnueabihf && \
rustup component add rustfmt clippy llvm-tools-preview
# Get grcov
RUN curl -sSL https://github.com/mozilla/grcov/releases/download/v0.8.19/grcov-x86_64-unknown-linux-gnu.tar.bz2 | tar -xj --directory /usr/local/bin
# Get nextest
RUN curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin
# SSH stuff to allow deployment to doc server
RUN adduser --uid 114 jenkins
# Add documentation server to known hosts
RUN echo "|1|/LzCV4BuTmTb2wKnD146l9fTKgQ=|NJJtVjvWbtRt8OYqFgcYRnMQyVw= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNL8ssTonYtgiR/6RRlSIK9WU1ywOcJmxFTLcEblAwH7oifZzmYq3XRfwXrgfMpylEfMFYfCU8JRqtmi19xc21A=" >> /etc/ssh/ssh_known_hosts
RUN echo "|1|CcBvBc3EG03G+XM5rqRHs6gK/Gg=|oGeJQ+1I8NGI2THIkJsW92DpTzs= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNL8ssTonYtgiR/6RRlSIK9WU1ywOcJmxFTLcEblAwH7oifZzmYq3XRfwXrgfMpylEfMFYfCU8JRqtmi19xc21A=" >> /etc/ssh/ssh_known_hosts
# TODO: installing cross is problematic, permission issues
RUN rustup target add thumbv7em-none-eabihf armv7-unknown-linux-gnueabihf && \
rustup component add rustfmt clippy

automation/Jenkinsfile (vendored): 109 changed lines

@ -1,81 +1,42 @@
pipeline {
agent {
dockerfile {
dir 'automation'
reuseNode true
args '--network host'
agent {
dockerfile {
dir 'automation'
reuseNode true
}
}
}
stages {
stage('Rust Toolchain Info') {
steps {
sh 'rustc --version'
}
}
stage('Clippy') {
steps {
sh 'cargo clippy'
}
}
stage('Docs') {
steps {
sh """
RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options" cargo +nightly doc --all-features
"""
stages {
stage('Clippy') {
steps {
sh 'cargo clippy'
}
}
stage('Rustfmt') {
steps {
sh 'cargo fmt'
}
}
stage('Test') {
steps {
sh 'cargo test'
}
}
stage('Check') {
steps {
sh 'cargo check'
}
}
stage('Check Cross Embedded Bare Metal') {
steps {
sh 'cargo check --target thumbv7em-none-eabihf --no-default-features'
}
}
stage('Check Cross Embedded Linux') {
steps {
sh 'cargo check --target armv7-unknown-linux-gnueabihf'
}
}
}
stage('Rustfmt') {
steps {
sh 'cargo fmt --all --check'
}
}
stage('Test') {
steps {
sh 'cargo nextest r --all-features'
sh 'cargo test --doc'
}
}
stage('Check with all features') {
steps {
sh 'cargo check --all-features'
}
}
stage('Check with no features') {
steps {
sh 'cargo check --no-default-features'
}
}
stage('Check Cross Embedded Bare Metal') {
steps {
sh 'cargo check --target thumbv7em-none-eabihf --no-default-features'
}
}
stage('Check Cross Embedded Linux') {
steps {
sh 'cargo check --target armv7-unknown-linux-gnueabihf'
}
}
stage('Run test with Coverage') {
when {
anyOf {
branch 'main';
branch pattern: 'cov-deployment*'
}
}
steps {
withEnv(['RUSTFLAGS=-Cinstrument-coverage', 'LLVM_PROFILE_FILE=target/coverage/%p-%m.profraw']) {
echo "Executing tests with coverage"
sh 'cargo clean'
sh 'cargo test --all-features'
sh 'grcov . -s . --binary-path ./target/debug -t html --branch --ignore-not-existing -o ./target/debug/coverage/'
sshagent(credentials: ['documentation-buildfix']) {
// Deploy to Apache webserver
sh 'rsync --mkpath -r --delete ./target/debug/coverage/ buildfix@documentation.irs.uni-stuttgart.de:/projects/spacepackets/coverage-rs/latest/'
}
}
}
}
}
}


@ -1,54 +0,0 @@
#!/usr/bin/env python3
import os
import logging
import argparse
import webbrowser
_LOGGER = logging.getLogger()
def generate_cov_report(open_report: bool, format: str):
logging.basicConfig(level=logging.INFO)
os.environ["RUSTFLAGS"] = "-Cinstrument-coverage"
os.environ["LLVM_PROFILE_FILE"] = "target/coverage/%p-%m.profraw"
_LOGGER.info("Executing tests with coverage")
os.system("cargo test --all-features")
out_path = "./target/debug/coverage"
if format == "lcov":
out_path = "./target/debug/lcov.info"
os.system(
f"grcov . -s . --binary-path ./target/debug/ -t {format} --branch --ignore-not-existing "
f"-o {out_path}"
)
if format == "lcov":
os.system(
"genhtml -o ./target/debug/coverage/ --show-details --highlight --ignore-errors source "
"--legend ./target/debug/lcov.info"
)
if open_report:
coverage_report_path = os.path.abspath("./target/debug/coverage/index.html")
webbrowser.open_new_tab(coverage_report_path)
_LOGGER.info("Done")
def main():
parser = argparse.ArgumentParser(
description="Generate coverage report and optionally open it in a browser"
)
parser.add_argument(
"--open", action="store_true", help="Open the coverage report in a browser"
)
parser.add_argument(
"--format",
choices=["html", "lcov"],
default="html",
help="Choose report format (html or lcov)",
)
args = parser.parse_args()
generate_cov_report(args.open, args.format)
if __name__ == "__main__":
main()


@ -1,3 +0,0 @@
#!/bin/sh
export RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options"
cargo +nightly doc --all-features --open


@ -1,25 +0,0 @@
Checklist for new releases
=======
# Pre-Release
1. Make sure any new modules are documented sufficiently and check the docs with
`RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options" cargo +nightly doc --all-features --open`
or `cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docsrs", "--generate-link-to-definition"]' --open`
(was problematic on more recent nightly versions).
2. Bump version specifier in `Cargo.toml`.
3. Update `CHANGELOG.md`: Convert `unreleased` section into version section with date and add new
`unreleased` section.
4. Run `cargo test --all-features` or `cargo nextest r --all-features` together with
`cargo test --doc`.
5. Run `cargo fmt` and `cargo clippy`. Check `cargo msrv` against MSRV in `Cargo.toml`.
6. Wait for CI/CD results for EGit and Github. These also check cross-compilation for bare-metal
targets.
# Release
1. `cargo publish`
# Post-Release
1. Create a new release on `EGit` based on the release branch.


@ -1,344 +0,0 @@
//! Generic CFDP length-value (LV) abstraction as specified in CFDP 5.1.8.
use crate::ByteConversionError;
use core::str::Utf8Error;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::string::String;
use super::TlvLvDataTooLargeError;
pub const MIN_LV_LEN: usize = 1;
/// Generic CFDP length-value (LV) abstraction as specified in CFDP 5.1.8.
///
/// Please note that this class is zero-copy and does not generate a copy of the value data for
/// both the regular [Self::new] constructor and the [Self::from_bytes] constructor.
///
/// # Lifetimes
/// * `data`: If the LV is generated from a raw bytestream, this will be the lifetime of
/// the raw bytestream. If the LV is generated from a raw slice or a similar data reference,
/// this will be the lifetime of that data reference.
#[derive(Debug, Copy, Clone, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct Lv<'data> {
data: &'data [u8],
// If the LV was generated from a raw bytestream, this will contain the start of the
// full LV.
pub(crate) raw_data: Option<&'data [u8]>,
}
impl PartialEq for Lv<'_> {
fn eq(&self, other: &Self) -> bool {
self.data == other.data
}
}
pub(crate) fn generic_len_check_data_serialization(
buf: &[u8],
data_len: usize,
min_overhead: usize,
) -> Result<(), ByteConversionError> {
if buf.len() < data_len + min_overhead {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: data_len + min_overhead,
});
}
Ok(())
}
pub(crate) fn generic_len_check_deserialization(
buf: &[u8],
min_overhead: usize,
) -> Result<(), ByteConversionError> {
if buf.len() < min_overhead {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: min_overhead,
});
}
Ok(())
}
impl<'data> Lv<'data> {
#[inline]
pub fn new(data: &[u8]) -> Result<Lv, TlvLvDataTooLargeError> {
if data.len() > u8::MAX as usize {
return Err(TlvLvDataTooLargeError(data.len()));
}
Ok(Lv {
data,
raw_data: None,
})
}
/// Creates a LV with an empty value field.
#[inline]
pub fn new_empty() -> Lv<'data> {
Lv {
data: &[],
raw_data: None,
}
}
/// Helper function to build a string LV. This is especially useful for the file or directory
/// path LVs
#[inline]
pub fn new_from_str(str_slice: &str) -> Result<Lv, TlvLvDataTooLargeError> {
Self::new(str_slice.as_bytes())
}
/// Helper function to build a string LV. This is especially useful for the file or directory
/// path LVs
#[cfg(feature = "std")]
#[inline]
pub fn new_from_string(string: &'data String) -> Result<Lv<'data>, TlvLvDataTooLargeError> {
Self::new(string.as_bytes())
}
/// Returns the length of the value part, not including the length byte.
#[inline]
pub fn len_value(&self) -> usize {
self.data.len()
}
/// Returns the full raw length, including the length byte.
#[inline]
pub fn len_full(&self) -> usize {
self.len_value() + 1
}
/// Checks whether the value field is empty.
#[inline]
pub fn is_empty(&self) -> bool {
self.data.len() == 0
}
#[inline]
pub fn value(&self) -> &[u8] {
self.data
}
/// If the LV was generated from a raw bytestream using [Self::from_bytes], the raw start
/// of the LV can be retrieved with this method.
#[inline]
pub fn raw_data(&self) -> Option<&[u8]> {
self.raw_data
}
/// Convenience function to extract the value as a [str]. This is useful if the LV is
/// known to contain a [str], for example being a file name.
#[inline]
pub fn value_as_str(&self) -> Option<Result<&'data str, Utf8Error>> {
if self.is_empty() {
return None;
}
Some(core::str::from_utf8(self.data))
}
/// Writes the LV to a raw buffer. Please note that the first byte will contain the length
/// of the value, but the values may not exceed a length of [u8::MAX].
pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
generic_len_check_data_serialization(buf, self.len_value(), MIN_LV_LEN)?;
Ok(self.write_to_be_bytes_no_len_check(buf))
}
/// Reads a LV from a raw buffer.
#[inline]
pub fn from_bytes(buf: &'data [u8]) -> Result<Lv<'data>, ByteConversionError> {
generic_len_check_deserialization(buf, MIN_LV_LEN)?;
Self::from_be_bytes_no_len_check(buf)
}
pub(crate) fn write_to_be_bytes_no_len_check(&self, buf: &mut [u8]) -> usize {
if self.is_empty() {
buf[0] = 0;
return MIN_LV_LEN;
}
// Length check in constructor ensures the length always has a valid value.
buf[0] = self.data.len() as u8;
buf[MIN_LV_LEN..self.data.len() + MIN_LV_LEN].copy_from_slice(self.data);
MIN_LV_LEN + self.data.len()
}
#[inline]
pub(crate) fn from_be_bytes_no_len_check(
buf: &'data [u8],
) -> Result<Lv<'data>, ByteConversionError> {
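// Callers like [Self::from_bytes] check for at least MIN_LV_LEN bytes first, so reading the
// length byte here is in bounds.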
let value_len = buf[0] as usize;
generic_len_check_deserialization(buf, value_len + MIN_LV_LEN)?;
Ok(Self {
data: &buf[MIN_LV_LEN..MIN_LV_LEN + value_len],
raw_data: Some(buf),
})
}
}
#[cfg(test)]
pub mod tests {
use alloc::string::ToString;
use super::*;
use crate::ByteConversionError;
use std::string::String;
#[test]
fn test_basic() {
let lv_data: [u8; 4] = [1, 2, 3, 4];
let lv_res = Lv::new(&lv_data);
assert!(lv_res.is_ok());
let lv = lv_res.unwrap();
assert!(!lv.value().is_empty());
let val = lv.value();
assert_eq!(val[0], 1);
assert_eq!(val[1], 2);
assert_eq!(val[2], 3);
assert_eq!(val[3], 4);
assert!(!lv.is_empty());
assert_eq!(lv.len_full(), 5);
assert_eq!(lv.len_value(), 4);
}
#[test]
fn test_empty() {
let lv_empty = Lv::new_empty();
assert_eq!(lv_empty.len_value(), 0);
assert_eq!(lv_empty.len_full(), 1);
assert!(lv_empty.is_empty());
let mut buf: [u8; 4] = [0xff; 4];
let res = lv_empty.write_to_be_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, 1);
assert_eq!(buf[0], 0);
}
#[test]
fn test_serialization() {
let lv_data: [u8; 4] = [1, 2, 3, 4];
let lv_res = Lv::new(&lv_data);
assert!(lv_res.is_ok());
let lv = lv_res.unwrap();
let mut buf: [u8; 16] = [0; 16];
let res = lv.write_to_be_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, 5);
assert_eq!(buf[0], 4);
assert_eq!(buf[1], 1);
assert_eq!(buf[2], 2);
assert_eq!(buf[3], 3);
assert_eq!(buf[4], 4);
}
#[test]
fn test_deserialization() {
let mut buf: [u8; 16] = [0; 16];
buf[0] = 4;
buf[1] = 1;
buf[2] = 2;
buf[3] = 3;
buf[4] = 4;
let lv = Lv::from_bytes(&buf);
assert!(lv.is_ok());
let lv = lv.unwrap();
assert!(!lv.is_empty());
assert_eq!(lv.len_value(), 4);
assert_eq!(lv.len_full(), 5);
assert!(lv.raw_data().is_some());
assert_eq!(lv.raw_data().unwrap(), buf);
let val = lv.value();
assert_eq!(val[0], 1);
assert_eq!(val[1], 2);
assert_eq!(val[2], 3);
assert_eq!(val[3], 4);
}
#[test]
fn test_deserialization_empty() {
let buf: [u8; 2] = [0; 2];
let lv_empty = Lv::from_bytes(&buf);
assert!(lv_empty.is_ok());
let lv_empty = lv_empty.unwrap();
assert!(lv_empty.is_empty());
}
#[test]
fn test_data_too_large() {
let data_big: [u8; u8::MAX as usize + 1] = [0; u8::MAX as usize + 1];
let lv = Lv::new(&data_big);
assert!(lv.is_err());
let error = lv.unwrap_err();
assert_eq!(error.0, u8::MAX as usize + 1);
assert_eq!(
error.to_string(),
"data with size 256 larger than allowed 255 bytes"
);
}
#[test]
fn test_serialization_buf_too_small() {
let mut buf: [u8; 3] = [0; 3];
let lv_data: [u8; 4] = [1, 2, 3, 4];
let lv = Lv::new(&lv_data).unwrap();
let res = lv.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let error = res.unwrap_err();
if let ByteConversionError::ToSliceTooSmall { found, expected } = error {
assert_eq!(expected, 5);
assert_eq!(found, 3);
} else {
panic!("invalid error {}", error);
}
}
#[test]
fn test_deserialization_buf_too_small() {
let mut buf: [u8; 3] = [0; 3];
buf[0] = 4;
let res = Lv::from_bytes(&buf);
assert!(res.is_err());
let error = res.unwrap_err();
if let ByteConversionError::FromSliceTooSmall { found, expected } = error {
assert_eq!(found, 3);
assert_eq!(expected, 5);
} else {
panic!("invalid error {}", error);
}
}
fn verify_test_str_lv(lv: Lv) {
let mut buf: [u8; 16] = [0; 16];
let res = lv.write_to_be_bytes(&mut buf);
assert!(res.is_ok());
let res = res.unwrap();
assert_eq!(res, 8 + 1);
assert_eq!(buf[0], 8);
assert_eq!(buf[1], b't');
assert_eq!(buf[2], b'e');
assert_eq!(buf[3], b's');
assert_eq!(buf[4], b't');
assert_eq!(buf[5], b'.');
assert_eq!(buf[6], b'b');
assert_eq!(buf[7], b'i');
assert_eq!(buf[8], b'n');
}
#[test]
fn test_str_helper() {
let test_str = "test.bin";
let str_lv = Lv::new_from_str(test_str);
assert!(str_lv.is_ok());
verify_test_str_lv(str_lv.unwrap());
}
#[test]
fn test_string_helper() {
let string = String::from("test.bin");
let str_lv = Lv::new_from_string(&string);
assert!(str_lv.is_ok());
verify_test_str_lv(str_lv.unwrap());
}
}


@ -1,268 +0,0 @@
//! Low-level CCSDS File Delivery Protocol (CFDP) support according to [CCSDS 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf).
use crate::ByteConversionError;
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
pub mod lv;
pub mod pdu;
pub mod tlv;
/// This is the name of the standard this module is based on.
pub const CFDP_VERSION_2_NAME: &str = "CCSDS 727.0-B-5";
/// Currently, only this version is supported.
pub const CFDP_VERSION_2: u8 = 0b001;
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum PduType {
FileDirective = 0,
FileData = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum Direction {
TowardsReceiver = 0,
TowardsSender = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum TransmissionMode {
Acknowledged = 0,
Unacknowledged = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum CrcFlag {
NoCrc = 0,
WithCrc = 1,
}
impl From<bool> for CrcFlag {
fn from(value: bool) -> Self {
if value {
return CrcFlag::WithCrc;
}
CrcFlag::NoCrc
}
}
impl From<CrcFlag> for bool {
fn from(value: CrcFlag) -> Self {
if value == CrcFlag::WithCrc {
return true;
}
false
}
}
/// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum SegmentMetadataFlag {
NotPresent = 0,
Present = 1,
}
/// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum SegmentationControl {
NoRecordBoundaryPreservation = 0,
WithRecordBoundaryPreservation = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum FaultHandlerCode {
NoticeOfCancellation = 0b0001,
NoticeOfSuspension = 0b0010,
IgnoreError = 0b0011,
AbandonTransaction = 0b0100,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum ConditionCode {
/// This is not an error condition for which a fault handler override can be specified
NoError = 0b0000,
PositiveAckLimitReached = 0b0001,
KeepAliveLimitReached = 0b0010,
InvalidTransmissionMode = 0b0011,
FilestoreRejection = 0b0100,
FileChecksumFailure = 0b0101,
FileSizeError = 0b0110,
NakLimitReached = 0b0111,
InactivityDetected = 0b1000,
CheckLimitReached = 0b1010,
UnsupportedChecksumType = 0b1011,
/// Not an actual fault condition for which fault handler overrides can be specified
SuspendRequestReceived = 0b1110,
/// Not an actual fault condition for which fault handler overrides can be specified
CancelRequestReceived = 0b1111,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum LargeFileFlag {
/// 32 bit maximum file size and FSS size
Normal = 0,
/// 64 bit maximum file size and FSS size
Large = 1,
}
/// Transaction status for the ACK PDU field according to chapter 5.2.4 of the CFDP standard.
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum TransactionStatus {
/// Transaction is not currently active and the CFDP implementation does not retain a
/// transaction history.
Undefined = 0b00,
Active = 0b01,
/// Transaction was active in the past and was terminated.
Terminated = 0b10,
/// The CFDP implementation does retain a transaction history, and the transaction is not and
/// never was active at this entity.
Unrecognized = 0b11,
}
/// Checksum types according to the
/// [SANA Checksum Types registry](https://sanaregistry.org/r/checksum_identifiers/)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum ChecksumType {
/// Modular legacy checksum
Modular = 0,
Crc32Proximity1 = 1,
Crc32C = 2,
/// Polynomial: 0x4C11DB7. Preferred checksum for now.
Crc32 = 3,
NullChecksum = 15,
}
impl Default for ChecksumType {
fn default() -> Self {
Self::NullChecksum
}
}
pub const NULL_CHECKSUM_U32: [u8; 4] = [0; 4];
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[error("data with size {0} larger than allowed {max} bytes", max = u8::MAX)]
pub struct TlvLvDataTooLargeError(pub usize);
/// First value: Found value. Second value: Expected value if there is one.
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[error("invalid TLV type field, found {found}, expected {expected:?}")]
pub struct InvalidTlvTypeFieldError {
found: u8,
expected: Option<u8>,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum TlvLvError {
#[error("{0}")]
DataTooLarge(#[from] TlvLvDataTooLargeError),
#[error("byte conversion error: {0}")]
ByteConversion(#[from] ByteConversionError),
#[error("{0}")]
InvalidTlvTypeField(#[from] InvalidTlvTypeFieldError),
#[error("invalid value length {0}")]
InvalidValueLength(usize),
/// Only applies to filestore requests and responses. Second name was missing where one is
/// expected.
#[error("second name missing for filestore request or response")]
SecondNameMissing,
/// Invalid action code for filestore requests or responses.
#[error("invalid action code {0}")]
InvalidFilestoreActionCode(u8),
}
#[cfg(test)]
mod tests {
use super::*;
#[cfg(feature = "serde")]
use crate::tests::generic_serde_test;
#[test]
fn test_crc_from_bool() {
assert_eq!(CrcFlag::from(false), CrcFlag::NoCrc);
}
#[test]
fn test_crc_flag_to_bool() {
let is_true: bool = CrcFlag::WithCrc.into();
assert!(is_true);
let is_false: bool = CrcFlag::NoCrc.into();
assert!(!is_false);
}
#[test]
fn test_default_checksum_type() {
let checksum = ChecksumType::default();
assert_eq!(checksum, ChecksumType::NullChecksum);
}
#[test]
fn test_fault_handler_code_from_u8() {
let fault_handler_code_raw = FaultHandlerCode::NoticeOfSuspension as u8;
let fault_handler_code = FaultHandlerCode::try_from(fault_handler_code_raw).unwrap();
assert_eq!(fault_handler_code, FaultHandlerCode::NoticeOfSuspension);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_impl_pdu_type() {
generic_serde_test(PduType::FileData);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_impl_direction() {
generic_serde_test(Direction::TowardsReceiver);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_impl_transmission_mode() {
generic_serde_test(TransmissionMode::Unacknowledged);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_fault_handler_code() {
generic_serde_test(FaultHandlerCode::NoticeOfCancellation);
}
}


@ -1,335 +0,0 @@
use crate::{
cfdp::{ConditionCode, CrcFlag, Direction, TransactionStatus},
ByteConversionError,
};
use super::{
add_pdu_crc, generic_length_checks_pdu_deserialization, CfdpPdu, FileDirectiveType, PduError,
PduHeader, WritablePduPacket,
};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// ACK PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.2.4.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct AckPdu {
pdu_header: PduHeader,
directive_code_of_acked_pdu: FileDirectiveType,
condition_code: ConditionCode,
transaction_status: TransactionStatus,
}
impl AckPdu {
pub fn new(
mut pdu_header: PduHeader,
directive_code_of_acked_pdu: FileDirectiveType,
condition_code: ConditionCode,
transaction_status: TransactionStatus,
) -> Result<Self, PduError> {
if directive_code_of_acked_pdu == FileDirectiveType::EofPdu {
pdu_header.pdu_conf.direction = Direction::TowardsSender;
} else if directive_code_of_acked_pdu == FileDirectiveType::FinishedPdu {
pdu_header.pdu_conf.direction = Direction::TowardsReceiver;
} else {
return Err(PduError::InvalidDirectiveType {
found: directive_code_of_acked_pdu as u8,
expected: None,
});
}
// Force correct direction flag.
let mut ack_pdu = Self {
pdu_header,
directive_code_of_acked_pdu,
condition_code,
transaction_status,
};
ack_pdu.pdu_header.pdu_datafield_len = ack_pdu.calc_pdu_datafield_len() as u16;
Ok(ack_pdu)
}
pub fn new_for_eof_pdu(
pdu_header: PduHeader,
condition_code: ConditionCode,
transaction_status: TransactionStatus,
) -> Self {
// Unwrap okay here, [new] can only fail on invalid directive codes.
Self::new(
pdu_header,
FileDirectiveType::EofPdu,
condition_code,
transaction_status,
)
.unwrap()
}
pub fn new_for_finished_pdu(
pdu_header: PduHeader,
condition_code: ConditionCode,
transaction_status: TransactionStatus,
) -> Self {
// Unwrap okay here, [new] can only fail on invalid directive codes.
Self::new(
pdu_header,
FileDirectiveType::FinishedPdu,
condition_code,
transaction_status,
)
.unwrap()
}
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
pub fn directive_code_of_acked_pdu(&self) -> FileDirectiveType {
self.directive_code_of_acked_pdu
}
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
pub fn transaction_status(&self) -> TransactionStatus {
self.transaction_status
}
fn calc_pdu_datafield_len(&self) -> usize {
if self.crc_flag() == CrcFlag::WithCrc {
return 5;
}
3
}
pub fn from_bytes(buf: &[u8]) -> Result<AckPdu, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
generic_length_checks_pdu_deserialization(buf, current_idx + 3, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::AckPdu),
}
})?;
if directive_type != FileDirectiveType::AckPdu {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::AckPdu,
});
}
current_idx += 1;
let acked_directive_type =
FileDirectiveType::try_from(buf[current_idx] >> 4).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: None,
}
})?;
if acked_directive_type != FileDirectiveType::EofPdu
&& acked_directive_type != FileDirectiveType::FinishedPdu
{
return Err(PduError::InvalidDirectiveType {
found: acked_directive_type as u8,
expected: None,
});
}
current_idx += 1;
let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111)
.map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?;
let transaction_status = TransactionStatus::try_from(buf[current_idx] & 0b11).unwrap();
Self::new(
pdu_header,
acked_directive_type,
condition_code,
transaction_status,
)
}
}
impl CfdpPdu for AckPdu {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::AckPdu)
}
}
impl WritablePduPacket for AckPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: expected_len,
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::AckPdu as u8;
current_idx += 1;
buf[current_idx] = (self.directive_code_of_acked_pdu as u8) << 4;
if self.directive_code_of_acked_pdu == FileDirectiveType::FinishedPdu {
// This is the directive subtype code. It needs to be set to 0b0001 if the ACK PDU
// acknowledges a Finished PDU, and to 0b0000 otherwise.
buf[current_idx] |= 0b0001;
}
current_idx += 1;
buf[current_idx] = ((self.condition_code as u8) << 4) | (self.transaction_status as u8);
current_idx += 1;
if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
#[cfg(test)]
mod tests {
use crate::cfdp::{
pdu::tests::{common_pdu_conf, verify_raw_header, TEST_DEST_ID, TEST_SEQ_NUM, TEST_SRC_ID},
LargeFileFlag, PduType, TransmissionMode,
};
use super::*;
#[cfg(feature = "serde")]
use crate::tests::generic_serde_test;
fn verify_state(ack_pdu: &AckPdu, expected_crc_flag: CrcFlag, expected_dir: Direction) {
assert_eq!(ack_pdu.condition_code(), ConditionCode::NoError);
assert_eq!(ack_pdu.transaction_status(), TransactionStatus::Active);
assert_eq!(ack_pdu.crc_flag(), expected_crc_flag);
assert_eq!(ack_pdu.file_flag(), LargeFileFlag::Normal);
assert_eq!(ack_pdu.pdu_type(), PduType::FileDirective);
assert_eq!(
ack_pdu.file_directive_type(),
Some(FileDirectiveType::AckPdu)
);
assert_eq!(ack_pdu.transmission_mode(), TransmissionMode::Acknowledged);
assert_eq!(ack_pdu.direction(), expected_dir);
assert_eq!(ack_pdu.source_id(), TEST_SRC_ID.into());
assert_eq!(ack_pdu.dest_id(), TEST_DEST_ID.into());
assert_eq!(ack_pdu.transaction_seq_num(), TEST_SEQ_NUM.into());
}
#[test]
fn test_basic() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let ack_pdu = AckPdu::new(
pdu_header,
FileDirectiveType::FinishedPdu,
ConditionCode::NoError,
TransactionStatus::Active,
)
.expect("creating ACK PDU failed");
assert_eq!(
ack_pdu.directive_code_of_acked_pdu(),
FileDirectiveType::FinishedPdu
);
verify_state(&ack_pdu, CrcFlag::NoCrc, Direction::TowardsReceiver);
}
fn generic_serialization_test(
condition_code: ConditionCode,
transaction_status: TransactionStatus,
) {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu(pdu_header, condition_code, transaction_status);
let mut buf: [u8; 64] = [0; 64];
let res = ack_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, ack_pdu.len_written());
verify_raw_header(ack_pdu.pdu_header(), &buf);
assert_eq!(buf[7], FileDirectiveType::AckPdu as u8);
assert_eq!((buf[8] >> 4) & 0b1111, FileDirectiveType::FinishedPdu as u8);
assert_eq!(buf[8] & 0b1111, 0b0001);
assert_eq!((buf[9] >> 4) & 0b1111, condition_code as u8);
assert_eq!(buf[9] & 0b11, transaction_status as u8);
assert_eq!(written, 10);
}
#[test]
fn test_serialization_no_error() {
generic_serialization_test(ConditionCode::NoError, TransactionStatus::Active);
}
#[test]
fn test_serialization_fs_error() {
generic_serialization_test(ConditionCode::FileSizeError, TransactionStatus::Terminated);
}
#[test]
fn test_deserialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu(
pdu_header,
ConditionCode::NoError,
TransactionStatus::Active,
);
let ack_vec = ack_pdu.to_vec().unwrap();
let ack_deserialized =
AckPdu::from_bytes(&ack_vec).expect("ACK PDU deserialization failed");
assert_eq!(ack_deserialized, ack_pdu);
}
#[test]
fn test_with_crc() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu(
pdu_header,
ConditionCode::NoError,
TransactionStatus::Active,
);
let ack_vec = ack_pdu.to_vec().unwrap();
assert_eq!(ack_vec.len(), ack_pdu.len_written());
assert_eq!(ack_vec.len(), 12);
let ack_deserialized =
AckPdu::from_bytes(&ack_vec).expect("ACK PDU deserialization failed");
assert_eq!(ack_deserialized, ack_pdu);
}
#[test]
fn test_for_eof_pdu() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_eof_pdu(
pdu_header,
ConditionCode::NoError,
TransactionStatus::Active,
);
assert_eq!(
ack_pdu.directive_code_of_acked_pdu(),
FileDirectiveType::EofPdu
);
verify_state(&ack_pdu, CrcFlag::WithCrc, Direction::TowardsSender);
}
#[test]
#[cfg(feature = "serde")]
fn test_ack_pdu_serialization() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_eof_pdu(
pdu_header,
ConditionCode::NoError,
TransactionStatus::Active,
);
generic_serde_test(ack_pdu);
}
}

View File

@ -1,368 +0,0 @@
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
FileDirectiveType, PduError, PduHeader,
};
use crate::cfdp::tlv::{EntityIdTlv, WritableTlv};
use crate::cfdp::{ConditionCode, CrcFlag, Direction, LargeFileFlag};
use crate::ByteConversionError;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use super::{CfdpPdu, WritablePduPacket};
/// EOF PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.2.2.
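///
/// A minimal usage sketch (illustrative only; assumes `PduHeader`, `CommonPduConfig` and the
/// `WritablePduPacket` trait are in scope):
///
/// ```ignore
/// let pdu_header = PduHeader::new_no_file_data(CommonPduConfig::default(), 0);
/// // EOF for an error-free transfer of a 12 byte file with checksum 0x01020304.
/// let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
/// let mut buf: [u8; 32] = [0; 32];
/// let written_len = eof_pdu.write_to_bytes(&mut buf).unwrap();
/// ```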
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct EofPdu {
pdu_header: PduHeader,
condition_code: ConditionCode,
file_checksum: u32,
file_size: u64,
fault_location: Option<EntityIdTlv>,
}
impl EofPdu {
pub fn new(
mut pdu_header: PduHeader,
condition_code: ConditionCode,
file_checksum: u32,
file_size: u64,
fault_location: Option<EntityIdTlv>,
) -> Self {
// Force correct direction flag.
pdu_header.pdu_conf.direction = Direction::TowardsReceiver;
let mut eof_pdu = Self {
pdu_header,
condition_code,
file_checksum,
file_size,
fault_location,
};
eof_pdu.pdu_header.pdu_datafield_len = eof_pdu.calc_pdu_datafield_len() as u16;
eof_pdu
}
pub fn new_no_error(pdu_header: PduHeader, file_checksum: u32, file_size: u64) -> Self {
Self::new(
pdu_header,
ConditionCode::NoError,
file_checksum,
file_size,
None,
)
}
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
pub fn file_checksum(&self) -> u32 {
self.file_checksum
}
pub fn file_size(&self) -> u64 {
self.file_size
}
fn calc_pdu_datafield_len(&self) -> usize {
// One directive type octet, one condition code octet (4 bits condition code, 4 spare bits),
// the 4 byte file checksum and at least a 4 byte FSS field.
let mut len = 2 + core::mem::size_of::<u32>() + 4;
if self.pdu_header.pdu_conf.file_flag == LargeFileFlag::Large {
len += 4;
}
if let Some(fault_location) = self.fault_location {
len += fault_location.len_full();
}
if self.crc_flag() == CrcFlag::WithCrc {
len += 2;
}
len
}
pub fn from_bytes(buf: &[u8]) -> Result<EofPdu, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let is_large_file = pdu_header.pdu_conf.file_flag == LargeFileFlag::Large;
let mut min_expected_len = 2 + 4 + 4;
if is_large_file {
min_expected_len += 4;
}
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::EofPdu),
}
})?;
if directive_type != FileDirectiveType::EofPdu {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::EofPdu,
});
}
current_idx += 1;
let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111)
.map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?;
current_idx += 1;
let file_checksum =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
current_idx += 4;
let (fss_field_len, file_size) =
read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx..]);
current_idx += fss_field_len;
let mut fault_location = None;
if condition_code != ConditionCode::NoError && current_idx < full_len_without_crc {
fault_location = Some(EntityIdTlv::from_bytes(&buf[current_idx..])?);
}
Ok(Self {
pdu_header,
condition_code,
file_checksum,
file_size,
fault_location,
})
}
}
impl CfdpPdu for EofPdu {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::EofPdu)
}
}
impl WritablePduPacket for EofPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: expected_len,
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::EofPdu as u8;
current_idx += 1;
buf[current_idx] = (self.condition_code as u8) << 4;
current_idx += 1;
buf[current_idx..current_idx + 4].copy_from_slice(&self.file_checksum.to_be_bytes());
current_idx += 4;
current_idx += write_fss_field(
self.pdu_header.pdu_conf.file_flag,
self.file_size,
&mut buf[current_idx..],
)?;
if let Some(fault_location) = self.fault_location {
current_idx += fault_location.write_to_bytes(&mut buf[current_idx..])?;
}
if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cfdp::pdu::tests::{
common_pdu_conf, verify_raw_header, TEST_DEST_ID, TEST_SEQ_NUM, TEST_SRC_ID,
};
use crate::cfdp::pdu::{FileDirectiveType, PduHeader};
use crate::cfdp::{ConditionCode, CrcFlag, LargeFileFlag, PduType, TransmissionMode};
#[cfg(feature = "serde")]
use crate::tests::generic_serde_test;
use crate::util::{UnsignedByteFieldU16, UnsignedEnum};
fn verify_state_no_error_no_crc(eof_pdu: &EofPdu, file_flag: LargeFileFlag) {
verify_state(eof_pdu, CrcFlag::NoCrc, file_flag, ConditionCode::NoError);
}
fn verify_state(
eof_pdu: &EofPdu,
crc_flag: CrcFlag,
file_flag: LargeFileFlag,
cond_code: ConditionCode,
) {
assert_eq!(eof_pdu.file_checksum(), 0x01020304);
assert_eq!(eof_pdu.file_size(), 12);
assert_eq!(eof_pdu.condition_code(), cond_code);
assert_eq!(eof_pdu.crc_flag(), crc_flag);
assert_eq!(eof_pdu.file_flag(), file_flag);
assert_eq!(eof_pdu.pdu_type(), PduType::FileDirective);
assert_eq!(
eof_pdu.file_directive_type(),
Some(FileDirectiveType::EofPdu)
);
assert_eq!(eof_pdu.transmission_mode(), TransmissionMode::Acknowledged);
assert_eq!(eof_pdu.direction(), Direction::TowardsReceiver);
assert_eq!(eof_pdu.source_id(), TEST_SRC_ID.into());
assert_eq!(eof_pdu.dest_id(), TEST_DEST_ID.into());
assert_eq!(eof_pdu.transaction_seq_num(), TEST_SEQ_NUM.into());
}
#[test]
fn test_basic() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
assert_eq!(eof_pdu.len_written(), pdu_header.header_len() + 2 + 4 + 4);
verify_state_no_error_no_crc(&eof_pdu, LargeFileFlag::Normal);
}
#[test]
fn test_serialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
let res = eof_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, eof_pdu.len_written());
verify_raw_header(eof_pdu.pdu_header(), &buf);
let mut current_idx = eof_pdu.pdu_header().header_len();
buf[current_idx] = FileDirectiveType::EofPdu as u8;
current_idx += 1;
assert_eq!(
(buf[current_idx] >> 4) & 0b1111,
ConditionCode::NoError as u8
);
current_idx += 1;
assert_eq!(
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()),
0x01020304
);
current_idx += 4;
assert_eq!(
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()),
12
);
current_idx += 4;
assert_eq!(current_idx, written);
}
#[test]
fn test_deserialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
eof_pdu.write_to_bytes(&mut buf).unwrap();
let eof_read_back = EofPdu::from_bytes(&buf);
if let Err(e) = eof_read_back {
panic!("deserialization failed with: {e}")
}
let eof_read_back = eof_read_back.unwrap();
assert_eq!(eof_read_back, eof_pdu);
}
#[test]
fn test_write_to_vec() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
let written = eof_pdu.write_to_bytes(&mut buf).unwrap();
let pdu_vec = eof_pdu.to_vec().unwrap();
assert_eq!(buf[0..written], pdu_vec);
}
#[test]
fn test_with_crc() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
let written = eof_pdu.write_to_bytes(&mut buf).unwrap();
assert_eq!(written, eof_pdu.len_written());
let eof_from_raw = EofPdu::from_bytes(&buf).expect("creating EOF PDU failed");
assert_eq!(eof_from_raw, eof_pdu);
buf[written - 1] -= 1;
let crc: u16 = ((buf[written - 2] as u16) << 8) | buf[written - 1] as u16;
let error = EofPdu::from_bytes(&buf).unwrap_err();
if let PduError::Checksum(e) = error {
assert_eq!(e, crc);
} else {
panic!("expected crc error");
}
}
#[test]
fn test_with_large_file_flag() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Large);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
verify_state_no_error_no_crc(&eof_pdu, LargeFileFlag::Large);
assert_eq!(eof_pdu.len_written(), pdu_header.header_len() + 2 + 8 + 4);
}
#[test]
#[cfg(feature = "serde")]
fn test_eof_serde() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
generic_serde_test(eof_pdu);
}
fn generic_test_with_fault_location_and_error(crc: CrcFlag) {
let pdu_conf = common_pdu_conf(crc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new(
pdu_header,
ConditionCode::FileChecksumFailure,
0x01020304,
12,
Some(EntityIdTlv::new(UnsignedByteFieldU16::new(5).into())),
);
let mut expected_len = pdu_header.header_len() + 2 + 4 + 4 + 4;
if crc == CrcFlag::WithCrc {
expected_len += 2;
}
// Entity ID TLV increases the length by 4.
assert_eq!(eof_pdu.len_written(), expected_len);
verify_state(
&eof_pdu,
crc,
LargeFileFlag::Normal,
ConditionCode::FileChecksumFailure,
);
let eof_vec = eof_pdu.to_vec().unwrap();
let eof_read_back = EofPdu::from_bytes(&eof_vec);
if let Err(e) = eof_read_back {
panic!("deserialization failed with: {e}")
}
let eof_read_back = eof_read_back.unwrap();
assert_eq!(eof_read_back, eof_pdu);
assert!(eof_read_back.fault_location.is_some());
assert_eq!(eof_read_back.fault_location.unwrap().entity_id().value(), 5);
assert_eq!(eof_read_back.fault_location.unwrap().entity_id().size(), 2);
}
#[test]
fn test_with_fault_location_and_error() {
generic_test_with_fault_location_and_error(CrcFlag::NoCrc);
}
#[test]
fn test_with_fault_location_and_error_and_crc() {
generic_test_with_fault_location_and_error(CrcFlag::WithCrc);
}
}

View File

@ -1,814 +0,0 @@
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
PduError, PduHeader,
};
use crate::cfdp::{CrcFlag, LargeFileFlag, PduType, SegmentMetadataFlag};
use crate::ByteConversionError;
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use super::{CfdpPdu, FileDirectiveType, WritablePduPacket};
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum RecordContinuationState {
NoStartNoEnd = 0b00,
StartWithoutEnd = 0b01,
EndWithoutStart = 0b10,
StartAndEnd = 0b11,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct SegmentMetadata<'seg_meta> {
record_continuation_state: RecordContinuationState,
metadata: Option<&'seg_meta [u8]>,
}
impl<'seg_meta> SegmentMetadata<'seg_meta> {
pub fn new(
record_continuation_state: RecordContinuationState,
metadata: Option<&'seg_meta [u8]>,
) -> Option<Self> {
if let Some(metadata) = metadata {
if metadata.len() > 2_usize.pow(6) - 1 {
return None;
}
}
Some(Self {
record_continuation_state,
metadata,
})
}
pub fn record_continuation_state(&self) -> RecordContinuationState {
self.record_continuation_state
}
pub fn metadata(&self) -> Option<&'seg_meta [u8]> {
self.metadata
}
pub fn written_len(&self) -> usize {
// Map empty metadata to 0 and slice to its length.
1 + self.metadata.map_or(0, |meta| meta.len())
}
pub(crate) fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.written_len() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.written_len(),
});
}
buf[0] = ((self.record_continuation_state as u8) << 6)
| self.metadata.map_or(0, |meta| meta.len() as u8);
if let Some(metadata) = self.metadata {
buf[1..1 + metadata.len()].copy_from_slice(metadata)
}
Ok(self.written_len())
}
pub(crate) fn from_bytes(buf: &'seg_meta [u8]) -> Result<Self, ByteConversionError> {
if buf.is_empty() {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: 2,
});
}
let mut metadata = None;
let seg_metadata_len = (buf[0] & 0b111111) as usize;
if seg_metadata_len > 0 {
metadata = Some(&buf[1..1 + seg_metadata_len]);
}
Ok(Self {
// Can't fail, only 2 bits
record_continuation_state: RecordContinuationState::try_from((buf[0] >> 6) & 0b11)
.unwrap(),
metadata,
})
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
struct FdPduBase<'seg_meta> {
pdu_header: PduHeader,
#[cfg_attr(feature = "serde", serde(borrow))]
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
}
impl CfdpPdu for FdPduBase<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
None
}
}
impl FdPduBase<'_> {
fn calc_pdu_datafield_len(&self, file_data_len: u64) -> usize {
let mut len = core::mem::size_of::<u32>();
if self.pdu_header.pdu_conf.file_flag == LargeFileFlag::Large {
len += 4;
}
if self.segment_metadata.is_some() {
len += self.segment_metadata.as_ref().unwrap().written_len()
}
len += file_data_len as usize;
if self.crc_flag() == CrcFlag::WithCrc {
len += 2;
}
len
}
fn write_common_fields_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
if self.segment_metadata.is_some() {
current_idx += self
.segment_metadata
.as_ref()
.unwrap()
.write_to_bytes(&mut buf[current_idx..])?;
}
current_idx += write_fss_field(
self.pdu_header.common_pdu_conf().file_flag,
self.offset,
&mut buf[current_idx..],
)?;
Ok(current_idx)
}
}
/// File Data PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.3.
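///
/// A minimal usage sketch (illustrative only; assumes `PduHeader`, `CommonPduConfig` and the
/// `WritablePduPacket` trait are in scope):
///
/// ```ignore
/// let pdu_header = PduHeader::new_for_file_data_default(CommonPduConfig::default(), 0);
/// let file_data: [u8; 4] = [1, 2, 3, 4];
/// // File data segment starting at file offset 10, without segment metadata.
/// let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
/// let mut buf: [u8; 32] = [0; 32];
/// let written_len = fd_pdu.write_to_bytes(&mut buf).unwrap();
/// ```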
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FileDataPdu<'seg_meta, 'file_data> {
#[cfg_attr(feature = "serde", serde(borrow))]
common: FdPduBase<'seg_meta>,
file_data: &'file_data [u8],
}
impl<'seg_meta, 'file_data> FileDataPdu<'seg_meta, 'file_data> {
pub fn new_with_seg_metadata(
pdu_header: PduHeader,
segment_metadata: SegmentMetadata<'seg_meta>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new_generic(pdu_header, Some(segment_metadata), offset, file_data)
}
pub fn new_no_seg_metadata(
pdu_header: PduHeader,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new_generic(pdu_header, None, offset, file_data)
}
pub fn new_generic(
mut pdu_header: PduHeader,
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
pdu_header.pdu_type = PduType::FileData;
if segment_metadata.is_some() {
pdu_header.seg_metadata_flag = SegmentMetadataFlag::Present;
}
let mut pdu = Self {
common: FdPduBase {
pdu_header,
segment_metadata,
offset,
},
file_data,
};
pdu.common.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
fn calc_pdu_datafield_len(&self) -> usize {
self.common
.calc_pdu_datafield_len(self.file_data.len() as u64)
}
pub fn segment_metadata(&self) -> Option<&SegmentMetadata> {
self.common.segment_metadata.as_ref()
}
pub fn offset(&self) -> u64 {
self.common.offset
}
pub fn file_data(&self) -> &'file_data [u8] {
self.file_data
}
pub fn from_bytes<'buf: 'seg_meta + 'file_data>(buf: &'buf [u8]) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let min_expected_len = current_idx + core::mem::size_of::<u32>();
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let mut segment_metadata = None;
if pdu_header.seg_metadata_flag == SegmentMetadataFlag::Present {
segment_metadata = Some(SegmentMetadata::from_bytes(&buf[current_idx..])?);
current_idx += segment_metadata.as_ref().unwrap().written_len();
}
let (fss, offset) = read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx..]);
current_idx += fss;
if current_idx > full_len_without_crc {
return Err(ByteConversionError::FromSliceTooSmall {
found: current_idx,
expected: full_len_without_crc,
}
.into());
}
Ok(Self {
common: FdPduBase {
pdu_header,
segment_metadata,
offset,
},
file_data: &buf[current_idx..full_len_without_crc],
})
}
}
impl CfdpPdu for FileDataPdu<'_, '_> {
fn pdu_header(&self) -> &PduHeader {
&self.common.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
None
}
}
impl WritablePduPacket for FileDataPdu<'_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
if buf.len() < self.len_written() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.len_written(),
}
.into());
}
let mut current_idx = self.common.write_common_fields_to_bytes(buf)?;
buf[current_idx..current_idx + self.file_data.len()].copy_from_slice(self.file_data);
current_idx += self.file_data.len();
if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
fn len_written(&self) -> usize {
self.common.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
/// File Data PDU creator abstraction.
///
/// This special creator object allows reading file data into the PDU buffer directly. This
/// avoids the need for an additional intermediate buffer when creating a file data PDU. This
/// structure therefore does not implement the regular [WritablePduPacket] trait.
///
/// For more information, refer to CFDP chapter 5.3.
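///
/// A minimal usage sketch (illustrative only; assumes `PduHeader` and `CommonPduConfig` are in
/// scope and that the 12 bytes of file data come from some user-provided source):
///
/// ```ignore
/// let pdu_header = PduHeader::new_for_file_data_default(CommonPduConfig::default(), 0);
/// let creator = FileDataPduCreatorWithReservedDatafield::new_no_seg_metadata(pdu_header, 0, 12);
/// let mut buf: [u8; 64] = [0; 64];
/// let mut pdu_with_unwritten_data = creator
///     .write_to_bytes_partially(&mut buf)
///     .expect("partial write failed");
/// // Read the file data into the reserved field directly, e.g. from the filesystem.
/// pdu_with_unwritten_data
///     .file_data_field_mut()
///     .copy_from_slice(b"hello world!");
/// // Finalize the PDU. This also adds the CRC where applicable.
/// let written_len = pdu_with_unwritten_data.finish();
/// ```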
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FileDataPduCreatorWithReservedDatafield<'seg_meta> {
#[cfg_attr(feature = "serde", serde(borrow))]
common: FdPduBase<'seg_meta>,
file_data_len: u64,
}
impl<'seg_meta> FileDataPduCreatorWithReservedDatafield<'seg_meta> {
pub fn new_with_seg_metadata(
pdu_header: PduHeader,
segment_metadata: SegmentMetadata<'seg_meta>,
offset: u64,
file_data_len: u64,
) -> Self {
Self::new_generic(pdu_header, Some(segment_metadata), offset, file_data_len)
}
pub fn new_no_seg_metadata(pdu_header: PduHeader, offset: u64, file_data_len: u64) -> Self {
Self::new_generic(pdu_header, None, offset, file_data_len)
}
pub fn new_generic(
mut pdu_header: PduHeader,
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data_len: u64,
) -> Self {
pdu_header.pdu_type = PduType::FileData;
if segment_metadata.is_some() {
pdu_header.seg_metadata_flag = SegmentMetadataFlag::Present;
}
let mut pdu = Self {
common: FdPduBase {
pdu_header,
segment_metadata,
offset,
},
file_data_len,
};
pdu.common.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
fn calc_pdu_datafield_len(&self) -> usize {
self.common.calc_pdu_datafield_len(self.file_data_len)
}
pub fn len_written(&self) -> usize {
self.common.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
/// This function performs a partial write by writing all data except the file data
/// and the CRC.
///
/// It returns a [FileDataPduCreatorWithUnwrittenData] which provides a mutable slice to
/// the reserved file data field. The user can read file data into this field directly and
/// then finish the PDU creation using the [FileDataPduCreatorWithUnwrittenData::finish] call.
pub fn write_to_bytes_partially<'buf>(
&self,
buf: &'buf mut [u8],
) -> Result<FileDataPduCreatorWithUnwrittenData<'buf>, PduError> {
if buf.len() < self.len_written() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.len_written(),
}
.into());
}
let mut current_idx = self.common.write_common_fields_to_bytes(buf)?;
let file_data_offset = current_idx as u64;
current_idx += self.file_data_len as usize;
if self.crc_flag() == CrcFlag::WithCrc {
current_idx += 2;
}
Ok(FileDataPduCreatorWithUnwrittenData {
write_buf: &mut buf[0..current_idx],
file_data_offset,
file_data_len: self.file_data_len,
needs_crc: self.crc_flag() == CrcFlag::WithCrc,
})
}
}
impl CfdpPdu for FileDataPduCreatorWithReservedDatafield<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.common.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
None
}
}
/// This structure is created with [FileDataPduCreatorWithReservedDatafield::write_to_bytes_partially]
/// and provides an API to read file data from the virtual filesystem into the file data PDU buffer
/// directly.
///
/// This structure provides a mutable slice to the reserved file data field. The user can read
/// file data into this field directly and then finish the PDU creation using the
/// [FileDataPduCreatorWithUnwrittenData::finish] call.
pub struct FileDataPduCreatorWithUnwrittenData<'buf> {
write_buf: &'buf mut [u8],
file_data_offset: u64,
file_data_len: u64,
needs_crc: bool,
}
impl FileDataPduCreatorWithUnwrittenData<'_> {
pub fn file_data_field_mut(&mut self) -> &mut [u8] {
&mut self.write_buf[self.file_data_offset as usize
..self.file_data_offset as usize + self.file_data_len as usize]
}
/// This function needs to be called to add a CRC to the file data PDU where applicable.
///
/// It returns the full written size of the PDU.
pub fn finish(self) -> usize {
if self.needs_crc {
add_pdu_crc(
self.write_buf,
self.file_data_offset as usize + self.file_data_len as usize,
);
}
self.write_buf.len()
}
}
/// This function can be used to calculate the maximum allowed file segment size for a given
/// maximum packet length and, if present, the segment metadata.
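///
/// A minimal sketch (illustrative only; the concrete numbers mirror the unit tests below and
/// assume the default PDU configuration with its 7 byte header, no CRC and no segment metadata):
///
/// ```ignore
/// let pdu_header = PduHeader::new_for_file_data_default(CommonPduConfig::default(), 0);
/// // 64 byte packet - 7 byte header - 4 byte offset field = 53 bytes for the file segment.
/// let max_seg_len =
///     calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 64, None);
/// assert_eq!(max_seg_len, 53);
/// ```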
pub fn calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(
pdu_header: &PduHeader,
max_packet_len: usize,
segment_metadata: Option<&SegmentMetadata>,
) -> usize {
let mut subtract = pdu_header.header_len();
if segment_metadata.is_some() {
subtract += 1 + segment_metadata.as_ref().unwrap().metadata().unwrap().len();
}
if pdu_header.common_pdu_conf().file_flag == LargeFileFlag::Large {
subtract += 8;
} else {
subtract += 4;
}
if pdu_header.common_pdu_conf().crc_flag == CrcFlag::WithCrc {
subtract += 2;
}
max_packet_len.saturating_sub(subtract)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cfdp::pdu::tests::{TEST_DEST_ID, TEST_SEQ_NUM, TEST_SRC_ID};
use crate::cfdp::pdu::{CommonPduConfig, PduHeader};
use crate::cfdp::{Direction, SegmentMetadataFlag, SegmentationControl, TransmissionMode};
#[cfg(feature = "serde")]
use postcard::{from_bytes, to_allocvec};
#[test]
fn test_basic() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
assert_eq!(fd_pdu.file_data(), file_data);
assert_eq!(fd_pdu.offset(), 10);
assert!(fd_pdu.segment_metadata().is_none());
assert_eq!(
fd_pdu.len_written(),
fd_pdu.pdu_header().header_len() + core::mem::size_of::<u32>() + 4
);
assert_eq!(fd_pdu.crc_flag(), CrcFlag::NoCrc);
assert_eq!(fd_pdu.file_flag(), LargeFileFlag::Normal);
assert_eq!(fd_pdu.pdu_type(), PduType::FileData);
assert_eq!(fd_pdu.file_directive_type(), None);
assert_eq!(fd_pdu.transmission_mode(), TransmissionMode::Acknowledged);
assert_eq!(fd_pdu.direction(), Direction::TowardsReceiver);
assert_eq!(fd_pdu.source_id(), TEST_SRC_ID.into());
assert_eq!(fd_pdu.dest_id(), TEST_DEST_ID.into());
assert_eq!(fd_pdu.transaction_seq_num(), TEST_SEQ_NUM.into());
}
#[test]
fn test_serialization() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
let mut buf: [u8; 32] = [0; 32];
let res = fd_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(
written,
fd_pdu.pdu_header().header_len() + core::mem::size_of::<u32>() + 4
);
let mut current_idx = fd_pdu.pdu_header().header_len();
let file_size = u32::from_be_bytes(
buf[fd_pdu.pdu_header().header_len()..fd_pdu.pdu_header().header_len() + 4]
.try_into()
.unwrap(),
);
current_idx += 4;
assert_eq!(file_size, 10);
assert_eq!(buf[current_idx], 1);
current_idx += 1;
assert_eq!(buf[current_idx], 2);
current_idx += 1;
assert_eq!(buf[current_idx], 3);
current_idx += 1;
assert_eq!(buf[current_idx], 4);
}
#[test]
fn test_write_to_vec() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
let mut buf: [u8; 64] = [0; 64];
let written = fd_pdu.write_to_bytes(&mut buf).unwrap();
let pdu_vec = fd_pdu.to_vec().unwrap();
assert_eq!(buf[0..written], pdu_vec);
}
#[test]
fn test_deserialization() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
let mut buf: [u8; 32] = [0; 32];
fd_pdu.write_to_bytes(&mut buf).unwrap();
let fd_pdu_read_back = FileDataPdu::from_bytes(&buf);
assert!(fd_pdu_read_back.is_ok());
let fd_pdu_read_back = fd_pdu_read_back.unwrap();
assert_eq!(fd_pdu_read_back, fd_pdu);
}
#[test]
fn test_with_crc() {
let mut common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
common_conf.crc_flag = true.into();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
let mut buf: [u8; 64] = [0; 64];
let written = fd_pdu.write_to_bytes(&mut buf).unwrap();
assert_eq!(written, fd_pdu.len_written());
let finished_pdu_from_raw = FileDataPdu::from_bytes(&buf).unwrap();
assert_eq!(finished_pdu_from_raw, fd_pdu);
buf[written - 1] -= 1;
let crc: u16 = ((buf[written - 2] as u16) << 8) | buf[written - 1] as u16;
let error = FileDataPdu::from_bytes(&buf).unwrap_err();
if let PduError::Checksum(e) = error {
assert_eq!(e, crc);
} else {
panic!("expected crc error");
}
}
#[test]
fn test_with_seg_metadata_serialization() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data(
common_conf,
0,
SegmentMetadataFlag::Present,
SegmentationControl::WithRecordBoundaryPreservation,
);
let file_data: [u8; 4] = [1, 2, 3, 4];
let seg_metadata: [u8; 4] = [4, 3, 2, 1];
let segment_meta =
SegmentMetadata::new(RecordContinuationState::StartAndEnd, Some(&seg_metadata))
.unwrap();
let fd_pdu = FileDataPdu::new_with_seg_metadata(pdu_header, segment_meta, 10, &file_data);
assert!(fd_pdu.segment_metadata().is_some());
assert_eq!(*fd_pdu.segment_metadata().unwrap(), segment_meta);
assert_eq!(
fd_pdu.len_written(),
fd_pdu.pdu_header().header_len()
+ 1
+ seg_metadata.len()
+ core::mem::size_of::<u32>()
+ 4
);
let mut buf: [u8; 32] = [0; 32];
fd_pdu
.write_to_bytes(&mut buf)
.expect("writing FD PDU failed");
let mut current_idx = fd_pdu.pdu_header().header_len();
assert_eq!(
RecordContinuationState::try_from((buf[current_idx] >> 6) & 0b11).unwrap(),
RecordContinuationState::StartAndEnd
);
assert_eq!((buf[current_idx] & 0b111111) as usize, seg_metadata.len());
current_idx += 1;
assert_eq!(buf[current_idx], 4);
current_idx += 1;
assert_eq!(buf[current_idx], 3);
current_idx += 1;
assert_eq!(buf[current_idx], 2);
current_idx += 1;
assert_eq!(buf[current_idx], 1);
current_idx += 1;
// Still verify that the rest is written correctly.
assert_eq!(
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()),
10
);
current_idx += 4;
assert_eq!(buf[current_idx], 1);
current_idx += 1;
assert_eq!(buf[current_idx], 2);
current_idx += 1;
assert_eq!(buf[current_idx], 3);
current_idx += 1;
assert_eq!(buf[current_idx], 4);
current_idx += 1;
assert_eq!(current_idx, fd_pdu.len_written());
}
#[test]
fn test_with_seg_metadata_deserialization() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data(
common_conf,
0,
SegmentMetadataFlag::Present,
SegmentationControl::WithRecordBoundaryPreservation,
);
let file_data: [u8; 4] = [1, 2, 3, 4];
let seg_metadata: [u8; 4] = [4, 3, 2, 1];
let segment_meta =
SegmentMetadata::new(RecordContinuationState::StartAndEnd, Some(&seg_metadata))
.unwrap();
let fd_pdu = FileDataPdu::new_with_seg_metadata(pdu_header, segment_meta, 10, &file_data);
let mut buf: [u8; 32] = [0; 32];
fd_pdu
.write_to_bytes(&mut buf)
.expect("writing FD PDU failed");
let fd_pdu_read_back = FileDataPdu::from_bytes(&buf);
assert!(fd_pdu_read_back.is_ok());
let fd_pdu_read_back = fd_pdu_read_back.unwrap();
assert_eq!(fd_pdu_read_back, fd_pdu);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_serialization() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
let output = to_allocvec(&fd_pdu).unwrap();
let output_converted_back: FileDataPdu = from_bytes(&output).unwrap();
assert_eq!(output_converted_back, fd_pdu);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_serialization_with_seg_metadata() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data(
common_conf,
0,
SegmentMetadataFlag::Present,
SegmentationControl::WithRecordBoundaryPreservation,
);
let file_data: [u8; 4] = [1, 2, 3, 4];
let seg_metadata: [u8; 4] = [4, 3, 2, 1];
let segment_meta =
SegmentMetadata::new(RecordContinuationState::StartAndEnd, Some(&seg_metadata))
.unwrap();
let fd_pdu = FileDataPdu::new_with_seg_metadata(pdu_header, segment_meta, 10, &file_data);
let output = to_allocvec(&fd_pdu).unwrap();
let output_converted_back: FileDataPdu = from_bytes(&output).unwrap();
assert_eq!(output_converted_back, fd_pdu);
}
#[test]
fn test_fd_pdu_creator_with_reserved_field_no_crc() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let test_str = "hello world!";
let fd_pdu = FileDataPduCreatorWithReservedDatafield::new_no_seg_metadata(
pdu_header,
10,
test_str.len() as u64,
);
let mut write_buf: [u8; 64] = [0; 64];
let mut pdu_unwritten = fd_pdu
.write_to_bytes_partially(&mut write_buf)
.expect("partial write failed");
pdu_unwritten
.file_data_field_mut()
.copy_from_slice(test_str.as_bytes());
pdu_unwritten.finish();
let pdu_reader = FileDataPdu::from_bytes(&write_buf).expect("reading file data PDU failed");
assert_eq!(
core::str::from_utf8(pdu_reader.file_data()).expect("reading utf8 string failed"),
"hello world!"
);
}
#[test]
fn test_fd_pdu_creator_with_reserved_field_with_crc() {
let mut common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
common_conf.crc_flag = true.into();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let test_str = "hello world!";
let fd_pdu = FileDataPduCreatorWithReservedDatafield::new_no_seg_metadata(
pdu_header,
10,
test_str.len() as u64,
);
let mut write_buf: [u8; 64] = [0; 64];
let mut pdu_unwritten = fd_pdu
.write_to_bytes_partially(&mut write_buf)
.expect("partial write failed");
pdu_unwritten
.file_data_field_mut()
.copy_from_slice(test_str.as_bytes());
pdu_unwritten.finish();
let pdu_reader = FileDataPdu::from_bytes(&write_buf).expect("reading file data PDU failed");
assert_eq!(
core::str::from_utf8(pdu_reader.file_data()).expect("reading utf8 string failed"),
"hello world!"
);
}
#[test]
fn test_fd_pdu_creator_with_reserved_field_with_crc_without_finish_fails() {
let mut common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
common_conf.crc_flag = true.into();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let test_str = "hello world!";
let fd_pdu = FileDataPduCreatorWithReservedDatafield::new_no_seg_metadata(
pdu_header,
10,
test_str.len() as u64,
);
let mut write_buf: [u8; 64] = [0; 64];
let mut pdu_unwritten = fd_pdu
.write_to_bytes_partially(&mut write_buf)
.expect("partial write failed");
pdu_unwritten
.file_data_field_mut()
.copy_from_slice(test_str.as_bytes());
let pdu_reader_error = FileDataPdu::from_bytes(&write_buf);
assert!(pdu_reader_error.is_err());
let error = pdu_reader_error.unwrap_err();
match error {
PduError::Checksum(_) => (),
_ => {
panic!("unexpected PDU error {}", error)
}
}
}
#[test]
fn test_max_file_seg_calculator_0() {
let pdu_header = PduHeader::new_for_file_data_default(CommonPduConfig::default(), 0);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 64, None),
53
);
}
#[test]
fn test_max_file_seg_calculator_1() {
let common_conf = CommonPduConfig {
crc_flag: CrcFlag::WithCrc,
..Default::default()
};
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 64, None),
51
);
}
#[test]
fn test_max_file_seg_calculator_2() {
let common_conf = CommonPduConfig {
file_flag: LargeFileFlag::Large,
..Default::default()
};
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 64, None),
49
);
}
#[test]
fn test_max_file_seg_calculator_saturating_sub() {
let common_conf = CommonPduConfig {
file_flag: LargeFileFlag::Large,
..Default::default()
};
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 15, None),
0
);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 14, None),
0
);
}
}

View File

@ -1,687 +0,0 @@
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, FileDirectiveType, PduError, PduHeader,
};
use crate::cfdp::tlv::{
EntityIdTlv, FilestoreResponseTlv, GenericTlv, Tlv, TlvType, TlvTypeField, WritableTlv,
};
use crate::cfdp::{ConditionCode, CrcFlag, Direction, PduType};
use crate::ByteConversionError;
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use super::tlv::ReadableTlv;
use super::{CfdpPdu, InvalidTlvTypeFieldError, WritablePduPacket};
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum DeliveryCode {
Complete = 0,
Incomplete = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum FileStatus {
DiscardDeliberately = 0b00,
DiscardedFsRejection = 0b01,
Retained = 0b10,
Unreported = 0b11,
}
/// Finished PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.2.3.
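///
/// A minimal usage sketch (illustrative only; assumes `PduHeader`, `CommonPduConfig` and the
/// `WritablePduPacket` trait are in scope):
///
/// ```ignore
/// let pdu_header = PduHeader::new_no_file_data(CommonPduConfig::default(), 0);
/// // Finished PDU for a complete, error-free transfer where the file was retained.
/// let finished_pdu = FinishedPduCreator::new_default(
///     pdu_header,
///     DeliveryCode::Complete,
///     FileStatus::Retained,
/// );
/// let mut buf: [u8; 32] = [0; 32];
/// let written_len = finished_pdu.write_to_bytes(&mut buf).unwrap();
/// ```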
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct FinishedPduCreator<'fs_responses> {
pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
file_status: FileStatus,
fs_responses:
&'fs_responses [FilestoreResponseTlv<'fs_responses, 'fs_responses, 'fs_responses>],
fault_location: Option<EntityIdTlv>,
}
impl<'fs_responses> FinishedPduCreator<'fs_responses> {
/// Default finished PDU: No error (no fault location field) and no filestore responses.
pub fn new_default(
pdu_header: PduHeader,
delivery_code: DeliveryCode,
file_status: FileStatus,
) -> Self {
Self::new_generic(
pdu_header,
ConditionCode::NoError,
delivery_code,
file_status,
&[],
None,
)
}
pub fn new_with_error(
pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
file_status: FileStatus,
fault_location: EntityIdTlv,
) -> Self {
Self::new_generic(
pdu_header,
condition_code,
delivery_code,
file_status,
&[],
Some(fault_location),
)
}
pub fn new_generic(
mut pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
file_status: FileStatus,
fs_responses: &'fs_responses [FilestoreResponseTlv<
'fs_responses,
'fs_responses,
'fs_responses,
>],
fault_location: Option<EntityIdTlv>,
) -> Self {
pdu_header.pdu_type = PduType::FileDirective;
// Enforce correct direction bit.
pdu_header.pdu_conf.direction = Direction::TowardsSender;
let mut finished_pdu = Self {
pdu_header,
condition_code,
delivery_code,
file_status,
fs_responses,
fault_location,
};
finished_pdu.pdu_header.pdu_datafield_len = finished_pdu.calc_pdu_datafield_len() as u16;
finished_pdu
}
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
pub fn delivery_code(&self) -> DeliveryCode {
self.delivery_code
}
pub fn file_status(&self) -> FileStatus {
self.file_status
}
/// If there are no filestore responses, an empty slice will be returned.
pub fn filestore_responses(&self) -> &[FilestoreResponseTlv<'_, '_, '_>] {
self.fs_responses
}
pub fn fault_location(&self) -> Option<EntityIdTlv> {
self.fault_location
}
fn calc_pdu_datafield_len(&self) -> usize {
let mut datafield_len = 2;
for fs_response in self.fs_responses {
datafield_len += fs_response.len_full();
}
if let Some(fault_location) = self.fault_location {
datafield_len += fault_location.len_full();
}
if self.crc_flag() == CrcFlag::WithCrc {
datafield_len += 2;
}
datafield_len
}
}
impl CfdpPdu for FinishedPduCreator<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::FinishedPdu)
}
}
impl WritablePduPacket for FinishedPduCreator<'_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: expected_len,
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::FinishedPdu as u8;
current_idx += 1;
buf[current_idx] = ((self.condition_code as u8) << 4)
| ((self.delivery_code as u8) << 2)
| self.file_status as u8;
current_idx += 1;
for fs_responses in self.fs_responses {
current_idx += fs_responses.write_to_bytes(&mut buf[current_idx..])?;
}
if let Some(fault_location) = self.fault_location {
current_idx += fault_location.write_to_bytes(&mut buf[current_idx..])?;
}
if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
/// Helper structure to loop through all filestore responses of a read Finished PDU. Iterators in
/// Rust are not fallible, but TLV creation can fail, for example if the raw TLV data is invalid.
/// In that case, the iterator will yield [None] because there is no way to recover from the
/// error.
///
/// The user can accumulate the length of all TLVs yielded by the iterator and compare it against
/// the full length of the options to check whether the iterator was able to parse all TLVs
/// successfully.
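///
/// A minimal sketch of the length accumulation check described above (illustrative only; assumes
/// a raw Finished PDU in the hypothetical variable `finished_pdu_raw`):
///
/// ```ignore
/// let reader = FinishedPduReader::from_bytes(&finished_pdu_raw).unwrap();
/// let mut accumulated_len = 0;
/// for fs_response in reader.fs_responses_iter() {
///     // Process the filestore response TLV here.
///     accumulated_len += fs_response.len_full();
/// }
/// // If this does not hold, the iterator terminated early on an invalid TLV.
/// assert_eq!(accumulated_len, reader.fs_responses_raw().len());
/// ```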
pub struct FilestoreResponseIterator<'buf> {
responses_buf: &'buf [u8],
current_idx: usize,
}
impl<'buf> Iterator for FilestoreResponseIterator<'buf> {
type Item = FilestoreResponseTlv<'buf, 'buf, 'buf>;
fn next(&mut self) -> Option<Self::Item> {
if self.current_idx == self.responses_buf.len() {
return None;
}
let tlv = FilestoreResponseTlv::from_bytes(&self.responses_buf[self.current_idx..]);
// There are no fallible iterators in Rust, so the error cannot be propagated here.
if tlv.is_err() {
return None;
}
let tlv = tlv.unwrap();
self.current_idx += tlv.len_full();
Some(tlv)
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct FinishedPduReader<'buf> {
pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
file_status: FileStatus,
fs_responses_raw: &'buf [u8],
fault_location: Option<EntityIdTlv>,
}
impl<'buf> FinishedPduReader<'buf> {
/// Generates [Self] from a raw bytestream.
pub fn new(buf: &'buf [u8]) -> Result<Self, PduError> {
Self::from_bytes(buf)
}
/// Generates [Self] from a raw bytestream.
pub fn from_bytes(buf: &'buf [u8]) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let min_expected_len = current_idx + 2;
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::FinishedPdu),
}
})?;
if directive_type != FileDirectiveType::FinishedPdu {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::FinishedPdu,
});
}
current_idx += 1;
let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111)
.map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?;
// Unwrap is okay here for both of the following operations, which cannot fail.
let delivery_code = DeliveryCode::try_from((buf[current_idx] >> 2) & 0b1).unwrap();
let file_status = FileStatus::try_from(buf[current_idx] & 0b11).unwrap();
current_idx += 1;
let (fs_responses_raw, fault_location) =
Self::parse_tlv_fields(current_idx, full_len_without_crc, buf)?;
Ok(Self {
pdu_header,
condition_code,
delivery_code,
file_status,
fs_responses_raw,
fault_location,
})
}
pub fn fs_responses_raw(&self) -> &[u8] {
self.fs_responses_raw
}
pub fn fs_responses_iter(&self) -> FilestoreResponseIterator<'_> {
FilestoreResponseIterator {
responses_buf: self.fs_responses_raw,
current_idx: 0,
}
}
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
pub fn delivery_code(&self) -> DeliveryCode {
self.delivery_code
}
pub fn file_status(&self) -> FileStatus {
self.file_status
}
pub fn fault_location(&self) -> Option<EntityIdTlv> {
self.fault_location
}
fn parse_tlv_fields(
mut current_idx: usize,
full_len_without_crc: usize,
buf: &[u8],
) -> Result<(&[u8], Option<EntityIdTlv>), PduError> {
let mut fs_responses: &[u8] = &[];
let mut fault_location = None;
let start_of_fs_responses = current_idx;
// There are leftover filestore response(s) and/or a fault location field.
while current_idx < full_len_without_crc {
let next_tlv = Tlv::from_bytes(&buf[current_idx..])?;
match next_tlv.tlv_type_field() {
TlvTypeField::Standard(tlv_type) => {
if tlv_type == TlvType::FilestoreResponse {
current_idx += next_tlv.len_full();
if current_idx == full_len_without_crc {
fs_responses = &buf[start_of_fs_responses..current_idx];
}
} else if tlv_type == TlvType::EntityId {
// At least one FS response is included.
if current_idx > start_of_fs_responses {
fs_responses = &buf[start_of_fs_responses..current_idx];
}
fault_location = Some(EntityIdTlv::from_bytes(&buf[current_idx..])?);
current_idx += fault_location.as_ref().unwrap().len_full();
// This is considered a configuration error: the entity ID has to be the
// last TLV; anything else would break the handling of the packet TLVs.
if current_idx != full_len_without_crc {
return Err(PduError::Format);
}
} else {
return Err(PduError::TlvLv(
InvalidTlvTypeFieldError {
found: tlv_type.into(),
expected: Some(TlvType::FilestoreResponse.into()),
}
.into(),
));
}
}
TlvTypeField::Custom(raw) => {
return Err(PduError::TlvLv(
InvalidTlvTypeFieldError {
found: raw,
expected: None,
}
.into(),
));
}
}
}
Ok((fs_responses, fault_location))
}
}
impl CfdpPdu for FinishedPduReader<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::FinishedPdu)
}
}
impl PartialEq<FinishedPduCreator<'_>> for FinishedPduReader<'_> {
fn eq(&self, other: &FinishedPduCreator<'_>) -> bool {
self.pdu_header == other.pdu_header
&& self.condition_code == other.condition_code
&& self.delivery_code == other.delivery_code
&& self.file_status == other.file_status
&& self.fault_location == other.fault_location
&& self
.fs_responses_iter()
.zip(other.filestore_responses().iter())
.all(|(a, b)| a == *b)
}
}
impl PartialEq<FinishedPduReader<'_>> for FinishedPduCreator<'_> {
fn eq(&self, other: &FinishedPduReader<'_>) -> bool {
other.eq(self)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cfdp::lv::Lv;
use crate::cfdp::pdu::tests::{
common_pdu_conf, verify_raw_header, TEST_DEST_ID, TEST_SEQ_NUM, TEST_SRC_ID,
};
use crate::cfdp::pdu::{FileDirectiveType, PduHeader};
use crate::cfdp::tlv::FilestoreResponseTlv;
use crate::cfdp::{ConditionCode, CrcFlag, Direction, LargeFileFlag, TransmissionMode};
fn generic_finished_pdu(
crc_flag: CrcFlag,
fss: LargeFileFlag,
delivery_code: DeliveryCode,
file_status: FileStatus,
) -> FinishedPduCreator<'static> {
let pdu_header = PduHeader::new_no_file_data(common_pdu_conf(crc_flag, fss), 0);
FinishedPduCreator::new_default(pdu_header, delivery_code, file_status)
}
#[test]
fn test_basic() {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
DeliveryCode::Complete,
FileStatus::Retained,
);
assert_eq!(finished_pdu.condition_code(), ConditionCode::NoError);
assert_eq!(
finished_pdu.pdu_header().pdu_conf.direction,
Direction::TowardsSender
);
assert_eq!(finished_pdu.delivery_code(), DeliveryCode::Complete);
assert_eq!(finished_pdu.file_status(), FileStatus::Retained);
assert_eq!(finished_pdu.filestore_responses(), &[]);
assert_eq!(finished_pdu.fault_location(), None);
assert_eq!(finished_pdu.pdu_header().pdu_datafield_len, 2);
assert_eq!(finished_pdu.crc_flag(), CrcFlag::NoCrc);
assert_eq!(finished_pdu.file_flag(), LargeFileFlag::Normal);
assert_eq!(finished_pdu.pdu_type(), PduType::FileDirective);
assert_eq!(
finished_pdu.file_directive_type(),
Some(FileDirectiveType::FinishedPdu)
);
assert_eq!(
finished_pdu.transmission_mode(),
TransmissionMode::Acknowledged
);
assert_eq!(finished_pdu.direction(), Direction::TowardsSender);
assert_eq!(finished_pdu.source_id(), TEST_SRC_ID.into());
assert_eq!(finished_pdu.dest_id(), TEST_DEST_ID.into());
assert_eq!(finished_pdu.transaction_seq_num(), TEST_SEQ_NUM.into());
}
fn generic_serialization_test_no_error(delivery_code: DeliveryCode, file_status: FileStatus) {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
delivery_code,
file_status,
);
let mut buf: [u8; 64] = [0; 64];
let written = finished_pdu.write_to_bytes(&mut buf);
assert!(written.is_ok());
let written = written.unwrap();
assert_eq!(written, 9);
assert_eq!(written, finished_pdu.len_written());
assert_eq!(written, finished_pdu.pdu_header().header_len() + 2);
assert_eq!(
finished_pdu.pdu_header().pdu_conf.direction,
Direction::TowardsSender
);
verify_raw_header(finished_pdu.pdu_header(), &buf);
let mut current_idx = finished_pdu.pdu_header().header_len();
assert_eq!(buf[current_idx], FileDirectiveType::FinishedPdu as u8);
current_idx += 1;
assert_eq!(
(buf[current_idx] >> 4) & 0b1111,
ConditionCode::NoError as u8
);
assert_eq!((buf[current_idx] >> 2) & 0b1, delivery_code as u8);
assert_eq!(buf[current_idx] & 0b11, file_status as u8);
assert_eq!(current_idx + 1, written);
}
#[test]
fn test_serialization_simple() {
generic_serialization_test_no_error(DeliveryCode::Complete, FileStatus::Retained);
}
#[test]
fn test_serialization_simple_2() {
generic_serialization_test_no_error(
DeliveryCode::Incomplete,
FileStatus::DiscardDeliberately,
);
}
#[test]
fn test_serialization_simple_3() {
generic_serialization_test_no_error(DeliveryCode::Incomplete, FileStatus::Unreported);
}
#[test]
fn test_write_to_vec() {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
DeliveryCode::Complete,
FileStatus::Retained,
);
let mut buf: [u8; 64] = [0; 64];
let written = finished_pdu.write_to_bytes(&mut buf).unwrap();
let pdu_vec = finished_pdu.to_vec().unwrap();
assert_eq!(buf[0..written], pdu_vec);
}
#[test]
fn test_deserialization_simple() {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
DeliveryCode::Complete,
FileStatus::Retained,
);
let mut buf: [u8; 64] = [0; 64];
finished_pdu.write_to_bytes(&mut buf).unwrap();
let read_back = FinishedPduReader::from_bytes(&buf);
assert!(read_back.is_ok());
let read_back = read_back.unwrap();
assert_eq!(finished_pdu, read_back);
// Use all getter functions here explicitly once.
assert_eq!(finished_pdu.pdu_header(), read_back.pdu_header());
assert_eq!(finished_pdu.condition_code(), read_back.condition_code());
assert_eq!(finished_pdu.fault_location(), read_back.fault_location());
assert_eq!(finished_pdu.file_status(), read_back.file_status());
assert_eq!(finished_pdu.delivery_code(), read_back.delivery_code());
}
#[test]
fn test_serialization_buf_too_small() {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
DeliveryCode::Complete,
FileStatus::Retained,
);
let mut buf: [u8; 8] = [0; 8];
let error = finished_pdu.write_to_bytes(&mut buf);
assert!(error.is_err());
if let PduError::ByteConversion(ByteConversionError::ToSliceTooSmall { found, expected }) =
error.unwrap_err()
{
assert_eq!(found, 8);
assert_eq!(expected, 9);
} else {
panic!("expected to_slice_too_small error");
}
}
#[test]
fn test_with_crc() {
let finished_pdu = generic_finished_pdu(
CrcFlag::WithCrc,
LargeFileFlag::Normal,
DeliveryCode::Complete,
FileStatus::Retained,
);
let mut buf: [u8; 64] = [0; 64];
let written = finished_pdu.write_to_bytes(&mut buf).unwrap();
assert_eq!(written, finished_pdu.len_written());
let finished_pdu_from_raw = FinishedPduReader::new(&buf).unwrap();
assert_eq!(finished_pdu, finished_pdu_from_raw);
buf[written - 1] -= 1;
let crc: u16 = ((buf[written - 2] as u16) << 8) | buf[written - 1] as u16;
let error = FinishedPduReader::new(&buf).unwrap_err();
if let PduError::Checksum(e) = error {
assert_eq!(e, crc);
} else {
panic!("expected crc error");
}
}
#[test]
fn test_with_fault_location() {
let pdu_header =
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
let finished_pdu = FinishedPduCreator::new_with_error(
pdu_header,
ConditionCode::NakLimitReached,
DeliveryCode::Incomplete,
FileStatus::DiscardDeliberately,
EntityIdTlv::new(TEST_DEST_ID.into()),
);
let finished_pdu_vec = finished_pdu.to_vec().unwrap();
assert_eq!(finished_pdu_vec.len(), 12);
assert_eq!(finished_pdu_vec[9], TlvType::EntityId.into());
assert_eq!(finished_pdu_vec[10], 1);
assert_eq!(finished_pdu_vec[11], TEST_DEST_ID.value_typed());
assert_eq!(
finished_pdu.fault_location().unwrap().entity_id(),
&TEST_DEST_ID.into()
);
}
#[test]
fn test_deserialization_with_fault_location() {
let pdu_header =
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
let entity_id_tlv = EntityIdTlv::new(TEST_DEST_ID.into());
let finished_pdu = FinishedPduCreator::new_with_error(
pdu_header,
ConditionCode::NakLimitReached,
DeliveryCode::Incomplete,
FileStatus::DiscardDeliberately,
entity_id_tlv,
);
let finished_pdu_vec = finished_pdu.to_vec().unwrap();
let finished_pdu_deserialized = FinishedPduReader::from_bytes(&finished_pdu_vec).unwrap();
assert_eq!(finished_pdu, finished_pdu_deserialized);
}
#[test]
fn test_deserialization_with_fs_responses() {
let entity_id_tlv = EntityIdTlv::new(TEST_DEST_ID.into());
let first_name = "first.txt";
let first_name_lv = Lv::new_from_str(first_name).unwrap();
let fs_response_0 = FilestoreResponseTlv::new_no_filestore_message(
crate::cfdp::tlv::FilestoreActionCode::CreateFile,
0,
first_name_lv,
None,
)
.unwrap();
let fs_response_1 = FilestoreResponseTlv::new_no_filestore_message(
crate::cfdp::tlv::FilestoreActionCode::DeleteFile,
0,
first_name_lv,
None,
)
.unwrap();
let fs_responses = &[fs_response_0, fs_response_1];
let pdu_header =
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
let finished_pdu = FinishedPduCreator::new_generic(
pdu_header,
ConditionCode::NakLimitReached,
DeliveryCode::Incomplete,
FileStatus::DiscardDeliberately,
fs_responses,
Some(entity_id_tlv),
);
let finished_pdu_vec = finished_pdu.to_vec().unwrap();
let finished_pdu_deserialized = FinishedPduReader::from_bytes(&finished_pdu_vec).unwrap();
assert_eq!(finished_pdu_deserialized, finished_pdu);
}
#[test]
fn test_deserialization_with_fs_responses_and_fault_location() {
let first_name = "first.txt";
let first_name_lv = Lv::new_from_str(first_name).unwrap();
let fs_response_0 = FilestoreResponseTlv::new_no_filestore_message(
crate::cfdp::tlv::FilestoreActionCode::CreateFile,
0,
first_name_lv,
None,
)
.unwrap();
let fs_response_1 = FilestoreResponseTlv::new_no_filestore_message(
crate::cfdp::tlv::FilestoreActionCode::DeleteFile,
0,
first_name_lv,
None,
)
.unwrap();
let fs_responses = &[fs_response_0, fs_response_1];
let pdu_header =
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
let finished_pdu = FinishedPduCreator::new_generic(
pdu_header,
ConditionCode::NakLimitReached,
DeliveryCode::Incomplete,
FileStatus::DiscardDeliberately,
fs_responses,
None,
);
let finished_pdu_vec = finished_pdu.to_vec().unwrap();
let finished_pdu_deserialized = FinishedPduReader::from_bytes(&finished_pdu_vec).unwrap();
assert_eq!(finished_pdu_deserialized, finished_pdu);
}
}


@ -1,835 +0,0 @@
#[cfg(feature = "alloc")]
use super::tlv::TlvOwned;
use crate::cfdp::lv::Lv;
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
FileDirectiveType, PduError, PduHeader,
};
use crate::cfdp::tlv::{Tlv, WritableTlv};
use crate::cfdp::{ChecksumType, CrcFlag, Direction, LargeFileFlag, PduType};
use crate::ByteConversionError;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use super::tlv::ReadableTlv;
use super::{CfdpPdu, WritablePduPacket};
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct MetadataGenericParams {
pub closure_requested: bool,
pub checksum_type: ChecksumType,
pub file_size: u64,
}
impl MetadataGenericParams {
pub fn new(closure_requested: bool, checksum_type: ChecksumType, file_size: u64) -> Self {
Self {
closure_requested,
checksum_type,
file_size,
}
}
}
pub fn build_metadata_opts_from_slice(
buf: &mut [u8],
tlvs: &[Tlv],
) -> Result<usize, ByteConversionError> {
let mut written = 0;
for tlv in tlvs {
written += tlv.write_to_bytes(&mut buf[written..])?;
}
Ok(written)
}
#[cfg(feature = "alloc")]
pub fn build_metadata_opts_from_vec(
buf: &mut [u8],
tlvs: &Vec<Tlv>,
) -> Result<usize, ByteConversionError> {
build_metadata_opts_from_slice(buf, tlvs.as_slice())
}
#[cfg(feature = "alloc")]
pub fn build_metadata_opts_from_owned_slice(tlvs: &[TlvOwned]) -> Vec<u8> {
let mut sum_vec = Vec::new();
for tlv in tlvs {
sum_vec.extend(tlv.to_vec());
}
sum_vec
}
/// Metadata PDU creator abstraction.
///
/// This abstraction exposes a specialized API for creating metadata PDUs as specified in
/// CFDP chapter 5.2.5.
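///
/// The following minimal creation sketch is not a compiled doctest. It assumes a previously
/// constructed common PDU configuration `pdu_conf` (hypothetical variable) and only uses
/// constructors which are also exercised by the unit tests of this module:
///
/// ```ignore
/// let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
/// let params = MetadataGenericParams::new(false, ChecksumType::Crc32, 0x1010);
/// let src_file = Lv::new_from_str("hello-world.txt").unwrap();
/// let dest_file = Lv::new_from_str("hello-world2.txt").unwrap();
/// let metadata_pdu = MetadataPduCreator::new_no_opts(pdu_header, params, src_file, dest_file);
/// // Serialize into an owned buffer (requires the alloc feature).
/// let raw_pdu = metadata_pdu.to_vec().unwrap();
/// ```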
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct MetadataPduCreator<'src_name, 'dest_name, 'opts> {
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
options: &'opts [u8],
}
impl<'src_name, 'dest_name, 'opts> MetadataPduCreator<'src_name, 'dest_name, 'opts> {
pub fn new_no_opts(
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
) -> Self {
Self::new(
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
&[],
)
}
pub fn new_with_opts(
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
options: &'opts [u8],
) -> Self {
Self::new(
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
options,
)
}
pub fn new(
mut pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
options: &'opts [u8],
) -> Self {
pdu_header.pdu_type = PduType::FileDirective;
pdu_header.pdu_conf.direction = Direction::TowardsReceiver;
let mut pdu = Self {
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
options,
};
pdu.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
pub fn metadata_params(&self) -> &MetadataGenericParams {
&self.metadata_params
}
pub fn src_file_name(&self) -> Lv<'src_name> {
self.src_file_name
}
pub fn dest_file_name(&self) -> Lv<'dest_name> {
self.dest_file_name
}
pub fn options(&self) -> &'opts [u8] {
self.options
}
/// Yield an iterator which can be used to loop through all options. The returned iterator
/// will be empty if the options field is empty.
pub fn options_iter(&self) -> OptionsIter<'_> {
OptionsIter {
opt_buf: self.options,
current_idx: 0,
}
}
fn calc_pdu_datafield_len(&self) -> usize {
// One directive type octet and one byte of the directive parameter field.
let mut len = 2;
if self.pdu_header.common_pdu_conf().file_flag == LargeFileFlag::Large {
len += 8;
} else {
len += 4;
}
len += self.src_file_name.len_full();
len += self.dest_file_name.len_full();
len += self.options().len();
if self.crc_flag() == CrcFlag::WithCrc {
len += 2;
}
len
}
}
impl CfdpPdu for MetadataPduCreator<'_, '_, '_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::MetadataPdu)
}
}
impl WritablePduPacket for MetadataPduCreator<'_, '_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: expected_len,
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::MetadataPdu as u8;
current_idx += 1;
buf[current_idx] = ((self.metadata_params.closure_requested as u8) << 6)
| (self.metadata_params.checksum_type as u8);
current_idx += 1;
current_idx += write_fss_field(
self.pdu_header.common_pdu_conf().file_flag,
self.metadata_params.file_size,
&mut buf[current_idx..],
)?;
current_idx += self
.src_file_name
.write_to_be_bytes(&mut buf[current_idx..])?;
current_idx += self
.dest_file_name
.write_to_be_bytes(&mut buf[current_idx..])?;
buf[current_idx..current_idx + self.options.len()].copy_from_slice(self.options);
current_idx += self.options.len();
if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
/// Helper structure to loop through all options of a metadata PDU. It should be noted that
/// iterators in Rust are not fallible, but the TLV creation can fail, for example if the raw TLV
/// data is invalid for some reason. In that case, the iterator will yield [None] because there
/// is no way to recover from this.
///
/// The user can accumulate the length of all TLVs yielded by the iterator and compare it against
/// the full length of the options to check whether the iterator was able to parse all TLVs
/// successfully.
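///
/// A minimal sketch of that length check (not a compiled doctest); `reader` is a hypothetical,
/// already parsed [MetadataPduReader]:
///
/// ```ignore
/// let mut accumulated_len = 0;
/// for tlv in reader.options_iter().unwrap() {
///     accumulated_len += tlv.len_full();
/// }
/// // If this does not hold, at least one TLV in the options field could not be parsed.
/// assert_eq!(accumulated_len, reader.options().len());
/// ```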
pub struct OptionsIter<'opts> {
opt_buf: &'opts [u8],
current_idx: usize,
}
impl<'opts> Iterator for OptionsIter<'opts> {
type Item = Tlv<'opts>;
fn next(&mut self) -> Option<Self::Item> {
if self.current_idx == self.opt_buf.len() {
return None;
}
let tlv = Tlv::from_bytes(&self.opt_buf[self.current_idx..]);
// There are no fallible iterators in Rust, so we can not propagate the error here.
if tlv.is_err() {
return None;
}
let tlv = tlv.unwrap();
self.current_idx += tlv.len_full();
Some(tlv)
}
}
/// Metadata PDU reader abstraction.
///
/// This abstraction exposes a specialized API for reading a metadata PDU with minimal copying
/// involved.
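///
/// A minimal parsing sketch (not a compiled doctest); `buf` is a hypothetical byte slice
/// containing a raw metadata PDU:
///
/// ```ignore
/// let metadata_pdu = MetadataPduReader::from_bytes(&buf)?;
/// let params = metadata_pdu.metadata_params();
/// let src_name = metadata_pdu.src_file_name();
/// let dest_name = metadata_pdu.dest_file_name();
/// ```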
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct MetadataPduReader<'buf> {
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
#[cfg_attr(feature = "serde", serde(borrow))]
src_file_name: Lv<'buf>,
#[cfg_attr(feature = "serde", serde(borrow))]
dest_file_name: Lv<'buf>,
options: &'buf [u8],
}
impl<'raw> MetadataPduReader<'raw> {
pub fn new(buf: &'raw [u8]) -> Result<Self, PduError> {
Self::from_bytes(buf)
}
pub fn from_bytes(buf: &'raw [u8]) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let is_large_file = pdu_header.pdu_conf.file_flag == LargeFileFlag::Large;
// Minimal length: 1 byte + FSS (4 bytes) + 2 empty LVs (1 byte each)
let mut min_expected_len = current_idx + 7;
if is_large_file {
min_expected_len += 4;
}
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::MetadataPdu),
}
})?;
if directive_type != FileDirectiveType::MetadataPdu {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::MetadataPdu,
});
}
current_idx += 1;
let (fss_len, file_size) =
read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx + 1..]);
let metadata_params = MetadataGenericParams {
closure_requested: ((buf[current_idx] >> 6) & 0b1) != 0,
checksum_type: ChecksumType::try_from(buf[current_idx] & 0b1111)
.map_err(|_| PduError::InvalidChecksumType(buf[current_idx] & 0b1111))?,
file_size,
};
current_idx += 1 + fss_len;
let src_file_name = Lv::from_bytes(&buf[current_idx..])?;
current_idx += src_file_name.len_full();
let dest_file_name = Lv::from_bytes(&buf[current_idx..])?;
current_idx += dest_file_name.len_full();
// All left-over bytes are options.
Ok(Self {
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
options: &buf[current_idx..full_len_without_crc],
})
}
/// Yield an iterator which can be used to loop through all options. The iterator is currently
/// always returned and will simply be empty if the options field is empty.
pub fn options_iter(&self) -> Option<OptionsIter<'_>> {
Some(OptionsIter {
opt_buf: self.options,
current_idx: 0,
})
}
pub fn options(&self) -> &'raw [u8] {
self.options
}
pub fn metadata_params(&self) -> &MetadataGenericParams {
&self.metadata_params
}
pub fn src_file_name(&self) -> Lv {
self.src_file_name
}
pub fn dest_file_name(&self) -> Lv {
self.dest_file_name
}
}
impl CfdpPdu for MetadataPduReader<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::MetadataPdu)
}
}
#[cfg(test)]
pub mod tests {
use alloc::string::ToString;
use crate::cfdp::lv::Lv;
use crate::cfdp::pdu::metadata::{
build_metadata_opts_from_slice, build_metadata_opts_from_vec, MetadataGenericParams,
MetadataPduCreator, MetadataPduReader,
};
use crate::cfdp::pdu::tests::{
common_pdu_conf, verify_raw_header, TEST_DEST_ID, TEST_SEQ_NUM, TEST_SRC_ID,
};
use crate::cfdp::pdu::{CfdpPdu, PduError, WritablePduPacket};
use crate::cfdp::pdu::{FileDirectiveType, PduHeader};
use crate::cfdp::tlv::{ReadableTlv, Tlv, TlvOwned, TlvType, WritableTlv};
use crate::cfdp::{
ChecksumType, CrcFlag, Direction, LargeFileFlag, PduType, SegmentMetadataFlag,
SegmentationControl, TransmissionMode,
};
use std::vec;
const SRC_FILENAME: &str = "hello-world.txt";
const DEST_FILENAME: &str = "hello-world2.txt";
fn generic_metadata_pdu(
crc_flag: CrcFlag,
checksum_type: ChecksumType,
closure_requested: bool,
fss: LargeFileFlag,
opts: &[u8],
) -> (
Lv<'static>,
Lv<'static>,
MetadataPduCreator<'static, 'static, '_>,
) {
let pdu_header = PduHeader::new_no_file_data(common_pdu_conf(crc_flag, fss), 0);
let metadata_params = MetadataGenericParams::new(closure_requested, checksum_type, 0x1010);
let src_filename = Lv::new_from_str(SRC_FILENAME).expect("Generating string LV failed");
let dest_filename =
Lv::new_from_str(DEST_FILENAME).expect("Generating destination LV failed");
(
src_filename,
dest_filename,
MetadataPduCreator::new(
pdu_header,
metadata_params,
src_filename,
dest_filename,
opts,
),
)
}
#[test]
fn test_basic() {
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Normal,
&[],
);
assert_eq!(
metadata_pdu.len_written(),
metadata_pdu.pdu_header().header_len()
+ 1
+ 1
+ 4
+ src_filename.len_full()
+ dest_filename.len_full()
);
assert_eq!(metadata_pdu.src_file_name(), src_filename);
assert_eq!(metadata_pdu.dest_file_name(), dest_filename);
assert!(metadata_pdu.options().is_empty());
assert_eq!(metadata_pdu.crc_flag(), CrcFlag::NoCrc);
assert_eq!(metadata_pdu.file_flag(), LargeFileFlag::Normal);
assert_eq!(metadata_pdu.pdu_type(), PduType::FileDirective);
assert!(!metadata_pdu.metadata_params().closure_requested);
assert_eq!(
metadata_pdu.metadata_params().checksum_type,
ChecksumType::Crc32
);
assert_eq!(
metadata_pdu.file_directive_type(),
Some(FileDirectiveType::MetadataPdu)
);
assert_eq!(
metadata_pdu.transmission_mode(),
TransmissionMode::Acknowledged
);
assert_eq!(metadata_pdu.direction(), Direction::TowardsReceiver);
assert_eq!(metadata_pdu.source_id(), TEST_SRC_ID.into());
assert_eq!(metadata_pdu.dest_id(), TEST_DEST_ID.into());
assert_eq!(metadata_pdu.transaction_seq_num(), TEST_SEQ_NUM.into());
}
fn check_metadata_raw_fields(
metadata_pdu: &MetadataPduCreator,
buf: &[u8],
written_bytes: usize,
checksum_type: ChecksumType,
closure_requested: bool,
expected_src_filename: &Lv,
expected_dest_filename: &Lv,
) {
verify_raw_header(metadata_pdu.pdu_header(), buf);
assert_eq!(
written_bytes,
metadata_pdu.pdu_header.header_len()
+ 1
+ 1
+ 4
+ expected_src_filename.len_full()
+ expected_dest_filename.len_full()
);
assert_eq!(buf[7], FileDirectiveType::MetadataPdu as u8);
assert_eq!(buf[8] >> 6, closure_requested as u8);
assert_eq!(buf[8] & 0b1111, checksum_type as u8);
assert_eq!(u32::from_be_bytes(buf[9..13].try_into().unwrap()), 0x1010);
let mut current_idx = 13;
let src_name_from_raw =
Lv::from_bytes(&buf[current_idx..]).expect("Creating source name LV failed");
assert_eq!(src_name_from_raw, *expected_src_filename);
current_idx += src_name_from_raw.len_full();
let dest_name_from_raw =
Lv::from_bytes(&buf[current_idx..]).expect("Creating dest name LV failed");
assert_eq!(dest_name_from_raw, *expected_dest_filename);
current_idx += dest_name_from_raw.len_full();
// No options, so no additional data here.
assert_eq!(current_idx, written_bytes);
}
#[test]
fn test_serialization_0() {
let checksum_type = ChecksumType::Crc32;
let closure_requested = false;
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
checksum_type,
closure_requested,
LargeFileFlag::Normal,
&[],
);
let mut buf: [u8; 64] = [0; 64];
let res = metadata_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
check_metadata_raw_fields(
&metadata_pdu,
&buf,
written,
checksum_type,
closure_requested,
&src_filename,
&dest_filename,
);
}
#[test]
fn test_serialization_1() {
let checksum_type = ChecksumType::Modular;
let closure_requested = true;
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
checksum_type,
closure_requested,
LargeFileFlag::Normal,
&[],
);
let mut buf: [u8; 64] = [0; 64];
let res = metadata_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
check_metadata_raw_fields(
&metadata_pdu,
&buf,
written,
checksum_type,
closure_requested,
&src_filename,
&dest_filename,
);
}
#[test]
fn test_write_to_vec() {
let (_, _, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Normal,
&[],
);
let mut buf: [u8; 64] = [0; 64];
let pdu_vec = metadata_pdu.to_vec().unwrap();
let written = metadata_pdu.write_to_bytes(&mut buf).unwrap();
assert_eq!(buf[0..written], pdu_vec);
}
fn compare_read_pdu_to_written_pdu(written: &MetadataPduCreator, read: &MetadataPduReader) {
assert_eq!(written.metadata_params(), read.metadata_params());
assert_eq!(written.src_file_name(), read.src_file_name());
assert_eq!(written.dest_file_name(), read.dest_file_name());
let opts = written.options_iter();
for (tlv_written, tlv_read) in opts.zip(read.options_iter().unwrap()) {
assert_eq!(&tlv_written, &tlv_read);
}
}
#[test]
fn test_deserialization() {
let (_, _, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
true,
LargeFileFlag::Normal,
&[],
);
let mut buf: [u8; 64] = [0; 64];
metadata_pdu.write_to_bytes(&mut buf).unwrap();
let pdu_read_back = MetadataPduReader::from_bytes(&buf);
assert!(pdu_read_back.is_ok());
let pdu_read_back = pdu_read_back.unwrap();
compare_read_pdu_to_written_pdu(&metadata_pdu, &pdu_read_back);
}
#[test]
fn test_with_crc_flag() {
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::WithCrc,
ChecksumType::Crc32,
true,
LargeFileFlag::Normal,
&[],
);
assert_eq!(metadata_pdu.crc_flag(), CrcFlag::WithCrc);
let mut buf: [u8; 64] = [0; 64];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header().header_len()
+ 1
+ 1
+ core::mem::size_of::<u32>()
+ src_filename.len_full()
+ dest_filename.len_full()
+ 2
);
assert_eq!(written, metadata_pdu.len_written());
let pdu_read_back = MetadataPduReader::new(&buf).unwrap();
compare_read_pdu_to_written_pdu(&metadata_pdu, &pdu_read_back);
}
#[test]
fn test_with_large_file_flag() {
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Large,
&[],
);
let mut buf: [u8; 64] = [0; 64];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header().header_len()
+ 1
+ 1
+ core::mem::size_of::<u64>()
+ src_filename.len_full()
+ dest_filename.len_full()
);
let pdu_read_back = MetadataPduReader::new(&buf).unwrap();
compare_read_pdu_to_written_pdu(&metadata_pdu, &pdu_read_back);
}
#[test]
fn test_opts_builders() {
let tlv1 = Tlv::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap();
let tlv_slice = [tlv1, tlv2];
let mut buf: [u8; 32] = [0; 32];
let opts = build_metadata_opts_from_slice(&mut buf, &tlv_slice);
assert!(opts.is_ok());
let opts_len = opts.unwrap();
assert_eq!(opts_len, tlv1.len_full() + tlv2.len_full());
let tlv1_conv_back = Tlv::from_bytes(&buf).unwrap();
assert_eq!(tlv1_conv_back, tlv1);
let tlv2_conv_back = Tlv::from_bytes(&buf[tlv1_conv_back.len_full()..]).unwrap();
assert_eq!(tlv2_conv_back, tlv2);
}
#[test]
fn test_opts_builders_from_vec() {
let tlv1 = Tlv::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap();
let tlv_vec = vec![tlv1, tlv2];
let mut buf: [u8; 32] = [0; 32];
let opts = build_metadata_opts_from_vec(&mut buf, &tlv_vec);
assert!(opts.is_ok());
let opts_len = opts.unwrap();
assert_eq!(opts_len, tlv1.len_full() + tlv2.len_full());
let tlv1_conv_back = Tlv::from_bytes(&buf).unwrap();
assert_eq!(tlv1_conv_back, tlv1);
let tlv2_conv_back = Tlv::from_bytes(&buf[tlv1_conv_back.len_full()..]).unwrap();
assert_eq!(tlv2_conv_back, tlv2);
}
#[test]
fn test_with_opts() {
let tlv1 = Tlv::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap();
let mut tlv_buf: [u8; 64] = [0; 64];
let opts_len = build_metadata_opts_from_slice(&mut tlv_buf, &[tlv1, tlv2]).unwrap();
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Normal,
&tlv_buf[0..opts_len],
);
let mut buf: [u8; 128] = [0; 128];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header.header_len()
+ 1
+ 1
+ 4
+ src_filename.len_full()
+ dest_filename.len_full()
+ opts_len
);
let pdu_read_back = MetadataPduReader::from_bytes(&buf).unwrap();
compare_read_pdu_to_written_pdu(&metadata_pdu, &pdu_read_back);
let opts_iter = pdu_read_back.options_iter();
assert!(opts_iter.is_some());
let opts_iter = opts_iter.unwrap();
let mut accumulated_len = 0;
for (idx, opt) in opts_iter.enumerate() {
if idx == 0 {
assert_eq!(tlv1, opt);
} else if idx == 1 {
assert_eq!(tlv2, opt);
}
accumulated_len += opt.len_full();
}
assert_eq!(accumulated_len, pdu_read_back.options().len());
}
#[test]
fn test_with_owned_opts() {
let tlv1 = TlvOwned::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = TlvOwned::new(TlvType::MsgToUser, &msg_to_user);
let mut all_tlvs = tlv1.to_vec();
all_tlvs.extend(tlv2.to_vec());
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Normal,
&all_tlvs,
);
let mut buf: [u8; 128] = [0; 128];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header.header_len()
+ 1
+ 1
+ 4
+ src_filename.len_full()
+ dest_filename.len_full()
+ all_tlvs.len()
);
let pdu_read_back = MetadataPduReader::from_bytes(&buf).unwrap();
compare_read_pdu_to_written_pdu(&metadata_pdu, &pdu_read_back);
let opts_iter = pdu_read_back.options_iter();
assert!(opts_iter.is_some());
let opts_iter = opts_iter.unwrap();
let mut accumulated_len = 0;
for (idx, opt) in opts_iter.enumerate() {
if idx == 0 {
assert_eq!(tlv1, opt);
} else if idx == 1 {
assert_eq!(tlv2, opt);
}
accumulated_len += opt.len_full();
}
assert_eq!(accumulated_len, pdu_read_back.options().len());
}
#[test]
fn test_invalid_directive_code() {
let (_, _, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
true,
LargeFileFlag::Large,
&[],
);
let mut metadata_vec = metadata_pdu.to_vec().unwrap();
metadata_vec[7] = 0xff;
let metadata_error = MetadataPduReader::from_bytes(&metadata_vec);
assert!(metadata_error.is_err());
let error = metadata_error.unwrap_err();
if let PduError::InvalidDirectiveType { found, expected } = error {
assert_eq!(found, 0xff);
assert_eq!(expected, Some(FileDirectiveType::MetadataPdu));
assert_eq!(
error.to_string(),
"invalid directive type, found 255, expected Some(MetadataPdu)"
);
} else {
panic!("Expected InvalidDirectiveType error, got {:?}", error);
}
}
#[test]
fn test_wrong_directive_code() {
let (_, _, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Large,
&[],
);
let mut metadata_vec = metadata_pdu.to_vec().unwrap();
metadata_vec[7] = FileDirectiveType::EofPdu as u8;
let metadata_error = MetadataPduReader::from_bytes(&metadata_vec);
assert!(metadata_error.is_err());
let error = metadata_error.unwrap_err();
if let PduError::WrongDirectiveType { found, expected } = error {
assert_eq!(found, FileDirectiveType::EofPdu);
assert_eq!(expected, FileDirectiveType::MetadataPdu);
assert_eq!(
error.to_string(),
"wrong directive type, found EofPdu, expected MetadataPdu"
);
} else {
panic!("Expected InvalidDirectiveType error, got {:?}", error);
}
}
#[test]
fn test_corrects_pdu_header() {
let pdu_header = PduHeader::new_for_file_data(
common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
0,
SegmentMetadataFlag::NotPresent,
SegmentationControl::NoRecordBoundaryPreservation,
);
let metadata_params = MetadataGenericParams::new(false, ChecksumType::Crc32, 10);
let src_filename = Lv::new_from_str(SRC_FILENAME).expect("Generating string LV failed");
let dest_filename =
Lv::new_from_str(DEST_FILENAME).expect("Generating destination LV failed");
let metadata_pdu = MetadataPduCreator::new_no_opts(
pdu_header,
metadata_params,
src_filename,
dest_filename,
);
assert_eq!(metadata_pdu.pdu_header().pdu_type(), PduType::FileDirective);
}
}

File diff suppressed because it is too large


@ -1,806 +0,0 @@
use crate::{
cfdp::{CrcFlag, Direction, LargeFileFlag},
ByteConversionError,
};
use core::{marker::PhantomData, mem::size_of};
use super::{
add_pdu_crc, generic_length_checks_pdu_deserialization, CfdpPdu, FileDirectiveType, PduError,
PduHeader, WritablePduPacket,
};
/// Helper type to encapsulate both normal file size segment requests and large file size segment
/// requests.
#[derive(Debug, PartialEq, Eq, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum SegmentRequests<'a> {
U32Pairs(&'a [(u32, u32)]),
U64Pairs(&'a [(u64, u64)]),
}
impl SegmentRequests<'_> {
pub fn is_empty(&self) -> bool {
match self {
SegmentRequests::U32Pairs(pairs) => pairs.is_empty(),
SegmentRequests::U64Pairs(pairs) => pairs.is_empty(),
}
}
}
/// NAK PDU abstraction specialized in the creation of NAK PDUs.
///
/// It exposes a specialized API which simplifies generating NAK PDUs with the format
/// specified in CFDP chapter 5.2.6.
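///
/// The following minimal creation sketch for a normal file size NAK PDU requesting one missing
/// segment is not a compiled doctest; `pdu_conf` is a hypothetical, previously constructed
/// common PDU configuration:
///
/// ```ignore
/// let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
/// // Request re-transmission of the file segment covering offsets 512 to 1024.
/// let nak_pdu = NakPduCreator::new(pdu_header, 0, 1024, &[(512, 1024)]).unwrap();
/// let raw_pdu = nak_pdu.to_vec().unwrap();
/// ```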
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct NakPduCreator<'seg_reqs> {
pdu_header: PduHeader,
start_of_scope: u64,
end_of_scope: u64,
segment_requests: Option<SegmentRequests<'seg_reqs>>,
}
impl<'seg_reqs> NakPduCreator<'seg_reqs> {
/// Please note that the start of scope and the end of scope need to be smaller than or equal
/// to [u32::MAX] if the large file flag of the passed PDU configuration is
/// [LargeFileFlag::Normal].
///
/// ## Errors
///
pub fn new_no_segment_requests(
pdu_header: PduHeader,
start_of_scope: u64,
end_of_scope: u64,
) -> Result<NakPduCreator<'seg_reqs>, PduError> {
Self::new_generic(pdu_header, start_of_scope, end_of_scope, None)
}
/// Default constructor for normal file sizes.
pub fn new(
pdu_header: PduHeader,
start_of_scope: u32,
end_of_scope: u32,
segment_requests: &'seg_reqs [(u32, u32)],
) -> Result<NakPduCreator<'seg_reqs>, PduError> {
let mut passed_segment_requests = None;
if !segment_requests.is_empty() {
passed_segment_requests = Some(SegmentRequests::U32Pairs(segment_requests));
}
Self::new_generic(
pdu_header,
start_of_scope.into(),
end_of_scope.into(),
passed_segment_requests,
)
}
pub fn new_large_file_size(
pdu_header: PduHeader,
start_of_scope: u64,
end_of_scope: u64,
segment_requests: &'seg_reqs [(u64, u64)],
) -> Result<NakPduCreator<'seg_reqs>, PduError> {
let mut passed_segment_requests = None;
if !segment_requests.is_empty() {
passed_segment_requests = Some(SegmentRequests::U64Pairs(segment_requests));
}
Self::new_generic(
pdu_header,
start_of_scope,
end_of_scope,
passed_segment_requests,
)
}
fn new_generic(
mut pdu_header: PduHeader,
start_of_scope: u64,
end_of_scope: u64,
segment_requests: Option<SegmentRequests<'seg_reqs>>,
) -> Result<NakPduCreator<'seg_reqs>, PduError> {
// Force correct direction flag.
pdu_header.pdu_conf.direction = Direction::TowardsSender;
if let Some(ref segment_requests) = segment_requests {
match segment_requests {
SegmentRequests::U32Pairs(_) => {
if start_of_scope > u32::MAX as u64 || end_of_scope > u32::MAX as u64 {
return Err(PduError::InvalidStartOrEndOfScopeValue);
}
pdu_header.pdu_conf.file_flag = LargeFileFlag::Normal;
}
SegmentRequests::U64Pairs(_) => {
pdu_header.pdu_conf.file_flag = LargeFileFlag::Large;
}
}
};
let mut nak_pdu = Self {
pdu_header,
start_of_scope,
end_of_scope,
segment_requests,
};
nak_pdu.pdu_header.pdu_datafield_len = nak_pdu.calc_pdu_datafield_len() as u16;
Ok(nak_pdu)
}
pub fn start_of_scope(&self) -> u64 {
self.start_of_scope
}
pub fn end_of_scope(&self) -> u64 {
self.end_of_scope
}
pub fn segment_requests(&self) -> Option<&SegmentRequests> {
self.segment_requests.as_ref()
}
pub fn num_segment_reqs(&self) -> usize {
match &self.segment_requests {
Some(seg_reqs) => match seg_reqs {
SegmentRequests::U32Pairs(pairs) => pairs.len(),
SegmentRequests::U64Pairs(pairs) => pairs.len(),
},
None => 0,
}
}
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn calc_pdu_datafield_len(&self) -> usize {
let mut datafield_len = 1;
if self.file_flag() == LargeFileFlag::Normal {
datafield_len += 8;
datafield_len += self.num_segment_reqs() * 8;
} else {
datafield_len += 16;
datafield_len += self.num_segment_reqs() * 16;
}
if self.crc_flag() == CrcFlag::WithCrc {
datafield_len += 2;
}
datafield_len
}
}
impl CfdpPdu for NakPduCreator<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::NakPdu)
}
}
impl WritablePduPacket for NakPduCreator<'_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: expected_len,
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::NakPdu as u8;
current_idx += 1;
let mut write_start_end_of_scope_normal = || {
let start_of_scope = u32::try_from(self.start_of_scope).unwrap();
let end_of_scope = u32::try_from(self.end_of_scope).unwrap();
buf[current_idx..current_idx + 4].copy_from_slice(&start_of_scope.to_be_bytes());
current_idx += 4;
buf[current_idx..current_idx + 4].copy_from_slice(&end_of_scope.to_be_bytes());
current_idx += 4;
};
if let Some(ref seg_reqs) = self.segment_requests {
match seg_reqs {
SegmentRequests::U32Pairs(pairs) => {
// Unwrap is okay here, the API should prevent invalid values which would trigger a
// panic here.
write_start_end_of_scope_normal();
for (next_start_offset, next_end_offset) in *pairs {
buf[current_idx..current_idx + 4]
.copy_from_slice(&next_start_offset.to_be_bytes());
current_idx += 4;
buf[current_idx..current_idx + 4]
.copy_from_slice(&next_end_offset.to_be_bytes());
current_idx += 4;
}
}
SegmentRequests::U64Pairs(pairs) => {
buf[current_idx..current_idx + 8]
.copy_from_slice(&self.start_of_scope.to_be_bytes());
current_idx += 8;
buf[current_idx..current_idx + 8]
.copy_from_slice(&self.end_of_scope.to_be_bytes());
current_idx += 8;
for (next_start_offset, next_end_offset) in *pairs {
buf[current_idx..current_idx + 8]
.copy_from_slice(&next_start_offset.to_be_bytes());
current_idx += 8;
buf[current_idx..current_idx + 8]
.copy_from_slice(&next_end_offset.to_be_bytes());
current_idx += 8;
}
}
}
} else {
write_start_end_of_scope_normal();
}
if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
/// Special iterator type for the NAK PDU which allows to iterate over both normal and large file
/// segment requests.
#[derive(Debug)]
pub struct SegmentRequestIter<'a, T> {
seq_req_raw: &'a [u8],
current_idx: usize,
phantom: core::marker::PhantomData<T>,
}
pub trait SegReqFromBytes {
fn from_bytes(bytes: &[u8]) -> Self;
}
impl SegReqFromBytes for u32 {
fn from_bytes(bytes: &[u8]) -> u32 {
u32::from_be_bytes(bytes.try_into().unwrap())
}
}
impl SegReqFromBytes for u64 {
fn from_bytes(bytes: &[u8]) -> u64 {
u64::from_be_bytes(bytes.try_into().unwrap())
}
}
impl<T> Iterator for SegmentRequestIter<'_, T>
where
T: SegReqFromBytes,
{
type Item = (T, T);
fn next(&mut self) -> Option<Self::Item> {
let value = self.next_at_offset(self.current_idx);
self.current_idx += 2 * size_of::<T>();
value
}
}
impl<'a> PartialEq<SegmentRequests<'a>> for SegmentRequestIter<'_, u32> {
fn eq(&self, other: &SegmentRequests<'a>) -> bool {
match other {
SegmentRequests::U32Pairs(pairs) => self.compare_pairs(pairs),
SegmentRequests::U64Pairs(pairs) => {
if pairs.is_empty() && self.seq_req_raw.is_empty() {
return true;
}
false
}
}
}
}
impl<'a> PartialEq<SegmentRequests<'a>> for SegmentRequestIter<'_, u64> {
fn eq(&self, other: &SegmentRequests<'a>) -> bool {
match other {
SegmentRequests::U32Pairs(pairs) => {
if pairs.is_empty() && self.seq_req_raw.is_empty() {
return true;
}
false
}
SegmentRequests::U64Pairs(pairs) => self.compare_pairs(pairs),
}
}
}
impl<T> SegmentRequestIter<'_, T>
where
T: SegReqFromBytes + PartialEq,
{
fn compare_pairs(&self, pairs: &[(T, T)]) -> bool {
if pairs.is_empty() && self.seq_req_raw.is_empty() {
return true;
}
let size = size_of::<T>();
if pairs.len() * 2 * size != self.seq_req_raw.len() {
return false;
}
for (i, pair) in pairs.iter().enumerate() {
let next_val = self.next_at_offset(i * 2 * size).unwrap();
if next_val != *pair {
return false;
}
}
true
}
}
impl<T: SegReqFromBytes> SegmentRequestIter<'_, T> {
fn next_at_offset(&self, mut offset: usize) -> Option<(T, T)> {
if offset + size_of::<T>() * 2 > self.seq_req_raw.len() {
return None;
}
let start_offset = T::from_bytes(&self.seq_req_raw[offset..offset + size_of::<T>()]);
offset += size_of::<T>();
let end_offset = T::from_bytes(&self.seq_req_raw[offset..offset + size_of::<T>()]);
Some((start_offset, end_offset))
}
}
/// NAK PDU abstraction specialized in reading NAK PDUs from a raw bytestream.
///
/// This is a zero-copy implementation where the segment requests can be read using a special
/// iterator API without the need to copy them.
///
/// The NAK format is expected to conform to CFDP chapter 5.2.6.
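///
/// A minimal reading sketch (not a compiled doctest); `buf` is a hypothetical byte slice
/// containing a raw NAK PDU with normal file size segment requests:
///
/// ```ignore
/// let nak_pdu = NakPduReader::from_bytes(&buf)?;
/// if let Some(seg_req_iter) = nak_pdu.get_normal_segment_requests_iterator() {
///     for (start_offset, end_offset) in seg_req_iter {
///         // Each pair is one requested re-transmission range.
///     }
/// }
/// ```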
#[derive(Debug, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct NakPduReader<'seg_reqs> {
pdu_header: PduHeader,
start_of_scope: u64,
end_of_scope: u64,
seg_reqs_raw: &'seg_reqs [u8],
}
impl CfdpPdu for NakPduReader<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::NakPdu)
}
}
impl<'seg_reqs> NakPduReader<'seg_reqs> {
pub fn new(buf: &'seg_reqs [u8]) -> Result<NakPduReader<'seg_reqs>, PduError> {
Self::from_bytes(buf)
}
pub fn from_bytes(buf: &'seg_reqs [u8]) -> Result<NakPduReader<'seg_reqs>, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
// Minimum length of 9: 1 directive field byte plus start and end of scope (4 bytes each)
// for the normal file size.
generic_length_checks_pdu_deserialization(buf, 9, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::NakPdu),
}
})?;
if directive_type != FileDirectiveType::NakPdu {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::NakPdu,
});
}
current_idx += 1;
let start_of_scope;
let end_of_scope;
if pdu_header.common_pdu_conf().file_flag == LargeFileFlag::Large {
if current_idx + 16 > buf.len() {
return Err(PduError::ByteConversion(
ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: current_idx + 16,
},
));
}
start_of_scope =
u64::from_be_bytes(buf[current_idx..current_idx + 8].try_into().unwrap());
current_idx += 8;
end_of_scope =
u64::from_be_bytes(buf[current_idx..current_idx + 8].try_into().unwrap());
current_idx += 8;
} else {
start_of_scope =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()) as u64;
current_idx += 4;
end_of_scope =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()) as u64;
current_idx += 4;
}
Ok(Self {
pdu_header,
start_of_scope,
end_of_scope,
seg_reqs_raw: &buf[current_idx..full_len_without_crc],
})
}
pub fn start_of_scope(&self) -> u64 {
self.start_of_scope
}
pub fn end_of_scope(&self) -> u64 {
self.end_of_scope
}
pub fn num_segment_reqs(&self) -> usize {
if self.seg_reqs_raw.is_empty() {
return 0;
}
if self.file_flag() == LargeFileFlag::Normal {
self.seg_reqs_raw.len() / 8
} else {
self.seg_reqs_raw.len() / 16
}
}
/// This function returns [None] if this NAK PDU contains segment requests for a large file.
pub fn get_normal_segment_requests_iterator(&self) -> Option<SegmentRequestIter<'_, u32>> {
if self.file_flag() == LargeFileFlag::Large {
return None;
}
Some(SegmentRequestIter {
seq_req_raw: self.seg_reqs_raw,
current_idx: 0,
phantom: PhantomData,
})
}
/// This function returns [None] if this NAK PDU contains segment requests for a normal file.
pub fn get_large_segment_requests_iterator(&self) -> Option<SegmentRequestIter<'_, u64>> {
if self.file_flag() == LargeFileFlag::Normal {
return None;
}
Some(SegmentRequestIter {
seq_req_raw: self.seg_reqs_raw,
current_idx: 0,
phantom: PhantomData,
})
}
}
impl<'a> PartialEq<NakPduCreator<'a>> for NakPduReader<'_> {
fn eq(&self, other: &NakPduCreator<'a>) -> bool {
if self.pdu_header() != other.pdu_header()
|| self.end_of_scope() != other.end_of_scope()
|| self.start_of_scope() != other.start_of_scope()
{
return false;
}
// Check if both segment requests are empty or None
match (self.seg_reqs_raw.is_empty(), other.segment_requests()) {
(true, None) => true,
(true, Some(seg_reqs)) => seg_reqs.is_empty(),
(false, None) => false,
_ => {
// Compare based on file_flag
if self.file_flag() == LargeFileFlag::Normal {
let normal_iter = self.get_normal_segment_requests_iterator().unwrap();
normal_iter == *other.segment_requests().unwrap()
} else {
let large_iter = self.get_large_segment_requests_iterator().unwrap();
large_iter == *other.segment_requests().unwrap()
}
}
}
}
}
#[cfg(test)]
mod tests {
use alloc::string::ToString;
use crate::cfdp::{
pdu::tests::{common_pdu_conf, verify_raw_header, TEST_DEST_ID, TEST_SEQ_NUM, TEST_SRC_ID},
PduType, TransmissionMode,
};
use super::*;
fn check_generic_fields(nak_pdu: &impl CfdpPdu) {
assert_eq!(nak_pdu.crc_flag(), CrcFlag::NoCrc);
assert_eq!(nak_pdu.file_flag(), LargeFileFlag::Normal);
assert_eq!(nak_pdu.pdu_type(), PduType::FileDirective);
assert_eq!(
nak_pdu.file_directive_type(),
Some(FileDirectiveType::NakPdu),
);
assert_eq!(nak_pdu.transmission_mode(), TransmissionMode::Acknowledged);
assert_eq!(nak_pdu.direction(), Direction::TowardsSender);
assert_eq!(nak_pdu.source_id(), TEST_SRC_ID.into());
assert_eq!(nak_pdu.dest_id(), TEST_DEST_ID.into());
assert_eq!(nak_pdu.transaction_seq_num(), TEST_SEQ_NUM.into());
}
#[test]
fn test_seg_request_api() {
let seg_req = SegmentRequests::U32Pairs(&[]);
assert!(seg_req.is_empty());
let seg_req = SegmentRequests::U64Pairs(&[]);
assert!(seg_req.is_empty());
}
#[test]
fn test_basic_creator() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu = NakPduCreator::new_no_segment_requests(pdu_header, 0, 0)
.expect("creating NAK PDU creator failed");
assert_eq!(nak_pdu.start_of_scope(), 0);
assert_eq!(nak_pdu.end_of_scope(), 0);
assert_eq!(nak_pdu.segment_requests(), None);
assert_eq!(nak_pdu.num_segment_reqs(), 0);
check_generic_fields(&nak_pdu);
}
#[test]
fn test_serialization_empty() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu = NakPduCreator::new_no_segment_requests(pdu_header, 100, 300)
.expect("creating NAK PDU creator failed");
assert_eq!(nak_pdu.start_of_scope(), 100);
assert_eq!(nak_pdu.end_of_scope(), 300);
let mut buf: [u8; 64] = [0; 64];
nak_pdu
.write_to_bytes(&mut buf)
.expect("writing NAK PDU to buffer failed");
verify_raw_header(nak_pdu.pdu_header(), &buf);
let mut current_idx = nak_pdu.pdu_header().header_len();
assert_eq!(current_idx + 9, nak_pdu.len_written());
assert_eq!(buf[current_idx], FileDirectiveType::NakPdu as u8);
current_idx += 1;
let start_of_scope =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(start_of_scope, 100);
current_idx += 4;
let end_of_scope =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(end_of_scope, 300);
current_idx += 4;
assert_eq!(current_idx, nak_pdu.len_written());
}
#[test]
fn test_serialization_two_segments() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu = NakPduCreator::new(pdu_header, 100, 300, &[(0, 0), (32, 64)])
.expect("creating NAK PDU creator failed");
let mut buf: [u8; 64] = [0; 64];
nak_pdu
.write_to_bytes(&mut buf)
.expect("writing NAK PDU to buffer failed");
verify_raw_header(nak_pdu.pdu_header(), &buf);
let mut current_idx = nak_pdu.pdu_header().header_len();
assert_eq!(current_idx + 9 + 16, nak_pdu.len_written());
assert_eq!(buf[current_idx], FileDirectiveType::NakPdu as u8);
current_idx += 1;
let start_of_scope =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(start_of_scope, 100);
current_idx += 4;
let end_of_scope =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(end_of_scope, 300);
current_idx += 4;
let first_seg_start =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(first_seg_start, 0);
current_idx += 4;
let first_seg_end =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(first_seg_end, 0);
current_idx += 4;
let second_seg_start =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(second_seg_start, 32);
current_idx += 4;
let second_seg_end =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(second_seg_end, 64);
current_idx += 4;
assert_eq!(current_idx, nak_pdu.len_written());
}
#[test]
fn test_deserialization_empty() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu = NakPduCreator::new_no_segment_requests(pdu_header, 100, 300)
.expect("creating NAK PDU creator failed");
let mut buf: [u8; 64] = [0; 64];
nak_pdu
.write_to_bytes(&mut buf)
.expect("writing NAK PDU to buffer failed");
let nak_pdu_deser = NakPduReader::from_bytes(&buf).expect("deserializing NAK PDU failed");
assert_eq!(nak_pdu_deser, nak_pdu);
check_generic_fields(&nak_pdu_deser);
}
#[test]
fn test_deserialization_large_segments() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Large);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu =
NakPduCreator::new_large_file_size(pdu_header, 100, 300, &[(50, 100), (200, 300)])
.expect("creating NAK PDU creator failed");
let mut buf: [u8; 128] = [0; 128];
nak_pdu
.write_to_bytes(&mut buf)
.expect("writing NAK PDU to buffer failed");
let nak_pdu_deser = NakPduReader::from_bytes(&buf).expect("deserializing NAK PDU failed");
assert_eq!(nak_pdu_deser, nak_pdu);
assert_eq!(nak_pdu_deser.start_of_scope(), 100);
assert_eq!(nak_pdu_deser.end_of_scope(), 300);
assert_eq!(nak_pdu_deser.num_segment_reqs(), 2);
assert!(nak_pdu_deser
.get_large_segment_requests_iterator()
.is_some());
assert!(nak_pdu_deser
.get_normal_segment_requests_iterator()
.is_none());
assert_eq!(
nak_pdu_deser
.get_large_segment_requests_iterator()
.unwrap()
.count(),
2
);
for (idx, large_segments) in nak_pdu_deser
.get_large_segment_requests_iterator()
.unwrap()
.enumerate()
{
if idx == 0 {
assert_eq!(large_segments.0, 50);
assert_eq!(large_segments.1, 100);
} else {
assert_eq!(large_segments.0, 200);
assert_eq!(large_segments.1, 300);
}
}
}
#[test]
fn test_deserialization_normal_segments() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu = NakPduCreator::new(pdu_header, 100, 300, &[(50, 100), (200, 300)])
.expect("creating NAK PDU creator failed");
let mut buf: [u8; 128] = [0; 128];
nak_pdu
.write_to_bytes(&mut buf)
.expect("writing NAK PDU to buffer failed");
let nak_pdu_deser = NakPduReader::from_bytes(&buf).expect("deserializing NAK PDU failed");
assert_eq!(nak_pdu_deser, nak_pdu);
assert_eq!(nak_pdu_deser.start_of_scope(), 100);
assert_eq!(nak_pdu_deser.end_of_scope(), 300);
assert_eq!(nak_pdu_deser.num_segment_reqs(), 2);
assert!(nak_pdu_deser
.get_normal_segment_requests_iterator()
.is_some());
assert!(nak_pdu_deser
.get_large_segment_requests_iterator()
.is_none());
assert_eq!(
nak_pdu_deser
.get_normal_segment_requests_iterator()
.unwrap()
.count(),
2
);
for (idx, large_segments) in nak_pdu_deser
.get_normal_segment_requests_iterator()
.unwrap()
.enumerate()
{
if idx == 0 {
assert_eq!(large_segments.0, 50);
assert_eq!(large_segments.1, 100);
} else {
assert_eq!(large_segments.0, 200);
assert_eq!(large_segments.1, 300);
}
}
}
#[test]
fn test_empty_is_empty() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu_0 =
NakPduCreator::new(pdu_header, 100, 300, &[]).expect("creating NAK PDU creator failed");
let nak_pdu_1 = NakPduCreator::new_no_segment_requests(pdu_header, 100, 300)
.expect("creating NAK PDU creator failed");
assert_eq!(nak_pdu_0, nak_pdu_1);
// Assert the segment request is mapped to None.
assert!(nak_pdu_0.segment_requests().is_none());
}
#[test]
fn test_new_generic_invalid_input() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let u32_list = SegmentRequests::U32Pairs(&[(0, 50), (50, 100)]);
//let error = NakPduCreator::new_generic(pdu_header, 100, 300, Some(u32_list));
let error = NakPduCreator::new_generic(
pdu_header,
u32::MAX as u64 + 1,
u32::MAX as u64 + 2,
Some(u32_list),
);
assert!(error.is_err());
let error = error.unwrap_err();
if let PduError::InvalidStartOrEndOfScopeValue = error {
assert_eq!(
error.to_string(),
"invalid start or end of scope value for NAK PDU"
);
} else {
panic!("unexpected error {error}");
}
}
#[test]
fn test_target_buf_too_small() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu = NakPduCreator::new_no_segment_requests(pdu_header, 100, 300)
.expect("creating NAK PDU creator failed");
assert_eq!(nak_pdu.start_of_scope(), 100);
assert_eq!(nak_pdu.end_of_scope(), 300);
let mut buf: [u8; 5] = [0; 5];
let error = nak_pdu.write_to_bytes(&mut buf);
assert!(error.is_err());
let e = error.unwrap_err();
match e {
PduError::ByteConversion(conv_error) => match conv_error {
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(expected, nak_pdu.len_written());
assert_eq!(found, 5);
}
_ => panic!("unexpected error {conv_error}"),
},
_ => panic!("unexpected error {e}"),
}
}
#[test]
fn test_with_crc() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu = NakPduCreator::new_no_segment_requests(pdu_header, 0, 0)
.expect("creating NAK PDU creator failed");
let mut nak_vec = nak_pdu.to_vec().expect("writing NAK to vector failed");
assert_eq!(nak_vec.len(), pdu_header.header_len() + 9 + 2);
assert_eq!(nak_vec.len(), nak_pdu.len_written());
let nak_pdu_deser = NakPduReader::new(&nak_vec).expect("reading NAK PDU failed");
assert_eq!(nak_pdu_deser, nak_pdu);
nak_vec[nak_pdu.len_written() - 1] -= 1;
let nak_pdu_deser = NakPduReader::new(&nak_vec);
assert!(nak_pdu_deser.is_err());
if let Err(PduError::Checksum(raw)) = nak_pdu_deser {
assert_eq!(
raw,
u16::from_be_bytes(nak_vec[nak_pdu.len_written() - 2..].try_into().unwrap())
);
}
}
}

File diff suppressed because it is too large


@ -1,229 +0,0 @@
//! Abstractions for the Message to User CFDP TLV subtype.
#[cfg(feature = "alloc")]
use super::TlvOwned;
use super::{GenericTlv, ReadableTlv, Tlv, TlvLvError, TlvType, TlvTypeField, WritableTlv};
use crate::{
cfdp::{InvalidTlvTypeFieldError, TlvLvDataTooLargeError},
ByteConversionError,
};
use delegate::delegate;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct MsgToUserTlv<'data> {
pub tlv: Tlv<'data>,
}
impl<'data> MsgToUserTlv<'data> {
/// Create a new message to user TLV where the type field is set correctly.
pub fn new(value: &'data [u8]) -> Result<MsgToUserTlv<'data>, TlvLvDataTooLargeError> {
Ok(Self {
tlv: Tlv::new(TlvType::MsgToUser, value)?,
})
}
delegate! {
to self.tlv {
pub fn value(&self) -> &[u8];
/// Helper method to retrieve the length of the value. Simply calls the [slice::len] method of
/// [Self::value]
pub fn len_value(&self) -> usize;
/// Returns the full raw length, including the length byte.
pub fn len_full(&self) -> usize;
/// Checks whether the value field is empty.
pub fn is_empty(&self) -> bool;
/// If the TLV was generated from a raw bytestream using [Self::from_bytes], the raw start
/// of the TLV can be retrieved with this method.
pub fn raw_data(&self) -> Option<&[u8]>;
}
}
pub fn is_standard_tlv(&self) -> bool {
true
}
pub fn tlv_type(&self) -> Option<TlvType> {
Some(TlvType::MsgToUser)
}
/// Check whether this message is a reserved CFDP message like a Proxy Operation Message.
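///
/// A minimal sketch mirroring the unit test below (not a compiled doctest):
///
/// ```ignore
/// let msg_to_user = MsgToUserTlv::new("cfdp".as_bytes()).unwrap();
/// assert!(msg_to_user.is_reserved_cfdp_msg());
/// ```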
pub fn is_reserved_cfdp_msg(&self) -> bool {
if self.value().len() < 4 {
return false;
}
let value = self.value();
if value[0] == b'c' && value[1] == b'f' && value[2] == b'd' && value[3] == b'p' {
return true;
}
false
}
/// This is a thin wrapper around [Tlv::from_bytes] with the additional type check.
pub fn from_bytes(buf: &'data [u8]) -> Result<MsgToUserTlv<'data>, TlvLvError> {
let msg_to_user = Self {
tlv: Tlv::from_bytes(buf)?,
};
match msg_to_user.tlv.tlv_type_field() {
TlvTypeField::Standard(tlv_type) => {
if tlv_type != TlvType::MsgToUser {
return Err(InvalidTlvTypeFieldError {
found: tlv_type as u8,
expected: Some(TlvType::MsgToUser as u8),
}
.into());
}
}
TlvTypeField::Custom(raw) => {
return Err(InvalidTlvTypeFieldError {
found: raw,
expected: Some(TlvType::MsgToUser as u8),
}
.into());
}
}
Ok(msg_to_user)
}
pub fn to_tlv(&self) -> Tlv<'data> {
self.tlv
}
#[cfg(feature = "alloc")]
pub fn to_owned(&self) -> TlvOwned {
self.tlv.to_owned()
}
}
impl<'a> From<MsgToUserTlv<'a>> for Tlv<'a> {
fn from(value: MsgToUserTlv<'a>) -> Tlv<'a> {
value.to_tlv()
}
}
impl WritableTlv for MsgToUserTlv<'_> {
fn len_written(&self) -> usize {
self.len_full()
}
delegate!(
to self.tlv {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
}
);
}
impl GenericTlv for MsgToUserTlv<'_> {
fn tlv_type_field(&self) -> TlvTypeField {
self.tlv.tlv_type_field()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_basic() {
let custom_value: [u8; 4] = [1, 2, 3, 4];
let msg_to_user = MsgToUserTlv::new(&custom_value);
assert!(msg_to_user.is_ok());
let msg_to_user = msg_to_user.unwrap();
assert!(msg_to_user.is_standard_tlv());
assert_eq!(msg_to_user.tlv_type().unwrap(), TlvType::MsgToUser);
assert_eq!(
msg_to_user.tlv_type_field(),
TlvTypeField::Standard(TlvType::MsgToUser)
);
assert_eq!(msg_to_user.value(), custom_value);
assert_eq!(msg_to_user.value().len(), 4);
assert_eq!(msg_to_user.len_value(), 4);
assert_eq!(msg_to_user.len_full(), 6);
assert!(!msg_to_user.is_empty());
assert!(msg_to_user.raw_data().is_none());
assert!(!msg_to_user.is_reserved_cfdp_msg());
}
#[test]
fn test_reserved_msg_serialization() {
let custom_value: [u8; 4] = [1, 2, 3, 4];
let msg_to_user = MsgToUserTlv::new(&custom_value).unwrap();
let mut buf: [u8; 6] = [0; 6];
msg_to_user.write_to_bytes(&mut buf).unwrap();
assert_eq!(
buf,
[
TlvType::MsgToUser as u8,
custom_value.len() as u8,
1,
2,
3,
4
]
);
}
#[test]
fn test_msg_to_user_type_reduction() {
let custom_value: [u8; 4] = [1, 2, 3, 4];
let msg_to_user = MsgToUserTlv::new(&custom_value).unwrap();
let tlv = msg_to_user.to_tlv();
assert_eq!(
tlv.tlv_type_field(),
TlvTypeField::Standard(TlvType::MsgToUser)
);
assert_eq!(tlv.value(), custom_value);
}
#[test]
fn test_msg_to_user_to_tlv() {
let custom_value: [u8; 4] = [1, 2, 3, 4];
let msg_to_user = MsgToUserTlv::new(&custom_value).unwrap();
let tlv: Tlv = msg_to_user.into();
assert_eq!(msg_to_user.to_tlv(), tlv);
}
#[test]
fn test_msg_to_user_owner_converter() {
let custom_value: [u8; 4] = [1, 2, 3, 4];
let msg_to_user = MsgToUserTlv::new(&custom_value).unwrap();
let tlv = msg_to_user.to_owned();
assert_eq!(
tlv.tlv_type_field(),
TlvTypeField::Standard(TlvType::MsgToUser)
);
assert_eq!(tlv.value(), custom_value);
}
#[test]
fn test_reserved_msg_deserialization() {
let custom_value: [u8; 3] = [1, 2, 3];
let msg_to_user = MsgToUserTlv::new(&custom_value).unwrap();
let msg_to_user_vec = msg_to_user.to_vec();
let msg_to_user_from_bytes = MsgToUserTlv::from_bytes(&msg_to_user_vec).unwrap();
assert!(!msg_to_user.is_reserved_cfdp_msg());
assert_eq!(msg_to_user_from_bytes, msg_to_user);
assert_eq!(msg_to_user_from_bytes.value(), msg_to_user.value());
assert_eq!(msg_to_user_from_bytes.tlv_type(), msg_to_user.tlv_type());
}
#[test]
fn test_reserved_msg_deserialization_invalid_type() {
let trash: [u8; 5] = [TlvType::FlowLabel as u8, 3, 1, 2, 3];
let error = MsgToUserTlv::from_bytes(&trash).unwrap_err();
if let TlvLvError::InvalidTlvTypeField(inner) = error {
assert_eq!(inner.found, TlvType::FlowLabel as u8);
assert_eq!(inner.expected, Some(TlvType::MsgToUser as u8));
} else {
panic!("Wrong error type returned: {:?}", error);
}
}
#[test]
fn test_reserved_msg() {
let reserved_str = "cfdp";
let msg_to_user = MsgToUserTlv::new(reserved_str.as_bytes());
assert!(msg_to_user.is_ok());
let msg_to_user = msg_to_user.unwrap();
assert!(msg_to_user.is_reserved_cfdp_msg());
}
}


@ -1,11 +0,0 @@
/// CRC algorithm used by the PUS standard, the CCSDS TC standard and the CFDP standard, using
/// a [crc::NoTable] as the CRC implementation.
pub const CRC_CCITT_FALSE_NO_TABLE: crc::Crc<u16, crc::NoTable> =
crc::Crc::<u16, crc::NoTable>::new(&crc::CRC_16_IBM_3740);
/// CRC algorithm used by the PUS standard, the CCSDS TC standard and the CFDP standard, using
/// [crc::Table<1>] as the CRC implementation.
pub const CRC_CCITT_FALSE: crc::Crc<u16> = crc::Crc::<u16>::new(&crc::CRC_16_IBM_3740);
/// CRC algorithm used by the PUS standard, the CCSDS TC standard and the CFDP standard, using
/// a [crc::Table<16>] large table as the CRC implementation.
pub const CRC_CCITT_FALSE_BIG_TABLE: crc::Crc<u16, crc::Table<16>> =
crc::Crc::<u16, crc::Table<16>>::new(&crc::CRC_16_IBM_3740);

src/ecss.rs (new file, 424 lines)

@ -0,0 +1,424 @@
//! Common definitions and helpers required to create PUS TMTC packets according to
//! [ECSS-E-ST-70-41C](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/)
use crate::{ByteConversionError, CcsdsPacket, SizeMissmatch};
use core::fmt::{Debug, Display, Formatter};
use core::mem::size_of;
use crc::{Crc, CRC_16_IBM_3740};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
pub type CrcType = u16;
/// CRC algorithm used by the PUS standard.
pub const CRC_CCITT_FALSE: Crc<u16> = Crc::<u16>::new(&CRC_16_IBM_3740);
pub const CCSDS_HEADER_LEN: usize = size_of::<crate::zc::SpHeader>();
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum PusServiceId {
Verification = 1,
Housekeeping = 3,
Event = 5,
Action = 8,
Test = 17,
}
/// All PUS versions. Only PUS C is supported by this library.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum PusVersion {
EsaPus = 0,
PusA = 1,
PusC = 2,
Invalid = 0b1111,
}
impl TryFrom<u8> for PusVersion {
type Error = ();
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
x if x == PusVersion::EsaPus as u8 => Ok(PusVersion::EsaPus),
x if x == PusVersion::PusA as u8 => Ok(PusVersion::PusA),
x if x == PusVersion::PusC as u8 => Ok(PusVersion::PusC),
_ => Err(()),
}
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum PacketTypeCodes {
Boolean = 1,
Enumerated = 2,
UnsignedInt = 3,
SignedInt = 4,
Real = 5,
BitString = 6,
OctetString = 7,
CharString = 8,
AbsoluteTime = 9,
RelativeTime = 10,
Deduced = 11,
Packet = 12,
}
pub type Ptc = PacketTypeCodes;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum UnsignedPfc {
OneByte = 4,
TwelveBits = 8,
TwoBytes = 12,
ThreeBytes = 13,
FourBytes = 14,
SixBytes = 15,
EightBytes = 16,
OneBit = 17,
TwoBits = 18,
ThreeBits = 19,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum RealPfc {
/// 4 octets simple precision format (IEEE)
Float = 1,
/// 8 octets double precision format (IEEE)
Double = 2,
/// 4 octets simple precision format (MIL-STD)
FloatMilStd = 3,
/// 8 octets double precision format (MIL-STD)
DoubleMilStd = 4,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum PusError {
VersionNotSupported(PusVersion),
IncorrectCrc(u16),
RawDataTooShort(usize),
NoRawData,
/// CRC16 needs to be calculated first
CrcCalculationMissing,
ByteConversionError(ByteConversionError),
}
impl Display for PusError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
PusError::VersionNotSupported(v) => {
write!(f, "PUS version {:?} not supported", v)
}
PusError::IncorrectCrc(crc) => {
write!(f, "crc16 {:#04x} is incorrect", crc)
}
PusError::RawDataTooShort(size) => {
write!(
f,
"deserialization error, provided raw data with size {} too short",
size
)
}
PusError::NoRawData => {
write!(f, "no raw data provided")
}
PusError::CrcCalculationMissing => {
write!(f, "crc16 was not calculated")
}
PusError::ByteConversionError(e) => {
write!(f, "low level byte conversion error: {}", e)
}
}
}
}
#[cfg(feature = "std")]
impl Error for PusError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
if let PusError::ByteConversionError(e) = self {
return Some(e);
}
None
}
}
impl From<ByteConversionError> for PusError {
fn from(e: ByteConversionError) -> Self {
PusError::ByteConversionError(e)
}
}
pub trait PusPacket: CcsdsPacket {
const PUS_VERSION: PusVersion = PusVersion::PusC;
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn user_data(&self) -> Option<&[u8]>;
fn crc16(&self) -> Option<u16>;
}
pub(crate) fn crc_from_raw_data(raw_data: &[u8]) -> Result<u16, PusError> {
if raw_data.len() < 2 {
return Err(PusError::RawDataTooShort(raw_data.len()));
}
Ok(u16::from_be_bytes(
raw_data[raw_data.len() - 2..raw_data.len()]
.try_into()
.unwrap(),
))
}
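// Example: for a packet ending in the bytes [0xee, 0x63], the function above yields the CRC16
// value 0xee63, since the checksum is stored big-endian in the last two bytes.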
pub(crate) fn calc_pus_crc16(bytes: &[u8]) -> u16 {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(bytes);
digest.finalize()
}
pub(crate) fn crc_procedure(
calc_on_serialization: bool,
cached_crc16: &Option<u16>,
start_idx: usize,
curr_idx: usize,
slice: &[u8],
) -> Result<u16, PusError> {
let crc16;
if calc_on_serialization {
crc16 = calc_pus_crc16(&slice[start_idx..curr_idx])
} else if cached_crc16.is_none() {
return Err(PusError::CrcCalculationMissing);
} else {
crc16 = cached_crc16.unwrap();
}
Ok(crc16)
}
pub(crate) fn user_data_from_raw(
current_idx: usize,
total_len: usize,
raw_data_len: usize,
slice: &[u8],
) -> Result<Option<&[u8]>, PusError> {
match current_idx {
_ if current_idx == total_len - 2 => Ok(None),
_ if current_idx > total_len - 2 => Err(PusError::RawDataTooShort(raw_data_len)),
_ => Ok(Some(&slice[current_idx..total_len - 2])),
}
}
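// Note on the check below: if the two CRC16 bytes appended to the packet are correct, running
// the CRC-CCITT (CRC-16-IBM-3740) over the complete packet including those trailing bytes
// produces a remainder of 0, so a zero result means the checksum matches.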
pub(crate) fn verify_crc16_from_raw(raw_data: &[u8], crc16: u16) -> Result<(), PusError> {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(raw_data);
if digest.finalize() == 0 {
return Ok(());
}
Err(PusError::IncorrectCrc(crc16))
}
macro_rules! ccsds_impl {
() => {
delegate!(to self.sp_header {
fn ccsds_version(&self) -> u8;
fn packet_id(&self) -> crate::PacketId;
fn psc(&self) -> crate::PacketSequenceCtrl;
fn data_len(&self) -> u16;
});
}
}
macro_rules! sp_header_impls {
() => {
delegate!(to self.sp_header {
pub fn set_apid(&mut self, apid: u16) -> bool;
pub fn set_seq_count(&mut self, seq_count: u16) -> bool;
pub fn set_seq_flags(&mut self, seq_flag: SequenceFlags);
});
}
}
pub(crate) use ccsds_impl;
pub(crate) use sp_header_impls;
/// Generic trait for ECSS enumerations which consist of a PFC field denoting their bit length
/// and an unsigned value. The trait makes no assumptions about the actual type of the unsigned
/// value and only requires implementors to implement a function which writes the enumeration into
/// a raw byte format.
pub trait EcssEnumeration {
/// Packet Format Code, which denotes the number of bits of the enumeration
fn pfc(&self) -> u8;
fn byte_width(&self) -> usize {
(self.pfc() / 8) as usize
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<(), ByteConversionError>;
}
pub trait EcssEnumerationExt: EcssEnumeration + Debug + Copy + Clone + PartialEq + Eq {}
pub trait ToBeBytes {
type ByteArray: AsRef<[u8]>;
fn to_be_bytes(&self) -> Self::ByteArray;
}
impl ToBeBytes for () {
type ByteArray = [u8; 0];
fn to_be_bytes(&self) -> Self::ByteArray {
[]
}
}
impl ToBeBytes for u8 {
type ByteArray = [u8; 1];
fn to_be_bytes(&self) -> Self::ByteArray {
u8::to_be_bytes(*self)
}
}
impl ToBeBytes for u16 {
type ByteArray = [u8; 2];
fn to_be_bytes(&self) -> Self::ByteArray {
u16::to_be_bytes(*self)
}
}
impl ToBeBytes for u32 {
type ByteArray = [u8; 4];
fn to_be_bytes(&self) -> Self::ByteArray {
u32::to_be_bytes(*self)
}
}
impl ToBeBytes for u64 {
type ByteArray = [u8; 8];
fn to_be_bytes(&self) -> Self::ByteArray {
u64::to_be_bytes(*self)
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct GenericEcssEnumWrapper<TYPE> {
val: TYPE,
}
impl<TYPE> GenericEcssEnumWrapper<TYPE> {
pub const fn ptc() -> PacketTypeCodes {
PacketTypeCodes::Enumerated
}
pub fn new(val: TYPE) -> Self {
Self { val }
}
}
impl<TYPE: ToBeBytes> EcssEnumeration for GenericEcssEnumWrapper<TYPE> {
fn pfc(&self) -> u8 {
size_of::<TYPE>() as u8 * 8_u8
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<(), ByteConversionError> {
if buf.len() < self.byte_width() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: self.byte_width(),
}));
}
buf[0..self.byte_width()].copy_from_slice(self.val.to_be_bytes().as_ref());
Ok(())
}
}
impl<TYPE: Debug + Copy + Clone + PartialEq + Eq + ToBeBytes> EcssEnumerationExt
for GenericEcssEnumWrapper<TYPE>
{
}
pub type EcssEnumU8 = GenericEcssEnumWrapper<u8>;
pub type EcssEnumU16 = GenericEcssEnumWrapper<u16>;
pub type EcssEnumU32 = GenericEcssEnumWrapper<u32>;
pub type EcssEnumU64 = GenericEcssEnumWrapper<u64>;
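// For example, an [EcssEnumU16] has a PFC of 16 and therefore a byte width of 2; writing it
// with [EcssEnumeration::write_to_be_bytes] produces the value as two big-endian bytes.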
#[cfg(test)]
mod tests {
use crate::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU8, EcssEnumeration};
use crate::ByteConversionError;
#[test]
fn test_enum_u8() {
let mut buf = [0, 0, 0];
let my_enum = EcssEnumU8::new(1);
my_enum
.write_to_be_bytes(&mut buf[1..2])
.expect("To byte conversion of u8 failed");
assert_eq!(buf[1], 1);
}
#[test]
fn test_enum_u16() {
let mut buf = [0, 0, 0];
let my_enum = EcssEnumU16::new(0x1f2f);
my_enum
.write_to_be_bytes(&mut buf[1..3])
.expect("To byte conversion of u8 failed");
assert_eq!(buf[1], 0x1f);
assert_eq!(buf[2], 0x2f);
}
#[test]
fn test_slice_u16_too_small() {
let mut buf = [0];
let my_enum = EcssEnumU16::new(0x1f2f);
let res = my_enum.write_to_be_bytes(&mut buf[0..1]);
assert!(res.is_err());
let error = res.unwrap_err();
match error {
ByteConversionError::ToSliceTooSmall(missmatch) => {
assert_eq!(missmatch.expected, 2);
assert_eq!(missmatch.found, 1);
}
_ => {
panic!("Unexpected error {:?}", error);
}
}
}
#[test]
fn test_enum_u32() {
let mut buf = [0, 0, 0, 0, 0];
let my_enum = EcssEnumU32::new(0x1f2f3f4f);
my_enum
.write_to_be_bytes(&mut buf[1..5])
.expect("To byte conversion of u8 failed");
assert_eq!(buf[1], 0x1f);
assert_eq!(buf[2], 0x2f);
assert_eq!(buf[3], 0x3f);
assert_eq!(buf[4], 0x4f);
}
#[test]
fn test_slice_u32_too_small() {
let mut buf = [0, 0, 0, 0, 0];
let my_enum = EcssEnumU32::new(0x1f2f3f4f);
let res = my_enum.write_to_be_bytes(&mut buf[0..3]);
assert!(res.is_err());
let error = res.unwrap_err();
match error {
ByteConversionError::ToSliceTooSmall(missmatch) => {
assert_eq!(missmatch.expected, 4);
assert_eq!(missmatch.found, 3);
}
_ => {
panic!("Unexpected error {:?}", error);
}
}
}
}


@ -1,43 +0,0 @@
//! PUS Service 5 Events
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum Subservice {
TmInfoReport = 1,
TmLowSeverityReport = 2,
TmMediumSeverityReport = 3,
TmHighSeverityReport = 4,
TcEnableEventGeneration = 5,
TcDisableEventGeneration = 6,
TcReportDisabledList = 7,
TmDisabledEventsReport = 8,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_conv_into_u8() {
let subservice: u8 = Subservice::TmLowSeverityReport.into();
assert_eq!(subservice, 2);
}
#[test]
fn test_conv_from_u8() {
let subservice: Subservice = 2.try_into().unwrap();
assert_eq!(subservice, Subservice::TmLowSeverityReport);
}
#[test]
fn test_conv_fails() {
let conversion = Subservice::try_from(9);
assert!(conversion.is_err());
let err = conversion.unwrap_err();
assert_eq!(err.number, 9);
}
}


@ -1,62 +0,0 @@
//! PUS Service 3 Housekeeping
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum Subservice {
// Regular HK
TcCreateHkReportStructure = 1,
TcDeleteHkReportStructures = 3,
TcEnableHkGeneration = 5,
TcDisableHkGeneration = 6,
TcReportHkReportStructures = 9,
TmHkPacket = 25,
TcGenerateOneShotHk = 27,
TcModifyHkCollectionInterval = 31,
// Diagnostics HK
TcCreateDiagReportStructure = 2,
TcDeleteDiagReportStructures = 4,
TcEnableDiagGeneration = 7,
TcDisableDiagGeneration = 8,
TmHkStructuresReport = 10,
TcReportDiagReportStructures = 11,
TmDiagStructuresReport = 12,
TmDiagPacket = 26,
TcGenerateOneShotDiag = 28,
TcModifyDiagCollectionInterval = 32,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_try_from_u8() {
let hk_report_subservice_raw = 25;
let hk_report: Subservice = Subservice::try_from(hk_report_subservice_raw).unwrap();
assert_eq!(hk_report, Subservice::TmHkPacket);
}
#[test]
fn test_into_u8() {
let hk_report_raw: u8 = Subservice::TmHkPacket.into();
assert_eq!(hk_report_raw, 25);
}
#[test]
fn test_partial_eq() {
let hk_report_raw = Subservice::TmHkPacket;
assert_ne!(hk_report_raw, Subservice::TcGenerateOneShotHk);
assert_eq!(hk_report_raw, Subservice::TmHkPacket);
}
#[test]
fn test_copy_clone() {
let hk_report = Subservice::TmHkPacket;
let hk_report_copy = hk_report;
assert_eq!(hk_report, hk_report_copy);
}
}


@ -1,610 +0,0 @@
//! Common definitions and helpers required to create PUS TMTC packets according to
//! [ECSS-E-ST-70-41C](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/)
//!
//! You can find the PUS telecommand types in the [tc] module and the PUS telemetry
//! types inside the [tm] module.
use crate::{
crc::{CRC_CCITT_FALSE, CRC_CCITT_FALSE_NO_TABLE},
ByteConversionError, CcsdsPacket,
};
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use core::fmt::Debug;
use core::mem::size_of;
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
pub mod event;
pub mod hk;
pub mod scheduling;
pub mod tc;
pub mod tm;
pub mod verification;
pub type CrcType = u16;
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
#[non_exhaustive]
pub enum PusServiceId {
/// Service 1
Verification = 1,
/// Service 2
DeviceAccess = 2,
/// Service 3
Housekeeping = 3,
/// Service 4
ParameterStatistics = 4,
/// Service 5
Event = 5,
/// Service 6
MemoryManagement = 6,
/// Service 8
Action = 8,
/// Service 9
TimeManagement = 9,
/// Service 11
Scheduling = 11,
/// Service 12
OnBoardMonitoring = 12,
/// Service 13
LargePacketTransfer = 13,
/// Service 14
RealTimeForwardingControl = 14,
/// Service 15
StorageAndRetrival = 15,
/// Service 17
Test = 17,
/// Service 18
OpsAndProcedures = 18,
/// Service 19
EventAction = 19,
/// Service 20
Parameter = 20,
/// Service 21
RequestSequencing = 21,
/// Service 22
PositionBasedScheduling = 22,
/// Service 23
FileManagement = 23,
}
/// All PUS versions. Only PUS C is supported by this library.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum PusVersion {
EsaPus = 0,
PusA = 1,
PusC = 2,
Invalid = 0b1111,
}
impl TryFrom<u8> for PusVersion {
type Error = ();
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
x if x == PusVersion::EsaPus as u8 => Ok(PusVersion::EsaPus),
x if x == PusVersion::PusA as u8 => Ok(PusVersion::PusA),
x if x == PusVersion::PusC as u8 => Ok(PusVersion::PusC),
_ => Err(()),
}
}
}
/// ECSS Packet Type Codes (PTCs).
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum PacketTypeCodes {
Boolean = 1,
Enumerated = 2,
UnsignedInt = 3,
SignedInt = 4,
Real = 5,
BitString = 6,
OctetString = 7,
CharString = 8,
AbsoluteTime = 9,
RelativeTime = 10,
Deduced = 11,
Packet = 12,
}
pub type Ptc = PacketTypeCodes;
/// ECSS Packet Field Codes (PFCs) for the unsigned [Ptc].
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum PfcUnsigned {
OneByte = 4,
TwelveBits = 8,
TwoBytes = 12,
ThreeBytes = 13,
FourBytes = 14,
SixBytes = 15,
EightBytes = 16,
OneBit = 17,
TwoBits = 18,
ThreeBits = 19,
}
/// ECSS Packet Field Codes (PFCs) for the real (floating point) [Ptc].
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum PfcReal {
/// 4 octets simple precision format (IEEE)
Float = 1,
/// 8 octets double precision format (IEEE)
Double = 2,
/// 4 octets simple precision format (MIL-STD)
FloatMilStd = 3,
/// 8 octets double precision format (MIL-STD)
DoubleMilStd = 4,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum PusError {
#[error("PUS version {0:?} not supported")]
VersionNotSupported(PusVersion),
#[error("checksum verification for crc16 {0:#06x} failed")]
ChecksumFailure(u16),
// CRC16 needs to be calculated first
//#[error("crc16 was not calculated")]
//CrcCalculationMissing,
#[error("pus error: {0}")]
ByteConversion(#[from] ByteConversionError),
}
/// Generic trait to describe common attributes for both PUS Telecommands (TC) and PUS Telemetry
/// (TM) packets. All PUS packets are also a special type of [CcsdsPacket]s.
pub trait PusPacket: CcsdsPacket {
const PUS_VERSION: PusVersion = PusVersion::PusC;
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn user_data(&self) -> &[u8];
fn opt_crc16(&self) -> Option<u16>;
}
pub(crate) fn crc_from_raw_data(raw_data: &[u8]) -> Result<u16, ByteConversionError> {
if raw_data.len() < 2 {
return Err(ByteConversionError::FromSliceTooSmall {
found: raw_data.len(),
expected: 2,
});
}
Ok(u16::from_be_bytes(
raw_data[raw_data.len() - 2..raw_data.len()]
.try_into()
.unwrap(),
))
}
pub(crate) fn calc_pus_crc16(bytes: &[u8]) -> u16 {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(bytes);
digest.finalize()
}
pub(crate) fn user_data_from_raw(
current_idx: usize,
total_len: usize,
slice: &[u8],
) -> Result<&[u8], ByteConversionError> {
match current_idx {
_ if current_idx > total_len - 2 => Err(ByteConversionError::FromSliceTooSmall {
found: total_len - 2,
expected: current_idx,
}),
_ => Ok(&slice[current_idx..total_len - 2]),
}
}
pub fn verify_crc16_ccitt_false_from_raw_to_pus_error(
raw_data: &[u8],
crc16: u16,
) -> Result<(), PusError> {
verify_crc16_ccitt_false_from_raw(raw_data)
.then_some(())
.ok_or(PusError::ChecksumFailure(crc16))
}
pub fn verify_crc16_ccitt_false_from_raw_to_pus_error_no_table(
raw_data: &[u8],
crc16: u16,
) -> Result<(), PusError> {
verify_crc16_ccitt_false_from_raw_no_table(raw_data)
.then_some(())
.ok_or(PusError::ChecksumFailure(crc16))
}
pub(crate) fn verify_crc16_ccitt_false_from_raw(raw_data: &[u8]) -> bool {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(raw_data);
if digest.finalize() == 0 {
return true;
}
false
}
pub(crate) fn verify_crc16_ccitt_false_from_raw_no_table(raw_data: &[u8]) -> bool {
let mut digest = CRC_CCITT_FALSE_NO_TABLE.digest();
digest.update(raw_data);
if digest.finalize() == 0 {
return true;
}
false
}
macro_rules! ccsds_impl {
() => {
delegate!(to self.sp_header {
#[inline]
fn ccsds_version(&self) -> u8;
#[inline]
fn packet_id(&self) -> crate::PacketId;
#[inline]
fn psc(&self) -> crate::PacketSequenceCtrl;
#[inline]
fn data_len(&self) -> u16;
});
}
}
macro_rules! sp_header_impls {
() => {
delegate!(to self.sp_header {
#[inline]
pub fn set_apid(&mut self, apid: u16) -> bool;
#[inline]
pub fn set_seq_count(&mut self, seq_count: u16) -> bool;
#[inline]
pub fn set_seq_flags(&mut self, seq_flag: SequenceFlags);
});
}
}
use crate::util::{GenericUnsignedByteField, ToBeBytes, UnsignedEnum};
pub(crate) use ccsds_impl;
pub(crate) use sp_header_impls;
/// Generic trait for ECSS enumerations which consist of a PFC field denoting their bit length
/// and an unsigned value. The trait makes no assumptions about the actual type of the unsigned
/// value and only requires implementors to implement a function which writes the enumeration into
/// a raw byte format.
pub trait EcssEnumeration: UnsignedEnum {
/// Packet Format Code, which denotes the number of bits of the enumeration
fn pfc(&self) -> u8;
}
pub trait EcssEnumerationExt: EcssEnumeration + Debug + Copy + Clone + PartialEq + Eq {}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct GenericEcssEnumWrapper<TYPE: Copy + Into<u64>> {
field: GenericUnsignedByteField<TYPE>,
}
impl<TYPE: Copy + Into<u64>> GenericEcssEnumWrapper<TYPE> {
pub const fn ptc() -> PacketTypeCodes {
PacketTypeCodes::Enumerated
}
pub const fn value_typed(&self) -> TYPE {
self.field.value_typed()
}
pub fn new(val: TYPE) -> Self {
Self {
field: GenericUnsignedByteField::new(val),
}
}
}
impl<TYPE: Copy + ToBeBytes + Into<u64>> UnsignedEnum for GenericEcssEnumWrapper<TYPE> {
fn size(&self) -> usize {
(self.pfc() / 8) as usize
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.field.write_to_be_bytes(buf)
}
fn value(&self) -> u64 {
self.field.value()
}
}
impl<TYPE: Copy + ToBeBytes + Into<u64>> EcssEnumeration for GenericEcssEnumWrapper<TYPE> {
fn pfc(&self) -> u8 {
size_of::<TYPE>() as u8 * 8_u8
}
}
impl<TYPE: Debug + Copy + Clone + PartialEq + Eq + ToBeBytes + Into<u64>> EcssEnumerationExt
for GenericEcssEnumWrapper<TYPE>
{
}
impl<T: Copy + Into<u64>> From<T> for GenericEcssEnumWrapper<T> {
fn from(value: T) -> Self {
Self::new(value)
}
}
macro_rules! generic_ecss_enum_typedefs_and_from_impls {
($($ty:ty => $Enum:ident),*) => {
$(
pub type $Enum = GenericEcssEnumWrapper<$ty>;
impl From<$Enum> for $ty {
fn from(value: $Enum) -> Self {
value.value_typed()
}
}
)*
};
}
// Generates the EcssEnum$TY type definitions as well as From<EcssEnum$TY> for $TY
// implementations (conversion of the wrapper back into its raw unsigned type).
generic_ecss_enum_typedefs_and_from_impls! {
u8 => EcssEnumU8,
u16 => EcssEnumU16,
u32 => EcssEnumU32,
u64 => EcssEnumU64
}
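// For u16, the invocation above expands to roughly:
//
// pub type EcssEnumU16 = GenericEcssEnumWrapper<u16>;
// impl From<EcssEnumU16> for u16 {
//     fn from(value: EcssEnumU16) -> Self {
//         value.value_typed()
//     }
// }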
/// Generic trait for PUS packet abstractions which can written to a raw slice as their raw
/// byte representation. This is especially useful for generic abstractions which depend only
/// on the serialization of those packets.
pub trait WritablePusPacket {
fn len_written(&self) -> usize;
/// Writes the packet to the given slice without writing the CRC.
///
/// The returned size is the written size WITHOUT the CRC.
fn write_to_bytes_no_crc(&self, slice: &mut [u8]) -> Result<usize, PusError>;
/// First uses [Self::write_to_bytes_no_crc] to write the packet to the given slice and then
/// uses the [CRC_CCITT_FALSE] to calculate the CRC and write it to the slice.
fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError> {
let mut curr_idx = self.write_to_bytes_no_crc(slice)?;
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(&slice[0..curr_idx]);
slice[curr_idx..curr_idx + 2].copy_from_slice(&digest.finalize().to_be_bytes());
curr_idx += 2;
Ok(curr_idx)
}
/// First uses [Self::write_to_bytes_no_crc] to write the packet to the given slice and then
/// uses the [CRC_CCITT_FALSE_NO_TABLE] to calculate the CRC and write it to the slice.
fn write_to_bytes_crc_no_table(&self, slice: &mut [u8]) -> Result<usize, PusError> {
let mut curr_idx = self.write_to_bytes_no_crc(slice)?;
let mut digest = CRC_CCITT_FALSE_NO_TABLE.digest();
digest.update(&slice[0..curr_idx]);
slice[curr_idx..curr_idx + 2].copy_from_slice(&digest.finalize().to_be_bytes());
curr_idx += 2;
Ok(curr_idx)
}
#[cfg(feature = "alloc")]
fn to_vec(&self) -> Result<Vec<u8>, PusError> {
// This is the correct way to do this. See
// [this issue](https://github.com/rust-lang/rust-clippy/issues/4483) for caveats of more
// "efficient" implementations.
let mut vec = alloc::vec![0; self.len_written()];
self.write_to_bytes(&mut vec)?;
Ok(vec)
}
}
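// Usage sketch for [WritablePusPacket], assuming `pus_packet` is some type implementing the
// trait and the `alloc` feature is active. The returned vector contains the full packet
// including the trailing CRC16:
//
// let raw_packet = pus_packet.to_vec()?;
// assert_eq!(raw_packet.len(), pus_packet.len_written());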
#[cfg(test)]
mod tests {
use alloc::string::ToString;
use crate::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU8, UnsignedEnum};
use crate::ByteConversionError;
use super::*;
#[cfg(feature = "serde")]
use crate::tests::generic_serde_test;
#[test]
fn test_enum_u8() {
let mut buf = [0, 0, 0];
let my_enum = EcssEnumU8::new(1);
assert_eq!(EcssEnumU8::ptc(), Ptc::Enumerated);
assert_eq!(my_enum.size(), 1);
assert_eq!(my_enum.pfc(), 8);
my_enum
.write_to_be_bytes(&mut buf[1..2])
.expect("To byte conversion of u8 failed");
assert_eq!(buf[1], 1);
assert_eq!(my_enum.value(), 1);
assert_eq!(my_enum.value_typed(), 1);
let enum_as_u8: u8 = my_enum.into();
assert_eq!(enum_as_u8, 1);
let vec = my_enum.to_vec();
assert_eq!(vec, buf[1..2]);
}
#[test]
fn test_enum_u16() {
let mut buf = [0, 0, 0];
let my_enum = EcssEnumU16::new(0x1f2f);
my_enum
.write_to_be_bytes(&mut buf[1..3])
.expect("To byte conversion of u8 failed");
assert_eq!(my_enum.size(), 2);
assert_eq!(my_enum.pfc(), 16);
assert_eq!(buf[1], 0x1f);
assert_eq!(buf[2], 0x2f);
assert_eq!(my_enum.value(), 0x1f2f);
assert_eq!(my_enum.value_typed(), 0x1f2f);
let enum_as_raw: u16 = my_enum.into();
assert_eq!(enum_as_raw, 0x1f2f);
let vec = my_enum.to_vec();
assert_eq!(vec, buf[1..3]);
}
#[test]
fn test_slice_u16_too_small() {
let mut buf = [0];
let my_enum = EcssEnumU16::new(0x1f2f);
let res = my_enum.write_to_be_bytes(&mut buf[0..1]);
assert!(res.is_err());
let error = res.unwrap_err();
match error {
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(expected, 2);
assert_eq!(found, 1);
}
_ => {
panic!("Unexpected error {:?}", error);
}
}
}
#[test]
fn test_enum_u32() {
let mut buf = [0, 0, 0, 0, 0];
let my_enum = EcssEnumU32::new(0x1f2f3f4f);
my_enum
.write_to_be_bytes(&mut buf[1..5])
.expect("To byte conversion of u8 failed");
assert_eq!(buf[1], 0x1f);
assert_eq!(buf[2], 0x2f);
assert_eq!(buf[3], 0x3f);
assert_eq!(buf[4], 0x4f);
assert_eq!(my_enum.value(), 0x1f2f3f4f);
assert_eq!(my_enum.value_typed(), 0x1f2f3f4f);
let enum_as_raw: u32 = my_enum.into();
assert_eq!(enum_as_raw, 0x1f2f3f4f);
let vec = my_enum.to_vec();
assert_eq!(vec, buf[1..5]);
}
#[test]
fn test_slice_u32_too_small() {
let mut buf = [0, 0, 0, 0, 0];
let my_enum = EcssEnumU32::new(0x1f2f3f4f);
let res = my_enum.write_to_be_bytes(&mut buf[0..3]);
assert!(res.is_err());
let error = res.unwrap_err();
match error {
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(expected, 4);
assert_eq!(found, 3);
}
_ => {
panic!("Unexpected error {:?}", error);
}
}
}
#[test]
fn test_enum_u64() {
let mut buf = [0; 8];
let my_enum = EcssEnumU64::new(0x1f2f3f4f5f);
my_enum
.write_to_be_bytes(&mut buf)
.expect("To byte conversion of u64 failed");
assert_eq!(buf[3], 0x1f);
assert_eq!(buf[4], 0x2f);
assert_eq!(buf[5], 0x3f);
assert_eq!(buf[6], 0x4f);
assert_eq!(buf[7], 0x5f);
assert_eq!(my_enum.value(), 0x1f2f3f4f5f);
assert_eq!(my_enum.value_typed(), 0x1f2f3f4f5f);
let enum_as_raw: u64 = my_enum.into();
assert_eq!(enum_as_raw, 0x1f2f3f4f5f);
assert_eq!(u64::from_be_bytes(buf), 0x1f2f3f4f5f);
let vec = my_enum.to_vec();
assert_eq!(vec, buf);
}
#[test]
fn test_pus_error_display() {
let unsupport_version = PusError::VersionNotSupported(super::PusVersion::EsaPus);
let write_str = unsupport_version.to_string();
assert_eq!(write_str, "PUS version EsaPus not supported")
}
#[test]
fn test_service_id_from_u8() {
let verification_id_raw = 1;
let verification_id = PusServiceId::try_from(verification_id_raw).unwrap();
assert_eq!(verification_id, PusServiceId::Verification);
}
#[test]
fn test_ptc_from_u8() {
let ptc_raw = Ptc::AbsoluteTime as u8;
let ptc = Ptc::try_from(ptc_raw).unwrap();
assert_eq!(ptc, Ptc::AbsoluteTime);
}
#[test]
fn test_unsigned_pfc_from_u8() {
let pfc_raw = PfcUnsigned::OneByte as u8;
let pfc = PfcUnsigned::try_from(pfc_raw).unwrap();
assert_eq!(pfc, PfcUnsigned::OneByte);
}
#[test]
fn test_real_pfc_from_u8() {
let pfc_raw = PfcReal::Double as u8;
let pfc = PfcReal::try_from(pfc_raw).unwrap();
assert_eq!(pfc, PfcReal::Double);
}
#[test]
fn test_pus_error_eq_impl() {
assert_eq!(
PusError::VersionNotSupported(PusVersion::EsaPus),
PusError::VersionNotSupported(PusVersion::EsaPus)
);
}
#[test]
fn test_pus_error_clonable() {
let pus_error = PusError::ChecksumFailure(0x0101);
let cloned = pus_error;
assert_eq!(pus_error, cloned);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_pus_service_id() {
generic_serde_test(PusServiceId::Verification);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_ptc() {
generic_serde_test(Ptc::AbsoluteTime);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_pfc_unsigned() {
generic_serde_test(PfcUnsigned::EightBytes);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_pfc_real() {
generic_serde_test(PfcReal::Double);
}
}


@ -1,126 +0,0 @@
//! PUS Service 11 Scheduling
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Debug, PartialEq, Eq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum Subservice {
// Core subservices
TcEnableScheduling = 1,
TcDisableScheduling = 2,
TcResetScheduling = 3,
TcInsertActivity = 4,
TcDeleteActivityByRequestId = 5,
TcDeleteActivitiesByFilter = 6,
// Time shift subservices
TcTimeShiftActivityWithRequestId = 7,
TcTimeShiftActivitiesByFilter = 8,
TcTimeShiftAll = 15,
// Reporting subservices
TcDetailReportByRequestId = 9,
TmDetailReport = 10,
TcDetailReportByFilter = 11,
TcSummaryReportByRequestId = 12,
TmSummaryReport = 13,
TcSummaryReportByFilter = 14,
TcDetailReportAll = 16,
TcSummaryReportAll = 17,
// Subschedule subservices
TcReportSubscheduleStatus = 18,
TmReportSubscheduleStatus = 19,
TcEnableSubschedule = 20,
TcDisableSubschedule = 21,
// Group subservices
TcCreateScheduleGroup = 22,
TcDeleteScheduleGroup = 23,
TcEnableScheduleGroup = 24,
TcDisableScheduleGroup = 25,
TcReportAllGroupsStatus = 26,
TmReportAllGroupsStatus = 27,
}
/// This status applies to sub-schedules and groups as well, as specified in ECSS-E-ST-70-41C 8.11.3.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum SchedStatus {
Disabled = 0,
Enabled = 1,
}
impl From<bool> for SchedStatus {
#[inline]
fn from(value: bool) -> Self {
if value {
SchedStatus::Enabled
} else {
SchedStatus::Disabled
}
}
}
/// Time window types as specified in ECSS-E-ST-70-41C 8.11.3
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum TimeWindowType {
SelectAll = 0,
TimeTagToTimeTag = 1,
FromTimeTag = 2,
ToTimeTag = 3,
}
#[cfg(test)]
mod tests {
use super::*;
#[cfg(feature = "serde")]
use crate::tests::generic_serde_test;
#[test]
fn test_bool_conv_0() {
let enabled = true;
let status: SchedStatus = enabled.into();
assert_eq!(status, SchedStatus::Enabled)
}
#[test]
fn test_bool_conv_1() {
let enabled = false;
let status: SchedStatus = enabled.into();
assert_eq!(status, SchedStatus::Disabled)
}
#[test]
fn test_conv_into_u8() {
let subservice: u8 = Subservice::TcCreateScheduleGroup.into();
assert_eq!(subservice, 22);
}
#[test]
fn test_conv_from_u8() {
let subservice: Subservice = 22u8.try_into().unwrap();
assert_eq!(subservice, Subservice::TcCreateScheduleGroup);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_subservice_id() {
generic_serde_test(Subservice::TcEnableScheduling);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_sched_status() {
generic_serde_test(SchedStatus::Enabled);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_time_window_type() {
generic_serde_test(TimeWindowType::SelectAll);
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,35 +0,0 @@
//! PUS Service 1 Verification
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum Subservice {
TmAcceptanceSuccess = 1,
TmAcceptanceFailure = 2,
TmStartSuccess = 3,
TmStartFailure = 4,
TmStepSuccess = 5,
TmStepFailure = 6,
TmCompletionSuccess = 7,
TmCompletionFailure = 8,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_conv_into_u8() {
let subservice: u8 = Subservice::TmCompletionSuccess.into();
assert_eq!(subservice, 7);
}
#[test]
fn test_conv_from_u8() {
let subservice: Subservice = 7.try_into().unwrap();
assert_eq!(subservice, Subservice::TmCompletionSuccess);
}
}

File diff suppressed because it is too large


@ -1,250 +0,0 @@
use crate::MAX_SEQ_COUNT;
use core::cell::Cell;
use paste::paste;
#[cfg(feature = "std")]
pub use stdmod::*;
/// Core trait for objects which can provide a sequence count.
///
/// The core functions deliberately take a shared reference to allow easier usage with
/// static structs when using the interior mutability pattern. This can be achieved by using
/// [Cell], [core::cell::RefCell] or atomic types.
pub trait SequenceCountProvider {
type Raw: Into<u64>;
const MAX_BIT_WIDTH: usize;
fn get(&self) -> Self::Raw;
fn increment(&self);
fn get_and_increment(&self) -> Self::Raw {
let val = self.get();
self.increment();
val
}
}
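// Usage sketch, mirroring the behaviour checked in the tests below: a provider hands out the
// current value and then increments, wrapping back to 0 once its maximum value is reached.
//
// let provider = SeqCountProviderSimple::<u8>::default();
// assert_eq!(provider.get_and_increment(), 0);
// assert_eq!(provider.get_and_increment(), 1);
// assert_eq!(provider.get(), 2);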
#[derive(Clone)]
pub struct SeqCountProviderSimple<T: Copy> {
seq_count: Cell<T>,
max_val: T,
}
macro_rules! impl_for_primitives {
($($ty: ident,)+) => {
$(
paste! {
impl SeqCountProviderSimple<$ty> {
pub fn [<new_custom_max_val_ $ty>](max_val: $ty) -> Self {
Self {
seq_count: Cell::new(0),
max_val,
}
}
pub fn [<new_ $ty>]() -> Self {
Self {
seq_count: Cell::new(0),
max_val: $ty::MAX
}
}
}
impl Default for SeqCountProviderSimple<$ty> {
fn default() -> Self {
Self::[<new_ $ty>]()
}
}
impl SequenceCountProvider for SeqCountProviderSimple<$ty> {
type Raw = $ty;
const MAX_BIT_WIDTH: usize = core::mem::size_of::<Self::Raw>() * 8;
fn get(&self) -> Self::Raw {
self.seq_count.get()
}
fn increment(&self) {
self.get_and_increment();
}
fn get_and_increment(&self) -> Self::Raw {
let curr_count = self.seq_count.get();
if curr_count == self.max_val {
self.seq_count.set(0);
} else {
self.seq_count.set(curr_count + 1);
}
curr_count
}
}
}
)+
}
}
impl_for_primitives!(u8, u16, u32, u64,);
/// This is a sequence count provider which wraps around at [MAX_SEQ_COUNT].
#[derive(Clone)]
pub struct CcsdsSimpleSeqCountProvider {
provider: SeqCountProviderSimple<u16>,
}
impl Default for CcsdsSimpleSeqCountProvider {
fn default() -> Self {
Self {
provider: SeqCountProviderSimple::new_custom_max_val_u16(MAX_SEQ_COUNT),
}
}
}
impl SequenceCountProvider for CcsdsSimpleSeqCountProvider {
type Raw = u16;
const MAX_BIT_WIDTH: usize = core::mem::size_of::<Self::Raw>() * 8;
delegate::delegate! {
to self.provider {
fn get(&self) -> u16;
fn increment(&self);
fn get_and_increment(&self) -> u16;
}
}
}
#[cfg(feature = "std")]
pub mod stdmod {
use super::*;
use std::sync::{Arc, Mutex};
macro_rules! sync_clonable_seq_counter_impl {
($($ty: ident,)+) => {
$(paste! {
/// These sequence counters can be shared between threads and can also be
/// configured to wrap around at specified maximum values. Please note that
/// the API provided by this class will not panic on [Mutex] lock errors,
/// but it will yield 0 for the getter functions instead.
#[derive(Clone, Default)]
pub struct [<SeqCountProviderSync $ty:upper>] {
seq_count: Arc<Mutex<$ty>>,
max_val: $ty
}
impl [<SeqCountProviderSync $ty:upper>] {
pub fn new() -> Self {
Self::new_with_max_val($ty::MAX)
}
pub fn new_with_max_val(max_val: $ty) -> Self {
Self {
seq_count: Arc::default(),
max_val
}
}
}
impl SequenceCountProvider for [<SeqCountProviderSync $ty:upper>] {
type Raw = $ty;
const MAX_BIT_WIDTH: usize = core::mem::size_of::<Self::Raw>() * 8;
fn get(&self) -> $ty {
match self.seq_count.lock() {
Ok(counter) => *counter,
Err(_) => 0
}
}
fn increment(&self) {
self.get_and_increment();
}
fn get_and_increment(&self) -> $ty {
match self.seq_count.lock() {
Ok(mut counter) => {
let val = *counter;
if val == self.max_val {
*counter = 0;
} else {
*counter += 1;
}
val
}
Err(_) => 0,
}
}
}
})+
}
}
sync_clonable_seq_counter_impl!(u8, u16, u32, u64,);
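// Usage sketch: because the synchronized providers wrap an [Arc]ed [Mutex], clones share the
// same underlying counter and can be moved to other threads (with the [SequenceCountProvider]
// trait in scope):
//
// let provider = SeqCountProviderSyncU16::new();
// let provider_clone = provider.clone();
// std::thread::spawn(move || {
//     provider_clone.increment();
// });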
}
#[cfg(test)]
mod tests {
use crate::seq_count::{
CcsdsSimpleSeqCountProvider, SeqCountProviderSimple, SeqCountProviderSyncU8,
SequenceCountProvider,
};
use crate::MAX_SEQ_COUNT;
#[test]
fn test_u8_counter() {
let u8_counter = SeqCountProviderSimple::<u8>::default();
assert_eq!(u8_counter.get(), 0);
assert_eq!(u8_counter.get_and_increment(), 0);
assert_eq!(u8_counter.get_and_increment(), 1);
assert_eq!(u8_counter.get(), 2);
}
#[test]
fn test_u8_counter_overflow() {
let u8_counter = SeqCountProviderSimple::new_u8();
for _ in 0..256 {
u8_counter.increment();
}
assert_eq!(u8_counter.get(), 0);
}
#[test]
fn test_ccsds_counter() {
let ccsds_counter = CcsdsSimpleSeqCountProvider::default();
assert_eq!(ccsds_counter.get(), 0);
assert_eq!(ccsds_counter.get_and_increment(), 0);
assert_eq!(ccsds_counter.get_and_increment(), 1);
assert_eq!(ccsds_counter.get(), 2);
}
#[test]
fn test_ccsds_counter_overflow() {
let ccsds_counter = CcsdsSimpleSeqCountProvider::default();
for _ in 0..MAX_SEQ_COUNT + 1 {
ccsds_counter.increment();
}
assert_eq!(ccsds_counter.get(), 0);
}
#[test]
fn test_atomic_ref_counters() {
let sync_u8_counter = SeqCountProviderSyncU8::new();
assert_eq!(sync_u8_counter.get(), 0);
assert_eq!(sync_u8_counter.get_and_increment(), 0);
assert_eq!(sync_u8_counter.get_and_increment(), 1);
assert_eq!(sync_u8_counter.get(), 2);
}
#[test]
fn test_atomic_ref_counters_overflow() {
let sync_u8_counter = SeqCountProviderSyncU8::new();
for _ in 0..u8::MAX as u16 + 1 {
sync_u8_counter.increment();
}
assert_eq!(sync_u8_counter.get(), 0);
}
#[test]
fn test_atomic_ref_counters_overflow_custom_max_val() {
let sync_u8_counter = SeqCountProviderSyncU8::new_with_max_val(128);
for _ in 0..129 {
sync_u8_counter.increment();
}
assert_eq!(sync_u8_counter.get(), 0);
}
}

src/tc.rs (new file, 735 lines)

@ -0,0 +1,735 @@
//! This module contains all components required to create ECSS PUS C telecommand packets
//! according to [ECSS-E-ST-70-41C](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
//!
//! # Examples
//!
//! ```rust
//! use spacepackets::{CcsdsPacket, SpHeader};
//! use spacepackets::tc::{PusTc, PusTcSecondaryHeader};
//! use spacepackets::ecss::PusPacket;
//!
//! // Create a ping telecommand with no user application data
//! let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
//! let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
//! let pus_tc = PusTc::new(&mut sph, tc_header, None, true);
//! println!("{:?}", pus_tc);
//! assert_eq!(pus_tc.service(), 17);
//! assert_eq!(pus_tc.subservice(), 1);
//! assert_eq!(pus_tc.apid(), 0x02);
//!
//! // Serialize TC into a raw buffer
//! let mut test_buf: [u8; 32] = [0; 32];
//! let size = pus_tc
//! .write_to_bytes(test_buf.as_mut_slice())
//! .expect("Error writing TC to buffer");
//! assert_eq!(size, 13);
//! println!("{:?}", &test_buf[0..size]);
//!
//! // Deserialize from the raw byte representation
//! let pus_tc_deserialized = PusTc::from_bytes(&test_buf).expect("Deserialization failed");
//! assert_eq!(pus_tc.service(), 17);
//! assert_eq!(pus_tc.subservice(), 1);
//! assert_eq!(pus_tc.apid(), 0x02);
//! ```
use crate::ecss::{
ccsds_impl, crc_from_raw_data, crc_procedure, sp_header_impls, user_data_from_raw,
verify_crc16_from_raw, CrcType, PusError, PusPacket, PusVersion, CRC_CCITT_FALSE,
};
use crate::SpHeader;
use crate::{
ByteConversionError, CcsdsPacket, PacketType, SequenceFlags, SizeMissmatch, CCSDS_HEADER_LEN,
};
use core::mem::size_of;
use delegate::delegate;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use zerocopy::AsBytes;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
/// PUS C secondary header length is fixed
pub const PUC_TC_SECONDARY_HEADER_LEN: usize = size_of::<zc::PusTcSecondaryHeader>();
pub const PUS_TC_MIN_LEN_WITHOUT_APP_DATA: usize =
CCSDS_HEADER_LEN + PUC_TC_SECONDARY_HEADER_LEN + size_of::<CrcType>();
const PUS_VERSION: PusVersion = PusVersion::PusC;
#[derive(Copy, Clone, PartialEq, Debug)]
enum AckOpts {
Acceptance = 0b1000,
Start = 0b0100,
Progress = 0b0010,
Completion = 0b0001,
}
pub const ACK_ALL: u8 = AckOpts::Acceptance as u8
| AckOpts::Start as u8
| AckOpts::Progress as u8
| AckOpts::Completion as u8;
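// Example: to request acknowledgement only for acceptance and completion, the flags can be
// combined the same way, e.g. AckOpts::Acceptance as u8 | AckOpts::Completion as u8 == 0b1001.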
pub trait GenericPusTcSecondaryHeader {
fn pus_version(&self) -> PusVersion;
fn ack_flags(&self) -> u8;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn source_id(&self) -> u16;
}
pub mod zc {
use crate::ecss::{PusError, PusVersion};
use crate::tc::GenericPusTcSecondaryHeader;
use zerocopy::{AsBytes, FromBytes, NetworkEndian, Unaligned, U16};
#[derive(FromBytes, AsBytes, Unaligned)]
#[repr(C)]
pub struct PusTcSecondaryHeader {
version_ack: u8,
service: u8,
subservice: u8,
source_id: U16<NetworkEndian>,
}
impl TryFrom<crate::tc::PusTcSecondaryHeader> for PusTcSecondaryHeader {
type Error = PusError;
fn try_from(value: crate::tc::PusTcSecondaryHeader) -> Result<Self, Self::Error> {
if value.version != PusVersion::PusC {
return Err(PusError::VersionNotSupported(value.version));
}
Ok(PusTcSecondaryHeader {
version_ack: ((value.version as u8) << 4) | value.ack,
service: value.service,
subservice: value.subservice,
source_id: U16::from(value.source_id),
})
}
}
impl GenericPusTcSecondaryHeader for PusTcSecondaryHeader {
fn pus_version(&self) -> PusVersion {
PusVersion::try_from(self.version_ack >> 4 & 0b1111).unwrap_or(PusVersion::Invalid)
}
fn ack_flags(&self) -> u8 {
self.version_ack & 0b1111
}
fn service(&self) -> u8 {
self.service
}
fn subservice(&self) -> u8 {
self.subservice
}
fn source_id(&self) -> u16 {
self.source_id.get()
}
}
impl PusTcSecondaryHeader {
pub fn write_to_bytes(&self, slice: &mut [u8]) -> Option<()> {
self.write_to(slice)
}
pub fn from_bytes(slice: &[u8]) -> Option<Self> {
Self::read_from(slice)
}
}
}
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PusTcSecondaryHeader {
pub service: u8,
pub subservice: u8,
pub source_id: u16,
pub ack: u8,
pub version: PusVersion,
}
impl GenericPusTcSecondaryHeader for PusTcSecondaryHeader {
fn pus_version(&self) -> PusVersion {
self.version
}
fn ack_flags(&self) -> u8 {
self.ack
}
fn service(&self) -> u8 {
self.service
}
fn subservice(&self) -> u8 {
self.subservice
}
fn source_id(&self) -> u16 {
self.source_id
}
}
impl TryFrom<zc::PusTcSecondaryHeader> for PusTcSecondaryHeader {
type Error = ();
fn try_from(value: zc::PusTcSecondaryHeader) -> Result<Self, Self::Error> {
Ok(PusTcSecondaryHeader {
service: value.service(),
subservice: value.subservice(),
source_id: value.source_id(),
ack: value.ack_flags(),
version: PUS_VERSION,
})
}
}
impl PusTcSecondaryHeader {
pub fn new_simple(service: u8, subservice: u8) -> Self {
PusTcSecondaryHeader {
service,
subservice,
ack: ACK_ALL,
source_id: 0,
version: PusVersion::PusC,
}
}
pub fn new(service: u8, subservice: u8, ack: u8, source_id: u16) -> Self {
PusTcSecondaryHeader {
service,
subservice,
ack: ack & 0b1111,
source_id,
version: PusVersion::PusC,
}
}
}
/// This class models a PUS telecommand. It is the primary data structure used to generate the
/// raw byte representation of a PUS telecommand or to deserialize a telecommand from raw bytes.
///
/// This class also derives the [serde::Serialize] and [serde::Deserialize] traits if the
/// [serde] feature is used, which allows sending TC packets around in a raw byte format using a
/// serde provider like [postcard](https://docs.rs/postcard/latest/postcard/).
///
/// There is no spare bytes support yet.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PusTc<'slice> {
sp_header: SpHeader,
pub sec_header: PusTcSecondaryHeader,
/// If this is set to false, a manual call to [PusTc::calc_own_crc16] or
/// [PusTc::update_packet_fields] is necessary for the serialized or cached CRC16 to be valid.
pub calc_crc_on_serialization: bool,
#[cfg_attr(feature = "serde", serde(skip))]
raw_data: Option<&'slice [u8]>,
app_data: Option<&'slice [u8]>,
crc16: Option<u16>,
}
impl<'slice> PusTc<'slice> {
/// Generates a new struct instance.
///
/// # Arguments
///
/// * `sp_header` - Space packet header information. The correct packet type will be set
/// automatically
/// * `sec_header` - Information contained in the data field header, including the service
/// and subservice type
/// * `app_data` - Custom application data
/// * `set_ccsds_len` - Can be used to automatically update the CCSDS space packet data length
/// field. If this is not set to true, [PusTc::update_ccsds_data_len] can be called to set
/// this field to the correct value manually.
pub fn new(
sp_header: &mut SpHeader,
sec_header: PusTcSecondaryHeader,
app_data: Option<&'slice [u8]>,
set_ccsds_len: bool,
) -> Self {
sp_header.set_packet_type(PacketType::Tc);
sp_header.set_sec_header_flag();
let mut pus_tc = PusTc {
sp_header: *sp_header,
raw_data: None,
app_data,
sec_header,
calc_crc_on_serialization: true,
crc16: None,
};
if set_ccsds_len {
pus_tc.update_ccsds_data_len();
}
pus_tc
}
/// Simplified version of the [PusTc::new] function which only requires the service and
/// subservice instead of the full PUS TC secondary header.
pub fn new_simple(
sph: &mut SpHeader,
service: u8,
subservice: u8,
app_data: Option<&'slice [u8]>,
set_ccsds_len: bool,
) -> Self {
Self::new(
sph,
PusTcSecondaryHeader::new(service, subservice, ACK_ALL, 0),
app_data,
set_ccsds_len,
)
}
pub fn len_packed(&self) -> usize {
let mut length = PUS_TC_MIN_LEN_WITHOUT_APP_DATA;
if let Some(app_data) = self.app_data {
length += app_data.len();
}
length
}
pub fn set_ack_field(&mut self, ack: u8) -> bool {
if ack > 0b1111 {
return false;
}
self.sec_header.ack = ack & 0b1111;
true
}
pub fn set_source_id(&mut self, source_id: u16) {
self.sec_header.source_id = source_id;
}
sp_header_impls!();
/// Calculates the CCSDS space packet data length field and sets it.
/// This is called automatically if the `set_ccsds_len` argument in the [PusTc::new] call was
/// used.
/// If this was not done or the application data is set or changed after construction,
/// this function needs to be called to ensure that the data length field of the CCSDS header
/// is set correctly.
pub fn update_ccsds_data_len(&mut self) {
self.sp_header.data_len =
self.len_packed() as u16 - size_of::<crate::zc::SpHeader>() as u16 - 1;
}
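// Worked example matching the unit tests below: a ping TC without application data has a total
// serialized size of 13 bytes, so the CCSDS data length field becomes
// 13 - 6 (primary header) - 1 = 6.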
/// This function should be called before the TC packet is serialized if
/// [PusTc::calc_crc_on_serialization] is set to false. It will calculate and cache the CRC16.
pub fn calc_own_crc16(&mut self) {
let mut digest = CRC_CCITT_FALSE.digest();
let sph_zc = crate::zc::SpHeader::from(self.sp_header);
digest.update(sph_zc.as_bytes());
let pus_tc_header = zc::PusTcSecondaryHeader::try_from(self.sec_header).unwrap();
digest.update(pus_tc_header.as_bytes());
if let Some(app_data) = self.app_data {
digest.update(app_data);
}
self.crc16 = Some(digest.finalize())
}
/// This helper function calls both [PusTc::update_ccsds_data_len] and [PusTc::calc_own_crc16].
pub fn update_packet_fields(&mut self) {
self.update_ccsds_data_len();
self.calc_own_crc16();
}
/// Write the raw PUS byte representation to a provided buffer.
pub fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError> {
let mut curr_idx = 0;
let tc_header_len = size_of::<zc::PusTcSecondaryHeader>();
let total_size = self.len_packed();
if total_size > slice.len() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: slice.len(),
expected: total_size,
})
.into());
}
self.sp_header.write_to_be_bytes(slice)?;
curr_idx += CCSDS_HEADER_LEN;
let sec_header = zc::PusTcSecondaryHeader::try_from(self.sec_header).unwrap();
sec_header
.write_to_bytes(&mut slice[curr_idx..curr_idx + tc_header_len])
.ok_or(ByteConversionError::ZeroCopyToError)?;
curr_idx += tc_header_len;
if let Some(app_data) = self.app_data {
slice[curr_idx..curr_idx + app_data.len()].copy_from_slice(app_data);
curr_idx += app_data.len();
}
let crc16 = crc_procedure(
self.calc_crc_on_serialization,
&self.crc16,
0,
curr_idx,
slice,
)?;
slice[curr_idx..curr_idx + 2].copy_from_slice(crc16.to_be_bytes().as_slice());
curr_idx += 2;
Ok(curr_idx)
}
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn append_to_vec(&self, vec: &mut Vec<u8>) -> Result<usize, PusError> {
let sph_zc = crate::zc::SpHeader::from(self.sp_header);
let mut appended_len = PUS_TC_MIN_LEN_WITHOUT_APP_DATA;
if let Some(app_data) = self.app_data {
appended_len += app_data.len();
};
let start_idx = vec.len();
let mut ser_len = 0;
vec.extend_from_slice(sph_zc.as_bytes());
ser_len += sph_zc.as_bytes().len();
// The PUS version is hardcoded to PUS C
let pus_tc_header = zc::PusTcSecondaryHeader::try_from(self.sec_header).unwrap();
vec.extend_from_slice(pus_tc_header.as_bytes());
ser_len += pus_tc_header.as_bytes().len();
if let Some(app_data) = self.app_data {
vec.extend_from_slice(app_data);
ser_len += app_data.len();
}
let crc16 = crc_procedure(
self.calc_crc_on_serialization,
&self.crc16,
start_idx,
ser_len,
&vec[start_idx..ser_len],
)?;
vec.extend_from_slice(crc16.to_be_bytes().as_slice());
Ok(appended_len)
}
/// Create a [PusTc] instance from a raw slice. On success, it returns a tuple containing
/// the instance and the found byte length of the packet.
pub fn from_bytes(slice: &'slice [u8]) -> Result<(Self, usize), PusError> {
let raw_data_len = slice.len();
if raw_data_len < PUS_TC_MIN_LEN_WITHOUT_APP_DATA {
return Err(PusError::RawDataTooShort(raw_data_len));
}
let mut current_idx = 0;
let (sp_header, _) = SpHeader::from_be_bytes(&slice[0..CCSDS_HEADER_LEN])?;
current_idx += CCSDS_HEADER_LEN;
let total_len = sp_header.total_len();
if raw_data_len < total_len || total_len < PUS_TC_MIN_LEN_WITHOUT_APP_DATA {
return Err(PusError::RawDataTooShort(raw_data_len));
}
let sec_header = zc::PusTcSecondaryHeader::from_bytes(
&slice[current_idx..current_idx + PUC_TC_SECONDARY_HEADER_LEN],
)
.ok_or(ByteConversionError::ZeroCopyFromError)?;
current_idx += PUC_TC_SECONDARY_HEADER_LEN;
let raw_data = &slice[0..total_len];
let pus_tc = PusTc {
sp_header,
sec_header: PusTcSecondaryHeader::try_from(sec_header).unwrap(),
raw_data: Some(raw_data),
app_data: user_data_from_raw(current_idx, total_len, raw_data_len, slice)?,
calc_crc_on_serialization: false,
crc16: Some(crc_from_raw_data(raw_data)?),
};
verify_crc16_from_raw(raw_data, pus_tc.crc16.expect("CRC16 invalid"))?;
Ok((pus_tc, total_len))
}
pub fn raw(&self) -> Option<&'slice [u8]> {
self.raw_data
}
}
//noinspection RsTraitImplementation
impl CcsdsPacket for PusTc<'_> {
ccsds_impl!();
}
//noinspection RsTraitImplementation
impl PusPacket for PusTc<'_> {
delegate!(to self.sec_header {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
});
fn user_data(&self) -> Option<&[u8]> {
self.app_data
}
fn crc16(&self) -> Option<u16> {
self.crc16
}
}
//noinspection RsTraitImplementation
impl GenericPusTcSecondaryHeader for PusTc<'_> {
delegate!(to self.sec_header {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn source_id(&self) -> u16;
fn ack_flags(&self) -> u8;
});
}
#[cfg(all(test, feature = "std"))]
mod tests {
use crate::ecss::PusVersion::PusC;
use crate::ecss::{PusError, PusPacket};
use crate::tc::ACK_ALL;
use crate::tc::{GenericPusTcSecondaryHeader, PusTc, PusTcSecondaryHeader};
use crate::{ByteConversionError, SpHeader};
use crate::{CcsdsPacket, SequenceFlags};
use alloc::vec::Vec;
fn base_ping_tc_full_ctor() -> PusTc<'static> {
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
PusTc::new(&mut sph, tc_header, None, true)
}
fn base_ping_tc_simple_ctor() -> PusTc<'static> {
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
PusTc::new_simple(&mut sph, 17, 1, None, true)
}
fn base_ping_tc_simple_ctor_with_app_data(app_data: &'static [u8]) -> PusTc<'static> {
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
PusTc::new_simple(&mut sph, 17, 1, Some(app_data), true)
}
#[test]
fn test_tc_fields() {
let pus_tc = base_ping_tc_full_ctor();
assert_eq!(pus_tc.crc16(), None);
verify_test_tc(&pus_tc, false, 13);
}
#[test]
fn test_serialization() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_buf: [u8; 32] = [0; 32];
let size = pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(size, 13);
}
#[test]
fn test_deserialization() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_buf: [u8; 32] = [0; 32];
let size = pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(size, 13);
let (tc_from_raw, size) =
PusTc::from_bytes(&test_buf).expect("Creating PUS TC struct from raw buffer failed");
assert_eq!(size, 13);
verify_test_tc(&tc_from_raw, false, 13);
assert!(tc_from_raw.user_data().is_none());
verify_test_tc_raw(&test_buf);
verify_crc_no_app_data(&test_buf);
}
#[test]
fn test_update_func() {
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
let mut tc = PusTc::new_simple(&mut sph, 17, 1, None, false);
tc.calc_crc_on_serialization = false;
assert_eq!(tc.data_len(), 0);
tc.update_packet_fields();
assert_eq!(tc.data_len(), 6);
}
#[test]
fn test_deserialization_with_app_data() {
let pus_tc = base_ping_tc_simple_ctor_with_app_data(&[1, 2, 3]);
let mut test_buf: [u8; 32] = [0; 32];
let size = pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(size, 16);
let (tc_from_raw, size) =
PusTc::from_bytes(&test_buf).expect("Creating PUS TC struct from raw buffer failed");
assert_eq!(size, 16);
verify_test_tc(&tc_from_raw, true, 16);
let user_data = tc_from_raw.user_data().unwrap();
assert_eq!(user_data[0], 1);
assert_eq!(user_data[1], 2);
assert_eq!(user_data[2], 3);
}
#[test]
fn test_vec_ser_deser() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_vec = Vec::new();
let size = pus_tc
.append_to_vec(&mut test_vec)
.expect("Error writing TC to vector");
assert_eq!(size, 13);
verify_test_tc_raw(&test_vec.as_slice());
verify_crc_no_app_data(&test_vec.as_slice());
}
#[test]
fn test_incorrect_crc() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_buf: [u8; 32] = [0; 32];
pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
test_buf[12] = 0;
let res = PusTc::from_bytes(&test_buf);
assert!(res.is_err());
let err = res.unwrap_err();
assert!(matches!(err, PusError::IncorrectCrc { .. }));
}
#[test]
fn test_manual_crc_calculation() {
let mut pus_tc = base_ping_tc_simple_ctor();
pus_tc.calc_crc_on_serialization = false;
let mut test_buf: [u8; 32] = [0; 32];
pus_tc.calc_own_crc16();
pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
verify_test_tc_raw(&test_buf);
verify_crc_no_app_data(&test_buf);
}
#[test]
fn test_manual_crc_calculation_no_calc_call() {
let mut pus_tc = base_ping_tc_simple_ctor();
pus_tc.calc_crc_on_serialization = false;
let mut test_buf: [u8; 32] = [0; 32];
let res = pus_tc.write_to_bytes(test_buf.as_mut_slice());
assert!(res.is_err());
let err = res.unwrap_err();
assert!(matches!(err, PusError::CrcCalculationMissing { .. }));
}
#[test]
fn test_with_application_data_vec() {
let pus_tc = base_ping_tc_simple_ctor_with_app_data(&[1, 2, 3]);
verify_test_tc(&pus_tc, true, 16);
let mut test_vec = Vec::new();
let size = pus_tc
.append_to_vec(&mut test_vec)
.expect("Error writing TC to vector");
assert_eq!(test_vec[11], 1);
assert_eq!(test_vec[12], 2);
assert_eq!(test_vec[13], 3);
assert_eq!(size, 16);
}
#[test]
fn test_write_buf_too_small() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_buf = [0; 12];
let res = pus_tc.write_to_bytes(test_buf.as_mut_slice());
assert!(res.is_err());
let err = res.unwrap_err();
match err {
PusError::ByteConversionError(err) => match err {
ByteConversionError::ToSliceTooSmall(missmatch) => {
assert_eq!(missmatch.expected, pus_tc.len_packed());
assert_eq!(missmatch.found, 12);
}
_ => panic!("Unexpected error"),
},
_ => panic!("Unexpected error"),
}
}
#[test]
fn test_with_application_data_buf() {
let pus_tc = base_ping_tc_simple_ctor_with_app_data(&[1, 2, 3]);
verify_test_tc(&pus_tc, true, 16);
let mut test_buf: [u8; 32] = [0; 32];
let size = pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(test_buf[11], 1);
assert_eq!(test_buf[12], 2);
assert_eq!(test_buf[13], 3);
assert_eq!(size, 16);
}
#[test]
fn test_custom_setters() {
let mut pus_tc = base_ping_tc_simple_ctor();
let mut test_buf: [u8; 32] = [0; 32];
pus_tc.set_apid(0x7ff);
pus_tc.set_seq_count(0x3fff);
pus_tc.set_ack_field(0b11);
pus_tc.set_source_id(0xffff);
pus_tc.set_seq_flags(SequenceFlags::Unsegmented);
assert_eq!(pus_tc.source_id(), 0xffff);
assert_eq!(pus_tc.seq_count(), 0x3fff);
assert_eq!(pus_tc.ack_flags(), 0b11);
assert_eq!(pus_tc.apid(), 0x7ff);
assert_eq!(pus_tc.sequence_flags(), SequenceFlags::Unsegmented);
pus_tc.calc_own_crc16();
pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(test_buf[0], 0x1f);
assert_eq!(test_buf[1], 0xff);
assert_eq!(test_buf[2], 0xff);
assert_eq!(test_buf[3], 0xff);
assert_eq!(test_buf[6], 0x23);
// Source ID 0xffff
assert_eq!(test_buf[9], 0xff);
assert_eq!(test_buf[10], 0xff);
}
fn verify_test_tc(tc: &PusTc, has_user_data: bool, exp_full_len: usize) {
assert_eq!(PusPacket::service(tc), 17);
assert_eq!(PusPacket::subservice(tc), 1);
assert!(tc.sec_header_flag());
assert_eq!(PusPacket::pus_version(tc), PusC);
if !has_user_data {
assert_eq!(tc.user_data(), None);
}
assert_eq!(tc.seq_count(), 0x34);
assert_eq!(tc.source_id(), 0);
assert_eq!(tc.apid(), 0x02);
assert_eq!(tc.ack_flags(), ACK_ALL);
assert_eq!(tc.len_packed(), exp_full_len);
let mut comp_header = SpHeader::tc_unseg(0x02, 0x34, exp_full_len as u16 - 7).unwrap();
comp_header.set_sec_header_flag();
assert_eq!(tc.sp_header, comp_header);
}
fn verify_test_tc_raw(slice: &impl AsRef<[u8]>) {
// Reference comparison implementation:
// https://github.com/us-irs/py-spacepackets/blob/v0.13.0/tests/ecss/test_pus_tc.py
let slice = slice.as_ref();
// Packet ID: TC packet type with the secondary header flag set -> first byte 0x18
assert_eq!(slice[0], 0x18);
// Lower byte of the APID 0x02
assert_eq!(slice[1], 0x02);
// Unsegmented packets
assert_eq!(slice[2], 0xc0);
// Sequence count 0x34
assert_eq!(slice[3], 0x34);
assert_eq!(slice[4], 0x00);
// Space data length of 6 equals total packet length of 13
assert_eq!(slice[5], 0x06);
// PUS Version C 0b0010 and ACK flags 0b1111
assert_eq!(slice[6], 0x2f);
// Service 17
assert_eq!(slice[7], 0x11);
// Subservice 1
assert_eq!(slice[8], 0x01);
// Source ID 0
assert_eq!(slice[9], 0x00);
assert_eq!(slice[10], 0x00);
}
fn verify_crc_no_app_data(slice: &impl AsRef<[u8]>) {
// Reference comparison implementation:
// https://github.com/us-irs/py-spacepackets/blob/v0.13.0/tests/ecss/test_pus_tc.py
let slice = slice.as_ref();
assert_eq!(slice[11], 0xee);
assert_eq!(slice[12], 0x63);
}
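// Editor's sketch (not part of the original test suite) of where the reference CRC bytes
// asserted in `verify_crc_no_app_data` come from. It assumes the crate's `CRC_CCITT_FALSE`
// digest (CRC-16/IBM-3740 parameters) is in scope here, as it is in the serialization code.
#[test]
fn test_crc_reference_sketch() {
    // The eleven packet bytes checked in `verify_test_tc_raw`, without the trailing CRC.
    let ping_tc_no_crc: [u8; 11] = [
        0x18, 0x02, 0xc0, 0x34, 0x00, 0x06, 0x2f, 0x11, 0x01, 0x00, 0x00,
    ];
    let mut digest = CRC_CCITT_FALSE.digest();
    digest.update(&ping_tc_no_crc);
    // Appended in big endian order, this yields the 0xee and 0x63 bytes asserted above.
    assert_eq!(digest.finalize().to_be_bytes(), [0xee, 0x63]);
}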
}


@ -2,8 +2,9 @@
//! [CCSDS 301.0-B-4](https://public.ccsds.org/Pubs/301x0b4e1.pdf) section 3.5 .
//! See [chrono::DateTime::format] for a usage example of the generated
//! [chrono::format::DelayedFormat] structs.
#[cfg(all(feature = "alloc", feature = "chrono"))]
pub use alloc_mod_chrono::*;
#[cfg(feature = "alloc")]
use chrono::format::{DelayedFormat, StrftimeItems};
use chrono::{DateTime, Utc};
/// Tuple of format string and formatted size for time code A.
///
@ -31,37 +32,36 @@ pub const FMT_STR_CODE_B_WITH_SIZE: (&str, usize) = ("%Y-%jT%T%.3f", 21);
/// Three digits are used for the decimal fraction and a terminator is added at the end.
pub const FMT_STR_CODE_B_TERMINATED_WITH_SIZE: (&str, usize) = ("%Y-%jT%T%.3fZ", 22);
#[cfg(all(feature = "alloc", feature = "chrono"))]
pub mod alloc_mod_chrono {
use super::*;
use chrono::{
format::{DelayedFormat, StrftimeItems},
DateTime, Utc,
};
/// Generates a time code formatter using the [FMT_STR_CODE_A_WITH_SIZE] format.
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn generate_time_code_a(date: &DateTime<Utc>) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_A_WITH_SIZE.0)
}
/// Generates a time code formatter using the [FMT_STR_CODE_A_WITH_SIZE] format.
pub fn generate_time_code_a(date: &DateTime<Utc>) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_A_WITH_SIZE.0)
}
/// Generates a time code formatter using the [FMT_STR_CODE_A_TERMINATED_WITH_SIZE] format.
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn generate_time_code_a_terminated(
date: &DateTime<Utc>,
) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_A_TERMINATED_WITH_SIZE.0)
}
/// Generates a time code formatter using the [FMT_STR_CODE_A_TERMINATED_WITH_SIZE] format.
pub fn generate_time_code_a_terminated(
date: &DateTime<Utc>,
) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_A_TERMINATED_WITH_SIZE.0)
}
/// Generates a time code formatter using the [FMT_STR_CODE_B_WITH_SIZE] format.
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn generate_time_code_b(date: &DateTime<Utc>) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_B_WITH_SIZE.0)
}
/// Generates a time code formatter using the [FMT_STR_CODE_B_WITH_SIZE] format.
pub fn generate_time_code_b(date: &DateTime<Utc>) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_B_WITH_SIZE.0)
}
/// Generates a time code formatter using the [FMT_STR_CODE_B_TERMINATED_WITH_SIZE] format.
pub fn generate_time_code_b_terminated(
date: &DateTime<Utc>,
) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_B_TERMINATED_WITH_SIZE.0)
}
/// Generates a time code formatter using the [FMT_STR_CODE_B_TERMINATED_WITH_SIZE] format.
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn generate_time_code_b_terminated(
date: &DateTime<Utc>,
) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_B_TERMINATED_WITH_SIZE.0)
}
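// Editor's illustration, not part of the original module: the helpers return a lazy
// `DelayedFormat`, so the stamp is only rendered when it is formatted. Its length then
// matches the size entry of the corresponding tuple, which the tests below verify.
// Assumes the `std` feature for the `format!` call.
#[cfg(feature = "std")]
#[allow(dead_code)]
fn example_code_a_stamp(date: &DateTime<Utc>) -> std::string::String {
    let stamp = std::format!("{}", generate_time_code_a(date));
    debug_assert_eq!(stamp.len(), FMT_STR_CODE_A_WITH_SIZE.1);
    stamp
}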
#[cfg(test)]
@ -71,54 +71,25 @@ mod tests {
use std::format;
#[test]
fn test_ascii_timestamp_a_unterminated_epoch() {
let date = chrono::DateTime::UNIX_EPOCH;
fn test_ascii_timestamp_a_unterminated() {
let date = Utc::now();
let stamp_formatter = generate_time_code_a(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
let t_sep = stamp.find("T");
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 10);
assert_eq!(stamp.len(), FMT_STR_CODE_A_WITH_SIZE.1);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_ascii_timestamp_a_unterminated_now() {
let date = Utc::now();
let stamp_formatter = generate_time_code_a(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 10);
assert_eq!(stamp.len(), FMT_STR_CODE_A_WITH_SIZE.1);
}
#[test]
fn test_ascii_timestamp_a_terminated_epoch() {
let date = chrono::DateTime::UNIX_EPOCH;
let stamp_formatter = generate_time_code_a_terminated(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 10);
let z_terminator = stamp.find('Z');
assert!(z_terminator.is_some());
assert_eq!(
z_terminator.unwrap(),
FMT_STR_CODE_A_TERMINATED_WITH_SIZE.1 - 1
);
assert_eq!(stamp.len(), FMT_STR_CODE_A_TERMINATED_WITH_SIZE.1);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_ascii_timestamp_a_terminated_now() {
fn test_ascii_timestamp_a_terminated() {
let date = Utc::now();
let stamp_formatter = generate_time_code_a_terminated(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
let t_sep = stamp.find("T");
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 10);
let z_terminator = stamp.find('Z');
let z_terminator = stamp.find("Z");
assert!(z_terminator.is_some());
assert_eq!(
z_terminator.unwrap(),
@ -128,55 +99,25 @@ mod tests {
}
#[test]
fn test_ascii_timestamp_b_unterminated_epoch() {
let date = chrono::DateTime::UNIX_EPOCH;
fn test_ascii_timestamp_b_unterminated() {
let date = Utc::now();
let stamp_formatter = generate_time_code_b(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
let t_sep = stamp.find("T");
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 8);
assert_eq!(stamp.len(), FMT_STR_CODE_B_WITH_SIZE.1);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_ascii_timestamp_b_unterminated_now() {
let date = Utc::now();
let stamp_formatter = generate_time_code_b(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 8);
assert_eq!(stamp.len(), FMT_STR_CODE_B_WITH_SIZE.1);
}
#[test]
fn test_ascii_timestamp_b_terminated_epoch() {
let date = chrono::DateTime::UNIX_EPOCH;
let stamp_formatter = generate_time_code_b_terminated(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 8);
let z_terminator = stamp.find('Z');
assert!(z_terminator.is_some());
assert_eq!(
z_terminator.unwrap(),
FMT_STR_CODE_B_TERMINATED_WITH_SIZE.1 - 1
);
assert_eq!(stamp.len(), FMT_STR_CODE_B_TERMINATED_WITH_SIZE.1);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_ascii_timestamp_b_terminated_now() {
fn test_ascii_timestamp_b_terminated() {
let date = Utc::now();
let stamp_formatter = generate_time_code_b_terminated(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
let t_sep = stamp.find("T");
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 8);
let z_terminator = stamp.find('Z');
let z_terminator = stamp.find("Z");
assert!(z_terminator.is_some());
assert_eq!(
z_terminator.unwrap(),

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,11 +1,7 @@
//! CCSDS Time Code Formats according to [CCSDS 301.0-B-4](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
use crate::ByteConversionError;
#[cfg(feature = "chrono")]
use chrono::{TimeZone, Utc};
use core::cmp::Ordering;
use crate::{ByteConversionError, SizeMissmatch};
use chrono::{DateTime, LocalResult, TimeZone, Utc};
use core::fmt::{Display, Formatter};
use core::ops::{Add, AddAssign, Sub};
use core::time::Duration;
#[allow(unused_imports)]
#[cfg(not(feature = "std"))]
@ -13,13 +9,10 @@ use num_traits::float::FloatCore;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
#[cfg(feature = "std")]
use std::time::{SystemTime, SystemTimeError};
#[cfg(feature = "std")]
pub use std_mod::*;
pub mod ascii;
pub mod cds;
@ -27,13 +20,10 @@ pub mod cuc;
pub const DAYS_CCSDS_TO_UNIX: i32 = -4383;
pub const SECONDS_PER_DAY: u32 = 86400;
pub const MS_PER_DAY: u32 = SECONDS_PER_DAY * 1000;
pub const NANOS_PER_SECOND: u32 = 1_000_000_000;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum CcsdsTimeCode {
pub enum CcsdsTimeCodes {
CucCcsdsEpoch = 0b001,
CucAgencyEpoch = 0b010,
Cds = 0b100,
@ -41,16 +31,16 @@ pub enum CcsdsTimeCode {
AgencyDefined = 0b110,
}
impl TryFrom<u8> for CcsdsTimeCode {
impl TryFrom<u8> for CcsdsTimeCodes {
type Error = ();
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
x if x == CcsdsTimeCode::CucCcsdsEpoch as u8 => Ok(CcsdsTimeCode::CucCcsdsEpoch),
x if x == CcsdsTimeCode::CucAgencyEpoch as u8 => Ok(CcsdsTimeCode::CucAgencyEpoch),
x if x == CcsdsTimeCode::Cds as u8 => Ok(CcsdsTimeCode::Cds),
x if x == CcsdsTimeCode::Ccs as u8 => Ok(CcsdsTimeCode::Ccs),
x if x == CcsdsTimeCode::AgencyDefined as u8 => Ok(CcsdsTimeCode::AgencyDefined),
x if x == CcsdsTimeCodes::CucCcsdsEpoch as u8 => Ok(CcsdsTimeCodes::CucCcsdsEpoch),
x if x == CcsdsTimeCodes::CucAgencyEpoch as u8 => Ok(CcsdsTimeCodes::CucAgencyEpoch),
x if x == CcsdsTimeCodes::Cds as u8 => Ok(CcsdsTimeCodes::Cds),
x if x == CcsdsTimeCodes::Ccs as u8 => Ok(CcsdsTimeCodes::Ccs),
x if x == CcsdsTimeCodes::AgencyDefined as u8 => Ok(CcsdsTimeCodes::AgencyDefined),
_ => Err(()),
}
}
@ -58,46 +48,75 @@ impl TryFrom<u8> for CcsdsTimeCode {
/// Retrieve the CCSDS time code from the p-field. If no valid time code identifier is found, the
/// value of the raw time code identification field is returned.
pub fn ccsds_time_code_from_p_field(pfield: u8) -> Result<CcsdsTimeCode, u8> {
pub fn ccsds_time_code_from_p_field(pfield: u8) -> Result<CcsdsTimeCodes, u8> {
let raw_bits = (pfield >> 4) & 0b111;
CcsdsTimeCode::try_from(raw_bits).map_err(|_| raw_bits)
CcsdsTimeCodes::try_from(raw_bits).map_err(|_| raw_bits)
}
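// Editor's sketch of the p-field decoding above. The enum is spelled `CcsdsTimeCodes` on
// one side of this diff and `CcsdsTimeCode` on the other; the sketch uses the former.
#[allow(dead_code)]
fn example_p_field_decoding() {
    // The three time code ID bits sit in bits 4..=6 of the p-field.
    let cds_pfield = (CcsdsTimeCodes::Cds as u8) << 4;
    assert_eq!(ccsds_time_code_from_p_field(cds_pfield), Ok(CcsdsTimeCodes::Cds));
    // A raw ID that maps to no known time code is handed back unchanged.
    assert_eq!(ccsds_time_code_from_p_field(0b111 << 4), Err(0b111));
}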
#[derive(Debug, PartialEq, Eq, Copy, Clone, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[error("date before ccsds epoch: {0:?}")]
pub struct DateBeforeCcsdsEpochError(UnixTime);
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum TimestampError {
InvalidTimeCode { expected: CcsdsTimeCode, found: u8 },
ByteConversion(ByteConversionError),
Cds(cds::CdsError),
Cuc(cuc::CucError),
/// Contains a tuple where the first value is the expected time code and the second
/// value is the found raw value.
InvalidTimeCode(CcsdsTimeCodes, u8),
ByteConversionError(ByteConversionError),
CdsError(cds::CdsError),
CucError(cuc::CucError),
CustomEpochNotSupported,
}
impl From<cds::CdsError> for TimestampError {
fn from(e: cds::CdsError) -> Self {
TimestampError::CdsError(e)
}
}
impl From<cuc::CucError> for TimestampError {
fn from(e: cuc::CucError) -> Self {
TimestampError::CucError(e)
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[derive(Debug, Clone)]
pub enum StdTimestampError {
SystemTimeError(SystemTimeError),
TimestampError(TimestampError),
}
#[cfg(feature = "std")]
impl From<TimestampError> for StdTimestampError {
fn from(v: TimestampError) -> Self {
Self::TimestampError(v)
}
}
#[cfg(feature = "std")]
impl From<SystemTimeError> for StdTimestampError {
fn from(v: SystemTimeError) -> Self {
Self::SystemTimeError(v)
}
}
impl Display for TimestampError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
TimestampError::InvalidTimeCode { expected, found } => {
TimestampError::InvalidTimeCode(time_code, raw_val) => {
write!(
f,
"invalid raw time code value {found} for time code {expected:?}"
"invalid raw time code value {} for time code {:?}",
raw_val, time_code
)
}
TimestampError::Cds(e) => {
write!(f, "cds error: {e}")
TimestampError::CdsError(e) => {
write!(f, "cds error {}", e)
}
TimestampError::Cuc(e) => {
write!(f, "cuc error: {e}")
TimestampError::CucError(e) => {
write!(f, "cuc error {}", e)
}
TimestampError::ByteConversion(e) => {
write!(f, "time stamp: {e}")
TimestampError::ByteConversionError(e) => {
write!(f, "byte conversion error {}", e)
}
TimestampError::CustomEpochNotSupported => {
write!(f, "custom epochs are not supported")
@ -110,42 +129,16 @@ impl Display for TimestampError {
impl Error for TimestampError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
TimestampError::ByteConversion(e) => Some(e),
TimestampError::Cds(e) => Some(e),
TimestampError::Cuc(e) => Some(e),
TimestampError::ByteConversionError(e) => Some(e),
TimestampError::CdsError(e) => Some(e),
TimestampError::CucError(e) => Some(e),
_ => None,
}
}
}
impl From<cds::CdsError> for TimestampError {
fn from(e: cds::CdsError) -> Self {
TimestampError::Cds(e)
}
}
impl From<cuc::CucError> for TimestampError {
fn from(e: cuc::CucError) -> Self {
TimestampError::Cuc(e)
}
}
#[cfg(feature = "std")]
pub mod std_mod {
use crate::time::TimestampError;
use std::time::SystemTimeError;
use thiserror::Error;
#[derive(Debug, Clone, Error)]
pub enum StdTimestampError {
#[error("system time error: {0:?}")]
SystemTime(#[from] SystemTimeError),
#[error("timestamp error: {0}")]
Timestamp(#[from] TimestampError),
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub fn seconds_since_epoch() -> f64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
@ -155,35 +148,32 @@ pub fn seconds_since_epoch() -> f64 {
/// Convert UNIX days to CCSDS days
///
/// - CCSDS epoch: 1958-01-01T00:00:00+00:00
/// - UNIX Epoch: 1970-01-01T00:00:00+00:00
#[inline]
/// - CCSDS epoch: 1958 January 1
/// - UNIX Epoch: 1970 January 1
pub const fn unix_to_ccsds_days(unix_days: i64) -> i64 {
unix_days - DAYS_CCSDS_TO_UNIX as i64
}
/// Convert CCSDS days to UNIX days
///
/// - CCSDS epoch: 1958-01-01T00:00:00+00:00
/// - UNIX Epoch: 1970-01-01T00:00:00+00:00
#[inline]
/// - CCSDS epoch: 1958 January 1
/// - UNIX Epoch: 1970 January 1
pub const fn ccsds_to_unix_days(ccsds_days: i64) -> i64 {
ccsds_days + DAYS_CCSDS_TO_UNIX as i64
}
/// Similar to [unix_to_ccsds_days], but operates on the epoch instead, which is the number of
/// elapsed seconds since the CCSDS and UNIX epoch times.
#[inline]
pub const fn unix_epoch_to_ccsds_epoch(unix_epoch: i64) -> i64 {
unix_epoch - (DAYS_CCSDS_TO_UNIX as i64 * SECONDS_PER_DAY as i64)
pub const fn unix_epoch_to_ccsds_epoch(unix_epoch: u64) -> u64 {
(unix_epoch as i64 - (DAYS_CCSDS_TO_UNIX as i64 * SECONDS_PER_DAY as i64)) as u64
}
#[inline]
pub const fn ccsds_epoch_to_unix_epoch(ccsds_epoch: i64) -> i64 {
ccsds_epoch + (DAYS_CCSDS_TO_UNIX as i64 * SECONDS_PER_DAY as i64)
pub const fn ccsds_epoch_to_unix_epoch(ccsds_epoch: u64) -> u64 {
(ccsds_epoch as i64 + (DAYS_CCSDS_TO_UNIX as i64 * SECONDS_PER_DAY as i64)) as u64
}
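// Short worked example (editor's addition) for the day conversions above: the CCSDS epoch
// (1958-01-01) lies 4383 days before the UNIX epoch (1970-01-01), which is why
// DAYS_CCSDS_TO_UNIX is -4383.
#[allow(dead_code)]
fn example_day_conversions() {
    assert_eq!(unix_to_ccsds_days(0), 4383);
    assert_eq!(ccsds_to_unix_days(4383), 0);
    assert_eq!(unix_to_ccsds_days(ccsds_to_unix_days(100)), 100);
}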
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub fn ms_of_day_using_sysclock() -> u32 {
ms_of_day(seconds_since_epoch())
}
@ -197,29 +187,18 @@ pub fn ms_of_day(seconds_since_epoch: f64) -> u32 {
}
pub trait TimeWriter {
fn len_written(&self) -> usize;
/// Generic function to write a timestamp into a raw buffer.
/// Returns the number of written bytes on success.
fn write_to_bytes(&self, bytes: &mut [u8]) -> Result<usize, TimestampError>;
#[cfg(feature = "alloc")]
fn to_vec(&self) -> Result<alloc::vec::Vec<u8>, TimestampError> {
let mut vec = alloc::vec![0; self.len_written()];
self.write_to_bytes(&mut vec)?;
Ok(vec)
}
}
pub trait TimeReader: Sized {
fn from_bytes(buf: &[u8]) -> Result<Self, TimestampError>;
pub trait TimeReader {
fn from_bytes(buf: &[u8]) -> Result<Self, TimestampError>
where
Self: Sized;
}
/// Trait for generic CCSDS time providers.
///
/// The UNIX helper methods and the helper method are not strictly necessary but extremely
/// practical because they are a very common and simple exchange format for time information.
/// Therefore, it was decided to keep them in this trait as well.
pub trait CcsdsTimeProvider {
fn len_as_bytes(&self) -> usize;
@ -228,312 +207,14 @@ pub trait CcsdsTimeProvider {
/// entry denotes the length of the pfield and the second entry is the value of the pfield
/// in big endian format.
fn p_field(&self) -> (usize, [u8; 2]);
fn ccdsd_time_code(&self) -> CcsdsTimeCode;
fn unix_secs(&self) -> i64;
fn subsec_nanos(&self) -> u32;
fn subsec_millis(&self) -> u16 {
(self.subsec_nanos() / 1_000_000) as u16
}
fn unix_time(&self) -> UnixTime {
UnixTime::new(self.unix_secs(), self.subsec_nanos())
}
#[cfg(feature = "chrono")]
fn chrono_date_time(&self) -> chrono::LocalResult<chrono::DateTime<chrono::Utc>> {
chrono::Utc.timestamp_opt(self.unix_secs(), self.subsec_nanos())
}
#[cfg(feature = "timelib")]
fn timelib_date_time(&self) -> Result<time::OffsetDateTime, time::error::ComponentRange> {
Ok(time::OffsetDateTime::from_unix_timestamp(self.unix_secs())?
+ time::Duration::nanoseconds(self.subsec_nanos().into()))
}
}
/// UNIX time: Elapsed non-leap seconds since 1970-01-01T00:00:00+00:00 UTC.
///
/// This is a commonly used time format and can therefore also be used as a generic format to
/// convert other CCSDS time formats to and from. The subsecond precision is in nanoseconds
/// similarly to other common time formats and libraries.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct UnixTime {
secs: i64,
subsec_nanos: u32,
}
impl UnixTime {
/// The UNIX epoch time: 1970-01-01T00:00:00+00:00 UTC.
pub const EPOCH: Self = Self {
secs: 0,
subsec_nanos: 0,
};
/// The minimum possible `UnixTime`.
pub const MIN: Self = Self {
secs: i64::MIN,
subsec_nanos: 0,
};
/// The maximum possible `UnixTime`.
pub const MAX: Self = Self {
secs: i64::MAX,
subsec_nanos: NANOS_PER_SECOND - 1,
};
/// Returns [None] if the subsecond nanosecond value is invalid (larger than the fraction of a
/// second).
pub fn new_checked(unix_seconds: i64, subsec_nanos: u32) -> Option<Self> {
if subsec_nanos >= NANOS_PER_SECOND {
return None;
}
Some(Self::new(unix_seconds, subsec_nanos))
}
/// Returns [None] if the subsecond millisecond value is invalid (larger than the fraction of a
/// second).
pub fn new_subsec_millis_checked(unix_seconds: i64, subsec_millis: u16) -> Option<Self> {
if subsec_millis >= 1000 {
return None;
}
Self::new_checked(unix_seconds, subsec_millis as u32 * 1_000_000)
}
/// This function will panic if the subsecond value is larger than the fraction of a second.
/// Use [Self::new_checked] if you want to handle this case without a panic.
pub const fn new(unix_seconds: i64, subsecond_nanos: u32) -> Self {
if subsecond_nanos >= NANOS_PER_SECOND {
panic!("invalid subsecond nanos value");
}
Self {
secs: unix_seconds,
subsec_nanos: subsecond_nanos,
}
}
/// This function will panic if the subsecond value is larger than the fraction of a second.
/// Use [Self::new_subsec_millis_checked] if you want to handle this case without a panic.
pub const fn new_subsec_millis(unix_seconds: i64, subsecond_millis: u16) -> Self {
if subsecond_millis >= 1000 {
panic!("invalid subsecond millisecond value");
}
Self {
secs: unix_seconds,
subsec_nanos: subsecond_millis as u32 * 1_000_000,
}
}
pub fn new_only_secs(unix_seconds: i64) -> Self {
Self {
secs: unix_seconds,
subsec_nanos: 0,
}
}
#[inline]
pub fn subsec_millis(&self) -> u16 {
(self.subsec_nanos / 1_000_000) as u16
}
pub fn subsec_nanos(&self) -> u32 {
self.subsec_nanos
}
#[cfg(feature = "std")]
pub fn now() -> Result<Self, SystemTimeError> {
let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?;
let epoch = now.as_secs();
Ok(Self::new(epoch as i64, now.subsec_nanos()))
}
#[inline]
pub fn unix_secs_f64(&self) -> f64 {
self.secs as f64 + (self.subsec_nanos as f64 / 1_000_000_000.0)
}
pub fn as_secs(&self) -> i64 {
self.secs
}
#[cfg(feature = "chrono")]
pub fn chrono_date_time(&self) -> chrono::LocalResult<chrono::DateTime<chrono::Utc>> {
Utc.timestamp_opt(self.secs, self.subsec_nanos)
}
#[cfg(feature = "timelib")]
pub fn timelib_date_time(&self) -> Result<time::OffsetDateTime, time::error::ComponentRange> {
Ok(time::OffsetDateTime::from_unix_timestamp(self.as_secs())?
+ time::Duration::nanoseconds(self.subsec_nanos().into()))
}
/// Calculates the difference in milliseconds between two [UnixTime] stamps.
pub fn diff_in_millis(&self, other: &UnixTime) -> Option<i64> {
let seconds_difference = self.secs.checked_sub(other.secs)?;
// Convert seconds difference to milliseconds
let milliseconds_difference = seconds_difference.checked_mul(1000)?;
// Calculate the difference in subsecond milliseconds directly
let subsecond_difference_nanos = self.subsec_nanos as i64 - other.subsec_nanos as i64;
// Combine the differences
Some(milliseconds_difference + (subsecond_difference_nanos / 1_000_000))
}
}
#[cfg(feature = "chrono")]
impl From<chrono::DateTime<chrono::Utc>> for UnixTime {
fn from(value: chrono::DateTime<chrono::Utc>) -> Self {
Self::new(value.timestamp(), value.timestamp_subsec_nanos())
}
}
#[cfg(feature = "timelib")]
impl From<time::OffsetDateTime> for UnixTime {
fn from(value: time::OffsetDateTime) -> Self {
Self::new(value.unix_timestamp(), value.nanosecond())
}
}
impl PartialOrd for UnixTime {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for UnixTime {
fn cmp(&self, other: &Self) -> Ordering {
if self == other {
return Ordering::Equal;
}
match self.secs.cmp(&other.secs) {
Ordering::Less => return Ordering::Less,
Ordering::Greater => return Ordering::Greater,
_ => (),
}
match self.subsec_millis().cmp(&other.subsec_millis()) {
Ordering::Less => {
return if self.secs < 0 {
Ordering::Greater
} else {
Ordering::Less
}
}
Ordering::Greater => {
return if self.secs < 0 {
Ordering::Less
} else {
Ordering::Greater
}
}
Ordering::Equal => (),
}
Ordering::Equal
}
}
/// Difference between two UNIX timestamps. The [Duration] type can not contain negative durations,
/// so the sign information is supplied separately.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct StampDiff {
pub positive_duration: bool,
pub duration_absolute: Duration,
}
impl Sub for UnixTime {
type Output = Option<StampDiff>;
fn sub(self, rhs: Self) -> Self::Output {
let difference = self.diff_in_millis(&rhs)?;
Some(if difference < 0 {
StampDiff {
positive_duration: false,
duration_absolute: Duration::from_millis(-difference as u64),
}
} else {
StampDiff {
positive_duration: true,
duration_absolute: Duration::from_millis(difference as u64),
}
})
}
}
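// Editor's sketch of the subtraction semantics above: the sign is carried separately
// because [Duration] cannot be negative. Mirrors `test_stamp_diff_negative` below.
#[allow(dead_code)]
fn example_stamp_diff() {
    let earlier = UnixTime::new_subsec_millis(1, 900);
    let later = UnixTime::new(3, 800 * 1_000_000);
    let diff = (earlier - later).expect("stamp diff error");
    assert!(!diff.positive_duration);
    assert_eq!(diff.duration_absolute, Duration::from_millis(1900));
}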
fn get_new_stamp_after_addition(current_stamp: &UnixTime, duration: Duration) -> UnixTime {
let mut new_subsec_nanos = current_stamp.subsec_nanos() + duration.subsec_nanos();
let mut new_unix_seconds = current_stamp.secs;
let mut increment_seconds = |value: u32| {
if new_unix_seconds < 0 {
new_unix_seconds = new_unix_seconds
.checked_sub(value.into())
.expect("new unix seconds would exceed i64::MIN");
} else {
new_unix_seconds = new_unix_seconds
.checked_add(value.into())
.expect("new unix seconds would exceed i64::MAX");
}
};
if new_subsec_nanos >= 1_000_000_000 {
new_subsec_nanos -= 1_000_000_000;
increment_seconds(1);
}
increment_seconds(
duration
.as_secs()
.try_into()
.expect("duration seconds exceeds u32::MAX"),
);
UnixTime::new(new_unix_seconds, new_subsec_nanos)
}
/// Please note that this operation will panic on the following conditions:
///
/// - Unix seconds after subtraction for stamps before the unix epoch exceeds [i64::MIN].
/// - Unix seconds after addition exceeds [i64::MAX].
/// - Seconds from duration to add exceeds [u32::MAX].
impl AddAssign<Duration> for UnixTime {
fn add_assign(&mut self, duration: Duration) {
*self = get_new_stamp_after_addition(self, duration);
}
}
/// Please note that this operation will panic for the following conditions:
///
/// - Unix seconds after subtraction for stamps before the unix epoch exceeds [i64::MIN].
/// - Unix seconds after addition exceeds [i64::MAX].
/// - Unix seconds exceeds [u32::MAX].
impl Add<Duration> for UnixTime {
type Output = Self;
fn add(self, duration: Duration) -> Self::Output {
get_new_stamp_after_addition(&self, duration)
}
}
impl Add<Duration> for &UnixTime {
type Output = UnixTime;
fn add(self, duration: Duration) -> Self::Output {
get_new_stamp_after_addition(self, duration)
}
fn ccdsd_time_code(&self) -> CcsdsTimeCodes;
fn unix_seconds(&self) -> i64;
fn date_time(&self) -> Option<DateTime<Utc>>;
}
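// Editor's sketch of the addition impls above: adding a [Duration] spills nanosecond
// overflow into the seconds field, mirroring `test_addition_spillover` below.
#[allow(dead_code)]
fn example_unix_time_addition() {
    let mut stamp = UnixTime::new_subsec_millis(1, 900);
    stamp += Duration::from_millis(100);
    assert_eq!(stamp.as_secs(), 2);
    assert_eq!(stamp.subsec_millis(), 0);
    let later = stamp + Duration::from_millis(1100);
    assert_eq!(later.as_secs(), 3);
    assert_eq!(later.subsec_millis(), 100);
}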
#[cfg(all(test, feature = "std"))]
mod tests {
use alloc::string::ToString;
use chrono::{Datelike, Timelike};
use std::{format, println};
use super::{cuc::CucError, *};
#[allow(dead_code)]
const UNIX_STAMP_CONST: UnixTime = UnixTime::new(5, 999_999_999);
#[allow(dead_code)]
const UNIX_STAMP_CONST_2: UnixTime = UnixTime::new_subsec_millis(5, 999);
use super::*;
#[test]
fn test_days_conversion() {
@ -542,221 +223,21 @@ mod tests {
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_get_current_time() {
let sec_floats = seconds_since_epoch();
assert!(sec_floats > 0.0);
}
#[test]
fn test_ms_of_day() {
let ms = ms_of_day(0.0);
assert_eq!(ms, 0);
let ms = ms_of_day(5.0);
assert_eq!(ms, 5000);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_ccsds_epoch() {
let now = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap();
let unix_epoch = now.as_secs();
let ccsds_epoch = unix_epoch_to_ccsds_epoch(now.as_secs() as i64) as u64;
let ccsds_epoch = unix_epoch_to_ccsds_epoch(now.as_secs());
assert!(ccsds_epoch > unix_epoch);
assert_eq!((ccsds_epoch - unix_epoch) % SECONDS_PER_DAY as u64, 0);
let days_diff = (ccsds_epoch - unix_epoch) / SECONDS_PER_DAY as u64;
assert_eq!(days_diff, -DAYS_CCSDS_TO_UNIX as u64);
}
#[test]
fn basic_unix_stamp_test() {
let stamp = UnixTime::new_only_secs(-200);
assert_eq!(stamp.secs, -200);
assert_eq!(stamp.subsec_millis(), 0);
let stamp = UnixTime::new_only_secs(250);
assert_eq!(stamp.secs, 250);
assert_eq!(stamp.subsec_millis(), 0);
}
#[test]
fn basic_float_unix_stamp_test() {
let stamp = UnixTime::new_subsec_millis_checked(500, 600).unwrap();
assert_eq!(stamp.secs, 500);
let subsec_millis = stamp.subsec_millis();
assert_eq!(subsec_millis, 600);
println!("{:?}", (500.6 - stamp.unix_secs_f64()).to_string());
assert!((500.6 - stamp.unix_secs_f64()).abs() < 0.0001);
}
#[test]
fn test_ord_larger() {
let stamp0 = UnixTime::new_only_secs(5);
let stamp1 = UnixTime::new_subsec_millis_checked(5, 500).unwrap();
let stamp2 = UnixTime::new_only_secs(6);
assert!(stamp1 > stamp0);
assert!(stamp2 > stamp0);
assert!(stamp2 > stamp1);
}
#[test]
fn test_ord_smaller() {
let stamp0 = UnixTime::new_only_secs(5);
let stamp1 = UnixTime::new_subsec_millis_checked(5, 500).unwrap();
let stamp2 = UnixTime::new_only_secs(6);
assert!(stamp0 < stamp1);
assert!(stamp0 < stamp2);
assert!(stamp1 < stamp2);
}
#[test]
fn test_ord_larger_neg_numbers() {
let stamp0 = UnixTime::new_only_secs(-5);
let stamp1 = UnixTime::new_subsec_millis_checked(-5, 500).unwrap();
let stamp2 = UnixTime::new_only_secs(-6);
assert!(stamp0 > stamp1);
assert!(stamp0 > stamp2);
assert!(stamp1 > stamp2);
assert!(stamp1 >= stamp2);
assert!(stamp0 >= stamp1);
}
#[test]
fn test_ord_smaller_neg_numbers() {
let stamp0 = UnixTime::new_only_secs(-5);
let stamp1 = UnixTime::new_subsec_millis_checked(-5, 500).unwrap();
let stamp2 = UnixTime::new_only_secs(-6);
assert!(stamp2 < stamp1);
assert!(stamp2 < stamp0);
assert!(stamp1 < stamp0);
assert!(stamp1 <= stamp0);
assert!(stamp2 <= stamp1);
}
#[allow(clippy::nonminimal_bool)]
#[test]
fn test_eq() {
let stamp0 = UnixTime::new(5, 0);
let stamp1 = UnixTime::new_only_secs(5);
assert_eq!(stamp0, stamp1);
assert!(stamp0 <= stamp1);
assert!(stamp0 >= stamp1);
assert!(!(stamp0 < stamp1));
assert!(!(stamp0 > stamp1));
}
#[test]
fn test_addition() {
let mut stamp0 = UnixTime::new_only_secs(1);
stamp0 += Duration::from_secs(5);
assert_eq!(stamp0.as_secs(), 6);
assert_eq!(stamp0.subsec_millis(), 0);
let stamp1 = stamp0 + Duration::from_millis(500);
assert_eq!(stamp1.secs, 6);
assert_eq!(stamp1.subsec_millis(), 500);
}
#[test]
fn test_addition_on_ref() {
let stamp0 = &UnixTime::new_subsec_millis_checked(20, 500).unwrap();
let stamp1 = stamp0 + Duration::from_millis(2500);
assert_eq!(stamp1.secs, 23);
assert_eq!(stamp1.subsec_millis(), 0);
}
#[test]
fn test_as_dt() {
let stamp = UnixTime::new_only_secs(0);
let dt = stamp.chrono_date_time().unwrap();
assert_eq!(dt.year(), 1970);
assert_eq!(dt.month(), 1);
assert_eq!(dt.day(), 1);
assert_eq!(dt.hour(), 0);
assert_eq!(dt.minute(), 0);
assert_eq!(dt.second(), 0);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_from_now() {
let stamp_now = UnixTime::now().unwrap();
let dt_now = stamp_now.chrono_date_time().unwrap();
assert!(dt_now.year() >= 2020);
}
#[test]
fn test_stamp_diff_positive_0() {
let stamp_later = UnixTime::new(2, 0);
let StampDiff {
positive_duration,
duration_absolute,
} = (stamp_later - UnixTime::new(1, 0)).expect("stamp diff error");
assert!(positive_duration);
assert_eq!(duration_absolute, Duration::from_secs(1));
}
#[test]
fn test_stamp_diff_positive_1() {
let stamp_later = UnixTime::new(3, 800 * 1_000_000);
let stamp_earlier = UnixTime::new_subsec_millis_checked(1, 900).unwrap();
let StampDiff {
positive_duration,
duration_absolute,
} = (stamp_later - stamp_earlier).expect("stamp diff error");
assert!(positive_duration);
assert_eq!(duration_absolute, Duration::from_millis(1900));
}
#[test]
fn test_stamp_diff_negative() {
let stamp_later = UnixTime::new_subsec_millis_checked(3, 800).unwrap();
let stamp_earlier = UnixTime::new_subsec_millis_checked(1, 900).unwrap();
let StampDiff {
positive_duration,
duration_absolute,
} = (stamp_earlier - stamp_later).expect("stamp diff error");
assert!(!positive_duration);
assert_eq!(duration_absolute, Duration::from_millis(1900));
}
#[test]
fn test_addition_spillover() {
let mut stamp0 = UnixTime::new_subsec_millis_checked(1, 900).unwrap();
stamp0 += Duration::from_millis(100);
assert_eq!(stamp0.secs, 2);
assert_eq!(stamp0.subsec_millis(), 0);
stamp0 += Duration::from_millis(1100);
assert_eq!(stamp0.secs, 3);
assert_eq!(stamp0.subsec_millis(), 100);
}
#[test]
fn test_cuc_error_printout() {
let cuc_error = CucError::InvalidCounterWidth(12);
let stamp_error = TimestampError::from(cuc_error);
assert_eq!(stamp_error.to_string(), format!("cuc error: {cuc_error}"));
}
#[test]
#[cfg(feature = "timelib")]
fn test_unix_stamp_as_timelib_datetime() {
let stamp_epoch = UnixTime::EPOCH;
let timelib_dt = stamp_epoch.timelib_date_time().unwrap();
assert_eq!(timelib_dt.year(), 1970);
assert_eq!(timelib_dt.month(), time::Month::January);
assert_eq!(timelib_dt.day(), 1);
assert_eq!(timelib_dt.hour(), 0);
assert_eq!(timelib_dt.minute(), 0);
assert_eq!(timelib_dt.second(), 0);
}
#[test]
#[cfg(feature = "timelib")]
fn test_unix_stamp_from_timelib_datetime() {
let timelib_dt = time::OffsetDateTime::UNIX_EPOCH;
let unix_time = UnixTime::from(timelib_dt);
let timelib_converted_back = unix_time.timelib_date_time().unwrap();
assert_eq!(timelib_dt, timelib_converted_back);
}
}

668 src/tm.rs Normal file

@ -0,0 +1,668 @@
//! This module contains all components required to create a ECSS PUS C telemetry packets according
//! to [ECSS-E-ST-70-41C](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
use crate::ecss::{
ccsds_impl, crc_from_raw_data, crc_procedure, sp_header_impls, user_data_from_raw,
verify_crc16_from_raw, CrcType, PusError, PusPacket, PusVersion, CRC_CCITT_FALSE,
};
use crate::{
ByteConversionError, CcsdsPacket, PacketType, SequenceFlags, SizeMissmatch, SpHeader,
CCSDS_HEADER_LEN,
};
use core::mem::size_of;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use zerocopy::AsBytes;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use delegate::delegate;
/// Minimum length of the PUS TM secondary header, without the timestamp.
pub const PUC_TM_MIN_SEC_HEADER_LEN: usize = 7;
pub const PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA: usize =
CCSDS_HEADER_LEN + PUC_TM_MIN_SEC_HEADER_LEN + size_of::<CrcType>();
pub trait GenericPusTmSecondaryHeader {
fn pus_version(&self) -> PusVersion;
fn sc_time_ref_status(&self) -> u8;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn msg_counter(&self) -> u16;
fn dest_id(&self) -> u16;
}
pub mod zc {
use super::GenericPusTmSecondaryHeader;
use crate::ecss::{PusError, PusVersion};
use zerocopy::{AsBytes, FromBytes, NetworkEndian, Unaligned, U16};
#[derive(FromBytes, AsBytes, Unaligned)]
#[repr(C)]
pub struct PusTmSecHeaderWithoutTimestamp {
pus_version_and_sc_time_ref_status: u8,
service: u8,
subservice: u8,
msg_counter: U16<NetworkEndian>,
dest_id: U16<NetworkEndian>,
}
pub struct PusTmSecHeader<'slice> {
pub(crate) zc_header: PusTmSecHeaderWithoutTimestamp,
pub(crate) timestamp: &'slice [u8],
}
impl TryFrom<crate::tm::PusTmSecondaryHeader<'_>> for PusTmSecHeaderWithoutTimestamp {
type Error = PusError;
fn try_from(header: crate::tm::PusTmSecondaryHeader) -> Result<Self, Self::Error> {
if header.pus_version != PusVersion::PusC {
return Err(PusError::VersionNotSupported(header.pus_version));
}
Ok(PusTmSecHeaderWithoutTimestamp {
pus_version_and_sc_time_ref_status: ((header.pus_version as u8) << 4)
| header.sc_time_ref_status,
service: header.service,
subservice: header.subservice,
msg_counter: U16::from(header.msg_counter),
dest_id: U16::from(header.dest_id),
})
}
}
impl PusTmSecHeaderWithoutTimestamp {
pub fn write_to_bytes(&self, slice: &mut [u8]) -> Option<()> {
self.write_to(slice)
}
pub fn from_bytes(slice: &[u8]) -> Option<Self> {
Self::read_from(slice)
}
}
impl GenericPusTmSecondaryHeader for PusTmSecHeaderWithoutTimestamp {
fn pus_version(&self) -> PusVersion {
PusVersion::try_from(self.pus_version_and_sc_time_ref_status >> 4 & 0b1111)
.unwrap_or(PusVersion::Invalid)
}
fn sc_time_ref_status(&self) -> u8 {
self.pus_version_and_sc_time_ref_status & 0b1111
}
fn service(&self) -> u8 {
self.service
}
fn subservice(&self) -> u8 {
self.subservice
}
fn msg_counter(&self) -> u16 {
self.msg_counter.get()
}
fn dest_id(&self) -> u16 {
self.dest_id.get()
}
}
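    // Editor's sketch of the packed first byte decoded above: the PUS version occupies the
    // upper nibble and the spacecraft time reference status the lower nibble.
    #[allow(dead_code)]
    fn example_packed_version_and_time_ref_byte() {
        let packed = ((PusVersion::PusC as u8) << 4) | 0b1010;
        assert_eq!((packed >> 4) & 0b1111, PusVersion::PusC as u8);
        assert_eq!(packed & 0b1111, 0b1010);
    }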
}
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PusTmSecondaryHeader<'slice> {
pus_version: PusVersion,
pub sc_time_ref_status: u8,
pub service: u8,
pub subservice: u8,
pub msg_counter: u16,
pub dest_id: u16,
pub time_stamp: &'slice [u8],
}
impl<'slice> PusTmSecondaryHeader<'slice> {
pub fn new_simple(service: u8, subservice: u8, time_stamp: &'slice [u8]) -> Self {
PusTmSecondaryHeader {
pus_version: PusVersion::PusC,
sc_time_ref_status: 0,
service,
subservice,
msg_counter: 0,
dest_id: 0,
time_stamp,
}
}
pub fn new(
service: u8,
subservice: u8,
msg_counter: u16,
dest_id: u16,
time_stamp: &'slice [u8],
) -> Self {
PusTmSecondaryHeader {
pus_version: PusVersion::PusC,
sc_time_ref_status: 0,
service,
subservice,
msg_counter,
dest_id,
time_stamp,
}
}
}
impl GenericPusTmSecondaryHeader for PusTmSecondaryHeader<'_> {
fn pus_version(&self) -> PusVersion {
self.pus_version
}
fn sc_time_ref_status(&self) -> u8 {
self.sc_time_ref_status
}
fn service(&self) -> u8 {
self.service
}
fn subservice(&self) -> u8 {
self.subservice
}
fn msg_counter(&self) -> u16 {
self.msg_counter
}
fn dest_id(&self) -> u16 {
self.dest_id
}
}
impl<'slice> TryFrom<zc::PusTmSecHeader<'slice>> for PusTmSecondaryHeader<'slice> {
type Error = ();
fn try_from(sec_header: zc::PusTmSecHeader<'slice>) -> Result<Self, Self::Error> {
Ok(PusTmSecondaryHeader {
pus_version: sec_header.zc_header.pus_version(),
sc_time_ref_status: sec_header.zc_header.sc_time_ref_status(),
service: sec_header.zc_header.service(),
subservice: sec_header.zc_header.subservice(),
msg_counter: sec_header.zc_header.msg_counter(),
dest_id: sec_header.zc_header.dest_id(),
time_stamp: sec_header.timestamp,
})
}
}
/// This class models a PUS C telemetry packet. It is the primary data structure to generate the
/// raw byte representation of PUS telemetry or to deserialize one from raw bytes.
///
/// This class also derives the [serde::Serialize] and [serde::Deserialize] traits if the [serde]
/// feature is used, which allows sending around TM packets in a raw byte format using a serde
/// provider like [postcard](https://docs.rs/postcard/latest/postcard/).
///
/// There is no spare bytes support yet.
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PusTm<'slice> {
pub sp_header: SpHeader,
pub sec_header: PusTmSecondaryHeader<'slice>,
/// If this is set to false, a manual call to [PusTm::calc_own_crc16] or
/// [PusTm::update_packet_fields] is necessary for the serialized or cached CRC16 to be valid.
pub calc_crc_on_serialization: bool,
#[cfg_attr(feature = "serde", serde(skip))]
raw_data: Option<&'slice [u8]>,
source_data: Option<&'slice [u8]>,
crc16: Option<u16>,
}
impl<'slice> PusTm<'slice> {
/// Generates a new struct instance.
///
/// # Arguments
///
/// * `sp_header` - Space packet header information. The correct packet type will be set
/// automatically
/// * `sec_header` - Information contained in the secondary header, including the service
/// and subservice type
/// * `source_data` - Custom source data
/// * `set_ccsds_len` - Can be used to automatically update the CCSDS space packet data length
/// field. If this is not set to true, [PusTm::update_ccsds_data_len] can be called to set
/// the correct value to this field manually
pub fn new(
sp_header: &mut SpHeader,
sec_header: PusTmSecondaryHeader<'slice>,
source_data: Option<&'slice [u8]>,
set_ccsds_len: bool,
) -> Self {
sp_header.set_packet_type(PacketType::Tm);
sp_header.set_sec_header_flag();
let mut pus_tm = PusTm {
sp_header: *sp_header,
raw_data: None,
source_data,
sec_header,
calc_crc_on_serialization: true,
crc16: None,
};
if set_ccsds_len {
pus_tm.update_ccsds_data_len();
}
pus_tm
}
pub fn len_packed(&self) -> usize {
let mut length = PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA;
length += self.sec_header.time_stamp.len();
if let Some(src_data) = self.source_data {
length += src_data.len();
}
length
}
pub fn time_stamp(&self) -> &'slice [u8] {
self.sec_header.time_stamp
}
pub fn source_data(&self) -> Option<&'slice [u8]> {
self.source_data
}
pub fn set_dest_id(&mut self, dest_id: u16) {
self.sec_header.dest_id = dest_id;
}
pub fn set_msg_counter(&mut self, msg_counter: u16) {
self.sec_header.msg_counter = msg_counter
}
pub fn set_sc_time_ref_status(&mut self, sc_time_ref_status: u8) {
self.sec_header.sc_time_ref_status = sc_time_ref_status & 0b1111;
}
sp_header_impls!();
/// This is called automatically if the `set_ccsds_len` argument in the [PusTm::new] call was set
/// to true. If this was not done, or if the timestamp or source data is set or changed after
/// construction, this function needs to be called to ensure that the data length field of the
/// CCSDS header is set correctly.
pub fn update_ccsds_data_len(&mut self) {
self.sp_header.data_len =
self.len_packed() as u16 - size_of::<crate::zc::SpHeader>() as u16 - 1;
}
/// This function should be called before the TM packet is serialized if
/// [PusTm::calc_crc_on_serialization] is set to false. It will calculate and cache the CRC16.
pub fn calc_own_crc16(&mut self) {
let mut digest = CRC_CCITT_FALSE.digest();
let sph_zc = crate::zc::SpHeader::from(self.sp_header);
digest.update(sph_zc.as_bytes());
let pus_tc_header = zc::PusTmSecHeaderWithoutTimestamp::try_from(self.sec_header).unwrap();
digest.update(pus_tc_header.as_bytes());
digest.update(self.sec_header.time_stamp);
if let Some(src_data) = self.source_data {
digest.update(src_data);
}
self.crc16 = Some(digest.finalize())
}
/// This helper function calls both [PusTm::update_ccsds_data_len] and [PusTm::calc_own_crc16].
pub fn update_packet_fields(&mut self) {
self.update_ccsds_data_len();
self.calc_own_crc16();
}
/// Write the raw PUS byte representation to a provided buffer.
pub fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError> {
let mut curr_idx = 0;
let total_size = self.len_packed();
if total_size > slice.len() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: slice.len(),
expected: total_size,
})
.into());
}
self.sp_header
.write_to_be_bytes(&mut slice[0..CCSDS_HEADER_LEN])?;
curr_idx += CCSDS_HEADER_LEN;
let sec_header_len = size_of::<zc::PusTmSecHeaderWithoutTimestamp>();
let sec_header = zc::PusTmSecHeaderWithoutTimestamp::try_from(self.sec_header).unwrap();
sec_header
.write_to_bytes(&mut slice[curr_idx..curr_idx + sec_header_len])
.ok_or(ByteConversionError::ZeroCopyToError)?;
curr_idx += sec_header_len;
let timestamp_len = self.sec_header.time_stamp.len();
slice[curr_idx..curr_idx + timestamp_len].copy_from_slice(self.sec_header.time_stamp);
curr_idx += timestamp_len;
if let Some(src_data) = self.source_data {
slice[curr_idx..curr_idx + src_data.len()].copy_from_slice(src_data);
curr_idx += src_data.len();
}
let crc16 = crc_procedure(
self.calc_crc_on_serialization,
&self.crc16,
0,
curr_idx,
slice,
)?;
slice[curr_idx..curr_idx + 2].copy_from_slice(crc16.to_be_bytes().as_slice());
curr_idx += 2;
Ok(curr_idx)
}
/// Append the raw PUS byte representation to a provided [alloc::vec::Vec]
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn append_to_vec(&self, vec: &mut Vec<u8>) -> Result<usize, PusError> {
let sph_zc = crate::zc::SpHeader::from(self.sp_header);
let mut appended_len =
PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA + self.sec_header.time_stamp.len();
if let Some(src_data) = self.source_data {
appended_len += src_data.len();
};
let start_idx = vec.len();
let mut ser_len = 0;
vec.extend_from_slice(sph_zc.as_bytes());
ser_len += sph_zc.as_bytes().len();
// The PUS version is hardcoded to PUS C
let sec_header = zc::PusTmSecHeaderWithoutTimestamp::try_from(self.sec_header).unwrap();
vec.extend_from_slice(sec_header.as_bytes());
ser_len += sec_header.as_bytes().len();
vec.extend_from_slice(self.sec_header.time_stamp);
ser_len += self.sec_header.time_stamp.len();
if let Some(src_data) = self.source_data {
vec.extend_from_slice(src_data);
ser_len += src_data.len();
}
let crc16 = crc_procedure(
self.calc_crc_on_serialization,
&self.crc16,
start_idx,
ser_len,
&vec[start_idx..start_idx + ser_len],
)?;
vec.extend_from_slice(crc16.to_be_bytes().as_slice());
Ok(appended_len)
}
/// Create a [PusTm] instance from a raw slice. On success, it returns a tuple containing
/// the instance and the found byte length of the packet. The timestamp length needs to be
/// known beforehand.
pub fn from_bytes(
slice: &'slice [u8],
timestamp_len: usize,
) -> Result<(Self, usize), PusError> {
let raw_data_len = slice.len();
if raw_data_len < PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA {
return Err(PusError::RawDataTooShort(raw_data_len));
}
let mut current_idx = 0;
let (sp_header, _) = SpHeader::from_be_bytes(&slice[0..CCSDS_HEADER_LEN])?;
current_idx += 6;
let total_len = sp_header.total_len();
if raw_data_len < total_len || total_len < PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA {
return Err(PusError::RawDataTooShort(raw_data_len));
}
let sec_header_zc = zc::PusTmSecHeaderWithoutTimestamp::from_bytes(
&slice[current_idx..current_idx + PUC_TM_MIN_SEC_HEADER_LEN],
)
.ok_or(ByteConversionError::ZeroCopyFromError)?;
current_idx += PUC_TM_MIN_SEC_HEADER_LEN;
let zc_sec_header_wrapper = zc::PusTmSecHeader {
zc_header: sec_header_zc,
timestamp: &slice[current_idx..current_idx + timestamp_len],
};
current_idx += timestamp_len;
let raw_data = &slice[0..total_len];
let pus_tm = PusTm {
sp_header,
sec_header: PusTmSecondaryHeader::try_from(zc_sec_header_wrapper).unwrap(),
raw_data: Some(&slice[0..total_len]),
source_data: user_data_from_raw(current_idx, total_len, raw_data_len, slice)?,
calc_crc_on_serialization: false,
crc16: Some(crc_from_raw_data(raw_data)?),
};
verify_crc16_from_raw(raw_data, pus_tm.crc16.expect("CRC16 invalid"))?;
Ok((pus_tm, total_len))
}
}
//noinspection RsTraitImplementation
impl CcsdsPacket for PusTm<'_> {
ccsds_impl!();
}
//noinspection RsTraitImplementation
impl PusPacket for PusTm<'_> {
delegate!(to self.sec_header {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
});
fn user_data(&self) -> Option<&[u8]> {
self.source_data
}
fn crc16(&self) -> Option<u16> {
self.crc16
}
}
//noinspection RsTraitImplementation
impl GenericPusTmSecondaryHeader for PusTm<'_> {
delegate!(to self.sec_header {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn dest_id(&self) -> u16;
fn msg_counter(&self) -> u16;
fn sc_time_ref_status(&self) -> u8;
});
}
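// Editor's usage sketch for the constructors above, mirroring the tests below: build a
// ping reply (service 17, subservice 2) with a 7 byte dummy timestamp, serialize it and
// read it back. 6 byte CCSDS header + 7 byte secondary header + 7 byte timestamp + 2 byte
// CRC add up to the expected 22 bytes.
#[allow(dead_code)]
fn example_ping_reply_roundtrip() {
    let time_stamp: [u8; 7] = [0, 1, 2, 3, 4, 5, 6];
    let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
    let sec_header = PusTmSecondaryHeader::new_simple(17, 2, &time_stamp);
    let ping_reply = PusTm::new(&mut sph, sec_header, None, true);
    let mut buf: [u8; 32] = [0; 32];
    let written = ping_reply.write_to_bytes(&mut buf).expect("write failed");
    assert_eq!(written, 22);
    let (readback, read_len) = PusTm::from_bytes(&buf, 7).expect("read failed");
    assert_eq!(read_len, 22);
    assert_eq!(readback.time_stamp(), &time_stamp[..]);
}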
#[cfg(test)]
mod tests {
use super::*;
use crate::ecss::PusVersion::PusC;
use crate::SpHeader;
fn base_ping_reply_full_ctor(time_stamp: &[u8]) -> PusTm {
let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
let tc_header = PusTmSecondaryHeader::new_simple(17, 2, &time_stamp);
PusTm::new(&mut sph, tc_header, None, true)
}
fn base_hk_reply<'a>(time_stamp: &'a [u8], src_data: &'a [u8]) -> PusTm<'a> {
let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
let tc_header = PusTmSecondaryHeader::new_simple(3, 5, &time_stamp);
PusTm::new(&mut sph, tc_header, Some(src_data), true)
}
fn dummy_time_stamp() -> &'static [u8] {
return &[0, 1, 2, 3, 4, 5, 6];
}
#[test]
fn test_basic() {
let time_stamp = dummy_time_stamp();
let pus_tm = base_ping_reply_full_ctor(&time_stamp);
verify_ping_reply(&pus_tm, false, 22, dummy_time_stamp());
}
#[test]
fn test_serialization_no_source_data() {
let time_stamp = dummy_time_stamp();
let pus_tm = base_ping_reply_full_ctor(&time_stamp);
let mut buf: [u8; 32] = [0; 32];
let ser_len = pus_tm
.write_to_bytes(&mut buf)
.expect("Serialization failed");
assert_eq!(ser_len, 22);
verify_raw_ping_reply(&buf);
}
#[test]
fn test_serialization_with_source_data() {
let src_data = [1, 2, 3];
let hk_reply = base_hk_reply(dummy_time_stamp(), &src_data);
let mut buf: [u8; 32] = [0; 32];
let ser_len = hk_reply
.write_to_bytes(&mut buf)
.expect("Serialization failed");
assert_eq!(ser_len, 25);
assert_eq!(buf[20], 1);
assert_eq!(buf[21], 2);
assert_eq!(buf[22], 3);
}
#[test]
fn test_setters() {
let time_stamp = dummy_time_stamp();
let mut pus_tm = base_ping_reply_full_ctor(&time_stamp);
pus_tm.set_sc_time_ref_status(0b1010);
pus_tm.set_dest_id(0x7fff);
pus_tm.set_msg_counter(0x1f1f);
assert_eq!(pus_tm.sc_time_ref_status(), 0b1010);
assert_eq!(pus_tm.dest_id(), 0x7fff);
assert_eq!(pus_tm.msg_counter(), 0x1f1f);
assert!(pus_tm.set_apid(0x7ff));
assert_eq!(pus_tm.apid(), 0x7ff);
}
#[test]
fn test_deserialization_no_source_data() {
let time_stamp = dummy_time_stamp();
let pus_tm = base_ping_reply_full_ctor(&time_stamp);
let mut buf: [u8; 32] = [0; 32];
let ser_len = pus_tm
.write_to_bytes(&mut buf)
.expect("Serialization failed");
assert_eq!(ser_len, 22);
let (tm_deserialized, size) = PusTm::from_bytes(&buf, 7).expect("Deserialization failed");
assert_eq!(ser_len, size);
verify_ping_reply(&tm_deserialized, false, 22, dummy_time_stamp());
}
#[test]
fn test_manual_field_update() {
let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
let tc_header = PusTmSecondaryHeader::new_simple(17, 2, dummy_time_stamp());
let mut tm = PusTm::new(&mut sph, tc_header, None, false);
tm.calc_crc_on_serialization = false;
assert_eq!(tm.data_len(), 0x00);
let mut buf: [u8; 32] = [0; 32];
let res = tm.write_to_bytes(&mut buf);
assert!(res.is_err());
assert!(matches!(res.unwrap_err(), PusError::CrcCalculationMissing));
tm.update_ccsds_data_len();
assert_eq!(tm.data_len(), 15);
tm.calc_own_crc16();
let res = tm.write_to_bytes(&mut buf);
assert!(res.is_ok());
tm.sp_header.data_len = 0;
tm.update_packet_fields();
assert_eq!(tm.data_len(), 15);
}
#[test]
fn test_target_buf_too_small() {
let time_stamp = dummy_time_stamp();
let pus_tm = base_ping_reply_full_ctor(&time_stamp);
let mut buf: [u8; 16] = [0; 16];
let res = pus_tm.write_to_bytes(&mut buf);
assert!(res.is_err());
let error = res.unwrap_err();
assert!(matches!(error, PusError::ByteConversionError { .. }));
match error {
PusError::ByteConversionError(err) => match err {
ByteConversionError::ToSliceTooSmall(size_missmatch) => {
assert_eq!(size_missmatch.expected, 22);
assert_eq!(size_missmatch.found, 16);
}
_ => panic!("Invalid PUS error {:?}", err),
},
_ => {
panic!("Invalid error {:?}", error);
}
}
}
#[test]
#[cfg(feature = "alloc")]
fn test_append_to_vec() {
let time_stamp = dummy_time_stamp();
let pus_tm = base_ping_reply_full_ctor(&time_stamp);
let mut vec = Vec::new();
let res = pus_tm.append_to_vec(&mut vec);
assert!(res.is_ok());
assert_eq!(res.unwrap(), 22);
verify_raw_ping_reply(vec.as_slice());
}
#[test]
#[cfg(feature = "alloc")]
fn test_append_to_vec_with_src_data() {
let src_data = [1, 2, 3];
let hk_reply = base_hk_reply(dummy_time_stamp(), &src_data);
let mut vec = Vec::new();
vec.push(4);
let res = hk_reply.append_to_vec(&mut vec);
assert!(res.is_ok());
assert_eq!(res.unwrap(), 25);
assert_eq!(vec.len(), 26);
}
fn verify_raw_ping_reply(buf: &[u8]) {
// Secondary header flag is set -> 0b0000_1001; the upper three APID bits (0b001) occupy the last bits of the first byte
assert_eq!(buf[0], 0x09);
// Rest of APID 0x123
assert_eq!(buf[1], 0x23);
// Unsegmented is the default, and first byte of 0x234 occupies this byte as well
assert_eq!(buf[2], 0xc2);
assert_eq!(buf[3], 0x34);
assert_eq!(((buf[4] as u16) << 8) | buf[5] as u16, 15);
// SC time ref status is 0
assert_eq!(buf[6], (PusC as u8) << 4);
assert_eq!(buf[7], 17);
assert_eq!(buf[8], 2);
// MSG counter 0
assert_eq!(buf[9], 0x00);
assert_eq!(buf[10], 0x00);
// Destination ID
assert_eq!(buf[11], 0x00);
assert_eq!(buf[12], 0x00);
// Timestamp
assert_eq!(&buf[13..20], dummy_time_stamp());
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(&buf[0..20]);
let crc16 = digest.finalize();
assert_eq!(((crc16 >> 8) & 0xff) as u8, buf[20]);
assert_eq!((crc16 & 0xff) as u8, buf[21]);
}
fn verify_ping_reply(
tm: &PusTm,
has_user_data: bool,
exp_full_len: usize,
exp_time_stamp: &[u8],
) {
assert!(tm.is_tm());
assert_eq!(PusPacket::service(tm), 17);
assert_eq!(PusPacket::subservice(tm), 2);
assert!(tm.sec_header_flag());
assert_eq!(tm.len_packed(), exp_full_len);
assert_eq!(tm.time_stamp(), exp_time_stamp);
if has_user_data {
assert!(tm.user_data().is_some());
}
assert_eq!(PusPacket::pus_version(tm), PusC);
assert_eq!(tm.apid(), 0x123);
assert_eq!(tm.seq_count(), 0x234);
assert_eq!(tm.data_len(), exp_full_len as u16 - 7);
assert_eq!(tm.dest_id(), 0x0000);
assert_eq!(tm.msg_counter(), 0x0000);
assert_eq!(tm.sc_time_ref_status(), 0b0000);
}
}


@ -1,734 +0,0 @@
use crate::ByteConversionError;
use core::fmt::{Debug, Display, Formatter};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
pub trait ToBeBytes {
type ByteArray: AsRef<[u8]>;
/// Length when written to big endian bytes.
fn written_len(&self) -> usize;
fn to_be_bytes(&self) -> Self::ByteArray;
}
impl ToBeBytes for () {
type ByteArray = [u8; 0];
#[inline]
fn written_len(&self) -> usize {
0
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
[]
}
}
impl ToBeBytes for u8 {
type ByteArray = [u8; 1];
#[inline]
fn written_len(&self) -> usize {
1
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
u8::to_be_bytes(*self)
}
}
impl ToBeBytes for u16 {
type ByteArray = [u8; 2];
#[inline]
fn written_len(&self) -> usize {
2
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
u16::to_be_bytes(*self)
}
}
impl ToBeBytes for u32 {
type ByteArray = [u8; 4];
#[inline]
fn written_len(&self) -> usize {
4
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
u32::to_be_bytes(*self)
}
}
impl ToBeBytes for u64 {
type ByteArray = [u8; 8];
#[inline]
fn written_len(&self) -> usize {
8
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
u64::to_be_bytes(*self)
}
}
pub trait UnsignedEnum {
/// Size of the unsigned enumeration in bytes.
fn size(&self) -> usize;
/// Write the unsigned enumeration to a raw buffer. Returns the written size on success.
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
fn value(&self) -> u64;
#[cfg(feature = "alloc")]
fn to_vec(&self) -> alloc::vec::Vec<u8> {
let mut buf = alloc::vec![0; self.size()];
self.write_to_be_bytes(&mut buf).unwrap();
buf
}
}
pub trait UnsignedEnumExt: UnsignedEnum + Debug + Copy + Clone + PartialEq + Eq {}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum UnsignedByteFieldError {
/// Value is too large for specified width of byte field.
ValueTooLargeForWidth {
width: usize,
value: u64,
},
/// Only 1, 2, 4 and 8 are allowed width values. Optionally contains the expected width if
/// applicable, for example for conversions.
InvalidWidth {
found: usize,
expected: Option<usize>,
},
ByteConversionError(ByteConversionError),
}
impl From<ByteConversionError> for UnsignedByteFieldError {
#[inline]
fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value)
}
}
impl Display for UnsignedByteFieldError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
Self::ByteConversionError(e) => {
write!(f, "low level byte conversion error: {e}")
}
Self::InvalidWidth { found, .. } => {
write!(f, "invalid width {found}, only 1, 2, 4 and 8 are allowed.")
}
Self::ValueTooLargeForWidth { width, value } => {
write!(f, "value {value} too large for width {width}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for UnsignedByteFieldError {}
/// Type erased variant.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct UnsignedByteField {
width: usize,
value: u64,
}
impl UnsignedByteField {
#[inline]
pub const fn new(width: usize, value: u64) -> Self {
Self { width, value }
}
#[inline]
pub const fn value_const(&self) -> u64 {
self.value
}
#[inline]
pub fn new_from_be_bytes(width: usize, buf: &[u8]) -> Result<Self, UnsignedByteFieldError> {
if width > buf.len() {
return Err(ByteConversionError::FromSliceTooSmall {
expected: width,
found: buf.len(),
}
.into());
}
match width {
0 => Ok(Self::new(width, 0)),
1 => Ok(Self::new(width, buf[0] as u64)),
2 => Ok(Self::new(
width,
u16::from_be_bytes(buf[0..2].try_into().unwrap()) as u64,
)),
4 => Ok(Self::new(
width,
u32::from_be_bytes(buf[0..4].try_into().unwrap()) as u64,
)),
8 => Ok(Self::new(
width,
u64::from_be_bytes(buf[0..8].try_into().unwrap()),
)),
_ => Err(UnsignedByteFieldError::InvalidWidth {
found: width,
expected: None,
}),
}
}
}
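// Round-trip sketch (not part of the original API): a type-erased field can be rebuilt from
// the bytes it produced by re-using its stored width. The helper name
// `round_trip_type_erased` is hypothetical and assumes a width of 0, 1, 2, 4 or 8.
pub fn round_trip_type_erased(
    field: UnsignedByteField,
) -> Result<UnsignedByteField, UnsignedByteFieldError> {
    let mut buf = [0u8; 8];
    // Write the field as big endian bytes, then parse it back with the same width.
    field.write_to_be_bytes(&mut buf)?;
    UnsignedByteField::new_from_be_bytes(field.size(), &buf)
}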
impl UnsignedEnum for UnsignedByteField {
#[inline]
fn size(&self) -> usize {
self.width
}
#[inline]
fn value(&self) -> u64 {
self.value_const()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.size() {
return Err(ByteConversionError::ToSliceTooSmall {
expected: self.size(),
found: buf.len(),
});
}
match self.size() {
0 => Ok(0),
1 => {
let u8 = UnsignedByteFieldU8::try_from(*self).unwrap();
u8.write_to_be_bytes(buf)
}
2 => {
let u16 = UnsignedByteFieldU16::try_from(*self).unwrap();
u16.write_to_be_bytes(buf)
}
4 => {
let u32 = UnsignedByteFieldU32::try_from(*self).unwrap();
u32.write_to_be_bytes(buf)
}
8 => {
let u64 = UnsignedByteFieldU64::try_from(*self).unwrap();
u64.write_to_be_bytes(buf)
}
_ => {
// Only widths of 0, 1, 2, 4 and 8 are supported; any other width is a usage error.
panic!("unexpected byte field width {}", self.size());
}
}
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct GenericUnsignedByteField<TYPE: Copy + Into<u64>> {
value: TYPE,
}
impl<TYPE: Copy + Into<u64>> GenericUnsignedByteField<TYPE> {
pub const fn new(val: TYPE) -> Self {
Self { value: val }
}
pub const fn value_typed(&self) -> TYPE {
self.value
}
}
impl<TYPE: Copy + ToBeBytes + Into<u64>> UnsignedEnum for GenericUnsignedByteField<TYPE> {
#[inline]
fn size(&self) -> usize {
self.value.written_len()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.size() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.size(),
});
}
buf[..self.size()].copy_from_slice(self.value.to_be_bytes().as_ref());
Ok(self.value.written_len())
}
#[inline]
fn value(&self) -> u64 {
self.value_typed().into()
}
}
pub type UnsignedByteFieldEmpty = GenericUnsignedByteField<()>;
pub type UnsignedByteFieldU8 = GenericUnsignedByteField<u8>;
pub type UnsignedByteFieldU16 = GenericUnsignedByteField<u16>;
pub type UnsignedByteFieldU32 = GenericUnsignedByteField<u32>;
pub type UnsignedByteFieldU64 = GenericUnsignedByteField<u64>;
pub type UbfU8 = UnsignedByteFieldU8;
pub type UbfU16 = UnsignedByteFieldU16;
pub type UbfU32 = UnsignedByteFieldU32;
pub type UbfU64 = UnsignedByteFieldU64;
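// Conversion sketch (illustrative only): the short `Ubf*` aliases convert losslessly into the
// type-erased `UnsignedByteField` via `From`, and back again via `TryFrom`, which verifies the
// stored width. The function name `erase_and_recover_u16` is hypothetical.
pub fn erase_and_recover_u16(value: u16) -> Result<UbfU16, UnsignedByteFieldError> {
    let typed = UbfU16::new(value);
    // Widening into the type-erased form always succeeds; narrowing checks the width.
    let erased: UnsignedByteField = typed.into();
    UbfU16::try_from(erased)
}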
impl From<UnsignedByteFieldU8> for UnsignedByteField {
fn from(value: UnsignedByteFieldU8) -> Self {
Self::new(1, value.value as u64)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU8 {
type Error = UnsignedByteFieldError;
#[inline]
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 1 {
return Err(UnsignedByteFieldError::InvalidWidth {
found: value.width,
expected: Some(1),
});
}
Ok(Self::new(value.value as u8))
}
}
impl From<UnsignedByteFieldU16> for UnsignedByteField {
#[inline]
fn from(value: UnsignedByteFieldU16) -> Self {
Self::new(2, value.value as u64)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU16 {
type Error = UnsignedByteFieldError;
#[inline]
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 2 {
return Err(UnsignedByteFieldError::InvalidWidth {
found: value.width,
expected: Some(2),
});
}
Ok(Self::new(value.value as u16))
}
}
impl From<UnsignedByteFieldU32> for UnsignedByteField {
#[inline]
fn from(value: UnsignedByteFieldU32) -> Self {
Self::new(4, value.value as u64)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU32 {
type Error = UnsignedByteFieldError;
#[inline]
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 4 {
return Err(UnsignedByteFieldError::InvalidWidth {
found: value.width,
expected: Some(4),
});
}
Ok(Self::new(value.value as u32))
}
}
impl From<UnsignedByteFieldU64> for UnsignedByteField {
#[inline]
fn from(value: UnsignedByteFieldU64) -> Self {
Self::new(8, value.value)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU64 {
type Error = UnsignedByteFieldError;
#[inline]
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 8 {
return Err(UnsignedByteFieldError::InvalidWidth {
found: value.width,
expected: Some(8),
});
}
Ok(Self::new(value.value))
}
}
#[cfg(test)]
pub mod tests {
use crate::util::{
UnsignedByteField, UnsignedByteFieldError, UnsignedByteFieldU16, UnsignedByteFieldU32,
UnsignedByteFieldU64, UnsignedByteFieldU8, UnsignedEnum,
};
use crate::ByteConversionError;
use std::format;
#[test]
fn test_simple_u8() {
let u8 = UnsignedByteFieldU8::new(5);
assert_eq!(u8.size(), 1);
let mut buf: [u8; 8] = [0; 8];
let len = u8
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 1);
assert_eq!(buf[0], 5);
for val in buf.iter().skip(1) {
assert_eq!(*val, 0);
}
assert_eq!(u8.value_typed(), 5);
assert_eq!(u8.value(), 5);
}
#[test]
fn test_simple_u16() {
let u16 = UnsignedByteFieldU16::new(3823);
assert_eq!(u16.size(), 2);
let mut buf: [u8; 8] = [0; 8];
let len = u16
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 2);
let raw_val = u16::from_be_bytes(buf[0..2].try_into().unwrap());
assert_eq!(raw_val, 3823);
for val in buf.iter().skip(2) {
assert_eq!(*val, 0);
}
assert_eq!(u16.value_typed(), 3823);
assert_eq!(u16.value(), 3823);
}
#[test]
fn test_simple_u32() {
let u32 = UnsignedByteFieldU32::new(80932);
assert_eq!(u32.size(), 4);
let mut buf: [u8; 8] = [0; 8];
let len = u32
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 4);
let raw_val = u32::from_be_bytes(buf[0..4].try_into().unwrap());
assert_eq!(raw_val, 80932);
(4..8).for_each(|i| {
assert_eq!(buf[i], 0);
});
assert_eq!(u32.value_typed(), 80932);
assert_eq!(u32.value(), 80932);
}
#[test]
fn test_simple_u64() {
let u64 = UnsignedByteFieldU64::new(5999999);
assert_eq!(u64.size(), 8);
let mut buf: [u8; 8] = [0; 8];
let len = u64
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 8);
let raw_val = u64::from_be_bytes(buf[0..8].try_into().unwrap());
assert_eq!(raw_val, 5999999);
assert_eq!(u64.value_typed(), 5999999);
assert_eq!(u64.value(), 5999999);
}
#[test]
fn conversions_u8() {
let u8 = UnsignedByteFieldU8::new(5);
let u8_type_erased = UnsignedByteField::from(u8);
assert_eq!(u8_type_erased.width, 1);
assert_eq!(u8_type_erased.value, 5);
let u8_conv_back =
UnsignedByteFieldU8::try_from(u8_type_erased).expect("conversion failed for u8");
assert_eq!(u8, u8_conv_back);
assert_eq!(u8_conv_back.value, 5);
}
#[test]
fn conversion_u8_fails() {
let field = UnsignedByteField::new(2, 60000);
let conv_fails = UnsignedByteFieldU8::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth {
found,
expected: Some(expected),
} => {
assert_eq!(found, 2);
assert_eq!(expected, 1);
}
_ => {
panic!("{}", format!("invalid error {err}"))
}
}
}
#[test]
fn conversions_u16() {
let u16 = UnsignedByteFieldU16::new(64444);
let u16_type_erased = UnsignedByteField::from(u16);
assert_eq!(u16_type_erased.width, 2);
assert_eq!(u16_type_erased.value, 64444);
let u16_conv_back =
UnsignedByteFieldU16::try_from(u16_type_erased).expect("conversion failed for u16");
assert_eq!(u16, u16_conv_back);
assert_eq!(u16_conv_back.value, 64444);
}
#[test]
fn conversion_u16_fails() {
let field = UnsignedByteField::new(4, 75000);
let conv_fails = UnsignedByteFieldU16::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth {
found,
expected: Some(expected),
} => {
assert_eq!(found, 4);
assert_eq!(expected, 2);
}
_ => {
panic!("{}", format!("invalid error {err}"))
}
}
}
#[test]
fn conversions_u32() {
let u32 = UnsignedByteFieldU32::new(75000);
let u32_type_erased = UnsignedByteField::from(u32);
assert_eq!(u32_type_erased.width, 4);
assert_eq!(u32_type_erased.value, 75000);
let u32_conv_back =
UnsignedByteFieldU32::try_from(u32_type_erased).expect("conversion failed for u32");
assert_eq!(u32, u32_conv_back);
assert_eq!(u32_conv_back.value, 75000);
}
#[test]
fn conversion_u32_fails() {
let field = UnsignedByteField::new(8, 75000);
let conv_fails = UnsignedByteFieldU32::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth {
found,
expected: Some(expected),
} => {
assert_eq!(found, 8);
assert_eq!(expected, 4);
}
_ => {
panic!("{}", format!("invalid error {err}"))
}
}
}
#[test]
fn conversions_u64() {
let u64 = UnsignedByteFieldU64::new(5999999);
let u64_type_erased = UnsignedByteField::from(u64);
assert_eq!(u64_type_erased.width, 8);
assert_eq!(u64_type_erased.value, 5999999);
let u64_conv_back =
UnsignedByteFieldU64::try_from(u64_type_erased).expect("conversion failed for u64");
assert_eq!(u64, u64_conv_back);
assert_eq!(u64_conv_back.value, 5999999);
}
#[test]
fn conversion_u64_fails() {
let field = UnsignedByteField::new(4, 60000);
let conv_fails = UnsignedByteFieldU64::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth {
found,
expected: Some(expected),
} => {
assert_eq!(found, 4);
assert_eq!(expected, 8);
}
_ => {
panic!("{}", format!("invalid error {err}"))
}
}
}
#[test]
fn type_erased_u8_write() {
let u8 = UnsignedByteField::new(1, 5);
assert_eq!(u8.size(), 1);
let mut buf: [u8; 8] = [0; 8];
u8.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(buf[0], 5);
(1..8).for_each(|i| {
assert_eq!(buf[i], 0);
});
}
#[test]
fn type_erased_u16_write() {
let u16 = UnsignedByteField::new(2, 3823);
assert_eq!(u16.size(), 2);
let mut buf: [u8; 8] = [0; 8];
u16.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
let raw_val = u16::from_be_bytes(buf[0..2].try_into().unwrap());
assert_eq!(raw_val, 3823);
for val in buf.iter().skip(2) {
assert_eq!(*val, 0);
}
}
#[test]
fn type_erased_u32_write() {
let u32 = UnsignedByteField::new(4, 80932);
assert_eq!(u32.size(), 4);
let mut buf: [u8; 8] = [0; 8];
u32.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
let raw_val = u32::from_be_bytes(buf[0..4].try_into().unwrap());
assert_eq!(raw_val, 80932);
(4..8).for_each(|i| {
assert_eq!(buf[i], 0);
});
}
#[test]
fn type_erased_u64_write() {
let u64 = UnsignedByteField::new(8, 5999999);
assert_eq!(u64.size(), 8);
let mut buf: [u8; 8] = [0; 8];
u64.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
let raw_val = u64::from_be_bytes(buf[0..8].try_into().unwrap());
assert_eq!(raw_val, 5999999);
}
#[test]
fn type_erased_u8_construction() {
let buf: [u8; 2] = [5, 10];
let u8 = UnsignedByteField::new_from_be_bytes(1, &buf).expect("construction failed");
assert_eq!(u8.width, 1);
assert_eq!(u8.value, 5);
}
#[test]
fn type_erased_u16_construction() {
let buf: [u8; 2] = [0x10, 0x15];
let u16 = UnsignedByteField::new_from_be_bytes(2, &buf).expect("construction failed");
assert_eq!(u16.width, 2);
assert_eq!(u16.value, 0x1015);
}
#[test]
fn type_erased_u32_construction() {
let buf: [u8; 4] = [0x01, 0x02, 0x03, 0x04];
let u32 = UnsignedByteField::new_from_be_bytes(4, &buf).expect("construction failed");
assert_eq!(u32.width, 4);
assert_eq!(u32.value, 0x01020304);
}
#[test]
fn type_erased_u64_construction() {
let buf: [u8; 8] = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08];
let u64 = UnsignedByteField::new_from_be_bytes(8, &buf).expect("construction failed");
assert_eq!(u64.width, 8);
assert_eq!(u64.value, 0x0102030405060708);
}
#[test]
fn type_u16_target_buf_too_small() {
let u16 = UnsignedByteFieldU16::new(500);
let mut buf: [u8; 1] = [0; 1];
let res = u16.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let err = res.unwrap_err();
match err {
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(found, 1);
assert_eq!(expected, 2);
}
_ => {
panic!("invalid exception")
}
}
}
#[test]
fn type_erased_u16_target_buf_too_small() {
let u16 = UnsignedByteField::new(2, 500);
let mut buf: [u8; 1] = [0; 1];
let res = u16.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let err = res.unwrap_err();
match err {
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(found, 1);
assert_eq!(expected, 2);
}
_ => {
panic!("invalid exception {}", err)
}
}
let u16 = UnsignedByteField::new_from_be_bytes(2, &buf);
assert!(u16.is_err());
let err = u16.unwrap_err();
if let UnsignedByteFieldError::ByteConversionError(
ByteConversionError::FromSliceTooSmall { found, expected },
) = err
{
assert_eq!(expected, 2);
assert_eq!(found, 1);
} else {
panic!("unexpected exception {}", err);
}
}
#[test]
fn type_u32_target_buf_too_small() {
let u32 = UnsignedByteFieldU32::new(500);
let mut buf: [u8; 3] = [0; 3];
let res = u32.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let err = res.unwrap_err();
match err {
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(found, 3);
assert_eq!(expected, 4);
}
_ => {
panic!("invalid exception")
}
}
}
}