64 Commits

Author SHA1 Message Date
0aa41fee92 Merge pull request 'prepare v0.16.0' (#165) from prep-v0.16.0 into main
Reviewed-on: #165
2025-09-24 19:58:02 +02:00
Robin Mueller
d1516d669d prepare v0.16.0 2025-09-24 19:56:52 +02:00
b1ebb4d7c4 Merge pull request 'update docs on coverage' (#164) from update-coverage-docs into main
Reviewed-on: #164
2025-09-24 19:55:47 +02:00
Robin Mueller
cd79af4440 update docs on coverage 2025-09-24 19:54:45 +02:00
6a760c8585 Merge pull request 'improve backwards compatibility' (#163) from improve-backwards-compat into main
Reviewed-on: #163
2025-09-24 19:54:11 +02:00
Robin Mueller
5eb409f1ec improve backwards compatibility 2025-09-24 19:49:51 +02:00
69d416d6ff Merge pull request 'improvement for NAK API' (#162) from nak-api-improvement into main
Reviewed-on: #162
2025-09-23 17:08:15 +02:00
Robin Mueller
e2b239ae61 improvement for NAK API 2025-09-23 17:06:45 +02:00
b06d7c1a87 Merge pull request 'better error handling' (#161) from better-nak-error into main
Reviewed-on: #161
2025-09-18 17:37:00 +02:00
Robin Mueller
ec1ddbde81 better error handling 2025-09-18 17:36:51 +02:00
7f4ada1734 Merge pull request 'NAK constructor is pub' (#160) from nak-new-pub into main
Reviewed-on: #160
2025-09-18 17:35:48 +02:00
Robin Mueller
15f97e960b NAK constructor is pub 2025-09-18 17:32:11 +02:00
49b7c2d072 Merge pull request 'PDU header improvements' (#159) from pdu-header-improvements into main
Reviewed-on: #159
2025-09-18 16:56:26 +02:00
1ed23bd7ef PDU header improvements 2025-09-18 16:54:28 +02:00
a82cdb1e82 Merge pull request 'nak docs' (#158) from nak-docs into main
Reviewed-on: #158
2025-09-17 13:42:04 +02:00
12e7062075 nak docs 2025-09-17 13:40:49 +02:00
a1e40834f5 Merge pull request 'improve ACK PDU' (#157) from improve-ack-pdu into main
Reviewed-on: #157
2025-09-15 13:02:30 +02:00
Robin Mueller
3f6a5df8e7 improve ACK PDU 2025-09-15 13:02:16 +02:00
a8d5fdf8d3 Merge pull request 'extend NAK PDU' (#156) from extend-nak-pdu into main
Reviewed-on: #156
2025-09-15 10:30:02 +02:00
Robin Mueller
62326da276 extend NAK PDU 2025-09-15 10:16:07 +02:00
477890346a Merge pull request 'improve CFDP module' (#154) from cfdp-module-improvements into main
Reviewed-on: #154
2025-09-11 16:10:47 +02:00
Robin Mueller
9394beea38 improve CFDP module 2025-09-11 16:03:58 +02:00
6c425e137a Merge pull request 'add coverage to justfile' (#155) from update-justfile into main
Reviewed-on: #155
2025-09-11 16:03:41 +02:00
24b91a7a83 add coverage to justfile 2025-09-11 13:22:27 +02:00
a7c6ce7d44 Merge pull request 'improve CFDP module' (#153) from cfdp-module-improvements into main
Reviewed-on: #153
2025-09-11 09:12:59 +02:00
Robin Mueller
c68e71a25e improve CFDP module 2025-09-11 09:09:41 +02:00
272a961a70 Merge pull request 'add packet_len direct method for SpHeader' (#152) from sp-header-tweak into main
Reviewed-on: #152
2025-09-10 21:05:56 +02:00
Robin Mueller
6f4df7e3c2 add packet_len direct method for SpHeader 2025-09-10 19:04:47 +02:00
15c477e810 Merge pull request 'prepare v0.16.0' (#151) from prep-v0.16.0 into main
Reviewed-on: #151
2025-09-10 18:08:10 +02:00
Robin Mueller
e5b10920a0 prepare v0.16.0 2025-09-10 18:03:35 +02:00
3f8434e1fa Merge pull request 'add missing Error impls' (#150) from add-missing-error-impls into main
Reviewed-on: #150
2025-09-10 17:54:46 +02:00
Robin Mueller
ec3f462931 add missing Error impls 2025-09-10 17:52:49 +02:00
e6686caba1 Merge pull request 'add-missing-defmt-impls' (#149) from add-missing-defmt-impls into main
Reviewed-on: #149
2025-09-10 17:52:39 +02:00
Robin Mueller
2a0b21983e add some missing defmt impls 2025-09-10 17:48:49 +02:00
4e153e0b68 Merge pull request 'Add TM builder API' (#148) from add-tm-builder-api into main
Reviewed-on: #148
2025-09-10 17:39:05 +02:00
Robin Mueller
aaac15e3d0 Add TM builder API 2025-09-10 17:36:39 +02:00
89788c1341 Merge pull request 'add first builder API' (#147) from add-tc-builder-api into main
Reviewed-on: #147
2025-09-10 16:38:25 +02:00
Robin Mueller
578be2da8f add first TC builder API 2025-09-10 16:12:06 +02:00
3a21daf8de Merge pull request 'refactor and improve ECSS module' (#146) from refactor-improve-ecss-module into main
Reviewed-on: #146
2025-09-10 15:37:27 +02:00
Robin Mueller
8fd46f6a30 refactor and improve ECSS module 2025-09-10 15:28:58 +02:00
c6b74fecbd Merge pull request 'start making ECSS checksum optional' (#144) from ecss-checksum-optional into main
Reviewed-on: #144
2025-09-09 16:14:45 +02:00
Robin Mueller
60e35559e5 start making ECSS checksum optional 2025-09-09 16:14:11 +02:00
e708f1b861 Merge pull request 'some more tests' (#145) from add-some-more-tests into main
Reviewed-on: #145
2025-09-09 15:57:11 +02:00
Robin Mueller
91490b5dd6 some more tests 2025-09-09 15:56:44 +02:00
e151b8e761 Merge pull request 'fix for embedded systems, introduce portable atomic seq counters' (#143) from portable-atomic-seq-counters-embedded-fix into main
Reviewed-on: #143
2025-09-09 13:49:23 +02:00
Robin Mueller
2839174e5f fix for embedded systems, introduce portable atomic seq counters 2025-09-09 13:34:12 +02:00
6e2db87fa9 Merge pull request 'improve sequence counters' (#141) from improve-seq-counters into main
Reviewed-on: #141
2025-09-09 11:53:31 +02:00
Robin Mueller
e8a01dc6b2 improve sequence counters 2025-09-09 11:51:59 +02:00
20403bda32 Merge pull request 'sequence counter improvements' (#140) from seq-counter-improvements into main
Reviewed-on: #140
2025-09-09 10:27:08 +02:00
Robin Mueller
2cbd48331c sequence counter improvements 2025-09-09 10:24:20 +02:00
c1346f2b12 Merge pull request 'add some more tests' (#138) from some-more-tests into main
Reviewed-on: #138
2025-09-08 17:01:45 +02:00
2e3a7849a7 add some more tests 2025-09-08 16:59:41 +02:00
86ebea8eb8 Merge pull request 'Add basic USLP support' (#137) from add-basic-uslp-support into main
Reviewed-on: #137
2025-09-08 16:59:21 +02:00
2c8c77acb8 add basic USLP support 2025-09-08 16:51:33 +02:00
63d74aa58b Merge pull request 'PUS version fixes' (#136) from small-bugfix-pus-tm-a into main
Reviewed-on: #136
2025-08-26 16:41:13 +02:00
5a86f89c83 version fixes 2025-08-26 16:40:44 +02:00
b8ae26c302 Merge pull request 'improvement for naming' (#135) from naming-improvement into main
Reviewed-on: #135
2025-08-26 16:22:19 +02:00
160b1dedf9 improvement for naming 2025-08-26 16:16:54 +02:00
8eccf1fa29 Merge pull request 'NAK PDU reader update' (#134) from nak-pdu-reader-refactoring into main
Reviewed-on: #134
2025-08-20 17:53:32 +02:00
Robin Mueller
8445b7cc31 NAK PDU reader update 2025-08-20 16:02:08 +02:00
a2971f8f73 Merge pull request 'add badge' (#133) from add-chat-badge into main
Reviewed-on: #133
2025-08-14 14:22:09 +02:00
Robin Mueller
ba3b66326d add badge 2025-08-14 14:21:37 +02:00
de2675e602 Merge pull request 'add PUS A support' (#132) from add-pus-a-support into main
Reviewed-on: #132
2025-08-13 17:24:50 +02:00
Robin Mueller
3d344c11cc add PUS A support 2025-08-13 17:04:39 +02:00
32 changed files with 7668 additions and 1596 deletions

View File

@@ -29,7 +29,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
-     - uses: dtolnay/rust-toolchain@1.70.0
+     - uses: dtolnay/rust-toolchain@1.83
      - run: cargo check --release
  cross-check:

View File

@@ -8,6 +8,57 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]

# [v0.16.0] 2025-09-24

- Bump Rust MSRV to v1.83

## Changed

- `PusTcCreator` has its own `service`, `subservice` and `apid` methods and does not require trait
  imports anymore.
- CFDP NAK PDU `SegmentRequestIter` is not generic over the file size anymore. Instead, the
  iterator returns pairs of `u64` for both large and normal file sizes.
- `PusVersion::VersionNotSupported` now contains the raw version number instead of a `PusVersion`
  enum to make it more flexible.
- The `pus_version` API now returns a `Result<PusVersion, u8>` instead of a `PusVersion` to allow
  modelling invalid version numbers properly.
- Renamed `CcsdsPacket::total_len` to `CcsdsPacket::packet_len`.
- Renamed `SequenceCountProvider` to `SequenceCounter`.
- Renamed `SeqCountProviderSimple` to `SequenceCounterSimple`.
- Renamed `CcsdsSimpleSeqCountProvider` to `SequenceCounterCcsdsSimple`.
- Renamed `SeqCountProviderSync` to `SequenceCounterSync`.
- Renamed `PusPacket::opt_crc16` to `PusPacket::checksum`.
- Renamed `PacketSequenceCtrl` to `PacketSequenceControl`.
- ECSS checksum generation is now optional, as specified in the standard. Added `has_checksum`
  parameters to the ECSS TM/TC creators and readers to reflect this.
- The APID is represented by `arbitrary-int::u11` while the sequence count is represented by
  `arbitrary-int::u14`. A lot of corresponding checks were removed because the types now ensure
  value validity.
- The ACK field changed from `u8` to an `AckFlags` structure.
- The PUS version raw representation is `u4` now.
- The SC time reference status representation is `u4` now.
- Renamed `ptype` to `packet_type`.
- Renamed `PduHeader::new_no_file_data` to `PduHeader::new_for_file_directive`.
- Renamed `FinishedPduCreator::new_generic` to `new` and `new_default` to `new_no_error`.

## Removed

- `PusVersion::Invalid`, which is now modelled with `Result<PusVersion, u8>`.

## Added

- `cfdp::pdu::ack::InvalidAckedDirectiveCodeError`, which is returned by the `AckPdu` constructor.
- `cfdp::pdu::nak::NakPduCreatorWithReservedSegReqsBuf` constructor which exposes the segment
  request buffer mutably to avoid the need for a separate segment request buffer.
- `SpHeader::packet_len` direct method.
- `AckFlags`, which is implemented with `bitbybit::bitfield`.
- `ApidOutOfRangeError` and `SequenceCountOutOfRangeError`.
- Added PUS A legacy support for telecommands inside the `ecss.tc_pus_a` module.
- Added `SequenceCounter::increment_mut` and `SequenceCounter::get_and_increment_mut`.
- Implemented `SequenceCounter` for `Atomic` unsigned types and references to them.
- `PusPacket::has_checksum` and `WritablePusPacket::has_checksum`.
- PUS TC builder API, either via `PusTcBuilder::new` or `PusTcCreator::builder`.

# [v0.15.0] 2025-07-18

## Added

@@ -35,7 +86,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).

# [v0.13.1] 2025-03-21

- Bugfix due to operator precedence for `PusTcSecondaryHeader::pus_version`,
  `PusTcSecondaryHeaderWithoutTimestamp::pus_version`, `CdsTime::from_bytes_with_u16_days` and
  `CdsTime::from_bytes_with_u24_days`

@@ -593,7 +644,8 @@ The timestamp of `PusTm` is now optional. See Added and Changed section for details.

Initial release with CCSDS Space Packet Primary Header implementation and basic PUS TC and TM
implementations.

-[unreleased]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.15.0...HEAD
+[unreleased]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.16.0...HEAD
+[v0.16.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.15.0...v0.16.0
[v0.15.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.14.0...v0.15.0
[v0.14.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.13.1...v0.14.0
[v0.13.1]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.13.0...v0.13.1
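One of the more invasive entries above is the `pus_version` return type change. A minimal migration sketch, assuming the method stays reachable through the `PusPacket` trait exported from `spacepackets::ecss` (the trait location and module path are not shown in this diff):

```rust
use spacepackets::ecss::{PusPacket, PusVersion};

// `pus_version()` now returns Result<PusVersion, u8> instead of a plain PusVersion,
// so invalid raw version fields surface as the Err variant.
fn report_version(packet: &impl PusPacket) {
    match packet.pus_version() {
        Ok(version) => println!("PUS version: {version:?}"),
        Err(raw) => println!("unsupported raw PUS version field: {raw}"),
    }
}
```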

View File

@@ -1,8 +1,8 @@
[package]
name = "spacepackets"
-version = "0.15.0"
+version = "0.16.0"
edition = "2021"
-rust-version = "1.70.0"
+rust-version = "1.83"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
description = "Generic implementations for various CCSDS and ECSS packet standards"
homepage = "https://egit.irs.uni-stuttgart.de/rust/spacepackets"
@@ -21,6 +21,9 @@ thiserror = { version = "2", default-features = false }
num_enum = { version = ">0.5, <=0.7", default-features = false }
num-traits = { version = "0.2", default-features = false }
serde = { version = "1", optional = true, default-features = false, features = ["derive"] }
+arbitrary-int = { version = "2" }
+portable-atomic = "1"
+bitbybit = "1.4"
time = { version = "0.3", default-features = false, optional = true }
chrono = { version = "0.4", default-features = false, optional = true }
@@ -29,7 +32,8 @@ defmt = { version = "1", default-features = false, optional = true }
[features]
default = ["std"]
std = ["alloc", "chrono/std", "chrono/clock", "thiserror/std"]
-serde = ["dep:serde", "chrono?/serde"]
+defmt = ["dep:defmt", "arbitrary-int/defmt"]
+serde = ["dep:serde", "chrono?/serde", "arbitrary-int/serde"]
alloc = ["chrono?/alloc", "defmt?/alloc", "serde?/alloc"]
timelib = ["dep:time"]

View File

@@ -1,6 +1,7 @@
[![Crates.io](https://img.shields.io/crates/v/spacepackets)](https://crates.io/crates/spacepackets)
[![docs.rs](https://img.shields.io/docsrs/spacepackets)](https://docs.rs/spacepackets)
[![ci](https://github.com/us-irs/spacepackets-rs/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/us-irs/spacepackets-rs/actions/workflows/ci.yml)
+[![matrix chat](https://img.shields.io/matrix/sat-rs%3Amatrix.org)](https://matrix.to/#/#sat-rs:matrix.org)

ECSS and CCSDS Spacepackets
======
@@ -50,16 +51,14 @@ usage examples.

# Coverage

-Coverage was generated using [`grcov`](https://github.com/mozilla/grcov). If you have not done so
-already, install the `llvm-tools-preview`:
+Coverage can be generated using [`llvm-cov`](https://github.com/taiki-e/cargo-llvm-cov). If you have not done so
+already, install the tool:

```sh
-rustup component add llvm-tools-preview
-cargo install grcov --locked
+cargo +stable install cargo-llvm-cov --locked
```

-After that, you can simply run `coverage.py` to test the project with coverage. You can optionally
-supply the `--open` flag to open the coverage report in your webbrowser.
+After this, you can run `cargo llvm-cov nextest` to run all the tests and display coverage.

# Miri

View File

@@ -1,54 +0,0 @@
#!/usr/bin/env python3
import os
import logging
import argparse
import webbrowser

_LOGGER = logging.getLogger()


def generate_cov_report(open_report: bool, format: str):
    logging.basicConfig(level=logging.INFO)
    os.environ["RUSTFLAGS"] = "-Cinstrument-coverage"
    os.environ["LLVM_PROFILE_FILE"] = "target/coverage/%p-%m.profraw"
    _LOGGER.info("Executing tests with coverage")
    os.system("cargo test --all-features")

    out_path = "./target/debug/coverage"
    if format == "lcov":
        out_path = "./target/debug/lcov.info"
    os.system(
        f"grcov . -s . --binary-path ./target/debug/ -t {format} --branch --ignore-not-existing "
        f"-o {out_path}"
    )
    if format == "lcov":
        os.system(
            "genhtml -o ./target/debug/coverage/ --show-details --highlight --ignore-errors source "
            "--legend ./target/debug/lcov.info"
        )

    if open_report:
        coverage_report_path = os.path.abspath("./target/debug/coverage/index.html")
        webbrowser.open_new_tab(coverage_report_path)
    _LOGGER.info("Done")


def main():
    parser = argparse.ArgumentParser(
        description="Generate coverage report and optionally open it in a browser"
    )
    parser.add_argument(
        "--open", action="store_true", help="Open the coverage report in a browser"
    )
    parser.add_argument(
        "--format",
        choices=["html", "lcov"],
        default="html",
        help="Choose report format (html or lcov)",
    )
    args = parser.parse_args()
    generate_cov_report(args.open, args.format)


if __name__ == "__main__":
    main()

View File

@@ -1,3 +0,0 @@
#!/bin/sh
export RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options"
cargo +nightly doc --all-features --open

justfile (new file, 31 lines)
View File

@@ -0,0 +1,31 @@
all: check build test clippy fmt docs coverage

clippy:
    cargo clippy -- -D warnings

fmt:
    cargo fmt --all -- --check

check:
    cargo check --all-features

test:
    cargo nextest r --all-features
    cargo test --doc

build:
    cargo build --all-features

docs:
    export RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options"
    cargo +nightly doc --all-features

docs-html:
    export RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options"
    cargo +nightly doc --all-features --open

coverage:
    cargo llvm-cov nextest

coverage-html:
    cargo llvm-cov nextest --html --open

View File

@@ -63,8 +63,10 @@ pub(crate) fn generic_len_check_deserialization(
}

impl<'data> Lv<'data> {
+    pub const MIN_LEN: usize = MIN_LV_LEN;

    #[inline]
-    pub fn new(data: &[u8]) -> Result<Lv, TlvLvDataTooLargeError> {
+    pub fn new(data: &[u8]) -> Result<Lv<'_>, TlvLvDataTooLargeError> {
        if data.len() > u8::MAX as usize {
            return Err(TlvLvDataTooLargeError(data.len()));
        }
@@ -86,7 +88,7 @@ impl<'data> Lv<'data> {
    /// Helper function to build a string LV. This is especially useful for the file or directory
    /// path LVs
    #[inline]
-    pub fn new_from_str(str_slice: &str) -> Result<Lv, TlvLvDataTooLargeError> {
+    pub fn new_from_str(str_slice: &str) -> Result<Lv<'_>, TlvLvDataTooLargeError> {
        Self::new(str_slice.as_bytes())
    }
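A small usage sketch for the constructors touched above; the `spacepackets::cfdp::lv` import path is an assumption, only the signatures and the new `MIN_LEN` constant are taken from this hunk:

```rust
use spacepackets::cfdp::lv::Lv; // module path assumed, not shown in the diff

fn build_lvs() {
    // String helper, e.g. for the file or directory path LVs mentioned in the doc comment.
    let path_lv = Lv::new_from_str("/tmp/source.bin").expect("path too long for an LV");
    // Generic byte-slice constructor; fails if the value exceeds u8::MAX bytes.
    let data_lv = Lv::new(&[1, 2, 3]).expect("data too large for an LV");
    // New associated constant: the minimum serialized size of an LV (just the length field).
    let _min_len = Lv::MIN_LEN;
    let _ = (path_lv, data_lv);
}
```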

View File

@@ -13,36 +13,40 @@ pub const CFDP_VERSION_2_NAME: &str = "CCSDS 727.0-B-5";
/// Currently, only this version is supported.
pub const CFDP_VERSION_2: u8 = 0b001;

-#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
+#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum PduType {
    FileDirective = 0,
    FileData = 1,
}

-#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
+#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum Direction {
    TowardsReceiver = 0,
    TowardsSender = 1,
}

-#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
+#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum TransmissionMode {
    Acknowledged = 0,
    Unacknowledged = 1,
}

-#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
+#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum CrcFlag {
    NoCrc = 0,
@@ -68,9 +72,10 @@ impl From<CrcFlag> for bool {
}

/// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75)
-#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
+#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum SegmentMetadataFlag {
    NotPresent = 0,
@@ -78,18 +83,20 @@ pub enum SegmentMetadataFlag {
}

/// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75)
-#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
+#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum SegmentationControl {
    NoRecordBoundaryPreservation = 0,
    WithRecordBoundaryPreservation = 1,
}

-#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
+#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+#[bitbybit::bitenum(u3, exhaustive = false)]
#[repr(u8)]
pub enum FaultHandlerCode {
    NoticeOfCancellation = 0b0001,
@@ -98,9 +105,10 @@ pub enum FaultHandlerCode {
    AbandonTransaction = 0b0100,
}

-#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
+#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+#[bitbybit::bitenum(u4, exhaustive = false)]
#[repr(u8)]
pub enum ConditionCode {
    /// This is not an error condition for which a faulty handler override can be specified
@@ -121,9 +129,10 @@ pub enum ConditionCode {
    CancelRequestReceived = 0b1111,
}

-#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
+#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum LargeFileFlag {
    /// 32 bit maximum file size and FSS size
@@ -133,9 +142,10 @@ pub enum LargeFileFlag {
}

/// Transaction status for the ACK PDU field according to chapter 5.2.4 of the CFDP standard.
-#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
+#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+#[bitbybit::bitenum(u2, exhaustive = true)]
#[repr(u8)]
pub enum TransactionStatus {
    /// Transaction is not currently active and the CFDP implementation does not retain a
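The enums keep their `num_enum` derives next to the new `bitbybit::bitenum` attribute, so the familiar raw-value conversions still apply. A short sketch, assuming these types remain re-exported from `spacepackets::cfdp` (the exact paths are not visible in this hunk):

```rust
use spacepackets::cfdp::{ConditionCode, CrcFlag}; // assumed re-export paths

fn raw_conversions() {
    // IntoPrimitive (still derived above) yields the raw field value.
    let raw: u8 = CrcFlag::WithCrc.into();
    assert_eq!(raw, 1);

    // TryFromPrimitive (still derived above) converts a raw value back into a variant.
    assert_eq!(
        ConditionCode::try_from(0b1111).unwrap(),
        ConditionCode::CancelRequestReceived
    );
}
```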

View File

@@ -10,6 +10,10 @@ use super::{
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)]
#[error("invalid directive code of acknowledged PDU")]
pub struct InvalidAckedDirectiveCodeError(pub FileDirectiveType);
/// ACK PDU abstraction. /// ACK PDU abstraction.
/// ///
/// For more information, refer to CFDP chapter 5.2.4. /// For more information, refer to CFDP chapter 5.2.4.
@@ -29,16 +33,13 @@ impl AckPdu {
directive_code_of_acked_pdu: FileDirectiveType, directive_code_of_acked_pdu: FileDirectiveType,
condition_code: ConditionCode, condition_code: ConditionCode,
transaction_status: TransactionStatus, transaction_status: TransactionStatus,
) -> Result<Self, PduError> { ) -> Result<Self, InvalidAckedDirectiveCodeError> {
if directive_code_of_acked_pdu == FileDirectiveType::EofPdu { if directive_code_of_acked_pdu == FileDirectiveType::EofPdu {
pdu_header.pdu_conf.direction = Direction::TowardsSender; pdu_header.pdu_conf.direction = Direction::TowardsSender;
} else if directive_code_of_acked_pdu == FileDirectiveType::FinishedPdu { } else if directive_code_of_acked_pdu == FileDirectiveType::FinishedPdu {
pdu_header.pdu_conf.direction = Direction::TowardsReceiver; pdu_header.pdu_conf.direction = Direction::TowardsReceiver;
} else { } else {
return Err(PduError::InvalidDirectiveType { return Err(InvalidAckedDirectiveCodeError(directive_code_of_acked_pdu));
found: directive_code_of_acked_pdu as u8,
expected: None,
});
} }
// Force correct direction flag. // Force correct direction flag.
let mut ack_pdu = Self { let mut ack_pdu = Self {
@@ -81,22 +82,27 @@ impl AckPdu {
.unwrap() .unwrap()
} }
#[inline]
pub fn pdu_header(&self) -> &PduHeader { pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header &self.pdu_header
} }
#[inline]
pub fn directive_code_of_acked_pdu(&self) -> FileDirectiveType { pub fn directive_code_of_acked_pdu(&self) -> FileDirectiveType {
self.directive_code_of_acked_pdu self.directive_code_of_acked_pdu
} }
#[inline]
pub fn condition_code(&self) -> ConditionCode { pub fn condition_code(&self) -> ConditionCode {
self.condition_code self.condition_code
} }
#[inline]
pub fn transaction_status(&self) -> TransactionStatus { pub fn transaction_status(&self) -> TransactionStatus {
self.transaction_status self.transaction_status
} }
#[inline]
fn calc_pdu_datafield_len(&self) -> usize { fn calc_pdu_datafield_len(&self) -> usize {
if self.crc_flag() == CrcFlag::WithCrc { if self.crc_flag() == CrcFlag::WithCrc {
return 5; return 5;
@@ -140,27 +146,18 @@ impl AckPdu {
let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111) let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111)
.map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?; .map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?;
let transaction_status = TransactionStatus::try_from(buf[current_idx] & 0b11).unwrap(); let transaction_status = TransactionStatus::try_from(buf[current_idx] & 0b11).unwrap();
Self::new( // Unwrap okay, validity of acked directive code was checked.
Ok(Self::new(
pdu_header, pdu_header,
acked_directive_type, acked_directive_type,
condition_code, condition_code,
transaction_status, transaction_status,
) )
} .unwrap())
}
impl CfdpPdu for AckPdu {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
} }
fn file_directive_type(&self) -> Option<FileDirectiveType> { /// Write [Self] to the provided buffer and returns the written size.
Some(FileDirectiveType::AckPdu) pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
}
}
impl WritablePduPacket for AckPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written(); let expected_len = self.len_written();
if buf.len() < expected_len { if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall { return Err(ByteConversionError::ToSliceTooSmall {
@@ -188,11 +185,33 @@ impl WritablePduPacket for AckPdu {
Ok(current_idx) Ok(current_idx)
} }
fn len_written(&self) -> usize { pub fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len() self.pdu_header.header_len() + self.calc_pdu_datafield_len()
} }
} }
impl CfdpPdu for AckPdu {
#[inline]
fn pdu_header(&self) -> &PduHeader {
self.pdu_header()
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::AckPdu)
}
}
impl WritablePduPacket for AckPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.write_to_bytes(buf)
}
fn len_written(&self) -> usize {
self.len_written()
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::cfdp::{ use crate::cfdp::{
@@ -225,7 +244,7 @@ mod tests {
#[test] #[test]
fn test_basic() { fn test_basic() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new( let ack_pdu = AckPdu::new(
pdu_header, pdu_header,
FileDirectiveType::FinishedPdu, FileDirectiveType::FinishedPdu,
@@ -245,7 +264,7 @@ mod tests {
transaction_status: TransactionStatus, transaction_status: TransactionStatus,
) { ) {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu(pdu_header, condition_code, transaction_status); let ack_pdu = AckPdu::new_for_finished_pdu(pdu_header, condition_code, transaction_status);
let mut buf: [u8; 64] = [0; 64]; let mut buf: [u8; 64] = [0; 64];
let res = ack_pdu.write_to_bytes(&mut buf); let res = ack_pdu.write_to_bytes(&mut buf);
@@ -267,15 +286,53 @@ mod tests {
generic_serialization_test(ConditionCode::NoError, TransactionStatus::Active); generic_serialization_test(ConditionCode::NoError, TransactionStatus::Active);
} }
#[test]
fn test_serialization_too_small() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new(
pdu_header,
FileDirectiveType::FinishedPdu,
ConditionCode::NoError,
TransactionStatus::Active,
)
.expect("creating ACK PDU failed");
if let Err(PduError::ByteConversion(ByteConversionError::ToSliceTooSmall {
found,
expected,
})) = ack_pdu.write_to_bytes(&mut [0; 5])
{
assert_eq!(found, 5);
assert_eq!(expected, ack_pdu.len_written());
} else {
panic!("serialization should have failed");
}
}
#[test] #[test]
fn test_serialization_fs_error() { fn test_serialization_fs_error() {
generic_serialization_test(ConditionCode::FileSizeError, TransactionStatus::Terminated); generic_serialization_test(ConditionCode::FileSizeError, TransactionStatus::Terminated);
} }
#[test]
fn test_invalid_directive_code_of_acked_pdu() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
assert_eq!(
AckPdu::new(
pdu_header,
FileDirectiveType::MetadataPdu,
ConditionCode::NoError,
TransactionStatus::Active,
)
.unwrap_err(),
InvalidAckedDirectiveCodeError(FileDirectiveType::MetadataPdu)
);
}
#[test] #[test]
fn test_deserialization() { fn test_deserialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu( let ack_pdu = AckPdu::new_for_finished_pdu(
pdu_header, pdu_header,
ConditionCode::NoError, ConditionCode::NoError,
@@ -290,7 +347,7 @@ mod tests {
#[test] #[test]
fn test_with_crc() { fn test_with_crc() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu( let ack_pdu = AckPdu::new_for_finished_pdu(
pdu_header, pdu_header,
ConditionCode::NoError, ConditionCode::NoError,
@@ -307,7 +364,7 @@ mod tests {
#[test] #[test]
fn test_for_eof_pdu() { fn test_for_eof_pdu() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_eof_pdu( let ack_pdu = AckPdu::new_for_eof_pdu(
pdu_header, pdu_header,
ConditionCode::NoError, ConditionCode::NoError,
@@ -324,7 +381,7 @@ mod tests {
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
fn test_ack_pdu_serialization() { fn test_ack_pdu_serialization() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_eof_pdu( let ack_pdu = AckPdu::new_for_eof_pdu(
pdu_header, pdu_header,
ConditionCode::NoError, ConditionCode::NoError,
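Pulling the refactored pieces above together, roughly following the tests in this hunk. The import paths and the `CommonPduConfig` setup are assumptions; the constructor signature, the new `InvalidAckedDirectiveCodeError`, and the `write_to_bytes`/`len_written` calls are taken from the diff:

```rust
use spacepackets::cfdp::pdu::ack::AckPdu;
use spacepackets::cfdp::pdu::{CommonPduConfig, FileDirectiveType, PduHeader};
use spacepackets::cfdp::{ConditionCode, TransactionStatus};

fn build_ack(pdu_conf: CommonPduConfig) {
    let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
    // Only EOF and Finished PDUs may be acknowledged; any other directive code now yields
    // InvalidAckedDirectiveCodeError instead of a generic PduError.
    let ack_pdu = AckPdu::new(
        pdu_header,
        FileDirectiveType::FinishedPdu,
        ConditionCode::NoError,
        TransactionStatus::Active,
    )
    .expect("creating ACK PDU failed");

    let mut buf = [0u8; 64];
    let written = ack_pdu.write_to_bytes(&mut buf).expect("writing ACK PDU failed");
    assert_eq!(written, ack_pdu.len_written());
}
```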

View File

@@ -55,18 +55,22 @@ impl EofPdu {
) )
} }
#[inline]
pub fn pdu_header(&self) -> &PduHeader { pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header &self.pdu_header
} }
#[inline]
pub fn condition_code(&self) -> ConditionCode { pub fn condition_code(&self) -> ConditionCode {
self.condition_code self.condition_code
} }
#[inline]
pub fn file_checksum(&self) -> u32 { pub fn file_checksum(&self) -> u32 {
self.file_checksum self.file_checksum
} }
#[inline]
pub fn file_size(&self) -> u64 { pub fn file_size(&self) -> u64 {
self.file_size self.file_size
} }
@@ -129,20 +133,9 @@ impl EofPdu {
fault_location, fault_location,
}) })
} }
}
impl CfdpPdu for EofPdu { /// Write [Self] to the provided buffer and returns the written size.
fn pdu_header(&self) -> &PduHeader { pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::EofPdu)
}
}
impl WritablePduPacket for EofPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written(); let expected_len = self.len_written();
if buf.len() < expected_len { if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall { return Err(ByteConversionError::ToSliceTooSmall {
@@ -172,11 +165,33 @@ impl WritablePduPacket for EofPdu {
Ok(current_idx) Ok(current_idx)
} }
fn len_written(&self) -> usize { pub fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len() self.pdu_header.header_len() + self.calc_pdu_datafield_len()
} }
} }
impl CfdpPdu for EofPdu {
#[inline]
fn pdu_header(&self) -> &PduHeader {
self.pdu_header()
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::EofPdu)
}
}
impl WritablePduPacket for EofPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.write_to_bytes(buf)
}
fn len_written(&self) -> usize {
self.len_written()
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@@ -220,7 +235,7 @@ mod tests {
#[test] #[test]
fn test_basic() { fn test_basic() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12); let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
assert_eq!(eof_pdu.len_written(), pdu_header.header_len() + 2 + 4 + 4); assert_eq!(eof_pdu.len_written(), pdu_header.header_len() + 2 + 4 + 4);
verify_state_no_error_no_crc(&eof_pdu, LargeFileFlag::Normal); verify_state_no_error_no_crc(&eof_pdu, LargeFileFlag::Normal);
@@ -229,7 +244,7 @@ mod tests {
#[test] #[test]
fn test_serialization() { fn test_serialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12); let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64]; let mut buf: [u8; 64] = [0; 64];
let res = eof_pdu.write_to_bytes(&mut buf); let res = eof_pdu.write_to_bytes(&mut buf);
@@ -261,7 +276,7 @@ mod tests {
#[test] #[test]
fn test_deserialization() { fn test_deserialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12); let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64]; let mut buf: [u8; 64] = [0; 64];
eof_pdu.write_to_bytes(&mut buf).unwrap(); eof_pdu.write_to_bytes(&mut buf).unwrap();
@@ -276,7 +291,7 @@ mod tests {
#[test] #[test]
fn test_write_to_vec() { fn test_write_to_vec() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12); let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64]; let mut buf: [u8; 64] = [0; 64];
let written = eof_pdu.write_to_bytes(&mut buf).unwrap(); let written = eof_pdu.write_to_bytes(&mut buf).unwrap();
@@ -287,7 +302,7 @@ mod tests {
#[test] #[test]
fn test_with_crc() { fn test_with_crc() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12); let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64]; let mut buf: [u8; 64] = [0; 64];
let written = eof_pdu.write_to_bytes(&mut buf).unwrap(); let written = eof_pdu.write_to_bytes(&mut buf).unwrap();
@@ -307,7 +322,7 @@ mod tests {
#[test] #[test]
fn test_with_large_file_flag() { fn test_with_large_file_flag() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Large); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Large);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12); let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
verify_state_no_error_no_crc(&eof_pdu, LargeFileFlag::Large); verify_state_no_error_no_crc(&eof_pdu, LargeFileFlag::Large);
assert_eq!(eof_pdu.len_written(), pdu_header.header_len() + 2 + 8 + 4); assert_eq!(eof_pdu.len_written(), pdu_header.header_len() + 2 + 8 + 4);
@@ -317,14 +332,14 @@ mod tests {
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
fn test_eof_serde() { fn test_eof_serde() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12); let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
generic_serde_test(eof_pdu); generic_serde_test(eof_pdu);
} }
fn generic_test_with_fault_location_and_error(crc: CrcFlag) { fn generic_test_with_fault_location_and_error(crc: CrcFlag) {
let pdu_conf = common_pdu_conf(crc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(crc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new( let eof_pdu = EofPdu::new(
pdu_header, pdu_header,
ConditionCode::FileChecksumFailure, ConditionCode::FileChecksumFailure,
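The same pattern for the EOF PDU, mirroring the tests above; import paths and the `CommonPduConfig` argument are assumed, the constructor and write calls are as shown in the diff:

```rust
use spacepackets::cfdp::pdu::eof::EofPdu;
use spacepackets::cfdp::pdu::{CommonPduConfig, PduHeader};

fn build_eof(pdu_conf: CommonPduConfig) {
    let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
    // No-error EOF PDU with file checksum 0x01020304 and a file size of 12 bytes.
    let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);

    let mut buf = [0u8; 64];
    let written = eof_pdu.write_to_bytes(&mut buf).expect("writing EOF PDU failed");
    assert_eq!(written, eof_pdu.len_written());
}
```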

View File

@@ -10,8 +10,10 @@ use serde::{Deserialize, Serialize};
use super::{CfdpPdu, FileDirectiveType, WritablePduPacket}; use super::{CfdpPdu, FileDirectiveType, WritablePduPacket};
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] #[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u2, exhaustive = true)]
#[repr(u8)] #[repr(u8)]
pub enum RecordContinuationState { pub enum RecordContinuationState {
NoStartNoEnd = 0b00, NoStartNoEnd = 0b00,
@@ -43,24 +45,27 @@ impl<'seg_meta> SegmentMetadata<'seg_meta> {
}) })
} }
#[inline]
pub fn record_continuation_state(&self) -> RecordContinuationState { pub fn record_continuation_state(&self) -> RecordContinuationState {
self.record_continuation_state self.record_continuation_state
} }
#[inline]
pub fn metadata(&self) -> Option<&'seg_meta [u8]> { pub fn metadata(&self) -> Option<&'seg_meta [u8]> {
self.metadata self.metadata
} }
pub fn written_len(&self) -> usize { #[inline]
pub fn len_written(&self) -> usize {
// Map empty metadata to 0 and slice to its length. // Map empty metadata to 0 and slice to its length.
1 + self.metadata.map_or(0, |meta| meta.len()) 1 + self.metadata.map_or(0, |meta| meta.len())
} }
pub(crate) fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> { pub(crate) fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.written_len() { if buf.len() < self.len_written() {
return Err(ByteConversionError::ToSliceTooSmall { return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(), found: buf.len(),
expected: self.written_len(), expected: self.len_written(),
}); });
} }
buf[0] = ((self.record_continuation_state as u8) << 6) buf[0] = ((self.record_continuation_state as u8) << 6)
@@ -68,7 +73,7 @@ impl<'seg_meta> SegmentMetadata<'seg_meta> {
if let Some(metadata) = self.metadata { if let Some(metadata) = self.metadata {
buf[1..1 + metadata.len()].copy_from_slice(metadata) buf[1..1 + metadata.len()].copy_from_slice(metadata)
} }
Ok(self.written_len()) Ok(self.len_written())
} }
pub(crate) fn from_bytes(buf: &'seg_meta [u8]) -> Result<Self, ByteConversionError> { pub(crate) fn from_bytes(buf: &'seg_meta [u8]) -> Result<Self, ByteConversionError> {
@@ -102,10 +107,12 @@ struct FdPduBase<'seg_meta> {
} }
impl CfdpPdu for FdPduBase<'_> { impl CfdpPdu for FdPduBase<'_> {
#[inline]
fn pdu_header(&self) -> &PduHeader { fn pdu_header(&self) -> &PduHeader {
&self.pdu_header self.pdu_header()
} }
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> { fn file_directive_type(&self) -> Option<FileDirectiveType> {
None None
} }
@@ -118,7 +125,7 @@ impl FdPduBase<'_> {
len += 4; len += 4;
} }
if self.segment_metadata.is_some() { if self.segment_metadata.is_some() {
len += self.segment_metadata.as_ref().unwrap().written_len() len += self.segment_metadata.as_ref().unwrap().len_written()
} }
len += file_data_len as usize; len += file_data_len as usize;
if self.crc_flag() == CrcFlag::WithCrc { if self.crc_flag() == CrcFlag::WithCrc {
@@ -143,6 +150,11 @@ impl FdPduBase<'_> {
)?; )?;
Ok(current_idx) Ok(current_idx)
} }
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
} }
/// File Data PDU abstraction. /// File Data PDU abstraction.
@@ -201,14 +213,22 @@ impl<'seg_meta, 'file_data> FileDataPdu<'seg_meta, 'file_data> {
.calc_pdu_datafield_len(self.file_data.len() as u64) .calc_pdu_datafield_len(self.file_data.len() as u64)
} }
pub fn segment_metadata(&self) -> Option<&SegmentMetadata> { #[inline]
pub fn segment_metadata(&self) -> Option<&SegmentMetadata<'_>> {
self.common.segment_metadata.as_ref() self.common.segment_metadata.as_ref()
} }
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
self.common.pdu_header()
}
#[inline]
pub fn offset(&self) -> u64 { pub fn offset(&self) -> u64 {
self.common.offset self.common.offset
} }
#[inline]
pub fn file_data(&self) -> &'file_data [u8] { pub fn file_data(&self) -> &'file_data [u8] {
self.file_data self.file_data
} }
@@ -221,7 +241,7 @@ impl<'seg_meta, 'file_data> FileDataPdu<'seg_meta, 'file_data> {
let mut segment_metadata = None; let mut segment_metadata = None;
if pdu_header.seg_metadata_flag == SegmentMetadataFlag::Present { if pdu_header.seg_metadata_flag == SegmentMetadataFlag::Present {
segment_metadata = Some(SegmentMetadata::from_bytes(&buf[current_idx..])?); segment_metadata = Some(SegmentMetadata::from_bytes(&buf[current_idx..])?);
current_idx += segment_metadata.as_ref().unwrap().written_len(); current_idx += segment_metadata.as_ref().unwrap().len_written();
} }
let (fss, offset) = read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx..]); let (fss, offset) = read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx..]);
current_idx += fss; current_idx += fss;
@@ -241,19 +261,9 @@ impl<'seg_meta, 'file_data> FileDataPdu<'seg_meta, 'file_data> {
file_data: &buf[current_idx..full_len_without_crc], file_data: &buf[current_idx..full_len_without_crc],
}) })
} }
}
impl CfdpPdu for FileDataPdu<'_, '_> {
fn pdu_header(&self) -> &PduHeader {
&self.common.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> { /// Write [Self] to the provided buffer and returns the written size.
None pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
}
}
impl WritablePduPacket for FileDataPdu<'_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
if buf.len() < self.len_written() { if buf.len() < self.len_written() {
return Err(ByteConversionError::ToSliceTooSmall { return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(), found: buf.len(),
@@ -271,10 +281,31 @@ impl WritablePduPacket for FileDataPdu<'_, '_> {
Ok(current_idx) Ok(current_idx)
} }
fn len_written(&self) -> usize { pub fn len_written(&self) -> usize {
self.common.pdu_header.header_len() + self.calc_pdu_datafield_len() self.common.pdu_header.header_len() + self.calc_pdu_datafield_len()
} }
} }
impl CfdpPdu for FileDataPdu<'_, '_> {
#[inline]
fn pdu_header(&self) -> &PduHeader {
&self.common.pdu_header
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
None
}
}
impl WritablePduPacket for FileDataPdu<'_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.write_to_bytes(buf)
}
fn len_written(&self) -> usize {
self.len_written()
}
}
/// File Data PDU creator abstraction. /// File Data PDU creator abstraction.
/// ///
@@ -419,8 +450,8 @@ pub fn calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(
segment_metadata: Option<&SegmentMetadata>, segment_metadata: Option<&SegmentMetadata>,
) -> usize { ) -> usize {
let mut subtract = pdu_header.header_len(); let mut subtract = pdu_header.header_len();
if segment_metadata.is_some() { if let Some(segment_metadata) = segment_metadata {
subtract += 1 + segment_metadata.as_ref().unwrap().metadata().unwrap().len(); subtract += 1 + segment_metadata.metadata().unwrap().len();
} }
if pdu_header.common_pdu_conf().file_flag == LargeFileFlag::Large { if pdu_header.common_pdu_conf().file_flag == LargeFileFlag::Large {
subtract += 8; subtract += 8;
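A reader-side sketch for the renamed accessors in this hunk (`written_len` → `len_written`). How the `FileDataPdu` instance is obtained, e.g. via a `from_bytes`-style parser, is outside this hunk and assumed; the accessor names come from the diff:

```rust
use spacepackets::cfdp::pdu::file_data::FileDataPdu; // assumed path

fn inspect(pdu: &FileDataPdu) {
    // Offset of this chunk within the file plus the raw file data slice.
    println!("offset {}, {} file data bytes", pdu.offset(), pdu.file_data().len());
    // Optional per-segment metadata; len_written() replaces the old written_len() name.
    if let Some(seg_meta) = pdu.segment_metadata() {
        println!(
            "segment metadata: {} serialized bytes, continuation state {:?}",
            seg_meta.len_written(),
            seg_meta.record_continuation_state()
        );
    }
}
```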

View File

@@ -50,12 +50,12 @@ pub struct FinishedPduCreator<'fs_responses> {
impl<'fs_responses> FinishedPduCreator<'fs_responses> { impl<'fs_responses> FinishedPduCreator<'fs_responses> {
/// Default finished PDU: No error (no fault location field) and no filestore responses. /// Default finished PDU: No error (no fault location field) and no filestore responses.
pub fn new_default( pub fn new_no_error(
pdu_header: PduHeader, pdu_header: PduHeader,
delivery_code: DeliveryCode, delivery_code: DeliveryCode,
file_status: FileStatus, file_status: FileStatus,
) -> Self { ) -> Self {
Self::new_generic( Self::new(
pdu_header, pdu_header,
ConditionCode::NoError, ConditionCode::NoError,
delivery_code, delivery_code,
@@ -72,7 +72,7 @@ impl<'fs_responses> FinishedPduCreator<'fs_responses> {
file_status: FileStatus, file_status: FileStatus,
fault_location: EntityIdTlv, fault_location: EntityIdTlv,
) -> Self { ) -> Self {
Self::new_generic( Self::new(
pdu_header, pdu_header,
condition_code, condition_code,
delivery_code, delivery_code,
@@ -82,7 +82,7 @@ impl<'fs_responses> FinishedPduCreator<'fs_responses> {
) )
} }
pub fn new_generic( pub fn new(
mut pdu_header: PduHeader, mut pdu_header: PduHeader,
condition_code: ConditionCode, condition_code: ConditionCode,
delivery_code: DeliveryCode, delivery_code: DeliveryCode,
@@ -109,23 +109,33 @@ impl<'fs_responses> FinishedPduCreator<'fs_responses> {
finished_pdu finished_pdu
} }
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
#[inline]
pub fn condition_code(&self) -> ConditionCode { pub fn condition_code(&self) -> ConditionCode {
self.condition_code self.condition_code
} }
#[inline]
pub fn delivery_code(&self) -> DeliveryCode { pub fn delivery_code(&self) -> DeliveryCode {
self.delivery_code self.delivery_code
} }
#[inline]
pub fn file_status(&self) -> FileStatus { pub fn file_status(&self) -> FileStatus {
self.file_status self.file_status
} }
// If there are no filestore responses, an empty slice will be returned. // If there are no filestore responses, an empty slice will be returned.
#[inline]
pub fn filestore_responses(&self) -> &[FilestoreResponseTlv<'_, '_, '_>] { pub fn filestore_responses(&self) -> &[FilestoreResponseTlv<'_, '_, '_>] {
self.fs_responses self.fs_responses
} }
#[inline]
pub fn fault_location(&self) -> Option<EntityIdTlv> { pub fn fault_location(&self) -> Option<EntityIdTlv> {
self.fault_location self.fault_location
} }
@@ -143,20 +153,9 @@ impl<'fs_responses> FinishedPduCreator<'fs_responses> {
} }
datafield_len datafield_len
} }
}
impl CfdpPdu for FinishedPduCreator<'_> { /// Write [Self] to the provided buffer and returns the written size.
fn pdu_header(&self) -> &PduHeader { pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::FinishedPdu)
}
}
impl WritablePduPacket for FinishedPduCreator<'_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written(); let expected_len = self.len_written();
if buf.len() < expected_len { if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall { return Err(ByteConversionError::ToSliceTooSmall {
@@ -185,11 +184,33 @@ impl WritablePduPacket for FinishedPduCreator<'_> {
Ok(current_idx) Ok(current_idx)
} }
fn len_written(&self) -> usize { pub fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len() self.pdu_header.header_len() + self.calc_pdu_datafield_len()
} }
} }
impl CfdpPdu for FinishedPduCreator<'_> {
#[inline]
fn pdu_header(&self) -> &PduHeader {
self.pdu_header()
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::FinishedPdu)
}
}
impl WritablePduPacket for FinishedPduCreator<'_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.write_to_bytes(buf)
}
fn len_written(&self) -> usize {
self.len_written()
}
}
/// Helper structure to loop through all filestore responses of a read Finished PDU. It should be /// Helper structure to loop through all filestore responses of a read Finished PDU. It should be
/// noted that iterators in Rust are not fallible, but the TLV creation can fail, for example if /// noted that iterators in Rust are not fallible, but the TLV creation can fail, for example if
/// the raw TLV data is invalid for some reason. In that case, the iterator will yield [None] /// the raw TLV data is invalid for some reason. In that case, the iterator will yield [None]
@@ -276,10 +297,12 @@ impl<'buf> FinishedPduReader<'buf> {
}) })
} }
#[inline]
pub fn fs_responses_raw(&self) -> &[u8] { pub fn fs_responses_raw(&self) -> &[u8] {
self.fs_responses_raw self.fs_responses_raw
} }
#[inline]
pub fn fs_responses_iter(&self) -> FilestoreResponseIterator<'_> { pub fn fs_responses_iter(&self) -> FilestoreResponseIterator<'_> {
FilestoreResponseIterator { FilestoreResponseIterator {
responses_buf: self.fs_responses_raw, responses_buf: self.fs_responses_raw,
@@ -287,22 +310,31 @@ impl<'buf> FinishedPduReader<'buf> {
} }
} }
#[inline]
pub fn condition_code(&self) -> ConditionCode { pub fn condition_code(&self) -> ConditionCode {
self.condition_code self.condition_code
} }
#[inline]
pub fn delivery_code(&self) -> DeliveryCode { pub fn delivery_code(&self) -> DeliveryCode {
self.delivery_code self.delivery_code
} }
#[inline]
pub fn file_status(&self) -> FileStatus { pub fn file_status(&self) -> FileStatus {
self.file_status self.file_status
} }
#[inline]
pub fn fault_location(&self) -> Option<EntityIdTlv> { pub fn fault_location(&self) -> Option<EntityIdTlv> {
self.fault_location self.fault_location
} }
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn parse_tlv_fields( fn parse_tlv_fields(
mut current_idx: usize, mut current_idx: usize,
full_len_without_crc: usize, full_len_without_crc: usize,
@@ -360,10 +392,12 @@ impl<'buf> FinishedPduReader<'buf> {
} }
impl CfdpPdu for FinishedPduReader<'_> { impl CfdpPdu for FinishedPduReader<'_> {
#[inline]
fn pdu_header(&self) -> &PduHeader { fn pdu_header(&self) -> &PduHeader {
&self.pdu_header self.pdu_header()
} }
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> { fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::FinishedPdu) Some(FileDirectiveType::FinishedPdu)
} }
@@ -406,8 +440,8 @@ mod tests {
delivery_code: DeliveryCode, delivery_code: DeliveryCode,
file_status: FileStatus, file_status: FileStatus,
) -> FinishedPduCreator<'static> { ) -> FinishedPduCreator<'static> {
-       let pdu_header = PduHeader::new_no_file_data(common_pdu_conf(crc_flag, fss), 0);
-       FinishedPduCreator::new_default(pdu_header, delivery_code, file_status)
+       let pdu_header = PduHeader::new_for_file_directive(common_pdu_conf(crc_flag, fss), 0);
+       FinishedPduCreator::new_no_error(pdu_header, delivery_code, file_status)
} }
#[test] #[test]
@@ -577,8 +611,10 @@ mod tests {
#[test] #[test]
fn test_with_fault_location() { fn test_with_fault_location() {
-       let pdu_header =
-           PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
+       let pdu_header = PduHeader::new_for_file_directive(
+           common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
+           0,
+       );
let finished_pdu = FinishedPduCreator::new_with_error( let finished_pdu = FinishedPduCreator::new_with_error(
pdu_header, pdu_header,
ConditionCode::NakLimitReached, ConditionCode::NakLimitReached,
@@ -599,8 +635,10 @@ mod tests {
#[test] #[test]
fn test_deserialization_with_fault_location() { fn test_deserialization_with_fault_location() {
-       let pdu_header =
-           PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
+       let pdu_header = PduHeader::new_for_file_directive(
+           common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
+           0,
+       );
let entity_id_tlv = EntityIdTlv::new(TEST_DEST_ID.into()); let entity_id_tlv = EntityIdTlv::new(TEST_DEST_ID.into());
let finished_pdu = FinishedPduCreator::new_with_error( let finished_pdu = FinishedPduCreator::new_with_error(
pdu_header, pdu_header,
@@ -635,9 +673,11 @@ mod tests {
.unwrap(); .unwrap();
let fs_responses = &[fs_response_0, fs_response_1]; let fs_responses = &[fs_response_0, fs_response_1];
-       let pdu_header =
-           PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
-       let finished_pdu = FinishedPduCreator::new_generic(
+       let pdu_header = PduHeader::new_for_file_directive(
+           common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
+           0,
+       );
+       let finished_pdu = FinishedPduCreator::new(
pdu_header, pdu_header,
ConditionCode::NakLimitReached, ConditionCode::NakLimitReached,
DeliveryCode::Incomplete, DeliveryCode::Incomplete,
@@ -670,9 +710,11 @@ mod tests {
.unwrap(); .unwrap();
let fs_responses = &[fs_response_0, fs_response_1]; let fs_responses = &[fs_response_0, fs_response_1];
-       let pdu_header =
-           PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
-       let finished_pdu = FinishedPduCreator::new_generic(
+       let pdu_header = PduHeader::new_for_file_directive(
+           common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
+           0,
+       );
+       let finished_pdu = FinishedPduCreator::new(
pdu_header, pdu_header,
ConditionCode::NakLimitReached, ConditionCode::NakLimitReached,
DeliveryCode::Incomplete, DeliveryCode::Incomplete,


@@ -128,18 +128,27 @@ impl<'src_name, 'dest_name, 'opts> MetadataPduCreator<'src_name, 'dest_name, 'op
pdu pdu
} }
#[inline]
pub fn metadata_params(&self) -> &MetadataGenericParams { pub fn metadata_params(&self) -> &MetadataGenericParams {
&self.metadata_params &self.metadata_params
} }
#[inline]
pub fn src_file_name(&self) -> Lv<'src_name> { pub fn src_file_name(&self) -> Lv<'src_name> {
self.src_file_name self.src_file_name
} }
#[inline]
pub fn dest_file_name(&self) -> Lv<'dest_name> { pub fn dest_file_name(&self) -> Lv<'dest_name> {
self.dest_file_name self.dest_file_name
} }
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
#[inline]
pub fn options(&self) -> &'opts [u8] { pub fn options(&self) -> &'opts [u8] {
self.options self.options
} }
@@ -169,20 +178,9 @@ impl<'src_name, 'dest_name, 'opts> MetadataPduCreator<'src_name, 'dest_name, 'op
} }
len len
} }
-}
-impl CfdpPdu for MetadataPduCreator<'_, '_, '_> {
-    fn pdu_header(&self) -> &PduHeader {
-        &self.pdu_header
-    }
-    fn file_directive_type(&self) -> Option<FileDirectiveType> {
-        Some(FileDirectiveType::MetadataPdu)
-    }
-}
-impl WritablePduPacket for MetadataPduCreator<'_, '_, '_> {
-    fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
+    /// Write [Self] to the provided buffer and returns the written size.
+    pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written(); let expected_len = self.len_written();
if buf.len() < expected_len { if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall { return Err(ByteConversionError::ToSliceTooSmall {
@@ -217,11 +215,33 @@ impl WritablePduPacket for MetadataPduCreator<'_, '_, '_> {
Ok(current_idx) Ok(current_idx)
} }
-   fn len_written(&self) -> usize {
+   pub fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len() self.pdu_header.header_len() + self.calc_pdu_datafield_len()
} }
} }
impl CfdpPdu for MetadataPduCreator<'_, '_, '_> {
#[inline]
fn pdu_header(&self) -> &PduHeader {
self.pdu_header()
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::MetadataPdu)
}
}
impl WritablePduPacket for MetadataPduCreator<'_, '_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.write_to_bytes(buf)
}
fn len_written(&self) -> usize {
self.len_written()
}
}
/// Helper structure to loop through all options of a metadata PDU. It should be noted that
/// iterators in Rust are not fallible, but the TLV creation can fail, for example if the raw TLV
/// data is invalid for some reason. In that case, the iterator will yield [None] because there
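As an editorial sketch (not part of the diff) of how the raw options slice can also be consumed by hand, assuming the Tlv::from_bytes and len_full APIs referenced elsewhere in this changeset and an illustrative metadata_pdu_reader variable:

// Walk the raw options field of a metadata PDU reader, TLV by TLV.
let mut remaining = metadata_pdu_reader.options();
while !remaining.is_empty() {
    let tlv = Tlv::from_bytes(remaining)?;
    // Standard options like filestore requests or messages to the user are handled here.
    remaining = &remaining[tlv.len_full()..];
}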
@@ -330,26 +350,36 @@ impl<'raw> MetadataPduReader<'raw> {
}) })
} }
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
#[inline]
pub fn options(&self) -> &'raw [u8] { pub fn options(&self) -> &'raw [u8] {
self.options self.options
} }
#[inline]
pub fn metadata_params(&self) -> &MetadataGenericParams { pub fn metadata_params(&self) -> &MetadataGenericParams {
&self.metadata_params &self.metadata_params
} }
-   pub fn src_file_name(&self) -> Lv {
+   #[inline]
+   pub fn src_file_name(&self) -> Lv<'_> {
        self.src_file_name
    }
-   pub fn dest_file_name(&self) -> Lv {
+   #[inline]
+   pub fn dest_file_name(&self) -> Lv<'_> {
self.dest_file_name self.dest_file_name
} }
} }
impl CfdpPdu for MetadataPduReader<'_> {
+   #[inline]
    fn pdu_header(&self) -> &PduHeader {
-       &self.pdu_header
+       self.pdu_header()
    }
fn file_directive_type(&self) -> Option<FileDirectiveType> { fn file_directive_type(&self) -> Option<FileDirectiveType> {
@@ -392,7 +422,7 @@ pub mod tests {
Lv<'static>, Lv<'static>,
MetadataPduCreator<'static, 'static, '_>, MetadataPduCreator<'static, 'static, '_>,
) { ) {
-       let pdu_header = PduHeader::new_no_file_data(common_pdu_conf(crc_flag, fss), 0);
+       let pdu_header = PduHeader::new_for_file_directive(common_pdu_conf(crc_flag, fss), 0);
let metadata_params = MetadataGenericParams::new(closure_requested, checksum_type, 0x1010); let metadata_params = MetadataGenericParams::new(closure_requested, checksum_type, 0x1010);
let src_filename = Lv::new_from_str(SRC_FILENAME).expect("Generating string LV failed"); let src_filename = Lv::new_from_str(SRC_FILENAME).expect("Generating string LV failed");
let dest_filename = let dest_filename =


@@ -1,4 +1,6 @@
//! CFDP Packet Data Unit (PDU) support. //! CFDP Packet Data Unit (PDU) support.
use crate::cfdp::pdu::ack::InvalidAckedDirectiveCodeError;
use crate::cfdp::pdu::nak::InvalidStartOrEndOfScopeError;
use crate::cfdp::*; use crate::cfdp::*;
use crate::crc::CRC_CCITT_FALSE; use crate::crc::CRC_CCITT_FALSE;
use crate::util::{UnsignedByteField, UnsignedByteFieldU8, UnsignedEnum}; use crate::util::{UnsignedByteField, UnsignedByteFieldU8, UnsignedEnum};
@@ -57,14 +59,14 @@ pub enum PduError {
expected: FileDirectiveType, expected: FileDirectiveType,
}, },
    /// The directive type field contained a value not in the range of permitted values. This can
-   /// also happen if an invalid value is passed to the ACK PDU constructor.
+   /// also happen if an invalid value is passed to the ACK PDU reader.
    #[error("invalid directive type, found {found:?}, expected {expected:?}")]
    InvalidDirectiveType {
        found: u8,
        expected: Option<FileDirectiveType>,
    },
-   #[error("invalid start or end of scope value for NAK PDU")]
-   InvalidStartOrEndOfScopeValue,
+   #[error("nak pdu: {0}")]
+   InvalidStartOrEndOfScope(#[from] InvalidStartOrEndOfScopeError),
/// Invalid condition code. Contains the raw detected value. /// Invalid condition code. Contains the raw detected value.
#[error("invalid condition code {0}")] #[error("invalid condition code {0}")]
InvalidConditionCode(u8), InvalidConditionCode(u8),
@@ -85,6 +87,15 @@ pub enum PduError {
TlvLv(#[from] TlvLvError), TlvLv(#[from] TlvLvError),
} }
impl From<InvalidAckedDirectiveCodeError> for PduError {
fn from(value: InvalidAckedDirectiveCodeError) -> Self {
Self::InvalidDirectiveType {
found: value.0 as u8,
expected: None,
}
}
}
pub trait WritablePduPacket {
    fn len_written(&self) -> usize;
    fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError>;
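For illustration only (this helper is not part of the crate), the two trait methods combine naturally into a generic serialization helper; the alloc feature gate mirrors the crate's existing feature flags:

#[cfg(feature = "alloc")]
fn pdu_to_vec(pdu: &impl WritablePduPacket) -> Result<alloc::vec::Vec<u8>, PduError> {
    // Allocate exactly as many bytes as the PDU reports it needs, then serialize into them.
    let mut buf = alloc::vec![0; pdu.len_written()];
    pdu.write_to_bytes(&mut buf)?;
    Ok(buf)
}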
@@ -311,6 +322,8 @@ pub struct PduHeader {
} }
impl PduHeader { impl PduHeader {
pub const FIXED_LEN: usize = FIXED_HEADER_LEN;
#[inline] #[inline]
pub fn new_for_file_data( pub fn new_for_file_data(
pdu_conf: CommonPduConfig, pdu_conf: CommonPduConfig,
@@ -337,8 +350,9 @@ impl PduHeader {
SegmentationControl::NoRecordBoundaryPreservation, SegmentationControl::NoRecordBoundaryPreservation,
) )
} }
#[inline] #[inline]
-   pub fn new_no_file_data(pdu_conf: CommonPduConfig, pdu_datafield_len: u16) -> Self {
+   pub fn new_for_file_directive(pdu_conf: CommonPduConfig, pdu_datafield_len: u16) -> Self {
Self::new_generic( Self::new_generic(
PduType::FileDirective, PduType::FileDirective,
pdu_conf, pdu_conf,
@@ -348,6 +362,17 @@ impl PduHeader {
) )
} }
#[inline]
pub fn from_pdu_conf_for_file_directive(pdu_conf: CommonPduConfig) -> Self {
Self::new_generic(
PduType::FileDirective,
pdu_conf,
0,
SegmentMetadataFlag::NotPresent,
SegmentationControl::NoRecordBoundaryPreservation,
)
}
#[inline] #[inline]
pub fn new_generic( pub fn new_generic(
pdu_type: PduType, pdu_type: PduType,
@@ -386,15 +411,14 @@ impl PduHeader {
self.header_len() + self.pdu_datafield_len as usize self.header_len() + self.pdu_datafield_len as usize
} }
-   pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
-       // Internal note: There is currently no way to pass a PDU configuration like this, but
-       // this check is still kept for defensive programming.
-       if self.pdu_conf.source_entity_id.size() != self.pdu_conf.dest_entity_id.size() {
-           return Err(PduError::SourceDestIdLenMissmatch {
-               src_id_len: self.pdu_conf.source_entity_id.size(),
-               dest_id_len: self.pdu_conf.dest_entity_id.size(),
-           });
-       }
+   pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
+       // The API does not allow passing entity IDs with different sizes, so this should
+       // never happen.
+       assert_eq!(
+           self.pdu_conf.source_entity_id.size(),
+           self.pdu_conf.dest_entity_id.size(),
+           "unexpected missmatch of source and destination entity ID length"
+       );
        if buf.len()
            < FIXED_HEADER_LEN
                + self.pdu_conf.source_entity_id.size()
@@ -403,8 +427,7 @@ impl PduHeader {
            return Err(ByteConversionError::ToSliceTooSmall {
                found: buf.len(),
                expected: FIXED_HEADER_LEN,
-           }
-           .into());
+           });
        }
        let mut current_idx = 0;
        buf[current_idx] = (CFDP_VERSION_2 << 5)
@@ -565,6 +588,7 @@ impl PduHeader {
&self.pdu_conf &self.pdu_conf
} }
#[inline]
pub fn seg_metadata_flag(&self) -> SegmentMetadataFlag { pub fn seg_metadata_flag(&self) -> SegmentMetadataFlag {
self.seg_metadata_flag self.seg_metadata_flag
} }
@@ -734,7 +758,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3); let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id) let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed"); .expect("common config creation failed");
-       let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
+       let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
assert_eq!(pdu_header.pdu_type(), PduType::FileDirective); assert_eq!(pdu_header.pdu_type(), PduType::FileDirective);
let common_conf_ref = pdu_header.common_pdu_conf(); let common_conf_ref = pdu_header.common_pdu_conf();
assert_eq!(*common_conf_ref, common_pdu_cfg); assert_eq!(*common_conf_ref, common_pdu_cfg);
@@ -800,7 +824,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3); let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id) let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed"); .expect("common config creation failed");
-       let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
+       let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7]; let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf); let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok()); assert!(res.is_ok());
@@ -817,7 +841,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3); let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id) let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed"); .expect("common config creation failed");
-       let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
+       let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7]; let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf); let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok()); assert!(res.is_ok());
@@ -890,7 +914,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3); let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id) let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed"); .expect("common config creation failed");
-       let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
+       let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7]; let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf); let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok()); assert!(res.is_ok());
@@ -935,7 +959,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3); let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id) let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed"); .expect("common config creation failed");
-       let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
+       let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7]; let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf); let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok()); assert!(res.is_ok());
@@ -1018,7 +1042,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3); let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id) let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed"); .expect("common config creation failed");
-       let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
+       let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7]; let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf); let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok()); assert!(res.is_ok());
@@ -1042,7 +1066,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3); let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id) let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed"); .expect("common config creation failed");
-       let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
+       let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7]; let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf); let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok()); assert!(res.is_ok());
@@ -1074,4 +1098,12 @@ mod tests {
let common_pdu_cfg_1 = common_pdu_cfg_0; let common_pdu_cfg_1 = common_pdu_cfg_0;
assert_eq!(common_pdu_cfg_0, common_pdu_cfg_1); assert_eq!(common_pdu_cfg_0, common_pdu_cfg_1);
} }
#[test]
fn test_ctor_from_pdu_conf() {
assert_eq!(
PduHeader::from_pdu_conf_for_file_directive(CommonPduConfig::default()),
PduHeader::new_for_file_directive(CommonPduConfig::default(), 0)
);
}
} }

File diff suppressed because it is too large.


@@ -26,6 +26,7 @@ pub trait GenericTlv {
/// Checks whether the type field contains one of the standard types specified in the CFDP /// Checks whether the type field contains one of the standard types specified in the CFDP
/// standard and is part of the [TlvType] enum. /// standard and is part of the [TlvType] enum.
#[inline]
fn is_standard_tlv(&self) -> bool { fn is_standard_tlv(&self) -> bool {
if let TlvTypeField::Standard(_) = self.tlv_type_field() { if let TlvTypeField::Standard(_) = self.tlv_type_field() {
return true; return true;
@@ -34,6 +35,7 @@ pub trait GenericTlv {
} }
/// Returns the standard TLV type if the TLV field is not a custom field /// Returns the standard TLV type if the TLV field is not a custom field
#[inline]
fn tlv_type(&self) -> Option<TlvType> { fn tlv_type(&self) -> Option<TlvType> {
if let TlvTypeField::Standard(tlv_type) = self.tlv_type_field() { if let TlvTypeField::Standard(tlv_type) = self.tlv_type_field() {
Some(tlv_type) Some(tlv_type)
@@ -47,17 +49,20 @@ pub trait ReadableTlv {
fn value(&self) -> &[u8]; fn value(&self) -> &[u8];
/// Checks whether the value field is empty. /// Checks whether the value field is empty.
#[inline]
fn is_empty(&self) -> bool { fn is_empty(&self) -> bool {
self.value().is_empty() self.value().is_empty()
} }
/// Helper method to retrieve the length of the value. Simply calls the [slice::len] method of /// Helper method to retrieve the length of the value. Simply calls the [slice::len] method of
/// [Self::value] /// [Self::value]
#[inline]
fn len_value(&self) -> usize { fn len_value(&self) -> usize {
self.value().len() self.value().len()
} }
/// Returns the full raw length, including the length byte. /// Returns the full raw length, including the length byte.
#[inline]
fn len_full(&self) -> usize { fn len_full(&self) -> usize {
self.len_value() + 2 self.len_value() + 2
} }
@@ -153,14 +158,19 @@ pub struct Tlv<'data> {
} }
impl<'data> Tlv<'data> { impl<'data> Tlv<'data> {
-   pub fn new(tlv_type: TlvType, data: &[u8]) -> Result<Tlv, TlvLvDataTooLargeError> {
+   pub const MIN_LEN: usize = MIN_TLV_LEN;
+   pub fn new(tlv_type: TlvType, data: &[u8]) -> Result<Tlv<'_>, TlvLvDataTooLargeError> {
        Ok(Tlv {
            tlv_type_field: TlvTypeField::Standard(tlv_type),
            lv: Lv::new(data)?,
        })
    }
-   pub fn new_with_custom_type(tlv_type: u8, data: &[u8]) -> Result<Tlv, TlvLvDataTooLargeError> {
+   pub fn new_with_custom_type(
+       tlv_type: u8,
+       data: &[u8],
+   ) -> Result<Tlv<'_>, TlvLvDataTooLargeError> {
Ok(Tlv { Ok(Tlv {
tlv_type_field: TlvTypeField::Custom(tlv_type), tlv_type_field: TlvTypeField::Custom(tlv_type),
lv: Lv::new(data)?, lv: Lv::new(data)?,
@@ -193,6 +203,7 @@ impl<'data> Tlv<'data> {
/// If the TLV was generated from a raw bytestream using [Self::from_bytes], the raw start /// If the TLV was generated from a raw bytestream using [Self::from_bytes], the raw start
/// of the TLV can be retrieved with this method. /// of the TLV can be retrieved with this method.
#[inline]
pub fn raw_data(&self) -> Option<&[u8]> { pub fn raw_data(&self) -> Option<&[u8]> {
self.lv.raw_data() self.lv.raw_data()
} }
@@ -226,12 +237,15 @@ impl WritableTlv for Tlv<'_> {
self.lv.write_to_be_bytes_no_len_check(&mut buf[1..]); self.lv.write_to_be_bytes_no_len_check(&mut buf[1..]);
Ok(self.len_full()) Ok(self.len_full())
} }
#[inline]
fn len_written(&self) -> usize { fn len_written(&self) -> usize {
self.len_full() self.len_full()
} }
} }
impl GenericTlv for Tlv<'_> { impl GenericTlv for Tlv<'_> {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField { fn tlv_type_field(&self) -> TlvTypeField {
self.tlv_type_field self.tlv_type_field
} }
@@ -274,6 +288,19 @@ pub mod alloc_mod {
} }
} }
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
generic_len_check_data_serialization(buf, self.data.len(), MIN_TLV_LEN)?;
buf[0] = self.tlv_type_field.into();
buf[1] = self.data.len() as u8;
buf[2..2 + self.data.len()].copy_from_slice(&self.data);
Ok(self.len_written())
}
#[inline]
fn len_written(&self) -> usize {
self.data.len() + 2
}
pub fn as_tlv(&self) -> Tlv<'_> { pub fn as_tlv(&self) -> Tlv<'_> {
Tlv { Tlv {
tlv_type_field: self.tlv_type_field, tlv_type_field: self.tlv_type_field,
@@ -285,6 +312,7 @@ pub mod alloc_mod {
} }
impl ReadableTlv for TlvOwned { impl ReadableTlv for TlvOwned {
#[inline]
fn value(&self) -> &[u8] { fn value(&self) -> &[u8] {
&self.data &self.data
} }
@@ -292,19 +320,17 @@ pub mod alloc_mod {
impl WritableTlv for TlvOwned { impl WritableTlv for TlvOwned {
    fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
-       generic_len_check_data_serialization(buf, self.data.len(), MIN_TLV_LEN)?;
-       buf[0] = self.tlv_type_field.into();
-       buf[1] = self.data.len() as u8;
-       buf[2..2 + self.data.len()].copy_from_slice(&self.data);
-       Ok(self.len_written())
+       self.write_to_bytes(buf)
    }
+   #[inline]
    fn len_written(&self) -> usize {
-       self.data.len() + 2
+       self.len_written()
    }
} }
impl GenericTlv for TlvOwned { impl GenericTlv for TlvOwned {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField { fn tlv_type_field(&self) -> TlvTypeField {
self.tlv_type_field self.tlv_type_field
} }
@@ -331,6 +357,7 @@ pub struct EntityIdTlv {
} }
impl EntityIdTlv { impl EntityIdTlv {
#[inline]
pub fn new(entity_id: UnsignedByteField) -> Self { pub fn new(entity_id: UnsignedByteField) -> Self {
Self { entity_id } Self { entity_id }
} }
@@ -345,14 +372,17 @@ impl EntityIdTlv {
Ok(()) Ok(())
} }
#[inline]
pub fn entity_id(&self) -> &UnsignedByteField { pub fn entity_id(&self) -> &UnsignedByteField {
&self.entity_id &self.entity_id
} }
#[inline]
pub fn len_value(&self) -> usize { pub fn len_value(&self) -> usize {
self.entity_id.size() self.entity_id.size()
} }
#[inline]
pub fn len_full(&self) -> usize { pub fn len_full(&self) -> usize {
2 + self.entity_id.size() 2 + self.entity_id.size()
} }
@@ -370,16 +400,14 @@ impl EntityIdTlv {
} }
/// Convert to a generic [Tlv], which also erases the programmatic type information. /// Convert to a generic [Tlv], which also erases the programmatic type information.
-   pub fn to_tlv(self, buf: &mut [u8]) -> Result<Tlv, ByteConversionError> {
+   pub fn to_tlv(self, buf: &mut [u8]) -> Result<Tlv<'_>, ByteConversionError> {
        Self::len_check(buf)?;
        self.entity_id
            .write_to_be_bytes(&mut buf[2..2 + self.entity_id.size()])?;
        // Can't fail.
        Ok(Tlv::new(TlvType::EntityId, &buf[2..2 + self.entity_id.size()]).unwrap())
    }
-}
-impl WritableTlv for EntityIdTlv {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> { fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
Self::len_check(buf)?; Self::len_check(buf)?;
buf[0] = TlvType::EntityId as u8; buf[0] = TlvType::EntityId as u8;
@@ -387,12 +415,25 @@ impl WritableTlv for EntityIdTlv {
Ok(2 + self.entity_id.write_to_be_bytes(&mut buf[2..])?) Ok(2 + self.entity_id.write_to_be_bytes(&mut buf[2..])?)
} }
#[inline]
fn len_written(&self) -> usize { fn len_written(&self) -> usize {
self.len_full() self.len_full()
} }
} }
impl WritableTlv for EntityIdTlv {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.write_to_bytes(buf)
}
#[inline]
fn len_written(&self) -> usize {
self.len_written()
}
}
impl GenericTlv for EntityIdTlv { impl GenericTlv for EntityIdTlv {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField { fn tlv_type_field(&self) -> TlvTypeField {
TlvTypeField::Standard(TlvType::EntityId) TlvTypeField::Standard(TlvType::EntityId)
} }
@@ -437,6 +478,7 @@ impl TryFrom<Tlv<'_>> for EntityIdTlv {
} }
} }
#[inline]
pub fn fs_request_has_second_filename(action_code: FilestoreActionCode) -> bool { pub fn fs_request_has_second_filename(action_code: FilestoreActionCode) -> bool {
if action_code == FilestoreActionCode::RenameFile if action_code == FilestoreActionCode::RenameFile
|| action_code == FilestoreActionCode::AppendFile || action_code == FilestoreActionCode::AppendFile
@@ -459,6 +501,7 @@ struct FilestoreTlvBase<'first_name, 'second_name> {
} }
impl FilestoreTlvBase<'_, '_> { impl FilestoreTlvBase<'_, '_> {
#[inline]
fn base_len_value(&self) -> usize { fn base_len_value(&self) -> usize {
let mut len = 1 + self.first_name.len_full(); let mut len = 1 + self.first_name.len_full();
if let Some(second_name) = self.second_name { if let Some(second_name) = self.second_name {
@@ -568,22 +611,27 @@ impl<'first_name, 'second_name> FilestoreRequestTlv<'first_name, 'second_name> {
}) })
} }
#[inline]
pub fn action_code(&self) -> FilestoreActionCode { pub fn action_code(&self) -> FilestoreActionCode {
self.base.action_code self.base.action_code
} }
#[inline]
pub fn first_name(&self) -> Lv<'first_name> { pub fn first_name(&self) -> Lv<'first_name> {
self.base.first_name self.base.first_name
} }
#[inline]
pub fn second_name(&self) -> Option<Lv<'second_name>> { pub fn second_name(&self) -> Option<Lv<'second_name>> {
self.base.second_name self.base.second_name
} }
#[inline]
pub fn len_value(&self) -> usize { pub fn len_value(&self) -> usize {
self.base.base_len_value() self.base.base_len_value()
} }
#[inline]
pub fn len_full(&self) -> usize { pub fn len_full(&self) -> usize {
2 + self.len_value() 2 + self.len_value()
} }
@@ -622,9 +670,7 @@ impl<'first_name, 'second_name> FilestoreRequestTlv<'first_name, 'second_name> {
}, },
}) })
} }
}
impl WritableTlv for FilestoreRequestTlv<'_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> { fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.len_full() { if buf.len() < self.len_full() {
return Err(ByteConversionError::ToSliceTooSmall { return Err(ByteConversionError::ToSliceTooSmall {
@@ -650,12 +696,25 @@ impl WritableTlv for FilestoreRequestTlv<'_, '_> {
Ok(current_idx) Ok(current_idx)
} }
#[inline]
fn len_written(&self) -> usize { fn len_written(&self) -> usize {
self.len_full() self.len_full()
} }
} }
impl WritableTlv for FilestoreRequestTlv<'_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.write_to_bytes(buf)
}
#[inline]
fn len_written(&self) -> usize {
self.len_written()
}
}
impl GenericTlv for FilestoreRequestTlv<'_, '_> { impl GenericTlv for FilestoreRequestTlv<'_, '_> {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField { fn tlv_type_field(&self) -> TlvTypeField {
TlvTypeField::Standard(TlvType::FilestoreRequest) TlvTypeField::Standard(TlvType::FilestoreRequest)
} }
@@ -730,26 +789,32 @@ impl<'first_name, 'second_name, 'fs_msg> FilestoreResponseTlv<'first_name, 'seco
false false
} }
#[inline]
pub fn action_code(&self) -> FilestoreActionCode { pub fn action_code(&self) -> FilestoreActionCode {
self.base.action_code self.base.action_code
} }
#[inline]
pub fn status_code(&self) -> u8 { pub fn status_code(&self) -> u8 {
self.status_code self.status_code
} }
#[inline]
pub fn first_name(&self) -> Lv<'first_name> { pub fn first_name(&self) -> Lv<'first_name> {
self.base.first_name self.base.first_name
} }
#[inline]
pub fn second_name(&self) -> Option<Lv<'second_name>> { pub fn second_name(&self) -> Option<Lv<'second_name>> {
self.base.second_name self.base.second_name
} }
#[inline]
pub fn len_value(&self) -> usize { pub fn len_value(&self) -> usize {
self.base.base_len_value() + self.filestore_message.len_full() self.base.base_len_value() + self.filestore_message.len_full()
} }
#[inline]
pub fn len_full(&self) -> usize { pub fn len_full(&self) -> usize {
2 + self.len_value() 2 + self.len_value()
} }
@@ -807,9 +872,7 @@ impl<'first_name, 'second_name, 'fs_msg> FilestoreResponseTlv<'first_name, 'seco
filestore_message, filestore_message,
}) })
} }
}
impl WritableTlv for FilestoreResponseTlv<'_, '_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> { fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.len_full() { if buf.len() < self.len_full() {
return Err(ByteConversionError::ToSliceTooSmall { return Err(ByteConversionError::ToSliceTooSmall {
@@ -842,7 +905,19 @@ impl WritableTlv for FilestoreResponseTlv<'_, '_, '_> {
} }
} }
impl WritableTlv for FilestoreResponseTlv<'_, '_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.write_to_bytes(buf)
}
#[inline]
fn len_written(&self) -> usize {
self.len_written()
}
}
impl GenericTlv for FilestoreResponseTlv<'_, '_, '_> { impl GenericTlv for FilestoreResponseTlv<'_, '_, '_> {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField { fn tlv_type_field(&self) -> TlvTypeField {
TlvTypeField::Standard(TlvType::FilestoreResponse) TlvTypeField::Standard(TlvType::FilestoreResponse)
} }


@@ -37,10 +37,12 @@ impl<'data> MsgToUserTlv<'data> {
} }
} }
#[inline]
pub fn is_standard_tlv(&self) -> bool { pub fn is_standard_tlv(&self) -> bool {
true true
} }
#[inline]
pub fn tlv_type(&self) -> Option<TlvType> { pub fn tlv_type(&self) -> Option<TlvType> {
Some(TlvType::MsgToUser) Some(TlvType::MsgToUser)
} }
@@ -83,6 +85,7 @@ impl<'data> MsgToUserTlv<'data> {
Ok(msg_to_user) Ok(msg_to_user)
} }
#[inline]
pub fn to_tlv(&self) -> Tlv<'data> { pub fn to_tlv(&self) -> Tlv<'data> {
self.tlv self.tlv
} }
@@ -91,6 +94,17 @@ impl<'data> MsgToUserTlv<'data> {
pub fn to_owned(&self) -> TlvOwned { pub fn to_owned(&self) -> TlvOwned {
self.tlv.to_owned() self.tlv.to_owned()
} }
#[inline]
fn len_written(&self) -> usize {
self.len_full()
}
delegate!(
to self.tlv {
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
}
);
} }
impl<'a> From<MsgToUserTlv<'a>> for Tlv<'a> { impl<'a> From<MsgToUserTlv<'a>> for Tlv<'a> {
@@ -100,18 +114,18 @@ impl<'a> From<MsgToUserTlv<'a>> for Tlv<'a> {
} }
impl WritableTlv for MsgToUserTlv<'_> { impl WritableTlv for MsgToUserTlv<'_> {
+   #[inline]
    fn len_written(&self) -> usize {
-       self.len_full()
+       self.len_written()
    }
-   delegate!(
-       to self.tlv {
-           fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
-       }
-   );
+   fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
+       self.tlv.write_to_bytes(buf)
+   }
} }
impl GenericTlv for MsgToUserTlv<'_> { impl GenericTlv for MsgToUserTlv<'_> {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField { fn tlv_type_field(&self) -> TlvTypeField {
self.tlv.tlv_type_field() self.tlv.tlv_type_field()
} }


@@ -9,6 +9,7 @@ use crate::{
}; };
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
use alloc::vec::Vec; use alloc::vec::Vec;
use arbitrary_int::u4;
use core::fmt::Debug; use core::fmt::Debug;
use core::mem::size_of; use core::mem::size_of;
use num_enum::{IntoPrimitive, TryFromPrimitive}; use num_enum::{IntoPrimitive, TryFromPrimitive};
@@ -19,13 +20,16 @@ pub mod event;
pub mod hk; pub mod hk;
pub mod scheduling; pub mod scheduling;
pub mod tc; pub mod tc;
pub mod tc_pus_a;
pub mod tm; pub mod tm;
pub mod tm_pus_a;
pub mod verification; pub mod verification;
pub type CrcType = u16; pub type CrcType = u16;
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)] #[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
#[non_exhaustive] #[non_exhaustive]
pub enum PusServiceId { pub enum PusServiceId {
@@ -72,26 +76,27 @@ pub enum PusServiceId {
} }
/// All PUS versions. Only PUS C is supported by this library. /// All PUS versions. Only PUS C is supported by this library.
#[derive(PartialEq, Eq, Copy, Clone, Debug)] #[derive(PartialEq, Eq, Debug, num_enum::TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))] #[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u4, exhaustive = false)]
#[repr(u8)]
#[non_exhaustive] #[non_exhaustive]
pub enum PusVersion { pub enum PusVersion {
EsaPus = 0, EsaPus = 0,
PusA = 1, PusA = 1,
PusC = 2, PusC = 2,
Invalid = 0b1111,
} }
-impl TryFrom<u8> for PusVersion {
-    type Error = ();
-    fn try_from(value: u8) -> Result<Self, Self::Error> {
+impl TryFrom<u4> for PusVersion {
+    type Error = u4;
+    fn try_from(value: u4) -> Result<Self, Self::Error> {
        match value {
-           x if x == PusVersion::EsaPus as u8 => Ok(PusVersion::EsaPus),
-           x if x == PusVersion::PusA as u8 => Ok(PusVersion::PusA),
-           x if x == PusVersion::PusC as u8 => Ok(PusVersion::PusC),
-           _ => Err(()),
+           x if x == PusVersion::EsaPus.raw_value() => Ok(PusVersion::EsaPus),
+           x if x == PusVersion::PusA.raw_value() => Ok(PusVersion::PusA),
+           x if x == PusVersion::PusC.raw_value() => Ok(PusVersion::PusC),
+           _ => Err(value),
        }
    }
}
@@ -99,6 +104,7 @@ impl TryFrom<u8> for PusVersion {
/// ECSS Packet Type Codes (PTC)s. /// ECSS Packet Type Codes (PTC)s.
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)] #[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum PacketTypeCodes { pub enum PacketTypeCodes {
Boolean = 1, Boolean = 1,
@@ -120,6 +126,7 @@ pub type Ptc = PacketTypeCodes;
/// ECSS Packet Field Codes (PFC)s for the unsigned [Ptc]. /// ECSS Packet Field Codes (PFC)s for the unsigned [Ptc].
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)] #[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum PfcUnsigned { pub enum PfcUnsigned {
OneByte = 4, OneByte = 4,
@@ -137,6 +144,7 @@ pub enum PfcUnsigned {
/// ECSS Packet Field Codes (PFC)s for the real (floating point) [Ptc]. /// ECSS Packet Field Codes (PFC)s for the real (floating point) [Ptc].
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)] #[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum PfcReal { pub enum PfcReal {
/// 4 octets simple precision format (IEEE) /// 4 octets simple precision format (IEEE)
@@ -154,7 +162,7 @@ pub enum PfcReal {
#[cfg_attr(feature = "defmt", derive(defmt::Format))] #[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum PusError { pub enum PusError {
#[error("PUS version {0:?} not supported")] #[error("PUS version {0:?} not supported")]
-   VersionNotSupported(PusVersion),
+   VersionNotSupported(u4),
#[error("checksum verification for crc16 {0:#06x} failed")] #[error("checksum verification for crc16 {0:#06x} failed")]
ChecksumFailure(u16), ChecksumFailure(u16),
/// CRC16 needs to be calculated first /// CRC16 needs to be calculated first
@@ -167,13 +175,16 @@ pub enum PusError {
/// Generic trait to describe common attributes for both PUS Telecommands (TC) and PUS Telemetry /// Generic trait to describe common attributes for both PUS Telecommands (TC) and PUS Telemetry
/// (TM) packets. All PUS packets are also a special type of [CcsdsPacket]s. /// (TM) packets. All PUS packets are also a special type of [CcsdsPacket]s.
pub trait PusPacket: CcsdsPacket { pub trait PusPacket: CcsdsPacket {
-   const PUS_VERSION: PusVersion = PusVersion::PusC;
-   fn pus_version(&self) -> PusVersion;
+   fn pus_version(&self) -> Result<PusVersion, u4>;
    fn service(&self) -> u8;
    fn subservice(&self) -> u8;
    fn user_data(&self) -> &[u8];
-   fn opt_crc16(&self) -> Option<u16>;
+   /// CRC-16-CCITT checksum.
+   fn checksum(&self) -> Option<u16>;
+   /// The presence of the CRC-16-CCITT checksum is optional.
+   fn has_checksum(&self) -> bool {
+       self.checksum().is_some()
+   }
}
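A hedged, editorial sketch of how downstream code might use the reworked accessors; the helper name is made up and not part of the crate:

// Accept only PUS C packets which actually carry a CRC16 checksum.
fn is_checked_pus_c(packet: &impl PusPacket) -> bool {
    matches!(packet.pus_version(), Ok(PusVersion::PusC)) && packet.has_checksum()
}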
pub(crate) fn crc_from_raw_data(raw_data: &[u8]) -> Result<u16, ByteConversionError> { pub(crate) fn crc_from_raw_data(raw_data: &[u8]) -> Result<u16, ByteConversionError> {
@@ -200,13 +211,18 @@ pub(crate) fn user_data_from_raw(
current_idx: usize, current_idx: usize,
total_len: usize, total_len: usize,
slice: &[u8], slice: &[u8],
has_checksum: bool,
) -> Result<&[u8], ByteConversionError> { ) -> Result<&[u8], ByteConversionError> {
-   match current_idx {
-       _ if current_idx > total_len - 2 => Err(ByteConversionError::FromSliceTooSmall {
-           found: total_len - 2,
-           expected: current_idx,
-       }),
-       _ => Ok(&slice[current_idx..total_len - 2]),
-   }
+   if has_checksum {
+       if current_idx > total_len - 2 {
+           return Err(ByteConversionError::FromSliceTooSmall {
+               found: total_len - 2,
+               expected: current_idx,
+           });
+       }
+       Ok(&slice[current_idx..total_len - 2])
+   } else {
+       Ok(&slice[current_idx..total_len])
+   }
}
@@ -228,7 +244,8 @@ pub fn verify_crc16_ccitt_false_from_raw_to_pus_error_no_table(
.ok_or(PusError::ChecksumFailure(crc16)) .ok_or(PusError::ChecksumFailure(crc16))
} }
-pub(crate) fn verify_crc16_ccitt_false_from_raw(raw_data: &[u8]) -> bool {
+/// Verify the CRC16 of a raw packet.
+pub fn verify_crc16_ccitt_false_from_raw(raw_data: &[u8]) -> bool {
let mut digest = CRC_CCITT_FALSE.digest(); let mut digest = CRC_CCITT_FALSE.digest();
digest.update(raw_data); digest.update(raw_data);
if digest.finalize() == 0 { if digest.finalize() == 0 {
@@ -237,7 +254,8 @@ pub(crate) fn verify_crc16_ccitt_false_from_raw(raw_data: &[u8]) -> bool {
false false
} }
-pub(crate) fn verify_crc16_ccitt_false_from_raw_no_table(raw_data: &[u8]) -> bool {
+/// Verify the CRC16 of a raw packet, using the table-less implementation.
+pub fn verify_crc16_ccitt_false_from_raw_no_table(raw_data: &[u8]) -> bool {
let mut digest = CRC_CCITT_FALSE_NO_TABLE.digest(); let mut digest = CRC_CCITT_FALSE_NO_TABLE.digest();
digest.update(raw_data); digest.update(raw_data);
if digest.finalize() == 0 { if digest.finalize() == 0 {
@@ -246,28 +264,13 @@ pub(crate) fn verify_crc16_ccitt_false_from_raw_no_table(raw_data: &[u8]) -> boo
false false
} }
macro_rules! ccsds_impl {
() => {
delegate!(to self.sp_header {
#[inline]
fn ccsds_version(&self) -> u8;
#[inline]
fn packet_id(&self) -> crate::PacketId;
#[inline]
fn psc(&self) -> crate::PacketSequenceCtrl;
#[inline]
fn data_len(&self) -> u16;
});
}
}
macro_rules! sp_header_impls { macro_rules! sp_header_impls {
() => { () => {
delegate!(to self.sp_header { delegate!(to self.sp_header {
#[inline] #[inline]
-           pub fn set_apid(&mut self, apid: u16) -> bool;
+           pub fn set_apid(&mut self, apid: u11);
            #[inline]
-           pub fn set_seq_count(&mut self, seq_count: u16) -> bool;
+           pub fn set_seq_count(&mut self, seq_count: u14);
#[inline] #[inline]
pub fn set_seq_flags(&mut self, seq_flag: SequenceFlags); pub fn set_seq_flags(&mut self, seq_flag: SequenceFlags);
}); });
@@ -275,7 +278,6 @@ macro_rules! sp_header_impls {
} }
use crate::util::{GenericUnsignedByteField, ToBeBytes, UnsignedEnum}; use crate::util::{GenericUnsignedByteField, ToBeBytes, UnsignedEnum};
pub(crate) use ccsds_impl;
pub(crate) use sp_header_impls; pub(crate) use sp_header_impls;
/// Generic trait for ECSS enumeration which consist of a PFC field denoting their bit length /// Generic trait for ECSS enumeration which consist of a PFC field denoting their bit length
@@ -369,32 +371,44 @@ generic_ecss_enum_typedefs_and_from_impls! {
/// byte representation. This is especially useful for generic abstractions which depend only /// byte representation. This is especially useful for generic abstractions which depend only
/// on the serialization of those packets. /// on the serialization of those packets.
pub trait WritablePusPacket {
/// The length here also includes the CRC length.
    fn len_written(&self) -> usize;
-   /// Writes the packet to the given slice without writing the CRC.
-   ///
-   /// The returned size is the written size WITHOUT the CRC.
-   fn write_to_bytes_no_crc(&self, slice: &mut [u8]) -> Result<usize, PusError>;
-   /// First uses [Self::write_to_bytes_no_crc] to write the packet to the given slice and then
-   /// uses the [CRC_CCITT_FALSE] to calculate the CRC and write it to the slice.
+   /// Checksum generation is enabled for the packet.
+   fn has_checksum(&self) -> bool;
+   /// Writes the packet to the given slice without writing the CRC checksum.
+   ///
+   /// The returned size is the written size WITHOUT the CRC checksum.
+   /// If the checksum generation is disabled, this function is identical to the APIs which
+   /// generate a checksum.
+   fn write_to_bytes_no_checksum(&self, slice: &mut [u8]) -> Result<usize, PusError>;
+   /// First uses [Self::write_to_bytes_no_checksum] to write the packet to the given slice and
+   /// then uses the [CRC_CCITT_FALSE] to calculate the CRC and write it to the slice if the
+   /// packet is configured to include a checksum.
    fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError> {
-       let mut curr_idx = self.write_to_bytes_no_crc(slice)?;
-       let mut digest = CRC_CCITT_FALSE.digest();
-       digest.update(&slice[0..curr_idx]);
-       slice[curr_idx..curr_idx + 2].copy_from_slice(&digest.finalize().to_be_bytes());
-       curr_idx += 2;
+       let mut curr_idx = self.write_to_bytes_no_checksum(slice)?;
+       if self.has_checksum() {
+           let mut digest = CRC_CCITT_FALSE.digest();
+           digest.update(&slice[0..curr_idx]);
+           slice[curr_idx..curr_idx + 2].copy_from_slice(&digest.finalize().to_be_bytes());
+           curr_idx += 2;
+       }
        Ok(curr_idx)
    }
-   /// First uses [Self::write_to_bytes_no_crc] to write the packet to the given slice and then
-   /// uses the [CRC_CCITT_FALSE_NO_TABLE] to calculate the CRC and write it to the slice.
-   fn write_to_bytes_crc_no_table(&self, slice: &mut [u8]) -> Result<usize, PusError> {
-       let mut curr_idx = self.write_to_bytes_no_crc(slice)?;
-       let mut digest = CRC_CCITT_FALSE_NO_TABLE.digest();
-       digest.update(&slice[0..curr_idx]);
-       slice[curr_idx..curr_idx + 2].copy_from_slice(&digest.finalize().to_be_bytes());
-       curr_idx += 2;
+   /// First uses [Self::write_to_bytes_no_checksum] to write the packet to the given slice and then
+   /// uses the [CRC_CCITT_FALSE_NO_TABLE] to calculate the CRC and write it to the slice if
+   /// the paket is configured to include a checksum.
+   fn write_to_bytes_checksum_no_table(&self, slice: &mut [u8]) -> Result<usize, PusError> {
+       let mut curr_idx = self.write_to_bytes_no_checksum(slice)?;
+       if self.has_checksum() {
+           let mut digest = CRC_CCITT_FALSE_NO_TABLE.digest();
+           digest.update(&slice[0..curr_idx]);
+           slice[curr_idx..curr_idx + 2].copy_from_slice(&digest.finalize().to_be_bytes());
+           curr_idx += 2;
+       }
        Ok(curr_idx)
    }
@@ -409,6 +423,25 @@ pub trait WritablePusPacket {
} }
} }
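To make the write flow defined by WritablePusPacket concrete, a small editorial usage sketch; `creator` stands for any implementor of the trait and the buffer size is arbitrary:

let mut buf = [0u8; 128];
let written = creator.write_to_bytes(&mut buf)?;
// With checksum generation enabled, the last two written bytes are the CRC-16-CCITT over
// everything before them, so a CRC check over the full written packet passes.
if creator.has_checksum() {
    assert!(verify_crc16_ccitt_false_from_raw(&buf[..written]));
}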
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct CreatorConfig {
/// Set the CCSDS data length field on construction.
pub set_ccsds_len: bool,
/// CRC-16-CCITT Checksum is present.
pub has_checksum: bool,
}
impl Default for CreatorConfig {
fn default() -> Self {
Self {
set_ccsds_len: true,
has_checksum: true,
}
}
}
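For example, a creator which should still fill in the CCSDS length field but skip the trailing CRC could be configured as follows; how the config is passed to a concrete TC or TM creator is not visible in this excerpt:

let config_without_crc = CreatorConfig {
    set_ccsds_len: true,
    has_checksum: false,
};
// The default keeps the previous behaviour: length field set and CRC16 appended.
assert_eq!(
    CreatorConfig::default(),
    CreatorConfig { set_ccsds_len: true, has_checksum: true }
);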
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use alloc::string::ToString; use alloc::string::ToString;
@@ -536,9 +569,10 @@ mod tests {
#[test] #[test]
fn test_pus_error_display() { fn test_pus_error_display() {
-       let unsupport_version = PusError::VersionNotSupported(super::PusVersion::EsaPus);
+       let unsupport_version =
+           PusError::VersionNotSupported(super::PusVersion::EsaPus.raw_value());
        let write_str = unsupport_version.to_string();
-       assert_eq!(write_str, "PUS version EsaPus not supported")
+       assert_eq!(write_str, "PUS version 0 not supported")
} }
#[test] #[test]
@@ -572,8 +606,8 @@ mod tests {
#[test] #[test]
fn test_pus_error_eq_impl() { fn test_pus_error_eq_impl() {
assert_eq!( assert_eq!(
-           PusError::VersionNotSupported(PusVersion::EsaPus),
-           PusError::VersionNotSupported(PusVersion::EsaPus)
+           PusError::VersionNotSupported(PusVersion::EsaPus.raw_value()),
+           PusError::VersionNotSupported(PusVersion::EsaPus.raw_value())
); );
} }


@@ -5,6 +5,7 @@ use serde::{Deserialize, Serialize};
#[derive(Debug, PartialEq, Eq, Copy, Clone, IntoPrimitive, TryFromPrimitive)] #[derive(Debug, PartialEq, Eq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum Subservice { pub enum Subservice {
// Core subservices // Core subservices
@@ -47,6 +48,7 @@ pub enum Subservice {
/// This status applies to sub-schedules and groups as well as specified in ECSS-E-ST-70-41C 8.11.3 /// This status applies to sub-schedules and groups as well as specified in ECSS-E-ST-70-41C 8.11.3
#[derive(Debug, PartialEq, Eq, Copy, Clone)] #[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum SchedStatus { pub enum SchedStatus {
Disabled = 0, Disabled = 0,
@@ -66,6 +68,7 @@ impl From<bool> for SchedStatus {
/// Time window types as specified in ECSS-E-ST-70-41C 8.11.3 /// Time window types as specified in ECSS-E-ST-70-41C 8.11.3
#[derive(Debug, PartialEq, Eq, Copy, Clone)] #[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum TimeWindowType { pub enum TimeWindowType {
SelectAll = 0, SelectAll = 0,

File diff suppressed because it is too large.

src/ecss/tc_pus_a.rs (new file, 1376 lines): file diff suppressed because it is too large.

File diff suppressed because it is too large.

src/ecss/tm_pus_a.rs (new file, 1996 lines): file diff suppressed because it is too large.


@@ -5,6 +5,7 @@ use serde::{Deserialize, Serialize};
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)] #[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum Subservice { pub enum Subservice {
TmAcceptanceSuccess = 1, TmAcceptanceSuccess = 1,

File diff suppressed because it is too large.


@@ -1,15 +1,14 @@
use crate::MAX_SEQ_COUNT;
+use arbitrary_int::traits::Integer;
use core::cell::Cell;
use paste::paste;
-#[cfg(feature = "std")]
-pub use stdmod::*;
/// Core trait for objects which can provide a sequence count.
///
/// The core functions are not mutable on purpose to allow easier usage with
/// static structs when using the interior mutability pattern. This can be achieved by using
/// [Cell], [core::cell::RefCell] or atomic types.
-pub trait SequenceCountProvider {
+pub trait SequenceCounter {
    type Raw: Into<u64>;
    const MAX_BIT_WIDTH: usize;
@@ -17,16 +16,25 @@ pub trait SequenceCountProvider {
    fn increment(&self);
+   fn increment_mut(&mut self) {
+       self.increment();
+   }
    fn get_and_increment(&self) -> Self::Raw {
        let val = self.get();
        self.increment();
        val
    }
+   fn get_and_increment_mut(&mut self) -> Self::Raw {
+       self.get_and_increment()
+   }
}
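The interior mutability point can be illustrated with a minimal custom implementor; this is an editorial sketch only, and SequenceCounterSimple below is the crate's own, more complete version:

struct WrappingU8Counter(core::cell::Cell<u8>);

impl SequenceCounter for WrappingU8Counter {
    type Raw = u8;
    const MAX_BIT_WIDTH: usize = 8;

    fn get(&self) -> u8 {
        self.0.get()
    }
    fn increment(&self) {
        // &self suffices because the counter state lives in a Cell.
        self.0.set(self.0.get().wrapping_add(1));
    }
}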
#[derive(Clone)] #[derive(Clone)]
-pub struct SeqCountProviderSimple<T: Copy> {
+pub struct SequenceCounterSimple<T: Copy> {
seq_count: Cell<T>, seq_count: Cell<T>,
// The maximum value
max_val: T, max_val: T,
} }
@@ -34,7 +42,7 @@ macro_rules! impl_for_primitives {
($($ty: ident,)+) => { ($($ty: ident,)+) => {
$( $(
paste! { paste! {
-           impl SeqCountProviderSimple<$ty> {
+           impl SequenceCounterSimple<$ty> {
pub fn [<new_custom_max_val_ $ty>](max_val: $ty) -> Self { pub fn [<new_custom_max_val_ $ty>](max_val: $ty) -> Self {
Self { Self {
seq_count: Cell::new(0), seq_count: Cell::new(0),
@@ -49,13 +57,13 @@ macro_rules! impl_for_primitives {
} }
} }
-           impl Default for SeqCountProviderSimple<$ty> {
+           impl Default for SequenceCounterSimple<$ty> {
fn default() -> Self { fn default() -> Self {
Self::[<new_ $ty>]() Self::[<new_ $ty>]()
} }
} }
-           impl SequenceCountProvider for SeqCountProviderSimple<$ty> {
+           impl SequenceCounter for SequenceCounterSimple<$ty> {
type Raw = $ty; type Raw = $ty;
const MAX_BIT_WIDTH: usize = core::mem::size_of::<Self::Raw>() * 8; const MAX_BIT_WIDTH: usize = core::mem::size_of::<Self::Raw>() * 8;
@@ -87,19 +95,19 @@ impl_for_primitives!(u8, u16, u32, u64,);
/// This is a sequence count provider which wraps around at [MAX_SEQ_COUNT]. /// This is a sequence count provider which wraps around at [MAX_SEQ_COUNT].
#[derive(Clone)] #[derive(Clone)]
-pub struct CcsdsSimpleSeqCountProvider {
-    provider: SeqCountProviderSimple<u16>,
+pub struct SequenceCounterCcsdsSimple {
+    provider: SequenceCounterSimple<u16>,
}
-impl Default for CcsdsSimpleSeqCountProvider {
+impl Default for SequenceCounterCcsdsSimple {
    fn default() -> Self {
        Self {
-           provider: SeqCountProviderSimple::new_custom_max_val_u16(MAX_SEQ_COUNT),
+           provider: SequenceCounterSimple::new_custom_max_val_u16(MAX_SEQ_COUNT.as_u16()),
        }
    }
}
-impl SequenceCountProvider for CcsdsSimpleSeqCountProvider {
+impl SequenceCounter for SequenceCounterCcsdsSimple {
type Raw = u16; type Raw = u16;
const MAX_BIT_WIDTH: usize = core::mem::size_of::<Self::Raw>() * 8; const MAX_BIT_WIDTH: usize = core::mem::size_of::<Self::Raw>() * 8;
delegate::delegate! { delegate::delegate! {
@@ -111,83 +119,202 @@ impl SequenceCountProvider for CcsdsSimpleSeqCountProvider {
} }
} }
-#[cfg(feature = "std")]
-pub mod stdmod {
-    use super::*;
-    use std::sync::{Arc, Mutex};
-    macro_rules! sync_clonable_seq_counter_impl {
-        ($($ty: ident,)+) => {
-            $(paste! {
-                /// These sequence counters can be shared between threads and can also be
-                /// configured to wrap around at specified maximum values. Please note that
-                /// that the API provided by this class will not panic und [Mutex] lock errors,
-                /// but it will yield 0 for the getter functions.
-                #[derive(Clone, Default)]
-                pub struct [<SeqCountProviderSync $ty:upper>] {
-                    seq_count: Arc<Mutex<$ty>>,
-                    max_val: $ty
-                }
-                impl [<SeqCountProviderSync $ty:upper>] {
-                    pub fn new() -> Self {
-                        Self::new_with_max_val($ty::MAX)
-                    }
-                    pub fn new_with_max_val(max_val: $ty) -> Self {
-                        Self {
-                            seq_count: Arc::default(),
-                            max_val
-                        }
-                    }
-                }
-                impl SequenceCountProvider for [<SeqCountProviderSync $ty:upper>] {
-                    type Raw = $ty;
-                    const MAX_BIT_WIDTH: usize = core::mem::size_of::<Self::Raw>() * 8;
-                    fn get(&self) -> $ty {
-                        match self.seq_count.lock() {
-                            Ok(counter) => *counter,
-                            Err(_) => 0
-                        }
-                    }
-                    fn increment(&self) {
-                        self.get_and_increment();
-                    }
-                    fn get_and_increment(&self) -> $ty {
-                        match self.seq_count.lock() {
-                            Ok(mut counter) => {
-                                let val = *counter;
-                                if val == self.max_val {
-                                    *counter = 0;
-                                } else {
-                                    *counter += 1;
-                                }
-                                val
-                            }
-                            Err(_) => 0,
-                        }
-                    }
-                }
-            })+
-        }
-    }
-    sync_clonable_seq_counter_impl!(u8, u16, u32, u64,);
-}
+impl SequenceCounter for core::sync::atomic::AtomicU8 {
+    type Raw = u8;
+    const MAX_BIT_WIDTH: usize = 8;
+    fn get(&self) -> Self::Raw {
+        self.load(core::sync::atomic::Ordering::Relaxed)
+    }
+    fn increment(&self) {
+        self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
+    }
+}
impl SequenceCounter for core::sync::atomic::AtomicU16 {
type Raw = u16;
const MAX_BIT_WIDTH: usize = 16;
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
}
impl SequenceCounter for core::sync::atomic::AtomicU32 {
type Raw = u32;
const MAX_BIT_WIDTH: usize = 32;
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
}
#[cfg(target_has_atomic = "64")]
impl SequenceCounter for core::sync::atomic::AtomicU64 {
type Raw = u64;
const MAX_BIT_WIDTH: usize = 64;
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
}
impl SequenceCounter for portable_atomic::AtomicU8 {
type Raw = u8;
const MAX_BIT_WIDTH: usize = 8;
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
}
impl SequenceCounter for portable_atomic::AtomicU16 {
type Raw = u16;
const MAX_BIT_WIDTH: usize = 16;
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
}
impl SequenceCounter for portable_atomic::AtomicU32 {
type Raw = u32;
const MAX_BIT_WIDTH: usize = 32;
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
}
impl SequenceCounter for portable_atomic::AtomicU64 {
type Raw = u64;
const MAX_BIT_WIDTH: usize = 64;
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
}
impl<T: SequenceCounter + ?Sized> SequenceCounter for &T {
type Raw = T::Raw;
const MAX_BIT_WIDTH: usize = T::MAX_BIT_WIDTH;
fn get(&self) -> Self::Raw {
(**self).get()
}
fn increment(&self) {
(**self).increment()
}
}
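Combined with the atomic implementations above, this blanket impl means generic code can be handed a shared reference to a plain atomic; an editorial sketch with made-up helper names:

use core::sync::atomic::AtomicU16;

static SEQ_COUNT: AtomicU16 = AtomicU16::new(0);

fn next_seq_count(counter: impl SequenceCounter<Raw = u16>) -> u16 {
    counter.get_and_increment()
}

fn two_counts() -> (u16, u16) {
    // &AtomicU16 itself implements SequenceCounter through the blanket impl above.
    (next_seq_count(&SEQ_COUNT), next_seq_count(&SEQ_COUNT))
}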
macro_rules! sync_clonable_seq_counter_impl {
($ty: ident) => {
paste::paste! {
/// This can be used if a custom wrap value is required when using a thread-safe
/// atomic based sequence counter.
#[derive(Debug)]
pub struct [<SequenceCounterSyncCustomWrap $ty:upper>] {
seq_count: core::sync::atomic::[<Atomic $ty:upper>],
max_val: $ty,
}
impl [<SequenceCounterSyncCustomWrap $ty:upper>] {
pub fn new(max_val: $ty) -> Self {
Self {
seq_count: core::sync::atomic::[<Atomic $ty:upper>]::new(0),
max_val,
}
}
}
impl SequenceCounter for [<SequenceCounterSyncCustomWrap $ty:upper>] {
type Raw = $ty;
const MAX_BIT_WIDTH: usize = core::mem::size_of::<Self::Raw>() * 8;
fn get(&self) -> $ty {
self.seq_count.load(core::sync::atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.get_and_increment();
}
fn get_and_increment(&self) -> $ty {
self.seq_count.fetch_update(
core::sync::atomic::Ordering::Relaxed,
core::sync::atomic::Ordering::Relaxed,
|cur| {
// compute the next value, wrapping at MAX_VAL
let next = if cur == self.max_val { 0 } else { cur + 1 };
Some(next)
},
).unwrap()
}
}
}
};
}
#[cfg(target_has_atomic = "8")]
sync_clonable_seq_counter_impl!(u8);
#[cfg(target_has_atomic = "16")]
sync_clonable_seq_counter_impl!(u16);
#[cfg(target_has_atomic = "32")]
sync_clonable_seq_counter_impl!(u32);
#[cfg(target_has_atomic = "64")]
sync_clonable_seq_counter_impl!(u64);
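For illustration, a small sketch of how the custom wrap value behaves, assuming the fetch_update based implementation above: get_and_increment returns the previous value and wraps back to 0 once max_val has been handed out.

fn custom_wrap_demo() {
    let counter = SequenceCounterSyncCustomWrapU8::new(3);
    assert_eq!(counter.get_and_increment(), 0);
    assert_eq!(counter.get_and_increment(), 1);
    assert_eq!(counter.get_and_increment(), 2);
    assert_eq!(counter.get_and_increment(), 3);
    // The counter has wrapped, so the next value is 0 again.
    assert_eq!(counter.get_and_increment(), 0);
}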
#[cfg(test)]
mod tests {
use core::sync::atomic::{AtomicU16, AtomicU32, AtomicU64, AtomicU8};
use crate::seq_count::{
SequenceCounter, SequenceCounterCcsdsSimple, SequenceCounterSimple,
SequenceCounterSyncCustomWrapU8,
};
use crate::MAX_SEQ_COUNT;
#[test]
fn test_u8_counter() {
let u8_counter = SequenceCounterSimple::<u8>::default();
assert_eq!(u8_counter.get(), 0);
assert_eq!(u8_counter.get_and_increment(), 0);
assert_eq!(u8_counter.get_and_increment(), 1);
@@ -196,7 +323,7 @@ mod tests {
#[test]
fn test_u8_counter_overflow() {
let u8_counter = SequenceCounterSimple::new_u8();
for _ in 0..256 {
u8_counter.increment();
}
@@ -205,7 +332,7 @@ mod tests {
#[test]
fn test_ccsds_counter() {
let ccsds_counter = SequenceCounterCcsdsSimple::default();
assert_eq!(ccsds_counter.get(), 0);
assert_eq!(ccsds_counter.get_and_increment(), 0);
assert_eq!(ccsds_counter.get_and_increment(), 1);
@@ -214,34 +341,94 @@ mod tests {
#[test]
fn test_ccsds_counter_overflow() {
let ccsds_counter = SequenceCounterCcsdsSimple::default();
for _ in 0..MAX_SEQ_COUNT.value() + 1 {
ccsds_counter.increment();
}
assert_eq!(ccsds_counter.get(), 0);
}
fn common_counter_test(seq_counter: &mut impl SequenceCounter) {
assert_eq!(seq_counter.get().into(), 0);
assert_eq!(seq_counter.get_and_increment().into(), 0);
assert_eq!(seq_counter.get_and_increment().into(), 1);
assert_eq!(seq_counter.get().into(), 2);
seq_counter.increment_mut();
assert_eq!(seq_counter.get().into(), 3);
assert_eq!(seq_counter.get_and_increment_mut().into(), 3);
assert_eq!(seq_counter.get().into(), 4);
}
#[test]
fn test_atomic_counter_u8() {
let mut sync_u8_counter = AtomicU8::new(0);
common_counter_test(&mut sync_u8_counter);
}
#[test]
fn test_atomic_counter_u16() {
let mut sync_u16_counter = AtomicU16::new(0);
common_counter_test(&mut sync_u16_counter);
}
#[test]
fn test_atomic_counter_u32() {
let mut sync_u32_counter = AtomicU32::new(0);
common_counter_test(&mut sync_u32_counter);
}
#[test]
fn test_atomic_counter_u64() {
let mut sync_u64_counter = AtomicU64::new(0);
common_counter_test(&mut sync_u64_counter);
}
#[test]
fn test_portable_atomic_counter_u8() {
let mut sync_u8_counter = portable_atomic::AtomicU8::new(0);
common_counter_test(&mut sync_u8_counter);
}
#[test]
fn test_portable_atomic_counter_u16() {
let mut sync_u16_counter = portable_atomic::AtomicU16::new(0);
common_counter_test(&mut sync_u16_counter);
}
#[test]
fn test_portable_atomic_counter_u32() {
let mut sync_u32_counter = portable_atomic::AtomicU32::new(0);
common_counter_test(&mut sync_u32_counter);
}
#[test]
fn test_portable_atomic_counter_u64() {
let mut sync_u64_counter = portable_atomic::AtomicU64::new(0);
common_counter_test(&mut sync_u64_counter);
}
fn common_overflow_test_u8(seq_counter: &impl SequenceCounter) {
for _ in 0..u8::MAX as u16 + 1 {
seq_counter.increment();
}
assert_eq!(seq_counter.get().into(), 0);
}
#[test]
fn test_atomic_u8_counter_overflow() {
let sync_u8_counter = AtomicU8::new(0);
common_overflow_test_u8(&sync_u8_counter);
}
#[test]
fn test_portable_atomic_u8_counter_overflow() {
let sync_u8_counter = portable_atomic::AtomicU8::new(0);
common_overflow_test_u8(&sync_u8_counter);
}
#[test]
fn test_atomic_ref_counters_overflow_custom_max_val() {
let sync_u8_counter = SequenceCounterSyncCustomWrapU8::new(128);
for _ in 0..129 {
sync_u8_counter.increment();
}


@@ -82,6 +82,7 @@ pub enum LengthOfDaySegment {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum SubmillisPrecision {
Absent = 0b00,
Microseconds = 0b01,
@@ -641,14 +642,12 @@ impl<ProvidesDaysLen: ProvidesDaysLength> CdsTime<ProvidesDaysLen> {
self.unix_time = UnixTime::new(unix_days_seconds, subsec_nanos);
}
fn length_check(&self, buf: &[u8], len_as_bytes: usize) -> Result<(), ByteConversionError> {
if buf.len() < len_as_bytes {
return Err(ByteConversionError::ToSliceTooSmall {
expected: len_as_bytes,
found: buf.len(),
});
}
Ok(())
}
@@ -956,6 +955,23 @@ impl CdsTime<DaysLen16Bits> {
Self::from_now_generic_ps_prec(LengthOfDaySegment::Short16Bits)
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.length_check(buf, self.len_as_bytes())?;
buf[0] = self.pfield;
buf[1..3].copy_from_slice(self.ccsds_days.to_be_bytes().as_slice());
buf[3..7].copy_from_slice(self.ms_of_day.to_be_bytes().as_slice());
match self.submillis_precision() {
SubmillisPrecision::Microseconds => {
buf[7..9].copy_from_slice((self.submillis() as u16).to_be_bytes().as_slice());
}
SubmillisPrecision::Picoseconds => {
buf[7..11].copy_from_slice(self.submillis().to_be_bytes().as_slice());
}
_ => (),
}
Ok(self.len_as_bytes())
}
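A minimal usage sketch for the inherent write_to_bytes method added above (buffer size and the expect message are illustrative only; the short CDS form occupies at least 7 bytes):

fn cds_write_demo() -> Result<(), ByteConversionError> {
    let stamp = CdsTime::now_with_u16_days().expect("could not get current time");
    let mut buf = [0u8; 16];
    // The inherent method reports plain byte conversion errors instead of TimestampError.
    let written = stamp.write_to_bytes(&mut buf)?;
    assert!(written >= 7);
    Ok(())
}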
pub fn from_bytes_with_u16_days(buf: &[u8]) -> Result<Self, TimestampError> {
let submillis_precision =
Self::generic_raw_read_checks(buf, LengthOfDaySegment::Short16Bits)?;
@@ -1211,20 +1227,7 @@ impl TimeReader for CdsTime<DaysLen24Bits> {
impl TimeWriter for CdsTime<DaysLen16Bits> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, TimestampError> {
self.write_to_bytes(buf).map_err(TimestampError::from)
}
fn len_written(&self) -> usize {
@@ -1232,8 +1235,8 @@ impl TimeWriter for CdsTime<DaysLen16Bits> {
}
}
impl CdsTime<DaysLen24Bits> {
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.length_check(buf, self.len_as_bytes())?;
buf[0] = self.pfield;
let be_days = self.ccsds_days.to_be_bytes();
@@ -1250,6 +1253,12 @@ impl TimeWriter for CdsTime<DaysLen24Bits> {
}
Ok(self.len_as_bytes())
}
}
impl TimeWriter for CdsTime<DaysLen24Bits> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, TimestampError> {
self.write_to_bytes(buf).map_err(TimestampError::from)
}
fn len_written(&self) -> usize {
self.len_as_bytes()
@@ -1330,7 +1339,7 @@ mod tests {
use super::*;
use crate::time::TimestampError::{ByteConversion, InvalidTimeCode};
use crate::time::{UnixTime, DAYS_CCSDS_TO_UNIX, MS_PER_DAY};
use crate::ByteConversionError::FromSliceTooSmall;
use alloc::string::ToString;
use chrono::{Datelike, NaiveDate, Timelike};
#[cfg(feature = "serde")]
@@ -1485,12 +1494,14 @@ mod tests {
assert!(res.is_err());
let error = res.unwrap_err();
match error {
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(found, i);
assert_eq!(expected, 7);
assert_eq!(
error.to_string(),
format!(
"target slice with size {i} is too small, expected size of at least 7"
)
);
}
_ => panic!(
@@ -1536,10 +1547,7 @@ mod tests {
if let InvalidTimeCode { expected, found } = err {
assert_eq!(expected, CcsdsTimeCode::Cds);
assert_eq!(found, 0);
assert_eq!(err.to_string(), "invalid time code, expected Cds, found 0");
}
}
@@ -2300,7 +2308,7 @@ mod tests {
fn test_serialization() {
let stamp_now = CdsTime::now_with_u16_days().expect("Error retrieving time");
let val = to_allocvec(&stamp_now).expect("Serializing timestamp failed");
assert!(!val.is_empty());
let stamp_deser: CdsTime = from_bytes(&val).expect("Stamp deserialization failed");
assert_eq!(stamp_deser, stamp_now);
}


@@ -6,7 +6,7 @@
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use core::fmt::{Debug, Display, Formatter}; use core::fmt::Debug;
use core::ops::{Add, AddAssign}; use core::ops::{Add, AddAssign};
use core::time::Duration; use core::time::Duration;
@@ -20,8 +20,6 @@ use super::{
TimestampError, UnixTime,
};
#[cfg(feature = "std")]
use std::error::Error;
#[cfg(feature = "std")]
use std::time::SystemTime;
#[cfg(feature = "chrono")]
@@ -103,63 +101,24 @@ pub fn fractional_part_from_subsec_ns(res: FractionalResolution, ns: u64) -> Fra
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum CucError {
#[error("invalid cuc counter byte width {0}")]
InvalidCounterWidth(u8),
/// Invalid counter supplied.
#[error("invalid cuc counter {counter} for width {width}")]
InvalidCounter { width: u8, counter: u64 },
#[error("invalid cuc fractional part {value} for resolution {resolution:?}")]
InvalidFractions {
resolution: FractionalResolution,
value: u64,
},
#[error("error while correcting for leap seconds")]
LeapSecondCorrectionError,
#[error("date before ccsds epoch: {0}")]
DateBeforeCcsdsEpoch(#[from] DateBeforeCcsdsEpochError),
}
impl Display for CucError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
CucError::InvalidCounterWidth(w) => {
write!(f, "invalid cuc counter byte width {w}")
}
CucError::InvalidCounter { width, counter } => {
write!(f, "invalid cuc counter {counter} for width {width}")
}
CucError::InvalidFractions { resolution, value } => {
write!(
f,
"invalid cuc fractional part {value} for resolution {resolution:?}"
)
}
CucError::LeapSecondCorrectionError => {
write!(f, "error while correcting for leap seconds")
}
CucError::DateBeforeCcsdsEpoch(e) => {
write!(f, "date before ccsds epoch: {e}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for CucError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
CucError::DateBeforeCcsdsEpoch(e) => Some(e),
_ => None,
}
}
}
impl From<DateBeforeCcsdsEpochError> for CucError {
fn from(e: DateBeforeCcsdsEpochError) -> Self {
Self::DateBeforeCcsdsEpoch(e)
}
}
/// Tuple object where the first value is the width of the counter and the second value


@@ -3,7 +3,6 @@ use crate::ByteConversionError;
#[cfg(feature = "chrono")] #[cfg(feature = "chrono")]
use chrono::{TimeZone, Utc}; use chrono::{TimeZone, Utc};
use core::cmp::Ordering; use core::cmp::Ordering;
use core::fmt::{Display, Formatter};
use core::ops::{Add, AddAssign, Sub};
use core::time::Duration;
@@ -14,8 +13,6 @@ use num_traits::float::FloatCore;
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
#[cfg(feature = "std")] #[cfg(feature = "std")]
use std::time::{SystemTime, SystemTimeError}; use std::time::{SystemTime, SystemTimeError};
#[cfg(feature = "std")] #[cfg(feature = "std")]
@@ -69,67 +66,23 @@ pub fn ccsds_time_code_from_p_field(pfield: u8) -> Result<CcsdsTimeCode, u8> {
#[error("date before ccsds epoch: {0:?}")] #[error("date before ccsds epoch: {0:?}")]
pub struct DateBeforeCcsdsEpochError(UnixTime); pub struct DateBeforeCcsdsEpochError(UnixTime);
#[derive(Debug, PartialEq, Eq, Copy, Clone)] #[derive(Debug, PartialEq, Eq, Copy, Clone, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))] #[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive] #[non_exhaustive]
pub enum TimestampError { pub enum TimestampError {
#[error("invalid time code, expected {expected:?}, found {found}")]
InvalidTimeCode { expected: CcsdsTimeCode, found: u8 }, InvalidTimeCode { expected: CcsdsTimeCode, found: u8 },
ByteConversion(ByteConversionError), #[error("time stamp: byte conversion error: {0}")]
Cds(cds::CdsError), ByteConversion(#[from] ByteConversionError),
Cuc(cuc::CucError), #[error("CDS error: {0}")]
Cds(#[from] cds::CdsError),
#[error("CUC error: {0}")]
Cuc(#[from] cuc::CucError),
#[error("custom epoch not supported")]
CustomEpochNotSupported, CustomEpochNotSupported,
} }
impl Display for TimestampError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
TimestampError::InvalidTimeCode { expected, found } => {
write!(
f,
"invalid raw time code value {found} for time code {expected:?}"
)
}
TimestampError::Cds(e) => {
write!(f, "cds error: {e}")
}
TimestampError::Cuc(e) => {
write!(f, "cuc error: {e}")
}
TimestampError::ByteConversion(e) => {
write!(f, "time stamp: {e}")
}
TimestampError::CustomEpochNotSupported => {
write!(f, "custom epochs are not supported")
}
}
}
}
#[cfg(feature = "std")]
impl Error for TimestampError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
TimestampError::ByteConversion(e) => Some(e),
TimestampError::Cds(e) => Some(e),
TimestampError::Cuc(e) => Some(e),
_ => None,
}
}
}
impl From<cds::CdsError> for TimestampError {
fn from(e: cds::CdsError) -> Self {
TimestampError::Cds(e)
}
}
impl From<cuc::CucError> for TimestampError {
fn from(e: cuc::CucError) -> Self {
TimestampError::Cuc(e)
}
}
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub mod std_mod { pub mod std_mod {
use crate::time::TimestampError; use crate::time::TimestampError;
@@ -735,7 +688,7 @@ mod tests {
fn test_cuc_error_printout() {
let cuc_error = CucError::InvalidCounterWidth(12);
let stamp_error = TimestampError::from(cuc_error);
assert_eq!(stamp_error.to_string(), format!("CUC error: {cuc_error}"));
}
#[test]

src/uslp/mod.rs Normal file

@@ -0,0 +1,948 @@
/// # Support of the CCSDS Unified Space Data Link Protocol (USLP)
use crate::{crc::CRC_CCITT_FALSE, ByteConversionError};
/// Only this version is supported by the library
pub const USLP_VERSION_NUMBER: u8 = 0b1100;
/// Identifies the association of the data contained in the transfer frame.
#[derive(Debug, PartialEq, Eq, num_enum::TryFromPrimitive, num_enum::IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum SourceOrDestField {
/// SCID refers to the source of the transfer frame.
Source = 0,
/// SCID refers to the destination of the transfer frame.
Dest = 1,
}
#[derive(Debug, PartialEq, Eq, num_enum::TryFromPrimitive, num_enum::IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum BypassSequenceControlFlag {
/// Acceptance of this frame on the receiving end is subject to normal frame acceptance
/// checks of FARM.
SequenceControlledQoS = 0,
/// Frame Acceptance Checks of FARM by the receiving end shall be bypassed.
ExpeditedQoS = 1,
}
#[derive(
Debug, Copy, Clone, PartialEq, Eq, num_enum::TryFromPrimitive, num_enum::IntoPrimitive,
)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum ProtocolControlCommandFlag {
TfdfContainsUserData = 0,
TfdfContainsProtocolInfo = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
pub enum UslpError {
#[error("byte conversion error: {0}")]
ByteConversion(#[from] ByteConversionError),
#[error("header is truncated, which is not supported")]
HeaderIsTruncated,
#[error("invalid protocol id: {0}")]
InvalidProtocolId(u8),
#[error("invalid construction rule: {0}")]
InvalidConstructionRule(u8),
#[error("invalid version number: {0}")]
InvalidVersionNumber(u8),
#[error("invalid virtual channel ID: {0}")]
InvalidVcId(u8),
#[error("invalid MAP ID: {0}")]
InvalidMapId(u8),
#[error("checksum failure")]
ChecksumFailure(u16),
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct InvalidValueForLen {
value: u64,
len: u8,
}
#[derive(Debug, Copy, Clone, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct PrimaryHeader {
pub spacecraft_id: u16,
pub source_or_dest_field: SourceOrDestField,
pub vc_id: u8,
pub map_id: u8,
frame_len_field: u16,
pub sequence_control_flag: BypassSequenceControlFlag,
pub protocol_control_command_flag: ProtocolControlCommandFlag,
pub ocf_flag: bool,
vc_frame_count_len: u8,
vc_frame_count: u64,
}
impl PrimaryHeader {
pub fn new(
spacecraft_id: u16,
source_or_dest_field: SourceOrDestField,
vc_id: u8,
map_id: u8,
frame_len: u16,
) -> Result<Self, UslpError> {
if vc_id > 0b111111 {
return Err(UslpError::InvalidVcId(vc_id));
}
if map_id > 0b1111 {
return Err(UslpError::InvalidMapId(map_id));
}
Ok(Self {
spacecraft_id,
source_or_dest_field,
vc_id,
map_id,
frame_len_field: frame_len.saturating_sub(1),
sequence_control_flag: BypassSequenceControlFlag::SequenceControlledQoS,
protocol_control_command_flag: ProtocolControlCommandFlag::TfdfContainsUserData,
ocf_flag: false,
vc_frame_count_len: 0,
vc_frame_count: 0,
})
}
pub fn set_vc_frame_count(
&mut self,
count_len: u8,
count: u64,
) -> Result<(), InvalidValueForLen> {
if count > 2_u64.pow(count_len as u32 * 8) - 1 {
return Err(InvalidValueForLen {
value: count,
len: count_len,
});
}
self.vc_frame_count_len = count_len;
self.vc_frame_count = count;
Ok(())
}
#[inline]
pub fn vc_frame_count(&self) -> u64 {
self.vc_frame_count
}
#[inline]
pub fn vc_frame_count_len(&self) -> u8 {
self.vc_frame_count_len
}
pub fn from_bytes(buf: &[u8]) -> Result<Self, UslpError> {
if buf.len() < 4 {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: 4,
}
.into());
}
// Can only deal with regular frames for now.
if (buf[3] & 0b1) == 1 {
return Err(UslpError::HeaderIsTruncated);
}
// We could check this above, but this is a better error for the case where the user
// tries to read a truncated frame.
if buf.len() < 7 {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: 7,
}
.into());
}
let version_number = (buf[0] >> 4) & 0b1111;
if version_number != USLP_VERSION_NUMBER {
return Err(UslpError::InvalidVersionNumber(version_number));
}
let source_or_dest_field = match (buf[2] >> 3) & 1 {
0 => SourceOrDestField::Source,
1 => SourceOrDestField::Dest,
_ => unreachable!(),
};
let vc_frame_count_len = buf[6] & 0b111;
if buf.len() < 7 + vc_frame_count_len as usize {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: 7 + vc_frame_count_len as usize,
}
.into());
}
let vc_frame_count = match vc_frame_count_len {
1 => buf[7] as u64,
2 => u16::from_be_bytes(buf[7..9].try_into().unwrap()) as u64,
4 => u32::from_be_bytes(buf[7..11].try_into().unwrap()) as u64,
len => {
let mut vcf_count = 0u64;
let mut end = len;
for byte in buf[7..7 + len as usize].iter() {
vcf_count |= (*byte as u64) << ((end - 1) * 8);
end -= 1;
}
vcf_count
}
};
Ok(Self {
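// Reassemble the 16-bit SCID from its three split fields: 4 bits from byte 0,
// 8 bits from byte 1 and 4 bits from byte 2.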
spacecraft_id: (((buf[0] as u16) & 0b1111) << 12)
| ((buf[1] as u16) << 4)
| ((buf[2] as u16) >> 4) & 0b1111,
source_or_dest_field,
vc_id: ((buf[2] & 0b111) << 3) | (buf[3] >> 5) & 0b111,
map_id: (buf[3] >> 1) & 0b1111,
frame_len_field: ((buf[4] as u16) << 8) | buf[5] as u16,
sequence_control_flag: ((buf[6] >> 7) & 0b1).try_into().unwrap(),
protocol_control_command_flag: ((buf[6] >> 6) & 0b1).try_into().unwrap(),
ocf_flag: ((buf[6] >> 3) & 0b1) != 0,
vc_frame_count_len,
vc_frame_count,
})
}
pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.len_header() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.len_header(),
});
}
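// The SCID is split across the first three bytes (4 + 8 + 4 bits), interleaved
// with the version number, the source/destination flag and the VC ID.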
buf[0] = (USLP_VERSION_NUMBER << 4) | ((self.spacecraft_id >> 12) as u8) & 0b1111;
buf[1] = (self.spacecraft_id >> 4) as u8;
buf[2] = (((self.spacecraft_id & 0b1111) as u8) << 4)
| ((self.source_or_dest_field as u8) << 3)
| (self.vc_id >> 3) & 0b111;
buf[3] = ((self.vc_id & 0b111) << 5) | (self.map_id << 1);
buf[4..6].copy_from_slice(&self.frame_len_field.to_be_bytes());
buf[6] = ((self.sequence_control_flag as u8) << 7)
| ((self.protocol_control_command_flag as u8) << 6)
| ((self.ocf_flag as u8) << 3)
| self.vc_frame_count_len;
let mut packet_idx = 7;
for idx in (0..self.vc_frame_count_len).rev() {
buf[packet_idx] = ((self.vc_frame_count >> (idx * 8)) & 0xff) as u8;
packet_idx += 1;
}
Ok(self.len_header())
}
#[inline(always)]
pub fn set_frame_len(&mut self, frame_len: usize) {
// 4.1.2.7.2
// The field contains a length count C that equals one fewer than the total octets
// in the transfer frame.
self.frame_len_field = frame_len.saturating_sub(1) as u16;
}
#[inline(always)]
pub fn len_header(&self) -> usize {
7 + self.vc_frame_count_len as usize
}
#[inline(always)]
pub fn len_frame(&self) -> usize {
// 4.1.2.7.2
// The field contains a length count C that equals one fewer than the total octets
// in the transfer frame.
self.frame_len_field as usize + 1
}
}
/// Custom implementation which skips checking whether the VC frame count length fields are
/// equal. Only the actual VC frame count values are compared.
impl PartialEq for PrimaryHeader {
fn eq(&self, other: &Self) -> bool {
self.spacecraft_id == other.spacecraft_id
&& self.source_or_dest_field == other.source_or_dest_field
&& self.vc_id == other.vc_id
&& self.map_id == other.map_id
&& self.frame_len_field == other.frame_len_field
&& self.sequence_control_flag == other.sequence_control_flag
&& self.protocol_control_command_flag == other.protocol_control_command_flag
&& self.ocf_flag == other.ocf_flag
&& self.vc_frame_count == other.vc_frame_count
}
}
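To illustrate the comparison semantics, a short sketch: two headers configured with different frame count lengths but the same count value still compare equal.

fn primary_header_eq_demo() {
    let mut header_a =
        PrimaryHeader::new(0x42, SourceOrDestField::Source, 0b1, 0b1, 32).unwrap();
    let mut header_b = header_a;
    header_a.set_vc_frame_count(1, 5).unwrap();
    header_b.set_vc_frame_count(2, 5).unwrap();
    // Different count length, same count value: still considered equal.
    assert_eq!(header_a, header_b);
}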
#[derive(Debug, PartialEq, Eq, num_enum::TryFromPrimitive, num_enum::IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u5, exhaustive = false)]
#[repr(u8)]
#[non_exhaustive]
pub enum UslpProtocolId {
SpacePacketsOrEncapsulation = 0b00000,
/// COP-1 control commands within the TFDZ.
Cop1ControlCommands = 0b00001,
/// COP-P control commands within the TFDZ.
CopPControlCommands = 0b00010,
/// SDLS control commands within the TFDZ.
Sdls = 0b00011,
UserDefinedOctetStream = 0b00100,
/// Proximity-1 Supervisory Protocol Data Units (SPDUs) within the TFDZ.
Spdu = 0b00111,
/// Entire fixed-length TFDZ contains idle data.
Idle = 0b11111,
}
#[derive(Debug, PartialEq, Eq, num_enum::TryFromPrimitive, num_enum::IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u3, exhaustive = true)]
#[repr(u8)]
pub enum ConstructionRule {
/// Indicates a fixed-length TFDZ whose contents are CCSDS packets concatenated together, which
/// may span transfer frame boundaries. The First Header Pointer (FHP) is required for packet
/// extraction.
PacketSpanningMultipleFrames = 0b000,
StartOfMapaSduOrVcaSdu = 0b001,
ContinuingPortionOfMapaSdu = 0b010,
OctetStream = 0b011,
StartingSegment = 0b100,
ContinuingSegment = 0b101,
LastSegment = 0b110,
NoSegmentation = 0b111,
}
impl ConstructionRule {
pub const fn applicable_to_fixed_len_tfdz(&self) -> bool {
match self {
ConstructionRule::PacketSpanningMultipleFrames => true,
ConstructionRule::StartOfMapaSduOrVcaSdu => true,
ConstructionRule::ContinuingPortionOfMapaSdu => true,
ConstructionRule::OctetStream => false,
ConstructionRule::StartingSegment => false,
ConstructionRule::ContinuingSegment => false,
ConstructionRule::LastSegment => false,
ConstructionRule::NoSegmentation => false,
}
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct TransferFrameDataFieldHeader {
/// Construction rule for the TFDZ.
construction_rule: ConstructionRule,
uslp_protocol_id: UslpProtocolId,
/// First header or last valid octet pointer. Only present if the construction rule indicates
/// a fixed-length TFDZ.
fhp_or_lvo: Option<u16>,
}
impl TransferFrameDataFieldHeader {
#[inline]
pub const fn len_header(&self) -> usize {
if self.construction_rule.applicable_to_fixed_len_tfdz() {
3
} else {
1
}
}
#[inline]
pub const fn construction_rule(&self) -> ConstructionRule {
self.construction_rule
}
#[inline]
pub const fn uslp_protocol_id(&self) -> UslpProtocolId {
self.uslp_protocol_id
}
#[inline]
pub const fn fhp_or_lvo(&self) -> Option<u16> {
self.fhp_or_lvo
}
pub fn from_bytes(buf: &[u8]) -> Result<Self, UslpError> {
if buf.is_empty() {
return Err(ByteConversionError::FromSliceTooSmall {
found: 0,
expected: 1,
}
.into());
}
let construction_rule = ConstructionRule::try_from((buf[0] >> 5) & 0b111)
.map_err(|e| UslpError::InvalidConstructionRule(e.number))?;
let mut fhp_or_lvo = None;
if construction_rule.applicable_to_fixed_len_tfdz() {
if buf.len() < 3 {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: 3,
}
.into());
}
fhp_or_lvo = Some(u16::from_be_bytes(buf[1..3].try_into().unwrap()));
}
Ok(Self {
construction_rule,
uslp_protocol_id: UslpProtocolId::try_from(buf[0] & 0b11111)
.map_err(|e| UslpError::InvalidProtocolId(e.number))?,
fhp_or_lvo,
})
}
}
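A short decoding sketch for a one-byte TFDZ header using the no-segmentation construction rule, with values taken from the enums above:

fn tfdh_decode_demo() {
    let raw = [((ConstructionRule::NoSegmentation as u8) << 5)
        | (UslpProtocolId::SpacePacketsOrEncapsulation as u8)];
    let tfdh = TransferFrameDataFieldHeader::from_bytes(&raw).unwrap();
    // No FHP/LVO field is expected for a construction rule that does not apply to
    // fixed-length TFDZs, so the header is a single byte long.
    assert_eq!(tfdh.construction_rule(), ConstructionRule::NoSegmentation);
    assert_eq!(tfdh.uslp_protocol_id(), UslpProtocolId::SpacePacketsOrEncapsulation);
    assert!(tfdh.fhp_or_lvo().is_none());
    assert_eq!(tfdh.len_header(), 1);
}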
/// Simple USLP transfer frame reader.
///
/// Currently, only insert zone lengths of 0 are supported.
#[derive(Debug)]
pub struct TransferFrameReader<'buf> {
primary_header: PrimaryHeader,
data_field_header: TransferFrameDataFieldHeader,
data: &'buf [u8],
operational_control_field: Option<u32>,
}
impl<'buf> TransferFrameReader<'buf> {
/// This function assumes an insert zone length of 0.
pub fn from_bytes(
buf: &'buf [u8],
has_fecf: bool,
) -> Result<TransferFrameReader<'buf>, UslpError> {
let primary_header = PrimaryHeader::from_bytes(buf)?;
if primary_header.len_frame() > buf.len() {
return Err(ByteConversionError::FromSliceTooSmall {
expected: primary_header.len_frame(),
found: buf.len(),
}
.into());
}
let data_field_header =
TransferFrameDataFieldHeader::from_bytes(&buf[primary_header.len_header()..])?;
let data_idx = primary_header.len_header() + data_field_header.len_header();
let frame_len = primary_header.len_frame();
let mut operational_control_field = None;
let mut data_len = frame_len - data_idx;
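// The optional frame error control field (2 bytes) and the optional OCF (4 bytes)
// trail the data zone and are therefore not part of the returned user data.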
if has_fecf {
data_len -= 2;
}
if primary_header.ocf_flag {
data_len -= 4;
operational_control_field = Some(u32::from_be_bytes(
buf[data_idx + data_len..data_idx + data_len + 4]
.try_into()
.unwrap(),
));
}
let data_end = data_idx + data_len;
if has_fecf {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(&buf[0..frame_len]);
if digest.finalize() != 0 {
return Err(UslpError::ChecksumFailure(u16::from_be_bytes(
buf[frame_len - 2..frame_len].try_into().unwrap(),
)));
}
}
Ok(Self {
primary_header,
data_field_header,
data: buf[data_idx..data_end].try_into().unwrap(),
operational_control_field,
})
}
#[inline]
pub fn len_frame(&self) -> usize {
self.primary_header.len_frame()
}
#[inline]
pub fn primary_header(&self) -> &PrimaryHeader {
&self.primary_header
}
#[inline]
pub fn data_field_header(&self) -> &TransferFrameDataFieldHeader {
&self.data_field_header
}
#[inline]
pub fn data(&self) -> &'buf [u8] {
self.data
}
#[inline]
pub fn operational_control_field(&self) -> &Option<u32> {
&self.operational_control_field
}
}
#[cfg(test)]
mod tests {
use super::*;
fn common_basic_check(buf: &[u8]) {
assert_eq!(buf[0] >> 4, USLP_VERSION_NUMBER);
// First four bits SCID.
assert_eq!(buf[0] & 0b1111, 0b1010);
// Next eight bits SCID.
assert_eq!(buf[1], 0b01011100);
// Last four bits SCID.
assert_eq!(buf[2] >> 4, 0b0011);
assert_eq!((buf[2] >> 3) & 0b1, SourceOrDestField::Dest as u8);
// First three bits VCID.
assert_eq!(buf[2] & 0b111, 0b110);
// Last three bits VCID.
assert_eq!(buf[3] >> 5, 0b101);
// MAP ID
assert_eq!((buf[3] >> 1) & 0b1111, 0b1010);
// End of primary header flag
assert_eq!(buf[3] & 0b1, 0);
assert_eq!(u16::from_be_bytes(buf[4..6].try_into().unwrap()), 0x2344);
}
#[test]
fn test_basic_0() {
let mut buf: [u8; 8] = [0; 8];
// Should be all zeros after writing.
buf[6] = 0xff;
let primary_header = PrimaryHeader::new(
0b10100101_11000011,
SourceOrDestField::Dest,
0b110101,
0b1010,
0x2345,
)
.unwrap();
// Virtual channel count 0.
assert_eq!(primary_header.write_to_be_bytes(&mut buf).unwrap(), 7);
common_basic_check(&buf);
assert_eq!(primary_header.vc_frame_count_len(), 0);
assert_eq!(primary_header.vc_frame_count(), 0);
// Bypass / Sequence Control Flag.
assert_eq!(
(buf[6] >> 7) & 0b1,
BypassSequenceControlFlag::SequenceControlledQoS as u8
);
// Protocol Control Command Flag.
assert_eq!(
(buf[6] >> 6) & 0b1,
ProtocolControlCommandFlag::TfdfContainsUserData as u8
);
// OCF flag.
assert_eq!((buf[6] >> 3) & 0b1, false as u8);
// VCF count length.
assert_eq!(buf[6] & 0b111, 0);
}
#[test]
fn test_basic_1() {
let mut buf: [u8; 16] = [0; 16];
// Should be all zeros after writing.
buf[6] = 0xff;
let mut primary_header = PrimaryHeader::new(
0b10100101_11000011,
SourceOrDestField::Dest,
0b110101,
0b1010,
0x2345,
)
.unwrap();
primary_header.sequence_control_flag = BypassSequenceControlFlag::ExpeditedQoS;
primary_header.protocol_control_command_flag =
ProtocolControlCommandFlag::TfdfContainsProtocolInfo;
primary_header.ocf_flag = true;
primary_header.set_vc_frame_count(4, 0x12345678).unwrap();
// Virtual channel count 4.
assert_eq!(primary_header.write_to_be_bytes(&mut buf).unwrap(), 11);
assert_eq!(primary_header.vc_frame_count_len(), 4);
assert_eq!(primary_header.vc_frame_count(), 0x12345678);
common_basic_check(&buf);
// Bypass / Sequence Control Flag.
assert_eq!(
(buf[6] >> 7) & 0b1,
BypassSequenceControlFlag::ExpeditedQoS as u8
);
// Protocol Control Command Flag.
assert_eq!(
(buf[6] >> 6) & 0b1,
ProtocolControlCommandFlag::TfdfContainsProtocolInfo as u8
);
// OCF flag.
assert_eq!((buf[6] >> 3) & 0b1, true as u8);
// VCF count length.
assert_eq!(buf[6] & 0b111, 4);
assert_eq!(
u32::from_be_bytes(buf[7..11].try_into().unwrap()),
0x12345678
);
}
#[test]
fn test_vcf_count_len_two() {
let mut buf: [u8; 16] = [0; 16];
// Should be all zeros after writing.
buf[6] = 0xff;
let mut primary_header = PrimaryHeader::new(
0b10100101_11000011,
SourceOrDestField::Dest,
0b110101,
0b1010,
0x2345,
)
.unwrap();
primary_header.set_vc_frame_count(2, 5).unwrap();
assert_eq!(primary_header.vc_frame_count_len(), 2);
assert_eq!(primary_header.vc_frame_count(), 5);
assert_eq!(primary_header.write_to_be_bytes(&mut buf).unwrap(), 9);
assert_eq!(buf[6] & 0b111, 2);
assert_eq!(u16::from_be_bytes(buf[7..9].try_into().unwrap()), 5);
let primary_header = PrimaryHeader::from_bytes(&buf).unwrap();
assert_eq!(primary_header.vc_frame_count_len(), 2);
assert_eq!(primary_header.vc_frame_count(), 5);
}
#[test]
fn test_vcf_count_len_one() {
let mut buf: [u8; 16] = [0; 16];
// Should be all zeros after writing.
buf[6] = 0xff;
let mut primary_header = PrimaryHeader::new(
0b10100101_11000011,
SourceOrDestField::Dest,
0b110101,
0b1010,
0x2345,
)
.unwrap();
primary_header.set_vc_frame_count(1, 255).unwrap();
assert_eq!(primary_header.vc_frame_count_len(), 1);
assert_eq!(primary_header.vc_frame_count(), 255);
assert_eq!(primary_header.write_to_be_bytes(&mut buf).unwrap(), 8);
assert_eq!(buf[6] & 0b111, 1);
assert_eq!(buf[7], 255);
let primary_header = PrimaryHeader::from_bytes(&buf).unwrap();
assert_eq!(primary_header.vc_frame_count_len(), 1);
assert_eq!(primary_header.vc_frame_count(), 255);
}
#[test]
fn test_reading_0() {
let mut buf: [u8; 8] = [0; 8];
let primary_header = PrimaryHeader::new(
0b10100101_11000011,
SourceOrDestField::Dest,
0b110101,
0b1010,
0x2345,
)
.unwrap();
assert_eq!(primary_header.write_to_be_bytes(&mut buf).unwrap(), 7);
let parsed_header = PrimaryHeader::from_bytes(&buf).unwrap();
assert_eq!(parsed_header, primary_header);
}
#[test]
fn test_reading_1() {
let mut buf: [u8; 16] = [0; 16];
let mut primary_header = PrimaryHeader::new(
0b10100101_11000011,
SourceOrDestField::Dest,
0b110101,
0b1010,
0x2345,
)
.unwrap();
primary_header.sequence_control_flag = BypassSequenceControlFlag::ExpeditedQoS;
primary_header.protocol_control_command_flag =
ProtocolControlCommandFlag::TfdfContainsProtocolInfo;
primary_header.ocf_flag = true;
primary_header.set_vc_frame_count(4, 0x12345678).unwrap();
assert_eq!(primary_header.write_to_be_bytes(&mut buf).unwrap(), 11);
let parsed_header = PrimaryHeader::from_bytes(&buf).unwrap();
assert_eq!(parsed_header, primary_header);
}
#[test]
fn test_invalid_vcid() {
let error = PrimaryHeader::new(
0b10100101_11000011,
SourceOrDestField::Dest,
0b1101011,
0b1010,
0x2345,
);
assert!(error.is_err());
let error = error.unwrap_err();
matches!(error, UslpError::InvalidVcId(0b1101011));
}
#[test]
fn test_invalid_mapid() {
let error = PrimaryHeader::new(
0b10100101_11000011,
SourceOrDestField::Dest,
0b110101,
0b10101,
0x2345,
);
assert!(error.is_err());
let error = error.unwrap_err();
matches!(error, UslpError::InvalidMapId(0b10101));
}
#[test]
fn test_invalid_vc_count() {
let mut primary_header = PrimaryHeader::new(
0b10100101_11000011,
SourceOrDestField::Dest,
0b110101,
0b1010,
0x2345,
)
.unwrap();
matches!(
primary_header.set_vc_frame_count(0, 1).unwrap_err(),
InvalidValueForLen { value: 1, len: 0 }
);
matches!(
primary_header.set_vc_frame_count(1, 256).unwrap_err(),
InvalidValueForLen { value: 256, len: 1 }
);
}
#[test]
fn test_frame_parser() {
let mut buf: [u8; 32] = [0; 32];
// Build a variable frame manually.
let mut primary_header =
PrimaryHeader::new(0x01, SourceOrDestField::Dest, 0b110101, 0b1010, 0).unwrap();
let header_len = primary_header.len_header();
buf[header_len] = ((ConstructionRule::NoSegmentation as u8) << 5)
| (UslpProtocolId::UserDefinedOctetStream as u8) & 0b11111;
buf[header_len + 1] = 0x42;
// 1 byte TFDH, 1 byte data, 2 bytes CRC.
primary_header.set_frame_len(header_len + 4);
primary_header.write_to_be_bytes(&mut buf).unwrap();
// Calculate and write CRC16.
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(&buf[0..header_len + 2]);
buf[header_len + 2..header_len + 4].copy_from_slice(&digest.finalize().to_be_bytes());
// Now parse the frame.
let frame = TransferFrameReader::from_bytes(&buf, true).unwrap();
assert_eq!(*frame.primary_header(), primary_header);
assert_eq!(frame.data().len(), 1);
assert_eq!(frame.data()[0], 0x42);
assert_eq!(
frame.data_field_header().uslp_protocol_id(),
UslpProtocolId::UserDefinedOctetStream
);
assert_eq!(
frame.data_field_header().construction_rule(),
ConstructionRule::NoSegmentation
);
assert!(frame.data_field_header().fhp_or_lvo().is_none());
assert_eq!(frame.len_frame(), 11);
assert!(frame.operational_control_field().is_none());
}
#[test]
fn test_frame_parser_invalid_checksum() {
let mut buf: [u8; 32] = [0; 32];
// Build a variable frame manually.
let mut primary_header =
PrimaryHeader::new(0x01, SourceOrDestField::Dest, 0b110101, 0b1010, 0).unwrap();
let header_len = primary_header.len_header();
buf[header_len] = ((ConstructionRule::NoSegmentation as u8) << 5)
| (UslpProtocolId::UserDefinedOctetStream as u8) & 0b11111;
buf[header_len + 1] = 0x42;
// 1 byte TFDH, 1 byte data, 2 bytes CRC.
primary_header.set_frame_len(header_len + 4);
primary_header.write_to_be_bytes(&mut buf).unwrap();
// Now parse the frame without having calculated the checksum.
match TransferFrameReader::from_bytes(&buf, true) {
Ok(_) => panic!("transfer frame read call did not fail"),
Err(e) => {
assert_eq!(e, UslpError::ChecksumFailure(0));
}
}
}
#[test]
fn test_frame_parser_buf_too_small() {
let mut buf: [u8; 32] = [0; 32];
// Build a variable frame manually.
let mut primary_header =
PrimaryHeader::new(0x01, SourceOrDestField::Dest, 0b110101, 0b1010, 0).unwrap();
let header_len = primary_header.len_header();
buf[header_len] = ((ConstructionRule::NoSegmentation as u8) << 5)
| (UslpProtocolId::UserDefinedOctetStream as u8) & 0b11111;
buf[header_len + 1] = 0x42;
// 1 byte TFDH, 1 byte data, 2 bytes CRC.
primary_header.set_frame_len(header_len + 4);
primary_header.write_to_be_bytes(&mut buf).unwrap();
// Now parse the frame.
let error = TransferFrameReader::from_bytes(&buf[0..7], true).unwrap_err();
assert_eq!(
error,
ByteConversionError::FromSliceTooSmall {
expected: primary_header.len_frame(),
found: 7
}
.into()
);
}
#[test]
fn test_from_bytes_too_small_0() {
let buf: [u8; 3] = [0; 3];
assert_eq!(
PrimaryHeader::from_bytes(&buf).unwrap_err(),
ByteConversionError::FromSliceTooSmall {
found: 3,
expected: 4
}
.into()
);
}
#[test]
fn test_from_bytes_too_small_1() {
let buf: [u8; 6] = [0; 6];
assert_eq!(
PrimaryHeader::from_bytes(&buf).unwrap_err(),
ByteConversionError::FromSliceTooSmall {
found: 6,
expected: 7
}
.into()
);
}
#[test]
fn test_from_bytes_truncated_not_supported() {
let mut buf: [u8; 7] = [0; 7];
let primary_header =
PrimaryHeader::new(0x01, SourceOrDestField::Dest, 0b110101, 0b1010, 0).unwrap();
primary_header.write_to_be_bytes(&mut buf).unwrap();
// Set truncated header flag manually.
buf[3] |= 0b1;
assert_eq!(
PrimaryHeader::from_bytes(&buf).unwrap_err(),
UslpError::HeaderIsTruncated
);
}
#[test]
fn test_from_bytes_too_small_2() {
let mut buf: [u8; 16] = [0; 16];
// Should be all zeros after writing.
buf[6] = 0xff;
let mut primary_header = PrimaryHeader::new(
0b10100101_11000011,
SourceOrDestField::Dest,
0b110101,
0b1010,
0x2345,
)
.unwrap();
primary_header.set_vc_frame_count(4, 0x12345678).unwrap();
primary_header.write_to_be_bytes(&mut buf).unwrap();
assert_eq!(
PrimaryHeader::from_bytes(&buf[0..8]).unwrap_err(),
UslpError::ByteConversion(ByteConversionError::FromSliceTooSmall {
found: 8,
expected: 11
})
);
}
#[test]
fn test_invalid_version_number() {
let mut buf: [u8; 7] = [0; 7];
let primary_header =
PrimaryHeader::new(0x01, SourceOrDestField::Dest, 0b110101, 0b1010, 0).unwrap();
primary_header.write_to_be_bytes(&mut buf).unwrap();
buf[0] &= 0b00001111;
assert_eq!(
PrimaryHeader::from_bytes(&buf).unwrap_err(),
UslpError::InvalidVersionNumber(0)
);
}
#[test]
fn test_primary_header_buf_too_small() {
let primary_header = PrimaryHeader::new(
0b10100101_11000011,
SourceOrDestField::Dest,
0b110101,
0b1010,
0x2345,
)
.unwrap();
if let Err(ByteConversionError::ToSliceTooSmall { found, expected }) =
primary_header.write_to_be_bytes(&mut [0; 4])
{
assert_eq!(found, 4);
assert_eq!(expected, 7);
} else {
panic!("writing primary header did not fail or failed with wrong error");
}
}
#[test]
fn test_applicability_contr_rules() {
assert!(ConstructionRule::PacketSpanningMultipleFrames.applicable_to_fixed_len_tfdz());
assert!(ConstructionRule::StartOfMapaSduOrVcaSdu.applicable_to_fixed_len_tfdz());
assert!(ConstructionRule::ContinuingPortionOfMapaSdu.applicable_to_fixed_len_tfdz());
assert!(!ConstructionRule::OctetStream.applicable_to_fixed_len_tfdz());
assert!(!ConstructionRule::StartingSegment.applicable_to_fixed_len_tfdz());
assert!(!ConstructionRule::ContinuingSegment.applicable_to_fixed_len_tfdz());
assert!(!ConstructionRule::LastSegment.applicable_to_fixed_len_tfdz());
assert!(!ConstructionRule::NoSegmentation.applicable_to_fixed_len_tfdz());
}
#[test]
fn test_header_len_correctness() {
let mut tfdh = TransferFrameDataFieldHeader {
construction_rule: ConstructionRule::PacketSpanningMultipleFrames,
uslp_protocol_id: UslpProtocolId::UserDefinedOctetStream,
fhp_or_lvo: Some(0),
};
assert_eq!(tfdh.len_header(), 3);
tfdh = TransferFrameDataFieldHeader {
construction_rule: ConstructionRule::StartOfMapaSduOrVcaSdu,
uslp_protocol_id: UslpProtocolId::UserDefinedOctetStream,
fhp_or_lvo: Some(0),
};
assert_eq!(tfdh.len_header(), 3);
tfdh = TransferFrameDataFieldHeader {
construction_rule: ConstructionRule::ContinuingPortionOfMapaSdu,
uslp_protocol_id: UslpProtocolId::UserDefinedOctetStream,
fhp_or_lvo: Some(0),
};
assert_eq!(tfdh.len_header(), 3);
tfdh = TransferFrameDataFieldHeader {
construction_rule: ConstructionRule::OctetStream,
uslp_protocol_id: UslpProtocolId::UserDefinedOctetStream,
fhp_or_lvo: None,
};
assert_eq!(tfdh.len_header(), 1);
}
#[test]
fn test_frame_data_field_header_from_bytes_too_small() {
let buf: [u8; 0] = [];
assert_eq!(
TransferFrameDataFieldHeader::from_bytes(&buf).unwrap_err(),
ByteConversionError::FromSliceTooSmall {
found: 0,
expected: 1
}
.into()
);
}
}


@@ -1,9 +1,7 @@
use crate::ByteConversionError;
use core::fmt::Debug;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
pub trait ToBeBytes {
type ByteArray: AsRef<[u8]>;
@@ -100,49 +98,24 @@ pub trait UnsignedEnum {
pub trait UnsignedEnumExt: UnsignedEnum + Debug + Copy + Clone + PartialEq + Eq {}
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum UnsignedByteFieldError {
/// Value is too large for specified width of byte field.
#[error("value {value} too large for width {width}")]
ValueTooLargeForWidth { width: usize, value: u64 },
/// Only 1, 2, 4 and 8 are allowed width values. Optionally contains the expected width if
/// applicable, for example for conversions.
#[error("invalid width {found}, expected {expected:?}")]
InvalidWidth {
found: usize,
expected: Option<usize>,
},
#[error("byte conversion error: {0}")]
ByteConversionError(#[from] ByteConversionError),
}
impl From<ByteConversionError> for UnsignedByteFieldError {
#[inline]
fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value)
}
}
impl Display for UnsignedByteFieldError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
Self::ByteConversionError(e) => {
write!(f, "low level byte conversion error: {e}")
}
Self::InvalidWidth { found, .. } => {
write!(f, "invalid width {found}, only 1, 2, 4 and 8 are allowed.")
}
Self::ValueTooLargeForWidth { width, value } => {
write!(f, "value {value} too large for width {width}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for UnsignedByteFieldError {}
/// Type erased variant.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]