173 Commits

Author SHA1 Message Date
34b58fe9cc Merge pull request 'bump version specifier' (#33) from prep_v0.7.0-beta2 into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #33
2023-09-26 17:15:29 +02:00
393c73cedf re-enable failing docs builds
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-09-26 17:11:39 +02:00
3e97bf0c15 bump version specifier
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-09-26 17:10:21 +02:00
7839fb3776 Merge pull request 'use non-deprecated API' (#32) from test-tweaks into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #32
2023-09-26 17:09:24 +02:00
55ad24db34 use non-deprecated API
Some checks are pending
Rust/spacepackets/pipeline/head Build started...
2023-09-26 17:08:58 +02:00
3b4a909ce1 Merge pull request 'Added to_vec method for SerializablePusPacket' (#31) from serializable-pus-to-vec into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #31
2023-09-26 16:59:41 +02:00
76ad1c7ead Added to_vec method for SerializablePusPacket
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-09-26 16:55:07 +02:00
79d26e1a67 Merge pull request 'Packet ID trait implementations' (#30) from packet-id-trait-impls into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #30
2023-09-18 18:19:31 +02:00
be37c15478 docs failure should not fail the whole build
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-09-18 18:00:14 +02:00
a6bced7983 this is okay
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-09-18 17:50:50 +02:00
5d8b5ce370 please stop
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-09-18 17:42:10 +02:00
b94d07f6c9 try 2
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2023-09-18 17:38:56 +02:00
90e48483bb next try
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2023-09-18 17:36:10 +02:00
963b9dbb5f inline PacketId raw call
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2023-09-18 17:34:49 +02:00
2a0db6b21c maybe this fixes the issue? 2023-09-18 17:34:04 +02:00
a4b14250c2 add stage to display toolchain info
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2023-09-18 17:32:54 +02:00
6116cdb27c add some tests
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-09-18 17:13:22 +02:00
6ebdf7e330 added packet ID trait impls
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-09-18 16:59:38 +02:00
e935b3825a Merge pull request 'release-checklist: missing bullet point' (#29) from release-checklist-note into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #29
2023-08-28 18:56:55 +02:00
a49737fc34 release-checklist: missing bullet point
Some checks are pending
Rust/spacepackets/pipeline/head Build queued...
2023-08-28 18:56:08 +02:00
3081539bb2 Merge pull request 'prep next beta release' (#28) from prep_v0.7.0-beta.1 into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #28
2023-08-28 18:54:32 +02:00
1b01c8bb0b small changelog note
Some checks are pending
Rust/spacepackets/pipeline/head Build started...
Rust/spacepackets/pipeline/pr-main Build started...
2023-08-28 18:54:01 +02:00
2ee3eee32e only allow zerocopy v0.7.0
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-08-28 18:47:41 +02:00
406d731bbe fix zerocopy usage
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-08-28 18:45:24 +02:00
49b50ec682 prep next beta release
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-08-28 18:36:00 +02:00
00fdfde015 Merge pull request 'invalid time code struct variant' (#26) from invalid-time-code-struct-variant into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #26
2023-08-28 17:42:46 +02:00
491b03c701 Merge remote-tracking branch 'origin/main' into invalid-time-code-struct-variant
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-08-28 17:32:10 +02:00
e090beedde CHANGELOG
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head This commit looks good
2023-08-28 17:31:41 +02:00
6f2ed3003f Merge pull request 'UnsignedByteFieldError: Use struct variants' (#27) from ubf-error-struct-variants into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #27
2023-08-28 17:27:34 +02:00
0b5a384743 fix tests
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-08-28 17:24:54 +02:00
925b2aa8d8 remove obsolete comment
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-08-28 17:22:36 +02:00
d98d4b55c8 convert UnsigedByteFieldError variants to struct variants
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-08-28 17:21:48 +02:00
94c378fa3b Merge branch 'main' into invalid-time-code-struct-variant
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-08-28 17:11:32 +02:00
1fc15230fa Merge pull request 'Size missmatch struct variant' (#25) from size-missmatch-struct-variant into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #25
2023-08-28 17:10:57 +02:00
e78f196a42 invalid time code struct variant
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-08-28 17:10:45 +02:00
2a11359a81 Merge branch 'main' into size-missmatch-struct-variant
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-08-28 17:01:14 +02:00
c226c5ea0f changelog
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-08-28 17:00:29 +02:00
ab65845573 Merge pull request 'PDU improvements and additions' (#24) from pdu-additions into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #24
2023-08-28 16:52:25 +02:00
3206af690c well that was a lot
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-08-18 10:09:32 +02:00
805065a7b9 cargo fmt
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-08-17 22:13:00 +02:00
62533bb91c Merge remote-tracking branch 'origin/main' into pdu-additions
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2023-08-17 21:34:38 +02:00
c085f9ab32 Merge pull request 'update LV and TLV code' (#22) from update-lv-tlv into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #22
2023-08-17 21:33:48 +02:00
f208a9b0f0 fixed a test
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-08-17 21:24:12 +02:00
c96a86a994 this should be better 2023-08-17 21:22:49 +02:00
9dfc593d2a fixes for pdu error enum 2023-08-17 21:04:27 +02:00
990b8de519 changelog
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-08-17 20:42:45 +02:00
965541e422 getter function for datafield len 2023-08-17 20:41:45 +02:00
9a52066314 fmt
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-08-16 18:19:41 +02:00
6ab05e2d83 fix for docs
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2023-08-16 18:19:15 +02:00
2d81a79321 Merge remote-tracking branch 'origin/main' into update-lv-tlv
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2023-08-16 18:17:51 +02:00
fc7bee342c Merge pull request 'Const UBF' (#23) from const-ubf into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #23
2023-08-16 18:17:13 +02:00
1789cff2b8 why is this still not a test
Some checks are pending
Rust/spacepackets/pipeline/head Build queued...
2023-08-16 18:16:43 +02:00
5ae5abe09a make UnsignedByteField helpers const
Some checks are pending
Rust/spacepackets/pipeline/head Build started...
2023-08-16 18:15:49 +02:00
407d1e1154 additional test for new method
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2023-08-16 18:10:39 +02:00
3ba575aac1 changelog
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-08-16 18:01:54 +02:00
81db36d159 additional docs
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-08-16 18:01:09 +02:00
081f6e840f added additional API
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-08-16 17:58:19 +02:00
3cb19298c8 some restructuring
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2023-08-16 17:27:02 +02:00
4e2c0f1aa7 added a few additional tests
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-08-16 16:36:15 +02:00
83db710950 update LV and TLV code
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-08-16 16:27:10 +02:00
0f49672829 Merge pull request 'v0.7.0 beta' (#21) from prep_v0.7.0-beta into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #21
2023-08-16 14:22:40 +02:00
ffd1bf3769 add missing doc_cfg
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-08-16 14:11:26 +02:00
ccf7592284 doc fix
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-08-16 14:02:33 +02:00
2b7fcbdd8e changelog 2023-08-16 14:00:38 +02:00
e389f77063 lets release a beta version 2023-08-16 13:59:34 +02:00
fcf4449c14 Merge pull request 'gitlab ci fix' (#20) from gitlab-ci-fix into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #20
2023-08-14 14:06:56 +02:00
05916130a8 gitlab ci fix
Some checks are pending
Rust/spacepackets/pipeline/head Build started...
2023-08-14 14:03:25 +02:00
4b7f2b4817 Merge pull request 'bump msrv to v1.61' (#19) from bump_msrv into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #19
2023-08-11 14:01:43 +02:00
91e7d8549c bump msrv to v1.61
Some checks are pending
Rust/spacepackets/pipeline/head Build started...
2023-08-11 14:00:46 +02:00
3430275638 Merge pull request 'PDU config tweak' (#17) from pdu-conf-tweaK into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #17
2023-08-10 21:40:43 +02:00
a3bab2619c compile fix
All checks were successful
Rust/spacepackets/pipeline/pr-main This commit looks good
Rust/spacepackets/pipeline/head This commit looks good
2023-08-10 21:28:27 +02:00
70815fa1e3 Merge remote-tracking branch 'origin/main' into pdu-conf-tweaK
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-08-10 21:06:56 +02:00
ebfd3c636d Merge pull request 'Improve CommonPduConfig' (#18) from improve-pdu-conf into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #18
2023-08-10 20:25:33 +02:00
a3da71668f also added a unittest
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2023-08-10 20:22:07 +02:00
fd893fbf89 changelog fix
All checks were successful
Rust/spacepackets/pipeline/pr-main This commit looks good
Rust/spacepackets/pipeline/head This commit looks good
2023-08-10 20:08:25 +02:00
17296ade19 fmt
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-08-10 20:04:50 +02:00
85476162cf missing changelog entry
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-08-10 20:03:50 +02:00
837b412ef0 changelog
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2023-08-10 20:01:13 +02:00
02098977a5 added pdu conf src and dest id setter 2023-08-10 19:55:42 +02:00
20584b45ca changelog
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-08-10 18:17:32 +02:00
105c598c53 some more extensions for PDU impl
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-07-26 23:27:40 +02:00
c65a024d97 make metadata params accessible
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-26 22:53:05 +02:00
041959e546 no error handling necessary here
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-26 22:00:03 +02:00
cc1ec56b33 change for common pdu conf
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-07-25 00:42:31 +02:00
0a59f3258a Merge pull request 'Refactor ECSS packet modules' (#16) from refactoring-ecss-packet-mods into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #16
2023-07-11 22:26:26 +02:00
62df510147 new TM simple constructor
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-07-11 22:12:17 +02:00
5b8cc30012 oh boy
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-11 14:16:17 +02:00
3c8f9f9f07 added some more deprecation warnings
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-07-11 00:41:35 +02:00
46eab35290 added trait impls to legacy classes
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-07-11 00:07:38 +02:00
7db2190ec3 more improvements
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-07-11 00:03:07 +02:00
77be96e8de changelog
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-10 23:37:23 +02:00
6eb3cbbd84 added new marker traits for PUS TC and PUS TM
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-10 23:36:27 +02:00
c0e70daf58 some tweaks and test update
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-10 23:29:55 +02:00
eca7e09d19 changelog
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-10 23:18:09 +02:00
98e2a73aa2 move deprecation blocks
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-10 11:59:45 +02:00
05eb9d44ef how do you deal with these deprecation warnings?
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-10 11:20:55 +02:00
ba22618449 Merge remote-tracking branch 'origin/main' into refactoring-ecss-packet-mods
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-07-10 10:30:26 +02:00
9ecc1f1ff2 Merge pull request 'try fixing CI' (#15) from try-fix-ci into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #15
2023-07-10 10:27:31 +02:00
240004b814 make thiserror optional
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-07-10 10:25:11 +02:00
79791a75bd try fixing CI
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-10 10:15:37 +02:00
b553cdc2ec Added PusTmCreator and PusTmReader
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-10 01:11:42 +02:00
e46de3421e resolve merge conflict
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-10 00:38:34 +02:00
fd95c86294 Merge remote-tracking branch 'origin/main' into refactoring-ecss-packet-mods 2023-07-10 00:38:07 +02:00
f117c8c4de changelog correction
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-10 00:37:16 +02:00
784564a20e now its a proper re-export
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-10 00:00:20 +02:00
e3d2d88538 remove another error suffix
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-09 19:31:32 +02:00
4969d6c33c moved re-export
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-09 16:48:53 +02:00
581b51c61c improve std timestamp error further
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-09 16:46:25 +02:00
b5bea3e1c6 implement thiserror::Error for StdTimestampError 2023-07-09 16:43:45 +02:00
defd7609e7 I am not sure this is required..
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-07-09 12:39:00 +02:00
b9774c4c9f doc fixes and tweaks 2023-07-09 12:33:34 +02:00
4fdfb20946 refactoring ECSS packetm odules 2023-07-09 12:27:44 +02:00
2704c589be prep v0.6.0
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-07-06 01:26:53 +02:00
32cbbb1c19 some minor fixes and tweaks 2023-07-06 01:24:40 +02:00
4485ed2669 this API is useful for funnels
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-05 19:28:43 +02:00
2deac938bb better API name
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-05 19:26:22 +02:00
929590ecb0 add additional API to set APID
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-07-05 19:25:05 +02:00
c85177ece9 API update
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-05 19:07:31 +02:00
f406957752 docs
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-03 00:57:38 +02:00
28cd8c02ac move additional function to trait
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-03 00:54:21 +02:00
ef4244c8cb better name for function
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-02 20:57:50 +02:00
94cfe59235 allow delegate update
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-02 18:43:05 +02:00
ec5d98a9b5 require this for less duplicate code
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-02 18:38:18 +02:00
1ddfc432f3 Merge pull request 'CFDP initial packet support' (#14) from cfdp_first_support into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #14
2023-07-02 17:31:16 +02:00
ae8fb8ee14 core compatibility fix
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-07-02 17:25:10 +02:00
188651c4a4 typos
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2023-07-02 17:22:07 +02:00
ad64957342 some documentation
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-02 17:20:57 +02:00
a313a784ff finish FS request unittests
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-07-02 17:18:33 +02:00
3727e7e668 added more FS request tests
All checks were successful
Rust/spacepackets/pipeline/pr-main This commit looks good
Rust/spacepackets/pipeline/head This commit looks good
2023-06-12 16:19:20 +02:00
0e87039010 added first basic state test
All checks were successful
Rust/spacepackets/pipeline/pr-main This commit looks good
Rust/spacepackets/pipeline/head This commit looks good
2023-06-12 16:04:32 +02:00
d9028d21da start adding tests
All checks were successful
Rust/spacepackets/pipeline/pr-main This commit looks good
Rust/spacepackets/pipeline/head This commit looks good
2023-06-12 15:52:10 +02:00
99dbf9dc85 finished basic FS request impl
All checks were successful
Rust/spacepackets/pipeline/pr-main This commit looks good
Rust/spacepackets/pipeline/head This commit looks good
2023-06-12 14:26:40 +02:00
895080bbc0 add crc serialization for all packets
All checks were successful
Rust/spacepackets/pipeline/pr-main This commit looks good
Rust/spacepackets/pipeline/head This commit looks good
2023-06-12 13:00:58 +02:00
e48c2fe368 some docs 2023-06-12 04:13:41 +02:00
d217a669b2 add more docs 2023-06-12 04:04:52 +02:00
15bc12aede that should be sufficient for the first FSM approach 2023-06-12 04:02:18 +02:00
02675ba086 first basic Finished PDU impl 2023-06-12 03:57:38 +02:00
eb6bc4b8a8 added some basic API for finished PDU
All checks were successful
Rust/spacepackets/pipeline/pr-main This commit looks good
Rust/spacepackets/pipeline/head This commit looks good
2023-06-08 20:55:46 +02:00
006bc39ff6 finished basic EOF unittests
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-06-08 20:50:18 +02:00
bce16a6018 Merge branch 'cfdp_first_support' of https://egit.irs.uni-stuttgart.de/rust/spacepackets into cfdp_first_support
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-06-08 19:25:22 +02:00
84b909b722 continue EOF 2023-06-08 19:24:55 +02:00
ab5c28d304 finish PDU
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-06-08 16:51:22 +02:00
912c03b5c7 base line EOF model
All checks were successful
Rust/spacepackets/pipeline/pr-main This commit looks good
Rust/spacepackets/pipeline/head This commit looks good
2023-06-07 01:12:07 +02:00
0b714b7426 start EOF
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-06-06 08:59:18 +02:00
bb1ecb29b6 fmt and clippy
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-05-30 19:35:59 +02:00
9f574ff443 continue file data test 2023-05-30 19:35:38 +02:00
0ad8dd6eef add test with segment metadata
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-05-30 17:39:33 +02:00
81eb8e7887 continue file data PDU
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-05-30 15:36:02 +02:00
5c3c9a9bde add test run config
All checks were successful
Rust/spacepackets/pipeline/pr-main This commit looks good
Rust/spacepackets/pipeline/head This commit looks good
2023-05-30 11:09:41 +02:00
44223c1c0f add file data test harness
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head There was a failure building this commit
2023-05-30 08:33:03 +02:00
9d758cce45 first basic fd PDU impl
All checks were successful
Rust/spacepackets/pipeline/pr-main This commit looks good
Rust/spacepackets/pipeline/head This commit looks good
2023-05-30 00:16:16 +02:00
3166a280bc metadata PDU done 2023-05-29 23:38:07 +02:00
bf4e841499 added correct PDU datafield length handling
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-05-29 14:28:15 +02:00
ce0848dc28 base line metadata PDU impl done 2023-05-29 13:46:19 +02:00
e13183764e continue metadata PDU tests 2023-05-29 01:29:04 +02:00
6865898102 add first test
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2023-05-28 23:50:12 +02:00
e343faa1c5 clippy fixes
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-05-21 20:30:16 +02:00
f6e309d2ee docs and stuff
All checks were successful
Rust/spacepackets/pipeline/pr-main This commit looks good
Rust/spacepackets/pipeline/head This commit looks good
2023-05-19 00:42:31 +02:00
9a9694981a start adding LV tests
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-05-18 20:37:41 +02:00
b37d932e4f TLV and TV abstractions complete
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2023-05-18 15:01:08 +02:00
0c085ef27b added basic TLV impl 2023-05-18 14:05:51 +02:00
d5a3e7c0d4 PDU header base impl done
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-05-18 13:32:45 +02:00
02bae5de6c almost completed PDU header impl 2023-05-18 11:08:46 +02:00
0a3848d0a2 add more tests 2023-05-18 00:46:58 +02:00
a67718cff2 bugfix
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-05-15 01:03:15 +02:00
53eb184534 add first PDU header tests 2023-05-15 01:01:46 +02:00
074882c160 tests done 2023-05-15 00:11:41 +02:00
8bb4d6d32e tests almost complete 2023-05-14 23:56:26 +02:00
8d15091b42 added more to changelog
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-05-14 20:11:31 +02:00
d2f944580c thats a lot
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-05-14 20:10:34 +02:00
4bbf38916a completed base definitions
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2023-05-14 16:55:25 +02:00
3457b3a8f9 add first definitions
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-02-19 20:52:42 +01:00
0304f132e3 add cfdp mod
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2023-02-19 18:45:14 +01:00
27 changed files with 7381 additions and 1756 deletions


@ -27,7 +27,7 @@ jobs:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: 1.60.0
toolchain: 1.61.0
override: true
profile: minimal
- uses: actions-rs/cargo@v1

.idea/runConfigurations/Check.xml generated Normal file

@ -0,0 +1,19 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Check" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
<option name="command" value="check --all-features" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="emulateTerminal" value="false" />
<option name="channel" value="DEFAULT" />
<option name="requiredFeatures" value="true" />
<option name="allFeatures" value="true" />
<option name="withSudo" value="false" />
<option name="buildTarget" value="REMOTE" />
<option name="backtrace" value="SHORT" />
<envs />
<option name="isRedirectInput" value="false" />
<option name="redirectInputPath" value="" />
<method v="2">
<option name="CARGO.BUILD_TASK_PROVIDER" enabled="true" />
</method>
</configuration>
</component>

.idea/runConfigurations/Test.xml generated Normal file

@ -0,0 +1,19 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Test" type="CargoCommandRunConfiguration" factoryName="Cargo Command" nameIsGenerated="true">
<option name="command" value="test" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="emulateTerminal" value="false" />
<option name="channel" value="DEFAULT" />
<option name="requiredFeatures" value="true" />
<option name="allFeatures" value="true" />
<option name="withSudo" value="false" />
<option name="buildTarget" value="REMOTE" />
<option name="backtrace" value="SHORT" />
<envs />
<option name="isRedirectInput" value="false" />
<option name="redirectInputPath" value="" />
<method v="2">
<option name="CARGO.BUILD_TASK_PROVIDER" enabled="true" />
</method>
</configuration>
</component>


@ -8,6 +8,100 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
## Added
- `PacketId` trait impls: `Ord`, `PartialOrd` and `Hash`
- `SerializablePusPacket` trait: Add `to_vec` method with default implementation.
# [v0.7.0-beta.1] 2023-08-28
- Bump `zerocopy` dependency to v0.7.0
## Changed
- The `Tlv` and `Lv` API return `&[u8]` instead of `Option<&[u8]>`.
- `ByteConversionError` error variants `ToSliceTooSmall` and `FromSliceTooSmall` are struct
variants now. `SizeMissmatch` was removed appropriately.
- `UnsignedByteFieldError` error variants `ValueTooLargeForWidth` and `InvalidWidth` are struct
variants now.
- `TimestampError` error variant `InvalidTimeCode` is struct variant now.
## Added
- Added `raw_data` API for `Tlv` and `Lv` to retrieve the whole `Lv`/`Tlv` slice if the object
was created from a raw bytestream.
- Added `MsgToUserTlv` helper class which wraps a regular `Tlv` and adds some useful functionality.
- `UnsignedByteField` and `GenericUnsignedByteField` `new` methods are `const` now.
- `PduError` variants which contained a tuple variant with multiple fields were converted to a
struct variant.
## Added
- Added `pdu_datafield_len` getter function for `PduHeader`
## Removed
- `SizeMissmatch` because it is not required for the `ByteConversionError` error enumeration
anymore.
# [v0.7.0-beta.0] 2023-08-16
- Moved MSRV from v1.60 to v1.61.
## Changed
- `PusPacket` trait: `user_data` now returns `&[u8]` instead of `Option<&[u8]>`. Empty user data
can simply be an empty slice.
- Moved ECSS TC components from `tc` to `ecss.tc`.
- Moved ECSS TM components from `tm` to `ecss.tm`.
- Converted `PusTc` class to more specialized `PusTcCreator` and `PusTcReader`
classes. The old `PusTc` class is deprecated now.
- Converted `PusTm` class to more specialized `PusTmCreator` and `PusTmReader`
classes. The old `PusTm` class is deprecated now.
- Implement `Display` and `Error` for `StdTimestampError` properly.
- Remove some redundant `Error` suffixes for enum error variants.
- `CommonPduConfig`: `new_with_defaults` replaced by `new_with_byte_fields`.
## Added
- `source_data` and `app_data` API provided for PUS TM and PUS TC reader classes. These simply
call `user_data` but are also in line with the PUS packet standard names for those fields.
- Added new marker trait `IsPusTelemetry` implemented by `PusTmCreator` and `PusTmReader`.
- Added new marker trait `IsPusTelecommand` implemented by `PusTcCreator` and `PusTcReader`.
- `metadata_param` getter method for the `MetadataPdu` object.
- `Default` impl for CFDP `ChecksumType`
- `Default` impl for CFDP `CommonPduConfig`
## Fixed
- All `MetadataGenericParam` fields are now public.
- New setter method `set_source_and_dest_id` for `CommonPduConfig`.
# [v0.6.0] 2023-07-06
## Added
- Added new `util` module which contains the following (new) helpers:
- `UnsignedEnum` trait as an abstraction for unsigned byte fields with variable lengths. It is
not tied to the ECSS PFC value like the `EcssEnumeration` trait. The method to retrieve
the size of the unsigned enumeration in bytes is now called `size`.
- `GenericUnsignedByteField<TYPE>` and helper typedefs `UnsignedU8`, `UnsignedU16`, `UnsignedU32`
and `UnsignedU64` as helper types implementing `UnsignedEnum`
- `UnsignedByteField` as a type-erased helper.
- Initial CFDP support: Added PDU packet implementation.
- Added `SerializablePusPacket` as a generic abstraction for PUS packets which are
writable.
- Added new `PusTmZeroCopyWriter` class which allows setting fields on a raw TM packet,
which might be more efficient than modifying and re-writing a packet with the
`PusTm` object.
## Changed
- The `EcssEnumeration` now requires the `UnsignedEnum` trait and only adds the `pfc` method to it.
- Renamed `byte_width` usages to `size` (part of new `UnsignedEnum` trait)
- Moved `ecss::CRC_CCITT_FALSE` CRC constant to the root module. This CRC type is not just used by
the PUS standard, but by the CCSDS Telecommand standard and the CFDP standard as well.
# [v0.5.4] 2023-02-12
## Added


@ -1,8 +1,8 @@
[package]
name = "spacepackets"
version = "0.5.4"
version = "0.7.0-beta.2"
edition = "2021"
rust-version = "1.60"
rust-version = "1.61"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
description = "Generic implementations for various CCSDS and ECSS packet standards"
homepage = "https://egit.irs.uni-stuttgart.de/rust/spacepackets"
@ -13,12 +13,19 @@ categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-sup
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
zerocopy = "0.6"
crc = "3"
delegate = ">=0.8, <0.10"
delegate = ">=0.8, <0.11"
[dependencies.zerocopy]
version = "0.7"
features = ["derive"]
[dependencies.thiserror]
version = "1"
optional = true
[dependencies.num_enum]
version = "0.5"
version = ">0.5, <=0.7"
default-features = false
[dependencies.serde]
@ -36,11 +43,11 @@ version = "0.2"
default-features = false
[dev-dependencies.postcard]
version = "1.0"
version = "1"
[features]
default = ["std"]
std = ["chrono/std", "chrono/clock", "alloc"]
std = ["chrono/std", "chrono/clock", "alloc", "thiserror"]
serde = ["dep:serde", "chrono/serde"]
alloc = ["postcard/alloc", "chrono/alloc"]


@ -13,6 +13,8 @@ Currently, this includes the following components:
- Space Packet implementation according to
[CCSDS Blue Book 133.0-B-2](https://public.ccsds.org/Pubs/133x0b2e1.pdf)
- CCSDS File Delivery Protocol (CFDP) packet implementations according to
[CCSDS Blue Book 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf)
- PUS Telecommand and PUS Telemetry implementation according to the
[ECSS-E-ST-70-41C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
- CUC (CCSDS Unsegmented Time Code) implementation according to


@ -8,6 +8,11 @@ pipeline {
}
stages {
stage('Rust Toolchain Info') {
steps {
sh 'rustc --version'
}
}
stage('Clippy') {
steps {
sh 'cargo clippy'


@ -4,7 +4,7 @@ Checklist for new releases
# Pre-Release
1. Make sure any new modules are documented sufficiently enough and check docs with
`cargo doc --all-features --open`.
`cargo +nightly doc --all-features --open`.
2. Bump version specifier in `Cargo.toml`.
3. Update `CHANGELOG.md`: Convert `unreleased` section into version section with date and add new
`unreleased` section.
@ -13,6 +13,10 @@ Checklist for new releases
6. Wait for CI/CD results for EGit and Github. These also check cross-compilation for bare-metal
targets.
# Release
1. `cargo publish`
# Post-Release
1. Create a new release on `EGit` based on the release branch.

src/cfdp/lv.rs Normal file

@ -0,0 +1,329 @@
//! Generic CFDP length-value (LV) abstraction as specified in CFDP 5.1.8.
use crate::cfdp::TlvLvError;
use crate::ByteConversionError;
use core::str::Utf8Error;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::string::String;
pub const MIN_LV_LEN: usize = 1;
/// Generic CFDP length-value (LV) abstraction as specified in CFDP 5.1.8.
///
/// Please note that this class is zero-copy and does not generate a copy of the value data for
/// both the regular [Self::new] constructor and the [Self::from_bytes] constructor.
///
/// # Lifetimes
/// * `data`: If the LV is generated from a raw bytestream, this will be the lifetime of
/// the raw bytestream. If the LV is generated from a raw slice or a similar data reference,
/// this will be the lifetime of that data reference.
#[derive(Debug, Copy, Clone, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Lv<'data> {
data: &'data [u8],
// If the LV was generated from a raw bytestream, this will contain the start of the
// full LV.
pub(crate) raw_data: Option<&'data [u8]>,
}
impl PartialEq for Lv<'_> {
fn eq(&self, other: &Self) -> bool {
self.data == other.data
}
}
pub(crate) fn generic_len_check_data_serialization(
buf: &[u8],
data_len: usize,
min_overhead: usize,
) -> Result<(), ByteConversionError> {
if buf.len() < data_len + min_overhead {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: data_len + min_overhead,
});
}
Ok(())
}
pub(crate) fn generic_len_check_deserialization(
buf: &[u8],
min_overhead: usize,
) -> Result<(), ByteConversionError> {
if buf.len() < min_overhead {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: min_overhead,
});
}
Ok(())
}
impl<'data> Lv<'data> {
pub fn new(data: &[u8]) -> Result<Lv, TlvLvError> {
if data.len() > u8::MAX as usize {
return Err(TlvLvError::DataTooLarge(data.len()));
}
Ok(Lv {
data,
raw_data: None,
})
}
/// Creates a LV with an empty value field.
pub fn new_empty() -> Lv<'data> {
Lv {
data: &[],
raw_data: None,
}
}
/// Helper function to build a string LV. This is especially useful for the file or directory
/// path LVs
pub fn new_from_str(str_slice: &str) -> Result<Lv, TlvLvError> {
Self::new(str_slice.as_bytes())
}
/// Helper function to build a string LV. This is especially useful for the file or directory
/// path LVs
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub fn new_from_string(string: &'data String) -> Result<Lv<'data>, TlvLvError> {
Self::new(string.as_bytes())
}
/// Returns the length of the value part, not including the length byte.
pub fn len_value(&self) -> usize {
self.data.len()
}
/// Returns the full raw length, including the length byte.
pub fn len_full(&self) -> usize {
self.len_value() + 1
}
/// Checks whether the value field is empty.
pub fn is_empty(&self) -> bool {
self.data.len() == 0
}
pub fn value(&self) -> &[u8] {
self.data
}
/// If the LV was generated from a raw bytestream using [Self::from_bytes], the raw start
/// of the LV can be retrieved with this method.
pub fn raw_data(&self) -> Option<&[u8]> {
self.raw_data
}
/// Convenience function to extract the value as a [str]. This is useful if the LV is
/// known to contain a [str], for example being a file name.
pub fn value_as_str(&self) -> Option<Result<&'data str, Utf8Error>> {
if self.is_empty() {
return None;
}
Some(core::str::from_utf8(self.data))
}
/// Writes the LV to a raw buffer. Please note that the first byte will contain the length
/// of the value, and the value may not exceed a length of [u8::MAX].
pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
generic_len_check_data_serialization(buf, self.len_value(), MIN_LV_LEN)?;
Ok(self.write_to_be_bytes_no_len_check(buf))
}
/// Reads a LV from a raw buffer.
pub fn from_bytes(buf: &'data [u8]) -> Result<Lv<'data>, ByteConversionError> {
generic_len_check_deserialization(buf, MIN_LV_LEN)?;
Self::from_be_bytes_no_len_check(buf)
}
pub(crate) fn write_to_be_bytes_no_len_check(&self, buf: &mut [u8]) -> usize {
if self.is_empty() {
buf[0] = 0;
return MIN_LV_LEN;
}
// Length check in constructor ensures the length always has a valid value.
buf[0] = self.data.len() as u8;
buf[MIN_LV_LEN..self.data.len() + MIN_LV_LEN].copy_from_slice(self.data);
MIN_LV_LEN + self.data.len()
}
pub(crate) fn from_be_bytes_no_len_check(
buf: &'data [u8],
) -> Result<Lv<'data>, ByteConversionError> {
let value_len = buf[0] as usize;
generic_len_check_deserialization(buf, value_len + MIN_LV_LEN)?;
Ok(Self {
data: &buf[MIN_LV_LEN..MIN_LV_LEN + value_len],
raw_data: Some(buf),
})
}
}
#[cfg(test)]
pub mod tests {
use crate::cfdp::lv::Lv;
use crate::cfdp::TlvLvError;
use crate::ByteConversionError;
use std::string::String;
#[test]
fn test_basic() {
let lv_data: [u8; 4] = [1, 2, 3, 4];
let lv_res = Lv::new(&lv_data);
assert!(lv_res.is_ok());
let lv = lv_res.unwrap();
assert!(lv.value().len() > 0);
let val = lv.value();
assert_eq!(val[0], 1);
assert_eq!(val[1], 2);
assert_eq!(val[2], 3);
assert_eq!(val[3], 4);
assert!(!lv.is_empty());
assert_eq!(lv.len_full(), 5);
assert_eq!(lv.len_value(), 4);
}
#[test]
fn test_empty() {
let lv_empty = Lv::new_empty();
assert_eq!(lv_empty.len_value(), 0);
assert_eq!(lv_empty.len_full(), 1);
assert!(lv_empty.is_empty());
let mut buf: [u8; 4] = [0xff; 4];
let res = lv_empty.write_to_be_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, 1);
assert_eq!(buf[0], 0);
}
#[test]
fn test_serialization() {
let lv_data: [u8; 4] = [1, 2, 3, 4];
let lv_res = Lv::new(&lv_data);
assert!(lv_res.is_ok());
let lv = lv_res.unwrap();
let mut buf: [u8; 16] = [0; 16];
let res = lv.write_to_be_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, 5);
assert_eq!(buf[0], 4);
assert_eq!(buf[1], 1);
assert_eq!(buf[2], 2);
assert_eq!(buf[3], 3);
assert_eq!(buf[4], 4);
}
#[test]
fn test_deserialization() {
let mut buf: [u8; 16] = [0; 16];
buf[0] = 4;
buf[1] = 1;
buf[2] = 2;
buf[3] = 3;
buf[4] = 4;
let lv = Lv::from_bytes(&buf);
assert!(lv.is_ok());
let lv = lv.unwrap();
assert!(!lv.is_empty());
assert_eq!(lv.len_value(), 4);
assert_eq!(lv.len_full(), 5);
assert!(lv.raw_data().is_some());
assert_eq!(lv.raw_data().unwrap(), buf);
let val = lv.value();
assert_eq!(val[0], 1);
assert_eq!(val[1], 2);
assert_eq!(val[2], 3);
assert_eq!(val[3], 4);
}
#[test]
fn test_deserialization_empty() {
let buf: [u8; 2] = [0; 2];
let lv_empty = Lv::from_bytes(&buf);
assert!(lv_empty.is_ok());
let lv_empty = lv_empty.unwrap();
assert!(lv_empty.is_empty());
}
#[test]
fn test_data_too_large() {
let data_big: [u8; u8::MAX as usize + 1] = [0; u8::MAX as usize + 1];
let lv = Lv::new(&data_big);
assert!(lv.is_err());
let error = lv.unwrap_err();
if let TlvLvError::DataTooLarge(size) = error {
assert_eq!(size, u8::MAX as usize + 1);
} else {
panic!("invalid exception {:?}", error)
}
}
#[test]
fn test_serialization_buf_too_small() {
let mut buf: [u8; 3] = [0; 3];
let lv_data: [u8; 4] = [1, 2, 3, 4];
let lv = Lv::new(&lv_data).unwrap();
let res = lv.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let error = res.unwrap_err();
if let ByteConversionError::ToSliceTooSmall { found, expected } = error {
assert_eq!(expected, 5);
assert_eq!(found, 3);
} else {
panic!("invalid error {}", error);
}
}
#[test]
fn test_deserialization_buf_too_small() {
let mut buf: [u8; 3] = [0; 3];
buf[0] = 4;
let res = Lv::from_bytes(&buf);
assert!(res.is_err());
let error = res.unwrap_err();
if let ByteConversionError::FromSliceTooSmall { found, expected } = error {
assert_eq!(found, 3);
assert_eq!(expected, 5);
} else {
panic!("invalid error {}", error);
}
}
fn verify_test_str_lv(lv: Lv) {
let mut buf: [u8; 16] = [0; 16];
let res = lv.write_to_be_bytes(&mut buf);
assert!(res.is_ok());
let res = res.unwrap();
assert_eq!(res, 8 + 1);
assert_eq!(buf[0], 8);
assert_eq!(buf[1], b't');
assert_eq!(buf[2], b'e');
assert_eq!(buf[3], b's');
assert_eq!(buf[4], b't');
assert_eq!(buf[5], b'.');
assert_eq!(buf[6], b'b');
assert_eq!(buf[7], b'i');
assert_eq!(buf[8], b'n');
}
#[test]
fn test_str_helper() {
let test_str = "test.bin";
let str_lv = Lv::new_from_str(test_str);
assert!(str_lv.is_ok());
verify_test_str_lv(str_lv.unwrap());
}
#[test]
fn test_string_helper() {
let string = String::from("test.bin");
let str_lv = Lv::new_from_string(&string);
assert!(str_lv.is_ok());
verify_test_str_lv(str_lv.unwrap());
}
}
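
A minimal usage sketch for the `Lv` type above, mirroring its unit tests (assumptions: the crate is consumed under its package name `spacepackets`, and `lv_string_roundtrip` is purely an illustrative helper, not crate API):

use spacepackets::cfdp::lv::Lv;

fn lv_string_roundtrip() {
    // Build an LV from a string slice, as typically done for file path LVs.
    let lv = Lv::new_from_str("test.bin").expect("creating LV failed");
    assert_eq!(lv.len_value(), 8);
    assert_eq!(lv.len_full(), 9);

    // Serialize: the first byte holds the value length, followed by the value itself.
    let mut buf: [u8; 16] = [0; 16];
    let written = lv.write_to_be_bytes(&mut buf).expect("writing LV failed");
    assert_eq!(written, lv.len_full());

    // Deserialize again; raw_data() now returns the full underlying slice.
    let lv_read_back = Lv::from_bytes(&buf).expect("reading LV failed");
    assert!(lv_read_back.raw_data().is_some());
    let value_str = lv_read_back.value_as_str().unwrap().expect("invalid UTF-8");
    assert_eq!(value_str, "test.bin");
}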

src/cfdp/mod.rs Normal file

@ -0,0 +1,209 @@
//! Low-level CCSDS File Delivery Protocol (CFDP) support according to [CCSDS 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf).
use crate::ByteConversionError;
use core::fmt::{Display, Formatter};
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
pub mod lv;
pub mod pdu;
pub mod tlv;
/// This is the name of the standard this module is based on.
pub const CFDP_VERSION_2_NAME: &str = "CCSDS 727.0-B-5";
/// Currently, only this version is supported.
pub const CFDP_VERSION_2: u8 = 0b001;
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum PduType {
FileDirective = 0,
FileData = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum Direction {
TowardsReceiver = 0,
TowardsSender = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum TransmissionMode {
Acknowledged = 0,
Unacknowledged = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum CrcFlag {
NoCrc = 0,
WithCrc = 1,
}
/// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum SegmentMetadataFlag {
NotPresent = 0,
Present = 1,
}
/// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum SegmentationControl {
NoRecordBoundaryPreservation = 0,
WithRecordBoundaryPreservation = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum FaultHandlerCode {
NoticeOfCancellation = 0b0001,
NoticeOfSuspension = 0b0010,
IgnoreError = 0b0011,
AbandonTransaction = 0b0100,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum LenInBytes {
ZeroOrNone = 0,
OneByte = 1,
TwoBytes = 2,
ThreeBytes = 4,
FourBytes = 8,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum ConditionCode {
/// This is not an error condition for which a fault handler override can be specified
NoError = 0b0000,
PositiveAckLimitReached = 0b0001,
KeepAliveLimitReached = 0b0010,
InvalidTransmissionMode = 0b0011,
FilestoreRejection = 0b0100,
FileChecksumFailure = 0b0101,
FileSizeError = 0b0110,
NakLimitReached = 0b0111,
InactivityDetected = 0b1000,
CheckLimitReached = 0b1001,
UnsupportedChecksumType = 0b1011,
/// Not an actual fault condition for which fault handler overrides can be specified
SuspendRequestReceived = 0b1110,
/// Not an actual fault condition for which fault handler overrides can be specified
CancelRequestReceived = 0b1111,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum LargeFileFlag {
/// 32 bit maximum file size and FSS size
Normal = 0,
/// 64 bit maximum file size and FSS size
Large = 1,
}
/// Checksum types according to the
/// [SANA Checksum Types registry](https://sanaregistry.org/r/checksum_identifiers/)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum ChecksumType {
/// Modular legacy checksum
Modular = 0,
Crc32Proximity1 = 1,
Crc32C = 2,
/// Polynomial: 0x4C11DB7. Preferred checksum for now.
Crc32 = 3,
NullChecksum = 15,
}
impl Default for ChecksumType {
fn default() -> Self {
Self::NullChecksum
}
}
pub const NULL_CHECKSUM_U32: [u8; 4] = [0; 4];
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum TlvLvError {
DataTooLarge(usize),
ByteConversionError(ByteConversionError),
/// First value: Found value. Second value: Expected value if there is one.
InvalidTlvTypeField((u8, Option<u8>)),
/// Logically invalid value length detected. The value length may not exceed 255 bytes.
/// Depending on the concrete TLV type, the value length may also be logically invalid.
InvalidValueLength(usize),
/// Only applies to filestore requests and responses. Second name was missing where one is
/// expected.
SecondNameMissing,
/// Invalid action code for filestore requests or responses.
InvalidFilestoreActionCode(u8),
}
impl From<ByteConversionError> for TlvLvError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value)
}
}
impl Display for TlvLvError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
TlvLvError::DataTooLarge(data_len) => {
write!(
f,
"data with size {} larger than allowed {} bytes",
data_len,
u8::MAX
)
}
TlvLvError::ByteConversionError(e) => {
write!(f, "{}", e)
}
TlvLvError::InvalidTlvTypeField((found, expected)) => {
write!(
f,
"invalid TLV type field, found {found}, possibly expected {expected:?}"
)
}
TlvLvError::InvalidValueLength(len) => {
write!(f, "invalid value length {len} detected")
}
TlvLvError::SecondNameMissing => {
write!(f, "second name missing for filestore request or response")
}
TlvLvError::InvalidFilestoreActionCode(raw) => {
write!(f, "invalid filestore action code with raw value {raw}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for TlvLvError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
TlvLvError::ByteConversionError(e) => Some(e),
_ => None,
}
}
}
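
A short sketch of how the `num_enum` derives above are used to convert raw protocol field values (assumptions: crate consumed as `spacepackets`; `parse_condition_code` and `demo` are illustrative helpers, not crate API):

use spacepackets::cfdp::{ConditionCode, PduType};

// TryFromPrimitive provides a fallible conversion from the raw 4-bit field value.
fn parse_condition_code(raw: u8) -> Option<ConditionCode> {
    ConditionCode::try_from(raw).ok()
}

fn demo() {
    assert_eq!(parse_condition_code(0b0000), Some(ConditionCode::NoError));
    // 0b1010 is not an assigned condition code, so the conversion fails.
    assert_eq!(parse_condition_code(0b1010), None);
    // IntoPrimitive allows converting back to the raw representation.
    let raw: u8 = PduType::FileData.into();
    assert_eq!(raw, 1);
}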

src/cfdp/pdu/eof.rs Normal file

@ -0,0 +1,209 @@
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
FileDirectiveType, PduError, PduHeader,
};
use crate::cfdp::tlv::EntityIdTlv;
use crate::cfdp::{ConditionCode, CrcFlag, LargeFileFlag};
use crate::ByteConversionError;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// EOF PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.2.2.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct EofPdu {
pdu_header: PduHeader,
condition_code: ConditionCode,
file_checksum: u32,
file_size: u64,
fault_location: Option<EntityIdTlv>,
}
impl EofPdu {
pub fn new_no_error(pdu_header: PduHeader, file_checksum: u32, file_size: u64) -> Self {
let mut eof_pdu = Self {
pdu_header,
condition_code: ConditionCode::NoError,
file_checksum,
file_size,
fault_location: None,
};
eof_pdu.pdu_header.pdu_datafield_len = eof_pdu.calc_pdu_datafield_len() as u16;
eof_pdu
}
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
pub fn written_len(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
pub fn file_checksum(&self) -> u32 {
self.file_checksum
}
pub fn file_size(&self) -> u64 {
self.file_size
}
fn calc_pdu_datafield_len(&self) -> usize {
// One directive type octet, 4 bits condition code, 4 spare bits.
let mut len = 2 + core::mem::size_of::<u32>() + 4;
if self.pdu_header.pdu_conf.file_flag == LargeFileFlag::Large {
len += 4;
}
if let Some(fault_location) = self.fault_location {
len += fault_location.len_full();
}
len
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.written_len();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: expected_len,
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::EofPdu as u8;
current_idx += 1;
buf[current_idx] = (self.condition_code as u8) << 4;
current_idx += 1;
buf[current_idx..current_idx + 4].copy_from_slice(&self.file_checksum.to_be_bytes());
current_idx += 4;
current_idx += write_fss_field(
self.pdu_header.pdu_conf.file_flag,
self.file_size,
&mut buf[current_idx..],
)?;
if let Some(fault_location) = self.fault_location {
current_idx += fault_location.write_to_be_bytes(buf)?;
}
if self.pdu_header.pdu_conf.crc_flag == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
pub fn from_bytes(buf: &[u8]) -> Result<EofPdu, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let is_large_file = pdu_header.pdu_conf.file_flag == LargeFileFlag::Large;
let mut min_expected_len = 2 + 4 + 4;
if is_large_file {
min_expected_len += 4;
}
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::EofPdu),
}
})?;
if directive_type != FileDirectiveType::EofPdu {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::EofPdu,
});
}
current_idx += 1;
let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111)
.map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?;
current_idx += 1;
let file_checksum =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
current_idx += 4;
let (fss_field_len, file_size) =
read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx..]);
current_idx += fss_field_len;
let mut fault_location = None;
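// A fault location TLV is only expected if the condition code signals a fault and there are
// leftover bytes in the PDU data field.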
if condition_code != ConditionCode::NoError && current_idx < full_len_without_crc {
fault_location = Some(EntityIdTlv::from_bytes(&buf[current_idx..])?);
}
Ok(Self {
pdu_header,
condition_code,
file_checksum,
file_size,
fault_location,
})
}
}
#[cfg(test)]
mod tests {
use crate::cfdp::pdu::eof::EofPdu;
use crate::cfdp::pdu::tests::{common_pdu_conf, verify_raw_header};
use crate::cfdp::pdu::{FileDirectiveType, PduHeader};
use crate::cfdp::{ConditionCode, CrcFlag, LargeFileFlag};
#[test]
fn test_basic() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
assert_eq!(eof_pdu.written_len(), pdu_header.header_len() + 2 + 4 + 4);
assert_eq!(eof_pdu.file_checksum(), 0x01020304);
assert_eq!(eof_pdu.file_size(), 12);
assert_eq!(eof_pdu.condition_code(), ConditionCode::NoError);
}
#[test]
fn test_serialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
let res = eof_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, eof_pdu.written_len());
verify_raw_header(eof_pdu.pdu_header(), &buf);
let mut current_idx = eof_pdu.pdu_header().header_len();
buf[current_idx] = FileDirectiveType::EofPdu as u8;
current_idx += 1;
assert_eq!(
(buf[current_idx] >> 4) & 0b1111,
ConditionCode::NoError as u8
);
current_idx += 1;
assert_eq!(
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()),
0x01020304
);
current_idx += 4;
assert_eq!(
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()),
12
);
current_idx += 4;
assert_eq!(current_idx, written);
}
#[test]
fn test_deserialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
eof_pdu.write_to_bytes(&mut buf).unwrap();
let eof_read_back = EofPdu::from_bytes(&buf)
.unwrap_or_else(|e| panic!("deserialization failed with: {e}"));
assert_eq!(eof_read_back, eof_pdu);
}
}

src/cfdp/pdu/file_data.rs (new file)
@@ -0,0 +1,397 @@
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
PduError, PduHeader,
};
use crate::cfdp::{CrcFlag, LargeFileFlag, PduType, SegmentMetadataFlag};
use crate::ByteConversionError;
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum RecordContinuationState {
NoStartNoEnd = 0b00,
StartWithoutEnd = 0b01,
EndWithoutStart = 0b10,
StartAndEnd = 0b11,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct SegmentMetadata<'seg_meta> {
record_continuation_state: RecordContinuationState,
metadata: Option<&'seg_meta [u8]>,
}
impl<'seg_meta> SegmentMetadata<'seg_meta> {
pub fn new(
record_continuation_state: RecordContinuationState,
metadata: Option<&'seg_meta [u8]>,
) -> Option<Self> {
if let Some(metadata) = metadata {
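// The segment metadata length field is only 6 bits wide, so at most 63 bytes fit.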
if metadata.len() > 2_usize.pow(6) - 1 {
return None;
}
}
Some(Self {
record_continuation_state,
metadata,
})
}
pub fn written_len(&self) -> usize {
// Map empty metadata to 0 and slice to its length.
1 + self.metadata.map_or(0, |meta| meta.len())
}
pub(crate) fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.written_len() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.written_len(),
});
}
buf[0] = ((self.record_continuation_state as u8) << 6)
| self.metadata.map_or(0, |meta| meta.len() as u8);
if let Some(metadata) = self.metadata {
buf[1..1 + metadata.len()].copy_from_slice(metadata)
}
Ok(self.written_len())
}
pub(crate) fn from_bytes(buf: &'seg_meta [u8]) -> Result<Self, ByteConversionError> {
if buf.is_empty() {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: 2,
});
}
let mut metadata = None;
let seg_metadata_len = (buf[0] & 0b111111) as usize;
if seg_metadata_len > 0 {
metadata = Some(&buf[1..1 + seg_metadata_len]);
}
Ok(Self {
// Can't fail, only 2 bits
record_continuation_state: RecordContinuationState::try_from((buf[0] >> 6) & 0b11)
.unwrap(),
metadata,
})
}
}
/// File Data PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.3.
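///
/// A minimal sketch, mirroring the unit tests below, which writes a small file data segment
/// without segment metadata and reads it back. The entity IDs, sequence number, offset and
/// payload are placeholder values.
///
/// ```ignore
/// use crate::cfdp::pdu::file_data::FileDataPdu;
/// use crate::cfdp::pdu::{CommonPduConfig, PduHeader};
/// use crate::util::UbfU8;
///
/// let common_conf =
/// CommonPduConfig::new_with_byte_fields(UbfU8::new(1), UbfU8::new(2), UbfU8::new(3)).unwrap();
/// let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
/// let file_data: [u8; 4] = [1, 2, 3, 4];
/// // File data segment starting at file offset 10.
/// let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
/// let mut buf: [u8; 32] = [0; 32];
/// fd_pdu.write_to_bytes(&mut buf).unwrap();
/// assert_eq!(FileDataPdu::from_bytes(&buf).unwrap(), fd_pdu);
/// ```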
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FileDataPdu<'seg_meta, 'file_data> {
pdu_header: PduHeader,
#[cfg_attr(feature = "serde", serde(borrow))]
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data: &'file_data [u8],
}
impl<'seg_meta, 'file_data> FileDataPdu<'seg_meta, 'file_data> {
pub fn new_with_seg_metadata(
pdu_header: PduHeader,
segment_metadata: SegmentMetadata<'seg_meta>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new_generic(pdu_header, Some(segment_metadata), offset, file_data)
}
pub fn new_no_seg_metadata(
pdu_header: PduHeader,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new_generic(pdu_header, None, offset, file_data)
}
pub fn new_generic(
mut pdu_header: PduHeader,
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
pdu_header.pdu_type = PduType::FileData;
if segment_metadata.is_some() {
pdu_header.seg_metadata_flag = SegmentMetadataFlag::Present;
}
let mut pdu = Self {
pdu_header,
segment_metadata,
offset,
file_data,
};
pdu.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
fn calc_pdu_datafield_len(&self) -> usize {
let mut len = core::mem::size_of::<u32>();
if self.pdu_header.pdu_conf.file_flag == LargeFileFlag::Large {
len += 4;
}
if let Some(segment_metadata) = &self.segment_metadata {
len += segment_metadata.written_len();
}
len += self.file_data.len();
if self.pdu_header.pdu_conf.crc_flag == CrcFlag::WithCrc {
len += 2;
}
len
}
pub fn written_len(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
pub fn offset(&self) -> u64 {
self.offset
}
pub fn file_data(&self) -> &'file_data [u8] {
self.file_data
}
pub fn segment_metadata(&self) -> Option<&SegmentMetadata> {
self.segment_metadata.as_ref()
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
if buf.len() < self.written_len() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.written_len(),
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
if let Some(segment_metadata) = &self.segment_metadata {
current_idx += segment_metadata.write_to_bytes(&mut buf[current_idx..])?;
}
current_idx += write_fss_field(
self.pdu_header.common_pdu_conf().file_flag,
self.offset,
&mut buf[current_idx..],
)?;
buf[current_idx..current_idx + self.file_data.len()].copy_from_slice(self.file_data);
current_idx += self.file_data.len();
if self.pdu_header.pdu_conf.crc_flag == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
pub fn from_bytes<'longest: 'seg_meta + 'file_data>(
buf: &'longest [u8],
) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let min_expected_len = current_idx + core::mem::size_of::<u32>();
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let mut segment_metadata = None;
if pdu_header.seg_metadata_flag == SegmentMetadataFlag::Present {
segment_metadata = Some(SegmentMetadata::from_bytes(&buf[current_idx..])?);
current_idx += segment_metadata.as_ref().unwrap().written_len();
}
let (fss, offset) = read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx..]);
current_idx += fss;
if current_idx > full_len_without_crc {
return Err(ByteConversionError::FromSliceTooSmall {
found: current_idx,
expected: full_len_without_crc,
}
.into());
}
Ok(Self {
pdu_header,
segment_metadata,
offset,
file_data: &buf[current_idx..full_len_without_crc],
})
}
}
#[cfg(test)]
mod tests {
use crate::cfdp::pdu::file_data::{FileDataPdu, RecordContinuationState, SegmentMetadata};
use crate::cfdp::pdu::{CommonPduConfig, PduHeader};
use crate::cfdp::{SegmentMetadataFlag, SegmentationControl};
use crate::util::UbfU8;
#[test]
fn test_basic() {
let src_id = UbfU8::new(1);
let dest_id = UbfU8::new(2);
let transaction_seq_num = UbfU8::new(3);
let common_conf =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_seq_num).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
assert_eq!(fd_pdu.file_data(), file_data);
assert_eq!(fd_pdu.offset(), 10);
assert!(fd_pdu.segment_metadata().is_none());
assert_eq!(
fd_pdu.written_len(),
fd_pdu.pdu_header.header_len() + core::mem::size_of::<u32>() + 4
);
}
#[test]
fn test_serialization() {
let src_id = UbfU8::new(1);
let dest_id = UbfU8::new(2);
let transaction_seq_num = UbfU8::new(3);
let common_conf =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_seq_num).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
let mut buf: [u8; 32] = [0; 32];
let res = fd_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(
written,
fd_pdu.pdu_header.header_len() + core::mem::size_of::<u32>() + 4
);
let mut current_idx = fd_pdu.pdu_header.header_len();
let file_size = u32::from_be_bytes(
buf[fd_pdu.pdu_header.header_len()..fd_pdu.pdu_header.header_len() + 4]
.try_into()
.unwrap(),
);
current_idx += 4;
assert_eq!(file_size, 10);
assert_eq!(buf[current_idx], 1);
current_idx += 1;
assert_eq!(buf[current_idx], 2);
current_idx += 1;
assert_eq!(buf[current_idx], 3);
current_idx += 1;
assert_eq!(buf[current_idx], 4);
}
#[test]
fn test_deserialization() {
let src_id = UbfU8::new(1);
let dest_id = UbfU8::new(2);
let transaction_seq_num = UbfU8::new(3);
let common_conf =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_seq_num).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
let mut buf: [u8; 32] = [0; 32];
fd_pdu.write_to_bytes(&mut buf).unwrap();
let fd_pdu_read_back = FileDataPdu::from_bytes(&buf);
assert!(fd_pdu_read_back.is_ok());
let fd_pdu_read_back = fd_pdu_read_back.unwrap();
assert_eq!(fd_pdu_read_back, fd_pdu);
}
#[test]
fn test_with_seg_metadata_serialization() {
let src_id = UbfU8::new(1);
let dest_id = UbfU8::new(2);
let transaction_seq_num = UbfU8::new(3);
let common_conf =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_seq_num).unwrap();
let pdu_header = PduHeader::new_for_file_data(
common_conf,
0,
SegmentMetadataFlag::Present,
SegmentationControl::WithRecordBoundaryPreservation,
);
let file_data: [u8; 4] = [1, 2, 3, 4];
let seg_metadata: [u8; 4] = [4, 3, 2, 1];
let segment_meta =
SegmentMetadata::new(RecordContinuationState::StartAndEnd, Some(&seg_metadata))
.unwrap();
let fd_pdu = FileDataPdu::new_with_seg_metadata(pdu_header, segment_meta, 10, &file_data);
assert!(fd_pdu.segment_metadata().is_some());
assert_eq!(*fd_pdu.segment_metadata().unwrap(), segment_meta);
assert_eq!(
fd_pdu.written_len(),
fd_pdu.pdu_header.header_len()
+ 1
+ seg_metadata.len()
+ core::mem::size_of::<u32>()
+ 4
);
let mut buf: [u8; 32] = [0; 32];
fd_pdu
.write_to_bytes(&mut buf)
.expect("writing FD PDU failed");
let mut current_idx = fd_pdu.pdu_header.header_len();
assert_eq!(
RecordContinuationState::try_from((buf[current_idx] >> 6) & 0b11).unwrap(),
RecordContinuationState::StartAndEnd
);
assert_eq!((buf[current_idx] & 0b111111) as usize, seg_metadata.len());
current_idx += 1;
assert_eq!(buf[current_idx], 4);
current_idx += 1;
assert_eq!(buf[current_idx], 3);
current_idx += 1;
assert_eq!(buf[current_idx], 2);
current_idx += 1;
assert_eq!(buf[current_idx], 1);
current_idx += 1;
// Still verify that the rest is written correctly.
assert_eq!(
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()),
10
);
current_idx += 4;
assert_eq!(buf[current_idx], 1);
current_idx += 1;
assert_eq!(buf[current_idx], 2);
current_idx += 1;
assert_eq!(buf[current_idx], 3);
current_idx += 1;
assert_eq!(buf[current_idx], 4);
current_idx += 1;
assert_eq!(current_idx, fd_pdu.written_len());
}
#[test]
fn test_with_seg_metadata_deserialization() {
let src_id = UbfU8::new(1);
let dest_id = UbfU8::new(2);
let transaction_seq_num = UbfU8::new(3);
let common_conf =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_seq_num).unwrap();
let pdu_header = PduHeader::new_for_file_data(
common_conf,
0,
SegmentMetadataFlag::Present,
SegmentationControl::WithRecordBoundaryPreservation,
);
let file_data: [u8; 4] = [1, 2, 3, 4];
let seg_metadata: [u8; 4] = [4, 3, 2, 1];
let segment_meta =
SegmentMetadata::new(RecordContinuationState::StartAndEnd, Some(&seg_metadata))
.unwrap();
let fd_pdu = FileDataPdu::new_with_seg_metadata(pdu_header, segment_meta, 10, &file_data);
let mut buf: [u8; 32] = [0; 32];
fd_pdu
.write_to_bytes(&mut buf)
.expect("writing FD PDU failed");
let fd_pdu_read_back = FileDataPdu::from_bytes(&buf);
assert!(fd_pdu_read_back.is_ok());
let fd_pdu_read_back = fd_pdu_read_back.unwrap();
assert_eq!(fd_pdu_read_back, fd_pdu);
}
}

src/cfdp/pdu/finished.rs (new file)
@@ -0,0 +1,339 @@
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, FileDirectiveType, PduError, PduHeader,
};
use crate::cfdp::tlv::{EntityIdTlv, Tlv, TlvType, TlvTypeField};
use crate::cfdp::{ConditionCode, CrcFlag, PduType, TlvLvError};
use crate::ByteConversionError;
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum DeliveryCode {
Complete = 0,
Incomplete = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum FileStatus {
DiscardDeliberately = 0b00,
DiscardedFsRejection = 0b01,
Retained = 0b10,
Unreported = 0b11,
}
/// Finished PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.2.3.
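///
/// A minimal sketch, mirroring the unit tests below, for a Finished PDU without filestore
/// responses and without a fault location. The entity IDs and the sequence number are
/// placeholder values.
///
/// ```ignore
/// use crate::cfdp::pdu::finished::{DeliveryCode, FileStatus, FinishedPdu};
/// use crate::cfdp::pdu::{CommonPduConfig, PduHeader};
/// use crate::util::UbfU8;
///
/// let pdu_conf =
/// CommonPduConfig::new_with_byte_fields(UbfU8::new(1), UbfU8::new(2), UbfU8::new(3)).unwrap();
/// let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
/// let finished_pdu =
/// FinishedPdu::new_default(pdu_header, DeliveryCode::Complete, FileStatus::Retained);
/// let mut buf: [u8; 64] = [0; 64];
/// finished_pdu.write_to_bytes(&mut buf).unwrap();
/// assert_eq!(FinishedPdu::from_bytes(&buf).unwrap(), finished_pdu);
/// ```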
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FinishedPdu<'fs_responses> {
pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
file_status: FileStatus,
fs_responses: Option<&'fs_responses [u8]>,
fault_location: Option<EntityIdTlv>,
}
impl<'fs_responses> FinishedPdu<'fs_responses> {
/// Default finished PDU: No error (no fault location field) and no filestore responses.
pub fn new_default(
pdu_header: PduHeader,
delivery_code: DeliveryCode,
file_status: FileStatus,
) -> Self {
Self::new_generic(
pdu_header,
ConditionCode::NoError,
delivery_code,
file_status,
None,
None,
)
}
pub fn new_with_error(
pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
file_status: FileStatus,
fault_location: EntityIdTlv,
) -> Self {
Self::new_generic(
pdu_header,
condition_code,
delivery_code,
file_status,
None,
Some(fault_location),
)
}
pub fn new_generic(
mut pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
file_status: FileStatus,
fs_responses: Option<&'fs_responses [u8]>,
fault_location: Option<EntityIdTlv>,
) -> Self {
pdu_header.pdu_type = PduType::FileDirective;
let mut finished_pdu = Self {
pdu_header,
condition_code,
delivery_code,
file_status,
fs_responses,
fault_location,
};
finished_pdu.pdu_header.pdu_datafield_len = finished_pdu.calc_pdu_datafield_len() as u16;
finished_pdu
}
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
pub fn written_len(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
pub fn delivery_code(&self) -> DeliveryCode {
self.delivery_code
}
pub fn file_status(&self) -> FileStatus {
self.file_status
}
pub fn filestore_responses(&self) -> Option<&'fs_responses [u8]> {
self.fs_responses
}
pub fn fault_location(&self) -> Option<EntityIdTlv> {
self.fault_location
}
fn calc_pdu_datafield_len(&self) -> usize {
// One directive type octet and one octet for the condition code, delivery code and file status.
let mut base_len = 2;
if let Some(fs_responses) = self.fs_responses {
base_len += fs_responses.len();
}
if let Some(fault_location) = self.fault_location {
base_len += fault_location.len_full();
}
if self.pdu_header.pdu_conf.crc_flag == CrcFlag::WithCrc {
base_len += 2;
}
base_len
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.written_len();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: expected_len,
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::FinishedPdu as u8;
current_idx += 1;
buf[current_idx] = ((self.condition_code as u8) << 4)
| ((self.delivery_code as u8) << 2)
| self.file_status as u8;
current_idx += 1;
if let Some(fs_responses) = self.fs_responses {
buf[current_idx..current_idx + fs_responses.len()].copy_from_slice(fs_responses);
current_idx += fs_responses.len();
}
if let Some(fault_location) = self.fault_location {
current_idx += fault_location.write_to_be_bytes(&mut buf[current_idx..])?;
}
if self.pdu_header.pdu_conf.crc_flag == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
/// Generates [Self] from a raw bytestream.
pub fn from_bytes(buf: &'fs_responses [u8]) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let min_expected_len = current_idx + 2;
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::FinishedPdu),
}
})?;
if directive_type != FileDirectiveType::FinishedPdu {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::FinishedPdu,
});
}
current_idx += 1;
let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111)
.map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?;
// Unwrapping is okay for both of the following conversions because they cannot fail.
let delivery_code = DeliveryCode::try_from((buf[current_idx] >> 2) & 0b1).unwrap();
let file_status = FileStatus::try_from(buf[current_idx] & 0b11).unwrap();
current_idx += 1;
let (fs_responses, fault_location) =
Self::parse_tlv_fields(current_idx, full_len_without_crc, buf)?;
Ok(Self {
pdu_header,
condition_code,
delivery_code,
file_status,
fs_responses,
fault_location,
})
}
fn parse_tlv_fields(
mut current_idx: usize,
full_len_without_crc: usize,
buf: &'fs_responses [u8],
) -> Result<(Option<&'fs_responses [u8]>, Option<EntityIdTlv>), PduError> {
let mut fs_responses = None;
let mut fault_location = None;
let start_of_fs_responses = current_idx;
// There may be leftover filestore response(s) and/or a fault location field.
while current_idx < full_len_without_crc {
let next_tlv = Tlv::from_bytes(&buf[current_idx..])?;
match next_tlv.tlv_type_field() {
TlvTypeField::Standard(tlv_type) => {
if tlv_type == TlvType::FilestoreResponse {
current_idx += next_tlv.len_full();
if current_idx == full_len_without_crc {
fs_responses = Some(&buf[start_of_fs_responses..current_idx]);
}
} else if tlv_type == TlvType::EntityId {
// If at least one filestore response was parsed, capture the raw filestore response bytes.
if current_idx > start_of_fs_responses {
fs_responses = Some(&buf[start_of_fs_responses..current_idx]);
}
fault_location = Some(EntityIdTlv::from_bytes(&buf[current_idx..])?);
current_idx += fault_location.as_ref().unwrap().len_full();
// This is considered a configuration error: The entity ID has to be the
// last TLV, everything else would break the whole handling of the packet
// TLVs.
if current_idx != full_len_without_crc {
return Err(PduError::FormatError);
}
} else {
return Err(TlvLvError::InvalidTlvTypeField((tlv_type as u8, None)).into());
}
}
TlvTypeField::Custom(raw) => {
return Err(TlvLvError::InvalidTlvTypeField((raw, None)).into());
}
}
}
Ok((fs_responses, fault_location))
}
}
#[cfg(test)]
mod tests {
use crate::cfdp::pdu::finished::{DeliveryCode, FileStatus, FinishedPdu};
use crate::cfdp::pdu::tests::{common_pdu_conf, verify_raw_header};
use crate::cfdp::pdu::{FileDirectiveType, PduHeader};
use crate::cfdp::{ConditionCode, CrcFlag, LargeFileFlag};
fn generic_finished_pdu(
crc_flag: CrcFlag,
fss: LargeFileFlag,
delivery_code: DeliveryCode,
file_status: FileStatus,
) -> FinishedPdu<'static> {
let pdu_header = PduHeader::new_no_file_data(common_pdu_conf(crc_flag, fss), 0);
FinishedPdu::new_default(pdu_header, delivery_code, file_status)
}
#[test]
fn test_basic() {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
DeliveryCode::Complete,
FileStatus::Retained,
);
assert_eq!(finished_pdu.condition_code(), ConditionCode::NoError);
assert_eq!(finished_pdu.delivery_code(), DeliveryCode::Complete);
assert_eq!(finished_pdu.file_status(), FileStatus::Retained);
assert_eq!(finished_pdu.filestore_responses(), None);
assert_eq!(finished_pdu.fault_location(), None);
assert_eq!(finished_pdu.pdu_header().pdu_datafield_len, 2);
}
fn generic_serialization_test_no_error(delivery_code: DeliveryCode, file_status: FileStatus) {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
delivery_code,
file_status,
);
let mut buf: [u8; 64] = [0; 64];
let written = finished_pdu.write_to_bytes(&mut buf);
assert!(written.is_ok());
let written = written.unwrap();
assert_eq!(written, finished_pdu.written_len());
assert_eq!(written, finished_pdu.pdu_header().header_len() + 2);
verify_raw_header(finished_pdu.pdu_header(), &buf);
let mut current_idx = finished_pdu.pdu_header().header_len();
assert_eq!(buf[current_idx], FileDirectiveType::FinishedPdu as u8);
current_idx += 1;
assert_eq!(
(buf[current_idx] >> 4) & 0b1111,
ConditionCode::NoError as u8
);
assert_eq!((buf[current_idx] >> 2) & 0b1, delivery_code as u8);
assert_eq!(buf[current_idx] & 0b11, file_status as u8);
assert_eq!(current_idx + 1, written);
}
#[test]
fn test_serialization_simple() {
generic_serialization_test_no_error(DeliveryCode::Complete, FileStatus::Retained);
}
#[test]
fn test_serialization_simple_2() {
generic_serialization_test_no_error(
DeliveryCode::Incomplete,
FileStatus::DiscardDeliberately,
);
}
#[test]
fn test_serialization_simple_3() {
generic_serialization_test_no_error(DeliveryCode::Incomplete, FileStatus::Unreported);
}
#[test]
fn test_deserialization_simple() {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
DeliveryCode::Complete,
FileStatus::Retained,
);
let mut buf: [u8; 64] = [0; 64];
finished_pdu.write_to_bytes(&mut buf).unwrap();
let read_back = FinishedPdu::from_bytes(&buf);
assert!(read_back.is_ok());
let read_back = read_back.unwrap();
assert_eq!(finished_pdu, read_back);
}
}

src/cfdp/pdu/metadata.rs (new file)
@@ -0,0 +1,534 @@
use crate::cfdp::lv::Lv;
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
FileDirectiveType, PduError, PduHeader,
};
use crate::cfdp::tlv::Tlv;
use crate::cfdp::{ChecksumType, CrcFlag, LargeFileFlag, PduType};
use crate::ByteConversionError;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct MetadataGenericParams {
pub closure_requested: bool,
pub checksum_type: ChecksumType,
pub file_size: u64,
}
impl MetadataGenericParams {
pub fn new(closure_requested: bool, checksum_type: ChecksumType, file_size: u64) -> Self {
Self {
closure_requested,
checksum_type,
file_size,
}
}
}
pub fn build_metadata_opts_from_slice(
buf: &mut [u8],
tlvs: &[Tlv],
) -> Result<usize, ByteConversionError> {
let mut written = 0;
for tlv in tlvs {
written += tlv.write_to_bytes(&mut buf[written..])?;
}
Ok(written)
}
#[cfg(feature = "alloc")]
pub fn build_metadata_opts_from_vec(
buf: &mut [u8],
tlvs: &Vec<Tlv>,
) -> Result<usize, ByteConversionError> {
build_metadata_opts_from_slice(buf, tlvs.as_slice())
}
/// Helper structure to loop through all options of a metadata PDU. It should be noted that
/// iterators in Rust are not fallible, but the TLV creation can fail, for example if the raw TLV
/// data is invalid for some reason. In that case, the iterator will yield [None] because there
/// is no way to recover from this.
///
/// The user can accumulate the length of all TLVs yielded by the iterator and compare it against
/// the full length of the options to check whether the iterator was able to parse all TLVs
/// successfully.
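///
/// A short sketch of the length accumulation described above, assuming a `metadata_pdu` which
/// was parsed or constructed elsewhere:
///
/// ```ignore
/// let mut accumulated_len = 0;
/// if let Some(opts_iter) = metadata_pdu.options_iter() {
/// for tlv in opts_iter {
/// accumulated_len += tlv.len_full();
/// }
/// // If this assertion fails, some of the raw option TLV data could not be parsed.
/// assert_eq!(accumulated_len, metadata_pdu.options().unwrap().len());
/// }
/// ```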
pub struct OptionsIter<'opts> {
opt_buf: &'opts [u8],
current_idx: usize,
}
impl<'opts> Iterator for OptionsIter<'opts> {
type Item = Tlv<'opts>;
fn next(&mut self) -> Option<Self::Item> {
if self.current_idx == self.opt_buf.len() {
return None;
}
let tlv = Tlv::from_bytes(&self.opt_buf[self.current_idx..]);
// Rust iterators are not fallible, so the only option is to stop the iteration here.
if tlv.is_err() {
return None;
}
let tlv = tlv.unwrap();
self.current_idx += tlv.len_full();
Some(tlv)
}
}
/// Metadata PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.2.5.
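///
/// A minimal sketch, mirroring the unit tests below, for a Metadata PDU without options. The
/// entity IDs, sequence number, file names and metadata parameters are placeholder values.
///
/// ```ignore
/// use crate::cfdp::lv::Lv;
/// use crate::cfdp::pdu::metadata::{MetadataGenericParams, MetadataPdu};
/// use crate::cfdp::pdu::{CommonPduConfig, PduHeader};
/// use crate::cfdp::ChecksumType;
/// use crate::util::UbfU8;
///
/// let pdu_conf =
/// CommonPduConfig::new_with_byte_fields(UbfU8::new(1), UbfU8::new(2), UbfU8::new(3)).unwrap();
/// let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
/// let metadata_params = MetadataGenericParams::new(false, ChecksumType::Crc32, 0x1010);
/// let src_file_name = Lv::new_from_str("hello-world.txt").unwrap();
/// let dest_file_name = Lv::new_from_str("hello-world2.txt").unwrap();
/// let metadata_pdu =
/// MetadataPdu::new_no_opts(pdu_header, metadata_params, src_file_name, dest_file_name);
/// let mut buf: [u8; 64] = [0; 64];
/// metadata_pdu.write_to_bytes(&mut buf).unwrap();
/// assert_eq!(MetadataPdu::from_bytes(&buf).unwrap(), metadata_pdu);
/// ```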
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct MetadataPdu<'src_name, 'dest_name, 'opts> {
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
#[cfg_attr(feature = "serde", serde(borrow))]
src_file_name: Lv<'src_name>,
#[cfg_attr(feature = "serde", serde(borrow))]
dest_file_name: Lv<'dest_name>,
options: Option<&'opts [u8]>,
}
impl<'src_name, 'dest_name, 'opts> MetadataPdu<'src_name, 'dest_name, 'opts> {
pub fn new_no_opts(
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
) -> Self {
Self::new(
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
None,
)
}
pub fn new_with_opts(
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
options: &'opts [u8],
) -> Self {
Self::new(
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
Some(options),
)
}
pub fn new(
mut pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
options: Option<&'opts [u8]>,
) -> Self {
pdu_header.pdu_type = PduType::FileDirective;
let mut pdu = Self {
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
options,
};
pdu.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
pub fn metadata_params(&self) -> &MetadataGenericParams {
&self.metadata_params
}
pub fn src_file_name(&self) -> Lv<'src_name> {
self.src_file_name
}
pub fn dest_file_name(&self) -> Lv<'dest_name> {
self.dest_file_name
}
pub fn options(&self) -> Option<&'opts [u8]> {
self.options
}
/// Yield an iterator which can be used to loop through all options. Returns [None] if the
/// options field is empty.
pub fn options_iter(&self) -> Option<OptionsIter<'opts>> {
Some(OptionsIter {
opt_buf: self.options?,
current_idx: 0,
})
}
pub fn written_len(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
fn calc_pdu_datafield_len(&self) -> usize {
// One directive type octet and one byte of the directive parameter field.
let mut len = 2;
if self.pdu_header.common_pdu_conf().file_flag == LargeFileFlag::Large {
len += 8;
} else {
len += 4;
}
len += self.src_file_name.len_full();
len += self.dest_file_name.len_full();
if let Some(opts) = self.options {
len += opts.len();
}
if self.pdu_header.pdu_conf.crc_flag == CrcFlag::WithCrc {
len += 2;
}
len
}
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.written_len();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: expected_len,
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::MetadataPdu as u8;
current_idx += 1;
// The closure requested flag is the second most significant bit and the checksum type
// occupies the 4 least significant bits, matching the field extraction in from_bytes.
buf[current_idx] = ((self.metadata_params.closure_requested as u8) << 6)
| (self.metadata_params.checksum_type as u8);
current_idx += 1;
current_idx += write_fss_field(
self.pdu_header.common_pdu_conf().file_flag,
self.metadata_params.file_size,
&mut buf[current_idx..],
)?;
current_idx += self
.src_file_name
.write_to_be_bytes(&mut buf[current_idx..])?;
current_idx += self
.dest_file_name
.write_to_be_bytes(&mut buf[current_idx..])?;
if let Some(opts) = self.options {
buf[current_idx..current_idx + opts.len()].copy_from_slice(opts);
current_idx += opts.len();
}
if self.pdu_header.pdu_conf.crc_flag == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
pub fn from_bytes<'longest: 'src_name + 'dest_name + 'opts>(
buf: &'longest [u8],
) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let is_large_file = pdu_header.pdu_conf.file_flag == LargeFileFlag::Large;
// Minimal length: 1 directive octet + FSS (4 bytes) + 2 empty LVs (1 byte each)
let mut min_expected_len = current_idx + 7;
if is_large_file {
min_expected_len += 4;
}
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::MetadataPdu),
}
})?;
if directive_type != FileDirectiveType::MetadataPdu {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::MetadataPdu,
});
}
current_idx += 1;
let (fss_len, file_size) =
read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx + 1..]);
let metadata_params = MetadataGenericParams {
closure_requested: ((buf[current_idx] >> 6) & 0b1) != 0,
checksum_type: ChecksumType::try_from(buf[current_idx] & 0b1111)
.map_err(|_| PduError::InvalidChecksumType(buf[current_idx] & 0b1111))?,
file_size,
};
current_idx += 1 + fss_len;
let src_file_name = Lv::from_bytes(&buf[current_idx..])?;
current_idx += src_file_name.len_full();
let dest_file_name = Lv::from_bytes(&buf[current_idx..])?;
current_idx += dest_file_name.len_full();
// All left-over bytes are options.
let mut options = None;
if current_idx < full_len_without_crc {
options = Some(&buf[current_idx..full_len_without_crc]);
}
Ok(Self {
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
options,
})
}
}
#[cfg(test)]
pub mod tests {
use crate::cfdp::lv::Lv;
use crate::cfdp::pdu::metadata::{
build_metadata_opts_from_slice, build_metadata_opts_from_vec, MetadataGenericParams,
MetadataPdu,
};
use crate::cfdp::pdu::tests::{common_pdu_conf, verify_raw_header};
use crate::cfdp::pdu::{FileDirectiveType, PduHeader};
use crate::cfdp::tlv::{Tlv, TlvType};
use crate::cfdp::{
ChecksumType, CrcFlag, LargeFileFlag, PduType, SegmentMetadataFlag, SegmentationControl,
};
use std::vec;
const SRC_FILENAME: &str = "hello-world.txt";
const DEST_FILENAME: &str = "hello-world2.txt";
fn generic_metadata_pdu<'opts>(
crc_flag: CrcFlag,
fss: LargeFileFlag,
opts: Option<&'opts [u8]>,
) -> (
Lv<'static>,
Lv<'static>,
MetadataPdu<'static, 'static, 'opts>,
) {
let pdu_header = PduHeader::new_no_file_data(common_pdu_conf(crc_flag, fss), 0);
let metadata_params = MetadataGenericParams::new(false, ChecksumType::Crc32, 0x1010);
let src_filename = Lv::new_from_str(SRC_FILENAME).expect("Generating string LV failed");
let dest_filename =
Lv::new_from_str(DEST_FILENAME).expect("Generating destination LV failed");
(
src_filename,
dest_filename,
MetadataPdu::new(
pdu_header,
metadata_params,
src_filename,
dest_filename,
opts,
),
)
}
#[test]
fn test_basic() {
let (src_filename, dest_filename, metadata_pdu) =
generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Normal, None);
assert_eq!(
metadata_pdu.written_len(),
metadata_pdu.pdu_header().header_len()
+ 1
+ 1
+ 4
+ src_filename.len_full()
+ dest_filename.len_full()
);
assert_eq!(metadata_pdu.src_file_name(), src_filename);
assert_eq!(metadata_pdu.dest_file_name(), dest_filename);
assert_eq!(metadata_pdu.options(), None);
}
#[test]
fn test_serialization() {
let (src_filename, dest_filename, metadata_pdu) =
generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Normal, None);
let mut buf: [u8; 64] = [0; 64];
let res = metadata_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header.header_len()
+ 1
+ 1
+ 4
+ src_filename.len_full()
+ dest_filename.len_full()
);
verify_raw_header(metadata_pdu.pdu_header(), &buf);
assert_eq!(buf[7], FileDirectiveType::MetadataPdu as u8);
assert_eq!(buf[8] >> 6, false as u8);
assert_eq!(buf[8] & 0b1111, ChecksumType::Crc32 as u8);
assert_eq!(u32::from_be_bytes(buf[9..13].try_into().unwrap()), 0x1010);
let mut current_idx = 13;
let src_name_from_raw =
Lv::from_bytes(&buf[current_idx..]).expect("Creating source name LV failed");
assert_eq!(src_name_from_raw, src_filename);
current_idx += src_name_from_raw.len_full();
let dest_name_from_raw =
Lv::from_bytes(&buf[current_idx..]).expect("Creating dest name LV failed");
assert_eq!(dest_name_from_raw, dest_filename);
current_idx += dest_name_from_raw.len_full();
// No options, so no additional data here.
assert_eq!(current_idx, written);
}
#[test]
fn test_deserialization() {
let (_, _, metadata_pdu) =
generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Normal, None);
let mut buf: [u8; 64] = [0; 64];
metadata_pdu.write_to_bytes(&mut buf).unwrap();
let pdu_read_back = MetadataPdu::from_bytes(&buf);
assert!(pdu_read_back.is_ok());
let pdu_read_back = pdu_read_back.unwrap();
assert_eq!(pdu_read_back, metadata_pdu);
}
#[test]
fn test_with_crc_flag() {
let (src_filename, dest_filename, metadata_pdu) =
generic_metadata_pdu(CrcFlag::WithCrc, LargeFileFlag::Normal, None);
let mut buf: [u8; 64] = [0; 64];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header().header_len()
+ 1
+ 1
+ core::mem::size_of::<u32>()
+ src_filename.len_full()
+ dest_filename.len_full()
+ 2
);
let pdu_read_back = MetadataPdu::from_bytes(&buf).unwrap();
assert_eq!(pdu_read_back, metadata_pdu);
}
#[test]
fn test_with_large_file_flag() {
let (src_filename, dest_filename, metadata_pdu) =
generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Large, None);
let mut buf: [u8; 64] = [0; 64];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header().header_len()
+ 1
+ 1
+ core::mem::size_of::<u64>()
+ src_filename.len_full()
+ dest_filename.len_full()
);
let pdu_read_back = MetadataPdu::from_bytes(&buf).unwrap();
assert_eq!(pdu_read_back, metadata_pdu);
}
#[test]
fn test_opts_builders() {
let tlv1 = Tlv::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap();
let tlv_slice = [tlv1, tlv2];
let mut buf: [u8; 32] = [0; 32];
let opts = build_metadata_opts_from_slice(&mut buf, &tlv_slice);
assert!(opts.is_ok());
let opts_len = opts.unwrap();
assert_eq!(opts_len, tlv1.len_full() + tlv2.len_full());
let tlv1_conv_back = Tlv::from_bytes(&buf).unwrap();
assert_eq!(tlv1_conv_back, tlv1);
let tlv2_conv_back = Tlv::from_bytes(&buf[tlv1_conv_back.len_full()..]).unwrap();
assert_eq!(tlv2_conv_back, tlv2);
}
#[test]
fn test_opts_builders_from_vec() {
let tlv1 = Tlv::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap();
let tlv_vec = vec![tlv1, tlv2];
let mut buf: [u8; 32] = [0; 32];
let opts = build_metadata_opts_from_vec(&mut buf, &tlv_vec);
assert!(opts.is_ok());
let opts_len = opts.unwrap();
assert_eq!(opts_len, tlv1.len_full() + tlv2.len_full());
let tlv1_conv_back = Tlv::from_bytes(&buf).unwrap();
assert_eq!(tlv1_conv_back, tlv1);
let tlv2_conv_back = Tlv::from_bytes(&buf[tlv1_conv_back.len_full()..]).unwrap();
assert_eq!(tlv2_conv_back, tlv2);
}
#[test]
fn test_with_opts() {
let tlv1 = Tlv::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap();
let tlv_vec = vec![tlv1, tlv2];
let mut opts_buf: [u8; 32] = [0; 32];
let opts_len = build_metadata_opts_from_vec(&mut opts_buf, &tlv_vec).unwrap();
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
Some(&opts_buf[..opts_len]),
);
let mut buf: [u8; 128] = [0; 128];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header.header_len()
+ 1
+ 1
+ 4
+ src_filename.len_full()
+ dest_filename.len_full()
+ opts_len
);
let pdu_read_back = MetadataPdu::from_bytes(&buf).unwrap();
assert_eq!(pdu_read_back, metadata_pdu);
let opts_iter = pdu_read_back.options_iter();
assert!(opts_iter.is_some());
let opts_iter = opts_iter.unwrap();
let mut accumulated_len = 0;
for (idx, opt) in opts_iter.enumerate() {
assert_eq!(tlv_vec[idx], opt);
accumulated_len += opt.len_full();
}
assert_eq!(accumulated_len, pdu_read_back.options().unwrap().len());
}
#[test]
fn test_corrects_pdu_header() {
let pdu_header = PduHeader::new_for_file_data(
common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
0,
SegmentMetadataFlag::NotPresent,
SegmentationControl::NoRecordBoundaryPreservation,
);
let metadata_params = MetadataGenericParams::new(false, ChecksumType::Crc32, 10);
let src_filename = Lv::new_from_str(SRC_FILENAME).expect("Generating string LV failed");
let dest_filename =
Lv::new_from_str(DEST_FILENAME).expect("Generating destination LV failed");
let metadata_pdu =
MetadataPdu::new_no_opts(pdu_header, metadata_params, src_filename, dest_filename);
assert_eq!(metadata_pdu.pdu_header().pdu_type(), PduType::FileDirective);
}
}

src/cfdp/pdu/mod.rs (new file)
@@ -0,0 +1,995 @@
//! CFDP Packet Data Unit (PDU) support.
use crate::cfdp::*;
use crate::util::{UnsignedByteField, UnsignedByteFieldU8, UnsignedEnum};
use crate::ByteConversionError;
use crate::CRC_CCITT_FALSE;
use core::fmt::{Display, Formatter};
#[cfg(feature = "std")]
use std::error::Error;
pub mod eof;
pub mod file_data;
pub mod finished;
pub mod metadata;
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum FileDirectiveType {
EofPdu = 0x04,
FinishedPdu = 0x05,
AckPdu = 0x06,
MetadataPdu = 0x07,
NakPdu = 0x08,
PromptPdu = 0x09,
KeepAlivePdu = 0x0c,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum PduError {
ByteConversionError(ByteConversionError),
/// The found version ID is invalid and not equal to [CFDP_VERSION_2].
CfdpVersionMissmatch(u8),
/// Invalid length for the entity ID detected. Only the values 1, 2, 4 and 8 are supported.
InvalidEntityLen(u8),
/// Invalid length for the transaction sequence number detected. Only the values 1, 2, 4 and 8 are supported.
InvalidTransactionSeqNumLen(u8),
SourceDestIdLenMissmatch {
src_id_len: usize,
dest_id_len: usize,
},
WrongDirectiveType {
found: FileDirectiveType,
expected: FileDirectiveType,
},
/// The directive type field contained a value not in the range of permitted values.
InvalidDirectiveType {
found: u8,
expected: Option<FileDirectiveType>,
},
/// Invalid condition code. Contains the raw detected value.
InvalidConditionCode(u8),
/// Invalid checksum type which is not part of the checksums listed in the
/// [SANA Checksum Types registry](https://sanaregistry.org/r/checksum_identifiers/).
InvalidChecksumType(u8),
FileSizeTooLarge(u64),
/// If the CRC flag for a PDU is enabled and the checksum check fails. Contains raw 16-bit CRC.
ChecksumError(u16),
/// Generic error for invalid PDU formats.
FormatError,
/// Error handling a TLV field.
TlvLvError(TlvLvError),
}
impl Display for PduError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
PduError::InvalidEntityLen(raw_id) => {
write!(
f,
"Invalid PDU entity ID length {raw_id}, only [1, 2, 4, 8] are allowed"
)
}
PduError::InvalidTransactionSeqNumLen(raw_id) => {
write!(
f,
"invalid PDUtransaction seq num length {raw_id}, only [1, 2, 4, 8] are allowed"
)
}
PduError::CfdpVersionMissmatch(raw) => {
write!(
f,
"cfdp version missmatch, found {raw}, expected {CFDP_VERSION_2}"
)
}
PduError::SourceDestIdLenMissmatch {
src_id_len,
dest_id_len,
} => {
write!(
f,
"missmatch of PDU source length {src_id_len} and destination length {dest_id_len}"
)
}
PduError::ByteConversionError(e) => {
write!(f, "{}", e)
}
PduError::FileSizeTooLarge(value) => {
write!(f, "file size value {value} exceeds allowed 32 bit width")
}
PduError::WrongDirectiveType { found, expected } => {
write!(f, "found directive type {found:?}, expected {expected:?}")
}
PduError::InvalidConditionCode(raw_code) => {
write!(f, "found invalid condition code with raw value {raw_code}")
}
PduError::InvalidDirectiveType { found, expected } => {
write!(
f,
"invalid directive type value {found}, expected {expected:?}"
)
}
PduError::InvalidChecksumType(checksum_type) => {
write!(f, "invalid checksum type {checksum_type}")
}
PduError::ChecksumError(checksum) => {
write!(f, "checksum error for CRC {checksum:#04x}")
}
PduError::TlvLvError(error) => {
write!(f, "pdu tlv error: {error}")
}
PduError::FormatError => {
write!(f, "generic PDU format error")
}
}
}
}
#[cfg(feature = "std")]
impl Error for PduError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
PduError::ByteConversionError(e) => Some(e),
PduError::TlvLvError(e) => Some(e),
_ => None,
}
}
}
impl From<ByteConversionError> for PduError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value)
}
}
impl From<TlvLvError> for PduError {
fn from(e: TlvLvError) -> Self {
Self::TlvLvError(e)
}
}
/// Common configuration fields for a PDU.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct CommonPduConfig {
source_entity_id: UnsignedByteField,
dest_entity_id: UnsignedByteField,
pub transaction_seq_num: UnsignedByteField,
pub trans_mode: TransmissionMode,
pub file_flag: LargeFileFlag,
pub crc_flag: CrcFlag,
pub direction: Direction,
}
// TODO: Builder pattern might be applicable here..
impl CommonPduConfig {
pub fn new(
source_id: impl Into<UnsignedByteField>,
dest_id: impl Into<UnsignedByteField>,
transaction_seq_num: impl Into<UnsignedByteField>,
trans_mode: TransmissionMode,
file_flag: LargeFileFlag,
crc_flag: CrcFlag,
direction: Direction,
) -> Result<Self, PduError> {
let (source_id, dest_id) = Self::source_dest_id_check(source_id, dest_id)?;
let transaction_seq_num = transaction_seq_num.into();
if transaction_seq_num.size() != 1
&& transaction_seq_num.size() != 2
&& transaction_seq_num.size() != 4
&& transaction_seq_num.size() != 8
{
return Err(PduError::InvalidTransactionSeqNumLen(
transaction_seq_num.size() as u8,
));
}
Ok(Self {
source_entity_id: source_id,
dest_entity_id: dest_id,
transaction_seq_num,
trans_mode,
file_flag,
crc_flag,
direction,
})
}
pub fn new_with_byte_fields(
source_id: impl Into<UnsignedByteField>,
dest_id: impl Into<UnsignedByteField>,
transaction_seq_num: impl Into<UnsignedByteField>,
) -> Result<Self, PduError> {
Self::new(
source_id,
dest_id,
transaction_seq_num,
TransmissionMode::Acknowledged,
LargeFileFlag::Normal,
CrcFlag::NoCrc,
Direction::TowardsReceiver,
)
}
pub fn source_id(&self) -> UnsignedByteField {
self.source_entity_id
}
fn source_dest_id_check(
source_id: impl Into<UnsignedByteField>,
dest_id: impl Into<UnsignedByteField>,
) -> Result<(UnsignedByteField, UnsignedByteField), PduError> {
let source_id = source_id.into();
let dest_id = dest_id.into();
if source_id.size() != dest_id.size() {
return Err(PduError::SourceDestIdLenMissmatch {
src_id_len: source_id.size(),
dest_id_len: dest_id.size(),
});
}
if source_id.size() != 1
&& source_id.size() != 2
&& source_id.size() != 4
&& source_id.size() != 8
{
return Err(PduError::InvalidEntityLen(source_id.size() as u8));
}
Ok((source_id, dest_id))
}
pub fn set_source_and_dest_id(
&mut self,
source_id: impl Into<UnsignedByteField>,
dest_id: impl Into<UnsignedByteField>,
) -> Result<(), PduError> {
let (source_id, dest_id) = Self::source_dest_id_check(source_id, dest_id)?;
self.source_entity_id = source_id;
self.dest_entity_id = dest_id;
Ok(())
}
pub fn dest_id(&self) -> UnsignedByteField {
self.dest_entity_id
}
}
impl Default for CommonPduConfig {
/// The defaults for the source ID, destination ID and the transaction sequence number are the
/// [UnsignedByteFieldU8] with an initial value of 0.
fn default() -> Self {
// The new function can not fail for these input parameters.
Self::new(
UnsignedByteFieldU8::new(0),
UnsignedByteFieldU8::new(0),
UnsignedByteFieldU8::new(0),
TransmissionMode::Acknowledged,
LargeFileFlag::Normal,
CrcFlag::NoCrc,
Direction::TowardsReceiver,
)
.unwrap()
}
}
pub const FIXED_HEADER_LEN: usize = 4;
/// Abstraction for the PDU header common to all CFDP PDUs.
///
/// For detailed information, refer to chapter 5.1 of the CFDP standard.
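///
/// A minimal sketch, based on the unit tests below, which writes a file directive header and
/// reads it back. The entity IDs, sequence number and data field length are placeholder values.
///
/// ```ignore
/// use crate::cfdp::pdu::{CommonPduConfig, PduHeader};
/// use crate::util::UnsignedByteFieldU8;
///
/// let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(
/// UnsignedByteFieldU8::new(1),
/// UnsignedByteFieldU8::new(2),
/// UnsignedByteFieldU8::new(3),
/// )
/// .unwrap();
/// let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
/// let mut buf: [u8; 16] = [0; 16];
/// let written = pdu_header.write_to_bytes(&mut buf).unwrap();
/// assert_eq!(written, pdu_header.header_len());
/// let (header_read_back, _) = PduHeader::from_bytes(&buf).unwrap();
/// assert_eq!(header_read_back, pdu_header);
/// ```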
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PduHeader {
pdu_type: PduType,
pdu_conf: CommonPduConfig,
seg_metadata_flag: SegmentMetadataFlag,
seg_ctrl: SegmentationControl,
pdu_datafield_len: u16,
}
impl PduHeader {
pub fn new_for_file_data(
pdu_conf: CommonPduConfig,
pdu_datafield_len: u16,
seg_metadata_flag: SegmentMetadataFlag,
seg_ctrl: SegmentationControl,
) -> Self {
Self::new_generic(
PduType::FileData,
pdu_conf,
pdu_datafield_len,
seg_metadata_flag,
seg_ctrl,
)
}
pub fn new_for_file_data_default(pdu_conf: CommonPduConfig, pdu_datafield_len: u16) -> Self {
Self::new_generic(
PduType::FileData,
pdu_conf,
pdu_datafield_len,
SegmentMetadataFlag::NotPresent,
SegmentationControl::NoRecordBoundaryPreservation,
)
}
pub fn new_no_file_data(pdu_conf: CommonPduConfig, pdu_datafield_len: u16) -> Self {
Self::new_generic(
PduType::FileDirective,
pdu_conf,
pdu_datafield_len,
SegmentMetadataFlag::NotPresent,
SegmentationControl::NoRecordBoundaryPreservation,
)
}
pub fn new_generic(
pdu_type: PduType,
pdu_conf: CommonPduConfig,
pdu_datafield_len: u16,
seg_metadata_flag: SegmentMetadataFlag,
seg_ctrl: SegmentationControl,
) -> Self {
Self {
pdu_type,
pdu_conf,
seg_metadata_flag,
seg_ctrl,
pdu_datafield_len,
}
}
/// Returns only the length of the PDU header when written to a raw buffer.
pub fn header_len(&self) -> usize {
FIXED_HEADER_LEN
+ self.pdu_conf.source_entity_id.size()
+ self.pdu_conf.transaction_seq_num.size()
+ self.pdu_conf.dest_entity_id.size()
}
pub fn pdu_datafield_len(&self) -> usize {
self.pdu_datafield_len.into()
}
/// Returns the full length of the PDU when written to a raw buffer, which is the header length
/// plus the PDU datafield length.
pub fn pdu_len(&self) -> usize {
self.header_len() + self.pdu_datafield_len as usize
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
// Internal note: There is currently no way to pass a PDU configuration like this, but
// this check is still kept for defensive programming.
if self.pdu_conf.source_entity_id.size() != self.pdu_conf.dest_entity_id.size() {
return Err(PduError::SourceDestIdLenMissmatch {
src_id_len: self.pdu_conf.source_entity_id.size(),
dest_id_len: self.pdu_conf.dest_entity_id.size(),
});
}
if buf.len() < self.header_len() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.header_len(),
}
.into());
}
let mut current_idx = 0;
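// First octet: 3 bit version number, PDU type, direction, transmission mode, CRC flag and
// large file flag.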
buf[current_idx] = (CFDP_VERSION_2 << 5)
| ((self.pdu_type as u8) << 4)
| ((self.pdu_conf.direction as u8) << 3)
| ((self.pdu_conf.trans_mode as u8) << 2)
| ((self.pdu_conf.crc_flag as u8) << 1)
| (self.pdu_conf.file_flag as u8);
current_idx += 1;
buf[current_idx..current_idx + 2].copy_from_slice(&self.pdu_datafield_len.to_be_bytes());
current_idx += 2;
buf[current_idx] = ((self.seg_ctrl as u8) << 7)
| (((self.pdu_conf.source_entity_id.size() - 1) as u8) << 4)
| ((self.seg_metadata_flag as u8) << 3)
| ((self.pdu_conf.transaction_seq_num.size() - 1) as u8);
current_idx += 1;
self.pdu_conf.source_entity_id.write_to_be_bytes(
&mut buf[current_idx..current_idx + self.pdu_conf.source_entity_id.size()],
)?;
current_idx += self.pdu_conf.source_entity_id.size();
self.pdu_conf.transaction_seq_num.write_to_be_bytes(
&mut buf[current_idx..current_idx + self.pdu_conf.transaction_seq_num.size()],
)?;
current_idx += self.pdu_conf.transaction_seq_num.size();
self.pdu_conf.dest_entity_id.write_to_be_bytes(
&mut buf[current_idx..current_idx + self.pdu_conf.dest_entity_id.size()],
)?;
current_idx += self.pdu_conf.dest_entity_id.size();
Ok(current_idx)
}
/// This function first verifies that the buffer can hold the full length of the PDU parsed from
/// the header. Then, it verifies the checksum as specified in the standard if the CRC flag
/// of the PDU header is set.
///
/// This function will return the PDU length excluding the 2 CRC bytes on success. If the CRC
/// flag is not set, it will simply return the PDU length.
pub fn verify_length_and_checksum(&self, buf: &[u8]) -> Result<usize, PduError> {
if buf.len() < self.pdu_len() {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: self.pdu_len(),
}
.into());
}
if self.pdu_conf.crc_flag == CrcFlag::WithCrc {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(&buf[..self.pdu_len()]);
if digest.finalize() != 0 {
return Err(PduError::ChecksumError(u16::from_be_bytes(
buf[self.pdu_len() - 2..self.pdu_len()].try_into().unwrap(),
)));
}
return Ok(self.pdu_len() - 2);
}
Ok(self.pdu_len())
}
/// Please note that this function will not verify that the passed buffer can hold the full
/// PDU length. This allows recovering the header portion even if the data field length is
/// invalid. This function will also not do the CRC procedure specified in chapter 4.1.1
/// and 4.1.2 because performing the CRC procedure requires the buffer to be large enough
/// to hold the full PDU.
///
/// Both functions can however be performed with the [Self::verify_length_and_checksum]
/// function.
pub fn from_bytes(buf: &[u8]) -> Result<(Self, usize), PduError> {
if buf.len() < FIXED_HEADER_LEN {
return Err(PduError::ByteConversionError(
ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: FIXED_HEADER_LEN,
},
));
}
let cfdp_version_raw = (buf[0] >> 5) & 0b111;
if cfdp_version_raw != CFDP_VERSION_2 {
return Err(PduError::CfdpVersionMissmatch(cfdp_version_raw));
}
// unwrap for single bit fields: This operation will always succeed.
let pdu_type = PduType::try_from((buf[0] >> 4) & 0b1).unwrap();
let direction = Direction::try_from((buf[0] >> 3) & 0b1).unwrap();
let trans_mode = TransmissionMode::try_from((buf[0] >> 2) & 0b1).unwrap();
let crc_flag = CrcFlag::try_from((buf[0] >> 1) & 0b1).unwrap();
let file_flag = LargeFileFlag::try_from(buf[0] & 0b1).unwrap();
let pdu_datafield_len = u16::from_be_bytes(buf[1..3].try_into().unwrap());
let seg_ctrl = SegmentationControl::try_from((buf[3] >> 7) & 0b1).unwrap();
let expected_len_entity_ids = (((buf[3] >> 4) & 0b111) + 1) as usize;
if (expected_len_entity_ids != 1)
&& (expected_len_entity_ids != 2)
&& (expected_len_entity_ids != 4)
&& (expected_len_entity_ids != 8)
{
return Err(PduError::InvalidEntityLen(expected_len_entity_ids as u8));
}
let seg_metadata_flag = SegmentMetadataFlag::try_from((buf[3] >> 3) & 0b1).unwrap();
let expected_len_seq_num = ((buf[3] & 0b111) + 1) as usize;
if (expected_len_seq_num != 1)
&& (expected_len_seq_num != 2)
&& (expected_len_seq_num != 4)
&& (expected_len_seq_num != 8)
{
return Err(PduError::InvalidTransactionSeqNumLen(
expected_len_seq_num as u8,
));
}
if buf.len() < (4 + 2 * expected_len_entity_ids + expected_len_seq_num) {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: 4 + 2 * expected_len_entity_ids + expected_len_seq_num,
}
.into());
}
let mut current_idx = 4;
// It is okay to unwrap here because we checked the validity of the expected length and of
// the remaining buffer length.
let source_id =
UnsignedByteField::new_from_be_bytes(expected_len_entity_ids, &buf[current_idx..])
.unwrap();
current_idx += expected_len_entity_ids;
let transaction_seq_num =
UnsignedByteField::new_from_be_bytes(expected_len_seq_num, &buf[current_idx..])
.unwrap();
current_idx += expected_len_seq_num;
let dest_id =
UnsignedByteField::new_from_be_bytes(expected_len_entity_ids, &buf[current_idx..])
.unwrap();
current_idx += expected_len_entity_ids;
let common_pdu_conf = CommonPduConfig::new(
source_id,
dest_id,
transaction_seq_num,
trans_mode,
file_flag,
crc_flag,
direction,
)
.unwrap();
Ok((
PduHeader {
pdu_type,
pdu_conf: common_pdu_conf,
seg_metadata_flag,
seg_ctrl,
pdu_datafield_len,
},
current_idx,
))
}
pub fn pdu_type(&self) -> PduType {
self.pdu_type
}
pub fn common_pdu_conf(&self) -> &CommonPduConfig {
&self.pdu_conf
}
pub fn seg_metadata_flag(&self) -> SegmentMetadataFlag {
self.seg_metadata_flag
}
pub fn seg_ctrl(&self) -> SegmentationControl {
self.seg_ctrl
}
}
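/// Write the file size sensitive (FSS) field to the given buffer: 4 bytes for normal file
/// sizes, 8 bytes if the large file flag is set. Returns the number of written bytes.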
pub(crate) fn write_fss_field(
file_flag: LargeFileFlag,
file_size: u64,
buf: &mut [u8],
) -> Result<usize, PduError> {
Ok(if file_flag == LargeFileFlag::Large {
buf[..core::mem::size_of::<u64>()].copy_from_slice(&file_size.to_be_bytes());
core::mem::size_of::<u64>()
} else {
if file_size > u32::MAX as u64 {
return Err(PduError::FileSizeTooLarge(file_size));
}
buf[..core::mem::size_of::<u32>()].copy_from_slice(&(file_size as u32).to_be_bytes());
core::mem::size_of::<u32>()
})
}
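/// Read a file size sensitive (FSS) field from the given buffer depending on the large file
/// flag. Returns the field length in bytes and the read file size value.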
pub(crate) fn read_fss_field(file_flag: LargeFileFlag, buf: &[u8]) -> (usize, u64) {
if file_flag == LargeFileFlag::Large {
(
core::mem::size_of::<u64>(),
u64::from_be_bytes(buf[..core::mem::size_of::<u64>()].try_into().unwrap()),
)
} else {
(
core::mem::size_of::<u32>(),
u32::from_be_bytes(buf[..core::mem::size_of::<u32>()].try_into().unwrap()).into(),
)
}
}
// This is a generic length check applicable to most PDU deserializations. It first checks whether
// a given buffer can hold an expected minimum size, and then it checks whether the PDU datafield
// length is larger than that expected minimum size.
pub(crate) fn generic_length_checks_pdu_deserialization(
buf: &[u8],
min_expected_len: usize,
full_len_without_crc: usize,
) -> Result<(), ByteConversionError> {
// Buffer too short to hold additional expected minimum datasize.
if buf.len() < min_expected_len {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: min_expected_len,
});
}
// This can happen if the PDU datafield length value is invalid.
if full_len_without_crc < min_expected_len {
return Err(ByteConversionError::FromSliceTooSmall {
found: full_len_without_crc,
expected: min_expected_len,
});
}
Ok(())
}
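// Calculates the CRC16 (CRC_CCITT_FALSE) over the PDU bytes written so far, appends it to the
// buffer and returns the new write index.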
pub(crate) fn add_pdu_crc(buf: &mut [u8], mut current_idx: usize) -> usize {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(&buf[..current_idx]);
buf[current_idx..current_idx + 2].copy_from_slice(&digest.finalize().to_be_bytes());
current_idx += 2;
current_idx
}
#[cfg(test)]
mod tests {
use crate::cfdp::pdu::{CommonPduConfig, PduError, PduHeader, FIXED_HEADER_LEN};
use crate::cfdp::{
CrcFlag, Direction, LargeFileFlag, PduType, SegmentMetadataFlag, SegmentationControl,
TransmissionMode, CFDP_VERSION_2,
};
use crate::util::{
UbfU8, UnsignedByteField, UnsignedByteFieldU16, UnsignedByteFieldU8, UnsignedEnum,
};
use crate::ByteConversionError;
use std::format;
pub(crate) fn common_pdu_conf(crc_flag: CrcFlag, fss: LargeFileFlag) -> CommonPduConfig {
let src_id = UbfU8::new(5);
let dest_id = UbfU8::new(10);
let transaction_seq_num = UbfU8::new(20);
let mut pdu_conf =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_seq_num)
.expect("Generating common PDU config");
pdu_conf.crc_flag = crc_flag;
pdu_conf.file_flag = fss;
pdu_conf
}
pub(crate) fn verify_raw_header(pdu_conf: &PduHeader, buf: &[u8]) {
assert_eq!((buf[0] >> 5) & 0b111, CFDP_VERSION_2);
// File directive
assert_eq!((buf[0] >> 4) & 1, pdu_conf.pdu_type as u8);
// Towards receiver
assert_eq!((buf[0] >> 3) & 1, pdu_conf.pdu_conf.direction as u8);
// Acknowledged
assert_eq!((buf[0] >> 2) & 1, pdu_conf.pdu_conf.trans_mode as u8);
// No CRC
assert_eq!((buf[0] >> 1) & 1, pdu_conf.pdu_conf.crc_flag as u8);
// Regular file size
assert_eq!(buf[0] & 1, pdu_conf.pdu_conf.file_flag as u8);
let pdu_datafield_len = u16::from_be_bytes(buf[1..3].try_into().unwrap());
assert_eq!(pdu_datafield_len, pdu_conf.pdu_datafield_len);
// No record boundary preservation
assert_eq!((buf[3] >> 7) & 1, pdu_conf.seg_ctrl as u8);
// Entity ID length raw value is actual number of octets - 1 => 0
let entity_id_len = pdu_conf.pdu_conf.source_entity_id.size();
assert_eq!((buf[3] >> 4) & 0b111, entity_id_len as u8 - 1);
// No segment metadata
assert_eq!((buf[3] >> 3) & 0b1, pdu_conf.seg_metadata_flag as u8);
// Transaction Sequence ID length raw value is actual number of octets - 1 => 0
let seq_num_len = pdu_conf.pdu_conf.transaction_seq_num.size();
assert_eq!(buf[3] & 0b111, seq_num_len as u8 - 1);
let mut current_idx = 4;
let mut byte_field_check = |field_len: usize, ubf: &UnsignedByteField| {
match field_len {
1 => assert_eq!(buf[current_idx], ubf.value() as u8),
2 => assert_eq!(
u16::from_be_bytes(
buf[current_idx..current_idx + field_len]
.try_into()
.unwrap()
),
ubf.value() as u16
),
4 => assert_eq!(
u32::from_be_bytes(
buf[current_idx..current_idx + field_len]
.try_into()
.unwrap()
),
ubf.value() as u32
),
8 => assert_eq!(
u64::from_be_bytes(
buf[current_idx..current_idx + field_len]
.try_into()
.unwrap()
),
ubf.value() as u64
),
_ => panic!("invalid entity ID length"),
}
current_idx += field_len
};
byte_field_check(entity_id_len, &pdu_conf.pdu_conf.source_entity_id);
byte_field_check(seq_num_len, &pdu_conf.pdu_conf.transaction_seq_num);
byte_field_check(entity_id_len, &pdu_conf.pdu_conf.dest_entity_id);
}
#[test]
fn test_basic_state() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
assert_eq!(pdu_header.pdu_type(), PduType::FileDirective);
let common_conf_ref = pdu_header.common_pdu_conf();
assert_eq!(*common_conf_ref, common_pdu_cfg);
// These should be 0 and ignored for non-filedata PDUs
assert_eq!(
pdu_header.seg_metadata_flag(),
SegmentMetadataFlag::NotPresent
);
assert_eq!(
pdu_header.seg_ctrl(),
SegmentationControl::NoRecordBoundaryPreservation
);
assert_eq!(pdu_header.pdu_datafield_len, 5);
assert_eq!(pdu_header.header_len(), 7);
}
#[test]
fn test_basic_state_default() {
let default_conf = CommonPduConfig::default();
assert_eq!(default_conf.source_id(), UnsignedByteFieldU8::new(0).into());
assert_eq!(default_conf.dest_id(), UnsignedByteFieldU8::new(0).into());
assert_eq!(
default_conf.transaction_seq_num,
UnsignedByteFieldU8::new(0).into()
);
assert_eq!(default_conf.trans_mode, TransmissionMode::Acknowledged);
assert_eq!(default_conf.direction, Direction::TowardsReceiver);
assert_eq!(default_conf.crc_flag, CrcFlag::NoCrc);
assert_eq!(default_conf.file_flag, LargeFileFlag::Normal);
}
#[test]
fn test_pdu_header_setter() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let mut common_pdu_cfg =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let other_src_id = UnsignedByteFieldU16::new(5);
let other_dest_id = UnsignedByteFieldU16::new(6);
let set_result = common_pdu_cfg.set_source_and_dest_id(other_src_id, other_dest_id);
assert!(set_result.is_ok());
assert_eq!(common_pdu_cfg.source_id(), other_src_id.into());
assert_eq!(common_pdu_cfg.dest_id(), other_dest_id.into());
}
#[test]
fn test_serialization_1() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
// 4 byte fixed header plus three bytes src, dest ID and transaction ID
assert_eq!(res.unwrap(), 7);
verify_raw_header(&pdu_header, &buf);
}
#[test]
fn test_deserialization_1() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
let deser_res = PduHeader::from_bytes(&buf);
assert!(deser_res.is_ok());
let (header_read_back, read_size) = deser_res.unwrap();
assert_eq!(read_size, 7);
assert_eq!(header_read_back, pdu_header);
}
#[test]
fn test_serialization_2() {
let src_id = UnsignedByteFieldU16::new(0x0001);
let dest_id = UnsignedByteFieldU16::new(0x0203);
let transaction_id = UnsignedByteFieldU16::new(0x0405);
let mut common_pdu_cfg =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
common_pdu_cfg.crc_flag = CrcFlag::WithCrc;
common_pdu_cfg.direction = Direction::TowardsSender;
common_pdu_cfg.trans_mode = TransmissionMode::Unacknowledged;
common_pdu_cfg.file_flag = LargeFileFlag::Large;
let pdu_header = PduHeader::new_for_file_data(
common_pdu_cfg,
5,
SegmentMetadataFlag::Present,
SegmentationControl::WithRecordBoundaryPreservation,
);
assert_eq!(pdu_header.header_len(), 10);
let mut buf: [u8; 16] = [0; 16];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok(), "{}", format!("Result {res:?} not okay"));
// 4 byte fixed header, 6 bytes additional fields
assert_eq!(res.unwrap(), 10);
verify_raw_header(&pdu_header, &buf);
}
#[test]
fn test_deserialization_2() {
let src_id = UnsignedByteFieldU16::new(0x0001);
let dest_id = UnsignedByteFieldU16::new(0x0203);
let transaction_id = UnsignedByteFieldU16::new(0x0405);
let mut common_pdu_cfg =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
common_pdu_cfg.crc_flag = CrcFlag::WithCrc;
common_pdu_cfg.direction = Direction::TowardsSender;
common_pdu_cfg.trans_mode = TransmissionMode::Unacknowledged;
common_pdu_cfg.file_flag = LargeFileFlag::Large;
let pdu_header = PduHeader::new_for_file_data(
common_pdu_cfg,
5,
SegmentMetadataFlag::Present,
SegmentationControl::WithRecordBoundaryPreservation,
);
let mut buf: [u8; 16] = [0; 16];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
let deser_res = PduHeader::from_bytes(&buf);
assert!(deser_res.is_ok());
let (header_read_back, read_size) = deser_res.unwrap();
assert_eq!(read_size, 10);
assert_eq!(header_read_back, pdu_header);
}
#[test]
fn test_invalid_raw_version() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
buf[0] &= !0b1110_0000;
buf[0] |= (CFDP_VERSION_2 + 1) << 5;
let res = PduHeader::from_bytes(&buf);
assert!(res.is_err());
let error = res.unwrap_err();
if let PduError::CfdpVersionMissmatch(raw_version) = error {
assert_eq!(raw_version, CFDP_VERSION_2 + 1);
} else {
panic!("invalid exception: {}", error);
}
}
#[test]
fn test_buf_too_small_1() {
let buf: [u8; 3] = [0; 3];
let res = PduHeader::from_bytes(&buf);
assert!(res.is_err());
let error = res.unwrap_err();
if let PduError::ByteConversionError(ByteConversionError::FromSliceTooSmall {
found,
expected,
}) = error
{
assert_eq!(found, 3);
assert_eq!(expected, FIXED_HEADER_LEN);
} else {
panic!("invalid exception: {}", error);
}
}
#[test]
fn test_buf_too_small_2() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
let header = PduHeader::from_bytes(&buf[0..6]);
assert!(header.is_err());
let error = header.unwrap_err();
if let PduError::ByteConversionError(ByteConversionError::FromSliceTooSmall {
found,
expected,
}) = error
{
assert_eq!(found, 6);
assert_eq!(expected, 7);
} else {
panic!("invalid exception: {}", error);
}
}
#[test]
fn test_invalid_seq_len() {
let src_id = UbfU8::new(1);
let dest_id = UbfU8::new(2);
let transaction_seq_id = UbfU8::new(3);
let invalid_byte_field = UnsignedByteField::new(3, 5);
let pdu_conf_res =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, invalid_byte_field);
assert!(pdu_conf_res.is_err());
let error = pdu_conf_res.unwrap_err();
if let PduError::InvalidTransactionSeqNumLen(len) = error {
assert_eq!(len, 3);
} else {
panic!("Invalid exception: {}", error)
}
let pdu_conf_res = CommonPduConfig::new_with_byte_fields(
invalid_byte_field,
invalid_byte_field,
transaction_seq_id,
);
assert!(pdu_conf_res.is_err());
let error = pdu_conf_res.unwrap_err();
if let PduError::InvalidEntityLen(len) = error {
assert_eq!(len, 3);
} else {
panic!("Invalid exception: {}", error)
}
}
#[test]
fn test_missmatch_src_dest_id() {
let src_id = UnsignedByteField::new(1, 5);
let dest_id = UnsignedByteField::new(2, 5);
let transaction_seq_id = UbfU8::new(3);
let pdu_conf_res =
CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_seq_id);
assert!(pdu_conf_res.is_err());
let error = pdu_conf_res.unwrap_err();
if let PduError::SourceDestIdLenMissmatch {
src_id_len,
dest_id_len,
} = error
{
assert_eq!(src_id_len, 1);
assert_eq!(dest_id_len, 2);
} else {
panic!("invalid exception: {}", error);
}
}
#[test]
fn test_invalid_raw_src_id_len() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
buf[3] &= !0b0111_0000;
// Raw value 0b10 is equivalent to a length of three, which is invalid
buf[3] |= 0b10 << 4;
let header_res = PduHeader::from_bytes(&buf);
assert!(header_res.is_err());
let error = header_res.unwrap_err();
if let PduError::InvalidEntityLen(len) = error {
assert_eq!(len, 3);
} else {
panic!("invalid exception {:?}", error)
}
}
#[test]
fn test_invalid_transaction_seq_id_len() {
let src_id = UnsignedByteFieldU8::new(1);
let dest_id = UnsignedByteFieldU8::new(2);
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
buf[3] &= !0b0000_0111;
// Raw value 0b10 is equivalent to a length of three, which is invalid
buf[3] |= 0b10;
let header_res = PduHeader::from_bytes(&buf);
assert!(header_res.is_err());
let error = header_res.unwrap_err();
if let PduError::InvalidTransactionSeqNumLen(len) = error {
assert_eq!(len, 3);
} else {
panic!("invalid exception {:?}", error)
}
}
}

798
src/cfdp/tlv/mod.rs Normal file

@ -0,0 +1,798 @@
//! Generic CFDP type-length-value (TLV) abstraction as specified in CFDP 5.1.9.
use crate::cfdp::lv::{
generic_len_check_data_serialization, generic_len_check_deserialization, Lv, MIN_LV_LEN,
};
use crate::cfdp::TlvLvError;
use crate::util::{UnsignedByteField, UnsignedByteFieldError, UnsignedEnum};
use crate::ByteConversionError;
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
pub mod msg_to_user;
pub const MIN_TLV_LEN: usize = 2;
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum TlvType {
FilestoreRequest = 0x00,
FilestoreResponse = 0x01,
MsgToUser = 0x02,
FaultHandler = 0x04,
FlowLabel = 0x05,
EntityId = 0x06,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum TlvTypeField {
Standard(TlvType),
Custom(u8),
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum FilestoreActionCode {
CreateFile = 0b0000,
DeleteFile = 0b0001,
RenameFile = 0b0010,
/// This operation appends one file to another. The content of the first specified file will form
/// the first part of the new file, and the new file will keep the name of the first file. This
/// operation can be used to get similar functionality to the UNIX cat utility (albeit for only
/// two files).
AppendFile = 0b0011,
/// This operation replaces the content of the first specified file with the content of
/// the second specified file.
ReplaceFile = 0b0100,
CreateDirectory = 0b0101,
RemoveDirectory = 0b0110,
DenyFile = 0b0111,
DenyDirectory = 0b1000,
}
impl From<u8> for TlvTypeField {
fn from(value: u8) -> Self {
match TlvType::try_from(value) {
Ok(tlv_type) => TlvTypeField::Standard(tlv_type),
Err(_) => TlvTypeField::Custom(value),
}
}
}
impl From<TlvTypeField> for u8 {
fn from(value: TlvTypeField) -> Self {
match value {
TlvTypeField::Standard(std) => std as u8,
TlvTypeField::Custom(custom) => custom,
}
}
}
/// Generic CFDP type-length-value (TLV) abstraction as specified in CFDP 5.1.9.
///
/// Please note that this type is zero-copy and does not copy the value data in either the
/// regular [Self::new] constructor or the [Self::from_bytes] constructor.
///
/// # Lifetimes
/// * `data`: If the TLV is generated from a raw bytestream, this will be the lifetime of
/// the raw bytestream. If the TLV is generated from a raw slice or a similar data reference,
/// this will be the lifetime of that data reference.
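///
/// # Example
///
/// Minimal sketch, not part of the original documentation, assuming a one byte value which
/// results in a full raw TLV length of 3 bytes (type octet, length octet and value octet):
///
/// ```ignore
/// let value = [5u8];
/// let tlv = Tlv::new(TlvType::EntityId, &value).unwrap();
/// let mut buf = [0u8; 8];
/// assert_eq!(tlv.write_to_bytes(&mut buf).unwrap(), 3);
/// assert_eq!(Tlv::from_bytes(&buf).unwrap().value(), &value);
/// ```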
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Tlv<'data> {
tlv_type_field: TlvTypeField,
#[cfg_attr(feature = "serde", serde(borrow))]
lv: Lv<'data>,
}
impl<'data> Tlv<'data> {
pub fn new(tlv_type: TlvType, data: &[u8]) -> Result<Tlv, TlvLvError> {
Ok(Tlv {
tlv_type_field: TlvTypeField::Standard(tlv_type),
lv: Lv::new(data)?,
})
}
/// Creates a TLV with an empty value field.
pub fn new_empty(tlv_type: TlvType) -> Tlv<'data> {
Tlv {
tlv_type_field: TlvTypeField::Standard(tlv_type),
lv: Lv::new_empty(),
}
}
/// Checks whether the type field contains one of the standard types specified in the CFDP
/// standard and is part of the [TlvType] enum.
pub fn is_standard_tlv(&self) -> bool {
if let TlvTypeField::Standard(_) = self.tlv_type_field {
return true;
}
false
}
/// Returns the standard TLV type if the TLV field is not a custom field
pub fn tlv_type(&self) -> Option<TlvType> {
if let TlvTypeField::Standard(tlv_type) = self.tlv_type_field {
Some(tlv_type)
} else {
None
}
}
pub fn tlv_type_field(&self) -> TlvTypeField {
self.tlv_type_field
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
generic_len_check_data_serialization(buf, self.value().len(), MIN_TLV_LEN)?;
buf[0] = self.tlv_type_field.into();
self.lv.write_to_be_bytes_no_len_check(&mut buf[1..]);
Ok(self.len_full())
}
pub fn value(&self) -> &[u8] {
self.lv.value()
}
/// Helper method to retrieve the length of the value. Simply calls the [slice::len] method of
/// [Self::value]
pub fn len_value(&self) -> usize {
self.lv.len_value()
}
/// Returns the full raw length, including the length byte.
pub fn len_full(&self) -> usize {
self.lv.len_full() + 1
}
/// Checks whether the value field is empty.
pub fn is_empty(&self) -> bool {
self.lv.is_empty()
}
/// Creates a TLV given a raw bytestream. Please note that it is not necessary to pass the
/// bytestream with the exact size of the expected TLV. This function will take care
/// of parsing the length byte, and the length of the parsed TLV can be retrieved using
/// [Self::len_full].
pub fn from_bytes(buf: &'data [u8]) -> Result<Tlv<'data>, TlvLvError> {
generic_len_check_deserialization(buf, MIN_TLV_LEN)?;
let mut tlv = Self {
tlv_type_field: TlvTypeField::from(buf[0]),
lv: Lv::from_bytes(&buf[MIN_LV_LEN..])?,
};
// We re-use this field so we do not need an additional struct field to store the raw start
// of the TLV.
tlv.lv.raw_data = Some(buf);
Ok(tlv)
}
/// If the TLV was generated from a raw bytestream using [Self::from_bytes], the raw start
/// of the TLV can be retrieved with this method.
pub fn raw_data(&self) -> Option<&[u8]> {
self.lv.raw_data()
}
}
pub(crate) fn verify_tlv_type(raw_type: u8, expected_tlv_type: TlvType) -> Result<(), TlvLvError> {
let tlv_type = TlvType::try_from(raw_type)
.map_err(|_| TlvLvError::InvalidTlvTypeField((raw_type, Some(expected_tlv_type as u8))))?;
if tlv_type != expected_tlv_type {
return Err(TlvLvError::InvalidTlvTypeField((
tlv_type as u8,
Some(expected_tlv_type as u8),
)));
}
Ok(())
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct EntityIdTlv {
entity_id: UnsignedByteField,
}
impl EntityIdTlv {
pub fn new(entity_id: UnsignedByteField) -> Self {
Self { entity_id }
}
fn len_check(buf: &[u8]) -> Result<(), ByteConversionError> {
if buf.len() < 2 {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: 2,
});
}
Ok(())
}
pub fn len_value(&self) -> usize {
self.entity_id.size()
}
pub fn len_full(&self) -> usize {
2 + self.entity_id.size()
}
pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
Self::len_check(buf)?;
buf[0] = TlvType::EntityId as u8;
buf[1] = self.entity_id.size() as u8;
self.entity_id.write_to_be_bytes(&mut buf[2..])
}
pub fn from_bytes(buf: &[u8]) -> Result<Self, TlvLvError> {
Self::len_check(buf)?;
verify_tlv_type(buf[0], TlvType::EntityId)?;
let len = buf[1];
if len != 1 && len != 2 && len != 4 && len != 8 {
return Err(TlvLvError::InvalidValueLength(len as usize));
}
// Okay to unwrap here. The checks before make sure that the deserialization never fails
let entity_id = UnsignedByteField::new_from_be_bytes(len as usize, &buf[2..]).unwrap();
Ok(Self { entity_id })
}
/// Convert to a generic [Tlv], which also erases the programmatic type information.
pub fn to_tlv(self, buf: &mut [u8]) -> Result<Tlv, ByteConversionError> {
Self::len_check(buf)?;
self.entity_id
.write_to_be_bytes(&mut buf[2..2 + self.entity_id.size()])?;
Tlv::new(TlvType::EntityId, &buf[2..2 + self.entity_id.size()]).map_err(|e| match e {
TlvLvError::ByteConversionError(e) => e,
// All other errors are impossible.
_ => panic!("unexpected TLV error"),
})
}
}
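// Illustrative sketch (not part of the original file): writing an EntityIdTlv and reading it
// back. The width of the wrapped unsigned byte field determines the TLV value length.
//
// let entity_id_tlv = EntityIdTlv::new(UnsignedByteField::new(1, 5));
// let mut buf = [0u8; 8];
// entity_id_tlv.write_to_be_bytes(&mut buf).unwrap();
// assert_eq!(EntityIdTlv::from_bytes(&buf).unwrap(), entity_id_tlv);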
impl<'data> TryFrom<Tlv<'data>> for EntityIdTlv {
type Error = TlvLvError;
fn try_from(value: Tlv) -> Result<Self, Self::Error> {
match value.tlv_type_field {
TlvTypeField::Standard(tlv_type) => {
if tlv_type != TlvType::EntityId {
return Err(TlvLvError::InvalidTlvTypeField((
tlv_type as u8,
Some(TlvType::EntityId as u8),
)));
}
}
TlvTypeField::Custom(val) => {
return Err(TlvLvError::InvalidTlvTypeField((
val,
Some(TlvType::EntityId as u8),
)));
}
}
let len_value = value.value().len();
if len_value != 1 && len_value != 2 && len_value != 4 && len_value != 8 {
return Err(TlvLvError::InvalidValueLength(len_value));
}
Ok(Self::new(
UnsignedByteField::new_from_be_bytes(len_value, value.value()).map_err(
|e| match e {
UnsignedByteFieldError::ByteConversionError(e) => e,
// This can not happen, we checked for the length validity, and the data is always smaller than
// 255 bytes.
_ => panic!("unexpected error"),
},
)?,
))
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FilestoreRequestTlv<'first_name, 'second_name> {
action_code: FilestoreActionCode,
#[cfg_attr(feature = "serde", serde(borrow))]
first_name: Lv<'first_name>,
#[cfg_attr(feature = "serde", serde(borrow))]
second_name: Option<Lv<'second_name>>,
}
impl<'first_name, 'second_name> FilestoreRequestTlv<'first_name, 'second_name> {
pub fn new_create_file(first_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::CreateFile, first_name, None)
}
pub fn new_delete_file(first_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::DeleteFile, first_name, None)
}
pub fn new_rename_file(
source_name: Lv<'first_name>,
target_name: Lv<'second_name>,
) -> Result<Self, TlvLvError> {
Self::new(
FilestoreActionCode::RenameFile,
source_name,
Some(target_name),
)
}
/// This operation appends one file to another. The content of the first specified file will form
/// the first part of the new file, and the new file will keep the name of the first file. This
/// function can be used to get similar functionality to the UNIX cat utility (albeit for only
/// two files).
pub fn new_append_file(
first_file: Lv<'first_name>,
second_file: Lv<'second_name>,
) -> Result<Self, TlvLvError> {
Self::new(
FilestoreActionCode::AppendFile,
first_file,
Some(second_file),
)
}
/// This operation replaces the content of the first specified file with the content of
/// the second specified file. This function can be used to get similar functionality to
/// the UNIX copy (cp) utility if the target file already exists.
pub fn new_replace_file(
replaced_file: Lv<'first_name>,
new_file: Lv<'second_name>,
) -> Result<Self, TlvLvError> {
Self::new(
FilestoreActionCode::ReplaceFile,
replaced_file,
Some(new_file),
)
}
pub fn new_create_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::CreateDirectory, dir_name, None)
}
pub fn new_remove_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::RemoveDirectory, dir_name, None)
}
pub fn new_deny_file(file_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::DenyFile, file_name, None)
}
pub fn new_deny_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::DenyDirectory, dir_name, None)
}
/// This function will return a [TlvLvError::SecondNameMissing] error if the respective action
/// code requires two names but only one is passed. It will also return a
/// [TlvLvError::InvalidValueLength] error if the cumulative length of the first name and the
/// second name exceeds 255 bytes.
///
/// Two names are required for the rename, append and replace filestore requests.
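///
/// Minimal sketch, not part of the original documentation, for a rename request, which is one
/// of the variants requiring both names:
///
/// ```ignore
/// let src = Lv::new_from_str("old.txt").unwrap();
/// let dest = Lv::new_from_str("new.txt").unwrap();
/// let req = FilestoreRequestTlv::new(FilestoreActionCode::RenameFile, src, Some(dest)).unwrap();
/// assert_eq!(req.action_code(), FilestoreActionCode::RenameFile);
/// ```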
pub fn new(
action_code: FilestoreActionCode,
first_name: Lv<'first_name>,
second_name: Option<Lv<'second_name>>,
) -> Result<Self, TlvLvError> {
let mut base_value_len = first_name.len_full();
if Self::has_second_filename(action_code) {
if second_name.is_none() {
return Err(TlvLvError::SecondNameMissing);
}
base_value_len += second_name.as_ref().unwrap().len_full();
}
if base_value_len > u8::MAX as usize {
return Err(TlvLvError::InvalidValueLength(base_value_len));
}
Ok(Self {
action_code,
first_name,
second_name,
})
}
pub fn has_second_filename(action_code: FilestoreActionCode) -> bool {
if action_code == FilestoreActionCode::RenameFile
|| action_code == FilestoreActionCode::AppendFile
|| action_code == FilestoreActionCode::ReplaceFile
{
return true;
}
false
}
pub fn action_code(&self) -> FilestoreActionCode {
self.action_code
}
pub fn first_name(&self) -> Lv<'first_name> {
self.first_name
}
pub fn second_name(&self) -> Option<Lv<'second_name>> {
self.second_name
}
pub fn len_value(&self) -> usize {
let mut len = 1 + self.first_name.len_full();
if let Some(second_name) = self.second_name {
len += second_name.len_full();
}
len
}
pub fn len_full(&self) -> usize {
2 + self.len_value()
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.len_full() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.len_full(),
});
}
buf[0] = TlvType::FilestoreRequest as u8;
buf[1] = self.len_value() as u8;
buf[2] = (self.action_code as u8) << 4;
let mut current_idx = 3;
// Length checks were already performed.
self.first_name.write_to_be_bytes_no_len_check(
&mut buf[current_idx..current_idx + self.first_name.len_full()],
);
current_idx += self.first_name.len_full();
if let Some(second_name) = self.second_name {
second_name.write_to_be_bytes_no_len_check(
&mut buf[current_idx..current_idx + second_name.len_full()],
);
current_idx += second_name.len_full();
}
Ok(current_idx)
}
pub fn from_bytes<'longest: 'first_name + 'second_name>(
buf: &'longest [u8],
) -> Result<Self, TlvLvError> {
if buf.len() < 2 {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: 2,
}
.into());
}
verify_tlv_type(buf[0], TlvType::FilestoreRequest)?;
let len = buf[1] as usize;
let mut current_idx = 2;
let action_code = FilestoreActionCode::try_from((buf[2] >> 4) & 0b1111)
.map_err(|_| TlvLvError::InvalidFilestoreActionCode((buf[2] >> 4) & 0b1111))?;
current_idx += 1;
let first_name = Lv::from_bytes(&buf[current_idx..])?;
let mut second_name = None;
current_idx += first_name.len_full();
if Self::has_second_filename(action_code) {
if current_idx >= 2 + len {
return Err(TlvLvError::SecondNameMissing);
}
second_name = Some(Lv::from_bytes(&buf[current_idx..])?);
}
Ok(Self {
action_code,
first_name,
second_name,
})
}
}
#[cfg(test)]
mod tests {
use crate::cfdp::lv::Lv;
use crate::cfdp::tlv::{FilestoreActionCode, FilestoreRequestTlv, Tlv, TlvType, TlvTypeField};
use crate::cfdp::TlvLvError;
use crate::util::{UbfU8, UnsignedEnum};
const TLV_TEST_STR_0: &str = "hello.txt";
const TLV_TEST_STR_1: &str = "hello2.txt";
#[test]
fn test_basic() {
let entity_id = UbfU8::new(5);
let mut buf: [u8; 4] = [0; 4];
assert!(entity_id.write_to_be_bytes(&mut buf).is_ok());
let tlv_res = Tlv::new(TlvType::EntityId, &buf[0..1]);
assert!(tlv_res.is_ok());
let tlv_res = tlv_res.unwrap();
assert_eq!(
tlv_res.tlv_type_field(),
TlvTypeField::Standard(TlvType::EntityId)
);
assert_eq!(tlv_res.len_full(), 3);
assert_eq!(tlv_res.value().len(), 1);
assert_eq!(tlv_res.len_value(), 1);
assert!(!tlv_res.is_empty());
assert_eq!(tlv_res.value()[0], 5);
}
#[test]
fn test_serialization() {
let entity_id = UbfU8::new(5);
let mut buf: [u8; 4] = [0; 4];
assert!(entity_id.write_to_be_bytes(&mut buf).is_ok());
let tlv_res = Tlv::new(TlvType::EntityId, &buf[0..1]);
assert!(tlv_res.is_ok());
let tlv_res = tlv_res.unwrap();
let mut ser_buf: [u8; 4] = [0; 4];
assert!(tlv_res.write_to_bytes(&mut ser_buf).is_ok());
assert_eq!(ser_buf[0], TlvType::EntityId as u8);
assert_eq!(ser_buf[1], 1);
assert_eq!(ser_buf[2], 5);
}
#[test]
fn test_deserialization() {
let entity_id = UbfU8::new(5);
let mut buf: [u8; 4] = [0; 4];
assert!(entity_id.write_to_be_bytes(&mut buf[2..]).is_ok());
buf[0] = TlvType::EntityId as u8;
buf[1] = 1;
let tlv_from_raw = Tlv::from_bytes(&buf);
assert!(tlv_from_raw.is_ok());
let tlv_from_raw = tlv_from_raw.unwrap();
assert!(tlv_from_raw.raw_data().is_some());
assert_eq!(tlv_from_raw.raw_data().unwrap(), buf);
assert_eq!(
tlv_from_raw.tlv_type_field(),
TlvTypeField::Standard(TlvType::EntityId)
);
assert_eq!(tlv_from_raw.value().len(), 1);
assert_eq!(tlv_from_raw.len_full(), 3);
assert_eq!(tlv_from_raw.value()[0], 5);
}
#[test]
fn test_empty() {
let tlv_empty = Tlv::new_empty(TlvType::MsgToUser);
assert_eq!(tlv_empty.value().len(), 0);
assert!(tlv_empty.is_empty());
assert_eq!(tlv_empty.len_full(), 2);
assert!(tlv_empty.value().is_empty());
assert_eq!(
tlv_empty.tlv_type_field(),
TlvTypeField::Standard(TlvType::MsgToUser)
);
}
#[test]
fn test_empty_serialization() {
let tlv_empty = Tlv::new_empty(TlvType::MsgToUser);
let mut buf: [u8; 4] = [0; 4];
assert!(tlv_empty.write_to_bytes(&mut buf).is_ok());
assert_eq!(buf[0], TlvType::MsgToUser as u8);
assert_eq!(buf[1], 0);
}
#[test]
fn test_empty_deserialization() {
let mut buf: [u8; 4] = [0; 4];
buf[0] = TlvType::MsgToUser as u8;
buf[1] = 0;
let tlv_empty = Tlv::from_bytes(&buf);
assert!(tlv_empty.is_ok());
let tlv_empty = tlv_empty.unwrap();
assert!(tlv_empty.is_empty());
assert_eq!(tlv_empty.value().len(), 0);
assert_eq!(
tlv_empty.tlv_type_field(),
TlvTypeField::Standard(TlvType::MsgToUser)
);
assert_eq!(tlv_empty.len_full(), 2);
assert!(tlv_empty.value().is_empty());
}
#[test]
fn test_buf_too_large() {
let buf_too_large: [u8; u8::MAX as usize + 1] = [0; u8::MAX as usize + 1];
let tlv_res = Tlv::new(TlvType::MsgToUser, &buf_too_large);
assert!(tlv_res.is_err());
let error = tlv_res.unwrap_err();
if let TlvLvError::DataTooLarge(size) = error {
assert_eq!(size, u8::MAX as usize + 1);
} else {
panic!("unexpected error {:?}", error);
}
}
#[test]
fn test_deserialization_custom_tlv_type() {
let mut buf: [u8; 4] = [0; 4];
buf[0] = 3;
buf[1] = 1;
buf[2] = 5;
let tlv = Tlv::from_bytes(&buf);
assert!(tlv.is_ok());
let tlv = tlv.unwrap();
assert_eq!(tlv.tlv_type_field(), TlvTypeField::Custom(3));
assert_eq!(tlv.value().len(), 1);
assert_eq!(tlv.len_full(), 3);
}
fn generic_fs_request_test_one_file(
action_code: FilestoreActionCode,
) -> FilestoreRequestTlv<'static, 'static> {
assert!(!FilestoreRequestTlv::has_second_filename(action_code));
let first_name = Lv::new_from_str(TLV_TEST_STR_0).unwrap();
let fs_request = match action_code {
FilestoreActionCode::CreateFile => FilestoreRequestTlv::new_create_file(first_name),
FilestoreActionCode::DeleteFile => FilestoreRequestTlv::new_delete_file(first_name),
FilestoreActionCode::CreateDirectory => {
FilestoreRequestTlv::new_create_directory(first_name)
}
FilestoreActionCode::RemoveDirectory => {
FilestoreRequestTlv::new_remove_directory(first_name)
}
FilestoreActionCode::DenyFile => FilestoreRequestTlv::new_deny_file(first_name),
FilestoreActionCode::DenyDirectory => {
FilestoreRequestTlv::new_deny_directory(first_name)
}
_ => panic!("invalid action code"),
};
assert!(fs_request.is_ok());
let fs_request = fs_request.unwrap();
assert_eq!(fs_request.len_value(), 1 + first_name.len_full());
assert_eq!(fs_request.len_full(), fs_request.len_value() + 2);
assert_eq!(fs_request.action_code(), action_code);
assert_eq!(fs_request.first_name(), first_name);
assert_eq!(fs_request.second_name(), None);
fs_request
}
fn generic_fs_request_test_two_files(
action_code: FilestoreActionCode,
) -> FilestoreRequestTlv<'static, 'static> {
assert!(FilestoreRequestTlv::has_second_filename(action_code));
let first_name = Lv::new_from_str(TLV_TEST_STR_0).unwrap();
let second_name = Lv::new_from_str(TLV_TEST_STR_1).unwrap();
let fs_request = match action_code {
FilestoreActionCode::ReplaceFile => {
FilestoreRequestTlv::new_replace_file(first_name, second_name)
}
FilestoreActionCode::AppendFile => {
FilestoreRequestTlv::new_append_file(first_name, second_name)
}
FilestoreActionCode::RenameFile => {
FilestoreRequestTlv::new_rename_file(first_name, second_name)
}
_ => panic!("invalid action code"),
};
assert!(fs_request.is_ok());
let fs_request = fs_request.unwrap();
assert_eq!(
fs_request.len_value(),
1 + first_name.len_full() + second_name.len_full()
);
assert_eq!(fs_request.len_full(), fs_request.len_value() + 2);
assert_eq!(fs_request.action_code(), action_code);
assert_eq!(fs_request.first_name(), first_name);
assert!(fs_request.second_name().is_some());
assert_eq!(fs_request.second_name().unwrap(), second_name);
fs_request
}
#[test]
fn test_fs_request_basic_create_file() {
generic_fs_request_test_one_file(FilestoreActionCode::CreateFile);
}
#[test]
fn test_fs_request_basic_delete() {
generic_fs_request_test_one_file(FilestoreActionCode::DeleteFile);
}
#[test]
fn test_fs_request_basic_create_dir() {
generic_fs_request_test_one_file(FilestoreActionCode::CreateDirectory);
}
#[test]
fn test_fs_request_basic_remove_dir() {
generic_fs_request_test_one_file(FilestoreActionCode::RemoveDirectory);
}
#[test]
fn test_fs_request_basic_deny_file() {
generic_fs_request_test_one_file(FilestoreActionCode::DenyFile);
}
#[test]
fn test_fs_request_basic_deny_dir() {
generic_fs_request_test_one_file(FilestoreActionCode::DenyDirectory);
}
#[test]
fn test_fs_request_basic_append_file() {
generic_fs_request_test_two_files(FilestoreActionCode::AppendFile);
}
#[test]
fn test_fs_request_basic_rename_file() {
generic_fs_request_test_two_files(FilestoreActionCode::RenameFile);
}
#[test]
fn test_fs_request_basic_replace_file() {
generic_fs_request_test_two_files(FilestoreActionCode::ReplaceFile);
}
fn check_fs_request_first_part(
buf: &[u8],
action_code: FilestoreActionCode,
expected_val_len: u8,
) -> usize {
assert_eq!(buf[0], TlvType::FilestoreRequest as u8);
assert_eq!(buf[1], expected_val_len);
assert_eq!((buf[2] >> 4) & 0b1111, action_code as u8);
let lv = Lv::from_bytes(&buf[3..]);
assert!(lv.is_ok());
let lv = lv.unwrap();
assert_eq!(lv.value_as_str().unwrap().unwrap(), TLV_TEST_STR_0);
3 + lv.len_full()
}
#[test]
fn test_fs_request_serialization_one_file() {
let req = generic_fs_request_test_one_file(FilestoreActionCode::CreateFile);
let mut buf: [u8; 64] = [0; 64];
let res = req.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, 3 + 1 + TLV_TEST_STR_0.len());
assert_eq!(written, req.len_full());
check_fs_request_first_part(
&buf,
FilestoreActionCode::CreateFile,
1 + 1 + TLV_TEST_STR_0.len() as u8,
);
}
#[test]
fn test_fs_request_deserialization_one_file() {
let req = generic_fs_request_test_one_file(FilestoreActionCode::CreateFile);
let mut buf: [u8; 64] = [0; 64];
let res = req.write_to_bytes(&mut buf);
assert!(res.is_ok());
let req_conv_back = FilestoreRequestTlv::from_bytes(&buf);
assert!(req_conv_back.is_ok());
let req_conv_back = req_conv_back.unwrap();
assert_eq!(req_conv_back, req);
}
#[test]
fn test_fs_request_serialization_two_files() {
let req = generic_fs_request_test_two_files(FilestoreActionCode::RenameFile);
let mut buf: [u8; 64] = [0; 64];
let res = req.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, req.len_full());
assert_eq!(
written,
3 + 1 + TLV_TEST_STR_0.len() + 1 + TLV_TEST_STR_1.len()
);
let current_idx = check_fs_request_first_part(
&buf,
FilestoreActionCode::RenameFile,
1 + 1 + TLV_TEST_STR_0.len() as u8 + 1 + TLV_TEST_STR_1.len() as u8,
);
let second_lv = Lv::from_bytes(&buf[current_idx..]);
assert!(second_lv.is_ok());
let second_lv = second_lv.unwrap();
assert_eq!(second_lv.value_as_str().unwrap().unwrap(), TLV_TEST_STR_1);
assert_eq!(current_idx + second_lv.len_full(), req.len_full());
}
#[test]
fn test_fs_request_deserialization_two_files() {
let req = generic_fs_request_test_two_files(FilestoreActionCode::RenameFile);
let mut buf: [u8; 64] = [0; 64];
req.write_to_bytes(&mut buf).unwrap();
let req_conv_back = FilestoreRequestTlv::from_bytes(&buf);
assert!(req_conv_back.is_ok());
let req_conv_back = req_conv_back.unwrap();
assert_eq!(req_conv_back, req);
}
}

110
src/cfdp/tlv/msg_to_user.rs Normal file

@ -0,0 +1,110 @@
//! Abstractions for the Message to User CFDP TLV subtype.
use super::{Tlv, TlvLvError, TlvType, TlvTypeField};
use crate::ByteConversionError;
use delegate::delegate;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct MsgToUserTlv<'data> {
pub tlv: Tlv<'data>,
}
impl<'data> MsgToUserTlv<'data> {
/// Create a new message to user TLV where the type field is set correctly.
pub fn new(value: &'data [u8]) -> Result<MsgToUserTlv<'data>, TlvLvError> {
Ok(Self {
tlv: Tlv::new(TlvType::MsgToUser, value)?,
})
}
delegate! {
to self.tlv {
pub fn tlv_type_field(&self) -> TlvTypeField;
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
pub fn value(&self) -> &[u8];
/// Helper method to retrieve the length of the value. Simply calls the [slice::len] method of
/// [Self::value]
pub fn len_value(&self) -> usize;
/// Returns the full raw length, including the length byte.
pub fn len_full(&self) -> usize;
/// Checks whether the value field is empty.
pub fn is_empty(&self) -> bool;
/// If the TLV was generated from a raw bytestream using [Self::from_bytes], the raw start
/// of the TLV can be retrieved with this method.
pub fn raw_data(&self) -> Option<&[u8]>;
}
}
pub fn is_standard_tlv(&self) -> bool {
true
}
pub fn tlv_type(&self) -> Option<TlvType> {
Some(TlvType::MsgToUser)
}
/// Check whether this message is a reserved CFDP message like a Proxy Operation Message.
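///
/// Illustrative sketch, not part of the original documentation: reserved messages start with
/// the ASCII prefix "cfdp".
///
/// ```ignore
/// let reserved = MsgToUserTlv::new("cfdp".as_bytes()).unwrap();
/// assert!(reserved.is_reserved_cfdp_msg());
/// let custom = MsgToUserTlv::new(&[1, 2, 3, 4]).unwrap();
/// assert!(!custom.is_reserved_cfdp_msg());
/// ```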
pub fn is_reserved_cfdp_msg(&self) -> bool {
if self.value().len() < 4 {
return false;
}
let value = self.value();
if value[0] == b'c' && value[1] == b'f' && value[2] == b'd' && value[3] == b'p' {
return true;
}
false
}
/// This is a thin wrapper around [Tlv::from_bytes] with the additional type check.
pub fn from_bytes(buf: &'data [u8]) -> Result<MsgToUserTlv<'data>, TlvLvError> {
let msg_to_user = Self {
tlv: Tlv::from_bytes(buf)?,
};
match msg_to_user.tlv_type_field() {
TlvTypeField::Standard(tlv_type) => {
if tlv_type != TlvType::MsgToUser {
return Err(TlvLvError::InvalidTlvTypeField((
tlv_type as u8,
Some(TlvType::MsgToUser as u8),
)));
}
}
TlvTypeField::Custom(raw) => {
return Err(TlvLvError::InvalidTlvTypeField((
raw,
Some(TlvType::MsgToUser as u8),
)));
}
}
Ok(msg_to_user)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_basic() {
let custom_value: [u8; 4] = [1, 2, 3, 4];
let msg_to_user = MsgToUserTlv::new(&custom_value);
assert!(msg_to_user.is_ok());
let msg_to_user = msg_to_user.unwrap();
assert!(msg_to_user.is_standard_tlv());
assert_eq!(msg_to_user.tlv_type().unwrap(), TlvType::MsgToUser);
assert_eq!(msg_to_user.value(), custom_value);
assert_eq!(msg_to_user.value().len(), 4);
assert_eq!(msg_to_user.len_value(), 4);
assert_eq!(msg_to_user.len_full(), 6);
assert!(!msg_to_user.is_empty());
assert!(msg_to_user.raw_data().is_none());
assert!(!msg_to_user.is_reserved_cfdp_msg());
}
#[test]
fn test_reserved_msg() {
let reserved_str = "cfdp";
let msg_to_user = MsgToUserTlv::new(reserved_str.as_bytes());
assert!(msg_to_user.is_ok());
let msg_to_user = msg_to_user.unwrap();
assert!(msg_to_user.is_reserved_cfdp_msg());
}
}


@ -1,12 +1,13 @@
//! Common definitions and helpers required to create PUS TMTC packets according to
//! [ECSS-E-ST-70-41C](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/)
//!
//! You can find the PUS telecommand definitions in the [crate::tc] module and the PUS telemetry definitions
//! inside the [crate::tm] module.
use crate::{ByteConversionError, CcsdsPacket, SizeMissmatch};
//! You can find the PUS telecommand definitions in the [tc] module and the PUS telemetry definitions
//! inside the [tm] module.
use crate::{ByteConversionError, CcsdsPacket, CRC_CCITT_FALSE};
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use core::fmt::{Debug, Display, Formatter};
use core::mem::size_of;
use crc::{Crc, CRC_16_IBM_3740};
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
@ -16,12 +17,11 @@ use std::error::Error;
pub mod event;
pub mod hk;
pub mod scheduling;
pub mod tc;
pub mod tm;
pub mod verification;
pub type CrcType = u16;
/// CRC algorithm used by the PUS standard.
pub const CRC_CCITT_FALSE: Crc<u16> = Crc::<u16>::new(&CRC_16_IBM_3740);
pub const CCSDS_HEADER_LEN: usize = size_of::<crate::zc::SpHeader>();
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
@ -153,7 +153,7 @@ pub enum PusError {
NoRawData,
/// CRC16 needs to be calculated first
CrcCalculationMissing,
ByteConversionError(ByteConversionError),
ByteConversion(ByteConversionError),
}
impl Display for PusError {
@ -177,7 +177,7 @@ impl Display for PusError {
PusError::CrcCalculationMissing => {
write!(f, "crc16 was not calculated")
}
PusError::ByteConversionError(e) => {
PusError::ByteConversion(e) => {
write!(f, "low level byte conversion error: {e}")
}
}
@ -187,7 +187,7 @@ impl Display for PusError {
#[cfg(feature = "std")]
impl Error for PusError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
if let PusError::ByteConversionError(e) = self {
if let PusError::ByteConversion(e) = self {
return Some(e);
}
None
@ -196,7 +196,7 @@ impl Error for PusError {
impl From<ByteConversionError> for PusError {
fn from(e: ByteConversionError) -> Self {
PusError::ByteConversionError(e)
PusError::ByteConversion(e)
}
}
@ -208,8 +208,7 @@ pub trait PusPacket: CcsdsPacket {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn user_data(&self) -> Option<&[u8]>;
fn user_data(&self) -> &[u8];
fn crc16(&self) -> Option<u16>;
}
@ -253,21 +252,29 @@ pub(crate) fn user_data_from_raw(
total_len: usize,
raw_data_len: usize,
slice: &[u8],
) -> Result<Option<&[u8]>, PusError> {
) -> Result<&[u8], PusError> {
match current_idx {
_ if current_idx == total_len - 2 => Ok(None),
_ if current_idx > total_len - 2 => Err(PusError::RawDataTooShort(raw_data_len)),
_ => Ok(Some(&slice[current_idx..total_len - 2])),
_ => Ok(&slice[current_idx..total_len - 2]),
}
}
pub(crate) fn verify_crc16_from_raw(raw_data: &[u8], crc16: u16) -> Result<(), PusError> {
pub(crate) fn verify_crc16_ccitt_false_from_raw_to_pus_error(
raw_data: &[u8],
crc16: u16,
) -> Result<(), PusError> {
verify_crc16_ccitt_false_from_raw(raw_data)
.then(|| ())
.ok_or(PusError::IncorrectCrc(crc16))
}
pub(crate) fn verify_crc16_ccitt_false_from_raw(raw_data: &[u8]) -> bool {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(raw_data);
if digest.finalize() == 0 {
return Ok(());
return true;
}
Err(PusError::IncorrectCrc(crc16))
false
}
macro_rules! ccsds_impl {
@ -291,6 +298,7 @@ macro_rules! sp_header_impls {
}
}
use crate::util::{GenericUnsignedByteField, ToBeBytes, UnsignedEnum};
pub(crate) use ccsds_impl;
pub(crate) use sp_header_impls;
@ -298,66 +306,17 @@ pub(crate) use sp_header_impls;
/// and an unsigned value. The trait makes no assumptions about the actual type of the unsigned
/// value and only requires implementors to implement a function which writes the enumeration into
/// a raw byte format.
pub trait EcssEnumeration {
pub trait EcssEnumeration: UnsignedEnum {
/// Packet Format Code, which denotes the number of bits of the enumeration
fn pfc(&self) -> u8;
fn byte_width(&self) -> usize {
(self.pfc() / 8) as usize
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<(), ByteConversionError>;
}
pub trait EcssEnumerationExt: EcssEnumeration + Debug + Copy + Clone + PartialEq + Eq {}
pub trait ToBeBytes {
type ByteArray: AsRef<[u8]>;
fn to_be_bytes(&self) -> Self::ByteArray;
}
impl ToBeBytes for () {
type ByteArray = [u8; 0];
fn to_be_bytes(&self) -> Self::ByteArray {
[]
}
}
impl ToBeBytes for u8 {
type ByteArray = [u8; 1];
fn to_be_bytes(&self) -> Self::ByteArray {
u8::to_be_bytes(*self)
}
}
impl ToBeBytes for u16 {
type ByteArray = [u8; 2];
fn to_be_bytes(&self) -> Self::ByteArray {
u16::to_be_bytes(*self)
}
}
impl ToBeBytes for u32 {
type ByteArray = [u8; 4];
fn to_be_bytes(&self) -> Self::ByteArray {
u32::to_be_bytes(*self)
}
}
impl ToBeBytes for u64 {
type ByteArray = [u8; 8];
fn to_be_bytes(&self) -> Self::ByteArray {
u64::to_be_bytes(*self)
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct GenericEcssEnumWrapper<TYPE> {
val: TYPE,
field: GenericUnsignedByteField<TYPE>,
}
impl<TYPE> GenericEcssEnumWrapper<TYPE> {
@ -366,7 +325,19 @@ impl<TYPE> GenericEcssEnumWrapper<TYPE> {
}
pub fn new(val: TYPE) -> Self {
Self { val }
Self {
field: GenericUnsignedByteField::new(val),
}
}
}
impl<TYPE: ToBeBytes> UnsignedEnum for GenericEcssEnumWrapper<TYPE> {
fn size(&self) -> usize {
(self.pfc() / 8) as usize
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.field.write_to_be_bytes(buf)
}
}
@ -374,17 +345,6 @@ impl<TYPE: ToBeBytes> EcssEnumeration for GenericEcssEnumWrapper<TYPE> {
fn pfc(&self) -> u8 {
size_of::<TYPE>() as u8 * 8_u8
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<(), ByteConversionError> {
if buf.len() < self.byte_width() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: self.byte_width(),
}));
}
buf[0..self.byte_width()].copy_from_slice(self.val.to_be_bytes().as_ref());
Ok(())
}
}
impl<TYPE: Debug + Copy + Clone + PartialEq + Eq + ToBeBytes> EcssEnumerationExt
@ -397,9 +357,26 @@ pub type EcssEnumU16 = GenericEcssEnumWrapper<u16>;
pub type EcssEnumU32 = GenericEcssEnumWrapper<u32>;
pub type EcssEnumU64 = GenericEcssEnumWrapper<u64>;
/// Generic trait for PUS packet abstractions which can be written to a raw slice as their raw
/// byte representation. This is especially useful for generic abstractions which depend only
/// on the serialization of those packets.
pub trait SerializablePusPacket {
fn len_packed(&self) -> usize;
fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError>;
#[cfg(feature = "alloc")]
fn to_vec(&self) -> Result<Vec<u8>, PusError> {
// This is the correct way to do this. See
// [this issue](https://github.com/rust-lang/rust-clippy/issues/4483) for caveats of more
// "efficient" implementations.
let mut vec = alloc::vec![0; self.len_packed()];
self.write_to_bytes(&mut vec)?;
Ok(vec)
}
}
#[cfg(test)]
mod tests {
use crate::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU8, EcssEnumeration};
use crate::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU8, UnsignedEnum};
use crate::ByteConversionError;
#[test]
@ -431,9 +408,9 @@ mod tests {
assert!(res.is_err());
let error = res.unwrap_err();
match error {
ByteConversionError::ToSliceTooSmall(missmatch) => {
assert_eq!(missmatch.expected, 2);
assert_eq!(missmatch.found, 1);
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(expected, 2);
assert_eq!(found, 1);
}
_ => {
panic!("Unexpected error {:?}", error);
@ -462,9 +439,9 @@ mod tests {
assert!(res.is_err());
let error = res.unwrap_err();
match error {
ByteConversionError::ToSliceTooSmall(missmatch) => {
assert_eq!(missmatch.expected, 4);
assert_eq!(missmatch.found, 3);
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(expected, 4);
assert_eq!(found, 3);
}
_ => {
panic!("Unexpected error {:?}", error);

1128
src/ecss/tc.rs Normal file

File diff suppressed because it is too large

1204
src/ecss/tm.rs Normal file

File diff suppressed because it is too large


@ -7,6 +7,8 @@
//!
//! - Space Packet implementation according to
//! [CCSDS Blue Book 133.0-B-2](https://public.ccsds.org/Pubs/133x0b2e1.pdf)
//! - CCSDS File Delivery Protocol (CFDP) packet implementations according to
//! [CCSDS Blue Book 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf)
//! - PUS Telecommand and PUS Telemetry implementation according to the
//! [ECSS-E-ST-70-41C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
//! - CUC (CCSDS Unsegmented Time Code) implementation according to
@ -60,40 +62,48 @@ extern crate alloc;
extern crate std;
use crate::ecss::CCSDS_HEADER_LEN;
use core::fmt::{Display, Formatter};
use core::{
fmt::{Debug, Display, Formatter},
hash::Hash,
};
use crc::{Crc, CRC_16_IBM_3740};
use delegate::delegate;
#[cfg(feature = "std")]
use std::error::Error;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
pub mod cfdp;
pub mod ecss;
pub mod tc;
pub mod time;
pub mod tm;
pub mod util;
mod private {
pub trait Sealed {}
}
/// CRC algorithm used by the PUS standard, the CCSDS TC standard and the CFDP standard.
pub const CRC_CCITT_FALSE: Crc<u16> = Crc::<u16>::new(&CRC_16_IBM_3740);
pub const MAX_APID: u16 = 2u16.pow(11) - 1;
pub const MAX_SEQ_COUNT: u16 = 2u16.pow(14) - 1;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct SizeMissmatch {
pub found: usize,
pub expected: usize,
}
/// Generic error type when converting to and from raw byte slices.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum ByteConversionError {
/// The passed slice is too small. Returns the passed slice length and expected minimum size
ToSliceTooSmall(SizeMissmatch),
ToSliceTooSmall {
found: usize,
expected: usize,
},
/// The provided buffer is too small. Returns the passed slice length and expected minimum size
FromSliceTooSmall(SizeMissmatch),
FromSliceTooSmall {
found: usize,
expected: usize,
},
/// The [zerocopy] library failed to write to bytes
ZeroCopyToError,
ZeroCopyFromError,
@ -102,18 +112,18 @@ pub enum ByteConversionError {
impl Display for ByteConversionError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
ByteConversionError::ToSliceTooSmall(missmatch) => {
ByteConversionError::ToSliceTooSmall { found, expected } => {
write!(
f,
"target slice with size {} is too small, expected size of at least {}",
missmatch.found, missmatch.expected
found, expected
)
}
ByteConversionError::FromSliceTooSmall(missmatch) => {
ByteConversionError::FromSliceTooSmall { found, expected } => {
write!(
f,
"source slice with size {} too small, expected at least {} bytes",
missmatch.found, missmatch.expected
found, expected
)
}
ByteConversionError::ZeroCopyToError => {
@ -180,7 +190,7 @@ impl TryFrom<u8> for SequenceFlags {
/// Abstraction for the CCSDS Packet ID, which forms the last thirteen bits
/// of the first two bytes in the CCSDS primary header.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[derive(Debug, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PacketId {
pub ptype: PacketType,
@ -188,6 +198,31 @@ pub struct PacketId {
apid: u16,
}
impl PartialEq for PacketId {
fn eq(&self, other: &Self) -> bool {
self.raw().eq(&other.raw())
}
}
impl PartialOrd for PacketId {
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
self.raw().partial_cmp(&other.raw())
}
}
impl Ord for PacketId {
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.raw().cmp(&other.raw())
}
}
impl Hash for PacketId {
fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
let raw = self.raw();
raw.hash(state);
}
}
impl Default for PacketId {
fn default() -> Self {
PacketId {
@ -248,6 +283,7 @@ impl PacketId {
self.apid
}
#[inline]
pub fn raw(&self) -> u16 {
((self.ptype as u16) << 12) | ((self.sec_header_flag as u16) << 11) | self.apid
}
@ -557,10 +593,10 @@ impl SpHeader {
/// CCSDS header.
pub fn from_be_bytes(buf: &[u8]) -> Result<(Self, &[u8]), ByteConversionError> {
if buf.len() < CCSDS_HEADER_LEN {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: CCSDS_HEADER_LEN,
}));
});
}
let zc_header = zc::SpHeader::from_bytes(&buf[0..CCSDS_HEADER_LEN])
.ok_or(ByteConversionError::ZeroCopyFromError)?;
@ -574,10 +610,10 @@ impl SpHeader {
buf: &'a mut [u8],
) -> Result<&'a mut [u8], ByteConversionError> {
if buf.len() < CCSDS_HEADER_LEN {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: CCSDS_HEADER_LEN,
}));
});
}
let zc_header: zc::SpHeader = zc::SpHeader::from(*self);
zc_header
@ -634,9 +670,9 @@ sph_from_other!(SpHeader, crate::zc::SpHeader);
pub mod zc {
use crate::{CcsdsPacket, CcsdsPrimaryHeader, PacketId, PacketSequenceCtrl, VERSION_MASK};
use zerocopy::byteorder::NetworkEndian;
use zerocopy::{AsBytes, FromBytes, Unaligned, U16};
use zerocopy::{AsBytes, FromBytes, FromZeroes, Unaligned, U16};
#[derive(FromBytes, AsBytes, Unaligned, Debug)]
#[derive(FromBytes, FromZeroes, AsBytes, Unaligned, Debug)]
#[repr(C)]
pub struct SpHeader {
version_packet_id: U16<NetworkEndian>,
@ -714,6 +750,8 @@ pub mod zc {
#[cfg(all(test, feature = "std"))]
mod tests {
use std::collections::HashSet;
#[cfg(feature = "serde")]
use crate::CcsdsPrimaryHeader;
use crate::{
@ -1027,4 +1065,22 @@ mod tests {
assert_eq!(sp_header.ptype(), PacketType::Tc);
assert_eq!(sp_header.data_len(), 0);
}
#[test]
fn packet_id_ord_partial_ord() {
let packet_id_small = PacketId::from(1_u16);
let packet_id_larger = PacketId::from(2_u16);
assert!(packet_id_small < packet_id_larger);
assert!(packet_id_larger > packet_id_small);
assert_eq!(
packet_id_small.cmp(&packet_id_larger),
core::cmp::Ordering::Less
);
}
#[test]
fn packet_id_hashable() {
let mut id_set = HashSet::new();
id_set.insert(PacketId::from(1_u16));
}
}

776
src/tc.rs

@ -1,776 +0,0 @@
//! This module contains all components required to create a ECSS PUS C telecommand packets according
//! to [ECSS-E-ST-70-41C](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
//!
//! # Examples
//!
//! ```rust
//! use spacepackets::{CcsdsPacket, SpHeader};
//! use spacepackets::tc::{PusTc, PusTcSecondaryHeader};
//! use spacepackets::ecss::PusPacket;
//!
//! // Create a ping telecommand with no user application data
//! let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
//! let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
//! let pus_tc = PusTc::new(&mut sph, tc_header, None, true);
//! println!("{:?}", pus_tc);
//! assert_eq!(pus_tc.service(), 17);
//! assert_eq!(pus_tc.subservice(), 1);
//! assert_eq!(pus_tc.apid(), 0x02);
//!
//! // Serialize TC into a raw buffer
//! let mut test_buf: [u8; 32] = [0; 32];
//! let size = pus_tc
//! .write_to_bytes(test_buf.as_mut_slice())
//! .expect("Error writing TC to buffer");
//! assert_eq!(size, 13);
//! println!("{:?}", &test_buf[0..size]);
//!
//! // Deserialize from the raw byte representation
//! let pus_tc_deserialized = PusTc::from_bytes(&test_buf).expect("Deserialization failed");
//! assert_eq!(pus_tc.service(), 17);
//! assert_eq!(pus_tc.subservice(), 1);
//! assert_eq!(pus_tc.apid(), 0x02);
//! ```
use crate::ecss::{
ccsds_impl, crc_from_raw_data, crc_procedure, sp_header_impls, user_data_from_raw,
verify_crc16_from_raw, CrcType, PusError, PusPacket, PusVersion, CRC_CCITT_FALSE,
};
use crate::SpHeader;
use crate::{
ByteConversionError, CcsdsPacket, PacketType, SequenceFlags, SizeMissmatch, CCSDS_HEADER_LEN,
};
use core::mem::size_of;
use delegate::delegate;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use zerocopy::AsBytes;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
/// PUS C secondary header length is fixed
pub const PUC_TC_SECONDARY_HEADER_LEN: usize = size_of::<zc::PusTcSecondaryHeader>();
pub const PUS_TC_MIN_LEN_WITHOUT_APP_DATA: usize =
CCSDS_HEADER_LEN + PUC_TC_SECONDARY_HEADER_LEN + size_of::<CrcType>();
const PUS_VERSION: PusVersion = PusVersion::PusC;
#[derive(Copy, Clone, PartialEq, Debug)]
enum AckOpts {
Acceptance = 0b1000,
Start = 0b0100,
Progress = 0b0010,
Completion = 0b0001,
}
pub const ACK_ALL: u8 = AckOpts::Acceptance as u8
| AckOpts::Start as u8
| AckOpts::Progress as u8
| AckOpts::Completion as u8;
pub trait GenericPusTcSecondaryHeader {
fn pus_version(&self) -> PusVersion;
fn ack_flags(&self) -> u8;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn source_id(&self) -> u16;
}
pub mod zc {
use crate::ecss::{PusError, PusVersion};
use crate::tc::GenericPusTcSecondaryHeader;
use zerocopy::{AsBytes, FromBytes, NetworkEndian, Unaligned, U16};
#[derive(FromBytes, AsBytes, Unaligned)]
#[repr(C)]
pub struct PusTcSecondaryHeader {
version_ack: u8,
service: u8,
subservice: u8,
source_id: U16<NetworkEndian>,
}
impl TryFrom<crate::tc::PusTcSecondaryHeader> for PusTcSecondaryHeader {
type Error = PusError;
fn try_from(value: crate::tc::PusTcSecondaryHeader) -> Result<Self, Self::Error> {
if value.version != PusVersion::PusC {
return Err(PusError::VersionNotSupported(value.version));
}
Ok(PusTcSecondaryHeader {
version_ack: ((value.version as u8) << 4) | value.ack,
service: value.service,
subservice: value.subservice,
source_id: U16::from(value.source_id),
})
}
}
impl GenericPusTcSecondaryHeader for PusTcSecondaryHeader {
fn pus_version(&self) -> PusVersion {
PusVersion::try_from(self.version_ack >> 4 & 0b1111).unwrap_or(PusVersion::Invalid)
}
fn ack_flags(&self) -> u8 {
self.version_ack & 0b1111
}
fn service(&self) -> u8 {
self.service
}
fn subservice(&self) -> u8 {
self.subservice
}
fn source_id(&self) -> u16 {
self.source_id.get()
}
}
impl PusTcSecondaryHeader {
pub fn write_to_bytes(&self, slice: &mut [u8]) -> Option<()> {
self.write_to(slice)
}
pub fn from_bytes(slice: &[u8]) -> Option<Self> {
Self::read_from(slice)
}
}
}
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PusTcSecondaryHeader {
pub service: u8,
pub subservice: u8,
pub source_id: u16,
pub ack: u8,
pub version: PusVersion,
}
impl GenericPusTcSecondaryHeader for PusTcSecondaryHeader {
fn pus_version(&self) -> PusVersion {
self.version
}
fn ack_flags(&self) -> u8 {
self.ack
}
fn service(&self) -> u8 {
self.service
}
fn subservice(&self) -> u8 {
self.subservice
}
fn source_id(&self) -> u16 {
self.source_id
}
}
impl TryFrom<zc::PusTcSecondaryHeader> for PusTcSecondaryHeader {
type Error = ();
fn try_from(value: zc::PusTcSecondaryHeader) -> Result<Self, Self::Error> {
Ok(PusTcSecondaryHeader {
service: value.service(),
subservice: value.subservice(),
source_id: value.source_id(),
ack: value.ack_flags(),
version: PUS_VERSION,
})
}
}
impl PusTcSecondaryHeader {
pub fn new_simple(service: u8, subservice: u8) -> Self {
PusTcSecondaryHeader {
service,
subservice,
ack: ACK_ALL,
source_id: 0,
version: PusVersion::PusC,
}
}
pub fn new(service: u8, subservice: u8, ack: u8, source_id: u16) -> Self {
PusTcSecondaryHeader {
service,
subservice,
ack: ack & 0b1111,
source_id,
version: PusVersion::PusC,
}
}
}
/// This struct models the PUS C telecommand packet. It is the primary data structure used to generate the
/// raw byte representation of a PUS telecommand or to deserialize one from raw bytes.
///
/// This struct also derives the [serde::Serialize] and [serde::Deserialize] traits if the
/// [serde] feature is used, which allows sending TC packets around in a raw byte format using a
/// serde provider like [postcard](https://docs.rs/postcard/latest/postcard/).
///
/// There is no spare bytes support yet.
///
/// # Lifetimes
///
/// * `'raw_data` - If the TC is not constructed from a raw slice, this will be the lifetime of
/// the buffer where the user-provided application data will be serialized into. If it
/// is, this is the lifetime of the raw byte slice it is constructed from.
#[derive(Eq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PusTc<'raw_data> {
sp_header: SpHeader,
pub sec_header: PusTcSecondaryHeader,
/// If this is set to false, a manual call to [PusTc::calc_own_crc16] or
/// [PusTc::update_packet_fields] is necessary for the serialized or cached CRC16 to be valid.
pub calc_crc_on_serialization: bool,
#[cfg_attr(feature = "serde", serde(skip))]
raw_data: Option<&'raw_data [u8]>,
app_data: Option<&'raw_data [u8]>,
crc16: Option<u16>,
}
impl<'raw_data> PusTc<'raw_data> {
/// Generates a new struct instance.
///
/// # Arguments
///
/// * `sp_header` - Space packet header information. The correct packet type will be set
/// automatically
/// * `sec_header` - Information contained in the data field header, including the service
/// and subservice type
/// * `app_data` - Custom application data
/// * `set_ccsds_len` - Can be used to automatically update the CCSDS space packet data length
/// field. If this is not set to true, [PusTc::update_ccsds_data_len] can be called to set
/// the correct value to this field manually
pub fn new(
sp_header: &mut SpHeader,
sec_header: PusTcSecondaryHeader,
app_data: Option<&'raw_data [u8]>,
set_ccsds_len: bool,
) -> Self {
sp_header.set_packet_type(PacketType::Tc);
sp_header.set_sec_header_flag();
let mut pus_tc = PusTc {
sp_header: *sp_header,
raw_data: None,
app_data,
sec_header,
calc_crc_on_serialization: true,
crc16: None,
};
if set_ccsds_len {
pus_tc.update_ccsds_data_len();
}
pus_tc
}
/// Simplified version of the [PusTc::new] function which allows to only specify service and
/// subservice instead of the full PUS TC secondary header.
pub fn new_simple(
sph: &mut SpHeader,
service: u8,
subservice: u8,
app_data: Option<&'raw_data [u8]>,
set_ccsds_len: bool,
) -> Self {
Self::new(
sph,
PusTcSecondaryHeader::new(service, subservice, ACK_ALL, 0),
app_data,
set_ccsds_len,
)
}
pub fn sp_header(&self) -> &SpHeader {
&self.sp_header
}
pub fn len_packed(&self) -> usize {
let mut length = PUS_TC_MIN_LEN_WITHOUT_APP_DATA;
if let Some(app_data) = self.app_data {
length += app_data.len();
}
length
}
pub fn set_ack_field(&mut self, ack: u8) -> bool {
if ack > 0b1111 {
return false;
}
self.sec_header.ack = ack & 0b1111;
true
}
pub fn set_source_id(&mut self, source_id: u16) {
self.sec_header.source_id = source_id;
}
sp_header_impls!();
/// Calculates the CCSDS space packet data length field and sets it.
/// This is called automatically if the `set_ccsds_len` argument in the [PusTc::new] call was
/// used.
/// If this was not done or the application data is set or changed after construction,
/// this function needs to be called to ensure that the data length field of the CCSDS header
/// is set correctly.
pub fn update_ccsds_data_len(&mut self) {
self.sp_header.data_len =
self.len_packed() as u16 - size_of::<crate::zc::SpHeader>() as u16 - 1;
}
/// This function should be called before the TC packet is serialized if
/// [PusTc::calc_crc_on_serialization] is set to false. It will calculate and cache the CRC16.
pub fn calc_own_crc16(&mut self) {
let mut digest = CRC_CCITT_FALSE.digest();
let sph_zc = crate::zc::SpHeader::from(self.sp_header);
digest.update(sph_zc.as_bytes());
let pus_tc_header = zc::PusTcSecondaryHeader::try_from(self.sec_header).unwrap();
digest.update(pus_tc_header.as_bytes());
if let Some(app_data) = self.app_data {
digest.update(app_data);
}
self.crc16 = Some(digest.finalize())
}
/// This helper function calls both [PusTc::update_ccsds_data_len] and [PusTc::calc_own_crc16].
pub fn update_packet_fields(&mut self) {
self.update_ccsds_data_len();
self.calc_own_crc16();
}
/// Write the raw PUS byte representation to a provided buffer.
pub fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError> {
let mut curr_idx = 0;
let tc_header_len = size_of::<zc::PusTcSecondaryHeader>();
let total_size = self.len_packed();
if total_size > slice.len() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: slice.len(),
expected: total_size,
})
.into());
}
self.sp_header.write_to_be_bytes(slice)?;
curr_idx += CCSDS_HEADER_LEN;
let sec_header = zc::PusTcSecondaryHeader::try_from(self.sec_header).unwrap();
sec_header
.write_to_bytes(&mut slice[curr_idx..curr_idx + tc_header_len])
.ok_or(ByteConversionError::ZeroCopyToError)?;
curr_idx += tc_header_len;
if let Some(app_data) = self.app_data {
slice[curr_idx..curr_idx + app_data.len()].copy_from_slice(app_data);
curr_idx += app_data.len();
}
let crc16 = crc_procedure(
self.calc_crc_on_serialization,
&self.crc16,
0,
curr_idx,
slice,
)?;
slice[curr_idx..curr_idx + 2].copy_from_slice(crc16.to_be_bytes().as_slice());
curr_idx += 2;
Ok(curr_idx)
}
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn append_to_vec(&self, vec: &mut Vec<u8>) -> Result<usize, PusError> {
let sph_zc = crate::zc::SpHeader::from(self.sp_header);
let mut appended_len = PUS_TC_MIN_LEN_WITHOUT_APP_DATA;
if let Some(app_data) = self.app_data {
appended_len += app_data.len();
};
let start_idx = vec.len();
let mut ser_len = 0;
vec.extend_from_slice(sph_zc.as_bytes());
ser_len += sph_zc.as_bytes().len();
// The PUS version is hardcoded to PUS C
let pus_tc_header = zc::PusTcSecondaryHeader::try_from(self.sec_header).unwrap();
vec.extend_from_slice(pus_tc_header.as_bytes());
ser_len += pus_tc_header.as_bytes().len();
if let Some(app_data) = self.app_data {
vec.extend_from_slice(app_data);
ser_len += app_data.len();
}
let crc16 = crc_procedure(
self.calc_crc_on_serialization,
&self.crc16,
start_idx,
ser_len,
&vec[start_idx..ser_len],
)?;
vec.extend_from_slice(crc16.to_be_bytes().as_slice());
Ok(appended_len)
}
/// Create a [PusTc] instance from a raw slice. On success, it returns a tuple containing
/// the instance and the found byte length of the packet.
pub fn from_bytes(slice: &'raw_data [u8]) -> Result<(Self, usize), PusError> {
let raw_data_len = slice.len();
if raw_data_len < PUS_TC_MIN_LEN_WITHOUT_APP_DATA {
return Err(PusError::RawDataTooShort(raw_data_len));
}
let mut current_idx = 0;
let (sp_header, _) = SpHeader::from_be_bytes(&slice[0..CCSDS_HEADER_LEN])?;
current_idx += CCSDS_HEADER_LEN;
let total_len = sp_header.total_len();
if raw_data_len < total_len || total_len < PUS_TC_MIN_LEN_WITHOUT_APP_DATA {
return Err(PusError::RawDataTooShort(raw_data_len));
}
let sec_header = zc::PusTcSecondaryHeader::from_bytes(
&slice[current_idx..current_idx + PUC_TC_SECONDARY_HEADER_LEN],
)
.ok_or(ByteConversionError::ZeroCopyFromError)?;
current_idx += PUC_TC_SECONDARY_HEADER_LEN;
let raw_data = &slice[0..total_len];
let pus_tc = PusTc {
sp_header,
sec_header: PusTcSecondaryHeader::try_from(sec_header).unwrap(),
raw_data: Some(raw_data),
app_data: user_data_from_raw(current_idx, total_len, raw_data_len, slice)?,
calc_crc_on_serialization: false,
crc16: Some(crc_from_raw_data(raw_data)?),
};
verify_crc16_from_raw(raw_data, pus_tc.crc16.expect("CRC16 invalid"))?;
Ok((pus_tc, total_len))
}
#[deprecated(since = "0.5.2", note = "use raw_bytes() instead")]
pub fn raw(&self) -> Option<&'raw_data [u8]> {
self.raw_bytes()
}
/// If [Self] was constructed using [Self::from_bytes], this function will return the slice it was
/// constructed from. Otherwise, [None] will be returned.
pub fn raw_bytes(&self) -> Option<&'raw_data [u8]> {
self.raw_data
}
}
impl PartialEq for PusTc<'_> {
fn eq(&self, other: &Self) -> bool {
self.sp_header == other.sp_header
&& self.sec_header == other.sec_header
&& self.app_data == other.app_data
}
}
//noinspection RsTraitImplementation
impl CcsdsPacket for PusTc<'_> {
ccsds_impl!();
}
//noinspection RsTraitImplementation
impl PusPacket for PusTc<'_> {
delegate!(to self.sec_header {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
});
fn user_data(&self) -> Option<&[u8]> {
self.app_data
}
fn crc16(&self) -> Option<u16> {
self.crc16
}
}
//noinspection RsTraitImplementation
impl GenericPusTcSecondaryHeader for PusTc<'_> {
delegate!(to self.sec_header {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn source_id(&self) -> u16;
fn ack_flags(&self) -> u8;
});
}
#[cfg(all(test, feature = "std"))]
mod tests {
use crate::ecss::PusVersion::PusC;
use crate::ecss::{PusError, PusPacket};
use crate::tc::ACK_ALL;
use crate::tc::{GenericPusTcSecondaryHeader, PusTc, PusTcSecondaryHeader};
use crate::{ByteConversionError, SpHeader};
use crate::{CcsdsPacket, SequenceFlags};
use alloc::vec::Vec;
fn base_ping_tc_full_ctor() -> PusTc<'static> {
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
PusTc::new(&mut sph, tc_header, None, true)
}
fn base_ping_tc_simple_ctor() -> PusTc<'static> {
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
PusTc::new_simple(&mut sph, 17, 1, None, true)
}
fn base_ping_tc_simple_ctor_with_app_data(app_data: &'static [u8]) -> PusTc<'static> {
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
PusTc::new_simple(&mut sph, 17, 1, Some(app_data), true)
}
#[test]
fn test_tc_fields() {
let pus_tc = base_ping_tc_full_ctor();
assert_eq!(pus_tc.crc16(), None);
verify_test_tc(&pus_tc, false, 13);
}
#[test]
fn test_serialization() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_buf: [u8; 32] = [0; 32];
let size = pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(size, 13);
}
#[test]
fn test_deserialization() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_buf: [u8; 32] = [0; 32];
let size = pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(size, 13);
let (tc_from_raw, size) =
PusTc::from_bytes(&test_buf).expect("Creating PUS TC struct from raw buffer failed");
assert_eq!(size, 13);
verify_test_tc(&tc_from_raw, false, 13);
assert!(tc_from_raw.user_data().is_none());
verify_test_tc_raw(&test_buf);
verify_crc_no_app_data(&test_buf);
}
#[test]
fn test_update_func() {
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
let mut tc = PusTc::new_simple(&mut sph, 17, 1, None, false);
tc.calc_crc_on_serialization = false;
assert_eq!(tc.data_len(), 0);
tc.update_packet_fields();
assert_eq!(tc.data_len(), 6);
}
#[test]
fn test_deserialization_with_app_data() {
let pus_tc = base_ping_tc_simple_ctor_with_app_data(&[1, 2, 3]);
let mut test_buf: [u8; 32] = [0; 32];
let size = pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(size, 16);
let (tc_from_raw, size) =
PusTc::from_bytes(&test_buf).expect("Creating PUS TC struct from raw buffer failed");
assert_eq!(size, 16);
verify_test_tc(&tc_from_raw, true, 16);
let user_data = tc_from_raw.user_data().unwrap();
assert_eq!(user_data[0], 1);
assert_eq!(user_data[1], 2);
assert_eq!(user_data[2], 3);
}
#[test]
fn test_vec_ser_deser() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_vec = Vec::new();
let size = pus_tc
.append_to_vec(&mut test_vec)
.expect("Error writing TC to vector");
assert_eq!(size, 13);
verify_test_tc_raw(&test_vec.as_slice());
verify_crc_no_app_data(&test_vec.as_slice());
}
#[test]
fn test_incorrect_crc() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_buf: [u8; 32] = [0; 32];
pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
test_buf[12] = 0;
let res = PusTc::from_bytes(&test_buf);
assert!(res.is_err());
let err = res.unwrap_err();
assert!(matches!(err, PusError::IncorrectCrc { .. }));
}
#[test]
fn test_manual_crc_calculation() {
let mut pus_tc = base_ping_tc_simple_ctor();
pus_tc.calc_crc_on_serialization = false;
let mut test_buf: [u8; 32] = [0; 32];
pus_tc.calc_own_crc16();
pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
verify_test_tc_raw(&test_buf);
verify_crc_no_app_data(&test_buf);
}
#[test]
fn test_manual_crc_calculation_no_calc_call() {
let mut pus_tc = base_ping_tc_simple_ctor();
pus_tc.calc_crc_on_serialization = false;
let mut test_buf: [u8; 32] = [0; 32];
let res = pus_tc.write_to_bytes(test_buf.as_mut_slice());
assert!(res.is_err());
let err = res.unwrap_err();
assert!(matches!(err, PusError::CrcCalculationMissing { .. }));
}
#[test]
fn test_with_application_data_vec() {
let pus_tc = base_ping_tc_simple_ctor_with_app_data(&[1, 2, 3]);
verify_test_tc(&pus_tc, true, 16);
let mut test_vec = Vec::new();
let size = pus_tc
.append_to_vec(&mut test_vec)
.expect("Error writing TC to vector");
assert_eq!(test_vec[11], 1);
assert_eq!(test_vec[12], 2);
assert_eq!(test_vec[13], 3);
assert_eq!(size, 16);
}
#[test]
fn test_write_buf_too_small() {
let pus_tc = base_ping_tc_simple_ctor();
let mut test_buf = [0; 12];
let res = pus_tc.write_to_bytes(test_buf.as_mut_slice());
assert!(res.is_err());
let err = res.unwrap_err();
match err {
PusError::ByteConversionError(err) => match err {
ByteConversionError::ToSliceTooSmall(missmatch) => {
assert_eq!(missmatch.expected, pus_tc.len_packed());
assert_eq!(missmatch.found, 12);
}
_ => panic!("Unexpected error"),
},
_ => panic!("Unexpected error"),
}
}
#[test]
fn test_with_application_data_buf() {
let pus_tc = base_ping_tc_simple_ctor_with_app_data(&[1, 2, 3]);
verify_test_tc(&pus_tc, true, 16);
let mut test_buf: [u8; 32] = [0; 32];
let size = pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(test_buf[11], 1);
assert_eq!(test_buf[12], 2);
assert_eq!(test_buf[13], 3);
assert_eq!(size, 16);
}
#[test]
fn test_custom_setters() {
let mut pus_tc = base_ping_tc_simple_ctor();
let mut test_buf: [u8; 32] = [0; 32];
pus_tc.set_apid(0x7ff);
pus_tc.set_seq_count(0x3fff);
pus_tc.set_ack_field(0b11);
pus_tc.set_source_id(0xffff);
pus_tc.set_seq_flags(SequenceFlags::Unsegmented);
assert_eq!(pus_tc.source_id(), 0xffff);
assert_eq!(pus_tc.seq_count(), 0x3fff);
assert_eq!(pus_tc.ack_flags(), 0b11);
assert_eq!(pus_tc.apid(), 0x7ff);
assert_eq!(pus_tc.sequence_flags(), SequenceFlags::Unsegmented);
pus_tc.calc_own_crc16();
pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
assert_eq!(test_buf[0], 0x1f);
assert_eq!(test_buf[1], 0xff);
assert_eq!(test_buf[2], 0xff);
assert_eq!(test_buf[3], 0xff);
assert_eq!(test_buf[6], 0x23);
// Source ID 0xffff
assert_eq!(test_buf[9], 0xff);
assert_eq!(test_buf[10], 0xff);
}
fn verify_test_tc(tc: &PusTc, has_user_data: bool, exp_full_len: usize) {
assert_eq!(PusPacket::service(tc), 17);
assert_eq!(PusPacket::subservice(tc), 1);
assert!(tc.sec_header_flag());
assert_eq!(PusPacket::pus_version(tc), PusC);
if !has_user_data {
assert_eq!(tc.user_data(), None);
}
assert_eq!(tc.seq_count(), 0x34);
assert_eq!(tc.source_id(), 0);
assert_eq!(tc.apid(), 0x02);
assert_eq!(tc.ack_flags(), ACK_ALL);
assert_eq!(tc.len_packed(), exp_full_len);
let mut comp_header = SpHeader::tc_unseg(0x02, 0x34, exp_full_len as u16 - 7).unwrap();
comp_header.set_sec_header_flag();
assert_eq!(tc.sp_header, comp_header);
}
fn verify_test_tc_raw(slice: &impl AsRef<[u8]>) {
// Reference comparison implementation:
// https://github.com/us-irs/py-spacepackets/blob/v0.13.0/tests/ecss/test_pus_tc.py
let slice = slice.as_ref();
// Byte 0: version 0, packet type TC, secondary header flag set
assert_eq!(slice[0], 0x18);
// Byte 1: lower byte of the APID (0x02)
assert_eq!(slice[1], 0x02);
// Unsegmented packets
assert_eq!(slice[2], 0xc0);
// Sequence count 0x34
assert_eq!(slice[3], 0x34);
assert_eq!(slice[4], 0x00);
// Space data length of 6 equals total packet length of 13
assert_eq!(slice[5], 0x06);
// PUS Version C 0b0010 and ACK flags 0b1111
assert_eq!(slice[6], 0x2f);
// Service 17
assert_eq!(slice[7], 0x11);
// Subservice 1
assert_eq!(slice[8], 0x01);
// Source ID 0
assert_eq!(slice[9], 0x00);
assert_eq!(slice[10], 0x00);
}
fn verify_crc_no_app_data(slice: &impl AsRef<[u8]>) {
// Reference comparison implementation:
// https://github.com/us-irs/py-spacepackets/blob/v0.13.0/tests/ecss/test_pus_tc.py
let slice = slice.as_ref();
assert_eq!(slice[11], 0xee);
assert_eq!(slice[12], 0x63);
}
#[test]
fn partial_eq_pus_tc() {
// new vs new simple
let pus_tc_1 = base_ping_tc_simple_ctor();
let pus_tc_2 = base_ping_tc_full_ctor();
assert_eq!(pus_tc_1, pus_tc_2);
}
#[test]
fn partial_eq_serialized_vs_deserialized() {
let pus_tc = base_ping_tc_simple_ctor();
let mut buf = [0; 32];
pus_tc.write_to_bytes(&mut buf).unwrap();
assert_eq!(pus_tc, PusTc::from_bytes(&buf).unwrap().0);
}
}
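
For reference, a compact round trip with application data against the `PusTc` API shown in this (now removed) module; the service, subservice, and buffer size are illustrative values:

```rust
use spacepackets::ecss::PusPacket;
use spacepackets::tc::{PusTc, PusTcSecondaryHeader, ACK_ALL};
use spacepackets::SpHeader;

fn main() {
    let app_data: &[u8] = &[1, 2, 3];
    let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
    // Service 8, subservice 128 are arbitrary example values.
    let sec_header = PusTcSecondaryHeader::new(8, 128, ACK_ALL, 0);
    let tc = PusTc::new(&mut sph, sec_header, Some(app_data), true);

    let mut buf = [0u8; 64];
    let written = tc.write_to_bytes(&mut buf).expect("serialization failed");

    // from_bytes returns the packet together with its detected byte length.
    let (tc_read_back, len) = PusTc::from_bytes(&buf).expect("deserialization failed");
    assert_eq!(len, written);
    assert_eq!(tc_read_back.user_data(), Some(app_data));
}
```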


@ -428,14 +428,17 @@ pub fn get_dyn_time_provider_from_bytes(
) -> Result<Box<dyn DynCdsTimeProvider>, TimestampError> {
let time_code = ccsds_time_code_from_p_field(buf[0]);
if let Err(e) = time_code {
return Err(TimestampError::InvalidTimeCode(CcsdsTimeCodes::Cds, e));
return Err(TimestampError::InvalidTimeCode {
expected: CcsdsTimeCodes::Cds,
found: e,
});
}
let time_code = time_code.unwrap();
if time_code != CcsdsTimeCodes::Cds {
return Err(TimestampError::InvalidTimeCode(
CcsdsTimeCodes::Cds,
time_code as u8,
));
return Err(TimestampError::InvalidTimeCode {
expected: CcsdsTimeCodes::Cds,
found: time_code as u8,
});
}
if length_of_day_segment_from_pfield(buf[0]) == LengthOfDaySegment::Short16Bits {
Ok(Box::new(TimeProvider::from_bytes_with_u16_days(buf)?))
@ -511,11 +514,11 @@ impl<ProvidesDaysLen: ProvidesDaysLength> TimeProvider<ProvidesDaysLen> {
days_len: LengthOfDaySegment,
) -> Result<SubmillisPrecision, TimestampError> {
if buf.len() < MIN_CDS_FIELD_LEN {
return Err(TimestampError::ByteConversionError(
ByteConversionError::FromSliceTooSmall(SizeMissmatch {
return Err(TimestampError::ByteConversion(
ByteConversionError::FromSliceTooSmall {
expected: MIN_CDS_FIELD_LEN,
found: buf.len(),
}),
},
));
}
let pfield = buf[0];
@ -523,17 +526,17 @@ impl<ProvidesDaysLen: ProvidesDaysLength> TimeProvider<ProvidesDaysLen> {
Ok(cds_type) => match cds_type {
CcsdsTimeCodes::Cds => (),
_ => {
return Err(TimestampError::InvalidTimeCode(
CcsdsTimeCodes::Cds,
cds_type as u8,
))
return Err(TimestampError::InvalidTimeCode {
expected: CcsdsTimeCodes::Cds,
found: cds_type as u8,
})
}
},
_ => {
return Err(TimestampError::InvalidTimeCode(
CcsdsTimeCodes::Cds,
pfield >> 4 & 0b111,
))
return Err(TimestampError::InvalidTimeCode {
expected: CcsdsTimeCodes::Cds,
found: pfield >> 4 & 0b111,
});
}
};
if ((pfield >> 3) & 0b1) == 1 {
@ -545,11 +548,11 @@ impl<ProvidesDaysLen: ProvidesDaysLength> TimeProvider<ProvidesDaysLen> {
}
let stamp_len = Self::calc_stamp_len(pfield);
if buf.len() < stamp_len {
return Err(TimestampError::ByteConversionError(
ByteConversionError::FromSliceTooSmall(SizeMissmatch {
return Err(TimestampError::ByteConversion(
ByteConversionError::FromSliceTooSmall {
expected: stamp_len,
found: buf.len(),
}),
},
));
}
Ok(precision_from_pfield(pfield))
@ -602,11 +605,11 @@ impl<ProvidesDaysLen: ProvidesDaysLength> TimeProvider<ProvidesDaysLen> {
fn length_check(&self, buf: &[u8], len_as_bytes: usize) -> Result<(), TimestampError> {
if buf.len() < len_as_bytes {
return Err(TimestampError::ByteConversionError(
ByteConversionError::ToSliceTooSmall(SizeMissmatch {
return Err(TimestampError::ByteConversion(
ByteConversionError::ToSliceTooSmall {
expected: len_as_bytes,
found: buf.len(),
}),
},
));
}
Ok(())
@ -671,21 +674,21 @@ impl<ProvidesDaysLen: ProvidesDaysLength> TimeProvider<ProvidesDaysLen> {
fn from_now_generic(days_len: LengthOfDaySegment) -> Result<Self, StdTimestampError> {
let conversion_from_now = ConversionFromNow::new()?;
Self::generic_from_conversion(days_len, conversion_from_now)
.map_err(StdTimestampError::TimestampError)
.map_err(StdTimestampError::Timestamp)
}
#[cfg(feature = "std")]
fn from_now_generic_us_prec(days_len: LengthOfDaySegment) -> Result<Self, StdTimestampError> {
let conversion_from_now = ConversionFromNow::new_with_submillis_us_prec()?;
Self::generic_from_conversion(days_len, conversion_from_now)
.map_err(StdTimestampError::TimestampError)
.map_err(StdTimestampError::Timestamp)
}
#[cfg(feature = "std")]
fn from_now_generic_ps_prec(days_len: LengthOfDaySegment) -> Result<Self, StdTimestampError> {
let conversion_from_now = ConversionFromNow::new_with_submillis_ps_prec()?;
Self::generic_from_conversion(days_len, conversion_from_now)
.map_err(StdTimestampError::TimestampError)
.map_err(StdTimestampError::Timestamp)
}
fn generic_from_conversion<C: CdsConverter>(
@ -694,7 +697,7 @@ impl<ProvidesDaysLen: ProvidesDaysLength> TimeProvider<ProvidesDaysLen> {
) -> Result<Self, TimestampError> {
let ccsds_days: ProvidesDaysLen::FieldType =
converter.ccsds_days_as_u32().try_into().map_err(|_| {
TimestampError::CdsError(CdsError::InvalidCcsdsDays(
TimestampError::Cds(CdsError::InvalidCcsdsDays(
converter.ccsds_days_as_u32().into(),
))
})?;
@ -750,7 +753,7 @@ impl<ProvidesDaysLen: ProvidesDaysLength> TimeProvider<ProvidesDaysLen> {
.ccsds_days
.try_into()
.map_err(|_| {
StdTimestampError::TimestampError(
StdTimestampError::Timestamp(
CdsError::InvalidCcsdsDays(
conversion_from_now.unix_conversion.ccsds_days as i64,
)
@ -788,7 +791,7 @@ impl TimeProvider<DaysLen24Bits> {
/// ## Errors
///
/// This function will return [TimestampError::DateBeforeCcsdsEpoch] or
/// [TimestampError::CdsError] if the time is before the CCSDS epoch (1958-01-01T00:00:00+00:00)
/// [TimestampError::Cds] if the time is before the CCSDS epoch (1958-01-01T00:00:00+00:00)
/// or the CCSDS days value exceeds the allowed bit width (24 bits).
pub fn from_dt_with_u24_days(dt: &DateTime<Utc>) -> Result<Self, TimestampError> {
Self::from_dt_generic(dt, LengthOfDaySegment::Long24Bits)
@ -799,7 +802,7 @@ impl TimeProvider<DaysLen24Bits> {
/// ## Errors
///
/// This function will return [TimestampError::DateBeforeCcsdsEpoch] or
/// [TimestampError::CdsError] if the time is before the CCSDS epoch (1958-01-01T00:00:00+00:00)
/// [TimestampError::Cds] if the time is before the CCSDS epoch (1958-01-01T00:00:00+00:00)
/// or the CCSDS days value exceeds the allowed bit width (24 bits).
pub fn from_unix_secs_with_u24_days(
unix_stamp: &UnixTimestamp,
@ -864,7 +867,7 @@ impl TimeProvider<DaysLen16Bits> {
/// Create a provider from a [`DateTime<Utc>`] struct.
///
/// This function will return a [TimestampError::DateBeforeCcsdsEpoch] or a
/// [TimestampError::CdsError] if the time is before the CCSDS epoch (01-01-1958 00:00:00) or
/// [TimestampError::Cds] if the time is before the CCSDS epoch (01-01-1958 00:00:00) or
/// the CCSDS days value exceeds the allowed bit width (16 bits).
pub fn from_dt_with_u16_days(dt: &DateTime<Utc>) -> Result<Self, TimestampError> {
Self::from_dt_generic(dt, LengthOfDaySegment::Short16Bits)
@ -882,7 +885,7 @@ impl TimeProvider<DaysLen16Bits> {
/// ## Errors
///
/// This function will return [TimestampError::DateBeforeCcsdsEpoch] or
/// [TimestampError::CdsError] if the time is before the CCSDS epoch (1958-01-01T00:00:00+00:00)
/// [TimestampError::Cds] if the time is before the CCSDS epoch (1958-01-01T00:00:00+00:00)
/// or the CCSDS days value exceeds the allowed bit width (16 bits).
pub fn from_unix_secs_with_u16_days(
unix_stamp: &UnixTimestamp,
@ -1302,7 +1305,7 @@ impl TryFrom<TimeProvider<DaysLen24Bits>> for TimeProvider<DaysLen16Bits> {
#[cfg(test)]
mod tests {
use super::*;
use crate::time::TimestampError::{ByteConversionError, InvalidTimeCode};
use crate::time::TimestampError::{ByteConversion, InvalidTimeCode};
use crate::ByteConversionError::{FromSliceTooSmall, ToSliceTooSmall};
use chrono::{Datelike, NaiveDate, Timelike};
#[cfg(feature = "serde")]
@ -1400,8 +1403,7 @@ mod tests {
let faulty_ctor = TimeProvider::<DaysLen16Bits>::from_bytes(&buf);
assert!(faulty_ctor.is_err());
let error = faulty_ctor.unwrap_err();
if let TimestampError::CdsError(CdsError::InvalidCtorForDaysOfLenInPreamble(len_of_day)) =
error
if let TimestampError::Cds(CdsError::InvalidCtorForDaysOfLenInPreamble(len_of_day)) = error
{
assert_eq!(len_of_day, LengthOfDaySegment::Long24Bits);
} else {
@ -1446,9 +1448,9 @@ mod tests {
let res = time_stamper.write_to_bytes(&mut buf[0..i]);
assert!(res.is_err());
match res.unwrap_err() {
ByteConversionError(ToSliceTooSmall(missmatch)) => {
assert_eq!(missmatch.found, i);
assert_eq!(missmatch.expected, 7);
ByteConversion(ToSliceTooSmall { found, expected }) => {
assert_eq!(found, i);
assert_eq!(expected, 7);
}
_ => panic!(
"{}",
@ -1466,10 +1468,10 @@ mod tests {
assert!(res.is_err());
let err = res.unwrap_err();
match err {
ByteConversionError(e) => match e {
FromSliceTooSmall(missmatch) => {
assert_eq!(missmatch.found, i);
assert_eq!(missmatch.expected, 7);
ByteConversion(e) => match e {
FromSliceTooSmall { found, expected } => {
assert_eq!(found, i);
assert_eq!(expected, 7);
}
_ => panic!("{}", format!("Invalid error {:?} detected", e)),
},
@ -1491,9 +1493,9 @@ mod tests {
assert!(res.is_err());
let err = res.unwrap_err();
match err {
InvalidTimeCode(code, raw) => {
assert_eq!(code, CcsdsTimeCodes::Cds);
assert_eq!(raw, 0);
InvalidTimeCode { expected, found } => {
assert_eq!(expected, CcsdsTimeCodes::Cds);
assert_eq!(found, 0);
}
_ => {}
}
@ -1714,11 +1716,12 @@ mod tests {
}
fn generic_dt_case_0_no_prec(subsec_millis: u32) -> DateTime<Utc> {
let naivedatetime_utc = NaiveDate::from_ymd_opt(2023, 01, 14)
NaiveDate::from_ymd_opt(2023, 1, 14)
.unwrap()
.and_hms_milli_opt(16, 49, 30, subsec_millis)
.unwrap();
DateTime::<Utc>::from_utc(naivedatetime_utc, Utc)
.unwrap()
.and_local_timezone(Utc)
.unwrap()
}
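
This and the following hunks replace construction via `DateTime::<Utc>::from_utc` with chrono's `and_local_timezone`. A standalone sketch of the new pattern, with illustrative values rather than the test inputs:

```rust
use chrono::{DateTime, NaiveDate, Utc};

fn example_utc_datetime() -> DateTime<Utc> {
    NaiveDate::from_ymd_opt(2023, 1, 14)
        .unwrap()
        .and_hms_milli_opt(16, 49, 30, 250)
        .unwrap()
        // Returns a LocalResult; for Utc there is never an ambiguous or missing local time.
        .and_local_timezone(Utc)
        .unwrap()
}

fn main() {
    println!("{}", example_utc_datetime());
}
```
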
fn generic_check_dt_case_0<DaysLen: ProvidesDaysLength>(
@ -1762,11 +1765,12 @@ mod tests {
fn generic_dt_case_1_us_prec(subsec_millis: u32) -> DateTime<Utc> {
// 250 ms + 500 us
let subsec_micros = subsec_millis * 1000 + 500;
let naivedatetime_utc = NaiveDate::from_ymd_opt(2023, 01, 14)
NaiveDate::from_ymd_opt(2023, 1, 14)
.unwrap()
.and_hms_micro_opt(16, 49, 30, subsec_micros)
.unwrap();
DateTime::<Utc>::from_utc(naivedatetime_utc, Utc)
.unwrap()
.and_local_timezone(Utc)
.unwrap()
}
fn generic_check_dt_case_1_us_prec<DaysLen: ProvidesDaysLength>(
@ -1813,12 +1817,13 @@ mod tests {
// 250 ms + 500 us
let subsec_nanos = subsec_millis * 1000 * 1000 + 500 * 1000;
let submilli_nanos = subsec_nanos % 10_u32.pow(6);
let naivedatetime_utc = NaiveDate::from_ymd_opt(2023, 01, 14)
.unwrap()
.and_hms_nano_opt(16, 49, 30, subsec_nanos)
.unwrap();
(
DateTime::<Utc>::from_utc(naivedatetime_utc, Utc),
NaiveDate::from_ymd_opt(2023, 1, 14)
.unwrap()
.and_hms_nano_opt(16, 49, 30, subsec_nanos)
.unwrap()
.and_local_timezone(Utc)
.unwrap(),
submilli_nanos,
)
}
@ -1901,11 +1906,12 @@ mod tests {
#[test]
fn test_creation_from_unix_stamp_1() {
let subsec_millis = 250;
let naivedatetime_utc = NaiveDate::from_ymd_opt(2023, 01, 14)
let datetime_utc = NaiveDate::from_ymd_opt(2023, 1, 14)
.unwrap()
.and_hms_milli_opt(16, 49, 30, subsec_millis)
.unwrap()
.and_local_timezone(Utc)
.unwrap();
let datetime_utc = DateTime::<Utc>::from_utc(naivedatetime_utc, Utc);
let time_provider = TimeProvider::from_unix_secs_with_u16_days(&datetime_utc.into())
.expect("creating provider from unix stamp failed");
// https://www.timeanddate.com/date/durationresult.html?d1=01&m1=01&y1=1958&d2=14&m2=01&y2=2023
@ -1943,7 +1949,7 @@ mod tests {
panic!("creation should not succeed")
}
Err(e) => {
if let TimestampError::CdsError(CdsError::InvalidCcsdsDays(days)) = e {
if let TimestampError::Cds(CdsError::InvalidCcsdsDays(days)) = e {
assert_eq!(
days,
unix_to_ccsds_days(invalid_unix_secs / SECONDS_PER_DAY as i64)
@ -2183,11 +2189,12 @@ mod tests {
#[test]
fn test_from_dt_invalid_time() {
// Date before CCSDS epoch
let naivedatetime_utc = NaiveDate::from_ymd_opt(1957, 12, 31)
let datetime_utc = NaiveDate::from_ymd_opt(1957, 12, 31)
.unwrap()
.and_hms_milli_opt(23, 59, 59, 999)
.unwrap()
.and_local_timezone(Utc)
.unwrap();
let datetime_utc = DateTime::<Utc>::from_utc(naivedatetime_utc, Utc);
let time_provider = TimeProvider::from_dt_with_u24_days(&datetime_utc);
assert!(time_provider.is_err());
if let TimestampError::DateBeforeCcsdsEpoch(dt) = time_provider.unwrap_err() {
@ -2202,8 +2209,8 @@ mod tests {
stamp0.write_to_bytes(&mut buf).unwrap();
let stamp1 = TimeProvider::from_bytes_with_u16_days(&buf).unwrap();
assert_eq!(stamp0, stamp1);
assert!(!(stamp0 < stamp1));
assert!(!(stamp1 > stamp0));
assert!(stamp0 >= stamp1);
assert!(stamp1 <= stamp0);
}
#[test]


@ -244,7 +244,7 @@ impl TimeProviderCcsdsEpoch {
let fractions =
fractional_part_from_subsec_ns(fraction_resolution, now.subsec_nanos() as u64);
Self::new_with_fractions(ccsds_epoch as u32, fractions.unwrap())
.map_err(|e| StdTimestampError::TimestampError(e.into()))
.map_err(|e| StdTimestampError::Timestamp(e.into()))
}
/// Updates the current time stamp from the current time. The fractional field width remains
@ -453,37 +453,37 @@ impl TimeReader for TimeProviderCcsdsEpoch {
Self: Sized,
{
if buf.len() < MIN_CUC_LEN {
return Err(TimestampError::ByteConversionError(
ByteConversionError::FromSliceTooSmall(SizeMissmatch {
return Err(TimestampError::ByteConversion(
ByteConversionError::FromSliceTooSmall {
expected: MIN_CUC_LEN,
found: buf.len(),
}),
},
));
}
match ccsds_time_code_from_p_field(buf[0]) {
Ok(code) => {
if code != CcsdsTimeCodes::CucCcsdsEpoch {
return Err(TimestampError::InvalidTimeCode(
CcsdsTimeCodes::CucCcsdsEpoch,
code as u8,
));
return Err(TimestampError::InvalidTimeCode {
expected: CcsdsTimeCodes::CucCcsdsEpoch,
found: code as u8,
});
}
}
Err(raw) => {
return Err(TimestampError::InvalidTimeCode(
CcsdsTimeCodes::CucCcsdsEpoch,
raw,
))
return Err(TimestampError::InvalidTimeCode {
expected: CcsdsTimeCodes::CucCcsdsEpoch,
found: raw,
});
}
}
let (cntr_len, fractions_len, total_len) =
Self::len_components_and_total_from_pfield(buf[0]);
if buf.len() < total_len {
return Err(TimestampError::ByteConversionError(
ByteConversionError::FromSliceTooSmall(SizeMissmatch {
return Err(TimestampError::ByteConversion(
ByteConversionError::FromSliceTooSmall {
expected: total_len,
found: buf.len(),
}),
},
));
}
let mut current_idx = 1;
@ -535,11 +535,11 @@ impl TimeWriter for TimeProviderCcsdsEpoch {
fn write_to_bytes(&self, bytes: &mut [u8]) -> Result<usize, TimestampError> {
// Cross check the sizes of the counters against byte widths in the ctor
if bytes.len() < self.len_as_bytes() {
return Err(TimestampError::ByteConversionError(
ByteConversionError::ToSliceTooSmall(SizeMissmatch {
return Err(TimestampError::ByteConversion(
ByteConversionError::ToSliceTooSmall {
found: bytes.len(),
expected: self.len_as_bytes(),
}),
},
));
}
bytes[0] = self.pfield;
@ -797,11 +797,13 @@ mod tests {
let res = TimeProviderCcsdsEpoch::from_bytes(&buf[0..i]);
assert!(res.is_err());
let err = res.unwrap_err();
if let TimestampError::ByteConversionError(ByteConversionError::FromSliceTooSmall(e)) =
err
if let TimestampError::ByteConversion(ByteConversionError::FromSliceTooSmall {
found,
expected,
}) = err
{
assert_eq!(e.found, i);
assert_eq!(e.expected, 2);
assert_eq!(found, i);
assert_eq!(expected, 2);
}
}
let large_stamp = TimeProviderCcsdsEpoch::new_with_fine_fractions(22, 300).unwrap();
@ -810,11 +812,13 @@ mod tests {
let res = TimeProviderCcsdsEpoch::from_bytes(&buf[0..i]);
assert!(res.is_err());
let err = res.unwrap_err();
if let TimestampError::ByteConversionError(ByteConversionError::FromSliceTooSmall(e)) =
err
if let TimestampError::ByteConversion(ByteConversionError::FromSliceTooSmall {
found,
expected,
}) = err
{
assert_eq!(e.found, i);
assert_eq!(e.expected, large_stamp.len_as_bytes());
assert_eq!(found, i);
assert_eq!(expected, large_stamp.len_as_bytes());
}
}
}
@ -886,11 +890,13 @@ mod tests {
let err = cuc.write_to_bytes(&mut buf[0..i]);
assert!(err.is_err());
let err = err.unwrap_err();
if let TimestampError::ByteConversionError(ByteConversionError::ToSliceTooSmall(e)) =
err
if let TimestampError::ByteConversion(ByteConversionError::ToSliceTooSmall {
found,
expected,
}) = err
{
assert_eq!(e.expected, cuc.len_as_bytes());
assert_eq!(e.found, i);
assert_eq!(expected, cuc.len_as_bytes());
assert_eq!(found, i);
} else {
panic!("unexpected error: {}", err);
}
@ -903,9 +909,9 @@ mod tests {
let res = TimeProviderCcsdsEpoch::from_bytes(&buf);
assert!(res.is_err());
let err = res.unwrap_err();
if let TimestampError::InvalidTimeCode(code, raw) = err {
assert_eq!(code, CcsdsTimeCodes::CucCcsdsEpoch);
assert_eq!(raw, CcsdsTimeCodes::CucAgencyEpoch as u8);
if let TimestampError::InvalidTimeCode { expected, found } = err {
assert_eq!(expected, CcsdsTimeCodes::CucCcsdsEpoch);
assert_eq!(found, CcsdsTimeCodes::CucAgencyEpoch as u8);
} else {
panic!("unexpected error: {}", err);
}


@ -1,10 +1,11 @@
//! CCSDS Time Code Formats according to [CCSDS 301.0-B-4](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
use crate::{ByteConversionError, SizeMissmatch};
use crate::ByteConversionError;
use chrono::{DateTime, LocalResult, TimeZone, Utc};
use core::cmp::Ordering;
use core::fmt::{Display, Formatter};
use core::ops::{Add, AddAssign};
use core::time::Duration;
use core::u8;
#[allow(unused_imports)]
#[cfg(not(feature = "std"))]
@ -16,6 +17,8 @@ use serde::{Deserialize, Serialize};
use std::error::Error;
#[cfg(feature = "std")]
use std::time::{SystemTime, SystemTimeError};
#[cfg(feature = "std")]
pub use std_mod::*;
pub mod ascii;
pub mod cds;
@ -61,66 +64,30 @@ pub fn ccsds_time_code_from_p_field(pfield: u8) -> Result<CcsdsTimeCodes, u8> {
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[non_exhaustive]
pub enum TimestampError {
/// Contains tuple where first value is the expected time code and the second
/// value is the found raw value
InvalidTimeCode(CcsdsTimeCodes, u8),
ByteConversionError(ByteConversionError),
CdsError(cds::CdsError),
CucError(cuc::CucError),
InvalidTimeCode { expected: CcsdsTimeCodes, found: u8 },
ByteConversion(ByteConversionError),
Cds(cds::CdsError),
Cuc(cuc::CucError),
DateBeforeCcsdsEpoch(DateTime<Utc>),
CustomEpochNotSupported,
}
impl From<cds::CdsError> for TimestampError {
fn from(e: cds::CdsError) -> Self {
TimestampError::CdsError(e)
}
}
impl From<cuc::CucError> for TimestampError {
fn from(e: cuc::CucError) -> Self {
TimestampError::CucError(e)
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[derive(Debug, Clone)]
pub enum StdTimestampError {
SystemTimeError(SystemTimeError),
TimestampError(TimestampError),
}
#[cfg(feature = "std")]
impl From<TimestampError> for StdTimestampError {
fn from(v: TimestampError) -> Self {
Self::TimestampError(v)
}
}
#[cfg(feature = "std")]
impl From<SystemTimeError> for StdTimestampError {
fn from(v: SystemTimeError) -> Self {
Self::SystemTimeError(v)
}
}
impl Display for TimestampError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
TimestampError::InvalidTimeCode(time_code, raw_val) => {
TimestampError::InvalidTimeCode { expected, found } => {
write!(
f,
"invalid raw time code value {raw_val} for time code {time_code:?}"
"invalid raw time code value {found} for time code {expected:?}"
)
}
TimestampError::CdsError(e) => {
TimestampError::Cds(e) => {
write!(f, "cds error {e}")
}
TimestampError::CucError(e) => {
TimestampError::Cuc(e) => {
write!(f, "cuc error {e}")
}
TimestampError::ByteConversionError(e) => {
TimestampError::ByteConversion(e) => {
write!(f, "byte conversion error {e}")
}
TimestampError::DateBeforeCcsdsEpoch(e) => {
@ -137,13 +104,40 @@ impl Display for TimestampError {
impl Error for TimestampError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
TimestampError::ByteConversionError(e) => Some(e),
TimestampError::CdsError(e) => Some(e),
TimestampError::CucError(e) => Some(e),
TimestampError::ByteConversion(e) => Some(e),
TimestampError::Cds(e) => Some(e),
TimestampError::Cuc(e) => Some(e),
_ => None,
}
}
}
impl From<cds::CdsError> for TimestampError {
fn from(e: cds::CdsError) -> Self {
TimestampError::Cds(e)
}
}
impl From<cuc::CucError> for TimestampError {
fn from(e: cuc::CucError) -> Self {
TimestampError::Cuc(e)
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod std_mod {
use crate::time::TimestampError;
use std::time::SystemTimeError;
use thiserror::Error;
#[derive(Debug, Clone, Error)]
pub enum StdTimestampError {
#[error("system time error: {0}")]
SystemTime(#[from] SystemTimeError),
#[error("timestamp error: {0}")]
Timestamp(#[from] TimestampError),
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
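
The refactoring above renames the error variants (`CdsError` to `Cds`, `CucError` to `Cuc`, `ByteConversionError` to `ByteConversion`) and gives `InvalidTimeCode` named fields. A brief sketch of matching on the new shape; the `describe` helper is illustrative only and assumes the types are exported from `spacepackets::time`:

```rust
use spacepackets::time::{CcsdsTimeCodes, TimestampError};

// Illustrative error-reporting helper for the refactored variants.
fn describe(e: &TimestampError) -> String {
    match e {
        TimestampError::InvalidTimeCode { expected, found } => {
            format!("expected time code {expected:?}, found raw value {found}")
        }
        TimestampError::ByteConversion(inner) => format!("byte conversion failed: {inner}"),
        TimestampError::Cds(inner) => format!("CDS error: {inner}"),
        TimestampError::Cuc(inner) => format!("CUC error: {inner}"),
        // The enum is non_exhaustive, so a catch-all arm is required.
        other => format!("{other}"),
    }
}

fn main() {
    let err = TimestampError::InvalidTimeCode {
        expected: CcsdsTimeCodes::Cds,
        found: 0,
    };
    println!("{}", describe(&err));
}
```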

src/tm.rs

@ -1,715 +0,0 @@
//! This module contains all components required to create ECSS PUS C telemetry packets according
//! to [ECSS-E-ST-70-41C](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
use crate::ecss::{
ccsds_impl, crc_from_raw_data, crc_procedure, sp_header_impls, user_data_from_raw,
verify_crc16_from_raw, CrcType, PusError, PusPacket, PusVersion, CRC_CCITT_FALSE,
};
use crate::{
ByteConversionError, CcsdsPacket, PacketType, SequenceFlags, SizeMissmatch, SpHeader,
CCSDS_HEADER_LEN,
};
use core::mem::size_of;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use zerocopy::AsBytes;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use delegate::delegate;
/// Length without timestamp
pub const PUC_TM_MIN_SEC_HEADER_LEN: usize = 7;
pub const PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA: usize =
CCSDS_HEADER_LEN + PUC_TM_MIN_SEC_HEADER_LEN + size_of::<CrcType>();
pub trait GenericPusTmSecondaryHeader {
fn pus_version(&self) -> PusVersion;
fn sc_time_ref_status(&self) -> u8;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn msg_counter(&self) -> u16;
fn dest_id(&self) -> u16;
}
pub mod zc {
use super::GenericPusTmSecondaryHeader;
use crate::ecss::{PusError, PusVersion};
use zerocopy::{AsBytes, FromBytes, NetworkEndian, Unaligned, U16};
#[derive(FromBytes, AsBytes, Unaligned)]
#[repr(C)]
pub struct PusTmSecHeaderWithoutTimestamp {
pus_version_and_sc_time_ref_status: u8,
service: u8,
subservice: u8,
msg_counter: U16<NetworkEndian>,
dest_id: U16<NetworkEndian>,
}
pub struct PusTmSecHeader<'slice> {
pub(crate) zc_header: PusTmSecHeaderWithoutTimestamp,
pub(crate) timestamp: Option<&'slice [u8]>,
}
impl TryFrom<crate::tm::PusTmSecondaryHeader<'_>> for PusTmSecHeaderWithoutTimestamp {
type Error = PusError;
fn try_from(header: crate::tm::PusTmSecondaryHeader) -> Result<Self, Self::Error> {
if header.pus_version != PusVersion::PusC {
return Err(PusError::VersionNotSupported(header.pus_version));
}
Ok(PusTmSecHeaderWithoutTimestamp {
pus_version_and_sc_time_ref_status: ((header.pus_version as u8) << 4)
| header.sc_time_ref_status,
service: header.service,
subservice: header.subservice,
msg_counter: U16::from(header.msg_counter),
dest_id: U16::from(header.dest_id),
})
}
}
impl PusTmSecHeaderWithoutTimestamp {
pub fn write_to_bytes(&self, slice: &mut [u8]) -> Option<()> {
self.write_to(slice)
}
pub fn from_bytes(slice: &[u8]) -> Option<Self> {
Self::read_from(slice)
}
}
impl GenericPusTmSecondaryHeader for PusTmSecHeaderWithoutTimestamp {
fn pus_version(&self) -> PusVersion {
PusVersion::try_from(self.pus_version_and_sc_time_ref_status >> 4 & 0b1111)
.unwrap_or(PusVersion::Invalid)
}
fn sc_time_ref_status(&self) -> u8 {
self.pus_version_and_sc_time_ref_status & 0b1111
}
fn service(&self) -> u8 {
self.service
}
fn subservice(&self) -> u8 {
self.subservice
}
fn msg_counter(&self) -> u16 {
self.msg_counter.get()
}
fn dest_id(&self) -> u16 {
self.dest_id.get()
}
}
}
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PusTmSecondaryHeader<'stamp> {
pus_version: PusVersion,
pub sc_time_ref_status: u8,
pub service: u8,
pub subservice: u8,
pub msg_counter: u16,
pub dest_id: u16,
pub timestamp: Option<&'stamp [u8]>,
}
impl<'stamp> PusTmSecondaryHeader<'stamp> {
pub fn new_simple(service: u8, subservice: u8, timestamp: &'stamp [u8]) -> Self {
Self::new(service, subservice, 0, 0, Some(timestamp))
}
/// Like [Self::new_simple] but without a timestamp.
pub fn new_simple_no_timestamp(service: u8, subservice: u8) -> Self {
Self::new(service, subservice, 0, 0, None)
}
pub fn new(
service: u8,
subservice: u8,
msg_counter: u16,
dest_id: u16,
timestamp: Option<&'stamp [u8]>,
) -> Self {
PusTmSecondaryHeader {
pus_version: PusVersion::PusC,
sc_time_ref_status: 0,
service,
subservice,
msg_counter,
dest_id,
timestamp,
}
}
}
impl GenericPusTmSecondaryHeader for PusTmSecondaryHeader<'_> {
fn pus_version(&self) -> PusVersion {
self.pus_version
}
fn sc_time_ref_status(&self) -> u8 {
self.sc_time_ref_status
}
fn service(&self) -> u8 {
self.service
}
fn subservice(&self) -> u8 {
self.subservice
}
fn msg_counter(&self) -> u16 {
self.msg_counter
}
fn dest_id(&self) -> u16 {
self.dest_id
}
}
impl<'slice> TryFrom<zc::PusTmSecHeader<'slice>> for PusTmSecondaryHeader<'slice> {
type Error = ();
fn try_from(sec_header: zc::PusTmSecHeader<'slice>) -> Result<Self, Self::Error> {
Ok(PusTmSecondaryHeader {
pus_version: sec_header.zc_header.pus_version(),
sc_time_ref_status: sec_header.zc_header.sc_time_ref_status(),
service: sec_header.zc_header.service(),
subservice: sec_header.zc_header.subservice(),
msg_counter: sec_header.zc_header.msg_counter(),
dest_id: sec_header.zc_header.dest_id(),
timestamp: sec_header.timestamp,
})
}
}
/// This struct models the PUS C telemetry packet. It is the primary data structure used to generate the
/// raw byte representation of PUS telemetry or to deserialize one from raw bytes.
///
/// This struct also derives the [serde::Serialize] and [serde::Deserialize] traits if the [serde]
/// feature is used, which allows sending TM packets around in a raw byte format using a serde
/// provider like [postcard](https://docs.rs/postcard/latest/postcard/).
///
/// There is no spare bytes support yet.
///
/// # Lifetimes
///
/// * `'raw_data` - If the TM is not constructed from a raw slice, this will be the lifetime of
/// the buffer where the user-provided time stamp and source data will be serialized into. If it
/// is, this is the lifetime of the raw byte slice it is constructed from.
#[derive(Eq, Debug, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PusTm<'raw_data> {
pub sp_header: SpHeader,
pub sec_header: PusTmSecondaryHeader<'raw_data>,
/// If this is set to false, a manual call to [PusTm::calc_own_crc16] or
/// [PusTm::update_packet_fields] is necessary for the serialized or cached CRC16 to be valid.
pub calc_crc_on_serialization: bool,
#[cfg_attr(feature = "serde", serde(skip))]
raw_data: Option<&'raw_data [u8]>,
source_data: Option<&'raw_data [u8]>,
crc16: Option<u16>,
}
impl<'raw_data> PusTm<'raw_data> {
/// Generates a new struct instance.
///
/// # Arguments
///
/// * `sp_header` - Space packet header information. The correct packet type will be set
/// automatically
/// * `sec_header` - Information contained in the secondary header, including the service
/// and subservice type
/// * `app_data` - Custom application data
/// * `set_ccsds_len` - Can be used to automatically update the CCSDS space packet data length
/// field. If this is not set to true, [PusTm::update_ccsds_data_len] can be called to set
/// the correct value to this field manually
pub fn new(
sp_header: &mut SpHeader,
sec_header: PusTmSecondaryHeader<'raw_data>,
source_data: Option<&'raw_data [u8]>,
set_ccsds_len: bool,
) -> Self {
sp_header.set_packet_type(PacketType::Tm);
sp_header.set_sec_header_flag();
let mut pus_tm = PusTm {
sp_header: *sp_header,
raw_data: None,
source_data,
sec_header,
calc_crc_on_serialization: true,
crc16: None,
};
if set_ccsds_len {
pus_tm.update_ccsds_data_len();
}
pus_tm
}
pub fn len_packed(&self) -> usize {
let mut length = PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA;
if let Some(timestamp) = self.sec_header.timestamp {
length += timestamp.len();
}
if let Some(src_data) = self.source_data {
length += src_data.len();
}
length
}
pub fn timestamp(&self) -> Option<&'raw_data [u8]> {
self.sec_header.timestamp
}
pub fn source_data(&self) -> Option<&'raw_data [u8]> {
self.source_data
}
pub fn set_dest_id(&mut self, dest_id: u16) {
self.sec_header.dest_id = dest_id;
}
pub fn set_msg_counter(&mut self, msg_counter: u16) {
self.sec_header.msg_counter = msg_counter
}
pub fn set_sc_time_ref_status(&mut self, sc_time_ref_status: u8) {
self.sec_header.sc_time_ref_status = sc_time_ref_status & 0b1111;
}
sp_header_impls!();
/// This is called automatically if the `set_ccsds_len` argument in the [PusTm::new] call was
/// used.
/// If this was not done or the time stamp or source data is set or changed after construction,
/// this function needs to be called to ensure that the data length field of the CCSDS header
/// is set correctly.
pub fn update_ccsds_data_len(&mut self) {
self.sp_header.data_len =
self.len_packed() as u16 - size_of::<crate::zc::SpHeader>() as u16 - 1;
}
/// This function should be called before the TM packet is serialized if
/// [PusTm::calc_crc_on_serialization] is set to false. It will calculate and cache the CRC16.
pub fn calc_own_crc16(&mut self) {
let mut digest = CRC_CCITT_FALSE.digest();
let sph_zc = crate::zc::SpHeader::from(self.sp_header);
digest.update(sph_zc.as_bytes());
let pus_tc_header = zc::PusTmSecHeaderWithoutTimestamp::try_from(self.sec_header).unwrap();
digest.update(pus_tc_header.as_bytes());
if let Some(stamp) = self.sec_header.timestamp {
digest.update(stamp);
}
if let Some(src_data) = self.source_data {
digest.update(src_data);
}
self.crc16 = Some(digest.finalize())
}
/// This helper function calls both [PusTm::update_ccsds_data_len] and [PusTm::calc_own_crc16].
pub fn update_packet_fields(&mut self) {
self.update_ccsds_data_len();
self.calc_own_crc16();
}
/// Write the raw PUS byte representation to a provided buffer.
pub fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError> {
let mut curr_idx = 0;
let total_size = self.len_packed();
if total_size > slice.len() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: slice.len(),
expected: total_size,
})
.into());
}
self.sp_header
.write_to_be_bytes(&mut slice[0..CCSDS_HEADER_LEN])?;
curr_idx += CCSDS_HEADER_LEN;
let sec_header_len = size_of::<zc::PusTmSecHeaderWithoutTimestamp>();
let sec_header = zc::PusTmSecHeaderWithoutTimestamp::try_from(self.sec_header).unwrap();
sec_header
.write_to_bytes(&mut slice[curr_idx..curr_idx + sec_header_len])
.ok_or(ByteConversionError::ZeroCopyToError)?;
curr_idx += sec_header_len;
if let Some(timestamp) = self.sec_header.timestamp {
let timestamp_len = timestamp.len();
slice[curr_idx..curr_idx + timestamp_len].copy_from_slice(timestamp);
curr_idx += timestamp_len;
}
if let Some(src_data) = self.source_data {
slice[curr_idx..curr_idx + src_data.len()].copy_from_slice(src_data);
curr_idx += src_data.len();
}
let crc16 = crc_procedure(
self.calc_crc_on_serialization,
&self.crc16,
0,
curr_idx,
slice,
)?;
slice[curr_idx..curr_idx + 2].copy_from_slice(crc16.to_be_bytes().as_slice());
curr_idx += 2;
Ok(curr_idx)
}
/// Append the raw PUS byte representation to a provided [alloc::vec::Vec]
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn append_to_vec(&self, vec: &mut Vec<u8>) -> Result<usize, PusError> {
let sph_zc = crate::zc::SpHeader::from(self.sp_header);
let mut appended_len = PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA;
if let Some(timestamp) = self.sec_header.timestamp {
appended_len += timestamp.len();
}
if let Some(src_data) = self.source_data {
appended_len += src_data.len();
};
let start_idx = vec.len();
let mut ser_len = 0;
vec.extend_from_slice(sph_zc.as_bytes());
ser_len += sph_zc.as_bytes().len();
// The PUS version is hardcoded to PUS C
let sec_header = zc::PusTmSecHeaderWithoutTimestamp::try_from(self.sec_header).unwrap();
vec.extend_from_slice(sec_header.as_bytes());
ser_len += sec_header.as_bytes().len();
if let Some(timestamp) = self.sec_header.timestamp {
ser_len += timestamp.len();
vec.extend_from_slice(timestamp);
}
if let Some(src_data) = self.source_data {
vec.extend_from_slice(src_data);
ser_len += src_data.len();
}
let crc16 = crc_procedure(
self.calc_crc_on_serialization,
&self.crc16,
start_idx,
ser_len,
&vec[start_idx..start_idx + ser_len],
)?;
vec.extend_from_slice(crc16.to_be_bytes().as_slice());
Ok(appended_len)
}
/// Create a [PusTm] instance from a raw slice. On success, it returns a tuple containing
/// the instance and the found byte length of the packet. The timestamp length needs to be
/// known beforehand.
pub fn from_bytes(
slice: &'raw_data [u8],
timestamp_len: usize,
) -> Result<(Self, usize), PusError> {
let raw_data_len = slice.len();
if raw_data_len < PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA {
return Err(PusError::RawDataTooShort(raw_data_len));
}
let mut current_idx = 0;
let (sp_header, _) = SpHeader::from_be_bytes(&slice[0..CCSDS_HEADER_LEN])?;
current_idx += 6;
let total_len = sp_header.total_len();
if raw_data_len < total_len || total_len < PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA {
return Err(PusError::RawDataTooShort(raw_data_len));
}
let sec_header_zc = zc::PusTmSecHeaderWithoutTimestamp::from_bytes(
&slice[current_idx..current_idx + PUC_TM_MIN_SEC_HEADER_LEN],
)
.ok_or(ByteConversionError::ZeroCopyFromError)?;
current_idx += PUC_TM_MIN_SEC_HEADER_LEN;
let mut timestamp = None;
if timestamp_len > 0 {
timestamp = Some(&slice[current_idx..current_idx + timestamp_len]);
}
let zc_sec_header_wrapper = zc::PusTmSecHeader {
zc_header: sec_header_zc,
timestamp,
};
current_idx += timestamp_len;
let raw_data = &slice[0..total_len];
let pus_tm = PusTm {
sp_header,
sec_header: PusTmSecondaryHeader::try_from(zc_sec_header_wrapper).unwrap(),
raw_data: Some(&slice[0..total_len]),
source_data: user_data_from_raw(current_idx, total_len, raw_data_len, slice)?,
calc_crc_on_serialization: false,
crc16: Some(crc_from_raw_data(raw_data)?),
};
verify_crc16_from_raw(raw_data, pus_tm.crc16.expect("CRC16 invalid"))?;
Ok((pus_tm, total_len))
}
/// If [Self] was constructed using [Self::from_bytes], this function will return the slice it was
/// constructed from. Otherwise, [None] will be returned.
pub fn raw_bytes(&self) -> Option<&'raw_data [u8]> {
self.raw_data
}
}
impl PartialEq for PusTm<'_> {
fn eq(&self, other: &Self) -> bool {
self.sp_header == other.sp_header
&& self.sec_header == other.sec_header
&& self.source_data == other.source_data
}
}
//noinspection RsTraitImplementation
impl CcsdsPacket for PusTm<'_> {
ccsds_impl!();
}
//noinspection RsTraitImplementation
impl PusPacket for PusTm<'_> {
delegate!(to self.sec_header {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
});
fn user_data(&self) -> Option<&[u8]> {
self.source_data
}
fn crc16(&self) -> Option<u16> {
self.crc16
}
}
//noinspection RsTraitImplementation
impl GenericPusTmSecondaryHeader for PusTm<'_> {
delegate!(to self.sec_header {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn dest_id(&self) -> u16;
fn msg_counter(&self) -> u16;
fn sc_time_ref_status(&self) -> u8;
});
}
#[cfg(test)]
mod tests {
use super::*;
use crate::ecss::PusVersion::PusC;
use crate::SpHeader;
fn base_ping_reply_full_ctor(timestamp: &[u8]) -> PusTm {
let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
let tm_header = PusTmSecondaryHeader::new_simple(17, 2, &timestamp);
PusTm::new(&mut sph, tm_header, None, true)
}
fn base_hk_reply<'a>(timestamp: &'a [u8], src_data: &'a [u8]) -> PusTm<'a> {
let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
let tc_header = PusTmSecondaryHeader::new_simple(3, 5, &timestamp);
PusTm::new(&mut sph, tc_header, Some(src_data), true)
}
fn dummy_timestamp() -> &'static [u8] {
return &[0, 1, 2, 3, 4, 5, 6];
}
#[test]
fn test_basic() {
let timestamp = dummy_timestamp();
let pus_tm = base_ping_reply_full_ctor(&timestamp);
verify_ping_reply(&pus_tm, false, 22, dummy_timestamp());
}
#[test]
fn test_serialization_no_source_data() {
let timestamp = dummy_timestamp();
let pus_tm = base_ping_reply_full_ctor(&timestamp);
let mut buf: [u8; 32] = [0; 32];
let ser_len = pus_tm
.write_to_bytes(&mut buf)
.expect("Serialization failed");
assert_eq!(ser_len, 22);
verify_raw_ping_reply(&buf);
}
#[test]
fn test_serialization_with_source_data() {
let src_data = [1, 2, 3];
let hk_reply = base_hk_reply(dummy_timestamp(), &src_data);
let mut buf: [u8; 32] = [0; 32];
let ser_len = hk_reply
.write_to_bytes(&mut buf)
.expect("Serialization failed");
assert_eq!(ser_len, 25);
assert_eq!(buf[20], 1);
assert_eq!(buf[21], 2);
assert_eq!(buf[22], 3);
}
#[test]
fn test_setters() {
let timestamp = dummy_timestamp();
let mut pus_tm = base_ping_reply_full_ctor(&timestamp);
pus_tm.set_sc_time_ref_status(0b1010);
pus_tm.set_dest_id(0x7fff);
pus_tm.set_msg_counter(0x1f1f);
assert_eq!(pus_tm.sc_time_ref_status(), 0b1010);
assert_eq!(pus_tm.dest_id(), 0x7fff);
assert_eq!(pus_tm.msg_counter(), 0x1f1f);
assert!(pus_tm.set_apid(0x7ff));
assert_eq!(pus_tm.apid(), 0x7ff);
}
#[test]
fn test_deserialization_no_source_data() {
let timestamp = dummy_timestamp();
let pus_tm = base_ping_reply_full_ctor(&timestamp);
let mut buf: [u8; 32] = [0; 32];
let ser_len = pus_tm
.write_to_bytes(&mut buf)
.expect("Serialization failed");
assert_eq!(ser_len, 22);
let (tm_deserialized, size) = PusTm::from_bytes(&buf, 7).expect("Deserialization failed");
assert_eq!(ser_len, size);
verify_ping_reply(&tm_deserialized, false, 22, dummy_timestamp());
}
#[test]
fn test_manual_field_update() {
let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
let tc_header = PusTmSecondaryHeader::new_simple(17, 2, dummy_timestamp());
let mut tm = PusTm::new(&mut sph, tc_header, None, false);
tm.calc_crc_on_serialization = false;
assert_eq!(tm.data_len(), 0x00);
let mut buf: [u8; 32] = [0; 32];
let res = tm.write_to_bytes(&mut buf);
assert!(res.is_err());
assert!(matches!(res.unwrap_err(), PusError::CrcCalculationMissing));
tm.update_ccsds_data_len();
assert_eq!(tm.data_len(), 15);
tm.calc_own_crc16();
let res = tm.write_to_bytes(&mut buf);
assert!(res.is_ok());
tm.sp_header.data_len = 0;
tm.update_packet_fields();
assert_eq!(tm.data_len(), 15);
}
#[test]
fn test_target_buf_too_small() {
let timestamp = dummy_timestamp();
let pus_tm = base_ping_reply_full_ctor(&timestamp);
let mut buf: [u8; 16] = [0; 16];
let res = pus_tm.write_to_bytes(&mut buf);
assert!(res.is_err());
let error = res.unwrap_err();
assert!(matches!(error, PusError::ByteConversionError { .. }));
match error {
PusError::ByteConversionError(err) => match err {
ByteConversionError::ToSliceTooSmall(size_mismatch) => {
assert_eq!(size_mismatch.expected, 22);
assert_eq!(size_mismatch.found, 16);
}
_ => panic!("Invalid PUS error {:?}", err),
},
_ => {
panic!("Invalid error {:?}", error);
}
}
}
#[test]
#[cfg(feature = "alloc")]
fn test_append_to_vec() {
let timestamp = dummy_timestamp();
let pus_tm = base_ping_reply_full_ctor(&timestamp);
let mut vec = Vec::new();
let res = pus_tm.append_to_vec(&mut vec);
assert!(res.is_ok());
assert_eq!(res.unwrap(), 22);
verify_raw_ping_reply(vec.as_slice());
}
#[test]
#[cfg(feature = "alloc")]
fn test_append_to_vec_with_src_data() {
let src_data = [1, 2, 3];
let hk_reply = base_hk_reply(dummy_timestamp(), &src_data);
let mut vec = Vec::new();
vec.push(4);
let res = hk_reply.append_to_vec(&mut vec);
assert!(res.is_ok());
assert_eq!(res.unwrap(), 25);
assert_eq!(vec.len(), 26);
}
fn verify_raw_ping_reply(buf: &[u8]) {
// Version 0, TM packet, secondary header flag set -> 0b0000_1001; the three MSBs of APID 0x123 fill the last three bits of the first byte
assert_eq!(buf[0], 0x09);
// Rest of APID 0x123
assert_eq!(buf[1], 0x23);
// Sequence flags 0b11 (unsegmented, the default) in the two MSBs; the six MSBs of sequence count 0x234 fill the rest of this byte
assert_eq!(buf[2], 0xc2);
assert_eq!(buf[3], 0x34);
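// CCSDS data length field: total packet length (22) minus the 6 byte primary header minus 1 = 15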
assert_eq!(((buf[4] as u16) << 8) | buf[5] as u16, 15);
// PUS version C (2) in the upper nibble, SC time reference status 0 in the lower nibble
assert_eq!(buf[6], (PusC as u8) << 4);
assert_eq!(buf[7], 17);
assert_eq!(buf[8], 2);
// MSG counter 0
assert_eq!(buf[9], 0x00);
assert_eq!(buf[10], 0x00);
// Destination ID
assert_eq!(buf[11], 0x00);
assert_eq!(buf[12], 0x00);
// Timestamp
assert_eq!(&buf[13..20], dummy_timestamp());
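// CRC16-CCITT (FALSE) is calculated over the whole packet except the two trailing checksum bytes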
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(&buf[0..20]);
let crc16 = digest.finalize();
assert_eq!(((crc16 >> 8) & 0xff) as u8, buf[20]);
assert_eq!((crc16 & 0xff) as u8, buf[21]);
}
fn verify_ping_reply(
tm: &PusTm,
has_user_data: bool,
exp_full_len: usize,
exp_timestamp: &[u8],
) {
assert!(tm.is_tm());
assert_eq!(PusPacket::service(tm), 17);
assert_eq!(PusPacket::subservice(tm), 2);
assert!(tm.sec_header_flag());
assert_eq!(tm.len_packed(), exp_full_len);
assert_eq!(tm.timestamp().unwrap(), exp_timestamp);
if has_user_data {
assert!(tm.user_data().is_some());
}
assert_eq!(PusPacket::pus_version(tm), PusC);
assert_eq!(tm.apid(), 0x123);
assert_eq!(tm.seq_count(), 0x234);
assert_eq!(tm.data_len(), exp_full_len as u16 - 7);
assert_eq!(tm.dest_id(), 0x0000);
assert_eq!(tm.msg_counter(), 0x0000);
assert_eq!(tm.sc_time_ref_status(), 0b0000);
}
#[test]
fn partial_eq_pus_tm() {
let timestamp = dummy_timestamp();
let pus_tm_1 = base_ping_reply_full_ctor(timestamp);
let pus_tm_2 = base_ping_reply_full_ctor(timestamp);
assert_eq!(pus_tm_1, pus_tm_2);
}
#[test]
fn partial_eq_serialized_vs_deserialized() {
let timestamp = dummy_timestamp();
let pus_tm = base_ping_reply_full_ctor(timestamp);
let mut buf = [0; 32];
pus_tm.write_to_bytes(&mut buf).unwrap();
assert_eq!(pus_tm, PusTm::from_bytes(&buf, timestamp.len()).unwrap().0);
}
}

src/util.rs (new file, 674 lines)

@@ -0,0 +1,674 @@
use crate::ByteConversionError;
use core::fmt::{Debug, Display, Formatter};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
pub trait ToBeBytes {
type ByteArray: AsRef<[u8]>;
/// Length when written to big endian bytes.
fn written_len(&self) -> usize;
fn to_be_bytes(&self) -> Self::ByteArray;
}
impl ToBeBytes for () {
type ByteArray = [u8; 0];
fn written_len(&self) -> usize {
0
}
fn to_be_bytes(&self) -> Self::ByteArray {
[]
}
}
impl ToBeBytes for u8 {
type ByteArray = [u8; 1];
fn written_len(&self) -> usize {
1
}
fn to_be_bytes(&self) -> Self::ByteArray {
u8::to_be_bytes(*self)
}
}
impl ToBeBytes for u16 {
type ByteArray = [u8; 2];
fn written_len(&self) -> usize {
2
}
fn to_be_bytes(&self) -> Self::ByteArray {
u16::to_be_bytes(*self)
}
}
impl ToBeBytes for u32 {
type ByteArray = [u8; 4];
fn written_len(&self) -> usize {
4
}
fn to_be_bytes(&self) -> Self::ByteArray {
u32::to_be_bytes(*self)
}
}
impl ToBeBytes for u64 {
type ByteArray = [u8; 8];
fn written_len(&self) -> usize {
8
}
fn to_be_bytes(&self) -> Self::ByteArray {
u64::to_be_bytes(*self)
}
}
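/// Common interface for unsigned byte fields and unsigned enumerations of variable width.
///
/// A minimal usage sketch with the `UbfU16` alias defined further below, assuming this module is
/// exposed as `spacepackets::util`:
/// ```
/// use spacepackets::util::{UbfU16, UnsignedEnum};
///
/// let field = UbfU16::new(0x2001);
/// let mut buf = [0u8; 2];
/// assert_eq!(field.write_to_be_bytes(&mut buf).expect("write failed"), 2);
/// assert_eq!(buf, [0x20, 0x01]);
/// ```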
pub trait UnsignedEnum {
/// Size of the unsigned enumeration in bytes.
fn size(&self) -> usize;
/// Write the unsigned enumeration to a raw buffer. Returns the written size on success.
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
}
pub trait UnsignedEnumExt: UnsignedEnum + Debug + Copy + Clone + PartialEq + Eq {}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum UnsignedByteFieldError {
/// Value is too large for specified width of byte field.
ValueTooLargeForWidth {
width: usize,
value: u64,
},
/// Only 1, 2, 4 and 8 are allowed width values. Optionally contains the expected width if
/// applicable, for example for conversions.
InvalidWidth {
found: usize,
expected: Option<usize>,
},
ByteConversionError(ByteConversionError),
}
impl From<ByteConversionError> for UnsignedByteFieldError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value)
}
}
impl Display for UnsignedByteFieldError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
Self::ByteConversionError(e) => {
write!(f, "low level byte conversion error: {e}")
}
Self::InvalidWidth { found, .. } => {
write!(f, "invalid width {found}, only 1, 2, 4 and 8 are allowed.")
}
Self::ValueTooLargeForWidth { width, value } => {
write!(f, "value {value} too large for width {width}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for UnsignedByteFieldError {}
/// Type-erased variant with the field width stored at runtime.
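///
/// A minimal usage sketch, assuming this module is exposed as `spacepackets::util`:
/// ```
/// use spacepackets::util::{UnsignedByteField, UnsignedEnum};
///
/// let field = UnsignedByteField::new(2, 0x1f2f);
/// let mut buf = [0u8; 2];
/// field.write_to_be_bytes(&mut buf).expect("write failed");
/// assert_eq!(buf, [0x1f, 0x2f]);
/// assert_eq!(UnsignedByteField::new_from_be_bytes(2, &buf).unwrap(), field);
/// ```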
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct UnsignedByteField {
width: usize,
value: u64,
}
impl UnsignedByteField {
pub const fn new(width: usize, value: u64) -> Self {
Self { width, value }
}
pub fn value(&self) -> u64 {
self.value
}
pub fn new_from_be_bytes(width: usize, buf: &[u8]) -> Result<Self, UnsignedByteFieldError> {
if width > buf.len() {
return Err(ByteConversionError::FromSliceTooSmall {
expected: width,
found: buf.len(),
}
.into());
}
match width {
0 => Ok(Self::new(width, 0)),
1 => Ok(Self::new(width, buf[0] as u64)),
2 => Ok(Self::new(
width,
u16::from_be_bytes(buf[0..2].try_into().unwrap()) as u64,
)),
4 => Ok(Self::new(
width,
u32::from_be_bytes(buf[0..4].try_into().unwrap()) as u64,
)),
8 => Ok(Self::new(
width,
u64::from_be_bytes(buf[0..8].try_into().unwrap()),
)),
_ => Err(UnsignedByteFieldError::InvalidWidth {
found: width,
expected: None,
}),
}
}
}
impl UnsignedEnum for UnsignedByteField {
fn size(&self) -> usize {
self.width
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.size() {
return Err(ByteConversionError::ToSliceTooSmall {
expected: self.size(),
found: buf.len(),
});
}
match self.size() {
0 => Ok(0),
1 => {
let u8 = UnsignedByteFieldU8::try_from(*self).unwrap();
u8.write_to_be_bytes(buf)
}
2 => {
let u16 = UnsignedByteFieldU16::try_from(*self).unwrap();
u16.write_to_be_bytes(buf)
}
4 => {
let u32 = UnsignedByteFieldU32::try_from(*self).unwrap();
u32.write_to_be_bytes(buf)
}
8 => {
let u64 = UnsignedByteFieldU64::try_from(*self).unwrap();
u64.write_to_be_bytes(buf)
}
_ => {
// The API does not allow this.
panic!("unexpected written length");
}
}
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct GenericUnsignedByteField<TYPE> {
value: TYPE,
}
impl<TYPE> GenericUnsignedByteField<TYPE> {
pub const fn new(val: TYPE) -> Self {
Self { value: val }
}
}
impl<TYPE: ToBeBytes> UnsignedEnum for GenericUnsignedByteField<TYPE> {
fn size(&self) -> usize {
self.value.written_len()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.size() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.size(),
});
}
buf[0..self.size()].copy_from_slice(self.value.to_be_bytes().as_ref());
Ok(self.value.written_len())
}
}
pub type UnsignedByteFieldEmpty = GenericUnsignedByteField<()>;
pub type UnsignedByteFieldU8 = GenericUnsignedByteField<u8>;
pub type UnsignedByteFieldU16 = GenericUnsignedByteField<u16>;
pub type UnsignedByteFieldU32 = GenericUnsignedByteField<u32>;
pub type UnsignedByteFieldU64 = GenericUnsignedByteField<u64>;
pub type UbfU8 = UnsignedByteFieldU8;
pub type UbfU16 = UnsignedByteFieldU16;
pub type UbfU32 = UnsignedByteFieldU32;
pub type UbfU64 = UnsignedByteFieldU64;
impl From<UnsignedByteFieldU8> for UnsignedByteField {
fn from(value: UnsignedByteFieldU8) -> Self {
Self::new(1, value.value as u64)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU8 {
type Error = UnsignedByteFieldError;
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 1 {
return Err(UnsignedByteFieldError::InvalidWidth {
found: value.width,
expected: Some(1),
});
}
Ok(Self::new(value.value as u8))
}
}
impl From<UnsignedByteFieldU16> for UnsignedByteField {
fn from(value: UnsignedByteFieldU16) -> Self {
Self::new(2, value.value as u64)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU16 {
type Error = UnsignedByteFieldError;
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 2 {
return Err(UnsignedByteFieldError::InvalidWidth {
found: value.width,
expected: Some(2),
});
}
Ok(Self::new(value.value as u16))
}
}
impl From<UnsignedByteFieldU32> for UnsignedByteField {
fn from(value: UnsignedByteFieldU32) -> Self {
Self::new(4, value.value as u64)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU32 {
type Error = UnsignedByteFieldError;
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 4 {
return Err(UnsignedByteFieldError::InvalidWidth {
found: value.width,
expected: Some(4),
});
}
Ok(Self::new(value.value as u32))
}
}
impl From<UnsignedByteFieldU64> for UnsignedByteField {
fn from(value: UnsignedByteFieldU64) -> Self {
Self::new(8, value.value)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU64 {
type Error = UnsignedByteFieldError;
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 8 {
return Err(UnsignedByteFieldError::InvalidWidth {
found: value.width,
expected: Some(8),
});
}
Ok(Self::new(value.value))
}
}
#[cfg(test)]
pub mod tests {
use crate::util::{
UnsignedByteField, UnsignedByteFieldError, UnsignedByteFieldU16, UnsignedByteFieldU32,
UnsignedByteFieldU64, UnsignedByteFieldU8, UnsignedEnum,
};
use crate::ByteConversionError;
#[test]
fn test_simple_u8() {
let u8 = UnsignedByteFieldU8::new(5);
assert_eq!(u8.size(), 1);
let mut buf: [u8; 8] = [0; 8];
let len = u8
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 1);
assert_eq!(buf[0], 5);
for i in 1..8 {
assert_eq!(buf[i], 0);
}
}
#[test]
fn test_simple_u16() {
let u16 = UnsignedByteFieldU16::new(3823);
assert_eq!(u16.size(), 2);
let mut buf: [u8; 8] = [0; 8];
let len = u16
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 2);
let raw_val = u16::from_be_bytes(buf[0..2].try_into().unwrap());
assert_eq!(raw_val, 3823);
for i in 2..8 {
assert_eq!(buf[i], 0);
}
}
#[test]
fn test_simple_u32() {
let u32 = UnsignedByteFieldU32::new(80932);
assert_eq!(u32.size(), 4);
let mut buf: [u8; 8] = [0; 8];
let len = u32
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 4);
let raw_val = u32::from_be_bytes(buf[0..4].try_into().unwrap());
assert_eq!(raw_val, 80932);
for i in 4..8 {
assert_eq!(buf[i], 0);
}
}
#[test]
fn test_simple_u64() {
let u64 = UnsignedByteFieldU64::new(5999999);
assert_eq!(u64.size(), 8);
let mut buf: [u8; 8] = [0; 8];
let len = u64
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 8);
let raw_val = u64::from_be_bytes(buf[0..8].try_into().unwrap());
assert_eq!(raw_val, 5999999);
}
#[test]
fn conversions_u8() {
let u8 = UnsignedByteFieldU8::new(5);
let u8_type_erased = UnsignedByteField::from(u8);
assert_eq!(u8_type_erased.width, 1);
assert_eq!(u8_type_erased.value, 5);
let u8_conv_back =
UnsignedByteFieldU8::try_from(u8_type_erased).expect("conversion failed for u8");
assert_eq!(u8, u8_conv_back);
assert_eq!(u8_conv_back.value, 5);
}
#[test]
fn conversion_u8_fails() {
let field = UnsignedByteField::new(2, 60000);
let conv_fails = UnsignedByteFieldU8::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth {
found,
expected: Some(expected),
} => {
assert_eq!(found, 2);
assert_eq!(expected, 1);
}
_ => {
panic!("invalid error {err}")
}
}
}
#[test]
fn conversions_u16() {
let u16 = UnsignedByteFieldU16::new(64444);
let u16_type_erased = UnsignedByteField::from(u16);
assert_eq!(u16_type_erased.width, 2);
assert_eq!(u16_type_erased.value, 64444);
let u16_conv_back =
UnsignedByteFieldU16::try_from(u16_type_erased).expect("conversion failed for u16");
assert_eq!(u16, u16_conv_back);
assert_eq!(u16_conv_back.value, 64444);
}
#[test]
fn conversion_u16_fails() {
let field = UnsignedByteField::new(4, 75000);
let conv_fails = UnsignedByteFieldU16::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth {
found,
expected: Some(expected),
} => {
assert_eq!(found, 4);
assert_eq!(expected, 2);
}
_ => {
panic!("invalid error {err}")
}
}
}
#[test]
fn conversions_u32() {
let u32 = UnsignedByteFieldU32::new(75000);
let u32_type_erased = UnsignedByteField::from(u32);
assert_eq!(u32_type_erased.width, 4);
assert_eq!(u32_type_erased.value, 75000);
let u32_conv_back =
UnsignedByteFieldU32::try_from(u32_type_erased).expect("conversion failed for u32");
assert_eq!(u32, u32_conv_back);
assert_eq!(u32_conv_back.value, 75000);
}
#[test]
fn conversion_u32_fails() {
let field = UnsignedByteField::new(8, 75000);
let conv_fails = UnsignedByteFieldU32::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth {
found,
expected: Some(expected),
} => {
assert_eq!(found, 8);
assert_eq!(expected, 4);
}
_ => {
panic!("invalid error {err}")
}
}
}
#[test]
fn conversions_u64() {
let u64 = UnsignedByteFieldU64::new(5999999);
let u64_type_erased = UnsignedByteField::from(u64);
assert_eq!(u64_type_erased.width, 8);
assert_eq!(u64_type_erased.value, 5999999);
let u64_conv_back =
UnsignedByteFieldU64::try_from(u64_type_erased).expect("conversion failed for u64");
assert_eq!(u64, u64_conv_back);
assert_eq!(u64_conv_back.value, 5999999);
}
#[test]
fn conversion_u64_fails() {
let field = UnsignedByteField::new(4, 60000);
let conv_fails = UnsignedByteFieldU64::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth {
found,
expected: Some(expected),
} => {
assert_eq!(found, 4);
assert_eq!(expected, 8);
}
_ => {
panic!("invalid error {err}")
}
}
}
#[test]
fn type_erased_u8_write() {
let u8 = UnsignedByteField::new(1, 5);
assert_eq!(u8.size(), 1);
let mut buf: [u8; 8] = [0; 8];
u8.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(buf[0], 5);
for i in 1..8 {
assert_eq!(buf[i], 0);
}
}
#[test]
fn type_erased_u16_write() {
let u16 = UnsignedByteField::new(2, 3823);
assert_eq!(u16.size(), 2);
let mut buf: [u8; 8] = [0; 8];
u16.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
let raw_val = u16::from_be_bytes(buf[0..2].try_into().unwrap());
assert_eq!(raw_val, 3823);
for i in 2..8 {
assert_eq!(buf[i], 0);
}
}
#[test]
fn type_erased_u32_write() {
let u32 = UnsignedByteField::new(4, 80932);
assert_eq!(u32.size(), 4);
let mut buf: [u8; 8] = [0; 8];
u32.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
let raw_val = u32::from_be_bytes(buf[0..4].try_into().unwrap());
assert_eq!(raw_val, 80932);
for i in 4..8 {
assert_eq!(buf[i], 0);
}
}
#[test]
fn type_erased_u64_write() {
let u64 = UnsignedByteField::new(8, 5999999);
assert_eq!(u64.size(), 8);
let mut buf: [u8; 8] = [0; 8];
u64.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
let raw_val = u64::from_be_bytes(buf[0..8].try_into().unwrap());
assert_eq!(raw_val, 5999999);
}
#[test]
fn type_erased_u8_construction() {
let buf: [u8; 2] = [5, 10];
let u8 = UnsignedByteField::new_from_be_bytes(1, &buf).expect("construction failed");
assert_eq!(u8.width, 1);
assert_eq!(u8.value, 5);
}
#[test]
fn type_erased_u16_construction() {
let buf: [u8; 2] = [0x10, 0x15];
let u16 = UnsignedByteField::new_from_be_bytes(2, &buf).expect("construction failed");
assert_eq!(u16.width, 2);
assert_eq!(u16.value, 0x1015);
}
#[test]
fn type_erased_u32_construction() {
let buf: [u8; 4] = [0x01, 0x02, 0x03, 0x04];
let u32 = UnsignedByteField::new_from_be_bytes(4, &buf).expect("construction failed");
assert_eq!(u32.width, 4);
assert_eq!(u32.value, 0x01020304);
}
#[test]
fn type_erased_u64_construction() {
let buf: [u8; 8] = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08];
let u64 = UnsignedByteField::new_from_be_bytes(8, &buf).expect("construction failed");
assert_eq!(u64.width, 8);
assert_eq!(u64.value, 0x0102030405060708);
}
#[test]
fn type_u16_target_buf_too_small() {
let u16 = UnsignedByteFieldU16::new(500);
let mut buf: [u8; 1] = [0; 1];
let res = u16.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let err = res.unwrap_err();
match err {
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(found, 1);
assert_eq!(expected, 2);
}
_ => {
panic!("invalid error")
}
}
}
#[test]
fn type_erased_u16_target_buf_too_small() {
let u16 = UnsignedByteField::new(2, 500);
let mut buf: [u8; 1] = [0; 1];
let res = u16.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let err = res.unwrap_err();
match err {
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(found, 1);
assert_eq!(expected, 2);
}
_ => {
panic!("invalid error {}", err)
}
}
let u16 = UnsignedByteField::new_from_be_bytes(2, &buf);
assert!(u16.is_err());
let err = u16.unwrap_err();
if let UnsignedByteFieldError::ByteConversionError(
ByteConversionError::FromSliceTooSmall { found, expected },
) = err
{
assert_eq!(expected, 2);
assert_eq!(found, 1);
} else {
panic!("unexpected error {}", err);
}
}
#[test]
fn type_u32_target_buf_too_small() {
let u32 = UnsignedByteFieldU32::new(500);
let mut buf: [u8; 3] = [0; 3];
let res = u32.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let err = res.unwrap_err();
match err {
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(found, 3);
assert_eq!(expected, 4);
}
_ => {
panic!("invalid error")
}
}
}
}