300 Commits

Author SHA1 Message Date
Robin Mueller
a7d004b299 CI tweaks 2025-11-28 10:35:28 +01:00
cf9306d992 Merge pull request 'more precise error handling for CCSDS support' (#195) from more-precise-error-handling into main
Reviewed-on: #195
2025-11-27 17:36:37 +01:00
Robin Mueller
73b11d0ae2 more precise error handling for CCSDS support 2025-11-27 17:34:51 +01:00
b7efa0378d Merge pull request 'fix docs CI' (#194) from fix-docs-ci into main
Reviewed-on: #194
2025-11-27 12:47:56 +01:00
Robin Mueller
c6d10422d5 fix docs CI 2025-11-27 12:44:26 +01:00
416a89b807 Merge pull request 'added raw data getter function' (#193) from ccsds-raw-data-getter into main
Reviewed-on: #193
2025-11-19 17:39:17 +01:00
Robin Mueller
8136f554b5 added raw data getter function 2025-11-19 17:28:11 +01:00
665cb3b107 Merge pull request 'sequence counter update' (#192) from seq-counter-update into main
Reviewed-on: #192
2025-11-19 17:11:11 +01:00
Robin Mueller
989ace786e Update sequence counter impl 2025-11-19 17:10:02 +01:00
402331c725 Merge pull request 'added distinction between CCSDS packet and user data' (#191) from ccsds-reader-user-data into main
Reviewed-on: #191
2025-11-19 17:02:33 +01:00
3e3f33a7d1 added distinction between CCSDS packet and user data 2025-11-19 16:43:39 +01:00
f001234025 Merge pull request 'clippy' (#190) from clippy into main
Reviewed-on: #190
2025-11-06 13:43:48 +01:00
Robin Mueller
e5a7839901 clippy 2025-11-06 13:43:20 +01:00
5707c6322a Merge pull request 'changelog' (#189) from prep-v0.17.0 into main
Reviewed-on: #189
2025-11-06 13:27:09 +01:00
Robin Mueller
093f82ae86 changelog 2025-11-06 13:26:46 +01:00
e68d1ade48 Merge pull request 'Finish full crate docs' (#188) from renaming-docs-for-ecss into main
Reviewed-on: #188
2025-11-05 20:20:35 +01:00
Robin Mueller
fbdc325d0d Finish full crate docs 2025-11-05 20:18:31 +01:00
4bc0219cb2 Merge pull request 'docs and minor cfdp change' (#187) from cfdp-update-docs into main
Reviewed-on: #187
2025-11-04 18:54:33 +01:00
3f4f76849f docs and minor cfdp change 2025-11-04 18:50:53 +01:00
fb1e2fc583 Merge pull request 'added missing derives' (#186) from add-missing-derives into main
Reviewed-on: #186
2025-11-04 15:57:46 +01:00
Robin Mueller
96e5851864 added missing derives 2025-11-04 15:57:09 +01:00
b4d00c26c5 Merge pull request 'add direct APID getter' (#185) from add-direct-ccsds-apid-getter into main
Reviewed-on: #185
2025-11-04 15:40:10 +01:00
Robin Mueller
a8b64f2fef add direct APID getter 2025-11-04 15:39:37 +01:00
e7cb6f2a7a Merge pull request 'error reporting bugfix' (#184) from ccsds-packet-reader-error-reporting-fix into main
Reviewed-on: #184
2025-11-04 15:34:54 +01:00
Robin Mueller
973ba4d3c4 error reporting bugfix 2025-11-04 15:34:15 +01:00
8789e34c14 Merge pull request 'added missing function to reader' (#183) from add-ccsds-id-function-to-reader into main
Reviewed-on: #183
2025-10-31 16:56:12 +01:00
Robin Mueller
a68e82a825 added missing function to reader 2025-10-31 16:55:45 +01:00
0b46fa785b Merge pull request 'better naming' (#182) from naming-improvement into main
Reviewed-on: #182
2025-10-31 16:03:40 +01:00
Robin Mueller
c57ee3e131 better naming 2025-10-31 16:03:08 +01:00
6ac84c3dca Merge pull request 'add option to ignore checksum for CCSDS' (#181) from add-option-to-ignore-checksum into main
Reviewed-on: #181
2025-10-31 16:01:11 +01:00
Robin Mueller
374f39f13b add option to ignore checksum for CCSDS 2025-10-31 15:59:41 +01:00
2bc6167710 Merge pull request 'less confusing naming' (#180) from less-confusing-naming into main
Reviewed-on: #180
2025-10-31 12:55:53 +01:00
Robin Mueller
cfe0937afe less confusing naming 2025-10-31 12:55:07 +01:00
e1c693cb29 Merge pull request 'update ECSS PUS naming convention' (#179) from update-ecss-pus-naming-convention into main
Reviewed-on: #179
2025-10-31 12:39:39 +01:00
Robin Mueller
38165420b7 update ECSS PUS naming convention 2025-10-31 12:38:59 +01:00
0d09ff7825 Merge pull request 'add docs and minor changes' (#178) from add-docs-minor-changes into main
Reviewed-on: #178
2025-10-31 11:45:06 +01:00
Robin Mueller
8f2096ca35 add docs and minor changes 2025-10-31 11:39:23 +01:00
3f35e9dba9 Merge pull request 'add owned CCSDS packet creator' (#177) from add-owned-ccsds-packet-creator into main
Reviewed-on: #177
2025-10-31 10:10:50 +01:00
Robin Mueller
ea96099f55 add owned CCSDS packet creator 2025-10-31 10:10:11 +01:00
e117239852 Merge pull request 'add useful functions' (#176) from add-ccsds-id-functions into main
Reviewed-on: #176
2025-10-30 18:58:27 +01:00
Robin Mueller
844c517a94 add useful functions 2025-10-30 18:58:10 +01:00
0ae2ac149b Merge pull request 'add CCSDS packet ID' (#175) from add-ccsds-packet-id into main
Reviewed-on: #175
2025-10-29 21:58:56 +01:00
Robin Mueller
2b41f9754d add CCSDS packet ID 2025-10-29 21:45:07 +01:00
8e2e0ce632 Merge pull request 'fix portable atomic support' (#174) from fix-portable-atomic-support into main
Reviewed-on: #174
2025-10-29 16:13:09 +01:00
Robin Mueller
14d935ac2a fix portable atomic support 2025-10-29 16:05:11 +01:00
756a803213 Merge pull request 'prepare v0.17.0' (#173) from prepare-v0.17.0 into main
Reviewed-on: #173
2025-10-29 16:04:19 +01:00
Robin Mueller
937bdeaf54 prepare v0.17.0 2025-10-29 15:48:29 +01:00
bc30143d61 Merge pull request 'start adding improved CCSDS packet support' (#172) from add-better-ccsds-packet-support into main
Reviewed-on: #172
2025-10-29 15:44:38 +01:00
Robin Mueller
549e323211 start adding improved CCSDS packet support 2025-10-29 15:28:25 +01:00
82c3e06ac0 Merge pull request 'feature gate all core atomics' (#171) from feature-gate-all-core-atomics into main
Reviewed-on: #171
2025-10-29 11:26:57 +01:00
Robin Mueller
750add26ef feature gate all core atomics 2025-10-29 11:24:05 +01:00
c3ff947fb0 Merge pull request 'move some modules' (#170) from clean-up-cds-time-mod into main
Reviewed-on: #170
2025-10-15 15:26:39 +02:00
Robin Mueller
8d86ecc8ee move some modules 2025-10-15 15:26:06 +02:00
4b2bebb8cb Merge pull request 'simplified CDS short impl' (#169) from simplify-cds-timestamp-impl into main
Reviewed-on: #169
2025-10-15 15:01:06 +02:00
Robin Mueller
e0b7a6a6bb simplified CDS short impl 2025-10-15 11:57:44 +02:00
49983a5d6c Merge pull request 'update for docs generation' (#168) from doc-generation-update into main
Reviewed-on: #168
2025-10-02 09:45:45 +02:00
Robin Mueller
04c864d6a2 update for docs generation 2025-10-01 00:20:47 +02:00
922801cc74 Merge pull request 'try to fix CI' (#167) from ci-fix into main
Reviewed-on: #167
2025-09-26 15:14:20 +02:00
Robin Mueller
f5717d98cd try to fix CI 2025-09-26 15:10:27 +02:00
6ea7b8902a Merge pull request 'SpHeader::packet_len is pub now' (#166) from sp-packet-len-pub into main
Reviewed-on: #166
2025-09-26 15:09:07 +02:00
Robin Mueller
f6ac9ee918 SpHeader::packet_len is pub now 2025-09-26 15:08:29 +02:00
0aa41fee92 Merge pull request 'prepare v0.16.0' (#165) from prep-v0.16.0 into main
Reviewed-on: #165
2025-09-24 19:58:02 +02:00
Robin Mueller
d1516d669d prepare v0.16.0 2025-09-24 19:56:52 +02:00
b1ebb4d7c4 Merge pull request 'update docs on coverage' (#164) from update-coverage-docs into main
Reviewed-on: #164
2025-09-24 19:55:47 +02:00
Robin Mueller
cd79af4440 update docs on coverage 2025-09-24 19:54:45 +02:00
6a760c8585 Merge pull request 'improve backwards compatibility' (#163) from improve-backwards-compat into main
Reviewed-on: #163
2025-09-24 19:54:11 +02:00
Robin Mueller
5eb409f1ec improve backwards compatibility 2025-09-24 19:49:51 +02:00
69d416d6ff Merge pull request 'improvement for NAK API' (#162) from nak-api-improvement into main
Reviewed-on: #162
2025-09-23 17:08:15 +02:00
Robin Mueller
e2b239ae61 improvement for NAK API 2025-09-23 17:06:45 +02:00
b06d7c1a87 Merge pull request 'better error handling' (#161) from better-nak-error into main
Reviewed-on: #161
2025-09-18 17:37:00 +02:00
Robin Mueller
ec1ddbde81 better error handling 2025-09-18 17:36:51 +02:00
7f4ada1734 Merge pull request 'NAK constructor is pub' (#160) from nak-new-pub into main
Reviewed-on: #160
2025-09-18 17:35:48 +02:00
Robin Mueller
15f97e960b NAK constructor is pub 2025-09-18 17:32:11 +02:00
49b7c2d072 Merge pull request 'PDU header improvements' (#159) from pdu-header-improvements into main
Reviewed-on: #159
2025-09-18 16:56:26 +02:00
1ed23bd7ef PDU header improvements 2025-09-18 16:54:28 +02:00
a82cdb1e82 Merge pull request 'nak docs' (#158) from nak-docs into main
Reviewed-on: #158
2025-09-17 13:42:04 +02:00
12e7062075 nak docs 2025-09-17 13:40:49 +02:00
a1e40834f5 Merge pull request 'improve ACK PDU' (#157) from improve-ack-pdu into main
Reviewed-on: #157
2025-09-15 13:02:30 +02:00
Robin Mueller
3f6a5df8e7 improve ACK PDU 2025-09-15 13:02:16 +02:00
a8d5fdf8d3 Merge pull request 'extend NAK PDU' (#156) from extend-nak-pdu into main
Reviewed-on: #156
2025-09-15 10:30:02 +02:00
Robin Mueller
62326da276 extend NAK PDU 2025-09-15 10:16:07 +02:00
477890346a Merge pull request 'improve CFDP module' (#154) from cfdp-module-improvements into main
Reviewed-on: #154
2025-09-11 16:10:47 +02:00
Robin Mueller
9394beea38 improve CFDP module 2025-09-11 16:03:58 +02:00
6c425e137a Merge pull request 'add coverage to justfile' (#155) from update-justfile into main
Reviewed-on: #155
2025-09-11 16:03:41 +02:00
24b91a7a83 add coverage to justfile 2025-09-11 13:22:27 +02:00
a7c6ce7d44 Merge pull request 'improve CFDP module' (#153) from cfdp-module-improvements into main
Reviewed-on: #153
2025-09-11 09:12:59 +02:00
Robin Mueller
c68e71a25e improve CFDP module 2025-09-11 09:09:41 +02:00
272a961a70 Merge pull request 'add packet_len direct method for SpHeader' (#152) from sp-header-tweak into main
Reviewed-on: #152
2025-09-10 21:05:56 +02:00
Robin Mueller
6f4df7e3c2 add packet_len direct method for SpHeader 2025-09-10 19:04:47 +02:00
15c477e810 Merge pull request 'prepare v0.16.0' (#151) from prep-v0.16.0 into main
Reviewed-on: #151
2025-09-10 18:08:10 +02:00
Robin Mueller
e5b10920a0 prepare v0.16.0 2025-09-10 18:03:35 +02:00
3f8434e1fa Merge pull request 'add missing Error impls' (#150) from add-missing-error-impls into main
Reviewed-on: #150
2025-09-10 17:54:46 +02:00
Robin Mueller
ec3f462931 add missing Error impls 2025-09-10 17:52:49 +02:00
e6686caba1 Merge pull request 'add-missing-defmt-impls' (#149) from add-missing-defmt-impls into main
Reviewed-on: #149
2025-09-10 17:52:39 +02:00
Robin Mueller
2a0b21983e add some missing defmt impls 2025-09-10 17:48:49 +02:00
4e153e0b68 Merge pull request 'Add TM builder API' (#148) from add-tm-builder-api into main
Reviewed-on: #148
2025-09-10 17:39:05 +02:00
Robin Mueller
aaac15e3d0 Add TM builder API 2025-09-10 17:36:39 +02:00
89788c1341 Merge pull request 'add first builder API' (#147) from add-tc-builder-api into main
Reviewed-on: #147
2025-09-10 16:38:25 +02:00
Robin Mueller
578be2da8f add first TC builder API 2025-09-10 16:12:06 +02:00
3a21daf8de Merge pull request 'refactor and improve ECSS module' (#146) from refactor-improve-ecss-module into main
Reviewed-on: #146
2025-09-10 15:37:27 +02:00
Robin Mueller
8fd46f6a30 refactor and improve ECSS module 2025-09-10 15:28:58 +02:00
c6b74fecbd Merge pull request 'start making ECSS checksum optional' (#144) from ecss-checksum-optional into main
Reviewed-on: #144
2025-09-09 16:14:45 +02:00
Robin Mueller
60e35559e5 start making ECSS checksum optional 2025-09-09 16:14:11 +02:00
e708f1b861 Merge pull request 'some more tests' (#145) from add-some-more-tests into main
Reviewed-on: #145
2025-09-09 15:57:11 +02:00
Robin Mueller
91490b5dd6 some more tests 2025-09-09 15:56:44 +02:00
e151b8e761 Merge pull request 'fix for embedded systems, introduce portable atomic seq counters' (#143) from portable-atomic-seq-counters-embedded-fix into main
Reviewed-on: #143
2025-09-09 13:49:23 +02:00
Robin Mueller
2839174e5f fix for embedded systems, introduce portable atomic seq counters 2025-09-09 13:34:12 +02:00
6e2db87fa9 Merge pull request 'improve sequence counters' (#141) from improve-seq-counters into main
Reviewed-on: #141
2025-09-09 11:53:31 +02:00
Robin Mueller
e8a01dc6b2 improve sequence counters 2025-09-09 11:51:59 +02:00
20403bda32 Merge pull request 'sequence counter improvements' (#140) from seq-counter-improvements into main
Reviewed-on: #140
2025-09-09 10:27:08 +02:00
Robin Mueller
2cbd48331c sequence counter improvements 2025-09-09 10:24:20 +02:00
c1346f2b12 Merge pull request 'add some more tests' (#138) from some-more-tests into main
Reviewed-on: #138
2025-09-08 17:01:45 +02:00
2e3a7849a7 add some more tests 2025-09-08 16:59:41 +02:00
86ebea8eb8 Merge pull request 'Add basic USLP support' (#137) from add-basic-uslp-support into main
Reviewed-on: #137
2025-09-08 16:59:21 +02:00
2c8c77acb8 add basic USLP support 2025-09-08 16:51:33 +02:00
63d74aa58b Merge pull request 'PUS version fixes' (#136) from small-bugfix-pus-tm-a into main
Reviewed-on: #136
2025-08-26 16:41:13 +02:00
5a86f89c83 version fixes 2025-08-26 16:40:44 +02:00
b8ae26c302 Merge pull request 'improvement for naming' (#135) from naming-improvement into main
Reviewed-on: #135
2025-08-26 16:22:19 +02:00
160b1dedf9 improvement for naming 2025-08-26 16:16:54 +02:00
8eccf1fa29 Merge pull request 'NAK PDU reader update' (#134) from nak-pdu-reader-refactoring into main
Reviewed-on: #134
2025-08-20 17:53:32 +02:00
Robin Mueller
8445b7cc31 NAK PDU reader update 2025-08-20 16:02:08 +02:00
a2971f8f73 Merge pull request 'add badge' (#133) from add-chat-badge into main
Reviewed-on: #133
2025-08-14 14:22:09 +02:00
Robin Mueller
ba3b66326d add badge 2025-08-14 14:21:37 +02:00
de2675e602 Merge pull request 'add PUS A support' (#132) from add-pus-a-support into main
Reviewed-on: #132
2025-08-13 17:24:50 +02:00
Robin Mueller
3d344c11cc add PUS A support 2025-08-13 17:04:39 +02:00
6e2c35e0c0 Merge pull request 'prepare next release' (#131) from prep-v0.15.0 into main
Reviewed-on: #131
2025-07-18 19:32:28 +02:00
Robin Mueller
026e1a50b9 prepare next release 2025-07-18 19:31:55 +02:00
440b836b70 Merge pull request 'allow arbitrary crc minor version' (#130) from allow-arbitrary-crc-minor-version into main
Reviewed-on: #130
2025-07-18 19:28:31 +02:00
Robin Mueller
00e28e4a96 allow arbitrary crc minor version 2025-07-18 19:27:59 +02:00
4c1cad5b72 Merge pull request 'reserved data variants for ECSS TM and TC' (#129) from ecss-tm-tc-reserved-data-variants into main
Reviewed-on: #129
2025-05-16 19:06:08 +02:00
5cd5c1ce6d reserved data variants for ECSS TM and TC 2025-05-16 19:04:23 +02:00
de99bb926a Merge pull request 'small changelog tweak' (#128) from small-changelog-tweak into main
Reviewed-on: #128
2025-05-10 15:08:18 +02:00
167f53cac7 small changelog tweak 2025-05-10 15:07:58 +02:00
172227b843 Merge pull request 'update MSRV check' (#127) from update-msrv-check into main
Reviewed-on: #127
2025-05-10 15:04:21 +02:00
1bbca6866b update MSRV check 2025-05-10 15:03:05 +02:00
b569208d45 Merge pull request 'prepare v0.14.0' (#126) from prepare-release into main
Reviewed-on: #126
2025-05-10 14:58:45 +02:00
d9709ffd6c prepare v0.14.0 2025-05-10 14:54:27 +02:00
243dc64a78 Merge pull request 'remove badge' (#125) from remove-badge into main
Reviewed-on: #125
2025-05-10 14:38:23 +02:00
a6dc173f7f Merge branch 'main' into remove-badge 2025-05-10 14:38:19 +02:00
86dddbeef5 remove badge 2025-05-10 14:36:00 +02:00
17d112e838 Merge pull request 'one more test fix' (#124) from one-more-test-fix into main
Reviewed-on: #124
2025-05-10 14:31:53 +02:00
9c8467ccfe one more test fix 2025-05-10 14:30:00 +02:00
217a8c2cc7 Merge pull request 'formatting' (#123) from formatting into main
Reviewed-on: #123
2025-05-10 14:26:27 +02:00
349e34bed6 formatting 2025-05-10 14:25:44 +02:00
d6a76ca360 Merge pull request 'CRC handling and dependency update' (#122) from msp430-tweak into main
Reviewed-on: #122
2025-05-10 14:24:54 +02:00
8f4351771b API variants which use table-less CRC 2025-05-10 13:58:10 +02:00
b08c3329f4 Merge pull request 'bump patch release' (#120) from prep-v0.13.1 into main
Reviewed-on: #120
2025-03-21 14:53:01 +01:00
08e0d39154 bump patch release 2025-03-21 14:50:10 +01:00
ab97607024 Merge pull request 'clippy fixes' (#119) from clippy-fixes into main
Reviewed-on: #119
2025-03-21 14:47:06 +01:00
60d1f77844 bugfix due to operator precedence and clippy fixes 2025-03-21 14:46:13 +01:00
5a112b7f39 Merge pull request 'add funding file' (#118) from add-funding-file into main
Reviewed-on: #118
2025-03-17 16:33:46 +01:00
e774dd69d4 add funding file 2025-03-17 16:32:43 +01:00
a03d26a49c Merge pull request 'prep v0.13.0' (#117) from prep-v0.13.0 into main
Reviewed-on: #117
2024-11-08 16:55:52 +01:00
026173514f prep v0.13.0 2024-11-08 16:54:53 +01:00
2d7ccc0909 Merge pull request 'Add back API which was deleted accidentally' (#116) from add-back-api into main
Reviewed-on: #116
2024-11-08 15:50:46 +01:00
05d3bac927 Add back API which was deleted accidentally 2024-11-08 15:46:42 +01:00
d58df5fee2 Merge pull request 'Switch to thiserror' (#115) from switch-to-thiserror into main
Reviewed-on: #115
2024-11-08 15:42:37 +01:00
9d23ac5b9b switch to thiserror completely 2024-11-08 15:26:40 +01:00
c0b4653c01 Merge pull request 'bump CI msrv check' (#114) from bump-msrv-check into main
Reviewed-on: #114
2024-11-08 11:27:52 +01:00
f156833985 bump CI msrv check 2024-11-08 11:26:51 +01:00
9aea3dba00 Merge pull request 'bump dependencies' (#113) from bump-dependencies into main
Reviewed-on: #113
2024-11-08 11:14:04 +01:00
48247a0a87 bump thiserror and zerocopy 2024-11-08 11:13:41 +01:00
f70b957d9a Merge pull request 'docs fixes' (#112) from smaller-doc-fixes into main
Reviewed-on: #112
2024-11-07 23:28:41 +01:00
fbf953df0e docs fixes 2024-11-04 11:42:51 +01:00
f135d54364 Merge pull request 'prepare v0.12.0' (#111) from prepare-v0.12.0 into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #111
2024-09-10 17:58:03 +02:00
d8b2a3dfea prepare v0.12.0
Some checks are pending
Rust/spacepackets/pipeline/head Build started...
2024-09-10 17:51:31 +02:00
448b76be91 Merge pull request 'condition code bugfix' (#110) from cfdp-cond-code-bugfix into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #110
2024-08-29 09:47:27 +02:00
027b01f00f condition code bugfix
Some checks are pending
Rust/spacepackets/pipeline/head Build queued...
2024-08-29 09:46:40 +02:00
bf15b22889 Merge pull request 'added max file segment length calculator' (#109) from file-segment-calculator into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #109
2024-08-21 14:29:16 +02:00
16f91b562d added max file segment length calculator
Some checks are pending
Rust/spacepackets/pipeline/head Build started...
2024-08-21 14:26:11 +02:00
cd77b806fe Merge pull request 'Added additional converter method' (#108) from msgs-to-user-converter-method into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #108
2024-08-21 11:20:33 +02:00
43c88da3f2 Added additional converter method
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-08-20 17:24:53 +02:00
b19a61b859 Merge pull request 'update msg to user module' (#107) from cfdp-msg-to-user-update into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #107
2024-08-20 17:17:11 +02:00
8aa957b8bb update msg to user module
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-08-20 16:56:25 +02:00
190fa1befc Merge pull request 'Added generic sequence counter module' (#106) from seq-count-module into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #106
2024-08-20 11:20:07 +02:00
175b61deca Added generic sequence counter module
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-08-20 10:57:53 +02:00
51c28b5cc6 Merge pull request 'Github MSRV version update' (#105) from github-msrv into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #105
2024-08-19 10:58:31 +02:00
45cc74daa7 Github MSRV version update
Some checks are pending
Rust/spacepackets/pipeline/head Build started...
2024-08-19 10:44:33 +02:00
191c6f8146 Merge pull request 'Bump MSRV and delegate version' (#104) from bump-msrv-delegate-version into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #104
2024-08-19 10:42:29 +02:00
5449884b2e Bump MSRV and delegate version
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-08-19 02:23:34 -06:00
9c93c76193 Merge pull request 'Update EOF PDU API' (#103) from eof-pdu-update into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #103
2024-08-19 10:18:19 +02:00
043927c7ef Update EOF PDU API
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-07-21 10:14:41 -07:00
f4dc5a0302 Merge pull request 'added new API for file data PDU' (#102) from file-data-pdu-update into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #102
2024-07-21 18:25:08 +02:00
9166faa4ae optimization
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-07-19 11:29:37 -07:00
ed808e69d4 added new API for file data PDU
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-07-19 10:41:31 -07:00
d146b6cf57 Merge pull request 'Metadata PDU creator update' (#101) from metadata-pdu-creator-update into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #101
2024-07-14 17:08:46 +02:00
ff0c9d8c70 Update and simplify Metadata PDU creator API
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-07-09 16:30:48 +02:00
c40bc855a2 Merge pull request 'add owned TLV type' (#98) from cfdp-tlv-owned-type into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #98
2024-07-09 16:08:53 +02:00
81423fc6e8 add owned TLV type
Some checks are pending
Rust/spacepackets/pipeline/head Build queued...
Rust/spacepackets/pipeline/pr-main Build queued...
2024-07-09 16:04:08 +02:00
a399b11a8e Merge pull request 'update documentation build' (#99) from update-docs-build into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #99
2024-07-03 16:14:46 +02:00
9d4c7446a3 Merge branch 'main' into update-docs-build
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-06-25 16:20:08 +02:00
b87f7d73b1 Merge pull request 'clippy fix' (#100) from clippy-fix into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #100
2024-06-25 16:20:01 +02:00
80744eea16 clippy fix
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2024-06-25 16:19:30 +02:00
a5918bfd4a update documentation build
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-06-25 16:07:07 +02:00
0e347b0e37 Merge pull request 'Bump MSRV' (#97) from bump-msrv into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #97
2024-05-19 13:07:12 +02:00
58dabb6f2f specify exact required version
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-05-19 09:13:12 +02:00
7fd65aa592 bumped MSRV
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-05-19 09:12:39 +02:00
0024afc83e Merge pull request 'prep patch release' (#96) from prep-v0.11.2 into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #96
2024-05-19 09:02:46 +02:00
c48bd848d3 prep patch release
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-05-19 08:49:03 +02:00
b8be9ae641 Merge pull request 'Fixes for Miri' (#95) from fixes-for-miri into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #95
2024-05-15 13:03:24 +02:00
c2506dbba9 Merge branch 'main' into fixes-for-miri
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-05-14 19:25:07 +02:00
b842b9d11a Merge pull request 'remove defmt::Format impl for MetadataPduCreator' (#94) from fix-defmt-derives into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #94
2024-05-14 19:24:57 +02:00
374c034e92 add miri chapter in README
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-05-14 15:37:20 +02:00
791c7f6e02 it is now possible to run cargo miri 2024-05-14 15:34:40 +02:00
8001938507 remove defmt::Format impl for MetadataPduCreator
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-05-14 15:01:26 +02:00
73ab7ff148 Merge pull request 'add doctests to github CI' (#93) from github-ci-doctest into main
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Reviewed-on: #93
2024-05-02 14:56:13 +02:00
c59d01174f add doctests to github CI
Some checks are pending
Rust/spacepackets/pipeline/head Build queued...
2024-05-02 14:48:31 +02:00
eb49bff0c9 Merge pull request 'update github CI' (#92) from update-github-ci into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #92
2024-05-02 14:29:53 +02:00
af392d40d0 this might work
Some checks are pending
Rust/spacepackets/pipeline/head Build queued...
Rust/spacepackets/pipeline/pr-main Build queued...
2024-05-02 14:22:03 +02:00
b78bfe2114 some fixes
Some checks are pending
Rust/spacepackets/pipeline/head Build queued...
Rust/spacepackets/pipeline/pr-main Build queued...
2024-05-02 14:16:20 +02:00
69a3b1d8f3 update github CI
Some checks are pending
Rust/spacepackets/pipeline/pr-main Build queued...
Rust/spacepackets/pipeline/head Build started...
2024-05-02 14:12:26 +02:00
e7b3ba9575 Merge pull request 'date correction' (#91) from date-correction into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #91
2024-04-22 10:19:19 +02:00
c515535ccd date correction
Some checks are pending
Rust/spacepackets/pipeline/head Build queued...
2024-04-22 10:18:35 +02:00
95158a8cd2 Merge pull request 'prepare next patch version' (#90) from small-improvements-and-fixes into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #90
2024-04-22 10:15:21 +02:00
8b1ccb0cd0 prepare next patch version 2024-04-20 10:42:36 +02:00
619b22e58f Merge pull request 'prepare v0.11.0' (#89) from prep_v0.11.0 into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #89
2024-04-16 19:23:17 +02:00
55222d92b3 small typo fix
Some checks are pending
Rust/spacepackets/pipeline/head Build started...
2024-04-16 19:17:17 +02:00
8e1934e604 prepare release v0.11.0
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-04-16 19:15:04 +02:00
5f37978c56 Merge pull request 'added small defmt test' (#88) from added-small-defmt-test into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #88
2024-04-16 15:34:41 +02:00
97bbb14168 Merge branch 'main' into added-small-defmt-test 2024-04-16 15:34:34 +02:00
a65a98f43f Merge pull request 'clippy and msrv fix' (#87) from ci-github-fixes into main
Reviewed-on: #87
2024-04-16 15:34:27 +02:00
e1a200e65b Merge branch 'main' into added-small-defmt-test
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-04-16 15:14:23 +02:00
b55c7db3fc clippy and msrv fix
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-04-16 15:13:43 +02:00
944bcf1320 Merge pull request 'bump MSRV' (#86) from bump-msrv into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #86
2024-04-13 18:48:45 +02:00
8972dcbfc0 bump MSRV
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head This commit looks good
2024-04-13 17:39:53 +02:00
04b671fa6f Merge pull request 'moved CCSDS constant' (#85) from move-ccsds-constant into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #85
2024-04-13 17:19:15 +02:00
533afc33fa moved CCSDS constant
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-04-13 12:10:32 +02:00
50c56f6504 added small defmt test
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-04-04 16:35:22 +02:00
9e02e00d1a Merge pull request 'prepare next release candidate' (#84) from prep_v0.11.0-rc.2 into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #84
2024-04-04 14:21:40 +02:00
d8676ae711 prepare next release candidate
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-04-04 14:12:33 +02:00
9711159969 Merge pull request 'use cargo nextest in CI for testing' (#83) from use-nextest-as-test-runner-ci into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #83
2024-04-04 13:20:27 +02:00
57adb619b3 use cargo nextest in CI for testing
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-04-04 13:13:43 +02:00
fe52657d11 Merge pull request 'ECSS Ctors: Expect SP header by copy' (#82) from ecss-take-sp-header-by-copy into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #82
2024-04-04 12:20:45 +02:00
50b86939a1 this API is a bit more ergonomic
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-04-04 12:07:37 +02:00
179984f258 Merge pull request 'More smaller tweaks' (#81) from more-smaller-tweaks into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #81
2024-04-04 11:58:48 +02:00
deb89362a4 More smaller tweaks
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-04-04 11:47:39 +02:00
4cd40f37ce Merge pull request 'added additional ctors which only set the APID' (#80) from addition-sp-header-ctors into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #80
2024-04-03 22:59:19 +02:00
bbd66a6a8b added a lot of inline attrs
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-04-03 21:56:26 +02:00
0115461bb5 added additional ctors which only set the APID
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-04-03 21:30:23 +02:00
ca90393d95 Merge pull request 'unify CCSDS API as well' (#79) from unify-ccsds-api into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #79
2024-04-03 19:45:14 +02:00
325e7d6ff3 unify CCSDS API as well
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-04-03 18:47:00 +02:00
228f198006 Merge pull request 'prepare next release candidate' (#78) from prep_v0.11.0-rc.1 into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #78
2024-04-03 15:07:06 +02:00
54f065ed74 small fix
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-04-03 14:18:12 +02:00
4ef65279ea doc update
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-04-03 14:16:42 +02:00
f0af16dc29 prepare next release candidate
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-04-03 14:13:04 +02:00
d05a1077e8 Merge pull request 'consistent ECSS object constructors' (#77) from consistent-ecss-ctors into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #77
2024-04-03 14:09:36 +02:00
fc684a42a8 consistent ECSS object constructors
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-04-03 13:30:01 +02:00
e9ddc316c8 Merge pull request 'update ECSS code' (#75) from update-ecss-code into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #75
2024-03-29 14:22:43 +01:00
4da417dfd2 cargo fmt
Some checks are pending
Rust/spacepackets/pipeline/pr-main Build started...
Rust/spacepackets/pipeline/head This commit looks good
2024-03-29 14:13:44 +01:00
cabb3a19ef Update ECSS code
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2024-03-29 14:06:52 +01:00
5eef376351 Merge pull request 'Start adding defmt support' (#76) from start-adding-defmt-support into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #76
2024-03-29 14:06:04 +01:00
538548b05e Merge branch 'main' into start-adding-defmt-support
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-03-29 13:50:01 +01:00
caaecdff0c Merge pull request 'Some more API adaptions' (#74) from api-name-updates into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #74
2024-03-29 13:44:03 +01:00
3045a27d8c just add support for everything
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-03-29 13:42:02 +01:00
c7cf83d468 some defmt support would be good
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2024-03-28 22:48:58 +01:00
ef37a84edc Make API more inline with other time API out there
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-03-25 16:08:30 +01:00
c1b32bca21 Merge pull request 'More granular error handling' (#73) from more-granular-error-handling into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #73
2024-03-25 14:18:37 +01:00
d9525674c3 doc fixes
All checks were successful
Rust/spacepackets/pipeline/pr-main This commit looks good
Rust/spacepackets/pipeline/head This commit looks good
2024-03-25 14:05:04 +01:00
8b151d942d Merge remote-tracking branch 'origin/main' into more-granular-error-handling
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-03-25 13:43:19 +01:00
85a8eb3f4a more granular error handling
Some checks failed
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
Rust/spacepackets/pipeline/head This commit looks good
2024-03-25 13:42:18 +01:00
fb71185b4a Merge pull request 'Introduce automatic doc feature configuration' (#72) from update-docs into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #72
2024-03-25 10:55:34 +01:00
3e62d7d411 introduce doc_auto_cfg
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-03-24 12:22:08 +01:00
3faffd52fc Merge pull request 'Update Time API' (#71) from update-time-api into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #71
2024-03-18 15:57:17 +01:00
7476fc8096 some more fixes and cleaning up
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-03-18 15:23:26 +01:00
59c7ece126 Major refactoring of the time API
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-03-18 15:14:40 +01:00
6f5254bdbd Merge pull request 'More useful conversions' (#68) from missing-ecss-enum-conversion into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #68
2024-03-11 14:57:51 +01:00
bd1927c5c2 use a more generic blanket impl
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-03-11 14:35:09 +01:00
77862868d5 these conversions are also useful
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-03-11 14:28:18 +01:00
ea05a547ac Merge pull request 'add missing doc_cfg attr' (#67) from missing-doc-cfg-attr into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #67
2024-03-04 12:58:28 +01:00
0ab69b3ddc add missing doc_cfg attr
Some checks are pending
Rust/spacepackets/pipeline/head Build started...
2024-03-04 12:55:16 +01:00
240f0bc267 Merge pull request 'CHANGELOG' (#66) from merge-conflict into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #66
2024-03-04 12:52:49 +01:00
00744a22fc Merge branch 'main' of egit.irs.uni-stuttgart.de:rust/spacepackets
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-03-04 12:26:29 +01:00
c5aeeec19f prepare rc.0 2024-03-04 12:25:47 +01:00
d13cd28962 Merge pull request 'add from impls' (#65) from ecss-enum-from-impls into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #65
2024-03-04 12:20:01 +01:00
5641d9007e Merge remote-tracking branch 'origin/main' into ecss-enum-from-impls
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-03-01 17:55:16 +01:00
f39ea2f793 Merge pull request 'improve the time API' (#64) from improve-time-api into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #64
2024-03-01 17:54:31 +01:00
e4730d4b8f changelog
Some checks failed
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main There was a failure building this commit
2024-03-01 17:54:02 +01:00
64ea7e609d better naming
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-03-01 17:52:51 +01:00
ebaa6210a4 add from impls
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-03-01 17:51:16 +01:00
d14f532f62 improve the time API
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-02-27 15:59:04 +01:00
6ea18d3715 Merge pull request 'added missing doc_cfg attribute' (#63) from add-missing-doc-cfg-attr into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #63
2024-02-19 20:12:15 +01:00
6056342334 added missing doc_cfg attribute
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-02-17 21:01:11 +01:00
4e6dcc5afa Merge pull request 'UnsignedEnum trait extensions' (#62) from unsigned-enum-vec-ext into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #62
2024-02-17 13:41:51 +01:00
200593bfb4 added tests for vec converters
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-02-17 13:27:28 +01:00
60bf876dd3 Extensions for UnsignedEnum trait
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-02-17 13:24:25 +01:00
f47604346e Merge pull request 'update PusTmZeroCopyWriter' (#61) from update-pus-tm-zero-copy-writer into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #61
2024-02-07 11:28:55 +01:00
0d0d7a256a update PusTmZeroCopyWriter
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-02-07 11:07:20 +01:00
2fd5860e18 Merge pull request 'v0.8.1' (#60) from prep-v0.8.1 into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #60
2024-02-05 15:32:09 +01:00
7e8b71db6d prep patch release
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-02-05 15:29:18 +01:00
c3cc6d5c73 Merge pull request 'extended time writer trait' (#58) from extend-time-writer-trait into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #58
2024-02-05 15:07:14 +01:00
d01309cccf extended time writer trait
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-02-05 15:04:29 +01:00
92403738ca Merge pull request 'v0.7.0' (#57) from prep_v0.7.0 into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #57
2024-02-01 17:54:35 +01:00
3353475261 prep next release
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-02-01 17:52:45 +01:00
84c1c47fe1 not sure what this does
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-02-01 17:47:25 +01:00
c4bbf91be8 lets keep all features
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-01-31 14:56:41 +01:00
7200e10250 these flags are new
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-01-31 14:55:19 +01:00
66ae83c0ce Merge pull request 'prep next beta release' (#56) from prep_v0.7.0-beta.4 into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #56
2024-01-23 18:39:48 +01:00
2439c9e5fd prep next beta release
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-01-23 17:59:56 +01:00
e992aad52c Merge pull request 'bugfix for metadata PDU creator' (#55) from metadata-pdu-creator-bugfix into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #55
2024-01-23 17:57:52 +01:00
77135af2bc bugfix for metadata PDU creator
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-01-23 17:55:07 +01:00
39 changed files with 16297 additions and 4682 deletions


@@ -1,113 +1,77 @@

Previous workflow:

on: [push]
name: ci

jobs:
  check:
    name: Check
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
      - uses: actions-rs/cargo@v1
        with:
          command: check
          args: --release

  msrv:
    name: Check with MSRV
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: 1.61.0
          override: true
          profile: minimal
      - uses: actions-rs/cargo@v1
        with:
          command: check
          args: --release

  cross-check:
    name: Check Cross
    runs-on: ubuntu-latest
    strategy:
      matrix:
        target:
          - armv7-unknown-linux-gnueabihf
          - thumbv7em-none-eabihf
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          target: ${{ matrix.target }}
          override: true
      - uses: actions-rs/cargo@v1
        with:
          use-cross: true
          command: check
          args: --release --target=${{ matrix.target }} --no-default-features

  fmt:
    name: Rustfmt
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true
      - run: rustup component add rustfmt
      - uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all -- --check

  check-doc:
    name: Check Documentation Build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly
          override: true
          profile: minimal
      - uses: actions-rs/cargo@v1
        with:
          command: doc
          args: --all-features

  clippy:
    name: Clippy
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
      - run: rustup component add clippy
      - uses: actions-rs/cargo@v1
        with:
          command: clippy
          args: -- -D warnings

  ci:
    if: ${{ success() }}
    # all new jobs must be added to this list
    needs: [check, fmt, clippy]
    runs-on: ubuntu-latest
    steps:
      - name: CI succeeded
        run: exit 0

Updated workflow:

name: ci
on: [push, pull_request]

jobs:
  build:
    name: Check build
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - run: cargo build

  test:
    name: Run Tests
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - name: Install nextest
        uses: taiki-e/install-action@nextest
      - run: cargo nextest run --all-features
      - run: cargo test --doc

  msrv:
    name: Check MSRV
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@1.83
      - run: cargo check

  cross-check:
    name: Check Cross-Compilation
    runs-on: ubuntu-latest
    strategy:
      matrix:
        target:
          - armv7-unknown-linux-gnueabihf
          - thumbv6m-none-eabi
          - thumbv7em-none-eabihf
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
        with:
          targets: "armv7-unknown-linux-gnueabihf, thumbv7em-none-eabihf, thumbv6m-none-eabi"
      - run: cargo check --target=${{matrix.target}} --no-default-features

  fmt:
    name: Check formatting
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
        with:
          components: rustfmt
      - run: cargo fmt --all -- --check

  docs:
    name: Check Documentation Build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@nightly
      - run: RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options" cargo +nightly doc --all-features --no-deps

  clippy:
    name: Clippy
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
        with:
          components: clippy
      - run: cargo clippy -- -D warnings


@@ -8,6 +8,349 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.18.0] ?
## Changed
- Added distinction between `CcsdsPacketReader::user_data` and `CcsdsPacketReader::packet_data`.
- Added distinction between `CcsdsPacketCreatorWithReservedData::user_data` and
`CcsdsPacketCreatorWithReservedData::packet_data`, including mutable variants as well.
- `SequenceCounter::MAX_BIT_WIDTH` is now a regular trait method `SequenceCounter::max_bit_width`
to allow dyn compatibility and easier usage in trait objects.
## Added
- `checksum` getter for `CcsdsPacketReader`.
- Added `SequenceCounterOnFile` which persists the sequence counter by writing it to a file.
- Added `SequenceCounter::set` method which allows manually setting an initial value.
- Added `CcsdsPacketReader::raw_data` full data getter.
## Removed
- `SequenceCounter::increment_mut` and `SequenceCounter::get_and_increment_mut`
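The `max_bit_width` change above is easiest to see with a small sketch (trait and type names are illustrative only, not the crate's actual definitions): an associated `const` would make the trait unusable as a trait object, while a regular method keeps it dyn-compatible.

```rust
// Illustrative sketch only, not the crate's real trait: an associated const
// like `MAX_BIT_WIDTH` would rule out `dyn SequenceCounter`, a method does not.
trait SequenceCounter {
    fn max_bit_width(&self) -> usize;
    fn get_and_increment(&mut self) -> u64;
}

struct WrappingCounter14Bit {
    current: u16,
}

impl SequenceCounter for WrappingCounter14Bit {
    fn max_bit_width(&self) -> usize {
        14
    }

    fn get_and_increment(&mut self) -> u64 {
        let value = self.current;
        // CCSDS sequence counts are 14 bits wide, so wrap at 2^14.
        self.current = (self.current + 1) & 0x3FFF;
        u64::from(value)
    }
}

fn main() {
    // Works as a trait object because every item is a dyn-compatible method.
    let mut counter: Box<dyn SequenceCounter> = Box::new(WrappingCounter14Bit { current: 0x3FFF });
    assert_eq!(counter.get_and_increment(), 0x3FFF);
    assert_eq!(counter.get_and_increment(), 0);
    assert_eq!(counter.max_bit_width(), 14);
}
```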
# [v0.17.0] 2025-11-06
## Changed
- `CdsCommon` renamed to `CdsBase`
- cfdp: Removed `FileDirectiveType` variant `*Pdu` suffix
- ecss: Renamed `Subservice` to `MessageSubtypeId`
- Simplified the CDS short timestamp: it contains one field less, which reduces the serialization length.
- Renamed `UnsignedEnum::value` to `UnsignedEnum::value_raw`, `value` is reserved for the `const`
value getter.
- Renamed `CcsdsPrimaryHeader::from_composite_fields` to
`CcsdsPrimaryHeader::new_from_composite_fields`
- Renamed `PusPacket::service` to `PusPacket::service_type_id` and `PusPacket::subservice` to
`PusPacket::message_subtype_id`. Also added `PusPacket::message_type_id`. Performed the same
change for the ECSS PUS C secondary header traits.
## Added
- Added `CcsdsPacketCreator`, `CcsdsPacketReader`, `CcsdsPacketCreatorWithReservedData` and
`CcsdsPacketCreatorOwned` which simplify the process of creating full CCSDS space packets.
- Added a new optional `portable-atomic` feature, because portable atomics might not work on every
architecture, in addition to requiring atomic CAS support to be enabled for the crate.
## Fixed
- All `core::sync::Atomic?` usages are feature gated properly to allow compilation on systems
without atomic CAS.
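As background for the new `CcsdsPacketCreator`/`CcsdsPacketReader` helpers listed in this section, the following hand-rolled sketch shows what assembling a CCSDS space packet involves. It follows the standard CCSDS primary header layout; the function name and signature are made up for illustration and are not the crate's API.

```rust
// Hand-rolled illustration of CCSDS space packet assembly, not the crate's API.
// Primary header: 2 bytes packet ID, 2 bytes packet sequence control,
// 2 bytes data length, followed by the packet data field.
fn write_ccsds_packet(apid: u16, seq_count: u16, packet_data: &[u8], buf: &mut Vec<u8>) {
    assert!(apid < (1 << 11), "APID is an 11-bit field");
    assert!(seq_count < (1 << 14), "sequence count is a 14-bit field");
    assert!(!packet_data.is_empty(), "the packet data field must not be empty");
    // Version 0b000, packet type 0 (telemetry), no secondary header, 11-bit APID.
    let packet_id: u16 = apid;
    // Sequence flags 0b11 (unsegmented) plus the 14-bit sequence count.
    let packet_seq_ctrl: u16 = (0b11 << 14) | seq_count;
    // Data length field is the packet data field length minus one.
    let data_len = (packet_data.len() - 1) as u16;
    buf.extend_from_slice(&packet_id.to_be_bytes());
    buf.extend_from_slice(&packet_seq_ctrl.to_be_bytes());
    buf.extend_from_slice(&data_len.to_be_bytes());
    buf.extend_from_slice(packet_data);
}

fn main() {
    let mut buf = Vec::new();
    write_ccsds_packet(0x02, 0x34, &[1, 2, 3, 4], &mut buf);
    // 6-byte primary header plus 4 bytes of packet data.
    assert_eq!(buf.len(), 10);
    assert_eq!(u16::from_be_bytes([buf[4], buf[5]]), 3);
}
```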
# [v0.16.1] 2025-09-26
## Fixed
`SpHeader::packet_len` is public now.
# [v0.16.0] 2025-09-24
- Bump Rust MSRV to v1.83
## Changed
- `PusTcCreator` has its own `service`, `subservice` and `apid` methods and does not require trait
imports anymore.
- CFDP NAK PDU `SegmentRequestIter` is not generic over the file size anymore. Instead, the
iterator returns pairs of `u64` for both large and normal file size.
- `PusVersion::VersionNotSupported` contains raw version number instead of `PusVersion` enum now
to make it more flexible.
- `pus_version` API now returns a `Result<PusVersion, u8>` instead of a `PusVersion` to allow
modelling invalid version numbers properly.
- Renamed `CcsdsPacket::total_len` to `CcsdsPacket::packet_len`
- Renamed `SequenceCountProvider` to `SequenceCounter`
- Renamed `SeqCountProviderSimple` to `SequenceCounterSimple`
- Renamed `CcsdsSimpleSeqCountProvider` to `SequenceCounterCcsdsSimple`
- Renamed `SeqCountProviderSync` to `SequenceCounterSync`
- Renamed `PusPacket::opt_crc16` to `PusPacket::checksum`
- Renamed `PacketSequenceCtrl` to `PacketSequenceControl`
- ECSS checksum generation is now optional as specified in the standard. Added `has_checksum`
parameters for ECSS TM/TC creators and readers to reflect this.
- APID is represented by `arbitrary-int::u11` while the sequence count is represented by
`arbitrary-int::u14`. A lot of corresponding checks were removed because these types now ensure
value validity.
- ACK field changed from `u8` to `AckFlags` structure.
- PUS version raw representation is `u4` now.
- SC time reference status representation is `u4` now.
- Renamed `ptype` to `packet_type`
- Renamed `PduHeader::new_no_file_data` to `PduHeader::new_for_file_directive`
- Renamed `FinishedPduCreator::new_generic` to `new` and `new_default` to `new_no_error`
## Removed
- `PusVersion::Invalid`, which will be modelled with `Result<PusVersion, u8>` now.
## Added
- `cfdp::pdu::ack::InvalidAckedDirectiveCodeError` which is returned by the `AckPdu` constructor.
- `cfdp::pdu::nak::NakPduCreatorWithReservedSegReqsBuf` constructor which exposes the segment
request buffer mutably to avoid the need for a separate segment request buffer.
- `SpHeader::packet_len` direct method.
- `AckFlags` which is implemented with `bitbybit::bitfield`
- `ApidOutOfRangeError` and `SequenceCountOutOfRangeError`
- Added PUS A legacy support for telecommands inside the `ecss.tc_pus_a` module
- Added `SequenceCounter::increment_mut` and `SequenceCounter::get_and_increment_mut`
- Implemented `SequenceCounter` for `Atomic` unsigned types and references of them
- `PusPacket::has_checksum` and `WritablePusPacket::has_checksum`
- PUS TC builder API, either via `PusTcBuilder::new`, or `PusTcCreator::builder`
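The `pus_version` change listed above (returning `Result<PusVersion, u8>`) can be illustrated with a generic sketch; the enum shape and raw values (PUS A = 1, PUS C = 2) are assumptions for illustration, not the crate's exact definitions.

```rust
// Generic sketch of modelling an invalid raw version number with a Result.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum PusVersion {
    PusA = 1,
    PusC = 2,
}

fn pus_version_from_raw(raw: u8) -> Result<PusVersion, u8> {
    match raw {
        1 => Ok(PusVersion::PusA),
        2 => Ok(PusVersion::PusC),
        // The raw value is preserved so callers can report what was actually read.
        other => Err(other),
    }
}

fn main() {
    assert_eq!(pus_version_from_raw(2), Ok(PusVersion::PusC));
    assert_eq!(pus_version_from_raw(7), Err(7));
}
```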
# [v0.15.0] 2025-07-18
## Added
- `PusTcCreatorWithReservedAppData` and `PusTmCreatorWithReservedSourceData` constructor variants
which allow writing source/app data into the serialization buffer directly without
requiring an extra buffer.
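These constructor variants describe a write-in-place pattern: the payload region is reserved inside the serialization buffer and handed out as a mutable slice, so the caller can fill it without an intermediate copy. A generic sketch of that idea (not the crate's API):

```rust
// Generic sketch of the "reserved payload region" pattern, not the crate's API.
// The header is written first, and the payload slice is handed out mutably so
// the caller can fill it in place.
fn reserve_payload<'a>(buf: &'a mut [u8], header: &[u8], payload_len: usize) -> &'a mut [u8] {
    let total = header.len() + payload_len;
    assert!(buf.len() >= total, "serialization buffer too small");
    buf[..header.len()].copy_from_slice(header);
    &mut buf[header.len()..total]
}

fn main() {
    let mut buf = [0u8; 16];
    let payload = reserve_payload(&mut buf, &[0xAA, 0xBB], 4);
    // The caller writes directly into the reserved region, e.g. from a file read.
    payload.copy_from_slice(&[1, 2, 3, 4]);
    assert_eq!(buf[..6], [0xAA, 0xBB, 1, 2, 3, 4]);
}
```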
# [v0.14.0] 2025-05-10
## Changed
- Moved CRC constants/implementations to dedicated `crc` module.
- `crc::CRC_CCITT_FALSE_NO_TABLE` and `crc::CRC_CCITT_FALSE_BIG_TABLE` variants.
- Renamed `PusPacket::crc16` to `PusPacket::opt_crc16`.
## Added
- `WritablePusPacket::write_to_bytes_crc_no_table` and `WritablePusPacket::write_to_bytes_no_crc`
variants.
- `PusTmReader::new_crc_no_table` and `PusTcReader::new_crc_no_table` variants.
- `crc16` methods for PUS TM and PUS TC reader.
- PUS TM and PUS TC reader now return the reader instance directly instead of a tuple of the reader
and the read size. The instance's `total_len` method can be used to retrieve the read length.
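The checksum in question is CRC-16/CCITT-FALSE. Below is a small standalone example using the external `crc` crate (3.x API assumed, where the same algorithm is named `CRC_16_IBM_3740`) rather than the constants listed above: appending the big-endian checksum and re-running the CRC over data plus checksum yields zero on success.

```rust
// Example with the external `crc` crate (3.x assumed), not the spacepackets
// constants: CRC-16/CCITT-FALSE is what the `crc` crate calls CRC_16_IBM_3740.
use crc::{Crc, CRC_16_IBM_3740};

const CRC_CCITT_FALSE: Crc<u16> = Crc::<u16>::new(&CRC_16_IBM_3740);

fn append_checksum(packet: &mut Vec<u8>) {
    let checksum = CRC_CCITT_FALSE.checksum(packet);
    packet.extend_from_slice(&checksum.to_be_bytes());
}

fn verify_checksum(packet: &[u8]) -> bool {
    // Running the CRC over data plus appended checksum yields 0 on success.
    CRC_CCITT_FALSE.checksum(packet) == 0
}

fn main() {
    let mut packet = vec![0x18, 0x01, 0xC0, 0x00, 0x00, 0x02, 0xAB, 0xCD];
    append_checksum(&mut packet);
    assert!(verify_checksum(&packet));
}
```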
# [v0.13.1] 2025-03-21
- Bugfix due to operator precedence for `PusTcSecondaryHeader::pus_version`,
`PusTcSecondaryHeaderWithoutTimestamp::pus_version`, `CdsTime::from_bytes_with_u16_days` and
`CdsTime::from_bytes_with_u24_days`
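The changelog does not show the offending expression, so the following is only a generic illustration of this class of operator precedence pitfall in Rust (shift versus mask binding):

```rust
// Generic illustration of the pitfall class, not the actual fixed code.
fn main() {
    let raw: u8 = 0b0010_0001;
    // `>>` binds tighter than `&`, so this extracts the upper nibble as intended:
    let upper_nibble = raw >> 4 & 0b1111;
    // Putting the mask first does not do what it looks like: this parses as
    // `raw & (0b1111 >> 4)`, i.e. `raw & 0`.
    let broken = raw & 0b1111 >> 4;
    assert_eq!(upper_nibble, 0b0010);
    assert_eq!(broken, 0);
}
```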
# [v0.13.0] 2024-11-08
- Bumped MSRV to 1.81.0
- Bump `zerocopy` to v0.8.0
- Bump `thiserror` to v2.0.0
## Changed
- Migrated all Error implementations to thiserror, improved some naming and error handling in
general
# [v0.12.0] 2024-09-10
- Bumped MSRV to 1.70.0
## Added
- Added new `cfdp::tlv::TlvOwned` type which erases the lifetime and is clonable.
- Dedicated `cfdp::tlv::TlvLvDataTooLarge` error struct for APIs where this is the only possible
API error.
- Added File Data PDU API which takes the expected file data size and then exposes the unwritten
file data field as a mutable slice. This allows reading data from the virtual file system
API into the file data buffer without an intermediate buffer.
- Generic `EofPdu::new` constructor.
- Added generic sequence counter module.
- Added `MsgToUserTlv::to_tlv` converter which converts the specialized type to a generic `Tlv`.
- Implemented `From<MsgToUserTlv> for Tlv` converter trait.
- Added CFDP maximum file segment length calculator method `calculate_max_file_seg_len_for_max_packet_len_and_pdu_header`
## Added and Changed
- Added new `ReadableTlv` to avoid some boilerplate code and have a common abstraction implemented
for both `Tlv` and `TlvOwned` to read the raw TLV data field and its length.
- Replaced `cfdp::tlv::TlvLvError` by `cfdp::tlv::TlvLvDataTooLarge` where applicable.
## Fixed
- Fixed an error in the EOF writer which wrote the fault location to the wrong buffer position.
- cfdp `ConditionCode::CheckLimitReached` previously had the wrong numerical value of `0b1001` (9)
and now has the correct value of `0b1010` (10).
## Changed
- Minor documentation build updates.
- Increased delegate version range to v0.13
# [v0.11.2] 2024-05-19
- Bumped MSRV to 1.68.2
## Fixed
- Removed `defmt::Format` impl for `MetadataPduCreator` which seems to be problematic.
# [v0.11.1] 2024-04-22
## Fixed
- The default data length for `SpHeader` constructors where the data field length is not
specified is now 0.
- The `SpHeader::new_from_fields` is public now.
## Added
- `SpHeader::to_vec` method.
# [v0.11.0] 2024-04-16
## Changed
- Moved `CCSDS_HEADER_LEN` constant to the crate root.
## Added
- Added `SpacePacketHeader` type alias for `SpHeader` type.
# [v0.11.0-rc.2] 2024-04-04
## Changed
- Renamed `PacketId` and `PacketSequenceCtrl` `new` method to `new_checked` and former
`new_const` method to `new`.
- Renamed `tc`, `tm`, `tc_unseg` and `tm_unseg` variants for `PacketId` and `SpHeader`
to `new_for_tc_checked`, `new_for_tm_checked`, `new_for_unseg_tc_checked` and
`new_for_unseg_tm_checked`.
- `PusTmCreator` and `PusTcCreator` now expect a regular instance of `SpHeader` instead of
a mutable reference.
## Added
- `SpHeader::new_from_apid` and `SpHeader::new_from_apid_checked` constructor.
- `#[inline]` attribute for a lot of small functions.
# [v0.11.0-rc.1] 2024-04-03
Major API changes for the time API. If you are using the time API, it is strongly recommended
to check all the API changes in the **Changed** chapter.
## Fixed
- CUC timestamp was fixed to include leap second corrections because it is based on the TAI
time reference. The default CUC time object does not implement `CcsdsTimeProvider` anymore
because the trait methods require cached leap second information. This task is now performed
by `cuc::CucTimeWithLeapSecs`, which implements the trait.
## Added
- `From<$EcssEnum$TY> for $TY` for the ECSS enum type definitions.
- Added basic conversion support for the `time` library. Introduced new `chrono` and `timelib`
feature gates.
- Added `CcsdsTimeProvider::timelib_date_time`.
- Optional support for `defmt` by adding optional `defmt::Format` derives for common types.
## Changed
- `PusTcCreator::new_simple` now expects a valid slice for the source data instead of an optional
slice. For telecommands without application data, `&[]` can be passed.
- `PusTmSecondaryHeader` constructors now expect a valid slice for the time stamp instead of an
optional slice.
- Renamed `CcsdsTimeProvider::date_time` to `CcsdsTimeProvider::chrono_date_time`
- Renamed `CcsdsTimeCodes` to `CcsdsTimeCode`
- Renamed `cds::TimeProvider` to `cds::CdsTime`
- Renamed `cuc::TimeProviderCcsdsEpoch` to `cuc::CucTime`
- `UnixTimestamp` renamed to `UnixTime`
- `UnixTime` seconds are now private and can be retrieved using the `secs` member method.
- `UnixTime::new` renamed to `UnixTime::new_checked`.
- `UnixTime::secs` renamed to `UnixTime::as_secs`.
- `UnixTime` now has nanosecond subsecond precision. The `new` constructor now expects
nanoseconds as the second argument.
- Added new `UnixTime::new_subsec_millis` and `UnixTime::new_subsec_millis_checked` API
to still allow creating a timestamp with only millisecond subsecond resolution. See the
sketch after this list.
- `CcsdsTimeProvider` now has a new `subsec_nanos` method in addition to a default
implementation for the `subsec_millis` method.
- `CcsdsTimeProvider::date_time` renamed to `CcsdsTimeProvider::chrono_date_time`.
- Added `UnixTime::MIN`, `UnixTime::MAX` and `UnixTime::EPOCH`.
- Added `UnixTime::timelib_date_time`.
- Error handling for the ECSS and time modules is now more granular, with a new
`DateBeforeCcsdsEpochError` error and a `DateBeforeCcsdsEpoch` enum variant for both
`CdsError` and `CucError`.
- `PusTmCreator` now has two lifetimes: One for the raw source data buffer and one for the
raw timestamp.
- Time API `from_now*` API renamed to `now*`.
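A minimal sketch of the reworked `UnixTime` API based on the entries above; the exact return type of the checked constructors is not shown here and the timestamp values are placeholders.
```rust
use spacepackets::time::UnixTime;

// The second argument of `new` is now interpreted as nanoseconds.
let stamp = UnixTime::new(1_700_000_000, 500_000_000);
assert_eq!(stamp.as_secs(), 1_700_000_000);
// Millisecond-resolution construction is still available through the dedicated helper.
let stamp_ms = UnixTime::new_subsec_millis(1_700_000_000, 500);
// New sentinel values.
let _epoch = UnixTime::EPOCH;
let _ = stamp_ms;
```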
## Removed
- Legacy `PusTm` and `PusTc` objects.
# [v0.11.0-rc.0] 2024-03-04
## Added
- `From<$TY>` for the `EcssEnum$TY` ECSS enum type definitions.
- `Sub` implementation for `UnixTimestamp` to calculate the duration between two timestamps.
## Changed
- `CcsdsTimeProvider` `subsecond_millis` function now returns `u16` instead of `Option<u16>`.
- `UnixTimestamp` `subsecond_millis` function now returns `u16` instead of `Option<u16>`.
# [v0.10.0] 2024-02-17
## Added
- Added `value` and `to_vec` methods for the `UnsignedEnum` trait. The value is returned
as `u64`. Renamed the former `value` method on `GenericUnsignedByteField` to `value_typed`.
- Added `value_const` const function for `UnsignedByteField` type.
- Added `value_typed` const functions for `GenericUnsignedByteField` and `GenericEcssEnumWrapper`.
# [v0.9.0] 2024-02-07
## Added
- `CcsdsPacket`, `PusPacket` and `GenericPusTmSecondaryHeader` implementation for
`PusTmZeroCopyWriter`.
- Additional length checks for `PusTmZeroCopyWriter`.
## Changed
- `PusTmZeroCopyWriter`: Added additional timestamp length argument for `new` constructor.
## Fixed
- Typo: `PUC_TM_MIN_HEADER_LEN` -> `PUS_TM_MIN_HEADER_LEN`
# [v0.8.1] 2024-02-05
## Fixed
- Added `pub` visibility for `PacketSequenceCtrl::const_new`.
# [v0.8.0] 2024-02-05
## Added
- Added `len_written` and `to_vec` methods to the `TimeWriter` trait.
# [v0.7.0] 2024-02-01
# [v0.7.0-beta.4] 2024-01-23
## Fixed
- `MetadataPduCreator`: The serialization function shifted the closure-requested information
to the wrong position (first reserved bit) inside the raw content field.
# [v0.7.0-beta.3] 2023-12-06
## Added
@@ -355,3 +698,13 @@ The timestamp of `PusTm` is now optional. See Added and Changed section for deta
Initial release with CCSDS Space Packet Primary Header implementation and basic PUS TC and TM
implementations.
[unreleased]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.17.0...HEAD
[v0.17.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.16.1...v0.17.0
[v0.16.1]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.16.0...v0.16.1
[v0.16.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.15.0...v0.16.0
[v0.15.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.14.0...v0.15.0
[v0.14.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.13.1...v0.14.0
[v0.13.1]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.13.0...v0.13.1
[v0.13.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.12.0...v0.13.0
[v0.12.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.11.2...v0.12.0


@@ -1,8 +1,8 @@
 [package]
 name = "spacepackets"
-version = "0.7.0-beta.3"
+version = "0.17.0"
 edition = "2021"
-rust-version = "1.61"
+rust-version = "1.83"
 authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
 description = "Generic implementations for various CCSDS and ECSS packet standards"
 homepage = "https://egit.irs.uni-stuttgart.de/rust/spacepackets"
@@ -14,43 +14,35 @@ categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-sup
 [dependencies]
 crc = "3"
-delegate = ">=0.8, <0.11"
-[dependencies.zerocopy]
-version = "0.7"
-features = ["derive"]
-[dependencies.thiserror]
-version = "1"
-optional = true
-[dependencies.num_enum]
-version = ">0.5, <=0.7"
-default-features = false
-[dependencies.serde]
-version = "1"
-optional = true
-default-features = false
-features = ["derive"]
-[dependencies.chrono]
-version = "0.4"
-default-features = false
-[dependencies.num-traits]
-version = "0.2"
-default-features = false
-[dev-dependencies.postcard]
-version = "1"
+delegate = "0.13"
+paste = "1"
+zerocopy = { version = "0.8", features = ["derive"] }
+thiserror = { version = "2", default-features = false }
+num_enum = { version = "0.7", default-features = false }
+num-traits = { version = "0.2", default-features = false }
+serde = { version = "1", optional = true, default-features = false, features = ["derive"] }
+arbitrary-int = { version = "2" }
+portable-atomic = { version = "1", optional = true }
+bitbybit = "1.4"
+time = { version = "0.3", default-features = false, optional = true }
+chrono = { version = "0.4", default-features = false, optional = true }
+defmt = { version = "1", default-features = false, optional = true }
 [features]
 default = ["std"]
-std = ["chrono/std", "chrono/clock", "alloc", "thiserror"]
-serde = ["dep:serde", "chrono/serde"]
-alloc = ["postcard/alloc", "chrono/alloc"]
+std = ["alloc", "chrono/std", "chrono/clock", "thiserror/std"]
+portable-atomic = ["dep:portable-atomic", "portable-atomic/require-cas"]
+defmt = ["dep:defmt", "arbitrary-int/defmt"]
+serde = ["dep:serde", "chrono?/serde", "arbitrary-int/serde"]
+alloc = ["chrono?/alloc", "defmt?/alloc", "serde?/alloc"]
+timelib = ["dep:time"]
+[dev-dependencies]
+postcard = { version = "1", features = ["alloc"] }
+chrono = "0.4"
+tempfile = "3"
 [package.metadata.docs.rs]
 all-features = true
-rustdoc-args = ["--cfg", "doc_cfg"]
+rustdoc-args = ["--generate-link-to-definition"]

FUNDING.yml Normal file

@@ -0,0 +1 @@
github: robamu


@@ -1,7 +1,7 @@
 [![Crates.io](https://img.shields.io/crates/v/spacepackets)](https://crates.io/crates/spacepackets)
 [![docs.rs](https://img.shields.io/docsrs/spacepackets)](https://docs.rs/spacepackets)
 [![ci](https://github.com/us-irs/spacepackets-rs/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/us-irs/spacepackets-rs/actions/workflows/ci.yml)
-[![coverage](https://shields.io/endpoint?url=https://absatsw.irs.uni-stuttgart.de/projects/spacepackets/coverage-rs/latest/coverage.json)](https://absatsw.irs.uni-stuttgart.de/projects/spacepackets/coverage-rs/latest/index.html)
+[![matrix chat](https://img.shields.io/matrix/sat-rs%3Amatrix.org)](https://matrix.to/#/#sat-rs:matrix.org)
 ECSS and CCSDS Spacepackets
 ======
@@ -29,10 +29,6 @@ Currently, this includes the following components:
 `spacepackets` supports various runtime environments and is also suitable for `no_std` environments.
-It also offers optional support for [`serde`](https://serde.rs/). This allows serializing and
-deserializing them with an appropriate `serde` provider like
-[`postcard`](https://github.com/jamesmunns/postcard).
 ## Default features
 - [`std`](https://doc.rust-lang.org/std/): Enables functionality relying on the standard library.
@@ -43,6 +39,13 @@ deserializing them with an appropriate `serde` provider like
 ## Optional Features
 - [`serde`](https://serde.rs/): Adds `serde` support for most types by adding `Serialize` and `Deserialize` `derive`s
+- [`chrono`](https://crates.io/crates/chrono): Add basic support for the `chrono` time library.
+- [`timelib`](https://crates.io/crates/time): Add basic support for the `time` time library.
+- [`defmt`](https://defmt.ferrous-systems.com/): Add support for `defmt` by adding the
+  [`defmt::Format`](https://defmt.ferrous-systems.com/format) derive on many types.
+- [`portable-atomic`](https://github.com/taiki-e/portable-atomic): Basic support for the `portable-atomic`
+  crate in addition to the support for core atomic types. This support requires atomic CAS support
+  to be enabled in the `portable-atomic` crate.
 # Examples
@@ -51,13 +54,21 @@ usage examples.
 # Coverage
-Coverage was generated using [`grcov`](https://github.com/mozilla/grcov). If you have not done so
-already, install the `llvm-tools-preview`:
+Coverage can be generated using [`llvm-cov`](https://github.com/taiki-e/cargo-llvm-cov). If you have not done so
+already, install the tool:
 ```sh
-rustup component add llvm-tools-preview
-cargo install grcov --locked
+cargo +stable install cargo-llvm-cov --locked
 ```
-After that, you can simply run `coverage.py` to test the project with coverage. You can optionally
-supply the `--open` flag to open the coverage report in your webbrowser.
+After this, you can run `cargo llvm-cov nextest` to run all the tests and display coverage.
+# Miri
+You can run the [`miri`](https://github.com/rust-lang/miri) tool on this library to check for
+undefined behaviour (UB). This library does not use any `unsafe` code blocks, but `miri` could
+still catch UB from used libraries.
+```sh
+cargo +nightly miri nextest run --all-features
+```
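For quick reference, the coverage workflow described above boils down to the following commands. The HTML variant mirrors the `coverage-html` recipe of the new justfile further below and assumes `cargo-nextest` is installed.
```sh
cargo +stable install cargo-llvm-cov --locked
# Terminal summary
cargo llvm-cov nextest
# HTML report, opened in the browser
cargo llvm-cov nextest --html --open
```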


@@ -15,7 +15,10 @@ RUN rustup install nightly && \
     rustup target add thumbv7em-none-eabihf armv7-unknown-linux-gnueabihf && \
     rustup component add rustfmt clippy llvm-tools-preview
+# Get grcov
 RUN curl -sSL https://github.com/mozilla/grcov/releases/download/v0.8.19/grcov-x86_64-unknown-linux-gnu.tar.bz2 | tar -xj --directory /usr/local/bin
+# Get nextest
+RUN curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin
 # SSH stuff to allow deployment to doc server
 RUN adduser --uid 114 jenkins


@@ -21,7 +21,9 @@ pipeline {
       }
     }
     stage('Docs') {
       steps {
-        sh 'cargo +nightly doc --all-features'
+        sh """
+        RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options" cargo +nightly doc --all-features
+        """
       }
     }
     stage('Rustfmt') {
@@ -31,7 +33,8 @@ pipeline {
       }
     }
     stage('Test') {
      steps {
-        sh 'cargo test --all-features'
+        sh 'cargo nextest r --all-features'
+        sh 'cargo test --doc'
       }
     }
     stage('Check with all features') {


@@ -1,54 +0,0 @@
#!/usr/bin/env python3
import os
import logging
import argparse
import webbrowser

_LOGGER = logging.getLogger()


def generate_cov_report(open_report: bool, format: str):
    logging.basicConfig(level=logging.INFO)
    os.environ["RUSTFLAGS"] = "-Cinstrument-coverage"
    os.environ["LLVM_PROFILE_FILE"] = "target/coverage/%p-%m.profraw"
    _LOGGER.info("Executing tests with coverage")
    os.system("cargo test --all-features")
    out_path = "./target/debug/coverage"
    if format == "lcov":
        out_path = "./target/debug/lcov.info"
    os.system(
        f"grcov . -s . --binary-path ./target/debug/ -t {format} --branch --ignore-not-existing "
        f"-o {out_path}"
    )
    if format == "lcov":
        os.system(
            "genhtml -o ./target/debug/coverage/ --show-details --highlight --ignore-errors source "
            "--legend ./target/debug/lcov.info"
        )
    if open_report:
        coverage_report_path = os.path.abspath("./target/debug/coverage/index.html")
        webbrowser.open_new_tab(coverage_report_path)
    _LOGGER.info("Done")


def main():
    parser = argparse.ArgumentParser(
        description="Generate coverage report and optionally open it in a browser"
    )
    parser.add_argument(
        "--open", action="store_true", help="Open the coverage report in a browser"
    )
    parser.add_argument(
        "--format",
        choices=["html", "lcov"],
        default="html",
        help="Choose report format (html or lcov)",
    )
    args = parser.parse_args()
    generate_cov_report(args.open, args.format)


if __name__ == "__main__":
    main()

justfile Normal file

@@ -0,0 +1,36 @@
all: check build embedded test clippy check-fmt docs coverage

clippy:
    cargo clippy -- -D warnings

fmt:
    cargo fmt --all

check-fmt:
    cargo fmt --all -- --check

check:
    cargo check --all-features

embedded:
    cargo build --target thumbv7em-none-eabihf --no-default-features
    cargo build --target thumbv6m-none-eabi --no-default-features

test:
    cargo nextest r --all-features
    cargo test --doc

build:
    cargo build --all-features

docs:
    RUSTDOCFLAGS="--cfg docsrs -Z unstable-options --generate-link-to-definition" cargo +nightly doc --all-features --no-deps

docs-html:
    RUSTDOCFLAGS="--cfg docsrs -Z unstable-options --generate-link-to-definition" cargo +nightly doc --all-features --open

coverage:
    cargo llvm-cov nextest

coverage-html:
    cargo llvm-cov nextest --html --open
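Assuming the [`just`](https://github.com/casey/just) command runner is installed, these recipes can be invoked individually or through the default `all` recipe:
```sh
# Runs the full `all` recipe (the first recipe in the justfile)
just
# Or run individual recipes
just test
just coverage-html
```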


@@ -4,11 +4,14 @@ Checklist for new releases
 # Pre-Release
 1. Make sure any new modules are documented sufficiently enough and check docs with
-   `cargo +nightly doc --all-features --config 'rustdocflags=["--cfg", "doc_cfg"]' --open`.
+   `RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options" cargo +nightly doc --all-features --open`
+   or `cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docsrs" --generate-link-to-definition"]' --open`
+   (was problematic on more recent nightly versions).
 2. Bump version specifier in `Cargo.toml`.
 3. Update `CHANGELOG.md`: Convert `unreleased` section into version section with date and add new
    `unreleased` section.
-4. Run `cargo test --all-features`.
+4. Run `cargo test --all-features` or `cargo nextest r --all-features` together with
+   `cargo test --doc`.
 5. Run `cargo fmt` and `cargo clippy`. Check `cargo msrv` against MSRV in `Cargo.toml`.
 6. Wait for CI/CD results for EGit and Github. These also check cross-compilation for bare-metal
    targets.


@@ -1,5 +1,4 @@
//! Generic CFDP length-value (LV) abstraction as specified in CFDP 5.1.8. //! Generic CFDP length-value (LV) abstraction as specified in CFDP 5.1.8.
use crate::cfdp::TlvLvError;
use crate::ByteConversionError; use crate::ByteConversionError;
use core::str::Utf8Error; use core::str::Utf8Error;
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
@@ -7,6 +6,9 @@ use serde::{Deserialize, Serialize};
#[cfg(feature = "std")] #[cfg(feature = "std")]
use std::string::String; use std::string::String;
use super::TlvLvDataTooLargeError;
/// Minimum length of a CFDP length-value structure in bytes.
pub const MIN_LV_LEN: usize = 1; pub const MIN_LV_LEN: usize = 1;
/// Generic CFDP length-value (LV) abstraction as specified in CFDP 5.1.8. /// Generic CFDP length-value (LV) abstraction as specified in CFDP 5.1.8.
@@ -20,6 +22,7 @@ pub const MIN_LV_LEN: usize = 1;
/// this will be the lifetime of that data reference. /// this will be the lifetime of that data reference.
#[derive(Debug, Copy, Clone, Eq)] #[derive(Debug, Copy, Clone, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct Lv<'data> { pub struct Lv<'data> {
data: &'data [u8], data: &'data [u8],
// If the LV was generated from a raw bytestream, this will contain the start of the // If the LV was generated from a raw bytestream, this will contain the start of the
@@ -61,9 +64,14 @@ pub(crate) fn generic_len_check_deserialization(
} }
impl<'data> Lv<'data> { impl<'data> Lv<'data> {
pub fn new(data: &[u8]) -> Result<Lv, TlvLvError> { /// Minimum length of a LV structure in bytes.
pub const MIN_LEN: usize = MIN_LV_LEN;
/// Generic constructor.
#[inline]
pub fn new(data: &[u8]) -> Result<Lv<'_>, TlvLvDataTooLargeError> {
if data.len() > u8::MAX as usize { if data.len() > u8::MAX as usize {
return Err(TlvLvError::DataTooLarge(data.len())); return Err(TlvLvDataTooLargeError(data.len()));
} }
Ok(Lv { Ok(Lv {
data, data,
@@ -72,6 +80,7 @@ impl<'data> Lv<'data> {
} }
/// Creates a LV with an empty value field. /// Creates a LV with an empty value field.
#[inline]
pub fn new_empty() -> Lv<'data> { pub fn new_empty() -> Lv<'data> {
Lv { Lv {
data: &[], data: &[],
@@ -81,45 +90,53 @@ impl<'data> Lv<'data> {
/// Helper function to build a string LV. This is especially useful for the file or directory /// Helper function to build a string LV. This is especially useful for the file or directory
/// path LVs /// path LVs
pub fn new_from_str(str_slice: &str) -> Result<Lv, TlvLvError> { #[inline]
pub fn new_from_str(str_slice: &str) -> Result<Lv<'_>, TlvLvDataTooLargeError> {
Self::new(str_slice.as_bytes()) Self::new(str_slice.as_bytes())
} }
/// Helper function to build a string LV. This is especially useful for the file or directory /// Helper function to build a string LV. This is especially useful for the file or directory
/// path LVs /// path LVs
#[cfg(feature = "std")] #[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] #[inline]
pub fn new_from_string(string: &'data String) -> Result<Lv<'data>, TlvLvError> { pub fn new_from_string(string: &'data String) -> Result<Lv<'data>, TlvLvDataTooLargeError> {
Self::new(string.as_bytes()) Self::new(string.as_bytes())
} }
/// Returns the length of the value part, not including the length byte. /// Returns the length of the value part, not including the length byte.
#[inline]
pub fn len_value(&self) -> usize { pub fn len_value(&self) -> usize {
self.data.len() self.data.len()
} }
/// Returns the full raw length, including the length byte. /// Returns the full raw length, including the length byte.
#[inline]
pub fn len_full(&self) -> usize { pub fn len_full(&self) -> usize {
self.len_value() + 1 self.len_value() + 1
} }
/// Checks whether the value field is empty. /// Checks whether the value field is empty.
#[inline]
pub fn is_empty(&self) -> bool { pub fn is_empty(&self) -> bool {
self.data.len() == 0 self.data.len() == 0
} }
/// Raw value part of the LV.
#[inline]
pub fn value(&self) -> &[u8] { pub fn value(&self) -> &[u8] {
self.data self.data
} }
/// If the LV was generated from a raw bytestream using [Self::from_bytes], the raw start /// If the LV was generated from a raw bytestream using [Self::from_bytes], the raw start
/// of the LV can be retrieved with this method. /// of the LV can be retrieved with this method.
#[inline]
pub fn raw_data(&self) -> Option<&[u8]> { pub fn raw_data(&self) -> Option<&[u8]> {
self.raw_data self.raw_data
} }
/// Convenience function to extract the value as a [str]. This is useful if the LV is /// Convenience function to extract the value as a [str]. This is useful if the LV is
/// known to contain a [str], for example being a file name. /// known to contain a [str], for example being a file name.
#[inline]
pub fn value_as_str(&self) -> Option<Result<&'data str, Utf8Error>> { pub fn value_as_str(&self) -> Option<Result<&'data str, Utf8Error>> {
if self.is_empty() { if self.is_empty() {
return None; return None;
@@ -135,6 +152,7 @@ impl<'data> Lv<'data> {
} }
/// Reads a LV from a raw buffer. /// Reads a LV from a raw buffer.
#[inline]
pub fn from_bytes(buf: &'data [u8]) -> Result<Lv<'data>, ByteConversionError> { pub fn from_bytes(buf: &'data [u8]) -> Result<Lv<'data>, ByteConversionError> {
generic_len_check_deserialization(buf, MIN_LV_LEN)?; generic_len_check_deserialization(buf, MIN_LV_LEN)?;
Self::from_be_bytes_no_len_check(buf) Self::from_be_bytes_no_len_check(buf)
@@ -151,6 +169,7 @@ impl<'data> Lv<'data> {
MIN_LV_LEN + self.data.len() MIN_LV_LEN + self.data.len()
} }
#[inline]
pub(crate) fn from_be_bytes_no_len_check( pub(crate) fn from_be_bytes_no_len_check(
buf: &'data [u8], buf: &'data [u8],
) -> Result<Lv<'data>, ByteConversionError> { ) -> Result<Lv<'data>, ByteConversionError> {
@@ -164,11 +183,11 @@ impl<'data> Lv<'data> {
} }
#[cfg(test)] #[cfg(test)]
pub mod tests { mod tests {
use super::*;
use alloc::string::ToString; use alloc::string::ToString;
use crate::cfdp::TlvLvError; use super::*;
use crate::ByteConversionError; use crate::ByteConversionError;
use std::string::String; use std::string::String;
@@ -259,15 +278,11 @@ pub mod tests {
let lv = Lv::new(&data_big); let lv = Lv::new(&data_big);
assert!(lv.is_err()); assert!(lv.is_err());
let error = lv.unwrap_err(); let error = lv.unwrap_err();
if let TlvLvError::DataTooLarge(size) = error { assert_eq!(error.0, u8::MAX as usize + 1);
assert_eq!(size, u8::MAX as usize + 1); assert_eq!(
assert_eq!( error.to_string(),
error.to_string(), "data with size 256 larger than allowed 255 bytes"
"data with size 256 larger than allowed 255 bytes" );
);
} else {
panic!("invalid exception {:?}", error)
}
} }
#[test] #[test]
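A minimal usage sketch for the reworked `Lv` API shown above; the module path is assumed from the file layout and the file name is a placeholder.
```rust
use spacepackets::cfdp::lv::Lv;

// Construction only fails if the value exceeds 255 bytes.
let lv = Lv::new_from_str("/tmp/dest-file.bin").expect("creating LV failed");
assert_eq!(lv.len_value(), "/tmp/dest-file.bin".len());
// The full raw length includes the one-byte length field.
assert_eq!(lv.len_full(), lv.len_value() + 1);
assert_eq!(lv.value_as_str().unwrap().unwrap(), "/tmp/dest-file.bin");
```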


@@ -1,11 +1,8 @@
//! Low-level CCSDS File Delivery Protocol (CFDP) support according to [CCSDS 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf). //! Low-level CCSDS File Delivery Protocol (CFDP) support according to [CCSDS 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf).
use crate::ByteConversionError; use crate::ByteConversionError;
use core::fmt::{Display, Formatter};
use num_enum::{IntoPrimitive, TryFromPrimitive}; use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
pub mod lv; pub mod lv;
pub mod pdu; pub mod pdu;
@@ -16,35 +13,55 @@ pub const CFDP_VERSION_2_NAME: &str = "CCSDS 727.0-B-5";
/// Currently, only this version is supported. /// Currently, only this version is supported.
pub const CFDP_VERSION_2: u8 = 0b001; pub const CFDP_VERSION_2: u8 = 0b001;
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] /// PDU type.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)] #[repr(u8)]
pub enum PduType { pub enum PduType {
/// File directive PDU.
FileDirective = 0, FileDirective = 0,
/// File data PDU.
FileData = 1, FileData = 1,
} }
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] /// PDU direction.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)] #[repr(u8)]
pub enum Direction { pub enum Direction {
/// Going towards the file receiver.
TowardsReceiver = 0, TowardsReceiver = 0,
/// Going towards the file sender.
TowardsSender = 1, TowardsSender = 1,
} }
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] /// PDU transmission mode.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)] #[repr(u8)]
pub enum TransmissionMode { pub enum TransmissionMode {
/// Acknowledged (class 1) transfer.
Acknowledged = 0, Acknowledged = 0,
/// Unacknowledged (class 2) transfer.
Unacknowledged = 1, Unacknowledged = 1,
} }
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] /// CRC flag.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)] #[repr(u8)]
pub enum CrcFlag { pub enum CrcFlag {
/// No CRC for the packet.
NoCrc = 0, NoCrc = 0,
/// Packet has CRC.
WithCrc = 1, WithCrc = 1,
} }
@@ -67,48 +84,76 @@ impl From<CrcFlag> for bool {
} }
/// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75) /// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] #[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)] #[repr(u8)]
pub enum SegmentMetadataFlag { pub enum SegmentMetadataFlag {
/// Segment metadata not present.
NotPresent = 0, NotPresent = 0,
/// Segment metadata present.
Present = 1, Present = 1,
} }
/// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75) /// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] #[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)] #[repr(u8)]
pub enum SegmentationControl { pub enum SegmentationControl {
/// No record boundary preservation.
NoRecordBoundaryPreservation = 0, NoRecordBoundaryPreservation = 0,
/// With record boundary preservation.
WithRecordBoundaryPreservation = 1, WithRecordBoundaryPreservation = 1,
} }
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] /// Fault handler codes according to the CFDP standard.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u3, exhaustive = false)]
#[repr(u8)] #[repr(u8)]
pub enum FaultHandlerCode { pub enum FaultHandlerCode {
/// Notice of cancellation fault handler code.
NoticeOfCancellation = 0b0001, NoticeOfCancellation = 0b0001,
/// Notice of suspension fault handler code.
NoticeOfSuspension = 0b0010, NoticeOfSuspension = 0b0010,
/// Ignore error fault handler code.
IgnoreError = 0b0011, IgnoreError = 0b0011,
/// Abandon transaction fault handler code.
AbandonTransaction = 0b0100, AbandonTransaction = 0b0100,
} }
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] /// CFDP condition codes.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u4, exhaustive = false)]
#[repr(u8)] #[repr(u8)]
pub enum ConditionCode { pub enum ConditionCode {
/// This is not an error condition for which a faulty handler override can be specified /// This is not an error condition for which a faulty handler override can be specified
NoError = 0b0000, NoError = 0b0000,
/// Positive acknowledgement limit reached.
PositiveAckLimitReached = 0b0001, PositiveAckLimitReached = 0b0001,
/// Keep-alive limit reached.
KeepAliveLimitReached = 0b0010, KeepAliveLimitReached = 0b0010,
/// Invalid transmission mode.
InvalidTransmissionMode = 0b0011, InvalidTransmissionMode = 0b0011,
/// Filestore rejection.
FilestoreRejection = 0b0100, FilestoreRejection = 0b0100,
/// File checksum error.
FileChecksumFailure = 0b0101, FileChecksumFailure = 0b0101,
/// File size error.
FileSizeError = 0b0110, FileSizeError = 0b0110,
/// NAK limit reached.
NakLimitReached = 0b0111, NakLimitReached = 0b0111,
/// Inactivity detected.
InactivityDetected = 0b1000, InactivityDetected = 0b1000,
CheckLimitReached = 0b1001, /// Check limit reached.
CheckLimitReached = 0b1010,
/// Unsupported checksum type.
UnsupportedChecksumType = 0b1011, UnsupportedChecksumType = 0b1011,
/// Not an actual fault condition for which fault handler overrides can be specified /// Not an actual fault condition for which fault handler overrides can be specified
SuspendRequestReceived = 0b1110, SuspendRequestReceived = 0b1110,
@@ -116,8 +161,11 @@ pub enum ConditionCode {
CancelRequestReceived = 0b1111, CancelRequestReceived = 0b1111,
} }
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] /// Large file flag.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)] #[repr(u8)]
pub enum LargeFileFlag { pub enum LargeFileFlag {
/// 32 bit maximum file size and FSS size /// 32 bit maximum file size and FSS size
@@ -127,13 +175,16 @@ pub enum LargeFileFlag {
} }
/// Transaction status for the ACK PDU field according to chapter 5.2.4 of the CFDP standard. /// Transaction status for the ACK PDU field according to chapter 5.2.4 of the CFDP standard.
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] #[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u2, exhaustive = true)]
#[repr(u8)] #[repr(u8)]
pub enum TransactionStatus { pub enum TransactionStatus {
/// Transaction is not currently active and the CFDP implementation does not retain a /// Transaction is not currently active and the CFDP implementation does not retain a
/// transaction history. /// transaction history.
Undefined = 0b00, Undefined = 0b00,
/// Transaction is currently active.
Active = 0b01, Active = 0b01,
/// Transaction was active in the past and was terminated. /// Transaction was active in the past and was terminated.
Terminated = 0b10, Terminated = 0b10,
@@ -144,96 +195,70 @@ pub enum TransactionStatus {
/// Checksum types according to the /// Checksum types according to the
/// [SANA Checksum Types registry](https://sanaregistry.org/r/checksum_identifiers/) /// [SANA Checksum Types registry](https://sanaregistry.org/r/checksum_identifiers/)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] #[derive(Default, Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum ChecksumType { pub enum ChecksumType {
/// Modular legacy checksum /// Modular legacy checksum
Modular = 0, Modular = 0,
/// CRC32 Proximity-1.
Crc32Proximity1 = 1, Crc32Proximity1 = 1,
/// CRC32C.
Crc32C = 2, Crc32C = 2,
/// Polynomial: 0x4C11DB7. Preferred checksum for now. /// CRC32. Polynomial: 0x4C11DB7. Preferred checksum for now.
Crc32 = 3, Crc32 = 3,
/// Null checksum (no checksum).
#[default]
NullChecksum = 15, NullChecksum = 15,
} }
impl Default for ChecksumType { /// Raw null checksum.
fn default() -> Self {
Self::NullChecksum
}
}
pub const NULL_CHECKSUM_U32: [u8; 4] = [0; 4]; pub const NULL_CHECKSUM_U32: [u8; 4] = [0; 4];
#[derive(Debug, Copy, Clone, PartialEq, Eq)] /// TLV or LV data larger than allowed [u8::MAX].
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[error("data with size {0} larger than allowed {max} bytes", max = u8::MAX)]
pub struct TlvLvDataTooLargeError(pub usize);
/// First value: Found value. Second value: Expected value if there is one.
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[error("invalid TLV type field, found {found}, expected {expected:?}")]
pub struct InvalidTlvTypeFieldError {
found: u8,
expected: Option<u8>,
}
/// Generic TLV/LV error.
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum TlvLvError { pub enum TlvLvError {
DataTooLarge(usize), /// Data too large error.
ByteConversion(ByteConversionError), #[error("{0}")]
/// First value: Found value. Second value: Expected value if there is one. DataTooLarge(#[from] TlvLvDataTooLargeError),
InvalidTlvTypeField { /// Byte conversion error.
found: u8, #[error("byte conversion error: {0}")]
expected: Option<u8>, ByteConversion(#[from] ByteConversionError),
}, /// Invalid TLV type field error.
/// Logically invalid value length detected. The value length may not exceed 255 bytes. #[error("{0}")]
/// Depending on the concrete TLV type, the value length may also be logically invalid. InvalidTlvTypeField(#[from] InvalidTlvTypeFieldError),
/// Invalid value length.
#[error("invalid value length {0}")]
InvalidValueLength(usize), InvalidValueLength(usize),
/// Only applies to filestore requests and responses. Second name was missing where one is /// Only applies to filestore requests and responses. Second name was missing where one is
/// expected. /// expected.
#[error("second name missing for filestore request or response")]
SecondNameMissing, SecondNameMissing,
/// Invalid action code for filestore requests or responses. /// Invalid action code for filestore requests or responses.
#[error("invalid action code {0}")]
InvalidFilestoreActionCode(u8), InvalidFilestoreActionCode(u8),
} }
impl From<ByteConversionError> for TlvLvError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversion(value)
}
}
impl Display for TlvLvError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
TlvLvError::DataTooLarge(data_len) => {
write!(
f,
"data with size {} larger than allowed {} bytes",
data_len,
u8::MAX
)
}
TlvLvError::ByteConversion(e) => {
write!(f, "tlv or lv byte conversion: {}", e)
}
TlvLvError::InvalidTlvTypeField { found, expected } => {
write!(
f,
"invalid TLV type field, found {found}, expected {expected:?}"
)
}
TlvLvError::InvalidValueLength(len) => {
write!(f, "invalid value length {len}")
}
TlvLvError::SecondNameMissing => {
write!(f, "second name missing for filestore request or response")
}
TlvLvError::InvalidFilestoreActionCode(raw) => {
write!(f, "invalid filestore action code with raw value {raw}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for TlvLvError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
TlvLvError::ByteConversion(e) => Some(e),
_ => None,
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;


@@ -1,3 +1,4 @@
//! # Acknowledgement (ACK) PDU packet implementation.
use crate::{ use crate::{
cfdp::{ConditionCode, CrcFlag, Direction, TransactionStatus}, cfdp::{ConditionCode, CrcFlag, Direction, TransactionStatus},
ByteConversionError, ByteConversionError,
@@ -10,11 +11,17 @@ use super::{
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
/// Invalid [FileDirectiveType] of the acknowledged PDU error.
#[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)]
#[error("invalid directive code of acknowledged PDU")]
pub struct InvalidAckedDirectiveCodeError(pub FileDirectiveType);
/// ACK PDU abstraction. /// ACK PDU abstraction.
/// ///
/// For more information, refer to CFDP chapter 5.2.4. /// For more information, refer to CFDP chapter 5.2.4.
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct AckPdu { pub struct AckPdu {
pdu_header: PduHeader, pdu_header: PduHeader,
directive_code_of_acked_pdu: FileDirectiveType, directive_code_of_acked_pdu: FileDirectiveType,
@@ -23,21 +30,19 @@ pub struct AckPdu {
} }
impl AckPdu { impl AckPdu {
/// Constructor.
pub fn new( pub fn new(
mut pdu_header: PduHeader, mut pdu_header: PduHeader,
directive_code_of_acked_pdu: FileDirectiveType, directive_code_of_acked_pdu: FileDirectiveType,
condition_code: ConditionCode, condition_code: ConditionCode,
transaction_status: TransactionStatus, transaction_status: TransactionStatus,
) -> Result<Self, PduError> { ) -> Result<Self, InvalidAckedDirectiveCodeError> {
if directive_code_of_acked_pdu == FileDirectiveType::EofPdu { if directive_code_of_acked_pdu == FileDirectiveType::Eof {
pdu_header.pdu_conf.direction = Direction::TowardsSender; pdu_header.pdu_conf.direction = Direction::TowardsSender;
} else if directive_code_of_acked_pdu == FileDirectiveType::FinishedPdu { } else if directive_code_of_acked_pdu == FileDirectiveType::Finished {
pdu_header.pdu_conf.direction = Direction::TowardsReceiver; pdu_header.pdu_conf.direction = Direction::TowardsReceiver;
} else { } else {
return Err(PduError::InvalidDirectiveType { return Err(InvalidAckedDirectiveCodeError(directive_code_of_acked_pdu));
found: directive_code_of_acked_pdu as u8,
expected: None,
});
} }
// Force correct direction flag. // Force correct direction flag.
let mut ack_pdu = Self { let mut ack_pdu = Self {
@@ -50,6 +55,9 @@ impl AckPdu {
Ok(ack_pdu) Ok(ack_pdu)
} }
/// Constructor for an ACK PDU acknowledging an EOF PDU.
///
/// Relevant for the file receiver.
pub fn new_for_eof_pdu( pub fn new_for_eof_pdu(
pdu_header: PduHeader, pdu_header: PduHeader,
condition_code: ConditionCode, condition_code: ConditionCode,
@@ -58,13 +66,16 @@ impl AckPdu {
// Unwrap okay here, [new] can only fail on invalid directive codes. // Unwrap okay here, [new] can only fail on invalid directive codes.
Self::new( Self::new(
pdu_header, pdu_header,
FileDirectiveType::EofPdu, FileDirectiveType::Eof,
condition_code, condition_code,
transaction_status, transaction_status,
) )
.unwrap() .unwrap()
} }
/// Constructor for an ACK PDU acknowledging a Finished PDU.
///
/// Relevant for the file sender.
pub fn new_for_finished_pdu( pub fn new_for_finished_pdu(
pdu_header: PduHeader, pdu_header: PduHeader,
condition_code: ConditionCode, condition_code: ConditionCode,
@@ -73,29 +84,38 @@ impl AckPdu {
// Unwrap okay here, [new] can only fail on invalid directive codes. // Unwrap okay here, [new] can only fail on invalid directive codes.
Self::new( Self::new(
pdu_header, pdu_header,
FileDirectiveType::FinishedPdu, FileDirectiveType::Finished,
condition_code, condition_code,
transaction_status, transaction_status,
) )
.unwrap() .unwrap()
} }
/// PDU header.
#[inline]
pub fn pdu_header(&self) -> &PduHeader { pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header &self.pdu_header
} }
/// Directive code of the acknowledged PDU.
#[inline]
pub fn directive_code_of_acked_pdu(&self) -> FileDirectiveType { pub fn directive_code_of_acked_pdu(&self) -> FileDirectiveType {
self.directive_code_of_acked_pdu self.directive_code_of_acked_pdu
} }
/// Condition code.
#[inline]
pub fn condition_code(&self) -> ConditionCode { pub fn condition_code(&self) -> ConditionCode {
self.condition_code self.condition_code
} }
/// Transaction status.
#[inline]
pub fn transaction_status(&self) -> TransactionStatus { pub fn transaction_status(&self) -> TransactionStatus {
self.transaction_status self.transaction_status
} }
#[inline]
fn calc_pdu_datafield_len(&self) -> usize { fn calc_pdu_datafield_len(&self) -> usize {
if self.crc_flag() == CrcFlag::WithCrc { if self.crc_flag() == CrcFlag::WithCrc {
return 5; return 5;
@@ -103,6 +123,7 @@ impl AckPdu {
3 3
} }
/// Construct [Self] from the provided byte slice.
pub fn from_bytes(buf: &[u8]) -> Result<AckPdu, PduError> { pub fn from_bytes(buf: &[u8]) -> Result<AckPdu, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?; let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?; let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
@@ -110,13 +131,13 @@ impl AckPdu {
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| { let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType { PduError::InvalidDirectiveType {
found: buf[current_idx], found: buf[current_idx],
expected: Some(FileDirectiveType::AckPdu), expected: Some(FileDirectiveType::Ack),
} }
})?; })?;
if directive_type != FileDirectiveType::AckPdu { if directive_type != FileDirectiveType::Ack {
return Err(PduError::WrongDirectiveType { return Err(PduError::WrongDirectiveType {
found: directive_type, found: directive_type,
expected: FileDirectiveType::AckPdu, expected: FileDirectiveType::Ack,
}); });
} }
current_idx += 1; current_idx += 1;
@@ -127,8 +148,8 @@ impl AckPdu {
expected: None, expected: None,
} }
})?; })?;
if acked_directive_type != FileDirectiveType::EofPdu if acked_directive_type != FileDirectiveType::Eof
&& acked_directive_type != FileDirectiveType::FinishedPdu && acked_directive_type != FileDirectiveType::Finished
{ {
return Err(PduError::InvalidDirectiveType { return Err(PduError::InvalidDirectiveType {
found: acked_directive_type as u8, found: acked_directive_type as u8,
@@ -139,27 +160,18 @@ impl AckPdu {
let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111) let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111)
.map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?; .map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?;
let transaction_status = TransactionStatus::try_from(buf[current_idx] & 0b11).unwrap(); let transaction_status = TransactionStatus::try_from(buf[current_idx] & 0b11).unwrap();
Self::new( // Unwrap okay, validity of acked directive code was checked.
Ok(Self::new(
pdu_header, pdu_header,
acked_directive_type, acked_directive_type,
condition_code, condition_code,
transaction_status, transaction_status,
) )
} .unwrap())
}
impl CfdpPdu for AckPdu {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
} }
fn file_directive_type(&self) -> Option<FileDirectiveType> { /// Write [Self] to the provided buffer and returns the written size.
Some(FileDirectiveType::AckPdu) pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
}
}
impl WritablePduPacket for AckPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written(); let expected_len = self.len_written();
if buf.len() < expected_len { if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall { return Err(ByteConversionError::ToSliceTooSmall {
@@ -169,11 +181,11 @@ impl WritablePduPacket for AckPdu {
.into()); .into());
} }
let mut current_idx = self.pdu_header.write_to_bytes(buf)?; let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::AckPdu as u8; buf[current_idx] = FileDirectiveType::Ack as u8;
current_idx += 1; current_idx += 1;
buf[current_idx] = (self.directive_code_of_acked_pdu as u8) << 4; buf[current_idx] = (self.directive_code_of_acked_pdu as u8) << 4;
if self.directive_code_of_acked_pdu == FileDirectiveType::FinishedPdu { if self.directive_code_of_acked_pdu == FileDirectiveType::Finished {
// This is the directive subtype code. It needs to be set to 0b0001 if the ACK PDU // This is the directive subtype code. It needs to be set to 0b0001 if the ACK PDU
// acknowledges a Finished PDU, and to 0b0000 otherwise. // acknowledges a Finished PDU, and to 0b0000 otherwise.
buf[current_idx] |= 0b0001; buf[current_idx] |= 0b0001;
@@ -187,11 +199,34 @@ impl WritablePduPacket for AckPdu {
Ok(current_idx) Ok(current_idx)
} }
fn len_written(&self) -> usize { /// Length of the written PDU in bytes.
pub fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len() self.pdu_header.header_len() + self.calc_pdu_datafield_len()
} }
} }
impl CfdpPdu for AckPdu {
#[inline]
fn pdu_header(&self) -> &PduHeader {
self.pdu_header()
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::Ack)
}
}
impl WritablePduPacket for AckPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.write_to_bytes(buf)
}
fn len_written(&self) -> usize {
self.len_written()
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::cfdp::{ use crate::cfdp::{
@@ -210,10 +245,7 @@ mod tests {
assert_eq!(ack_pdu.crc_flag(), expected_crc_flag); assert_eq!(ack_pdu.crc_flag(), expected_crc_flag);
assert_eq!(ack_pdu.file_flag(), LargeFileFlag::Normal); assert_eq!(ack_pdu.file_flag(), LargeFileFlag::Normal);
assert_eq!(ack_pdu.pdu_type(), PduType::FileDirective); assert_eq!(ack_pdu.pdu_type(), PduType::FileDirective);
assert_eq!( assert_eq!(ack_pdu.file_directive_type(), Some(FileDirectiveType::Ack));
ack_pdu.file_directive_type(),
Some(FileDirectiveType::AckPdu)
);
assert_eq!(ack_pdu.transmission_mode(), TransmissionMode::Acknowledged); assert_eq!(ack_pdu.transmission_mode(), TransmissionMode::Acknowledged);
assert_eq!(ack_pdu.direction(), expected_dir); assert_eq!(ack_pdu.direction(), expected_dir);
assert_eq!(ack_pdu.source_id(), TEST_SRC_ID.into()); assert_eq!(ack_pdu.source_id(), TEST_SRC_ID.into());
@@ -224,17 +256,17 @@ mod tests {
#[test] #[test]
fn test_basic() { fn test_basic() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new( let ack_pdu = AckPdu::new(
pdu_header, pdu_header,
FileDirectiveType::FinishedPdu, FileDirectiveType::Finished,
ConditionCode::NoError, ConditionCode::NoError,
TransactionStatus::Active, TransactionStatus::Active,
) )
.expect("creating ACK PDU failed"); .expect("creating ACK PDU failed");
assert_eq!( assert_eq!(
ack_pdu.directive_code_of_acked_pdu(), ack_pdu.directive_code_of_acked_pdu(),
FileDirectiveType::FinishedPdu FileDirectiveType::Finished
); );
verify_state(&ack_pdu, CrcFlag::NoCrc, Direction::TowardsReceiver); verify_state(&ack_pdu, CrcFlag::NoCrc, Direction::TowardsReceiver);
} }
@@ -244,7 +276,7 @@ mod tests {
transaction_status: TransactionStatus, transaction_status: TransactionStatus,
) { ) {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu(pdu_header, condition_code, transaction_status); let ack_pdu = AckPdu::new_for_finished_pdu(pdu_header, condition_code, transaction_status);
let mut buf: [u8; 64] = [0; 64]; let mut buf: [u8; 64] = [0; 64];
let res = ack_pdu.write_to_bytes(&mut buf); let res = ack_pdu.write_to_bytes(&mut buf);
@@ -253,8 +285,8 @@ mod tests {
assert_eq!(written, ack_pdu.len_written()); assert_eq!(written, ack_pdu.len_written());
verify_raw_header(ack_pdu.pdu_header(), &buf); verify_raw_header(ack_pdu.pdu_header(), &buf);
assert_eq!(buf[7], FileDirectiveType::AckPdu as u8); assert_eq!(buf[7], FileDirectiveType::Ack as u8);
assert_eq!((buf[8] >> 4) & 0b1111, FileDirectiveType::FinishedPdu as u8); assert_eq!((buf[8] >> 4) & 0b1111, FileDirectiveType::Finished as u8);
assert_eq!(buf[8] & 0b1111, 0b0001); assert_eq!(buf[8] & 0b1111, 0b0001);
assert_eq!(buf[9] >> 4 & 0b1111, condition_code as u8); assert_eq!(buf[9] >> 4 & 0b1111, condition_code as u8);
assert_eq!(buf[9] & 0b11, transaction_status as u8); assert_eq!(buf[9] & 0b11, transaction_status as u8);
@@ -266,15 +298,53 @@ mod tests {
generic_serialization_test(ConditionCode::NoError, TransactionStatus::Active); generic_serialization_test(ConditionCode::NoError, TransactionStatus::Active);
} }
#[test]
fn test_serialization_too_small() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new(
pdu_header,
FileDirectiveType::Finished,
ConditionCode::NoError,
TransactionStatus::Active,
)
.expect("creating ACK PDU failed");
if let Err(PduError::ByteConversion(ByteConversionError::ToSliceTooSmall {
found,
expected,
})) = ack_pdu.write_to_bytes(&mut [0; 5])
{
assert_eq!(found, 5);
assert_eq!(expected, ack_pdu.len_written());
} else {
panic!("serialization should have failed");
}
}
#[test] #[test]
fn test_serialization_fs_error() { fn test_serialization_fs_error() {
generic_serialization_test(ConditionCode::FileSizeError, TransactionStatus::Terminated); generic_serialization_test(ConditionCode::FileSizeError, TransactionStatus::Terminated);
} }
#[test]
fn test_invalid_directive_code_of_acked_pdu() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
assert_eq!(
AckPdu::new(
pdu_header,
FileDirectiveType::Metadata,
ConditionCode::NoError,
TransactionStatus::Active,
)
.unwrap_err(),
InvalidAckedDirectiveCodeError(FileDirectiveType::Metadata)
);
}
#[test] #[test]
fn test_deserialization() { fn test_deserialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu( let ack_pdu = AckPdu::new_for_finished_pdu(
pdu_header, pdu_header,
ConditionCode::NoError, ConditionCode::NoError,
@@ -289,7 +359,7 @@ mod tests {
#[test] #[test]
fn test_with_crc() { fn test_with_crc() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu( let ack_pdu = AckPdu::new_for_finished_pdu(
pdu_header, pdu_header,
ConditionCode::NoError, ConditionCode::NoError,
@@ -306,7 +376,7 @@ mod tests {
#[test] #[test]
fn test_for_eof_pdu() { fn test_for_eof_pdu() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_eof_pdu( let ack_pdu = AckPdu::new_for_eof_pdu(
pdu_header, pdu_header,
ConditionCode::NoError, ConditionCode::NoError,
@@ -314,7 +384,7 @@ mod tests {
); );
assert_eq!( assert_eq!(
ack_pdu.directive_code_of_acked_pdu(), ack_pdu.directive_code_of_acked_pdu(),
FileDirectiveType::EofPdu FileDirectiveType::Eof
); );
verify_state(&ack_pdu, CrcFlag::WithCrc, Direction::TowardsSender); verify_state(&ack_pdu, CrcFlag::WithCrc, Direction::TowardsSender);
} }
@@ -323,7 +393,7 @@ mod tests {
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
fn test_ack_pdu_serialization() { fn test_ack_pdu_serialization() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_eof_pdu( let ack_pdu = AckPdu::new_for_eof_pdu(
pdu_header, pdu_header,
ConditionCode::NoError, ConditionCode::NoError,
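A minimal usage sketch for the reworked ACK PDU constructor shown above. Module paths and the header setup are assumptions; the error type is the new dedicated `InvalidAckedDirectiveCodeError`.
```rust
use spacepackets::cfdp::pdu::ack::AckPdu;
use spacepackets::cfdp::pdu::{FileDirectiveType, PduHeader};
use spacepackets::cfdp::{ConditionCode, TransactionStatus};

// The header is assumed to come from `PduHeader::new_for_file_directive`.
fn build_ack(pdu_header: PduHeader) {
    // Only EOF and Finished PDUs can be acknowledged; other directive codes are rejected.
    let ack = AckPdu::new(
        pdu_header,
        FileDirectiveType::Finished,
        ConditionCode::NoError,
        TransactionStatus::Active,
    )
    .expect("creating ACK PDU failed");
    let mut buf = [0u8; 64];
    let written = ack.write_to_bytes(&mut buf).expect("writing ACK PDU failed");
    assert_eq!(written, ack.len_written());
}
```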


@@ -1,3 +1,4 @@
//! # End-of-File (EOF) PDU packet implementation.
use crate::cfdp::pdu::{ use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field, add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
FileDirectiveType, PduError, PduHeader, FileDirectiveType, PduError, PduHeader,
@@ -15,6 +16,7 @@ use super::{CfdpPdu, WritablePduPacket};
/// For more information, refer to CFDP chapter 5.2.2. /// For more information, refer to CFDP chapter 5.2.2.
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct EofPdu { pub struct EofPdu {
pdu_header: PduHeader, pdu_header: PduHeader,
condition_code: ConditionCode, condition_code: ConditionCode,
@@ -24,32 +26,58 @@ pub struct EofPdu {
} }
impl EofPdu { impl EofPdu {
pub fn new_no_error(mut pdu_header: PduHeader, file_checksum: u32, file_size: u64) -> Self { /// Constructor.
pub fn new(
mut pdu_header: PduHeader,
condition_code: ConditionCode,
file_checksum: u32,
file_size: u64,
fault_location: Option<EntityIdTlv>,
) -> Self {
// Force correct direction flag. // Force correct direction flag.
pdu_header.pdu_conf.direction = Direction::TowardsReceiver; pdu_header.pdu_conf.direction = Direction::TowardsReceiver;
let mut eof_pdu = Self { let mut eof_pdu = Self {
pdu_header, pdu_header,
condition_code: ConditionCode::NoError, condition_code,
file_checksum, file_checksum,
file_size, file_size,
fault_location: None, fault_location,
}; };
eof_pdu.pdu_header.pdu_datafield_len = eof_pdu.calc_pdu_datafield_len() as u16; eof_pdu.pdu_header.pdu_datafield_len = eof_pdu.calc_pdu_datafield_len() as u16;
eof_pdu eof_pdu
} }
/// Constructor for no error EOF PDUs.
pub fn new_no_error(pdu_header: PduHeader, file_checksum: u32, file_size: u64) -> Self {
Self::new(
pdu_header,
ConditionCode::NoError,
file_checksum,
file_size,
None,
)
}
/// PDU header.
#[inline]
pub fn pdu_header(&self) -> &PduHeader { pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header &self.pdu_header
} }
/// Condition code.
#[inline]
pub fn condition_code(&self) -> ConditionCode { pub fn condition_code(&self) -> ConditionCode {
self.condition_code self.condition_code
} }
/// File checksum.
#[inline]
pub fn file_checksum(&self) -> u32 { pub fn file_checksum(&self) -> u32 {
self.file_checksum self.file_checksum
} }
/// File size.
#[inline]
pub fn file_size(&self) -> u64 { pub fn file_size(&self) -> u64 {
self.file_size self.file_size
} }
@@ -69,6 +97,7 @@ impl EofPdu {
len len
} }
/// Construct [Self] from the provided byte slice.
pub fn from_bytes(buf: &[u8]) -> Result<EofPdu, PduError> { pub fn from_bytes(buf: &[u8]) -> Result<EofPdu, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?; let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?; let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
@@ -81,13 +110,13 @@ impl EofPdu {
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
-expected: Some(FileDirectiveType::EofPdu),
+expected: Some(FileDirectiveType::Eof),
}
})?;
-if directive_type != FileDirectiveType::EofPdu {
+if directive_type != FileDirectiveType::Eof {
return Err(PduError::WrongDirectiveType {
found: directive_type,
-expected: FileDirectiveType::EofPdu,
+expected: FileDirectiveType::Eof,
});
}
current_idx += 1;
@@ -112,20 +141,9 @@ impl EofPdu {
fault_location, fault_location,
}) })
} }
}
impl CfdpPdu for EofPdu { /// Write [Self] to the provided buffer and returns the written size.
fn pdu_header(&self) -> &PduHeader { pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::EofPdu)
}
}
impl WritablePduPacket for EofPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written(); let expected_len = self.len_written();
if buf.len() < expected_len { if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall { return Err(ByteConversionError::ToSliceTooSmall {
@@ -135,7 +153,7 @@ impl WritablePduPacket for EofPdu {
.into()); .into());
} }
let mut current_idx = self.pdu_header.write_to_bytes(buf)?; let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::EofPdu as u8; buf[current_idx] = FileDirectiveType::Eof as u8;
current_idx += 1; current_idx += 1;
buf[current_idx] = (self.condition_code as u8) << 4; buf[current_idx] = (self.condition_code as u8) << 4;
current_idx += 1; current_idx += 1;
@@ -147,7 +165,7 @@ impl WritablePduPacket for EofPdu {
&mut buf[current_idx..], &mut buf[current_idx..],
)?; )?;
if let Some(fault_location) = self.fault_location { if let Some(fault_location) = self.fault_location {
current_idx += fault_location.write_to_bytes(buf)?; current_idx += fault_location.write_to_bytes(&mut buf[current_idx..])?;
} }
if self.crc_flag() == CrcFlag::WithCrc { if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx); current_idx = add_pdu_crc(buf, current_idx);
@@ -155,11 +173,34 @@ impl WritablePduPacket for EofPdu {
Ok(current_idx) Ok(current_idx)
} }
fn len_written(&self) -> usize { /// Length of the written PDU in bytes.
pub fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len() self.pdu_header.header_len() + self.calc_pdu_datafield_len()
} }
} }
impl CfdpPdu for EofPdu {
#[inline]
fn pdu_header(&self) -> &PduHeader {
self.pdu_header()
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::Eof)
}
}
impl WritablePduPacket for EofPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.write_to_bytes(buf)
}
fn len_written(&self) -> usize {
self.len_written()
}
}
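The constructor split above (a generic `new` plus the `new_no_error` convenience) mirrors the other PDU creators in this changeset. A minimal usage sketch, written as it could appear in the test module below (`use super::*;`) with a default `CommonPduConfig`; the checksum and file size values are placeholders:

fn eof_roundtrip_sketch() {
    // File directive PDUs use the renamed header constructor from this changeset.
    let pdu_header = PduHeader::new_for_file_directive(CommonPduConfig::default(), 0);
    let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
    let mut buf: [u8; 64] = [0; 64];
    let written = eof_pdu.write_to_bytes(&mut buf).expect("writing EOF PDU failed");
    assert_eq!(written, eof_pdu.len_written());
    // Reading the PDU back yields an equal structure.
    let read_back = EofPdu::from_bytes(&buf).expect("reading EOF PDU failed");
    assert_eq!(read_back, eof_pdu);
}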
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@@ -170,19 +211,26 @@ mod tests {
use crate::cfdp::{ConditionCode, CrcFlag, LargeFileFlag, PduType, TransmissionMode}; use crate::cfdp::{ConditionCode, CrcFlag, LargeFileFlag, PduType, TransmissionMode};
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use crate::tests::generic_serde_test; use crate::tests::generic_serde_test;
use crate::util::{UnsignedByteFieldU16, UnsignedEnum};
fn verify_state(&eof_pdu: &EofPdu, file_flag: LargeFileFlag) { fn verify_state_no_error_no_crc(eof_pdu: &EofPdu, file_flag: LargeFileFlag) {
verify_state(eof_pdu, CrcFlag::NoCrc, file_flag, ConditionCode::NoError);
}
fn verify_state(
eof_pdu: &EofPdu,
crc_flag: CrcFlag,
file_flag: LargeFileFlag,
cond_code: ConditionCode,
) {
assert_eq!(eof_pdu.file_checksum(), 0x01020304); assert_eq!(eof_pdu.file_checksum(), 0x01020304);
assert_eq!(eof_pdu.file_size(), 12); assert_eq!(eof_pdu.file_size(), 12);
assert_eq!(eof_pdu.condition_code(), ConditionCode::NoError); assert_eq!(eof_pdu.condition_code(), cond_code);
assert_eq!(eof_pdu.crc_flag(), CrcFlag::NoCrc); assert_eq!(eof_pdu.crc_flag(), crc_flag);
assert_eq!(eof_pdu.file_flag(), file_flag); assert_eq!(eof_pdu.file_flag(), file_flag);
assert_eq!(eof_pdu.pdu_type(), PduType::FileDirective); assert_eq!(eof_pdu.pdu_type(), PduType::FileDirective);
assert_eq!( assert_eq!(eof_pdu.file_directive_type(), Some(FileDirectiveType::Eof));
eof_pdu.file_directive_type(),
Some(FileDirectiveType::EofPdu)
);
assert_eq!(eof_pdu.transmission_mode(), TransmissionMode::Acknowledged); assert_eq!(eof_pdu.transmission_mode(), TransmissionMode::Acknowledged);
assert_eq!(eof_pdu.direction(), Direction::TowardsReceiver); assert_eq!(eof_pdu.direction(), Direction::TowardsReceiver);
assert_eq!(eof_pdu.source_id(), TEST_SRC_ID.into()); assert_eq!(eof_pdu.source_id(), TEST_SRC_ID.into());
@@ -193,16 +241,16 @@ mod tests {
#[test] #[test]
fn test_basic() { fn test_basic() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12); let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
assert_eq!(eof_pdu.len_written(), pdu_header.header_len() + 2 + 4 + 4); assert_eq!(eof_pdu.len_written(), pdu_header.header_len() + 2 + 4 + 4);
verify_state(&eof_pdu, LargeFileFlag::Normal); verify_state_no_error_no_crc(&eof_pdu, LargeFileFlag::Normal);
} }
#[test] #[test]
fn test_serialization() { fn test_serialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12); let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64]; let mut buf: [u8; 64] = [0; 64];
let res = eof_pdu.write_to_bytes(&mut buf); let res = eof_pdu.write_to_bytes(&mut buf);
@@ -211,7 +259,7 @@ mod tests {
assert_eq!(written, eof_pdu.len_written()); assert_eq!(written, eof_pdu.len_written());
verify_raw_header(eof_pdu.pdu_header(), &buf); verify_raw_header(eof_pdu.pdu_header(), &buf);
let mut current_idx = eof_pdu.pdu_header().header_len(); let mut current_idx = eof_pdu.pdu_header().header_len();
buf[current_idx] = FileDirectiveType::EofPdu as u8; buf[current_idx] = FileDirectiveType::Eof as u8;
current_idx += 1; current_idx += 1;
assert_eq!( assert_eq!(
(buf[current_idx] >> 4) & 0b1111, (buf[current_idx] >> 4) & 0b1111,
@@ -234,7 +282,7 @@ mod tests {
#[test] #[test]
fn test_deserialization() { fn test_deserialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12); let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64]; let mut buf: [u8; 64] = [0; 64];
eof_pdu.write_to_bytes(&mut buf).unwrap(); eof_pdu.write_to_bytes(&mut buf).unwrap();
@@ -249,7 +297,7 @@ mod tests {
#[test] #[test]
fn test_write_to_vec() { fn test_write_to_vec() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12); let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64]; let mut buf: [u8; 64] = [0; 64];
let written = eof_pdu.write_to_bytes(&mut buf).unwrap(); let written = eof_pdu.write_to_bytes(&mut buf).unwrap();
@@ -260,7 +308,7 @@ mod tests {
#[test] #[test]
fn test_with_crc() { fn test_with_crc() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12); let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64]; let mut buf: [u8; 64] = [0; 64];
let written = eof_pdu.write_to_bytes(&mut buf).unwrap(); let written = eof_pdu.write_to_bytes(&mut buf).unwrap();
@@ -270,7 +318,7 @@ mod tests {
buf[written - 1] -= 1;
let crc: u16 = ((buf[written - 2] as u16) << 8) as u16 | buf[written - 1] as u16;
let error = EofPdu::from_bytes(&buf).unwrap_err();
-if let PduError::ChecksumError(e) = error {
+if let PduError::Checksum(e) = error {
assert_eq!(e, crc);
} else {
panic!("expected crc error");
@@ -280,9 +328,9 @@ mod tests {
#[test] #[test]
fn test_with_large_file_flag() { fn test_with_large_file_flag() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Large); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Large);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12); let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
verify_state(&eof_pdu, LargeFileFlag::Large); verify_state_no_error_no_crc(&eof_pdu, LargeFileFlag::Large);
assert_eq!(eof_pdu.len_written(), pdu_header.header_len() + 2 + 8 + 4); assert_eq!(eof_pdu.len_written(), pdu_header.header_len() + 2 + 8 + 4);
} }
@@ -290,8 +338,52 @@ mod tests {
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
fn test_eof_serde() { fn test_eof_serde() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal); let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0); let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12); let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
generic_serde_test(eof_pdu); generic_serde_test(eof_pdu);
} }
fn generic_test_with_fault_location_and_error(crc: CrcFlag) {
let pdu_conf = common_pdu_conf(crc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new(
pdu_header,
ConditionCode::FileChecksumFailure,
0x01020304,
12,
Some(EntityIdTlv::new(UnsignedByteFieldU16::new(5).into())),
);
let mut expected_len = pdu_header.header_len() + 2 + 4 + 4 + 4;
if crc == CrcFlag::WithCrc {
expected_len += 2;
}
// Entity ID TLV increases the length by 4.
assert_eq!(eof_pdu.len_written(), expected_len);
verify_state(
&eof_pdu,
crc,
LargeFileFlag::Normal,
ConditionCode::FileChecksumFailure,
);
let eof_vec = eof_pdu.to_vec().unwrap();
let eof_read_back = EofPdu::from_bytes(&eof_vec);
if let Err(e) = eof_read_back {
panic!("deserialization failed with: {e}")
}
let eof_read_back = eof_read_back.unwrap();
assert_eq!(eof_read_back, eof_pdu);
assert!(eof_read_back.fault_location.is_some());
assert_eq!(eof_read_back.fault_location.unwrap().entity_id().value(), 5);
assert_eq!(eof_read_back.fault_location.unwrap().entity_id().size(), 2);
}
#[test]
fn test_with_fault_location_and_error() {
generic_test_with_fault_location_and_error(CrcFlag::NoCrc);
}
#[test]
fn test_with_fault_location_and_error_and_crc() {
generic_test_with_fault_location_and_error(CrcFlag::WithCrc);
}
} }
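On the receiving side, the same API can be used to inspect an incoming EOF PDU. A sketch under the same assumptions as above (test-module context, `buf` holding a well-formed EOF PDU); the handling branches are illustrative only:

fn handle_incoming_eof(buf: &[u8]) -> Result<(), PduError> {
    let eof_pdu = EofPdu::from_bytes(buf)?;
    match eof_pdu.condition_code() {
        ConditionCode::NoError => {
            // Compare the local file against eof_pdu.file_checksum() and eof_pdu.file_size().
        }
        _ => {
            // Faulted or cancelled transfer; the optional fault location TLV (see the test
            // above) identifies the entity which detected the fault.
        }
    }
    Ok(())
}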


@@ -1,3 +1,4 @@
//! # File Data PDU packet implementation
use crate::cfdp::pdu::{ use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field, add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
PduError, PduHeader, PduError, PduHeader,
@@ -10,16 +11,24 @@ use serde::{Deserialize, Serialize};
use super::{CfdpPdu, FileDirectiveType, WritablePduPacket}; use super::{CfdpPdu, FileDirectiveType, WritablePduPacket};
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] /// Record continuation state for segment metadata.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u2, exhaustive = true)]
#[repr(u8)] #[repr(u8)]
pub enum RecordContinuationState { pub enum RecordContinuationState {
/// No start and no end.
NoStartNoEnd = 0b00, NoStartNoEnd = 0b00,
/// Start without end.
StartWithoutEnd = 0b01, StartWithoutEnd = 0b01,
/// End without start.
EndWithoutStart = 0b10, EndWithoutStart = 0b10,
/// Start and end.
StartAndEnd = 0b11, StartAndEnd = 0b11,
} }
/// Segment metadata structure.
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct SegmentMetadata<'seg_meta> { pub struct SegmentMetadata<'seg_meta> {
@@ -28,6 +37,7 @@ pub struct SegmentMetadata<'seg_meta> {
} }
impl<'seg_meta> SegmentMetadata<'seg_meta> { impl<'seg_meta> SegmentMetadata<'seg_meta> {
/// Constructor.
pub fn new( pub fn new(
record_continuation_state: RecordContinuationState, record_continuation_state: RecordContinuationState,
metadata: Option<&'seg_meta [u8]>, metadata: Option<&'seg_meta [u8]>,
@@ -43,24 +53,30 @@ impl<'seg_meta> SegmentMetadata<'seg_meta> {
}) })
} }
/// Record continuation state.
#[inline]
pub fn record_continuation_state(&self) -> RecordContinuationState { pub fn record_continuation_state(&self) -> RecordContinuationState {
self.record_continuation_state self.record_continuation_state
} }
/// Raw metadata slice.
#[inline]
pub fn metadata(&self) -> Option<&'seg_meta [u8]> { pub fn metadata(&self) -> Option<&'seg_meta [u8]> {
self.metadata self.metadata
} }
-pub fn written_len(&self) -> usize {
+/// Length of the written segment metadata structure.
+#[inline]
+pub fn len_written(&self) -> usize {
// Map empty metadata to 0 and slice to its length.
1 + self.metadata.map_or(0, |meta| meta.len())
}
pub(crate) fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
-if buf.len() < self.written_len() {
+if buf.len() < self.len_written() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
-expected: self.written_len(),
+expected: self.len_written(),
});
}
buf[0] = ((self.record_continuation_state as u8) << 6)
@@ -68,7 +84,7 @@ impl<'seg_meta> SegmentMetadata<'seg_meta> {
if let Some(metadata) = self.metadata {
buf[1..1 + metadata.len()].copy_from_slice(metadata)
}
-Ok(self.written_len())
+Ok(self.len_written())
} }
pub(crate) fn from_bytes(buf: &'seg_meta [u8]) -> Result<Self, ByteConversionError> { pub(crate) fn from_bytes(buf: &'seg_meta [u8]) -> Result<Self, ByteConversionError> {
@@ -92,130 +108,44 @@ impl<'seg_meta> SegmentMetadata<'seg_meta> {
} }
} }
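The segment metadata always occupies at least one leading byte: the record continuation state sits in the two most significant bits (the file data tests further below check exactly this), while the remaining bits carry the metadata length per the CFDP segment metadata format. A small decoding sketch; the helper name is made up for illustration:

fn decode_first_seg_metadata_byte(first_byte: u8) -> (RecordContinuationState, usize) {
    // A two-bit value always maps to a variant, so try_from cannot fail here.
    let state = RecordContinuationState::try_from((first_byte >> 6) & 0b11).unwrap();
    // Lower six bits: length of the metadata bytes which follow.
    let metadata_len = (first_byte & 0b11_1111) as usize;
    (state, metadata_len)
}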
/// File Data PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.3.
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FileDataPdu<'seg_meta, 'file_data> { struct FdPduBase<'seg_meta> {
pdu_header: PduHeader, pdu_header: PduHeader,
#[cfg_attr(feature = "serde", serde(borrow))] #[cfg_attr(feature = "serde", serde(borrow))]
segment_metadata: Option<SegmentMetadata<'seg_meta>>, segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64, offset: u64,
file_data: &'file_data [u8],
} }
impl<'seg_meta, 'file_data> FileDataPdu<'seg_meta, 'file_data> { impl CfdpPdu for FdPduBase<'_> {
pub fn new_with_seg_metadata( #[inline]
pdu_header: PduHeader, fn pdu_header(&self) -> &PduHeader {
segment_metadata: SegmentMetadata<'seg_meta>, self.pdu_header()
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new_generic(pdu_header, Some(segment_metadata), offset, file_data)
} }
pub fn new_no_seg_metadata( #[inline]
pdu_header: PduHeader, fn file_directive_type(&self) -> Option<FileDirectiveType> {
offset: u64, None
file_data: &'file_data [u8],
) -> Self {
Self::new_generic(pdu_header, None, offset, file_data)
} }
}
pub fn new_generic( impl FdPduBase<'_> {
mut pdu_header: PduHeader, fn calc_pdu_datafield_len(&self, file_data_len: u64) -> usize {
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
pdu_header.pdu_type = PduType::FileData;
if segment_metadata.is_some() {
pdu_header.seg_metadata_flag = SegmentMetadataFlag::Present;
}
let mut pdu = Self {
pdu_header,
segment_metadata,
offset,
file_data,
};
pdu.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
fn calc_pdu_datafield_len(&self) -> usize {
let mut len = core::mem::size_of::<u32>(); let mut len = core::mem::size_of::<u32>();
if self.pdu_header.pdu_conf.file_flag == LargeFileFlag::Large { if self.pdu_header.pdu_conf.file_flag == LargeFileFlag::Large {
len += 4; len += 4;
} }
if self.segment_metadata.is_some() { if self.segment_metadata.is_some() {
len += self.segment_metadata.as_ref().unwrap().written_len() len += self.segment_metadata.as_ref().unwrap().len_written()
} }
len += self.file_data.len(); len += file_data_len as usize;
if self.crc_flag() == CrcFlag::WithCrc { if self.crc_flag() == CrcFlag::WithCrc {
len += 2; len += 2;
} }
len len
} }
pub fn offset(&self) -> u64 { fn write_common_fields_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.offset
}
pub fn file_data(&self) -> &'file_data [u8] {
self.file_data
}
pub fn segment_metadata(&self) -> Option<&SegmentMetadata> {
self.segment_metadata.as_ref()
}
pub fn from_bytes<'buf: 'seg_meta + 'file_data>(buf: &'buf [u8]) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let min_expected_len = current_idx + core::mem::size_of::<u32>();
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let mut segment_metadata = None;
if pdu_header.seg_metadata_flag == SegmentMetadataFlag::Present {
segment_metadata = Some(SegmentMetadata::from_bytes(&buf[current_idx..])?);
current_idx += segment_metadata.as_ref().unwrap().written_len();
}
let (fss, offset) = read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx..]);
current_idx += fss;
if current_idx > full_len_without_crc {
return Err(ByteConversionError::FromSliceTooSmall {
found: current_idx,
expected: full_len_without_crc,
}
.into());
}
Ok(Self {
pdu_header,
segment_metadata,
offset,
file_data: &buf[current_idx..full_len_without_crc],
})
}
}
impl CfdpPdu for FileDataPdu<'_, '_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
None
}
}
impl WritablePduPacket for FileDataPdu<'_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
if buf.len() < self.len_written() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.len_written(),
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?; let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
if self.segment_metadata.is_some() { if self.segment_metadata.is_some() {
current_idx += self current_idx += self
@@ -229,6 +159,139 @@ impl WritablePduPacket for FileDataPdu<'_, '_> {
self.offset, self.offset,
&mut buf[current_idx..], &mut buf[current_idx..],
)?; )?;
Ok(current_idx)
}
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
}
/// File Data PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.3.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FileDataPdu<'seg_meta, 'file_data> {
#[cfg_attr(feature = "serde", serde(borrow))]
common: FdPduBase<'seg_meta>,
file_data: &'file_data [u8],
}
impl<'seg_meta, 'file_data> FileDataPdu<'seg_meta, 'file_data> {
/// Constructor for a file data PDU including segment metadata.
pub fn new_with_seg_metadata(
pdu_header: PduHeader,
segment_metadata: SegmentMetadata<'seg_meta>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new(pdu_header, Some(segment_metadata), offset, file_data)
}
/// Constructor for a file data PDU without segment metadata.
pub fn new_no_seg_metadata(
pdu_header: PduHeader,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new(pdu_header, None, offset, file_data)
}
/// Generic constructor for a file data PDU.
pub fn new(
mut pdu_header: PduHeader,
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
pdu_header.pdu_type = PduType::FileData;
if segment_metadata.is_some() {
pdu_header.seg_metadata_flag = SegmentMetadataFlag::Present;
}
let mut pdu = Self {
common: FdPduBase {
pdu_header,
segment_metadata,
offset,
},
file_data,
};
pdu.common.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
fn calc_pdu_datafield_len(&self) -> usize {
self.common
.calc_pdu_datafield_len(self.file_data.len() as u64)
}
/// Optional segment metadata.
#[inline]
pub fn segment_metadata(&self) -> Option<&SegmentMetadata<'_>> {
self.common.segment_metadata.as_ref()
}
/// PDU header.
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
self.common.pdu_header()
}
/// File data offset.
#[inline]
pub fn offset(&self) -> u64 {
self.common.offset
}
/// File data.
#[inline]
pub fn file_data(&self) -> &'file_data [u8] {
self.file_data
}
/// Read [Self] from the provided buffer.
pub fn from_bytes<'buf: 'seg_meta + 'file_data>(buf: &'buf [u8]) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let min_expected_len = current_idx + core::mem::size_of::<u32>();
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let mut segment_metadata = None;
if pdu_header.seg_metadata_flag == SegmentMetadataFlag::Present {
segment_metadata = Some(SegmentMetadata::from_bytes(&buf[current_idx..])?);
current_idx += segment_metadata.as_ref().unwrap().len_written();
}
let (fss, offset) = read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx..]);
current_idx += fss;
if current_idx > full_len_without_crc {
return Err(ByteConversionError::FromSliceTooSmall {
found: current_idx,
expected: full_len_without_crc,
}
.into());
}
Ok(Self {
common: FdPduBase {
pdu_header,
segment_metadata,
offset,
},
file_data: &buf[current_idx..full_len_without_crc],
})
}
/// Write [Self] to the provided buffer and returns the written size.
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
if buf.len() < self.len_written() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.len_written(),
}
.into());
}
let mut current_idx = self.common.write_common_fields_to_bytes(buf)?;
buf[current_idx..current_idx + self.file_data.len()].copy_from_slice(self.file_data); buf[current_idx..current_idx + self.file_data.len()].copy_from_slice(self.file_data);
current_idx += self.file_data.len(); current_idx += self.file_data.len();
if self.crc_flag() == CrcFlag::WithCrc { if self.crc_flag() == CrcFlag::WithCrc {
@@ -237,10 +300,194 @@ impl WritablePduPacket for FileDataPdu<'_, '_> {
Ok(current_idx) Ok(current_idx)
} }
fn len_written(&self) -> usize { /// Length of the written PDU.
self.pdu_header.header_len() + self.calc_pdu_datafield_len() pub fn len_written(&self) -> usize {
self.common.pdu_header.header_len() + self.calc_pdu_datafield_len()
} }
} }
impl CfdpPdu for FileDataPdu<'_, '_> {
#[inline]
fn pdu_header(&self) -> &PduHeader {
&self.common.pdu_header
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
None
}
}
impl WritablePduPacket for FileDataPdu<'_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.write_to_bytes(buf)
}
fn len_written(&self) -> usize {
self.len_written()
}
}
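For completeness, a short sketch of the ordinary (non-reserved) file data path using the API above; `write_file_data_segment` is only an illustrative wrapper and assumes a default `CommonPduConfig`:

fn write_file_data_segment(
    buf: &mut [u8],
    offset: u64,
    segment: &[u8],
) -> Result<usize, PduError> {
    let pdu_header = PduHeader::new_for_file_data_default(CommonPduConfig::default(), 0);
    let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, offset, segment);
    // Fails with a ToSliceTooSmall byte conversion error (converted into a PduError)
    // if the provided buffer cannot hold the full PDU.
    fd_pdu.write_to_bytes(buf)
}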
/// File Data PDU creator abstraction.
///
/// This special creator object allows reading file data into the file data buffer directly. This
/// avoids the need for an additional buffer when creating a file data PDU. This structure therefore
/// does not implement the regular [WritablePduPacket] trait.
///
/// For more information, refer to CFDP chapter 5.3.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FileDataPduCreatorWithReservedDatafield<'seg_meta> {
#[cfg_attr(feature = "serde", serde(borrow))]
common: FdPduBase<'seg_meta>,
file_data_len: u64,
}
impl<'seg_meta> FileDataPduCreatorWithReservedDatafield<'seg_meta> {
/// Constructor for a file data PDU including segment metadata.
pub fn new_with_seg_metadata(
pdu_header: PduHeader,
segment_metadata: SegmentMetadata<'seg_meta>,
offset: u64,
file_data_len: u64,
) -> Self {
Self::new(pdu_header, Some(segment_metadata), offset, file_data_len)
}
/// Constructor for a file data PDU without segment metadata.
pub fn new_no_seg_metadata(pdu_header: PduHeader, offset: u64, file_data_len: u64) -> Self {
Self::new(pdu_header, None, offset, file_data_len)
}
/// Generic constructor.
pub fn new(
mut pdu_header: PduHeader,
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data_len: u64,
) -> Self {
pdu_header.pdu_type = PduType::FileData;
if segment_metadata.is_some() {
pdu_header.seg_metadata_flag = SegmentMetadataFlag::Present;
}
let mut pdu = Self {
common: FdPduBase {
pdu_header,
segment_metadata,
offset,
},
file_data_len,
};
pdu.common.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
fn calc_pdu_datafield_len(&self) -> usize {
self.common.calc_pdu_datafield_len(self.file_data_len)
}
/// Length of the written PDU.
pub fn len_written(&self) -> usize {
self.common.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
/// This function performs a partial write by writing all data except the file data
/// and the CRC.
///
/// It returns a [FileDataPduCreatorWithUnwrittenData] which provides a mutable slice to
/// the reserved file data field. The user can read file data into this field directly and
/// then finish the PDU creation using the [FileDataPduCreatorWithUnwrittenData::finish] call.
pub fn write_to_bytes_partially<'buf>(
&self,
buf: &'buf mut [u8],
) -> Result<FileDataPduCreatorWithUnwrittenData<'buf>, PduError> {
if buf.len() < self.len_written() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.len_written(),
}
.into());
}
let mut current_idx = self.common.write_common_fields_to_bytes(buf)?;
let file_data_offset = current_idx as u64;
current_idx += self.file_data_len as usize;
if self.crc_flag() == CrcFlag::WithCrc {
current_idx += 2;
}
Ok(FileDataPduCreatorWithUnwrittenData {
write_buf: &mut buf[0..current_idx],
file_data_offset,
file_data_len: self.file_data_len,
needs_crc: self.crc_flag() == CrcFlag::WithCrc,
})
}
}
impl CfdpPdu for FileDataPduCreatorWithReservedDatafield<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.common.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
None
}
}
/// This structure is created with [FileDataPduCreatorWithReservedDatafield::write_to_bytes_partially]
/// and provides an API to read file data from the virtual filesystem into the file data PDU buffer
/// directly.
///
/// This structure provides a mutable slice to the reserved file data field. The user can read
/// file data into this field directly and then finish the PDU creation using the
/// [FileDataPduCreatorWithUnwrittenData::finish] call.
pub struct FileDataPduCreatorWithUnwrittenData<'buf> {
write_buf: &'buf mut [u8],
file_data_offset: u64,
file_data_len: u64,
needs_crc: bool,
}
impl FileDataPduCreatorWithUnwrittenData<'_> {
/// Mutable access to the file data field.
pub fn file_data_field_mut(&mut self) -> &mut [u8] {
&mut self.write_buf[self.file_data_offset as usize
..self.file_data_offset as usize + self.file_data_len as usize]
}
/// This function needs to be called to add a CRC to the file data PDU where applicable.
///
/// It returns the full written size of the PDU.
pub fn finish(self) -> usize {
if self.needs_crc {
add_pdu_crc(
self.write_buf,
self.file_data_offset as usize + self.file_data_len as usize,
);
}
self.write_buf.len()
}
}
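Putting the two structures together, the intended workflow is: partial write, fill the reserved field, then finish. A sketch where the caller-provided closure stands in for the virtual filesystem read and a default `CommonPduConfig` is assumed:

fn write_fd_pdu_from_vfs<F: FnOnce(&mut [u8])>(
    buf: &mut [u8],
    offset: u64,
    segment_len: u64,
    read_segment: F,
) -> Result<usize, PduError> {
    let pdu_header = PduHeader::new_for_file_data_default(CommonPduConfig::default(), 0);
    let creator = FileDataPduCreatorWithReservedDatafield::new_no_seg_metadata(
        pdu_header,
        offset,
        segment_len,
    );
    // Writes the header and fixed fields, leaving the file data field untouched.
    let mut unwritten = creator.write_to_bytes_partially(buf)?;
    // Read the file data directly into the reserved slice, no intermediate buffer required.
    read_segment(unwritten.file_data_field_mut());
    // finish() adds the CRC if the PDU configuration requires one and returns the full length.
    Ok(unwritten.finish())
}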
/// This function can be used to calculate the maximum allowed file segment size for
/// a given maximum packet length and the segment metadata if there is any.
pub fn calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(
pdu_header: &PduHeader,
max_packet_len: usize,
segment_metadata: Option<&SegmentMetadata>,
) -> usize {
let mut subtract = pdu_header.header_len();
if let Some(segment_metadata) = segment_metadata {
subtract += 1 + segment_metadata.metadata().unwrap().len();
}
if pdu_header.common_pdu_conf().file_flag == LargeFileFlag::Large {
subtract += 8;
} else {
subtract += 4;
}
if pdu_header.common_pdu_conf().crc_flag == CrcFlag::WithCrc {
subtract += 2;
}
max_packet_len.saturating_sub(subtract)
}
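As the tests below show, a 64 byte packet with the default configuration leaves 53 bytes of file data, which implies a 7 byte header plus the 4 byte offset field and no CRC. The helper can therefore be used to chunk a file into segments; a sketch with the ceiling division spelled out:

fn required_file_data_pdus(
    pdu_header: &PduHeader,
    max_packet_len: usize,
    file_size: u64,
) -> u64 {
    let max_seg_len = calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(
        pdu_header,
        max_packet_len,
        None,
    ) as u64;
    if max_seg_len == 0 {
        // The packet budget is too small to carry any file data at all.
        return 0;
    }
    // Ceiling division: the last PDU may carry a shorter segment.
    (file_size + max_seg_len - 1) / max_seg_len
}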
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
@@ -263,7 +510,7 @@ mod tests {
assert!(fd_pdu.segment_metadata().is_none()); assert!(fd_pdu.segment_metadata().is_none());
assert_eq!( assert_eq!(
fd_pdu.len_written(), fd_pdu.len_written(),
fd_pdu.pdu_header.header_len() + core::mem::size_of::<u32>() + 4 fd_pdu.pdu_header().header_len() + core::mem::size_of::<u32>() + 4
); );
assert_eq!(fd_pdu.crc_flag(), CrcFlag::NoCrc); assert_eq!(fd_pdu.crc_flag(), CrcFlag::NoCrc);
@@ -290,11 +537,11 @@ mod tests {
let written = res.unwrap(); let written = res.unwrap();
assert_eq!( assert_eq!(
written, written,
fd_pdu.pdu_header.header_len() + core::mem::size_of::<u32>() + 4 fd_pdu.pdu_header().header_len() + core::mem::size_of::<u32>() + 4
); );
let mut current_idx = fd_pdu.pdu_header.header_len(); let mut current_idx = fd_pdu.pdu_header().header_len();
let file_size = u32::from_be_bytes( let file_size = u32::from_be_bytes(
buf[fd_pdu.pdu_header.header_len()..fd_pdu.pdu_header.header_len() + 4] buf[fd_pdu.pdu_header().header_len()..fd_pdu.pdu_header().header_len() + 4]
.try_into() .try_into()
.unwrap(), .unwrap(),
); );
@@ -353,7 +600,7 @@ mod tests {
buf[written - 1] -= 1;
let crc: u16 = ((buf[written - 2] as u16) << 8) | buf[written - 1] as u16;
let error = FileDataPdu::from_bytes(&buf).unwrap_err();
-if let PduError::ChecksumError(e) = error {
+if let PduError::Checksum(e) = error {
assert_eq!(e, crc);
} else {
panic!("expected crc error");
@@ -380,7 +627,7 @@ mod tests {
assert_eq!(*fd_pdu.segment_metadata().unwrap(), segment_meta); assert_eq!(*fd_pdu.segment_metadata().unwrap(), segment_meta);
assert_eq!( assert_eq!(
fd_pdu.len_written(), fd_pdu.len_written(),
fd_pdu.pdu_header.header_len() fd_pdu.pdu_header().header_len()
+ 1 + 1
+ seg_metadata.len() + seg_metadata.len()
+ core::mem::size_of::<u32>() + core::mem::size_of::<u32>()
@@ -390,7 +637,7 @@ mod tests {
fd_pdu fd_pdu
.write_to_bytes(&mut buf) .write_to_bytes(&mut buf)
.expect("writing FD PDU failed"); .expect("writing FD PDU failed");
let mut current_idx = fd_pdu.pdu_header.header_len(); let mut current_idx = fd_pdu.pdu_header().header_len();
assert_eq!( assert_eq!(
RecordContinuationState::try_from((buf[current_idx] >> 6) & 0b11).unwrap(), RecordContinuationState::try_from((buf[current_idx] >> 6) & 0b11).unwrap(),
RecordContinuationState::StartAndEnd RecordContinuationState::StartAndEnd
@@ -482,4 +729,142 @@ mod tests {
let output_converted_back: FileDataPdu = from_bytes(&output).unwrap(); let output_converted_back: FileDataPdu = from_bytes(&output).unwrap();
assert_eq!(output_converted_back, fd_pdu); assert_eq!(output_converted_back, fd_pdu);
} }
#[test]
fn test_fd_pdu_creator_with_reserved_field_no_crc() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let test_str = "hello world!";
let fd_pdu = FileDataPduCreatorWithReservedDatafield::new_no_seg_metadata(
pdu_header,
10,
test_str.len() as u64,
);
let mut write_buf: [u8; 64] = [0; 64];
let mut pdu_unwritten = fd_pdu
.write_to_bytes_partially(&mut write_buf)
.expect("partial write failed");
pdu_unwritten
.file_data_field_mut()
.copy_from_slice(test_str.as_bytes());
pdu_unwritten.finish();
let pdu_reader = FileDataPdu::from_bytes(&write_buf).expect("reading file data PDU failed");
assert_eq!(
core::str::from_utf8(pdu_reader.file_data()).expect("reading utf8 string failed"),
"hello world!"
);
}
#[test]
fn test_fd_pdu_creator_with_reserved_field_with_crc() {
let mut common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
common_conf.crc_flag = true.into();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let test_str = "hello world!";
let fd_pdu = FileDataPduCreatorWithReservedDatafield::new_no_seg_metadata(
pdu_header,
10,
test_str.len() as u64,
);
let mut write_buf: [u8; 64] = [0; 64];
let mut pdu_unwritten = fd_pdu
.write_to_bytes_partially(&mut write_buf)
.expect("partial write failed");
pdu_unwritten
.file_data_field_mut()
.copy_from_slice(test_str.as_bytes());
pdu_unwritten.finish();
let pdu_reader = FileDataPdu::from_bytes(&write_buf).expect("reading file data PDU failed");
assert_eq!(
core::str::from_utf8(pdu_reader.file_data()).expect("reading utf8 string failed"),
"hello world!"
);
}
#[test]
fn test_fd_pdu_creator_with_reserved_field_with_crc_without_finish_fails() {
let mut common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
common_conf.crc_flag = true.into();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let test_str = "hello world!";
let fd_pdu = FileDataPduCreatorWithReservedDatafield::new_no_seg_metadata(
pdu_header,
10,
test_str.len() as u64,
);
let mut write_buf: [u8; 64] = [0; 64];
let mut pdu_unwritten = fd_pdu
.write_to_bytes_partially(&mut write_buf)
.expect("partial write failed");
pdu_unwritten
.file_data_field_mut()
.copy_from_slice(test_str.as_bytes());
let pdu_reader_error = FileDataPdu::from_bytes(&write_buf);
assert!(pdu_reader_error.is_err());
let error = pdu_reader_error.unwrap_err();
match error {
PduError::Checksum(_) => (),
_ => {
panic!("unexpected PDU error {}", error)
}
}
}
#[test]
fn test_max_file_seg_calculator_0() {
let pdu_header = PduHeader::new_for_file_data_default(CommonPduConfig::default(), 0);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 64, None),
53
);
}
#[test]
fn test_max_file_seg_calculator_1() {
let common_conf = CommonPduConfig {
crc_flag: CrcFlag::WithCrc,
..Default::default()
};
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 64, None),
51
);
}
#[test]
fn test_max_file_seg_calculator_2() {
let common_conf = CommonPduConfig {
file_flag: LargeFileFlag::Large,
..Default::default()
};
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 64, None),
49
);
}
#[test]
fn test_max_file_seg_calculator_saturating_sub() {
let common_conf = CommonPduConfig {
file_flag: LargeFileFlag::Large,
..Default::default()
};
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 15, None),
0
);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 14, None),
0
);
}
} }


@@ -1,32 +1,44 @@
//! # Finished PDU packet implementation.
use crate::cfdp::pdu::{ use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, FileDirectiveType, PduError, PduHeader, add_pdu_crc, generic_length_checks_pdu_deserialization, FileDirectiveType, PduError, PduHeader,
}; };
use crate::cfdp::tlv::{ use crate::cfdp::tlv::{
EntityIdTlv, FilestoreResponseTlv, GenericTlv, Tlv, TlvType, TlvTypeField, WritableTlv, EntityIdTlv, FilestoreResponseTlv, GenericTlv, Tlv, TlvType, TlvTypeField, WritableTlv,
}; };
use crate::cfdp::{ConditionCode, CrcFlag, Direction, PduType, TlvLvError}; use crate::cfdp::{ConditionCode, CrcFlag, Direction, PduType};
use crate::ByteConversionError; use crate::ByteConversionError;
use num_enum::{IntoPrimitive, TryFromPrimitive}; use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use super::{CfdpPdu, WritablePduPacket}; use super::tlv::ReadableTlv;
use super::{CfdpPdu, InvalidTlvTypeFieldError, WritablePduPacket};
/// Delivery code enumeration.
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] #[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum DeliveryCode { pub enum DeliveryCode {
/// Completed delivery.
Complete = 0, Complete = 0,
/// Incomplete delivery.
Incomplete = 1, Incomplete = 1,
} }
/// File status enumeration.
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] #[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum FileStatus { pub enum FileStatus {
/// File was discarded deliberately.
DiscardDeliberately = 0b00, DiscardDeliberately = 0b00,
/// File was rejected by the filestore.
DiscardedFsRejection = 0b01, DiscardedFsRejection = 0b01,
/// File was retained (but not necessarily complete).
Retained = 0b10, Retained = 0b10,
/// Unreported file status.
Unreported = 0b11, Unreported = 0b11,
} }
@@ -34,6 +46,7 @@ pub enum FileStatus {
/// ///
/// For more information, refer to CFDP chapter 5.2.3. /// For more information, refer to CFDP chapter 5.2.3.
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct FinishedPduCreator<'fs_responses> { pub struct FinishedPduCreator<'fs_responses> {
pdu_header: PduHeader, pdu_header: PduHeader,
condition_code: ConditionCode, condition_code: ConditionCode,
@@ -46,12 +59,12 @@ pub struct FinishedPduCreator<'fs_responses> {
impl<'fs_responses> FinishedPduCreator<'fs_responses> { impl<'fs_responses> FinishedPduCreator<'fs_responses> {
/// Default finished PDU: No error (no fault location field) and no filestore responses. /// Default finished PDU: No error (no fault location field) and no filestore responses.
pub fn new_default( pub fn new_no_error(
pdu_header: PduHeader, pdu_header: PduHeader,
delivery_code: DeliveryCode, delivery_code: DeliveryCode,
file_status: FileStatus, file_status: FileStatus,
) -> Self { ) -> Self {
Self::new_generic( Self::new(
pdu_header, pdu_header,
ConditionCode::NoError, ConditionCode::NoError,
delivery_code, delivery_code,
@@ -61,6 +74,7 @@ impl<'fs_responses> FinishedPduCreator<'fs_responses> {
) )
} }
/// Constructor where the fault location is provided.
pub fn new_with_error( pub fn new_with_error(
pdu_header: PduHeader, pdu_header: PduHeader,
condition_code: ConditionCode, condition_code: ConditionCode,
@@ -68,7 +82,7 @@ impl<'fs_responses> FinishedPduCreator<'fs_responses> {
file_status: FileStatus, file_status: FileStatus,
fault_location: EntityIdTlv, fault_location: EntityIdTlv,
) -> Self { ) -> Self {
Self::new_generic( Self::new(
pdu_header, pdu_header,
condition_code, condition_code,
delivery_code, delivery_code,
@@ -78,7 +92,8 @@ impl<'fs_responses> FinishedPduCreator<'fs_responses> {
) )
} }
pub fn new_generic( /// Generic constructor.
pub fn new(
mut pdu_header: PduHeader, mut pdu_header: PduHeader,
condition_code: ConditionCode, condition_code: ConditionCode,
delivery_code: DeliveryCode, delivery_code: DeliveryCode,
@@ -105,23 +120,38 @@ impl<'fs_responses> FinishedPduCreator<'fs_responses> {
finished_pdu finished_pdu
} }
/// PDU header.
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
/// Condition code.
#[inline]
pub fn condition_code(&self) -> ConditionCode { pub fn condition_code(&self) -> ConditionCode {
self.condition_code self.condition_code
} }
/// Delivery code.
#[inline]
pub fn delivery_code(&self) -> DeliveryCode { pub fn delivery_code(&self) -> DeliveryCode {
self.delivery_code self.delivery_code
} }
/// File status.
#[inline]
pub fn file_status(&self) -> FileStatus { pub fn file_status(&self) -> FileStatus {
self.file_status self.file_status
} }
-// If there are no filestore responses, an empty slice will be returned.
+/// Filestore responses as a slice.
+#[inline]
pub fn filestore_responses(&self) -> &[FilestoreResponseTlv<'_, '_, '_>] {
self.fs_responses self.fs_responses
} }
/// Optional fault location [EntityIdTlv].
#[inline]
pub fn fault_location(&self) -> Option<EntityIdTlv> { pub fn fault_location(&self) -> Option<EntityIdTlv> {
self.fault_location self.fault_location
} }
@@ -139,20 +169,9 @@ impl<'fs_responses> FinishedPduCreator<'fs_responses> {
} }
datafield_len datafield_len
} }
}
impl CfdpPdu for FinishedPduCreator<'_> { /// Write [Self] to the provided buffer and returns the written size.
fn pdu_header(&self) -> &PduHeader { pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::FinishedPdu)
}
}
impl WritablePduPacket for FinishedPduCreator<'_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written(); let expected_len = self.len_written();
if buf.len() < expected_len { if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall { return Err(ByteConversionError::ToSliceTooSmall {
@@ -163,7 +182,7 @@ impl WritablePduPacket for FinishedPduCreator<'_> {
} }
let mut current_idx = self.pdu_header.write_to_bytes(buf)?; let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::FinishedPdu as u8; buf[current_idx] = FileDirectiveType::Finished as u8;
current_idx += 1; current_idx += 1;
buf[current_idx] = ((self.condition_code as u8) << 4) buf[current_idx] = ((self.condition_code as u8) << 4)
| ((self.delivery_code as u8) << 2) | ((self.delivery_code as u8) << 2)
@@ -181,11 +200,34 @@ impl WritablePduPacket for FinishedPduCreator<'_> {
Ok(current_idx) Ok(current_idx)
} }
fn len_written(&self) -> usize { /// Length of the written PDU in bytes.
pub fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len() self.pdu_header.header_len() + self.calc_pdu_datafield_len()
} }
} }
impl CfdpPdu for FinishedPduCreator<'_> {
#[inline]
fn pdu_header(&self) -> &PduHeader {
self.pdu_header()
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::Finished)
}
}
impl WritablePduPacket for FinishedPduCreator<'_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.write_to_bytes(buf)
}
fn len_written(&self) -> usize {
self.len_written()
}
}
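A minimal creator-side sketch using the renamed `new_no_error` constructor: report a transfer which completed and whose file was retained. A test-module context (`use super::*;`) and a default configuration are assumed:

fn finished_pdu_for_completed_transfer(buf: &mut [u8]) -> Result<usize, PduError> {
    let pdu_header = PduHeader::new_for_file_directive(CommonPduConfig::default(), 0);
    let finished_pdu = FinishedPduCreator::new_no_error(
        pdu_header,
        DeliveryCode::Complete,
        FileStatus::Retained,
    );
    finished_pdu.write_to_bytes(buf)
}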
/// Helper structure to loop through all filestore responses of a read Finished PDU. It should be /// Helper structure to loop through all filestore responses of a read Finished PDU. It should be
/// noted that iterators in Rust are not fallible, but the TLV creation can fail, for example if /// noted that iterators in Rust are not fallible, but the TLV creation can fail, for example if
/// the raw TLV data is invalid for some reason. In that case, the iterator will yield [None] /// the raw TLV data is invalid for some reason. In that case, the iterator will yield [None]
@@ -217,8 +259,10 @@ impl<'buf> Iterator for FilestoreResponseIterator<'buf> {
} }
} }
/// Finished PDU reader structure.
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct FinishedPduReader<'buf> { pub struct FinishedPduReader<'buf> {
pdu_header: PduHeader, pdu_header: PduHeader,
condition_code: ConditionCode, condition_code: ConditionCode,
@@ -243,13 +287,13 @@ impl<'buf> FinishedPduReader<'buf> {
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| { let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType { PduError::InvalidDirectiveType {
found: buf[current_idx], found: buf[current_idx],
expected: Some(FileDirectiveType::FinishedPdu), expected: Some(FileDirectiveType::Finished),
} }
})?; })?;
if directive_type != FileDirectiveType::FinishedPdu { if directive_type != FileDirectiveType::Finished {
return Err(PduError::WrongDirectiveType { return Err(PduError::WrongDirectiveType {
found: directive_type, found: directive_type,
expected: FileDirectiveType::FinishedPdu, expected: FileDirectiveType::Finished,
}); });
} }
current_idx += 1; current_idx += 1;
@@ -271,10 +315,14 @@ impl<'buf> FinishedPduReader<'buf> {
}) })
} }
/// Raw filestore responses.
#[inline]
pub fn fs_responses_raw(&self) -> &[u8] { pub fn fs_responses_raw(&self) -> &[u8] {
self.fs_responses_raw self.fs_responses_raw
} }
/// Iterator over the filestore responses.
#[inline]
pub fn fs_responses_iter(&self) -> FilestoreResponseIterator<'_> { pub fn fs_responses_iter(&self) -> FilestoreResponseIterator<'_> {
FilestoreResponseIterator { FilestoreResponseIterator {
responses_buf: self.fs_responses_raw, responses_buf: self.fs_responses_raw,
@@ -282,22 +330,36 @@ impl<'buf> FinishedPduReader<'buf> {
} }
} }
/// Condition code.
#[inline]
pub fn condition_code(&self) -> ConditionCode { pub fn condition_code(&self) -> ConditionCode {
self.condition_code self.condition_code
} }
/// Delivery code.
#[inline]
pub fn delivery_code(&self) -> DeliveryCode { pub fn delivery_code(&self) -> DeliveryCode {
self.delivery_code self.delivery_code
} }
/// File status.
#[inline]
pub fn file_status(&self) -> FileStatus { pub fn file_status(&self) -> FileStatus {
self.file_status self.file_status
} }
/// Optional fault location [EntityIdTlv].
#[inline]
pub fn fault_location(&self) -> Option<EntityIdTlv> { pub fn fault_location(&self) -> Option<EntityIdTlv> {
self.fault_location self.fault_location
} }
/// PDU header.
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn parse_tlv_fields( fn parse_tlv_fields(
mut current_idx: usize, mut current_idx: usize,
full_len_without_crc: usize, full_len_without_crc: usize,
@@ -327,22 +389,26 @@ impl<'buf> FinishedPduReader<'buf> {
// last TLV, everything else would break the whole handling of the packet
// TLVs.
if current_idx != full_len_without_crc {
-return Err(PduError::FormatError);
+return Err(PduError::Format);
}
} else {
-return Err(TlvLvError::InvalidTlvTypeField {
-found: tlv_type.into(),
-expected: Some(TlvType::FilestoreResponse.into()),
-}
-.into());
+return Err(PduError::TlvLv(
+InvalidTlvTypeFieldError {
+found: tlv_type.into(),
+expected: Some(TlvType::FilestoreResponse.into()),
+}
+.into(),
+));
}
TlvTypeField::Custom(raw) => {
-return Err(TlvLvError::InvalidTlvTypeField {
-found: raw,
-expected: None,
-}
-.into());
+return Err(PduError::TlvLv(
+InvalidTlvTypeFieldError {
+found: raw,
+expected: None,
+}
+.into(),
+));
}
} }
} }
} }
@@ -351,12 +417,14 @@ impl<'buf> FinishedPduReader<'buf> {
} }
impl CfdpPdu for FinishedPduReader<'_> { impl CfdpPdu for FinishedPduReader<'_> {
#[inline]
fn pdu_header(&self) -> &PduHeader { fn pdu_header(&self) -> &PduHeader {
&self.pdu_header self.pdu_header()
} }
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> { fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::FinishedPdu) Some(FileDirectiveType::Finished)
} }
} }
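On the reading side, a sketch for inspecting a received Finished PDU; as documented above, the filestore response iterator simply stops early if a raw TLV cannot be parsed:

fn inspect_finished_pdu(buf: &[u8]) -> Result<(), PduError> {
    let finished_pdu = FinishedPduReader::new(buf)?;
    if finished_pdu.delivery_code() == DeliveryCode::Complete
        && finished_pdu.file_status() == FileStatus::Retained
    {
        // Transfer fully succeeded.
    }
    for fs_response in finished_pdu.fs_responses_iter() {
        // Each item is a filestore response TLV parsed from the raw responses field.
        let _ = fs_response;
    }
    Ok(())
}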
@@ -397,8 +465,8 @@ mod tests {
delivery_code: DeliveryCode, delivery_code: DeliveryCode,
file_status: FileStatus, file_status: FileStatus,
) -> FinishedPduCreator<'static> { ) -> FinishedPduCreator<'static> {
let pdu_header = PduHeader::new_no_file_data(common_pdu_conf(crc_flag, fss), 0); let pdu_header = PduHeader::new_for_file_directive(common_pdu_conf(crc_flag, fss), 0);
FinishedPduCreator::new_default(pdu_header, delivery_code, file_status) FinishedPduCreator::new_no_error(pdu_header, delivery_code, file_status)
} }
#[test] #[test]
@@ -425,7 +493,7 @@ mod tests {
assert_eq!(finished_pdu.pdu_type(), PduType::FileDirective); assert_eq!(finished_pdu.pdu_type(), PduType::FileDirective);
assert_eq!( assert_eq!(
finished_pdu.file_directive_type(), finished_pdu.file_directive_type(),
Some(FileDirectiveType::FinishedPdu) Some(FileDirectiveType::Finished)
); );
assert_eq!( assert_eq!(
finished_pdu.transmission_mode(), finished_pdu.transmission_mode(),
@@ -457,7 +525,7 @@ mod tests {
); );
verify_raw_header(finished_pdu.pdu_header(), &buf); verify_raw_header(finished_pdu.pdu_header(), &buf);
let mut current_idx = finished_pdu.pdu_header().header_len(); let mut current_idx = finished_pdu.pdu_header().header_len();
assert_eq!(buf[current_idx], FileDirectiveType::FinishedPdu as u8); assert_eq!(buf[current_idx], FileDirectiveType::Finished as u8);
current_idx += 1; current_idx += 1;
assert_eq!( assert_eq!(
(buf[current_idx] >> 4) & 0b1111, (buf[current_idx] >> 4) & 0b1111,
@@ -559,7 +627,7 @@ mod tests {
buf[written - 1] -= 1;
let crc: u16 = ((buf[written - 2] as u16) << 8) as u16 | buf[written - 1] as u16;
let error = FinishedPduReader::new(&buf).unwrap_err();
-if let PduError::ChecksumError(e) = error {
+if let PduError::Checksum(e) = error {
assert_eq!(e, crc);
} else {
panic!("expected crc error");
@@ -568,8 +636,10 @@ mod tests {
#[test] #[test]
fn test_with_fault_location() { fn test_with_fault_location() {
let pdu_header = let pdu_header = PduHeader::new_for_file_directive(
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0); common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
0,
);
let finished_pdu = FinishedPduCreator::new_with_error( let finished_pdu = FinishedPduCreator::new_with_error(
pdu_header, pdu_header,
ConditionCode::NakLimitReached, ConditionCode::NakLimitReached,
@@ -590,8 +660,10 @@ mod tests {
#[test] #[test]
fn test_deserialization_with_fault_location() { fn test_deserialization_with_fault_location() {
let pdu_header = let pdu_header = PduHeader::new_for_file_directive(
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0); common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
0,
);
let entity_id_tlv = EntityIdTlv::new(TEST_DEST_ID.into()); let entity_id_tlv = EntityIdTlv::new(TEST_DEST_ID.into());
let finished_pdu = FinishedPduCreator::new_with_error( let finished_pdu = FinishedPduCreator::new_with_error(
pdu_header, pdu_header,
@@ -626,9 +698,11 @@ mod tests {
.unwrap(); .unwrap();
let fs_responses = &[fs_response_0, fs_response_1]; let fs_responses = &[fs_response_0, fs_response_1];
let pdu_header = let pdu_header = PduHeader::new_for_file_directive(
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0); common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
let finished_pdu = FinishedPduCreator::new_generic( 0,
);
let finished_pdu = FinishedPduCreator::new(
pdu_header, pdu_header,
ConditionCode::NakLimitReached, ConditionCode::NakLimitReached,
DeliveryCode::Incomplete, DeliveryCode::Incomplete,
@@ -661,9 +735,11 @@ mod tests {
.unwrap(); .unwrap();
let fs_responses = &[fs_response_0, fs_response_1]; let fs_responses = &[fs_response_0, fs_response_1];
let pdu_header = let pdu_header = PduHeader::new_for_file_directive(
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0); common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
let finished_pdu = FinishedPduCreator::new_generic( 0,
);
let finished_pdu = FinishedPduCreator::new(
pdu_header, pdu_header,
ConditionCode::NakLimitReached, ConditionCode::NakLimitReached,
DeliveryCode::Incomplete, DeliveryCode::Incomplete,


@@ -1,3 +1,6 @@
+//! # Metadata PDU packet implementation.
+#[cfg(feature = "alloc")]
+use super::tlv::TlvOwned;
 use crate::cfdp::lv::Lv;
 use crate::cfdp::pdu::{
     add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
@@ -11,17 +14,24 @@ use alloc::vec::Vec;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
+use super::tlv::ReadableTlv;
 use super::{CfdpPdu, WritablePduPacket};
+/// Generic metadata parameters.
 #[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "defmt", derive(defmt::Format))]
 pub struct MetadataGenericParams {
+    /// Closure requested flag.
     pub closure_requested: bool,
+    /// Checksum type.
     pub checksum_type: ChecksumType,
+    /// File size.
     pub file_size: u64,
 }
 impl MetadataGenericParams {
+    /// Constructor.
     pub fn new(closure_requested: bool, checksum_type: ChecksumType, file_size: u64) -> Self {
         Self {
             closure_requested,
@@ -31,6 +41,7 @@ impl MetadataGenericParams {
     }
 }
+/// Build the metadata options from a slice of [Tlv]s
 pub fn build_metadata_opts_from_slice(
     buf: &mut [u8],
     tlvs: &[Tlv],
@@ -42,6 +53,7 @@ pub fn build_metadata_opts_from_slice(
     Ok(written)
 }
+/// Build the metadata options from a vector of [Tlv]s
 #[cfg(feature = "alloc")]
 pub fn build_metadata_opts_from_vec(
     buf: &mut [u8],
@@ -50,6 +62,16 @@ pub fn build_metadata_opts_from_vec(
     build_metadata_opts_from_slice(buf, tlvs.as_slice())
 }
+/// Build the metadata options from a slice of [TlvOwned]s
+#[cfg(feature = "alloc")]
+pub fn build_metadata_opts_from_owned_slice(tlvs: &[TlvOwned]) -> Vec<u8> {
+    let mut sum_vec = Vec::new();
+    for tlv in tlvs {
+        sum_vec.extend(tlv.to_vec());
+    }
+    sum_vec
+}
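The new build_metadata_opts_from_owned_slice helper flattens owned TLVs into the raw byte buffer which the creator below now expects for its options. A minimal usage sketch, assuming the spacepackets::cfdp module paths implied by the imports above (the exact paths are not part of this diff):

    use spacepackets::cfdp::pdu::metadata::build_metadata_opts_from_owned_slice;
    use spacepackets::cfdp::tlv::{TlvOwned, TlvType};

    fn main() {
        // Build two owned TLVs and flatten them into one contiguous options buffer.
        let flow_label = TlvOwned::new_empty(TlvType::FlowLabel);
        let msg_to_user = TlvOwned::new(TlvType::MsgToUser, &[1, 2, 3, 4]);
        let raw_opts = build_metadata_opts_from_owned_slice(&[flow_label, msg_to_user]);
        // Each TLV contributes its full length (type byte + length byte + value).
        assert_eq!(raw_opts.len(), 2 + 2 + 4);
    }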
 /// Metadata PDU creator abstraction.
 ///
 /// This abstraction exposes a specialized API for creating metadata PDUs as specified in
@@ -60,10 +82,11 @@ pub struct MetadataPduCreator<'src_name, 'dest_name, 'opts> {
     metadata_params: MetadataGenericParams,
     src_file_name: Lv<'src_name>,
     dest_file_name: Lv<'dest_name>,
-    options: &'opts [Tlv<'opts>],
+    options: &'opts [u8],
 }
 impl<'src_name, 'dest_name, 'opts> MetadataPduCreator<'src_name, 'dest_name, 'opts> {
+    /// Constructor for a metadata PDU without options.
     pub fn new_no_opts(
         pdu_header: PduHeader,
         metadata_params: MetadataGenericParams,
@@ -79,12 +102,13 @@ impl<'src_name, 'dest_name, 'opts> MetadataPduCreator<'src_name, 'dest_name, 'op
         )
     }
+    /// Constructor for a metadata PDU with options.
     pub fn new_with_opts(
         pdu_header: PduHeader,
         metadata_params: MetadataGenericParams,
         src_file_name: Lv<'src_name>,
         dest_file_name: Lv<'dest_name>,
-        options: &'opts [Tlv<'opts>],
+        options: &'opts [u8],
     ) -> Self {
         Self::new(
             pdu_header,
@@ -95,12 +119,13 @@ impl<'src_name, 'dest_name, 'opts> MetadataPduCreator<'src_name, 'dest_name, 'op
         )
     }
+    /// Generic constructor for a metadata PDU.
     pub fn new(
         mut pdu_header: PduHeader,
         metadata_params: MetadataGenericParams,
         src_file_name: Lv<'src_name>,
         dest_file_name: Lv<'dest_name>,
-        options: &'opts [Tlv<'opts>],
+        options: &'opts [u8],
     ) -> Self {
         pdu_header.pdu_type = PduType::FileDirective;
         pdu_header.pdu_conf.direction = Direction::TowardsReceiver;
@@ -115,22 +140,45 @@ impl<'src_name, 'dest_name, 'opts> MetadataPduCreator<'src_name, 'dest_name, 'op
         pdu
     }
+    /// Metadata generic parameters.
+    #[inline]
     pub fn metadata_params(&self) -> &MetadataGenericParams {
         &self.metadata_params
     }
+    /// Source file name as a [Lv].
+    #[inline]
     pub fn src_file_name(&self) -> Lv<'src_name> {
         self.src_file_name
     }
+    /// Destination file name as a [Lv].
+    #[inline]
     pub fn dest_file_name(&self) -> Lv<'dest_name> {
         self.dest_file_name
     }
-    pub fn options(&self) -> &'opts [Tlv<'opts>] {
+    /// PDU header.
+    #[inline]
+    pub fn pdu_header(&self) -> &PduHeader {
+        &self.pdu_header
+    }
+    /// Raw options.
+    #[inline]
+    pub fn options(&self) -> &'opts [u8] {
         self.options
     }
+    /// Yield an iterator which can be used to loop through all options. Returns [None] if the
+    /// options field is empty.
+    pub fn options_iter(&self) -> OptionsIter<'_> {
+        OptionsIter {
+            opt_buf: self.options,
+            current_idx: 0,
+        }
+    }
     fn calc_pdu_datafield_len(&self) -> usize {
         // One directve type octet and one byte of the directive parameter field.
         let mut len = 2;
@@ -141,28 +189,15 @@ impl<'src_name, 'dest_name, 'opts> MetadataPduCreator<'src_name, 'dest_name, 'op
         }
         len += self.src_file_name.len_full();
         len += self.dest_file_name.len_full();
-        for tlv in self.options() {
-            len += tlv.len_full()
-        }
+        len += self.options().len();
         if self.crc_flag() == CrcFlag::WithCrc {
             len += 2;
         }
         len
     }
-}
-impl CfdpPdu for MetadataPduCreator<'_, '_, '_> {
-    fn pdu_header(&self) -> &PduHeader {
-        &self.pdu_header
-    }
-    fn file_directive_type(&self) -> Option<FileDirectiveType> {
-        Some(FileDirectiveType::MetadataPdu)
-    }
-}
-impl WritablePduPacket for MetadataPduCreator<'_, '_, '_> {
-    fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
+    /// Write [Self] to the provided buffer and returns the written size.
+    pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
         let expected_len = self.len_written();
         if buf.len() < expected_len {
             return Err(ByteConversionError::ToSliceTooSmall {
@@ -173,9 +208,9 @@ impl WritablePduPacket for MetadataPduCreator<'_, '_, '_> {
         }
         let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
-        buf[current_idx] = FileDirectiveType::MetadataPdu as u8;
+        buf[current_idx] = FileDirectiveType::Metadata as u8;
         current_idx += 1;
-        buf[current_idx] = ((self.metadata_params.closure_requested as u8) << 7)
+        buf[current_idx] = ((self.metadata_params.closure_requested as u8) << 6)
             | (self.metadata_params.checksum_type as u8);
         current_idx += 1;
         current_idx += write_fss_field(
@@ -189,21 +224,42 @@ impl WritablePduPacket for MetadataPduCreator<'_, '_, '_> {
         current_idx += self
             .dest_file_name
             .write_to_be_bytes(&mut buf[current_idx..])?;
-        for opt in self.options() {
-            opt.write_to_bytes(&mut buf[current_idx..current_idx + opt.len_full()])?;
-            current_idx += opt.len_full();
-        }
+        buf[current_idx..current_idx + self.options.len()].copy_from_slice(self.options);
+        current_idx += self.options.len();
         if self.crc_flag() == CrcFlag::WithCrc {
             current_idx = add_pdu_crc(buf, current_idx);
         }
         Ok(current_idx)
     }
-    fn len_written(&self) -> usize {
+    /// Length of the written PDU in bytes.
+    pub fn len_written(&self) -> usize {
         self.pdu_header.header_len() + self.calc_pdu_datafield_len()
     }
 }
+impl CfdpPdu for MetadataPduCreator<'_, '_, '_> {
+    #[inline]
+    fn pdu_header(&self) -> &PduHeader {
+        self.pdu_header()
+    }
+    #[inline]
+    fn file_directive_type(&self) -> Option<FileDirectiveType> {
+        Some(FileDirectiveType::Metadata)
+    }
+}
+impl WritablePduPacket for MetadataPduCreator<'_, '_, '_> {
+    fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
+        self.write_to_bytes(buf)
+    }
+    fn len_written(&self) -> usize {
+        self.len_written()
+    }
+}
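A short sketch of the raw-options creator API shown above, loosely following the test code further down. The spacepackets::cfdp paths and the entity/sequence ID values are illustrative assumptions:

    use spacepackets::cfdp::lv::Lv;
    use spacepackets::cfdp::pdu::metadata::{MetadataGenericParams, MetadataPduCreator};
    use spacepackets::cfdp::pdu::{CommonPduConfig, PduHeader};
    use spacepackets::cfdp::ChecksumType;
    use spacepackets::util::UnsignedByteFieldU8;

    fn main() {
        let pdu_conf = CommonPduConfig::new_with_byte_fields(
            UnsignedByteFieldU8::new(1), // source entity ID
            UnsignedByteFieldU8::new(2), // destination entity ID
            UnsignedByteFieldU8::new(3), // transaction sequence number
        )
        .expect("creating common PDU config failed");
        // The creator patches PDU type and direction itself; the tests in this diff pass 0
        // for the data field length here.
        let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
        let params = MetadataGenericParams::new(false, ChecksumType::Crc32, 0x1010);
        let src = Lv::new_from_str("hello-world.txt").unwrap();
        let dest = Lv::new_from_str("hello-world2.txt").unwrap();
        // Options are now passed as a raw byte slice; an empty slice means no options.
        let metadata_pdu = MetadataPduCreator::new_with_opts(pdu_header, params, src, dest, &[]);
        let mut buf = [0u8; 128];
        let written = metadata_pdu.write_to_bytes(&mut buf).unwrap();
        assert_eq!(written, metadata_pdu.len_written());
    }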
 /// Helper structure to loop through all options of a metadata PDU. It should be noted that
 /// iterators in Rust are not fallible, but the TLV creation can fail, for example if the raw TLV
 /// data is invalid for some reason. In that case, the iterator will yield [None] because there
@@ -241,6 +297,7 @@ impl<'opts> Iterator for OptionsIter<'opts> {
/// involved. /// involved.
#[derive(Debug)] #[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct MetadataPduReader<'buf> { pub struct MetadataPduReader<'buf> {
pdu_header: PduHeader, pdu_header: PduHeader,
metadata_params: MetadataGenericParams, metadata_params: MetadataGenericParams,
@@ -252,10 +309,12 @@ pub struct MetadataPduReader<'buf> {
} }
impl<'raw> MetadataPduReader<'raw> { impl<'raw> MetadataPduReader<'raw> {
/// Constructor from raw bytes.
pub fn new(buf: &'raw [u8]) -> Result<Self, PduError> { pub fn new(buf: &'raw [u8]) -> Result<Self, PduError> {
Self::from_bytes(buf) Self::from_bytes(buf)
} }
/// Constructor from raw bytes.
pub fn from_bytes(buf: &'raw [u8]) -> Result<Self, PduError> { pub fn from_bytes(buf: &'raw [u8]) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?; let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?; let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
@@ -269,13 +328,13 @@ impl<'raw> MetadataPduReader<'raw> {
         let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
             PduError::InvalidDirectiveType {
                 found: buf[current_idx],
-                expected: Some(FileDirectiveType::MetadataPdu),
+                expected: Some(FileDirectiveType::Metadata),
             }
         })?;
-        if directive_type != FileDirectiveType::MetadataPdu {
+        if directive_type != FileDirectiveType::Metadata {
             return Err(PduError::WrongDirectiveType {
                 found: directive_type,
-                expected: FileDirectiveType::MetadataPdu,
+                expected: FileDirectiveType::Metadata,
             });
         }
         current_idx += 1;
@@ -311,35 +370,50 @@ impl<'raw> MetadataPduReader<'raw> {
         })
     }
+    /// PDU header.
+    #[inline]
+    pub fn pdu_header(&self) -> &PduHeader {
+        &self.pdu_header
+    }
+    /// Raw options.
+    #[inline]
     pub fn options(&self) -> &'raw [u8] {
         self.options
     }
+    /// Generic metadata parameters.
+    #[inline]
     pub fn metadata_params(&self) -> &MetadataGenericParams {
         &self.metadata_params
     }
-    pub fn src_file_name(&self) -> Lv {
+    /// Source file name as a [Lv].
+    #[inline]
+    pub fn src_file_name(&self) -> Lv<'_> {
         self.src_file_name
     }
-    pub fn dest_file_name(&self) -> Lv {
+    /// Destination file name as a [Lv].
+    #[inline]
+    pub fn dest_file_name(&self) -> Lv<'_> {
         self.dest_file_name
     }
 }
 impl CfdpPdu for MetadataPduReader<'_> {
+    #[inline]
     fn pdu_header(&self) -> &PduHeader {
-        &self.pdu_header
+        self.pdu_header()
     }
     fn file_directive_type(&self) -> Option<FileDirectiveType> {
-        Some(FileDirectiveType::MetadataPdu)
+        Some(FileDirectiveType::Metadata)
     }
 }
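A sketch of reading such a PDU back with the reader above. Printing with {:?} assumes Debug implementations on the returned types, which this diff does not show:

    use spacepackets::cfdp::pdu::metadata::MetadataPduReader;
    use spacepackets::cfdp::pdu::PduError;
    use spacepackets::cfdp::tlv::ReadableTlv;

    fn dump_metadata(raw: &[u8]) -> Result<(), PduError> {
        let reader = MetadataPduReader::from_bytes(raw)?;
        println!("source file: {:?}", reader.src_file_name());
        println!("dest file: {:?}", reader.dest_file_name());
        println!("closure requested: {}", reader.metadata_params().closure_requested);
        // The reader hands out the raw options plus an iterator yielding parsed TLVs.
        if let Some(options_iter) = reader.options_iter() {
            for opt in options_iter {
                println!("option TLV with {} value byte(s)", opt.len_value());
            }
        }
        Ok(())
    }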
 #[cfg(test)]
-pub mod tests {
+mod tests {
     use alloc::string::ToString;
     use crate::cfdp::lv::Lv;
@@ -352,7 +426,7 @@ pub mod tests {
     };
     use crate::cfdp::pdu::{CfdpPdu, PduError, WritablePduPacket};
     use crate::cfdp::pdu::{FileDirectiveType, PduHeader};
-    use crate::cfdp::tlv::{Tlv, TlvType};
+    use crate::cfdp::tlv::{ReadableTlv, Tlv, TlvOwned, TlvType, WritableTlv};
     use crate::cfdp::{
         ChecksumType, CrcFlag, Direction, LargeFileFlag, PduType, SegmentMetadataFlag,
         SegmentationControl, TransmissionMode,
@@ -362,17 +436,19 @@ pub mod tests {
     const SRC_FILENAME: &str = "hello-world.txt";
     const DEST_FILENAME: &str = "hello-world2.txt";
-    fn generic_metadata_pdu<'opts>(
+    fn generic_metadata_pdu(
         crc_flag: CrcFlag,
+        checksum_type: ChecksumType,
+        closure_requested: bool,
         fss: LargeFileFlag,
-        opts: &'opts [Tlv],
+        opts: &[u8],
     ) -> (
         Lv<'static>,
         Lv<'static>,
-        MetadataPduCreator<'static, 'static, 'opts>,
+        MetadataPduCreator<'static, 'static, '_>,
     ) {
-        let pdu_header = PduHeader::new_no_file_data(common_pdu_conf(crc_flag, fss), 0);
-        let metadata_params = MetadataGenericParams::new(false, ChecksumType::Crc32, 0x1010);
+        let pdu_header = PduHeader::new_for_file_directive(common_pdu_conf(crc_flag, fss), 0);
+        let metadata_params = MetadataGenericParams::new(closure_requested, checksum_type, 0x1010);
         let src_filename = Lv::new_from_str(SRC_FILENAME).expect("Generating string LV failed");
         let dest_filename =
             Lv::new_from_str(DEST_FILENAME).expect("Generating destination LV failed");
@@ -391,8 +467,13 @@ pub mod tests {
     #[test]
     fn test_basic() {
-        let (src_filename, dest_filename, metadata_pdu) =
-            generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Normal, &[]);
+        let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
+            CrcFlag::NoCrc,
+            ChecksumType::Crc32,
+            false,
+            LargeFileFlag::Normal,
+            &[],
+        );
         assert_eq!(
             metadata_pdu.len_written(),
             metadata_pdu.pdu_header().header_len()
@@ -408,9 +489,14 @@ pub mod tests {
         assert_eq!(metadata_pdu.crc_flag(), CrcFlag::NoCrc);
         assert_eq!(metadata_pdu.file_flag(), LargeFileFlag::Normal);
         assert_eq!(metadata_pdu.pdu_type(), PduType::FileDirective);
+        assert!(!metadata_pdu.metadata_params().closure_requested);
+        assert_eq!(
+            metadata_pdu.metadata_params().checksum_type,
+            ChecksumType::Crc32
+        );
         assert_eq!(
             metadata_pdu.file_directive_type(),
-            Some(FileDirectiveType::MetadataPdu)
+            Some(FileDirectiveType::Metadata)
         );
         assert_eq!(
             metadata_pdu.transmission_mode(),
@@ -422,44 +508,103 @@ pub mod tests {
assert_eq!(metadata_pdu.transaction_seq_num(), TEST_SEQ_NUM.into()); assert_eq!(metadata_pdu.transaction_seq_num(), TEST_SEQ_NUM.into());
} }
#[test] fn check_metadata_raw_fields(
fn test_serialization() { metadata_pdu: &MetadataPduCreator,
let (src_filename, dest_filename, metadata_pdu) = buf: &[u8],
generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Normal, &[]); written_bytes: usize,
let mut buf: [u8; 64] = [0; 64]; checksum_type: ChecksumType,
let res = metadata_pdu.write_to_bytes(&mut buf); closure_requested: bool,
assert!(res.is_ok()); expected_src_filename: &Lv,
let written = res.unwrap(); expected_dest_filename: &Lv,
) {
verify_raw_header(metadata_pdu.pdu_header(), buf);
assert_eq!( assert_eq!(
written, written_bytes,
metadata_pdu.pdu_header.header_len() metadata_pdu.pdu_header.header_len()
+ 1 + 1
+ 1 + 1
+ 4 + 4
+ src_filename.len_full() + expected_src_filename.len_full()
+ dest_filename.len_full() + expected_dest_filename.len_full()
); );
verify_raw_header(metadata_pdu.pdu_header(), &buf); assert_eq!(buf[7], FileDirectiveType::Metadata as u8);
assert_eq!(buf[7], FileDirectiveType::MetadataPdu as u8); assert_eq!(buf[8] >> 6, closure_requested as u8);
assert_eq!(buf[8] >> 6, false as u8); assert_eq!(buf[8] & 0b1111, checksum_type as u8);
assert_eq!(buf[8] & 0b1111, ChecksumType::Crc32 as u8);
assert_eq!(u32::from_be_bytes(buf[9..13].try_into().unwrap()), 0x1010); assert_eq!(u32::from_be_bytes(buf[9..13].try_into().unwrap()), 0x1010);
let mut current_idx = 13; let mut current_idx = 13;
let src_name_from_raw = let src_name_from_raw =
Lv::from_bytes(&buf[current_idx..]).expect("Creating source name LV failed"); Lv::from_bytes(&buf[current_idx..]).expect("Creating source name LV failed");
assert_eq!(src_name_from_raw, src_filename); assert_eq!(src_name_from_raw, *expected_src_filename);
current_idx += src_name_from_raw.len_full(); current_idx += src_name_from_raw.len_full();
let dest_name_from_raw = let dest_name_from_raw =
Lv::from_bytes(&buf[current_idx..]).expect("Creating dest name LV failed"); Lv::from_bytes(&buf[current_idx..]).expect("Creating dest name LV failed");
assert_eq!(dest_name_from_raw, dest_filename); assert_eq!(dest_name_from_raw, *expected_dest_filename);
current_idx += dest_name_from_raw.len_full(); current_idx += dest_name_from_raw.len_full();
// No options, so no additional data here. // No options, so no additional data here.
assert_eq!(current_idx, written); assert_eq!(current_idx, written_bytes);
}
#[test]
fn test_serialization_0() {
let checksum_type = ChecksumType::Crc32;
let closure_requested = false;
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
checksum_type,
closure_requested,
LargeFileFlag::Normal,
&[],
);
let mut buf: [u8; 64] = [0; 64];
let res = metadata_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
check_metadata_raw_fields(
&metadata_pdu,
&buf,
written,
checksum_type,
closure_requested,
&src_filename,
&dest_filename,
);
}
#[test]
fn test_serialization_1() {
let checksum_type = ChecksumType::Modular;
let closure_requested = true;
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
checksum_type,
closure_requested,
LargeFileFlag::Normal,
&[],
);
let mut buf: [u8; 64] = [0; 64];
let res = metadata_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
check_metadata_raw_fields(
&metadata_pdu,
&buf,
written,
checksum_type,
closure_requested,
&src_filename,
&dest_filename,
);
} }
#[test] #[test]
fn test_write_to_vec() { fn test_write_to_vec() {
let (_, _, metadata_pdu) = generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Normal, &[]); let (_, _, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Normal,
&[],
);
let mut buf: [u8; 64] = [0; 64]; let mut buf: [u8; 64] = [0; 64];
let pdu_vec = metadata_pdu.to_vec().unwrap(); let pdu_vec = metadata_pdu.to_vec().unwrap();
let written = metadata_pdu.write_to_bytes(&mut buf).unwrap(); let written = metadata_pdu.write_to_bytes(&mut buf).unwrap();
@@ -470,15 +615,21 @@ pub mod tests {
assert_eq!(written.metadata_params(), read.metadata_params()); assert_eq!(written.metadata_params(), read.metadata_params());
assert_eq!(written.src_file_name(), read.src_file_name()); assert_eq!(written.src_file_name(), read.src_file_name());
assert_eq!(written.dest_file_name(), read.dest_file_name()); assert_eq!(written.dest_file_name(), read.dest_file_name());
let opts = written.options(); let opts = written.options_iter();
for (tlv_written, tlv_read) in opts.iter().zip(read.options_iter().unwrap()) { for (tlv_written, tlv_read) in opts.zip(read.options_iter().unwrap()) {
assert_eq!(tlv_written, &tlv_read); assert_eq!(&tlv_written, &tlv_read);
} }
} }
#[test] #[test]
fn test_deserialization() { fn test_deserialization() {
let (_, _, metadata_pdu) = generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Normal, &[]); let (_, _, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
true,
LargeFileFlag::Normal,
&[],
);
let mut buf: [u8; 64] = [0; 64]; let mut buf: [u8; 64] = [0; 64];
metadata_pdu.write_to_bytes(&mut buf).unwrap(); metadata_pdu.write_to_bytes(&mut buf).unwrap();
let pdu_read_back = MetadataPduReader::from_bytes(&buf); let pdu_read_back = MetadataPduReader::from_bytes(&buf);
@@ -489,8 +640,13 @@ pub mod tests {
#[test] #[test]
fn test_with_crc_flag() { fn test_with_crc_flag() {
let (src_filename, dest_filename, metadata_pdu) = let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
generic_metadata_pdu(CrcFlag::WithCrc, LargeFileFlag::Normal, &[]); CrcFlag::WithCrc,
ChecksumType::Crc32,
true,
LargeFileFlag::Normal,
&[],
);
assert_eq!(metadata_pdu.crc_flag(), CrcFlag::WithCrc); assert_eq!(metadata_pdu.crc_flag(), CrcFlag::WithCrc);
let mut buf: [u8; 64] = [0; 64]; let mut buf: [u8; 64] = [0; 64];
let write_res = metadata_pdu.write_to_bytes(&mut buf); let write_res = metadata_pdu.write_to_bytes(&mut buf);
@@ -513,8 +669,13 @@ pub mod tests {
#[test] #[test]
fn test_with_large_file_flag() { fn test_with_large_file_flag() {
let (src_filename, dest_filename, metadata_pdu) = let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Large, &[]); CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Large,
&[],
);
let mut buf: [u8; 64] = [0; 64]; let mut buf: [u8; 64] = [0; 64];
let write_res = metadata_pdu.write_to_bytes(&mut buf); let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok()); assert!(write_res.is_ok());
@@ -571,10 +732,15 @@ pub mod tests {
let tlv1 = Tlv::new_empty(TlvType::FlowLabel); let tlv1 = Tlv::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4]; let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap(); let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap();
let tlv_vec = vec![tlv1, tlv2]; let mut tlv_buf: [u8; 64] = [0; 64];
let opts_len = tlv1.len_full() + tlv2.len_full(); let opts_len = build_metadata_opts_from_slice(&mut tlv_buf, &[tlv1, tlv2]).unwrap();
let (src_filename, dest_filename, metadata_pdu) = let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Normal, &tlv_vec); CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Normal,
&tlv_buf[0..opts_len],
);
let mut buf: [u8; 128] = [0; 128]; let mut buf: [u8; 128] = [0; 128];
let write_res = metadata_pdu.write_to_bytes(&mut buf); let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok()); assert!(write_res.is_ok());
@@ -596,7 +762,55 @@ pub mod tests {
let opts_iter = opts_iter.unwrap(); let opts_iter = opts_iter.unwrap();
let mut accumulated_len = 0; let mut accumulated_len = 0;
for (idx, opt) in opts_iter.enumerate() { for (idx, opt) in opts_iter.enumerate() {
assert_eq!(tlv_vec[idx], opt); if idx == 0 {
assert_eq!(tlv1, opt);
} else if idx == 1 {
assert_eq!(tlv2, opt);
}
accumulated_len += opt.len_full();
}
assert_eq!(accumulated_len, pdu_read_back.options().len());
}
#[test]
fn test_with_owned_opts() {
let tlv1 = TlvOwned::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = TlvOwned::new(TlvType::MsgToUser, &msg_to_user);
let mut all_tlvs = tlv1.to_vec();
all_tlvs.extend(tlv2.to_vec());
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Normal,
&all_tlvs,
);
let mut buf: [u8; 128] = [0; 128];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header.header_len()
+ 1
+ 1
+ 4
+ src_filename.len_full()
+ dest_filename.len_full()
+ all_tlvs.len()
);
let pdu_read_back = MetadataPduReader::from_bytes(&buf).unwrap();
compare_read_pdu_to_written_pdu(&metadata_pdu, &pdu_read_back);
let opts_iter = pdu_read_back.options_iter();
assert!(opts_iter.is_some());
let opts_iter = opts_iter.unwrap();
let mut accumulated_len = 0;
for (idx, opt) in opts_iter.enumerate() {
if idx == 0 {
assert_eq!(tlv1, opt);
} else if idx == 1 {
assert_eq!(tlv2, opt);
}
accumulated_len += opt.len_full(); accumulated_len += opt.len_full();
} }
assert_eq!(accumulated_len, pdu_read_back.options().len()); assert_eq!(accumulated_len, pdu_read_back.options().len());
@@ -604,7 +818,13 @@ pub mod tests {
#[test] #[test]
fn test_invalid_directive_code() { fn test_invalid_directive_code() {
let (_, _, metadata_pdu) = generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Large, &[]); let (_, _, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
true,
LargeFileFlag::Large,
&[],
);
let mut metadata_vec = metadata_pdu.to_vec().unwrap(); let mut metadata_vec = metadata_pdu.to_vec().unwrap();
metadata_vec[7] = 0xff; metadata_vec[7] = 0xff;
let metadata_error = MetadataPduReader::from_bytes(&metadata_vec); let metadata_error = MetadataPduReader::from_bytes(&metadata_vec);
@@ -612,10 +832,10 @@ pub mod tests {
         let error = metadata_error.unwrap_err();
         if let PduError::InvalidDirectiveType { found, expected } = error {
             assert_eq!(found, 0xff);
-            assert_eq!(expected, Some(FileDirectiveType::MetadataPdu));
+            assert_eq!(expected, Some(FileDirectiveType::Metadata));
             assert_eq!(
                 error.to_string(),
-                "invalid directive type value 255, expected Some(MetadataPdu)"
+                "invalid directive type, found 255, expected Some(Metadata)"
             );
         } else {
             panic!("Expected InvalidDirectiveType error, got {:?}", error);
@@ -624,18 +844,24 @@ pub mod tests {
     #[test]
     fn test_wrong_directive_code() {
-        let (_, _, metadata_pdu) = generic_metadata_pdu(CrcFlag::NoCrc, LargeFileFlag::Large, &[]);
+        let (_, _, metadata_pdu) = generic_metadata_pdu(
+            CrcFlag::NoCrc,
+            ChecksumType::Crc32,
+            false,
+            LargeFileFlag::Large,
+            &[],
+        );
         let mut metadata_vec = metadata_pdu.to_vec().unwrap();
-        metadata_vec[7] = FileDirectiveType::EofPdu as u8;
+        metadata_vec[7] = FileDirectiveType::Eof as u8;
         let metadata_error = MetadataPduReader::from_bytes(&metadata_vec);
         assert!(metadata_error.is_err());
         let error = metadata_error.unwrap_err();
         if let PduError::WrongDirectiveType { found, expected } = error {
-            assert_eq!(found, FileDirectiveType::EofPdu);
-            assert_eq!(expected, FileDirectiveType::MetadataPdu);
+            assert_eq!(found, FileDirectiveType::Eof);
+            assert_eq!(expected, FileDirectiveType::Metadata);
             assert_eq!(
                 error.to_string(),
-                "found directive type EofPdu, expected MetadataPdu"
+                "wrong directive type, found Eof, expected Metadata"
             );
         } else {
             panic!("Expected InvalidDirectiveType error, got {:?}", error);


@@ -1,13 +1,12 @@
 //! CFDP Packet Data Unit (PDU) support.
+use crate::cfdp::pdu::ack::InvalidAckedDirectiveCodeError;
+use crate::cfdp::pdu::nak::InvalidStartOrEndOfScopeError;
 use crate::cfdp::*;
+use crate::crc::CRC_CCITT_FALSE;
 use crate::util::{UnsignedByteField, UnsignedByteFieldU8, UnsignedEnum};
 use crate::ByteConversionError;
-use crate::CRC_CCITT_FALSE;
 #[cfg(feature = "alloc")]
 use alloc::vec::Vec;
-use core::fmt::{Display, Formatter};
-#[cfg(feature = "std")]
-use std::error::Error;
 pub mod ack;
 pub mod eof;
@@ -16,153 +15,115 @@ pub mod finished;
 pub mod metadata;
 pub mod nak;
+/// File directive type.
 #[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "defmt", derive(defmt::Format))]
 #[repr(u8)]
 pub enum FileDirectiveType {
-    EofPdu = 0x04,
-    FinishedPdu = 0x05,
-    AckPdu = 0x06,
-    MetadataPdu = 0x07,
-    NakPdu = 0x08,
-    PromptPdu = 0x09,
-    KeepAlivePdu = 0x0c,
+    /// EOF.
+    Eof = 0x04,
+    /// Finished.
+    Finished = 0x05,
+    /// ACK.
+    Ack = 0x06,
+    /// Metadata.
+    Metadata = 0x07,
+    /// NAK.
+    Nak = 0x08,
+    /// Prompt.
+    Prompt = 0x09,
+    /// Keep Alive.
+    KeepAlive = 0x0c,
 }
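Since the enum derives TryFromPrimitive and IntoPrimitive, raw directive field values convert explicitly. A small sketch (crate path assumed):

    use spacepackets::cfdp::pdu::FileDirectiveType;

    fn main() {
        assert_eq!(
            FileDirectiveType::try_from(0x07u8).unwrap(),
            FileDirectiveType::Metadata
        );
        assert_eq!(u8::from(FileDirectiveType::Eof), 0x04);
        // Values outside the directive table are rejected.
        assert!(FileDirectiveType::try_from(0x0au8).is_err());
    }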
-#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+/// PDU error.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "defmt", derive(defmt::Format))]
 pub enum PduError {
-    ByteConversion(ByteConversionError),
-    /// Found version ID invalid, not equal to [CFDP_VERSION_2].
+    /// Byte conversion error.
+    #[error("byte conversion error: {0}")]
+    ByteConversion(#[from] ByteConversionError),
+    /// Found version ID invalid, not equal to [super::CFDP_VERSION_2].
+    #[error("CFDP version missmatch, found {0}, expected {ver}", ver = super::CFDP_VERSION_2)]
     CfdpVersionMissmatch(u8),
     /// Invalid length for the entity ID detected. Only the values 1, 2, 4 and 8 are supported.
+    #[error("invalid PDU entity ID length {0}, only [1, 2, 4, 8] are allowed")]
     InvalidEntityLen(u8),
     /// Invalid length for the entity ID detected. Only the values 1, 2, 4 and 8 are supported.
+    #[error("invalid transaction ID length {0}")]
     InvalidTransactionSeqNumLen(u8),
+    /// Source and destination entity ID lengths do not match.
+    #[error(
+        "missmatch of PDU source ID length {src_id_len} and destination ID length {dest_id_len}"
+    )]
     SourceDestIdLenMissmatch {
+        /// Source ID length.
         src_id_len: usize,
+        /// Destination ID length.
        dest_id_len: usize,
     },
     /// Wrong directive type, for example when parsing the directive field for a file directive
     /// PDU.
+    #[error("wrong directive type, found {found:?}, expected {expected:?}")]
     WrongDirectiveType {
+        /// Found directive type.
         found: FileDirectiveType,
+        /// Expected directive type.
         expected: FileDirectiveType,
     },
     /// The directive type field contained a value not in the range of permitted values. This can
-    /// also happen if an invalid value is passed to the ACK PDU constructor.
+    /// also happen if an invalid value is passed to the ACK PDU reader.
+    #[error("invalid directive type, found {found:?}, expected {expected:?}")]
     InvalidDirectiveType {
+        /// Found raw directive type.
         found: u8,
+        /// Expected raw directive type if applicable.
         expected: Option<FileDirectiveType>,
     },
-    InvalidStartOrEndOfScopeValue,
+    /// Invalid start or end of scope for a NAK PDU.
+    #[error("nak pdu: {0}")]
+    InvalidStartOrEndOfScope(#[from] InvalidStartOrEndOfScopeError),
     /// Invalid condition code. Contains the raw detected value.
+    #[error("invalid condition code {0}")]
     InvalidConditionCode(u8),
     /// Invalid checksum type which is not part of the checksums listed in the
     /// [SANA Checksum Types registry](https://sanaregistry.org/r/checksum_identifiers/).
+    #[error("invalid checksum type {0}")]
     InvalidChecksumType(u8),
+    /// File size is too large.
+    #[error("file size {0} too large")]
     FileSizeTooLarge(u64),
     /// If the CRC flag for a PDU is enabled and the checksum check fails. Contains raw 16-bit CRC.
-    ChecksumError(u16),
+    #[error("checksum error for checksum {0}")]
+    Checksum(u16),
     /// Generic error for invalid PDU formats.
-    FormatError,
+    #[error("generic PDU format error")]
+    Format,
     /// Error handling a TLV field.
-    TlvLvError(TlvLvError),
+    #[error("PDU error: {0}")]
+    TlvLv(#[from] TlvLvError),
 }
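With the thiserror derive, Display and std::error::Error now come from the #[error(...)] attributes above instead of the hand-written implementations removed below. A sketch of consuming the error (crate path assumed):

    use spacepackets::cfdp::pdu::PduError;

    fn describe(error: &PduError) -> String {
        match error {
            PduError::Checksum(raw) => format!("CRC failure, raw checksum {raw:#06x}"),
            PduError::InvalidDirectiveType { found, expected } => {
                format!("bad directive byte {found}, expected {expected:?}")
            }
            // Every other variant already carries a derived Display message.
            other => other.to_string(),
        }
    }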
-impl Display for PduError {
-    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
-        match self {
-            PduError::InvalidEntityLen(raw_id) => {
-                write!(
-                    f,
-                    "invalid PDU entity ID length {raw_id}, only [1, 2, 4, 8] are allowed"
-                )
-            }
-            PduError::InvalidStartOrEndOfScopeValue => {
-                write!(f, "invalid start or end of scope for NAK PDU")
-            }
-            PduError::InvalidTransactionSeqNumLen(raw_id) => {
-                write!(
-                    f,
-                    "invalid PDUtransaction seq num length {raw_id}, only [1, 2, 4, 8] are allowed"
-                )
-            }
-            PduError::CfdpVersionMissmatch(raw) => {
-                write!(
-                    f,
-                    "cfdp version missmatch, found {raw}, expected {CFDP_VERSION_2}"
-                )
-            }
-            PduError::SourceDestIdLenMissmatch {
-                src_id_len,
-                dest_id_len,
-            } => {
-                write!(
-                    f,
-                    "missmatch of PDU source length {src_id_len} and destination length {dest_id_len}"
-                )
-            }
-            PduError::ByteConversion(e) => {
-                write!(f, "{}", e)
-            }
-            PduError::FileSizeTooLarge(value) => {
-                write!(f, "file size value {value} exceeds allowed 32 bit width")
-            }
-            PduError::WrongDirectiveType { found, expected } => {
-                write!(f, "found directive type {found:?}, expected {expected:?}")
-            }
-            PduError::InvalidConditionCode(raw_code) => {
-                write!(f, "found invalid condition code with raw value {raw_code}")
-            }
-            PduError::InvalidDirectiveType { found, expected } => {
-                write!(
-                    f,
-                    "invalid directive type value {found}, expected {expected:?}"
-                )
-            }
-            PduError::InvalidChecksumType(checksum_type) => {
-                write!(f, "invalid checksum type {checksum_type}")
-            }
-            PduError::ChecksumError(checksum) => {
-                write!(f, "checksum error for CRC {checksum:#04x}")
-            }
-            PduError::TlvLvError(error) => {
-                write!(f, "pdu tlv error: {error}")
-            }
-            PduError::FormatError => {
-                write!(f, "generic PDU format error")
-            }
+impl From<InvalidAckedDirectiveCodeError> for PduError {
+    fn from(value: InvalidAckedDirectiveCodeError) -> Self {
+        Self::InvalidDirectiveType {
+            found: value.0 as u8,
+            expected: None,
         }
     }
 }
-#[cfg(feature = "std")]
-impl Error for PduError {
-    fn source(&self) -> Option<&(dyn Error + 'static)> {
-        match self {
-            PduError::ByteConversion(e) => Some(e),
-            PduError::TlvLvError(e) => Some(e),
-            _ => None,
-        }
-    }
-}
-impl From<ByteConversionError> for PduError {
-    fn from(value: ByteConversionError) -> Self {
-        Self::ByteConversion(value)
-    }
-}
-impl From<TlvLvError> for PduError {
-    fn from(e: TlvLvError) -> Self {
-        Self::TlvLvError(e)
-    }
-}
+/// Generic trait for a PDU which can be written to bytes.
 pub trait WritablePduPacket {
+    /// Length when written to bytes.
     fn len_written(&self) -> usize;
+    /// Write the PDU to a raw buffer, returning the written length.
     fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError>;
+    /// Convert the PDU to an owned vector of bytes.
     #[cfg(feature = "alloc")]
     fn to_vec(&self) -> Result<Vec<u8>, PduError> {
         // This is the correct way to do this. See
@@ -176,39 +137,58 @@ pub trait WritablePduPacket {
/// Abstraction trait for fields and properties common for all PDUs. /// Abstraction trait for fields and properties common for all PDUs.
pub trait CfdpPdu { pub trait CfdpPdu {
/// PDU header.
fn pdu_header(&self) -> &PduHeader; fn pdu_header(&self) -> &PduHeader;
/// Source ID (file sender).
#[inline]
fn source_id(&self) -> UnsignedByteField { fn source_id(&self) -> UnsignedByteField {
self.pdu_header().common_pdu_conf().source_entity_id self.pdu_header().common_pdu_conf().source_entity_id
} }
/// Destination ID (file sender).
#[inline]
fn dest_id(&self) -> UnsignedByteField { fn dest_id(&self) -> UnsignedByteField {
self.pdu_header().common_pdu_conf().dest_entity_id self.pdu_header().common_pdu_conf().dest_entity_id
} }
/// Transaction sequence number.
#[inline]
fn transaction_seq_num(&self) -> UnsignedByteField { fn transaction_seq_num(&self) -> UnsignedByteField {
self.pdu_header().common_pdu_conf().transaction_seq_num self.pdu_header().common_pdu_conf().transaction_seq_num
} }
/// Transmission mode.
#[inline]
fn transmission_mode(&self) -> TransmissionMode { fn transmission_mode(&self) -> TransmissionMode {
self.pdu_header().common_pdu_conf().trans_mode self.pdu_header().common_pdu_conf().trans_mode
} }
/// Direction.
#[inline]
fn direction(&self) -> Direction { fn direction(&self) -> Direction {
self.pdu_header().common_pdu_conf().direction self.pdu_header().common_pdu_conf().direction
} }
/// CRC flag.
#[inline]
fn crc_flag(&self) -> CrcFlag { fn crc_flag(&self) -> CrcFlag {
self.pdu_header().common_pdu_conf().crc_flag self.pdu_header().common_pdu_conf().crc_flag
} }
/// File flag.
#[inline]
fn file_flag(&self) -> LargeFileFlag { fn file_flag(&self) -> LargeFileFlag {
self.pdu_header().common_pdu_conf().file_flag self.pdu_header().common_pdu_conf().file_flag
} }
/// PDU type.
#[inline]
fn pdu_type(&self) -> PduType { fn pdu_type(&self) -> PduType {
self.pdu_header().pdu_type() self.pdu_header().pdu_type()
} }
/// File directive type when applicable.
fn file_directive_type(&self) -> Option<FileDirectiveType>; fn file_directive_type(&self) -> Option<FileDirectiveType>;
} }
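Because creators and readers alike implement CfdpPdu, common header fields can be inspected generically. A sketch, assuming Debug implementations on the returned field types:

    use spacepackets::cfdp::pdu::CfdpPdu;

    fn log_pdu_summary(pdu: &impl CfdpPdu) {
        println!(
            "PDU type {:?}, directive {:?}, source {:?}, destination {:?}, seq num {:?}",
            pdu.pdu_type(),
            pdu.file_directive_type(),
            pdu.source_id(),
            pdu.dest_id(),
            pdu.transaction_seq_num(),
        );
    }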
@@ -220,18 +200,26 @@ pub trait CfdpPdu {
/// same. /// same.
#[derive(Debug, Copy, Clone, Eq)] #[derive(Debug, Copy, Clone, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct CommonPduConfig { pub struct CommonPduConfig {
source_entity_id: UnsignedByteField, source_entity_id: UnsignedByteField,
dest_entity_id: UnsignedByteField, dest_entity_id: UnsignedByteField,
/// Transaction sequence number.
pub transaction_seq_num: UnsignedByteField, pub transaction_seq_num: UnsignedByteField,
/// Transmission mode.
pub trans_mode: TransmissionMode, pub trans_mode: TransmissionMode,
/// File flag.
pub file_flag: LargeFileFlag, pub file_flag: LargeFileFlag,
/// CRC flag.
pub crc_flag: CrcFlag, pub crc_flag: CrcFlag,
/// Direction.
pub direction: Direction, pub direction: Direction,
} }
// TODO: Builder pattern might be applicable here.. // TODO: Builder pattern might be applicable here..
impl CommonPduConfig { impl CommonPduConfig {
/// Generic constructor.
#[inline]
pub fn new( pub fn new(
source_id: impl Into<UnsignedByteField>, source_id: impl Into<UnsignedByteField>,
dest_id: impl Into<UnsignedByteField>, dest_id: impl Into<UnsignedByteField>,
@@ -263,6 +251,8 @@ impl CommonPduConfig {
}) })
} }
/// Constructor for custom byte field with default field values for the other fields.
#[inline]
pub fn new_with_byte_fields( pub fn new_with_byte_fields(
source_id: impl Into<UnsignedByteField>, source_id: impl Into<UnsignedByteField>,
dest_id: impl Into<UnsignedByteField>, dest_id: impl Into<UnsignedByteField>,
@@ -279,10 +269,13 @@ impl CommonPduConfig {
) )
} }
/// Source ID (file sender).
#[inline]
pub fn source_id(&self) -> UnsignedByteField { pub fn source_id(&self) -> UnsignedByteField {
self.source_entity_id self.source_entity_id
} }
#[inline]
fn source_dest_id_check( fn source_dest_id_check(
source_id: impl Into<UnsignedByteField>, source_id: impl Into<UnsignedByteField>,
dest_id: impl Into<UnsignedByteField>, dest_id: impl Into<UnsignedByteField>,
@@ -305,6 +298,8 @@ impl CommonPduConfig {
Ok((source_id, dest_id)) Ok((source_id, dest_id))
} }
/// Set the source and destination ID field.
#[inline]
pub fn set_source_and_dest_id( pub fn set_source_and_dest_id(
&mut self, &mut self,
source_id: impl Into<UnsignedByteField>, source_id: impl Into<UnsignedByteField>,
@@ -316,6 +311,8 @@ impl CommonPduConfig {
Ok(()) Ok(())
} }
/// Destination ID (file receiver).
#[inline]
pub fn dest_id(&self) -> UnsignedByteField { pub fn dest_id(&self) -> UnsignedByteField {
self.dest_entity_id self.dest_entity_id
} }
@@ -324,6 +321,7 @@ impl CommonPduConfig {
impl Default for CommonPduConfig { impl Default for CommonPduConfig {
/// The defaults for the source ID, destination ID and the transaction sequence number is the /// The defaults for the source ID, destination ID and the transaction sequence number is the
/// [UnsignedByteFieldU8] with an intitial value of 0 /// [UnsignedByteFieldU8] with an intitial value of 0
#[inline]
fn default() -> Self { fn default() -> Self {
// The new function can not fail for these input parameters. // The new function can not fail for these input parameters.
Self::new( Self::new(
@@ -340,6 +338,7 @@ impl Default for CommonPduConfig {
} }
impl PartialEq for CommonPduConfig { impl PartialEq for CommonPduConfig {
#[inline]
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
self.source_entity_id.value() == other.source_entity_id.value() self.source_entity_id.value() == other.source_entity_id.value()
&& self.dest_entity_id.value() == other.dest_entity_id.value() && self.dest_entity_id.value() == other.dest_entity_id.value()
@@ -351,6 +350,7 @@ impl PartialEq for CommonPduConfig {
} }
} }
/// Fixed header length of the PDU header.
pub const FIXED_HEADER_LEN: usize = 4; pub const FIXED_HEADER_LEN: usize = 4;
/// Abstraction for the PDU header common to all CFDP PDUs. /// Abstraction for the PDU header common to all CFDP PDUs.
@@ -358,6 +358,7 @@ pub const FIXED_HEADER_LEN: usize = 4;
/// For detailed information, refer to chapter 5.1 of the CFDP standard. /// For detailed information, refer to chapter 5.1 of the CFDP standard.
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct PduHeader { pub struct PduHeader {
pdu_type: PduType, pdu_type: PduType,
pdu_conf: CommonPduConfig, pdu_conf: CommonPduConfig,
@@ -367,6 +368,11 @@ pub struct PduHeader {
} }
impl PduHeader { impl PduHeader {
/// Fixed length of the PDU header when written to a raw buffer.
pub const FIXED_LEN: usize = FIXED_HEADER_LEN;
/// Constructor for a File Data PDU header.
#[inline]
pub fn new_for_file_data( pub fn new_for_file_data(
pdu_conf: CommonPduConfig, pdu_conf: CommonPduConfig,
pdu_datafield_len: u16, pdu_datafield_len: u16,
@@ -382,6 +388,8 @@ impl PduHeader {
) )
} }
/// Constructor for a file data PDU.
#[inline]
pub fn new_for_file_data_default(pdu_conf: CommonPduConfig, pdu_datafield_len: u16) -> Self { pub fn new_for_file_data_default(pdu_conf: CommonPduConfig, pdu_datafield_len: u16) -> Self {
Self::new_generic( Self::new_generic(
PduType::FileData, PduType::FileData,
@@ -391,7 +399,10 @@ impl PduHeader {
             SegmentationControl::NoRecordBoundaryPreservation,
         )
     }
-    pub fn new_no_file_data(pdu_conf: CommonPduConfig, pdu_datafield_len: u16) -> Self {
+    /// Constructor for a file directive PDU.
+    #[inline]
+    pub fn new_for_file_directive(pdu_conf: CommonPduConfig, pdu_datafield_len: u16) -> Self {
         Self::new_generic(
             PduType::FileDirective,
             pdu_conf,
@@ -401,6 +412,20 @@ impl PduHeader {
         )
     }
+    /// Constructor from a given [CommonPduConfig] and for a file directive PDU.
+    #[inline]
+    pub fn from_pdu_conf_for_file_directive(pdu_conf: CommonPduConfig) -> Self {
+        Self::new_generic(
+            PduType::FileDirective,
+            pdu_conf,
+            0,
+            SegmentMetadataFlag::NotPresent,
+            SegmentationControl::NoRecordBoundaryPreservation,
+        )
+    }
+    /// Generic constructor.
+    #[inline]
     pub fn new_generic(
         pdu_type: PduType,
         pdu_conf: CommonPduConfig,
@@ -418,6 +443,7 @@ impl PduHeader {
} }
/// Returns only the length of the PDU header when written to a raw buffer. /// Returns only the length of the PDU header when written to a raw buffer.
#[inline]
pub fn header_len(&self) -> usize { pub fn header_len(&self) -> usize {
FIXED_HEADER_LEN FIXED_HEADER_LEN
+ self.pdu_conf.source_entity_id.size() + self.pdu_conf.source_entity_id.size()
@@ -425,25 +451,28 @@ impl PduHeader {
+ self.pdu_conf.dest_entity_id.size() + self.pdu_conf.dest_entity_id.size()
} }
/// PDU data field length.
#[inline]
pub fn pdu_datafield_len(&self) -> usize { pub fn pdu_datafield_len(&self) -> usize {
self.pdu_datafield_len.into() self.pdu_datafield_len.into()
} }
/// Returns the full length of the PDU when written to a raw buffer, which is the header length /// Returns the full length of the PDU when written to a raw buffer, which is the header length
/// plus the PDU datafield length. /// plus the PDU datafield length.
#[inline]
pub fn pdu_len(&self) -> usize { pub fn pdu_len(&self) -> usize {
self.header_len() + self.pdu_datafield_len as usize self.header_len() + self.pdu_datafield_len as usize
} }
-    pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
-        // Internal note: There is currently no way to pass a PDU configuration like this, but
-        // this check is still kept for defensive programming.
-        if self.pdu_conf.source_entity_id.size() != self.pdu_conf.dest_entity_id.size() {
-            return Err(PduError::SourceDestIdLenMissmatch {
-                src_id_len: self.pdu_conf.source_entity_id.size(),
-                dest_id_len: self.pdu_conf.dest_entity_id.size(),
-            });
-        }
+    /// Write the header to a raw buffer, returning the written length on success.
+    pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
+        // The API does not allow passing entity IDs with different sizes, so this should
+        // never happen.
+        assert_eq!(
+            self.pdu_conf.source_entity_id.size(),
+            self.pdu_conf.dest_entity_id.size(),
+            "unexpected missmatch of source and destination entity ID length"
+        );
         if buf.len()
             < FIXED_HEADER_LEN
                 + self.pdu_conf.source_entity_id.size()
@@ -452,8 +481,7 @@ impl PduHeader {
             return Err(ByteConversionError::ToSliceTooSmall {
                 found: buf.len(),
                 expected: FIXED_HEADER_LEN,
-            }
-            .into());
+            });
         }
         let mut current_idx = 0;
         buf[current_idx] = (CFDP_VERSION_2 << 5)
@@ -503,7 +531,7 @@ impl PduHeader {
         let mut digest = CRC_CCITT_FALSE.digest();
         digest.update(&buf[..self.pdu_len()]);
         if digest.finalize() != 0 {
-            return Err(PduError::ChecksumError(u16::from_be_bytes(
+            return Err(PduError::Checksum(u16::from_be_bytes(
                 buf[self.pdu_len() - 2..self.pdu_len()].try_into().unwrap(),
             )));
         }
@@ -603,17 +631,27 @@ impl PduHeader {
current_idx, current_idx,
)) ))
} }
/// PDU type.
#[inline]
pub fn pdu_type(&self) -> PduType { pub fn pdu_type(&self) -> PduType {
self.pdu_type self.pdu_type
} }
/// Common PDU configuration fields.
#[inline]
pub fn common_pdu_conf(&self) -> &CommonPduConfig { pub fn common_pdu_conf(&self) -> &CommonPduConfig {
&self.pdu_conf &self.pdu_conf
} }
/// Segment metadata flag.
#[inline]
pub fn seg_metadata_flag(&self) -> SegmentMetadataFlag { pub fn seg_metadata_flag(&self) -> SegmentMetadataFlag {
self.seg_metadata_flag self.seg_metadata_flag
} }
/// Segmentation Control.
#[inline]
pub fn seg_ctrl(&self) -> SegmentationControl { pub fn seg_ctrl(&self) -> SegmentationControl {
self.seg_ctrl self.seg_ctrl
} }
@@ -778,7 +816,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3); let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id) let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed"); .expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5); let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
assert_eq!(pdu_header.pdu_type(), PduType::FileDirective); assert_eq!(pdu_header.pdu_type(), PduType::FileDirective);
let common_conf_ref = pdu_header.common_pdu_conf(); let common_conf_ref = pdu_header.common_pdu_conf();
assert_eq!(*common_conf_ref, common_pdu_cfg); assert_eq!(*common_conf_ref, common_pdu_cfg);
@@ -844,7 +882,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3); let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id) let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed"); .expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5); let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7]; let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf); let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok()); assert!(res.is_ok());
@@ -861,7 +899,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3); let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id) let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed"); .expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5); let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7]; let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf); let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok()); assert!(res.is_ok());
@@ -934,7 +972,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3); let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id) let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed"); .expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5); let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7]; let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf); let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok()); assert!(res.is_ok());
@@ -947,7 +985,7 @@ mod tests {
assert_eq!(raw_version, CFDP_VERSION_2 + 1); assert_eq!(raw_version, CFDP_VERSION_2 + 1);
assert_eq!( assert_eq!(
error.to_string(), error.to_string(),
"cfdp version missmatch, found 2, expected 1" "CFDP version missmatch, found 2, expected 1"
); );
} else { } else {
panic!("invalid exception: {}", error); panic!("invalid exception: {}", error);
@@ -979,7 +1017,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3); let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id) let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed"); .expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5); let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7]; let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf); let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok()); assert!(res.is_ok());
@@ -995,7 +1033,7 @@ mod tests {
assert_eq!(expected, 7); assert_eq!(expected, 7);
assert_eq!( assert_eq!(
error.to_string(), error.to_string(),
"source slice with size 6 too small, expected at least 7 bytes" "byte conversion error: source slice with size 6 too small, expected at least 7 bytes"
); );
} }
} }
@@ -1050,7 +1088,7 @@ mod tests {
assert_eq!(dest_id_len, 2); assert_eq!(dest_id_len, 2);
assert_eq!( assert_eq!(
error.to_string(), error.to_string(),
"missmatch of PDU source length 1 and destination length 2" "missmatch of PDU source ID length 1 and destination ID length 2"
); );
} }
} }
@@ -1062,7 +1100,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3); let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id) let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed"); .expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5); let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7]; let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf); let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok()); assert!(res.is_ok());
@@ -1086,7 +1124,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3); let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id) let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed"); .expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5); let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7]; let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf); let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok()); assert!(res.is_ok());
@@ -1118,4 +1156,12 @@ mod tests {
let common_pdu_cfg_1 = common_pdu_cfg_0; let common_pdu_cfg_1 = common_pdu_cfg_0;
assert_eq!(common_pdu_cfg_0, common_pdu_cfg_1); assert_eq!(common_pdu_cfg_0, common_pdu_cfg_1);
} }
#[test]
fn test_ctor_from_pdu_conf() {
assert_eq!(
PduHeader::from_pdu_conf_for_file_directive(CommonPduConfig::default()),
PduHeader::new_for_file_directive(CommonPduConfig::default(), 0)
);
}
} }
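The renamed header constructors in use, mirroring test_ctor_from_pdu_conf above; write_to_bytes now returns a ByteConversionError instead of a PduError on failure (crate paths assumed):

    use spacepackets::cfdp::pdu::{CommonPduConfig, PduHeader};

    fn main() {
        let conf = CommonPduConfig::default();
        let header = PduHeader::new_for_file_directive(conf, 0);
        assert_eq!(header, PduHeader::from_pdu_conf_for_file_directive(conf));
        let mut buf = [0u8; 16];
        let written = header.write_to_bytes(&mut buf).expect("writing PDU header failed");
        assert_eq!(written, header.header_len());
        // Parsing the buffer back yields the same header and the number of bytes read.
        let (read_back, read_len) = PduHeader::from_bytes(&buf).expect("parsing PDU header failed");
        assert_eq!(read_back, header);
        assert_eq!(read_len, written);
    }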

(File diff suppressed because it is too large.)


@@ -9,19 +9,27 @@ use crate::ByteConversionError;
 use alloc::vec;
 #[cfg(feature = "alloc")]
 use alloc::vec::Vec;
+#[cfg(feature = "alloc")]
+pub use alloc_mod::*;
 use num_enum::{IntoPrimitive, TryFromPrimitive};
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
+use super::{InvalidTlvTypeFieldError, TlvLvDataTooLargeError};
 pub mod msg_to_user;
+/// Minimum length of a type-length-value structure, including type and length fields.
 pub const MIN_TLV_LEN: usize = 2;
+/// Trait for generic TLV structures.
 pub trait GenericTlv {
+    /// TLV type field.
     fn tlv_type_field(&self) -> TlvTypeField;
     /// Checks whether the type field contains one of the standard types specified in the CFDP
     /// standard and is part of the [TlvType] enum.
+    #[inline]
     fn is_standard_tlv(&self) -> bool {
         if let TlvTypeField::Standard(_) = self.tlv_type_field() {
             return true;
@@ -30,6 +38,7 @@ pub trait GenericTlv {
     }
     /// Returns the standard TLV type if the TLV field is not a custom field
+    #[inline]
     fn tlv_type(&self) -> Option<TlvType> {
         if let TlvTypeField::Standard(tlv_type) = self.tlv_type_field() {
             Some(tlv_type)
@@ -39,9 +48,40 @@ pub trait GenericTlv {
         }
     }
+/// Readable TLV structure trait.
+pub trait ReadableTlv {
+    /// Value field of the TLV.
+    fn value(&self) -> &[u8];
+    /// Checks whether the value field is empty.
+    #[inline]
+    fn is_empty(&self) -> bool {
+        self.value().is_empty()
+    }
+    /// Helper method to retrieve the length of the value. Simply calls the [slice::len] method of
+    /// [Self::value]
+    #[inline]
+    fn len_value(&self) -> usize {
+        self.value().len()
+    }
+    /// Returns the full raw length, including the length byte.
+    #[inline]
+    fn len_full(&self) -> usize {
+        self.len_value() + 2
+    }
+}
/// Writable TLV structure trait.
pub trait WritableTlv { pub trait WritableTlv {
/// Write the TLV to bytes.
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>; fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
/// Length of the written TLV.
fn len_written(&self) -> usize; fn len_written(&self) -> usize;
/// Convenience method to write the TLV to an owned [alloc::vec::Vec].
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
fn to_vec(&self) -> Vec<u8> { fn to_vec(&self) -> Vec<u8> {
let mut buf = vec![0; self.len_written()]; let mut buf = vec![0; self.len_written()];
@@ -50,31 +90,50 @@ pub trait WritableTlv {
} }
} }
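The split into `GenericTlv` (type field), `ReadableTlv` (value access) and `WritableTlv` (serialization) makes it possible to write helpers that are generic over both the borrowed and the owned TLV flavour. A minimal sketch under that assumption, placed in this module so the traits and `ByteConversionError` are in scope; `dump_tlv` is a hypothetical helper name, not part of the crate.

```rust
/// Hypothetical helper which works for any TLV flavour of this module that
/// implements all three traits (e.g. the borrowed Tlv and the owned TlvOwned).
fn dump_tlv<T: GenericTlv + ReadableTlv + WritableTlv>(
    tlv: &T,
    buf: &mut [u8],
) -> Result<usize, ByteConversionError> {
    // GenericTlv exposes the type field, ReadableTlv the value and its lengths,
    // WritableTlv the serialization entry point.
    debug_assert_eq!(tlv.len_full(), tlv.len_written());
    let _tlv_type = tlv.tlv_type_field();
    tlv.write_to_bytes(buf)
}
```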
/// TLV type.
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] #[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum TlvType { pub enum TlvType {
/// Filestore request.
FilestoreRequest = 0x00, FilestoreRequest = 0x00,
/// Filestore response.
FilestoreResponse = 0x01, FilestoreResponse = 0x01,
/// Message to user.
MsgToUser = 0x02, MsgToUser = 0x02,
/// Fault handler.
FaultHandler = 0x04, FaultHandler = 0x04,
/// Flow label.
FlowLabel = 0x05, FlowLabel = 0x05,
/// Entity ID.
EntityId = 0x06, EntityId = 0x06,
} }
/// TLV type field variants.
///
/// This allows specifying custom variants as well.
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum TlvTypeField { pub enum TlvTypeField {
/// Standard TLV types.
Standard(TlvType), Standard(TlvType),
/// Custom TLV type.
Custom(u8), Custom(u8),
} }
/// Filestore action codes as specified in the standard.
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)] #[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum FilestoreActionCode { pub enum FilestoreActionCode {
/// Create file.
CreateFile = 0b0000, CreateFile = 0b0000,
/// Delete file.
DeleteFile = 0b0001, DeleteFile = 0b0001,
/// Rename file.
RenameFile = 0b0010, RenameFile = 0b0010,
/// This operation appends one file to another. The first specified name will form the first /// This operation appends one file to another. The first specified name will form the first
/// part of the new file and the name of the new file. This function can be used to get /// part of the new file and the name of the new file. This function can be used to get
@@ -83,9 +142,13 @@ pub enum FilestoreActionCode {
/// This operation replaces the content of the first specified file with the content of /// This operation replaces the content of the first specified file with the content of
/// the secondly specified file. /// the secondly specified file.
ReplaceFile = 0b0100, ReplaceFile = 0b0100,
/// Create directory.
CreateDirectory = 0b0101, CreateDirectory = 0b0101,
/// Remove directory.
RemoveDirectory = 0b0110, RemoveDirectory = 0b0110,
/// Deny file.
DenyFile = 0b0111, DenyFile = 0b0111,
/// Deny directory.
DenyDirectory = 0b1000, DenyDirectory = 0b1000,
} }
@@ -118,6 +181,7 @@ impl From<TlvTypeField> for u8 {
/// this will be the lifetime of that data reference. /// this will be the lifetime of that data reference.
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct Tlv<'data> { pub struct Tlv<'data> {
tlv_type_field: TlvTypeField, tlv_type_field: TlvTypeField,
#[cfg_attr(feature = "serde", serde(borrow))] #[cfg_attr(feature = "serde", serde(borrow))]
@@ -125,14 +189,22 @@ pub struct Tlv<'data> {
} }
impl<'data> Tlv<'data> {
/// Minimum length of a TLV structure, including type and length fields.
pub const MIN_LEN: usize = MIN_TLV_LEN;
/// Generic constructor for a TLV structure.
pub fn new(tlv_type: TlvType, data: &[u8]) -> Result<Tlv<'_>, TlvLvDataTooLargeError> {
Ok(Tlv {
tlv_type_field: TlvTypeField::Standard(tlv_type),
lv: Lv::new(data)?,
})
}
/// Constructor for a TLV with a custom type field.
pub fn new_with_custom_type(
tlv_type: u8,
data: &[u8],
) -> Result<Tlv<'_>, TlvLvDataTooLargeError> {
Ok(Tlv {
tlv_type_field: TlvTypeField::Custom(tlv_type),
lv: Lv::new(data)?,
@@ -147,31 +219,11 @@ impl<'data> Tlv<'data> {
} }
} }
pub fn value(&self) -> &[u8] {
self.lv.value()
}
/// Checks whether the value field is empty.
pub fn is_empty(&self) -> bool {
self.value().is_empty()
}
/// Helper method to retrieve the length of the value. Simply calls the [slice::len] method of
/// [Self::value]
pub fn len_value(&self) -> usize {
self.value().len()
}
/// Returns the full raw length, including the length byte.
pub fn len_full(&self) -> usize {
self.len_value() + 2
}
/// Creates a TLV given a raw bytestream. Please note that it is not necessary to pass the
/// bytestream with the exact size of the expected TLV. This function will take care
/// of parsing the length byte, and the length of the parsed TLV can be retrieved using
/// [Self::len_full].
pub fn from_bytes(buf: &'data [u8]) -> Result<Tlv<'data>, ByteConversionError> {
generic_len_check_deserialization(buf, MIN_TLV_LEN)?; generic_len_check_deserialization(buf, MIN_TLV_LEN)?;
let mut tlv = Self { let mut tlv = Self {
tlv_type_field: TlvTypeField::from(buf[0]), tlv_type_field: TlvTypeField::from(buf[0]),
@@ -185,9 +237,32 @@ impl<'data> Tlv<'data> {
/// If the TLV was generated from a raw bytestream using [Self::from_bytes], the raw start /// If the TLV was generated from a raw bytestream using [Self::from_bytes], the raw start
/// of the TLV can be retrieved with this method. /// of the TLV can be retrieved with this method.
#[inline]
pub fn raw_data(&self) -> Option<&[u8]> { pub fn raw_data(&self) -> Option<&[u8]> {
self.lv.raw_data() self.lv.raw_data()
} }
/// Converts to an owned TLV variant, allocating memory for the value field.
#[cfg(feature = "alloc")]
pub fn to_owned(&self) -> TlvOwned {
TlvOwned {
tlv_type_field: self.tlv_type_field,
data: self.value().to_vec(),
}
}
}
#[cfg(feature = "alloc")]
impl PartialEq<TlvOwned> for Tlv<'_> {
fn eq(&self, other: &TlvOwned) -> bool {
self.tlv_type_field == other.tlv_type_field && self.value() == other.value()
}
}
impl ReadableTlv for Tlv<'_> {
fn value(&self) -> &[u8] {
self.lv.value()
}
} }
impl WritableTlv for Tlv<'_> { impl WritableTlv for Tlv<'_> {
@@ -197,38 +272,134 @@ impl WritableTlv for Tlv<'_> {
self.lv.write_to_be_bytes_no_len_check(&mut buf[1..]); self.lv.write_to_be_bytes_no_len_check(&mut buf[1..]);
Ok(self.len_full()) Ok(self.len_full())
} }
#[inline]
fn len_written(&self) -> usize { fn len_written(&self) -> usize {
self.len_full() self.len_full()
} }
} }
impl GenericTlv for Tlv<'_> { impl GenericTlv for Tlv<'_> {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField { fn tlv_type_field(&self) -> TlvTypeField {
self.tlv_type_field self.tlv_type_field
} }
} }
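A round-trip sketch for the borrowed `Tlv` type, assuming it sits in this module's test code so that `Tlv`, `TlvType` and the traits above are in scope via `use super::*;`.

```rust
#[test]
fn tlv_roundtrip_sketch() {
    let value = [1, 2, 3, 4];
    let tlv = Tlv::new(TlvType::FlowLabel, &value).expect("TLV creation failed");
    assert_eq!(tlv.len_value(), 4);
    assert_eq!(tlv.len_full(), 6);

    let mut buf: [u8; 16] = [0; 16];
    let written = tlv.write_to_bytes(&mut buf).expect("writing TLV failed");
    assert_eq!(written, 6);

    // from_bytes accepts a buffer larger than the TLV itself; the parsed length
    // can be queried with len_full() afterwards.
    let read_back = Tlv::from_bytes(&buf).expect("reading TLV back failed");
    assert_eq!(read_back.tlv_type(), Some(TlvType::FlowLabel));
    assert_eq!(read_back.value(), value);
}
```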
/// Components of the TLV module which require [alloc] support.
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use super::*;
/// Owned variant of [Tlv] which is consequently [Clone]able and does not have a lifetime
/// associated to a data slice.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct TlvOwned {
pub(crate) tlv_type_field: TlvTypeField,
pub(crate) data: Vec<u8>,
}
impl TlvOwned {
/// Generic constructor.
pub fn new(tlv_type: TlvType, data: &[u8]) -> Self {
Self {
tlv_type_field: TlvTypeField::Standard(tlv_type),
data: data.to_vec(),
}
}
/// Generic constructor with a custom TLV type.
pub fn new_with_custom_type(tlv_type: u8, data: &[u8]) -> Self {
Self {
tlv_type_field: TlvTypeField::Custom(tlv_type),
data: data.to_vec(),
}
}
/// Creates a TLV with an empty value field.
pub fn new_empty(tlv_type: TlvType) -> Self {
Self {
tlv_type_field: TlvTypeField::Standard(tlv_type),
data: Vec::new(),
}
}
/// Write to a byte slice.
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
generic_len_check_data_serialization(buf, self.data.len(), MIN_TLV_LEN)?;
buf[0] = self.tlv_type_field.into();
buf[1] = self.data.len() as u8;
buf[2..2 + self.data.len()].copy_from_slice(&self.data);
Ok(self.len_written())
}
#[inline]
fn len_written(&self) -> usize {
self.data.len() + 2
}
/// Convert to [Tlv]
pub fn as_tlv(&self) -> Tlv<'_> {
Tlv {
tlv_type_field: self.tlv_type_field,
// The API should ensure that the data length is never to large, so the unwrap for the
// LV creation should never be an issue.
lv: Lv::new(&self.data).expect("lv creation failed unexpectedly"),
}
}
}
impl ReadableTlv for TlvOwned {
#[inline]
fn value(&self) -> &[u8] {
&self.data
}
}
impl WritableTlv for TlvOwned {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.write_to_bytes(buf)
}
#[inline]
fn len_written(&self) -> usize {
self.len_written()
}
}
impl GenericTlv for TlvOwned {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField {
self.tlv_type_field
}
}
impl From<Tlv<'_>> for TlvOwned {
fn from(value: Tlv<'_>) -> Self {
value.to_owned()
}
}
impl PartialEq<Tlv<'_>> for TlvOwned {
fn eq(&self, other: &Tlv) -> bool {
self.tlv_type_field == other.tlv_type_field && self.data == other.value()
}
}
}
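A short `TlvOwned` sketch, assuming placement in this module's tests so that `TlvOwned`, `Tlv`, `TlvType` and the TLV traits are in scope.

```rust
#[test]
fn tlv_owned_sketch() {
    // The owned variant copies the value and therefore needs no lifetime.
    let owned = TlvOwned::new(TlvType::MsgToUser, &[0xAA, 0xBB]);
    assert_eq!(owned.len_value(), 2);
    assert_eq!(owned.len_full(), 4);

    // Conversion back to the borrowed flavour for zero-copy consumers.
    let borrowed = owned.as_tlv();
    assert_eq!(borrowed, owned);

    // WritableTlv behaves the same for both flavours.
    assert_eq!(owned.to_vec(), borrowed.to_vec());
}
```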
/// Entity ID TLV.
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct EntityIdTlv { pub struct EntityIdTlv {
entity_id: UnsignedByteField, entity_id: UnsignedByteField,
} }
impl EntityIdTlv { impl EntityIdTlv {
/// Constructor.
#[inline]
pub fn new(entity_id: UnsignedByteField) -> Self { pub fn new(entity_id: UnsignedByteField) -> Self {
Self { entity_id } Self { entity_id }
} }
@@ -243,18 +414,25 @@ impl EntityIdTlv {
Ok(()) Ok(())
} }
/// Entity ID.
#[inline]
pub fn entity_id(&self) -> &UnsignedByteField { pub fn entity_id(&self) -> &UnsignedByteField {
&self.entity_id &self.entity_id
} }
/// Length of the value field.
#[inline]
pub fn len_value(&self) -> usize { pub fn len_value(&self) -> usize {
self.entity_id.size() self.entity_id.size()
} }
/// Full length of the TLV, including type and length fields.
#[inline]
pub fn len_full(&self) -> usize { pub fn len_full(&self) -> usize {
2 + self.entity_id.size() 2 + self.entity_id.size()
} }
/// Create from a raw bytestream.
pub fn from_bytes(buf: &[u8]) -> Result<Self, TlvLvError> { pub fn from_bytes(buf: &[u8]) -> Result<Self, TlvLvError> {
Self::len_check(buf)?; Self::len_check(buf)?;
verify_tlv_type(buf[0], TlvType::EntityId)?; verify_tlv_type(buf[0], TlvType::EntityId)?;
@@ -268,19 +446,14 @@ impl EntityIdTlv {
} }
/// Convert to a generic [Tlv], which also erases the programmatic type information. /// Convert to a generic [Tlv], which also erases the programmatic type information.
pub fn to_tlv(self, buf: &mut [u8]) -> Result<Tlv, ByteConversionError> { pub fn to_tlv(self, buf: &mut [u8]) -> Result<Tlv<'_>, ByteConversionError> {
Self::len_check(buf)?; Self::len_check(buf)?;
self.entity_id self.entity_id
.write_to_be_bytes(&mut buf[2..2 + self.entity_id.size()])?; .write_to_be_bytes(&mut buf[2..2 + self.entity_id.size()])?;
// Can't fail.
Ok(Tlv::new(TlvType::EntityId, &buf[2..2 + self.entity_id.size()]).unwrap())
}
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> { fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
Self::len_check(buf)?; Self::len_check(buf)?;
buf[0] = TlvType::EntityId as u8; buf[0] = TlvType::EntityId as u8;
@@ -288,35 +461,50 @@ impl WritableTlv for EntityIdTlv {
Ok(2 + self.entity_id.write_to_be_bytes(&mut buf[2..])?) Ok(2 + self.entity_id.write_to_be_bytes(&mut buf[2..])?)
} }
#[inline]
fn len_written(&self) -> usize { fn len_written(&self) -> usize {
self.len_full() self.len_full()
} }
} }
impl WritableTlv for EntityIdTlv {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.write_to_bytes(buf)
}
#[inline]
fn len_written(&self) -> usize {
self.len_written()
}
}
impl GenericTlv for EntityIdTlv { impl GenericTlv for EntityIdTlv {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField { fn tlv_type_field(&self) -> TlvTypeField {
TlvTypeField::Standard(TlvType::EntityId) TlvTypeField::Standard(TlvType::EntityId)
} }
} }
impl TryFrom<Tlv<'_>> for EntityIdTlv {
type Error = TlvLvError;
fn try_from(value: Tlv) -> Result<Self, TlvLvError> {
match value.tlv_type_field {
TlvTypeField::Standard(tlv_type) => {
if tlv_type != TlvType::EntityId {
return Err(InvalidTlvTypeFieldError {
found: tlv_type as u8,
expected: Some(TlvType::EntityId as u8),
}
.into());
}
}
TlvTypeField::Custom(val) => {
return Err(InvalidTlvTypeFieldError {
found: val,
expected: Some(TlvType::EntityId as u8),
}
.into());
}
}
let len_value = value.value().len();
@@ -336,6 +524,8 @@ impl<'data> TryFrom<Tlv<'data>> for EntityIdTlv {
} }
} }
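An `EntityIdTlv` sketch, assuming placement in this module's tests so that `EntityIdTlv`, the `UbfU8` alias used by the tests below and the TLV types are in scope; the conversion from `UbfU8` into the generic `UnsignedByteField` via `into()` is an assumption.

```rust
#[test]
fn entity_id_tlv_sketch() {
    let entity_id = UbfU8::new(5);
    let tlv = EntityIdTlv::new(entity_id.into());
    assert_eq!(tlv.len_value(), 1);
    assert_eq!(tlv.len_full(), 3);

    let mut buf: [u8; 8] = [0; 8];
    let written = tlv.write_to_bytes(&mut buf).expect("writing entity ID TLV failed");
    assert_eq!(written, 3);
    assert_eq!(buf[0], TlvType::EntityId as u8);

    let read_back = EntityIdTlv::from_bytes(&buf).expect("reading entity ID TLV failed");
    assert_eq!(read_back, tlv);
}
```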
/// Does the [FilestoreActionCode] have a second filename?
#[inline]
pub fn fs_request_has_second_filename(action_code: FilestoreActionCode) -> bool { pub fn fs_request_has_second_filename(action_code: FilestoreActionCode) -> bool {
if action_code == FilestoreActionCode::RenameFile if action_code == FilestoreActionCode::RenameFile
|| action_code == FilestoreActionCode::AppendFile || action_code == FilestoreActionCode::AppendFile
@@ -348,6 +538,7 @@ pub fn fs_request_has_second_filename(action_code: FilestoreActionCode) -> bool
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
struct FilestoreTlvBase<'first_name, 'second_name> { struct FilestoreTlvBase<'first_name, 'second_name> {
pub action_code: FilestoreActionCode, pub action_code: FilestoreActionCode,
#[cfg_attr(feature = "serde", serde(borrow))] #[cfg_attr(feature = "serde", serde(borrow))]
@@ -357,6 +548,7 @@ struct FilestoreTlvBase<'first_name, 'second_name> {
} }
impl FilestoreTlvBase<'_, '_> { impl FilestoreTlvBase<'_, '_> {
#[inline]
fn base_len_value(&self) -> usize { fn base_len_value(&self) -> usize {
let mut len = 1 + self.first_name.len_full(); let mut len = 1 + self.first_name.len_full();
if let Some(second_name) = self.second_name { if let Some(second_name) = self.second_name {
@@ -366,6 +558,7 @@ impl FilestoreTlvBase<'_, '_> {
} }
} }
/// Filestore request TLV.
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FilestoreRequestTlv<'first_name, 'second_name> { pub struct FilestoreRequestTlv<'first_name, 'second_name> {
@@ -374,14 +567,17 @@ pub struct FilestoreRequestTlv<'first_name, 'second_name> {
} }
impl<'first_name, 'second_name> FilestoreRequestTlv<'first_name, 'second_name> { impl<'first_name, 'second_name> FilestoreRequestTlv<'first_name, 'second_name> {
/// Constructor for file creation.
pub fn new_create_file(file_name: Lv<'first_name>) -> Result<Self, TlvLvError> { pub fn new_create_file(file_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::CreateFile, file_name, None) Self::new(FilestoreActionCode::CreateFile, file_name, None)
} }
/// Constructor for file deletion.
pub fn new_delete_file(file_name: Lv<'first_name>) -> Result<Self, TlvLvError> { pub fn new_delete_file(file_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::DeleteFile, file_name, None) Self::new(FilestoreActionCode::DeleteFile, file_name, None)
} }
/// Constructor for file renaming.
pub fn new_rename_file( pub fn new_rename_file(
source_name: Lv<'first_name>, source_name: Lv<'first_name>,
target_name: Lv<'second_name>, target_name: Lv<'second_name>,
@@ -421,18 +617,22 @@ impl<'first_name, 'second_name> FilestoreRequestTlv<'first_name, 'second_name> {
) )
} }
/// Constructor for directory creation.
pub fn new_create_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> { pub fn new_create_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::CreateDirectory, dir_name, None) Self::new(FilestoreActionCode::CreateDirectory, dir_name, None)
} }
/// Constructor for directory removal.
pub fn new_remove_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> { pub fn new_remove_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::RemoveDirectory, dir_name, None) Self::new(FilestoreActionCode::RemoveDirectory, dir_name, None)
} }
/// Constructor for file denial.
pub fn new_deny_file(file_name: Lv<'first_name>) -> Result<Self, TlvLvError> { pub fn new_deny_file(file_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::DenyFile, file_name, None) Self::new(FilestoreActionCode::DenyFile, file_name, None)
} }
/// Constructor for directory denial.
pub fn new_deny_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> { pub fn new_deny_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::DenyDirectory, dir_name, None) Self::new(FilestoreActionCode::DenyDirectory, dir_name, None)
} }
@@ -466,26 +666,37 @@ impl<'first_name, 'second_name> FilestoreRequestTlv<'first_name, 'second_name> {
}) })
} }
/// Action code.
#[inline]
pub fn action_code(&self) -> FilestoreActionCode { pub fn action_code(&self) -> FilestoreActionCode {
self.base.action_code self.base.action_code
} }
/// First name as [Lv].
#[inline]
pub fn first_name(&self) -> Lv<'first_name> { pub fn first_name(&self) -> Lv<'first_name> {
self.base.first_name self.base.first_name
} }
/// First name as optional [Lv].
#[inline]
pub fn second_name(&self) -> Option<Lv<'second_name>> { pub fn second_name(&self) -> Option<Lv<'second_name>> {
self.base.second_name self.base.second_name
} }
/// Length of the value field.
#[inline]
pub fn len_value(&self) -> usize { pub fn len_value(&self) -> usize {
self.base.base_len_value() self.base.base_len_value()
} }
/// Full TLV length.
#[inline]
pub fn len_full(&self) -> usize { pub fn len_full(&self) -> usize {
2 + self.len_value() 2 + self.len_value()
} }
/// Construct from a raw bytestream.
pub fn from_bytes<'longest: 'first_name + 'second_name>( pub fn from_bytes<'longest: 'first_name + 'second_name>(
buf: &'longest [u8], buf: &'longest [u8],
) -> Result<Self, TlvLvError> { ) -> Result<Self, TlvLvError> {
@@ -520,9 +731,7 @@ impl<'first_name, 'second_name> FilestoreRequestTlv<'first_name, 'second_name> {
}, },
}) })
}
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> { fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.len_full() { if buf.len() < self.len_full() {
return Err(ByteConversionError::ToSliceTooSmall { return Err(ByteConversionError::ToSliceTooSmall {
@@ -548,19 +757,34 @@ impl WritableTlv for FilestoreRequestTlv<'_, '_> {
Ok(current_idx) Ok(current_idx)
} }
#[inline]
fn len_written(&self) -> usize { fn len_written(&self) -> usize {
self.len_full() self.len_full()
} }
} }
impl WritableTlv for FilestoreRequestTlv<'_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.write_to_bytes(buf)
}
#[inline]
fn len_written(&self) -> usize {
self.len_written()
}
}
impl GenericTlv for FilestoreRequestTlv<'_, '_> { impl GenericTlv for FilestoreRequestTlv<'_, '_> {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField { fn tlv_type_field(&self) -> TlvTypeField {
TlvTypeField::Standard(TlvType::FilestoreRequest) TlvTypeField::Standard(TlvType::FilestoreRequest)
} }
} }
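A filestore request sketch, assuming placement in this module's tests; `Lv::new_from_str` is assumed to be the string-based `Lv` constructor provided by the CFDP LV module.

```rust
#[test]
fn filestore_request_sketch() {
    let file_name = Lv::new_from_str("test.txt").expect("LV creation failed");
    let request =
        FilestoreRequestTlv::new_create_file(file_name).expect("creating request failed");
    assert_eq!(request.action_code(), FilestoreActionCode::CreateFile);
    assert!(request.second_name().is_none());

    let mut buf: [u8; 64] = [0; 64];
    let written = request.write_to_bytes(&mut buf).expect("writing request failed");
    assert_eq!(written, request.len_full());
    assert_eq!(buf[0], TlvType::FilestoreRequest as u8);
}
```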
/// Filestore response TLV.
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct FilestoreResponseTlv<'first_name, 'second_name, 'fs_msg> { pub struct FilestoreResponseTlv<'first_name, 'second_name, 'fs_msg> {
#[cfg_attr(feature = "serde", serde(borrow))] #[cfg_attr(feature = "serde", serde(borrow))]
base: FilestoreTlvBase<'first_name, 'second_name>, base: FilestoreTlvBase<'first_name, 'second_name>,
@@ -589,6 +813,8 @@ impl<'first_name, 'second_name, 'fs_msg> FilestoreResponseTlv<'first_name, 'seco
Lv::new_empty(), Lv::new_empty(),
) )
} }
/// Generic constructor.
pub fn new( pub fn new(
action_code: FilestoreActionCode, action_code: FilestoreActionCode,
status_code: u8, status_code: u8,
@@ -617,6 +843,7 @@ impl<'first_name, 'second_name, 'fs_msg> FilestoreResponseTlv<'first_name, 'seco
}) })
} }
/// Check whether this response has a second filename.
pub fn has_second_filename(action_code: FilestoreActionCode) -> bool { pub fn has_second_filename(action_code: FilestoreActionCode) -> bool {
if action_code == FilestoreActionCode::RenameFile if action_code == FilestoreActionCode::RenameFile
|| action_code == FilestoreActionCode::AppendFile || action_code == FilestoreActionCode::AppendFile
@@ -627,30 +854,43 @@ impl<'first_name, 'second_name, 'fs_msg> FilestoreResponseTlv<'first_name, 'seco
false false
} }
/// Action code.
#[inline]
pub fn action_code(&self) -> FilestoreActionCode { pub fn action_code(&self) -> FilestoreActionCode {
self.base.action_code self.base.action_code
} }
/// Status code.
#[inline]
pub fn status_code(&self) -> u8 { pub fn status_code(&self) -> u8 {
self.status_code self.status_code
} }
/// First name as [Lv].
#[inline]
pub fn first_name(&self) -> Lv<'first_name> { pub fn first_name(&self) -> Lv<'first_name> {
self.base.first_name self.base.first_name
} }
/// Optional second name as [Lv].
#[inline]
pub fn second_name(&self) -> Option<Lv<'second_name>> { pub fn second_name(&self) -> Option<Lv<'second_name>> {
self.base.second_name self.base.second_name
} }
/// Length of the value field.
#[inline]
pub fn len_value(&self) -> usize { pub fn len_value(&self) -> usize {
self.base.base_len_value() + self.filestore_message.len_full() self.base.base_len_value() + self.filestore_message.len_full()
} }
/// Full length of the TLV.
#[inline]
pub fn len_full(&self) -> usize { pub fn len_full(&self) -> usize {
2 + self.len_value() 2 + self.len_value()
} }
/// Construct from a raw bytestream.
pub fn from_bytes<'buf: 'first_name + 'second_name + 'fs_msg>( pub fn from_bytes<'buf: 'first_name + 'second_name + 'fs_msg>(
buf: &'buf [u8], buf: &'buf [u8],
) -> Result<Self, TlvLvError> { ) -> Result<Self, TlvLvError> {
@@ -704,9 +944,7 @@ impl<'first_name, 'second_name, 'fs_msg> FilestoreResponseTlv<'first_name, 'seco
filestore_message, filestore_message,
}) })
}
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> { fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.len_full() { if buf.len() < self.len_full() {
return Err(ByteConversionError::ToSliceTooSmall { return Err(ByteConversionError::ToSliceTooSmall {
@@ -739,12 +977,41 @@ impl WritableTlv for FilestoreResponseTlv<'_, '_, '_> {
} }
} }
impl WritableTlv for FilestoreResponseTlv<'_, '_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.write_to_bytes(buf)
}
#[inline]
fn len_written(&self) -> usize {
self.len_written()
}
}
impl GenericTlv for FilestoreResponseTlv<'_, '_, '_> { impl GenericTlv for FilestoreResponseTlv<'_, '_, '_> {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField { fn tlv_type_field(&self) -> TlvTypeField {
TlvTypeField::Standard(TlvType::FilestoreResponse) TlvTypeField::Standard(TlvType::FilestoreResponse)
} }
} }
pub(crate) fn verify_tlv_type(
raw_type: u8,
expected_tlv_type: TlvType,
) -> Result<(), InvalidTlvTypeFieldError> {
let tlv_type = TlvType::try_from(raw_type).map_err(|_| InvalidTlvTypeFieldError {
found: raw_type,
expected: Some(expected_tlv_type.into()),
})?;
if tlv_type != expected_tlv_type {
return Err(InvalidTlvTypeFieldError {
found: tlv_type as u8,
expected: Some(expected_tlv_type as u8),
});
}
Ok(())
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@@ -932,14 +1199,14 @@ mod tests {
let tlv_res = Tlv::new(TlvType::MsgToUser, &buf_too_large);
assert!(tlv_res.is_err());
let error = tlv_res.unwrap_err();
match error {
TlvLvDataTooLargeError(size) => {
assert_eq!(size, u8::MAX as usize + 1);
assert_eq!(
error.to_string(),
"data with size 256 larger than allowed 255 bytes"
);
}
}
}
@@ -1249,7 +1516,8 @@ mod tests {
let error = EntityIdTlv::try_from(msg_to_user_tlv); let error = EntityIdTlv::try_from(msg_to_user_tlv);
assert!(error.is_err()); assert!(error.is_err());
let error = error.unwrap_err(); let error = error.unwrap_err();
if let TlvLvError::InvalidTlvTypeField(InvalidTlvTypeFieldError { found, expected }) = error
{
assert_eq!(found, TlvType::MsgToUser as u8); assert_eq!(found, TlvType::MsgToUser as u8);
assert_eq!(expected, Some(TlvType::EntityId as u8)); assert_eq!(expected, Some(TlvType::EntityId as u8));
assert_eq!( assert_eq!(
@@ -1293,4 +1561,71 @@ mod tests {
assert_eq!(tlv_as_vec[0], 20); assert_eq!(tlv_as_vec[0], 20);
assert_eq!(tlv_as_vec[1], 0); assert_eq!(tlv_as_vec[1], 0);
} }
#[test]
fn test_tlv_to_owned() {
let entity_id = UbfU8::new(5);
let mut buf: [u8; 4] = [0; 4];
assert!(entity_id.write_to_be_bytes(&mut buf).is_ok());
let tlv_res = Tlv::new(TlvType::EntityId, &buf[0..1]);
assert!(tlv_res.is_ok());
let tlv_res = tlv_res.unwrap();
let tlv_owned = tlv_res.to_owned();
assert_eq!(tlv_res, tlv_owned);
let tlv_owned_from_conversion: TlvOwned = tlv_res.into();
assert_eq!(tlv_owned_from_conversion, tlv_owned);
assert_eq!(tlv_owned_from_conversion, tlv_res);
}
#[test]
fn test_owned_tlv() {
let entity_id = UbfU8::new(5);
let mut buf: [u8; 4] = [0; 4];
assert!(entity_id.write_to_be_bytes(&mut buf).is_ok());
let tlv_res = TlvOwned::new(TlvType::EntityId, &buf[0..1]);
assert_eq!(
tlv_res.tlv_type_field(),
TlvTypeField::Standard(TlvType::EntityId)
);
assert_eq!(tlv_res.len_full(), 3);
assert_eq!(tlv_res.value().len(), 1);
assert_eq!(tlv_res.len_value(), 1);
assert!(!tlv_res.is_empty());
assert_eq!(tlv_res.value()[0], 5);
}
#[test]
fn test_owned_tlv_empty() {
let tlv_res = TlvOwned::new_empty(TlvType::FlowLabel);
assert_eq!(
tlv_res.tlv_type_field(),
TlvTypeField::Standard(TlvType::FlowLabel)
);
assert_eq!(tlv_res.len_full(), 2);
assert_eq!(tlv_res.value().len(), 0);
assert_eq!(tlv_res.len_value(), 0);
assert!(tlv_res.is_empty());
}
#[test]
fn test_owned_tlv_custom_type() {
let tlv_res = TlvOwned::new_with_custom_type(32, &[]);
assert_eq!(tlv_res.tlv_type_field(), TlvTypeField::Custom(32));
assert_eq!(tlv_res.len_full(), 2);
assert_eq!(tlv_res.value().len(), 0);
assert_eq!(tlv_res.len_value(), 0);
assert!(tlv_res.is_empty());
}
#[test]
fn test_owned_tlv_conversion_to_bytes() {
let entity_id = UbfU8::new(5);
let mut buf: [u8; 4] = [0; 4];
assert!(entity_id.write_to_be_bytes(&mut buf).is_ok());
let tlv_res = Tlv::new(TlvType::EntityId, &buf[0..1]);
assert!(tlv_res.is_ok());
let tlv_res = tlv_res.unwrap();
let tlv_owned_from_conversion: TlvOwned = tlv_res.into();
assert_eq!(tlv_res.to_vec(), tlv_owned_from_conversion.to_vec());
}
} }

View File

@@ -1,16 +1,23 @@
//! Abstractions for the Message to User CFDP TLV subtype. //! Abstractions for the Message to User CFDP TLV subtype.
#[cfg(feature = "alloc")]
use super::TlvOwned;
use super::{GenericTlv, ReadableTlv, Tlv, TlvLvError, TlvType, TlvTypeField, WritableTlv};
use crate::{
cfdp::{InvalidTlvTypeFieldError, TlvLvDataTooLargeError},
ByteConversionError,
};
use delegate::delegate; use delegate::delegate;
/// Message To User TLV structure.
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct MsgToUserTlv<'data> { pub struct MsgToUserTlv<'data> {
/// Wrapped generic TLV structure.
pub tlv: Tlv<'data>, pub tlv: Tlv<'data>,
} }
impl<'data> MsgToUserTlv<'data> { impl<'data> MsgToUserTlv<'data> {
/// Create a new message to user TLV where the type field is set correctly. /// Create a new message to user TLV where the type field is set correctly.
pub fn new(value: &'data [u8]) -> Result<MsgToUserTlv<'data>, TlvLvDataTooLargeError> {
Ok(Self { Ok(Self {
tlv: Tlv::new(TlvType::MsgToUser, value)?, tlv: Tlv::new(TlvType::MsgToUser, value)?,
}) })
@@ -18,7 +25,9 @@ impl<'data> MsgToUserTlv<'data> {
delegate! { delegate! {
to self.tlv { to self.tlv {
/// Value field of the TLV.
pub fn value(&self) -> &[u8]; pub fn value(&self) -> &[u8];
/// Helper method to retrieve the length of the value. Simply calls the [slice::len] method of /// Helper method to retrieve the length of the value. Simply calls the [slice::len] method of
/// [Self::value] /// [Self::value]
pub fn len_value(&self) -> usize; pub fn len_value(&self) -> usize;
@@ -32,12 +41,16 @@ impl<'data> MsgToUserTlv<'data> {
} }
} }
/// Is this a standard TLV?
#[inline]
pub fn is_standard_tlv(&self) -> bool { pub fn is_standard_tlv(&self) -> bool {
true true
} }
/// TLV type field.
#[inline]
pub fn tlv_type(&self) -> TlvType {
TlvType::MsgToUser
}
/// Check whether this message is a reserved CFDP message like a Proxy Operation Message. /// Check whether this message is a reserved CFDP message like a Proxy Operation Message.
@@ -60,36 +73,68 @@ impl<'data> MsgToUserTlv<'data> {
match msg_to_user.tlv.tlv_type_field() { match msg_to_user.tlv.tlv_type_field() {
TlvTypeField::Standard(tlv_type) => { TlvTypeField::Standard(tlv_type) => {
if tlv_type != TlvType::MsgToUser { if tlv_type != TlvType::MsgToUser {
return Err(InvalidTlvTypeFieldError {
found: tlv_type as u8,
expected: Some(TlvType::MsgToUser as u8),
}
.into());
}
}
TlvTypeField::Custom(raw) => {
return Err(InvalidTlvTypeFieldError {
found: raw,
expected: Some(TlvType::MsgToUser as u8),
}
.into());
}
}
Ok(msg_to_user)
}
/// Convert to a generic [Tlv].
#[inline]
pub fn to_tlv(&self) -> Tlv<'data> {
self.tlv
}
/// Convert to an [TlvOwned].
#[cfg(feature = "alloc")]
pub fn to_owned(&self) -> TlvOwned {
self.tlv.to_owned()
}
#[inline]
fn len_written(&self) -> usize { fn len_written(&self) -> usize {
self.len_full() self.len_full()
} }
delegate!( delegate!(
to self.tlv { to self.tlv {
/// Write the TLV to a byte buffer.
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
} }
); );
} }
impl<'a> From<MsgToUserTlv<'a>> for Tlv<'a> {
fn from(value: MsgToUserTlv<'a>) -> Tlv<'a> {
value.to_tlv()
}
}
impl WritableTlv for MsgToUserTlv<'_> {
#[inline]
fn len_written(&self) -> usize {
self.len_written()
}
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.tlv.write_to_bytes(buf)
}
}
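A message-to-user sketch, assuming placement in this module's tests. It exercises the conversions added above: the type reduction to a generic `Tlv` and the owned variant.

```rust
#[test]
fn msg_to_user_sketch() {
    let payload = [1, 2, 3, 4];
    let msg = MsgToUserTlv::new(&payload).expect("creating message to user TLV failed");
    assert!(msg.is_standard_tlv());
    assert_eq!(msg.tlv_type(), TlvType::MsgToUser);
    assert_eq!(msg.len_value(), 4);

    // Lossy "type reduction" to the generic TLV representation.
    let generic: Tlv = msg.to_tlv();
    assert_eq!(generic.value(), payload);

    // Owned variant for cases where the borrowed payload does not live long enough.
    let owned = msg.to_owned();
    assert_eq!(owned.value(), payload);
}
```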
impl GenericTlv for MsgToUserTlv<'_> { impl GenericTlv for MsgToUserTlv<'_> {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField { fn tlv_type_field(&self) -> TlvTypeField {
self.tlv.tlv_type_field() self.tlv.tlv_type_field()
} }
@@ -106,7 +151,7 @@ mod tests {
assert!(msg_to_user.is_ok()); assert!(msg_to_user.is_ok());
let msg_to_user = msg_to_user.unwrap(); let msg_to_user = msg_to_user.unwrap();
assert!(msg_to_user.is_standard_tlv()); assert!(msg_to_user.is_standard_tlv());
assert_eq!(msg_to_user.tlv_type(), TlvType::MsgToUser);
assert_eq!( assert_eq!(
msg_to_user.tlv_type_field(), msg_to_user.tlv_type_field(),
TlvTypeField::Standard(TlvType::MsgToUser) TlvTypeField::Standard(TlvType::MsgToUser)
@@ -139,6 +184,40 @@ mod tests {
); );
} }
#[test]
fn test_msg_to_user_type_reduction() {
let custom_value: [u8; 4] = [1, 2, 3, 4];
let msg_to_user = MsgToUserTlv::new(&custom_value).unwrap();
let tlv = msg_to_user.to_tlv();
assert_eq!(
tlv.tlv_type_field(),
TlvTypeField::Standard(TlvType::MsgToUser)
);
assert_eq!(tlv.value(), custom_value);
}
#[test]
fn test_msg_to_user_to_tlv() {
let custom_value: [u8; 4] = [1, 2, 3, 4];
let msg_to_user = MsgToUserTlv::new(&custom_value).unwrap();
let tlv: Tlv = msg_to_user.into();
assert_eq!(msg_to_user.to_tlv(), tlv);
}
#[test]
fn test_msg_to_user_owner_converter() {
let custom_value: [u8; 4] = [1, 2, 3, 4];
let msg_to_user = MsgToUserTlv::new(&custom_value).unwrap();
let tlv = msg_to_user.to_owned();
assert_eq!(
tlv.tlv_type_field(),
TlvTypeField::Standard(TlvType::MsgToUser)
);
assert_eq!(tlv.value(), custom_value);
}
#[test] #[test]
fn test_reserved_msg_deserialization() { fn test_reserved_msg_deserialization() {
let custom_value: [u8; 3] = [1, 2, 3]; let custom_value: [u8; 3] = [1, 2, 3];
@@ -154,9 +233,9 @@ mod tests {
fn test_reserved_msg_deserialization_invalid_type() { fn test_reserved_msg_deserialization_invalid_type() {
let trash: [u8; 5] = [TlvType::FlowLabel as u8, 3, 1, 2, 3]; let trash: [u8; 5] = [TlvType::FlowLabel as u8, 3, 1, 2, 3];
let error = MsgToUserTlv::from_bytes(&trash).unwrap_err(); let error = MsgToUserTlv::from_bytes(&trash).unwrap_err();
if let TlvLvError::InvalidTlvTypeField(inner) = error {
assert_eq!(inner.found, TlvType::FlowLabel as u8);
assert_eq!(inner.expected, Some(TlvType::MsgToUser as u8));
} else { } else {
panic!("Wrong error type returned: {:?}", error); panic!("Wrong error type returned: {:?}", error);
} }

src/crc.rs (new file, 15 lines)
View File

@@ -0,0 +1,15 @@
//! # CRC checksum support.
//!
//! Thin wrapper around the [crc] crate.
/// CRC algorithm used by the PUS standard, the CCSDS TC standard and the CFDP standard, using
/// a [crc::NoTable] as the CRC implementation.
pub const CRC_CCITT_FALSE_NO_TABLE: crc::Crc<u16, crc::NoTable> =
crc::Crc::<u16, crc::NoTable>::new(&crc::CRC_16_IBM_3740);
/// CRC algorithm used by the PUS standard, the CCSDS TC standard and the CFDP standard, using
/// [crc::Table<1>] as the CRC implementation.
pub const CRC_CCITT_FALSE: crc::Crc<u16> = crc::Crc::<u16>::new(&crc::CRC_16_IBM_3740);
/// CRC algorithm used by the PUS standard, the CCSDS TC standard and the CFDP standard, using
/// a [crc::Table<16>] large table as the CRC implementation.
pub const CRC_CCITT_FALSE_BIG_TABLE: crc::Crc<u16, crc::Table<16>> =
crc::Crc::<u16, crc::Table<16>>::new(&crc::CRC_16_IBM_3740);
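A usage sketch for the three constants, assuming it sits next to them. All three implement CRC-16/IBM-3740 (CCITT-FALSE) and only differ in their lookup-table size/speed trade-off, so their results are identical; the zero-residue property in the last assertion is what the verification helpers in the ECSS module rely on.

```rust
#[test]
fn crc_variants_sketch() {
    let data = [0x01, 0x02, 0x03, 0x04];
    let with_table = CRC_CCITT_FALSE.checksum(&data);
    assert_eq!(with_table, CRC_CCITT_FALSE_NO_TABLE.checksum(&data));
    assert_eq!(with_table, CRC_CCITT_FALSE_BIG_TABLE.checksum(&data));

    // Typical packet usage: append the checksum, then verify by checking that
    // the CRC over data plus checksum is zero.
    let mut framed = data.to_vec();
    framed.extend_from_slice(&with_table.to_be_bytes());
    assert_eq!(CRC_CCITT_FALSE.checksum(&framed), 0);
}
```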

View File

@@ -3,17 +3,26 @@ use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
/// Event service subtype ID.
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)] #[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)] #[repr(u8)]
pub enum MessageSubtypeId {
/// Telemetry - Info report.
TmInfoReport = 1, TmInfoReport = 1,
/// Telemetry - Low severity report.
TmLowSeverityReport = 2, TmLowSeverityReport = 2,
/// Telemetry - Medium severity report.
TmMediumSeverityReport = 3, TmMediumSeverityReport = 3,
/// Telemetry - High severity report.
TmHighSeverityReport = 4, TmHighSeverityReport = 4,
/// Telecommand - Enable event generation.
TcEnableEventGeneration = 5, TcEnableEventGeneration = 5,
/// Telecommand - Disable event generation.
TcDisableEventGeneration = 6, TcDisableEventGeneration = 6,
/// Telecommand - Report disabled list.
TcReportDisabledList = 7, TcReportDisabledList = 7,
/// Telemetry - Disabled events report.
TmDisabledEventsReport = 8, TmDisabledEventsReport = 8,
} }
@@ -23,19 +32,19 @@ mod tests {
#[test] #[test]
fn test_conv_into_u8() { fn test_conv_into_u8() {
let subservice: u8 = MessageSubtypeId::TmLowSeverityReport.into();
assert_eq!(subservice, 2); assert_eq!(subservice, 2);
} }
#[test] #[test]
fn test_conv_from_u8() { fn test_conv_from_u8() {
let subservice: MessageSubtypeId = 2.try_into().unwrap();
assert_eq!(subservice, MessageSubtypeId::TmLowSeverityReport);
} }
#[test] #[test]
fn test_conv_fails() { fn test_conv_fails() {
let conversion = MessageSubtypeId::try_from(9);
assert!(conversion.is_err()); assert!(conversion.is_err());
let err = conversion.unwrap_err(); let err = conversion.unwrap_err();
assert_eq!(err.number, 9); assert_eq!(err.number, 9);

View File

@@ -3,30 +3,49 @@ use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
/// Housekeeping service subtype ID.
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)] #[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum MessageSubtypeId {
// Regular HK // Regular HK
/// Telecommand - Create Housekeeping Report Structure.
TcCreateHkReportStructure = 1, TcCreateHkReportStructure = 1,
/// Telecommand - Delete HK report structures.
TcDeleteHkReportStructures = 3, TcDeleteHkReportStructures = 3,
/// Telecommand - Enable HK generation.
TcEnableHkGeneration = 5, TcEnableHkGeneration = 5,
/// Telecommand - Disable HK generation.
TcDisableHkGeneration = 6, TcDisableHkGeneration = 6,
/// Telecommand - Report HK report structures.
TcReportHkReportStructures = 9, TcReportHkReportStructures = 9,
/// Telemetry - HK report.
TmHkPacket = 25, TmHkPacket = 25,
/// Telecommand - Generate one-shot report.
TcGenerateOneShotHk = 27, TcGenerateOneShotHk = 27,
/// Telecommand - Modify collection interval.
TcModifyHkCollectionInterval = 31, TcModifyHkCollectionInterval = 31,
/// Telecommand - Create diagnostics report structures.
TcCreateDiagReportStructure = 2, TcCreateDiagReportStructure = 2,
/// Telecommand - Delete diagnostics report structures.
TcDeleteDiagReportStructures = 4, TcDeleteDiagReportStructures = 4,
/// Telecommand - Enable diagnostics generation.
TcEnableDiagGeneration = 7, TcEnableDiagGeneration = 7,
/// Telecommand - Disable diagnostics generation.
TcDisableDiagGeneration = 8, TcDisableDiagGeneration = 8,
/// Telemetry - HK structures report.
TmHkStructuresReport = 10, TmHkStructuresReport = 10,
/// Telecommand - Report diagnostics report structures.
TcReportDiagReportStructures = 11, TcReportDiagReportStructures = 11,
/// Telemetry - Diagnostics report structures.
TmDiagStructuresReport = 12, TmDiagStructuresReport = 12,
/// Telemetry - Diagnostics packet.
TmDiagPacket = 26, TmDiagPacket = 26,
/// Telecommand - Generate one-shot diagnostics report.
TcGenerateOneShotDiag = 28, TcGenerateOneShotDiag = 28,
/// Telecommand - Modify diagnostics interval report.
TcModifyDiagCollectionInterval = 32, TcModifyDiagCollectionInterval = 32,
} }
@@ -36,25 +55,26 @@ mod tests {
#[test] #[test]
fn test_try_from_u8() { fn test_try_from_u8() {
let hk_report_subservice_raw = 25; let hk_report_subservice_raw = 25;
let hk_report: MessageSubtypeId =
MessageSubtypeId::try_from(hk_report_subservice_raw).unwrap();
assert_eq!(hk_report, MessageSubtypeId::TmHkPacket);
} }
#[test] #[test]
fn test_into_u8() { fn test_into_u8() {
let hk_report_raw: u8 = MessageSubtypeId::TmHkPacket.into();
assert_eq!(hk_report_raw, 25); assert_eq!(hk_report_raw, 25);
} }
#[test] #[test]
fn test_partial_eq() { fn test_partial_eq() {
let hk_report_raw = MessageSubtypeId::TmHkPacket;
assert_ne!(hk_report_raw, MessageSubtypeId::TcGenerateOneShotHk);
assert_eq!(hk_report_raw, MessageSubtypeId::TmHkPacket);
} }
#[test] #[test]
fn test_copy_clone() { fn test_copy_clone() {
let hk_report = MessageSubtypeId::TmHkPacket;
let hk_report_copy = hk_report; let hk_report_copy = hk_report;
assert_eq!(hk_report, hk_report_copy); assert_eq!(hk_report, hk_report_copy);
} }

View File

@@ -1,96 +1,107 @@
//! Common definitions and helpers required to create PUS TMTC packets according to //! Common definitions and helpers required to create PUS TMTC packets according to
//! [ECSS-E-ST-70-41C](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/) //! [ECSS-E-ST-70-41C](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/)
//! //!
//! You can find the PUS telecommand types in the [tc] module and the PUS telemetry
//! types inside the [tm] module.
use crate::{
crc::{CRC_CCITT_FALSE, CRC_CCITT_FALSE_NO_TABLE},
ByteConversionError, CcsdsPacket,
};
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
use alloc::vec::Vec; use alloc::vec::Vec;
use arbitrary_int::u4;
use core::fmt::Debug;
use core::mem::size_of; use core::mem::size_of;
use num_enum::{IntoPrimitive, TryFromPrimitive}; use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
pub mod event; pub mod event;
pub mod hk; pub mod hk;
pub mod scheduling; pub mod scheduling;
pub mod tc; pub mod tc;
pub mod tc_pus_a;
pub mod tm; pub mod tm;
pub mod tm_pus_a;
pub mod verification; pub mod verification;
/// Type alias for the CRC16 type.
pub type CrcType = u16; pub type CrcType = u16;
pub const CCSDS_HEADER_LEN: usize = size_of::<crate::zc::SpHeader>();
/// Standard PUS service IDs.
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)] #[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
#[non_exhaustive] #[non_exhaustive]
pub enum PusServiceId {
/// Service 1 Verification
Verification = 1,
/// Service 2 Device Access
DeviceAccess = 2,
/// Service 3 Housekeeping
Housekeeping = 3,
/// Service 4 Parameter Statistics
ParameterStatistics = 4,
/// Service 5 Event
Event = 5,
/// Service 6 Memory Management
MemoryManagement = 6,
/// Service 8 Action
Action = 8,
/// Service 9 Time Management
TimeManagement = 9,
/// Service 11 Scheduling
Scheduling = 11,
/// Service 12 On-Board Monitoring
OnBoardMonitoring = 12,
/// Service 13 Large Packet Transfer
LargePacketTransfer = 13,
/// Service 14 Real-Time Forwarding Control
RealTimeForwardingControl = 14,
/// Service 15 Storage And Retrival
StorageAndRetrival = 15,
/// Service 17 Test
Test = 17,
/// Service 18 Operations And Procedures
OpsAndProcedures = 18,
/// Service 19 Event Action
EventAction = 19,
/// Service 20 Parameter
Parameter = 20,
/// Service 21 Request Sequencing
RequestSequencing = 21,
/// Service 22 Position Based Scheduling
PositionBasedScheduling = 22,
/// Service 23 File Management
FileManagement = 23,
} }
/// All PUS versions. Only PUS C is supported by this library. /// All PUS versions. Only PUS C is supported by this library.
#[derive(PartialEq, Eq, Debug, num_enum::TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u4, exhaustive = false)]
#[repr(u8)]
#[non_exhaustive] #[non_exhaustive]
pub enum PusVersion { pub enum PusVersion {
/// ESA PUS
EsaPus = 0, EsaPus = 0,
/// PUS A
PusA = 1, PusA = 1,
/// PUS C
PusC = 2, PusC = 2,
Invalid = 0b1111,
} }
impl TryFrom<u4> for PusVersion {
type Error = u4;
fn try_from(value: u4) -> Result<Self, Self::Error> {
match value {
x if x == PusVersion::EsaPus.raw_value() => Ok(PusVersion::EsaPus),
x if x == PusVersion::PusA.raw_value() => Ok(PusVersion::PusA),
x if x == PusVersion::PusC.raw_value() => Ok(PusVersion::PusC),
_ => Err(value),
} }
} }
} }
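The version field of the PUS secondary header is only 4 bits wide, which is why the fallible conversion above now takes and returns an `arbitrary_int::u4`. A small sketch, assuming it is placed in this module so `PusVersion` and `u4` are in scope.

```rust
#[test]
fn pus_version_conversion_sketch() {
    // Known versions convert cleanly.
    assert_eq!(PusVersion::try_from(u4::new(2)), Ok(PusVersion::PusC));
    // Unknown raw values are handed back to the caller unchanged.
    assert_eq!(PusVersion::try_from(u4::new(7)), Err(u4::new(7)));
}
```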
@@ -98,44 +109,70 @@ impl TryFrom<u8> for PusVersion {
/// ECSS Packet Type Codes (PTC)s. /// ECSS Packet Type Codes (PTC)s.
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)] #[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum PacketTypeCodes { pub enum PacketTypeCodes {
/// Boolean.
Boolean = 1, Boolean = 1,
/// Enumerated.
Enumerated = 2, Enumerated = 2,
/// Unsigned Integer.
UnsignedInt = 3, UnsignedInt = 3,
/// Signed Integer.
SignedInt = 4, SignedInt = 4,
/// Real (floating point).
Real = 5, Real = 5,
/// Bit string.
BitString = 6, BitString = 6,
/// Octet (byte) string.
OctetString = 7, OctetString = 7,
/// Character string.
CharString = 8, CharString = 8,
/// Absolute time.
AbsoluteTime = 9, AbsoluteTime = 9,
/// Relative time.
RelativeTime = 10, RelativeTime = 10,
/// Deduced.
Deduced = 11, Deduced = 11,
/// Packet.
Packet = 12, Packet = 12,
} }
/// Type alias for the ECSS Packet Type Codes (PTC)s.
pub type Ptc = PacketTypeCodes; pub type Ptc = PacketTypeCodes;
/// ECSS Packet Field Codes (PFC)s for the unsigned [Ptc]. /// ECSS Packet Field Codes (PFC)s for the unsigned [Ptc].
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)] #[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum PfcUnsigned { pub enum PfcUnsigned {
/// 1 byte.
OneByte = 4, OneByte = 4,
/// 12 bits.
TwelveBits = 8, TwelveBits = 8,
/// 2 bytes.
TwoBytes = 12, TwoBytes = 12,
/// 3 bytes.
ThreeBytes = 13, ThreeBytes = 13,
/// 4 bytes.
FourBytes = 14, FourBytes = 14,
/// 6 bytes.
SixBytes = 15, SixBytes = 15,
/// 8 bytes.
EightBytes = 16, EightBytes = 16,
/// 1 bit.
OneBit = 17, OneBit = 17,
/// 2 bits.
TwoBits = 18, TwoBits = 18,
/// 3 bits.
ThreeBits = 19, ThreeBits = 19,
} }
/// ECSS Packet Field Codes (PFC)s for the real (floating point) [Ptc]. /// ECSS Packet Field Codes (PFC)s for the real (floating point) [Ptc].
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)] #[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum PfcReal { pub enum PfcReal {
/// 4 octets simple precision format (IEEE) /// 4 octets simple precision format (IEEE)
@@ -148,70 +185,84 @@ pub enum PfcReal {
DoubleMilStd = 4, DoubleMilStd = 4,
} }
/// Generic PUS error.
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum PusError {
/// PUS version is not supported.
#[error("PUS version {0:?} not supported")]
VersionNotSupported(u4),
/// Checksum failure.
#[error("checksum verification for crc16 {0:#06x} failed")]
ChecksumFailure(u16),
/// CRC16 needs to be calculated first
//#[error("crc16 was not calculated")]
//CrcCalculationMissing,
#[error("pus error: {0}")]
ByteConversion(#[from] ByteConversionError),
}
/// Message type ID field.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct MessageTypeId {
/// Service type ID.
pub type_id: u8,
/// Subtype ID.
pub subtype_id: u8,
}
impl MessageTypeId {
/// Generic constructor.
pub const fn new(type_id: u8, subtype_id: u8) -> Self {
Self {
type_id,
subtype_id,
}
}
}
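PUS packets are identified by a (service type, message subtype) pair, for example TC[17,1] for the test service ping command. A minimal sketch of the new `MessageTypeId`, assuming it is placed in this module so `MessageTypeId` and `PusServiceId` are in scope.

```rust
#[test]
fn message_type_id_sketch() {
    let ping_tc = MessageTypeId::new(PusServiceId::Test as u8, 1);
    assert_eq!(ping_tc.type_id, 17);
    assert_eq!(ping_tc.subtype_id, 1);
    assert_eq!(ping_tc, MessageTypeId::new(17, 1));
}
```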
/// Generic trait to describe common attributes for both PUS Telecommands (TC) and PUS Telemetry /// Generic trait to describe common attributes for both PUS Telecommands (TC) and PUS Telemetry
/// (TM) packets. All PUS packets are also a special type of [CcsdsPacket]s. /// (TM) packets. All PUS packets are also a special type of [CcsdsPacket]s.
pub trait PusPacket: CcsdsPacket { pub trait PusPacket: CcsdsPacket {
/// PUS version.
fn pus_version(&self) -> Result<PusVersion, u4>;
/// Message type ID.
fn message_type_id(&self) -> MessageTypeId;
/// Service type ID.
#[inline]
fn service_type_id(&self) -> u8 {
self.message_type_id().type_id
}
/// Message subtype ID.
#[inline]
fn message_subtype_id(&self) -> u8 {
self.message_type_id().subtype_id
}
/// User data field.
fn user_data(&self) -> &[u8];
/// CRC-16-CCITT checksum.
fn checksum(&self) -> Option<u16>;
/// The presence of the CRC-16-CCITT checksum is optional.
fn has_checksum(&self) -> bool {
self.checksum().is_some()
}
} }
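Because `service_type_id`, `message_subtype_id` and `has_checksum` are provided methods, downstream code only has to implement `pus_version`, `message_type_id`, `user_data` and `checksum`. A generic consumer could look like the following sketch; `describe_pus_packet` is a hypothetical helper, not part of the crate.

```rust
/// Hypothetical generic consumer relying only on the PusPacket trait above.
fn describe_pus_packet<P: PusPacket>(packet: &P) -> (u8, u8, bool) {
    (
        packet.service_type_id(),
        packet.message_subtype_id(),
        // The CRC16 is optional; has_checksum() reports whether one is present.
        packet.has_checksum(),
    )
}
```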
pub(crate) fn crc_from_raw_data(raw_data: &[u8]) -> Result<u16, ByteConversionError> {
if raw_data.len() < 2 {
return Err(ByteConversionError::FromSliceTooSmall {
found: raw_data.len(),
expected: 2,
});
}
Ok(u16::from_be_bytes( Ok(u16::from_be_bytes(
raw_data[raw_data.len() - 2..raw_data.len()] raw_data[raw_data.len() - 2..raw_data.len()]
@@ -226,49 +277,48 @@ pub(crate) fn calc_pus_crc16(bytes: &[u8]) -> u16 {
    digest.finalize()
}

pub(crate) fn user_data_from_raw(
    current_idx: usize,
    total_len: usize,
    slice: &[u8],
    has_checksum: bool,
) -> Result<&[u8], ByteConversionError> {
    if has_checksum {
        if current_idx > total_len - 2 {
            return Err(ByteConversionError::FromSliceTooSmall {
                found: total_len - 2,
                expected: current_idx,
            });
        }
        Ok(&slice[current_idx..total_len - 2])
    } else {
        Ok(&slice[current_idx..total_len])
    }
}

/// Verify the CRC16 of a raw packet and return a [PusError] on failure.
pub fn verify_crc16_ccitt_false_from_raw_to_pus_error(
    raw_data: &[u8],
    crc16: u16,
) -> Result<(), PusError> {
    verify_crc16_ccitt_false_from_raw(raw_data)
        .then_some(())
        .ok_or(PusError::ChecksumFailure(crc16))
}

/// Verify the CRC16 of a raw packet using a table-less implementation and return a [PusError] on
/// failure.
pub fn verify_crc16_ccitt_false_from_raw_to_pus_error_no_table(
    raw_data: &[u8],
    crc16: u16,
) -> Result<(), PusError> {
    verify_crc16_ccitt_false_from_raw_no_table(raw_data)
        .then_some(())
        .ok_or(PusError::ChecksumFailure(crc16))
}

/// Verify the CRC16 of a raw packet.
pub fn verify_crc16_ccitt_false_from_raw(raw_data: &[u8]) -> bool {
    let mut digest = CRC_CCITT_FALSE.digest();
    digest.update(raw_data);
    if digest.finalize() == 0 {
@@ -277,29 +327,35 @@ pub(crate) fn verify_crc16_ccitt_false_from_raw(raw_data: &[u8]) -> bool {
    false
}

/// Verify the CRC16 of a raw packet, using the table-less implementation.
pub fn verify_crc16_ccitt_false_from_raw_no_table(raw_data: &[u8]) -> bool {
    let mut digest = CRC_CCITT_FALSE_NO_TABLE.digest();
    digest.update(raw_data);
    if digest.finalize() == 0 {
        return true;
    }
    false
}

macro_rules! sp_header_impls {
    () => {
        delegate!(to self.sp_header {
            /// Set the CCSDS APID.
            #[inline]
            pub fn set_apid(&mut self, apid: u11);

            /// Set the CCSDS sequence count.
            #[inline]
            pub fn set_seq_count(&mut self, seq_count: u14);

            /// Set the CCSDS sequence flags.
            #[inline]
            pub fn set_seq_flags(&mut self, seq_flag: SequenceFlags);
        });
    }
}

use crate::util::{GenericUnsignedByteField, ToBeBytes, UnsignedEnum};

pub(crate) use sp_header_impls;
/// Generic trait for ECSS enumeration which consist of a PFC field denoting their bit length
@@ -311,58 +367,132 @@ pub trait EcssEnumeration: UnsignedEnum {
    fn pfc(&self) -> u8;
}

/// Extension trait for [EcssEnumeration] which adds common trait bounds.
pub trait EcssEnumerationExt: EcssEnumeration + Debug + Copy + Clone + PartialEq + Eq {}

/// ECSS enumerated type wrapper.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct GenericEcssEnumWrapper<TYPE: Copy + Into<u64>>(GenericUnsignedByteField<TYPE>);

impl<TYPE: Copy + Into<u64>> GenericEcssEnumWrapper<TYPE> {
    /// Returns [PacketTypeCodes::Enumerated].
    pub const fn ptc() -> PacketTypeCodes {
        PacketTypeCodes::Enumerated
    }

    /// Value.
    pub const fn value(&self) -> TYPE {
        self.0.value()
    }

    /// Generic constructor.
    pub const fn new(val: TYPE) -> Self {
        Self(GenericUnsignedByteField::new(val))
    }
}

impl<TYPE: Copy + ToBeBytes + Into<u64>> UnsignedEnum for GenericEcssEnumWrapper<TYPE> {
    fn size(&self) -> usize {
        (self.pfc() / 8) as usize
    }

    fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
        self.0.write_to_be_bytes(buf)
    }

    fn value_raw(&self) -> u64 {
        self.0.value().into()
    }
}

impl<TYPE: Copy + ToBeBytes + Into<u64>> EcssEnumeration for GenericEcssEnumWrapper<TYPE> {
    fn pfc(&self) -> u8 {
        size_of::<TYPE>() as u8 * 8_u8
    }
}

impl<TYPE: Debug + Copy + Clone + PartialEq + Eq + ToBeBytes + Into<u64>> EcssEnumerationExt
    for GenericEcssEnumWrapper<TYPE>
{
}

impl<T: Copy + Into<u64>> From<T> for GenericEcssEnumWrapper<T> {
    fn from(value: T) -> Self {
        Self::new(value)
    }
}

macro_rules! generic_ecss_enum_typedefs_and_from_impls {
    ($($ty:ty => $Enum:ident),*) => {
        $(
            /// Type alias for ECSS enumeration wrapper around `$ty`
            pub type $Enum = GenericEcssEnumWrapper<$ty>;

            impl From<$Enum> for $ty {
                fn from(value: $Enum) -> Self {
                    value.value()
                }
            }
        )*
    };
}

// Generates EcssEnum<$TY> type definitions as well as a From<$TY> for EcssEnum<$TY>
// implementation.
generic_ecss_enum_typedefs_and_from_impls! {
    u8 => EcssEnumU8,
    u16 => EcssEnumU16,
    u32 => EcssEnumU32,
    u64 => EcssEnumU64
}
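An illustrative sketch of the generated type aliases and conversions, mirroring the unit tests further below:

fn ecss_enum_example() {
    let my_enum = EcssEnumU16::new(0x1f2f);
    let mut buf = [0u8; 2];
    // Two bytes are written in big endian (network) byte order.
    my_enum.write_to_be_bytes(&mut buf).unwrap();
    assert_eq!(buf, [0x1f, 0x2f]);
    // The From impls generated by the macro allow lossless conversion in both directions.
    let raw: u16 = my_enum.into();
    assert_eq!(raw, 0x1f2f);
    assert_eq!(EcssEnumU16::from(0x1f2f_u16), my_enum);
}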
/// Generic trait for PUS packet abstractions which can be written to a raw slice as their raw
/// byte representation. This is especially useful for generic abstractions which depend only
/// on the serialization of those packets.
pub trait WritablePusPacket {
    /// The length here also includes the CRC length.
    fn len_written(&self) -> usize;

    /// Checksum generation is enabled for the packet.
    fn has_checksum(&self) -> bool;

    /// Writes the packet to the given slice without writing the CRC checksum.
    ///
    /// The returned size is the written size WITHOUT the CRC checksum.
    /// If the checksum generation is disabled, this function is identical to the APIs which
    /// generate a checksum.
    fn write_to_bytes_no_checksum(&self, slice: &mut [u8]) -> Result<usize, PusError>;

    /// First uses [Self::write_to_bytes_no_checksum] to write the packet to the given slice and
    /// then uses the [CRC_CCITT_FALSE] to calculate the CRC and write it to the slice if the
    /// packet is configured to include a checksum.
    fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError> {
        let mut curr_idx = self.write_to_bytes_no_checksum(slice)?;
        if self.has_checksum() {
            let mut digest = CRC_CCITT_FALSE.digest();
            digest.update(&slice[0..curr_idx]);
            slice[curr_idx..curr_idx + 2].copy_from_slice(&digest.finalize().to_be_bytes());
            curr_idx += 2;
        }
        Ok(curr_idx)
    }

    /// First uses [Self::write_to_bytes_no_checksum] to write the packet to the given slice and
    /// then uses the [CRC_CCITT_FALSE_NO_TABLE] to calculate the CRC and write it to the slice if
    /// the packet is configured to include a checksum.
    fn write_to_bytes_checksum_no_table(&self, slice: &mut [u8]) -> Result<usize, PusError> {
        let mut curr_idx = self.write_to_bytes_no_checksum(slice)?;
        if self.has_checksum() {
            let mut digest = CRC_CCITT_FALSE_NO_TABLE.digest();
            digest.update(&slice[0..curr_idx]);
            slice[curr_idx..curr_idx + 2].copy_from_slice(&digest.finalize().to_be_bytes());
            curr_idx += 2;
        }
        Ok(curr_idx)
    }

    /// Converts the packet into an owned [alloc::vec::Vec].
    #[cfg(feature = "alloc")]
    fn to_vec(&self) -> Result<Vec<u8>, PusError> {
        // This is the correct way to do this. See
@@ -374,6 +504,26 @@ pub trait WritablePusPacket {
    }
}

/// PUS packet creator configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct CreatorConfig {
    /// Set the CCSDS data length field on construction.
    pub set_ccsds_len: bool,
    /// CRC-16-CCITT Checksum is present.
    pub has_checksum: bool,
}

impl Default for CreatorConfig {
    fn default() -> Self {
        Self {
            set_ccsds_len: true,
            has_checksum: true,
        }
    }
}
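A short sketch (illustrative only) of customizing the new `CreatorConfig`, for example to create packets without the optional CRC-16:

fn creator_config_example() {
    let config = CreatorConfig {
        has_checksum: false,
        ..Default::default()
    };
    assert!(config.set_ccsds_len);
    assert!(!config.has_checksum);
}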
#[cfg(test)]
mod tests {
    use alloc::string::ToString;
@@ -396,6 +546,12 @@ mod tests {
            .write_to_be_bytes(&mut buf[1..2])
            .expect("To byte conversion of u8 failed");
        assert_eq!(buf[1], 1);
        assert_eq!(my_enum.value(), 1);
        let enum_as_u8: u8 = my_enum.into();
        assert_eq!(enum_as_u8, 1);
        let vec = my_enum.to_vec();
        assert_eq!(vec, buf[1..2]);
    }

    #[test]
@@ -409,6 +565,12 @@ mod tests {
        assert_eq!(my_enum.pfc(), 16);
        assert_eq!(buf[1], 0x1f);
        assert_eq!(buf[2], 0x2f);
        assert_eq!(my_enum.value(), 0x1f2f);
        let enum_as_raw: u16 = my_enum.into();
        assert_eq!(enum_as_raw, 0x1f2f);
        let vec = my_enum.to_vec();
        assert_eq!(vec, buf[1..3]);
    }

    #[test]
@@ -440,6 +602,12 @@ mod tests {
        assert_eq!(buf[2], 0x2f);
        assert_eq!(buf[3], 0x3f);
        assert_eq!(buf[4], 0x4f);
        assert_eq!(my_enum.value(), 0x1f2f3f4f);
        let enum_as_raw: u32 = my_enum.into();
        assert_eq!(enum_as_raw, 0x1f2f3f4f);
        let vec = my_enum.to_vec();
        assert_eq!(vec, buf[1..5]);
    }

    #[test]
@@ -460,11 +628,33 @@ mod tests {
        }
    }

    #[test]
    fn test_enum_u64() {
        let mut buf = [0; 8];
        let my_enum = EcssEnumU64::new(0x1f2f3f4f5f);
        my_enum
            .write_to_be_bytes(&mut buf)
            .expect("To byte conversion of u64 failed");
        assert_eq!(buf[3], 0x1f);
        assert_eq!(buf[4], 0x2f);
        assert_eq!(buf[5], 0x3f);
        assert_eq!(buf[6], 0x4f);
        assert_eq!(buf[7], 0x5f);
        assert_eq!(my_enum.value(), 0x1f2f3f4f5f);
        let enum_as_raw: u64 = my_enum.into();
        assert_eq!(enum_as_raw, 0x1f2f3f4f5f);
        assert_eq!(u64::from_be_bytes(buf), 0x1f2f3f4f5f);
        let vec = my_enum.to_vec();
        assert_eq!(vec, buf);
    }

    #[test]
    fn test_pus_error_display() {
        let unsupport_version =
            PusError::VersionNotSupported(super::PusVersion::EsaPus.raw_value());
        let write_str = unsupport_version.to_string();
        assert_eq!(write_str, "PUS version 0 not supported")
    }

    #[test]
@@ -498,8 +688,8 @@ mod tests {
    #[test]
    fn test_pus_error_eq_impl() {
        assert_eq!(
            PusError::VersionNotSupported(PusVersion::EsaPus.raw_value()),
            PusError::VersionNotSupported(PusVersion::EsaPus.raw_value())
        );
    }


@@ -3,57 +3,90 @@ use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
/// Scheduling service subtype ID.
#[derive(Debug, PartialEq, Eq, Copy, Clone, IntoPrimitive, TryFromPrimitive)] #[derive(Debug, PartialEq, Eq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum Subservice { pub enum MessageSubtypeId {
// Core subservices // Core subservices
/// Telecommand - Enable scheduling.
TcEnableScheduling = 1, TcEnableScheduling = 1,
/// Telecommand - Disable scheduling.
TcDisableScheduling = 2, TcDisableScheduling = 2,
/// Telecommand - Reset scheduling.
TcResetScheduling = 3, TcResetScheduling = 3,
/// Telecommand - Insert activity.
TcInsertActivity = 4, TcInsertActivity = 4,
/// Telecommand - Delete activity by request ID.
TcDeleteActivityByRequestId = 5, TcDeleteActivityByRequestId = 5,
/// Telecommand - Delete activity by filter.
TcDeleteActivitiesByFilter = 6, TcDeleteActivitiesByFilter = 6,
// Time shift subservices // Time shift subservices
/// Telecommand - Time shift activity by request ID.
TcTimeShiftActivityWithRequestId = 7, TcTimeShiftActivityWithRequestId = 7,
/// Telecommand - Time shift activity by filter.
TcTimeShiftActivitiesByFilter = 8, TcTimeShiftActivitiesByFilter = 8,
/// Telecommand - Time shift all.
TcTimeShiftAll = 15, TcTimeShiftAll = 15,
// Reporting subservices // Reporting subservices
/// Telecommand - Detail report by request ID.
TcDetailReportByRequestId = 9, TcDetailReportByRequestId = 9,
/// Telemetry - Detail report.
TmDetailReport = 10, TmDetailReport = 10,
/// Telecommand - Detail report by filter.
TcDetailReportByFilter = 11, TcDetailReportByFilter = 11,
/// Telecommand - Summary report by request ID.
TcSummaryReportByRequestId = 12, TcSummaryReportByRequestId = 12,
/// Telemetry - Summary report.
TmSummaryReport = 13, TmSummaryReport = 13,
/// Telecommand - Summary report by filter.
TcSummaryReportByFilter = 14, TcSummaryReportByFilter = 14,
/// Telecommand - Detail report all.
TcDetailReportAll = 16, TcDetailReportAll = 16,
/// Telecommand - Summary report all.
TcSummaryReportAll = 17, TcSummaryReportAll = 17,
// Subschedule subservices // Subschedule subservices
/// Telecommand - Report subschedule status.
TcReportSubscheduleStatus = 18, TcReportSubscheduleStatus = 18,
/// Telemetry - Subschedule status report.
TmReportSubscheduleStatus = 19, TmReportSubscheduleStatus = 19,
/// Telecommand - Enable subschedule.
TcEnableSubschedule = 20, TcEnableSubschedule = 20,
/// Telecommand - Disable subschedule.
TcDisableSubschedule = 21, TcDisableSubschedule = 21,
// Group subservices // Group subservices
/// Telecommand - Create schedule group.
TcCreateScheduleGroup = 22, TcCreateScheduleGroup = 22,
/// Telecommand - Delete schedule group.
TcDeleteScheduleGroup = 23, TcDeleteScheduleGroup = 23,
/// Telecommand - Enable schedule group.
TcEnableScheduleGroup = 24, TcEnableScheduleGroup = 24,
/// Telecommand - Disable schedule group.
TcDisableScheduleGroup = 25, TcDisableScheduleGroup = 25,
/// Telecommand - Report all group status.
TcReportAllGroupsStatus = 26, TcReportAllGroupsStatus = 26,
/// Telemetry - All group status report.
TmReportAllGroupsStatus = 27, TmReportAllGroupsStatus = 27,
} }
/// This status applies to sub-schedules and groups as well as specified in ECSS-E-ST-70-41C 8.11.3 /// This status applies to sub-schedules and groups as well as specified in ECSS-E-ST-70-41C 8.11.3
#[derive(Debug, PartialEq, Eq, Copy, Clone)] #[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum SchedStatus { pub enum SchedStatus {
/// Scheduling disabled.
Disabled = 0, Disabled = 0,
/// Scheduling enabled.
Enabled = 1, Enabled = 1,
} }
impl From<bool> for SchedStatus { impl From<bool> for SchedStatus {
#[inline]
fn from(value: bool) -> Self { fn from(value: bool) -> Self {
if value { if value {
SchedStatus::Enabled SchedStatus::Enabled
@@ -65,11 +98,16 @@ impl From<bool> for SchedStatus {
/// Time window types as specified in ECSS-E-ST-70-41C 8.11.3 /// Time window types as specified in ECSS-E-ST-70-41C 8.11.3
#[derive(Debug, PartialEq, Eq, Copy, Clone)] #[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum TimeWindowType { pub enum TimeWindowType {
/// Select all.
SelectAll = 0, SelectAll = 0,
/// From time tag to time tag.
TimeTagToTimeTag = 1, TimeTagToTimeTag = 1,
/// Starting from a time tag.
FromTimeTag = 2, FromTimeTag = 2,
/// Until a time tag.
ToTimeTag = 3, ToTimeTag = 3,
} }
@@ -95,20 +133,20 @@ mod tests {
#[test] #[test]
fn test_conv_into_u8() { fn test_conv_into_u8() {
let subservice: u8 = Subservice::TcCreateScheduleGroup.into(); let subservice: u8 = MessageSubtypeId::TcCreateScheduleGroup.into();
assert_eq!(subservice, 22); assert_eq!(subservice, 22);
} }
#[test] #[test]
fn test_conv_from_u8() { fn test_conv_from_u8() {
let subservice: Subservice = 22u8.try_into().unwrap(); let subservice: MessageSubtypeId = 22u8.try_into().unwrap();
assert_eq!(subservice, Subservice::TcCreateScheduleGroup); assert_eq!(subservice, MessageSubtypeId::TcCreateScheduleGroup);
} }
#[test] #[test]
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
fn test_serde_subservice_id() { fn test_serde_subservice_id() {
generic_serde_test(Subservice::TcEnableScheduling); generic_serde_test(MessageSubtypeId::TcEnableScheduling);
} }
#[test] #[test]

File diffs suppressed because they are too large, including src/ecss/tc_pus_a.rs (new file, 1440 lines) and src/ecss/tm_pus_a.rs (new file, 2081 lines).

@@ -3,17 +3,27 @@ use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
/// Message subtype ID.
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)] #[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)] #[repr(u8)]
pub enum Subservice { pub enum MessageSubtypeId {
/// Telemetry - Acceptance success.
TmAcceptanceSuccess = 1, TmAcceptanceSuccess = 1,
/// Telemetry - Acceptance failure.
TmAcceptanceFailure = 2, TmAcceptanceFailure = 2,
/// Telemetry - Start success.
TmStartSuccess = 3, TmStartSuccess = 3,
/// Telemetry - Start failure.
TmStartFailure = 4, TmStartFailure = 4,
/// Telemetry - Step success.
TmStepSuccess = 5, TmStepSuccess = 5,
/// Telemetry - Step failure.
TmStepFailure = 6, TmStepFailure = 6,
/// Telemetry - Completion success.
TmCompletionSuccess = 7, TmCompletionSuccess = 7,
/// Telemetry - Completion failure.
TmCompletionFailure = 8, TmCompletionFailure = 8,
} }
@@ -23,13 +33,13 @@ mod tests {
#[test] #[test]
fn test_conv_into_u8() { fn test_conv_into_u8() {
let subservice: u8 = Subservice::TmCompletionSuccess.into(); let subservice: u8 = MessageSubtypeId::TmCompletionSuccess.into();
assert_eq!(subservice, 7); assert_eq!(subservice, 7);
} }
#[test] #[test]
fn test_conv_from_u8() { fn test_conv_from_u8() {
let subservice: Subservice = 7.try_into().unwrap(); let subservice: MessageSubtypeId = 7.try_into().unwrap();
assert_eq!(subservice, Subservice::TmCompletionSuccess); assert_eq!(subservice, MessageSubtypeId::TmCompletionSuccess);
} }
} }

src/lib.rs (2575 lines): file diff suppressed because it is too large.

src/seq_count.rs (new file, 828 lines):

@@ -0,0 +1,828 @@
//! # Sequence counter module.
//!
//! The CCSDS and ECSS packet standards oftentimes use sequence counters, for example to allow detecting
//! packet gaps. This module provides basic abstractions and helper components to implement
//! sequence counters.
use crate::MAX_SEQ_COUNT;
use arbitrary_int::traits::Integer;
use core::cell::Cell;
use paste::paste;
/// Core trait for objects which can provide a sequence count.
///
/// The core functions are not mutable on purpose to allow easier usage with
/// static structs when using the interior mutability pattern. This can be achieved by using
/// [Cell], [core::cell::RefCell] or atomic types.
pub trait SequenceCounter {
/// Raw type of the counter.
type Raw: Into<u64>;
/// Bit width of the counter.
fn max_bit_width(&self) -> usize;
/// Get the current sequence count value.
fn get(&self) -> Self::Raw;
/// Increment the sequence count by one.
fn increment(&self);
/// Get the current sequence count value and increment the counter by one.
fn get_and_increment(&self) -> Self::Raw {
let val = self.get();
self.increment();
val
}
/// Set the sequence counter.
///
/// This should not be required by default but can be used to reset the counter
/// or initialize it with a custom value.
fn set(&self, value: Self::Raw);
}
/// Simple sequence counter which wraps at `T::MAX`.
#[derive(Clone)]
pub struct SequenceCounterSimple<T: Copy> {
seq_count: Cell<T>,
// The maximum value
max_val: T,
}
macro_rules! impl_for_primitives {
($($ty: ident,)+) => {
$(
paste! {
impl SequenceCounterSimple<$ty> {
/// Constructor with a custom maximum value.
pub const fn [<new_custom_max_val_ $ty>](max_val: $ty) -> Self {
Self {
seq_count: Cell::new(0),
max_val,
}
}
/// Generic constructor.
pub const fn [<new_ $ty>]() -> Self {
Self {
seq_count: Cell::new(0),
max_val: $ty::MAX
}
}
}
impl Default for SequenceCounterSimple<$ty> {
fn default() -> Self {
Self::[<new_ $ty>]()
}
}
impl SequenceCounter for SequenceCounterSimple<$ty> {
type Raw = $ty;
#[inline]
fn max_bit_width(&self) -> usize {
core::mem::size_of::<Self::Raw>() * 8
}
#[inline]
fn get(&self) -> Self::Raw {
self.seq_count.get()
}
#[inline]
fn increment(&self) {
self.get_and_increment();
}
#[inline]
fn get_and_increment(&self) -> Self::Raw {
let curr_count = self.seq_count.get();
if curr_count == self.max_val {
self.seq_count.set(0);
} else {
self.seq_count.set(curr_count + 1);
}
curr_count
}
#[inline]
fn set(&self, value: Self::Raw) {
self.seq_count.set(value);
}
}
}
)+
}
}
impl_for_primitives!(u8, u16, u32, u64,);
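Illustrative usage of the generated constructors (a sketch mirroring the tests at the end of the file):

fn simple_counter_example() {
    let counter = SequenceCounterSimple::<u8>::default();
    assert_eq!(counter.get_and_increment(), 0);
    assert_eq!(counter.get(), 1);

    // A custom maximum value makes the counter wrap early.
    let wrapping = SequenceCounterSimple::new_custom_max_val_u8(1);
    wrapping.increment();
    wrapping.increment();
    assert_eq!(wrapping.get(), 0);
}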
/// This is a sequence count provider which wraps around at [MAX_SEQ_COUNT].
#[derive(Clone)]
pub struct SequenceCounterCcsdsSimple {
provider: SequenceCounterSimple<u16>,
}
impl Default for SequenceCounterCcsdsSimple {
#[inline]
fn default() -> Self {
Self {
provider: SequenceCounterSimple::new_custom_max_val_u16(MAX_SEQ_COUNT.as_u16()),
}
}
}
impl SequenceCounter for SequenceCounterCcsdsSimple {
type Raw = u16;
delegate::delegate! {
to self.provider {
fn get(&self) -> u16;
fn increment(&self);
fn get_and_increment(&self) -> u16;
}
}
#[inline]
fn set(&self, value: u16) {
if value > MAX_SEQ_COUNT.as_u16() {
return;
}
self.provider.set(value);
}
#[inline]
fn max_bit_width(&self) -> usize {
Self::MAX_BIT_WIDTH
}
}
impl SequenceCounterCcsdsSimple {
/// Maximum bit width for CCSDS packet sequence counter is 14 bits.
pub const MAX_BIT_WIDTH: usize = 14;
/// Create a new sequence counter specifically for the sequence count of CCSDS packets.
///
/// It has a [Self::MAX_BIT_WIDTH] of 14.
pub const fn new() -> Self {
Self {
provider: SequenceCounterSimple::new_custom_max_val_u16(MAX_SEQ_COUNT.value()),
}
}
}
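A sketch of the CCSDS-specific wrap and range-check behaviour described above:

fn ccsds_counter_example() {
    let ccsds_counter = SequenceCounterCcsdsSimple::new();
    ccsds_counter.set(MAX_SEQ_COUNT.as_u16());
    // Wraps at the 14-bit CCSDS limit instead of u16::MAX.
    assert_eq!(ccsds_counter.get_and_increment(), MAX_SEQ_COUNT.as_u16());
    assert_eq!(ccsds_counter.get(), 0);
    // Values above the 14-bit limit are silently ignored by set().
    ccsds_counter.set(u16::MAX);
    assert_eq!(ccsds_counter.get(), 0);
}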
#[cfg(target_has_atomic = "8")]
impl SequenceCounter for core::sync::atomic::AtomicU8 {
type Raw = u8;
#[inline]
fn max_bit_width(&self) -> usize {
8
}
#[inline]
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
#[inline]
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
#[inline]
fn set(&self, value: u8) {
self.store(value, core::sync::atomic::Ordering::Relaxed);
}
}
#[cfg(target_has_atomic = "16")]
impl SequenceCounter for core::sync::atomic::AtomicU16 {
type Raw = u16;
#[inline]
fn max_bit_width(&self) -> usize {
16
}
#[inline]
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
#[inline]
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
#[inline]
fn set(&self, value: u16) {
self.store(value, core::sync::atomic::Ordering::Relaxed);
}
}
#[cfg(target_has_atomic = "32")]
impl SequenceCounter for core::sync::atomic::AtomicU32 {
type Raw = u32;
#[inline]
fn max_bit_width(&self) -> usize {
32
}
#[inline]
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
#[inline]
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
#[inline]
fn set(&self, value: u32) {
self.store(value, core::sync::atomic::Ordering::Relaxed);
}
}
#[cfg(target_has_atomic = "64")]
impl SequenceCounter for core::sync::atomic::AtomicU64 {
type Raw = u64;
#[inline]
fn max_bit_width(&self) -> usize {
64
}
#[inline]
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
#[inline]
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
#[inline]
fn set(&self, value: u64) {
self.store(value, core::sync::atomic::Ordering::Relaxed);
}
}
#[cfg(feature = "portable-atomic")]
impl SequenceCounter for portable_atomic::AtomicU8 {
type Raw = u8;
#[inline]
fn max_bit_width(&self) -> usize {
8
}
fn get(&self) -> Self::Raw {
self.load(portable_atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, portable_atomic::Ordering::Relaxed);
}
fn set(&self, value: Self::Raw) {
self.store(value, portable_atomic::Ordering::Relaxed);
}
}
#[cfg(feature = "portable-atomic")]
impl SequenceCounter for portable_atomic::AtomicU16 {
type Raw = u16;
#[inline]
fn max_bit_width(&self) -> usize {
16
}
fn get(&self) -> Self::Raw {
self.load(portable_atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, portable_atomic::Ordering::Relaxed);
}
fn set(&self, value: Self::Raw) {
self.store(value, portable_atomic::Ordering::Relaxed);
}
}
#[cfg(feature = "portable-atomic")]
impl SequenceCounter for portable_atomic::AtomicU32 {
type Raw = u32;
#[inline]
fn max_bit_width(&self) -> usize {
32
}
fn get(&self) -> Self::Raw {
self.load(portable_atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, portable_atomic::Ordering::Relaxed);
}
fn set(&self, value: Self::Raw) {
self.store(value, portable_atomic::Ordering::Relaxed);
}
}
#[cfg(feature = "portable-atomic")]
impl SequenceCounter for portable_atomic::AtomicU64 {
type Raw = u64;
#[inline]
fn max_bit_width(&self) -> usize {
64
}
fn get(&self) -> Self::Raw {
self.load(portable_atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, portable_atomic::Ordering::Relaxed);
}
fn set(&self, value: Self::Raw) {
self.store(value, portable_atomic::Ordering::Relaxed);
}
}
impl<T: SequenceCounter + ?Sized> SequenceCounter for &T {
type Raw = T::Raw;
#[inline]
fn max_bit_width(&self) -> usize {
(**self).max_bit_width()
}
#[inline]
fn get(&self) -> Self::Raw {
(**self).get()
}
#[inline]
fn increment(&self) {
(**self).increment()
}
fn set(&self, value: Self::Raw) {
(**self).set(value);
}
}
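A sketch of sharing an atomic counter immutably, relying on the atomic impls and the blanket `&T` impl above:

fn shared_atomic_counter_example() {
    use core::sync::atomic::AtomicU16;

    static SHARED_SEQ_COUNT: AtomicU16 = AtomicU16::new(0);
    // Both the atomic itself and a shared reference to it implement SequenceCounter.
    let counter: &dyn SequenceCounter<Raw = u16> = &SHARED_SEQ_COUNT;
    assert_eq!(counter.get_and_increment(), 0);
    assert_eq!(SHARED_SEQ_COUNT.get(), 1);
}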
#[cfg(any(
target_has_atomic = "8",
target_has_atomic = "16",
target_has_atomic = "32",
target_has_atomic = "64"
))]
macro_rules! sync_clonable_seq_counter_impl {
($ty: ident) => {
paste::paste! {
/// This can be used if a custom wrap value is required when using a thread-safe
/// atomic based sequence counter.
#[derive(Debug)]
pub struct [<SequenceCounterSyncCustomWrap $ty:upper>] {
seq_count: core::sync::atomic::[<Atomic $ty:upper>],
max_val: $ty,
}
impl [<SequenceCounterSyncCustomWrap $ty:upper>] {
/// Generic constructor.
pub fn new(max_val: $ty) -> Self {
Self {
seq_count: core::sync::atomic::[<Atomic $ty:upper>]::new(0),
max_val,
}
}
}
impl SequenceCounter for [<SequenceCounterSyncCustomWrap $ty:upper>] {
type Raw = $ty;
fn max_bit_width(&self) -> usize {
core::mem::size_of::<Self::Raw>() * 8
}
fn get(&self) -> $ty {
self.seq_count.load(core::sync::atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.get_and_increment();
}
fn get_and_increment(&self) -> $ty {
self.seq_count.fetch_update(
core::sync::atomic::Ordering::Relaxed,
core::sync::atomic::Ordering::Relaxed,
|cur| {
// compute the next value, wrapping at MAX_VAL
let next = if cur == self.max_val { 0 } else { cur + 1 };
Some(next)
},
).unwrap()
}
fn set(&self, value: $ty) {
self.seq_count.store(value, core::sync::atomic::Ordering::Relaxed);
}
}
}
};
}
#[cfg(target_has_atomic = "8")]
sync_clonable_seq_counter_impl!(u8);
#[cfg(target_has_atomic = "16")]
sync_clonable_seq_counter_impl!(u16);
#[cfg(target_has_atomic = "32")]
sync_clonable_seq_counter_impl!(u32);
#[cfg(target_has_atomic = "64")]
sync_clonable_seq_counter_impl!(u64);
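A sketch using the generated thread-safe counter with a custom wrap value; it assumes the target provides the corresponding atomics:

fn sync_custom_wrap_example() {
    let counter = SequenceCounterSyncCustomWrapU16::new(MAX_SEQ_COUNT.as_u16());
    assert_eq!(counter.get_and_increment(), 0);
    counter.set(MAX_SEQ_COUNT.as_u16());
    // The next increment wraps back to zero.
    assert_eq!(counter.get_and_increment(), MAX_SEQ_COUNT.as_u16());
    assert_eq!(counter.get(), 0);
}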
/// Modules relying on [std] support.
#[cfg(feature = "std")]
pub mod std_mod {
use super::*;
use core::str::FromStr;
use std::path::{Path, PathBuf};
use std::string::ToString as _;
use std::{fs, io};
/// A persistent file-backed sequence counter that can wrap any other [SequenceCounter]
/// implementation which is non-persistent.
///
/// In the default configuration, the underlying [SequenceCounter] is initialized from the file
/// content, and the file content will only be updated on a manual [Self::save] or on drop.
#[derive(Debug, PartialEq, Eq)]
pub struct SequenceCounterOnFile<
Inner: SequenceCounter<Raw = RawTy>,
RawTy: core::fmt::Debug
+ Copy
+ Clone
+ Into<u64>
+ TryFrom<u64>
+ FromStr
+ Default
+ PartialEq
+ Eq,
> {
path: PathBuf,
inner: Inner,
/// Configures whether the counter value is saved to disk when the object is dropped.
///
/// If this is set to [true] which is the default, the sequence counter will only be stored
/// to disk if the [Self::save] method is used or the object is dropped. Otherwise, the
/// counter will be saved to disk on every [Self::increment] or [Self::set].
pub save_on_drop: bool,
}
impl<
Inner: SequenceCounter<Raw = RawTy>,
RawTy: core::fmt::Debug
+ Copy
+ Clone
+ Into<u64>
+ TryFrom<u64>
+ FromStr
+ Default
+ PartialEq
+ Eq,
> SequenceCounterOnFile<Inner, RawTy>
{
/// Initialize a new persistent sequence counter using a file at the given path and
/// any non persistent inner [SequenceCounter] implementation.
pub fn new<P: AsRef<Path>>(path: P, inner: Inner) -> io::Result<Self> {
let path = path.as_ref().to_path_buf();
let value = Self::load_from_path(&path);
inner.set(value);
Ok(Self {
path,
inner,
save_on_drop: true,
})
}
fn load_from_path(path: &Path) -> RawTy {
let bytes = match fs::read(path) {
Ok(b) => b,
Err(_) => return Default::default(),
};
// Trim optional single trailing newline (Unix/Windows)
let trimmed = match bytes.last() {
Some(&b'\n') => &bytes[..bytes.len() - 1],
_ => &bytes,
};
// Reject non-ASCII
if !trimmed.is_ascii() {
return Default::default();
}
// Parse
std::str::from_utf8(trimmed)
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or_default()
}
/// Persist the current value to disk (best-effort).
pub fn save(&self) -> io::Result<()> {
let value = self.inner.get();
std::fs::write(&self.path, value.into().to_string())
}
}
impl<
Inner: SequenceCounter<Raw = RawTy>,
RawTy: core::fmt::Debug
+ Copy
+ Clone
+ Into<u64>
+ TryFrom<u64, Error: core::fmt::Debug>
+ FromStr
+ Default
+ PartialEq
+ Eq,
> SequenceCounter for SequenceCounterOnFile<Inner, RawTy>
{
type Raw = RawTy;
fn max_bit_width(&self) -> usize {
self.inner.max_bit_width()
}
fn get(&self) -> RawTy {
self.inner.get()
}
fn increment(&self) {
self.inner.increment();
if !self.save_on_drop {
// persist (ignore I/O errors here; caller can call `save` explicitly)
let _ = self.save();
}
}
fn set(&self, value: RawTy) {
self.inner.set(value);
if !self.save_on_drop {
// persist (ignore I/O errors here; caller can call `save` explicitly)
let _ = self.save();
}
}
}
impl<
Inner: SequenceCounter<Raw = RawTy>,
RawTy: core::fmt::Debug
+ Copy
+ Clone
+ Into<u64>
+ TryFrom<u64>
+ FromStr
+ Default
+ PartialEq
+ Eq,
> Drop for SequenceCounterOnFile<Inner, RawTy>
{
fn drop(&mut self) {
if self.save_on_drop {
let _ = self.save();
}
}
}
/// Type alias for a CCSDS sequence counter stored on file.
pub type SequenceCounterCcsdsOnFile = SequenceCounterOnFile<SequenceCounterCcsdsSimple, u16>;
impl SequenceCounterCcsdsOnFile {
/// Open or create the counter file at `path`.
pub fn new_ccsds_counter<P: AsRef<Path>>(path: P) -> io::Result<Self> {
SequenceCounterOnFile::new(path, SequenceCounterCcsdsSimple::default())
}
}
/// Type alias for a [u16] sequence counter stored on file.
pub type SequenceCounterU16OnFile = SequenceCounterOnFile<SequenceCounterSimple<u16>, u16>;
impl SequenceCounterU16OnFile {
/// Open or create the counter file at `path`.
pub fn new_u16_counter<P: AsRef<Path>>(path: P) -> io::Result<Self> {
SequenceCounterOnFile::new(path, SequenceCounterSimple::<u16>::default())
}
}
}
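An illustrative sketch of the file-backed counter; the path is a hypothetical example and error handling is kept minimal:

fn persistent_counter_example() -> std::io::Result<()> {
    // Hypothetical path; the unit tests below use a tempfile-backed path instead.
    let mut counter = std_mod::SequenceCounterU16OnFile::new_u16_counter("/tmp/seq_count.txt")?;
    // Persist on every update instead of only when the counter is dropped.
    counter.save_on_drop = false;
    let current = counter.get_and_increment();
    // A later run resumes at current + 1 because the value was written back to the file.
    let _ = current;
    Ok(())
}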
#[cfg(test)]
mod tests {
use core::sync::atomic::{AtomicU16, AtomicU32, AtomicU64, AtomicU8};
use std::boxed::Box;
use crate::seq_count::{
SequenceCounter, SequenceCounterCcsdsSimple, SequenceCounterSimple,
SequenceCounterSyncCustomWrapU8,
};
use crate::MAX_SEQ_COUNT;
#[test]
fn test_u8_counter() {
let u8_counter = SequenceCounterSimple::<u8>::default();
assert_eq!(u8_counter.get(), 0);
assert_eq!(u8_counter.max_bit_width(), 8);
assert_eq!(u8_counter.get_and_increment(), 0);
assert_eq!(u8_counter.get_and_increment(), 1);
assert_eq!(u8_counter.get(), 2);
}
#[test]
fn test_u8_counter_overflow() {
let u8_counter = SequenceCounterSimple::new_u8();
for _ in 0..256 {
u8_counter.increment();
}
assert_eq!(u8_counter.get(), 0);
}
#[test]
fn test_ccsds_counter() {
let ccsds_counter = SequenceCounterCcsdsSimple::default();
assert_eq!(ccsds_counter.get(), 0);
assert_eq!(ccsds_counter.get_and_increment(), 0);
assert_eq!(ccsds_counter.get_and_increment(), 1);
assert_eq!(ccsds_counter.get(), 2);
}
#[test]
fn test_ccsds_counter_overflow() {
let ccsds_counter = SequenceCounterCcsdsSimple::default();
for _ in 0..MAX_SEQ_COUNT.value() + 1 {
ccsds_counter.increment();
}
assert_eq!(ccsds_counter.get(), 0);
}
fn common_counter_test(seq_counter: &mut impl SequenceCounter) {
assert_eq!(seq_counter.get().into(), 0);
assert_eq!(seq_counter.get_and_increment().into(), 0);
assert_eq!(seq_counter.get_and_increment().into(), 1);
assert_eq!(seq_counter.get().into(), 2);
seq_counter.increment();
assert_eq!(seq_counter.get().into(), 3);
assert_eq!(seq_counter.get_and_increment().into(), 3);
assert_eq!(seq_counter.get().into(), 4);
}
#[test]
fn test_atomic_counter_u8() {
let mut sync_u8_counter = AtomicU8::new(0);
assert_eq!(sync_u8_counter.max_bit_width(), 8);
common_counter_test(&mut sync_u8_counter);
}
#[test]
fn test_atomic_counter_u16() {
let mut sync_u16_counter = AtomicU16::new(0);
assert_eq!(sync_u16_counter.max_bit_width(), 16);
common_counter_test(&mut sync_u16_counter);
}
#[test]
fn test_atomic_counter_u32() {
let mut sync_u32_counter = AtomicU32::new(0);
assert_eq!(sync_u32_counter.max_bit_width(), 32);
common_counter_test(&mut sync_u32_counter);
}
#[test]
fn test_atomic_counter_u64() {
let mut sync_u64_counter = AtomicU64::new(0);
assert_eq!(sync_u64_counter.max_bit_width(), 64);
common_counter_test(&mut sync_u64_counter);
}
#[test]
#[cfg(feature = "portable-atomic")]
fn test_portable_atomic_counter_u8() {
let mut sync_u8_counter = portable_atomic::AtomicU8::new(0);
common_counter_test(&mut sync_u8_counter);
}
#[test]
#[cfg(feature = "portable-atomic")]
fn test_portable_atomic_counter_u16() {
let mut sync_u16_counter = portable_atomic::AtomicU16::new(0);
common_counter_test(&mut sync_u16_counter);
}
#[test]
#[cfg(feature = "portable-atomic")]
fn test_portable_atomic_counter_u32() {
let mut sync_u32_counter = portable_atomic::AtomicU32::new(0);
common_counter_test(&mut sync_u32_counter);
}
#[test]
#[cfg(feature = "portable-atomic")]
fn test_portable_atomic_counter_u64() {
let mut sync_u64_counter = portable_atomic::AtomicU64::new(0);
common_counter_test(&mut sync_u64_counter);
}
fn common_overflow_test_u8(seq_counter: &impl SequenceCounter) {
for _ in 0..u8::MAX as u16 + 1 {
seq_counter.increment();
}
assert_eq!(seq_counter.get().into(), 0);
}
#[test]
fn test_atomic_u8_counter_overflow() {
let sync_u8_counter = AtomicU8::new(0);
common_overflow_test_u8(&sync_u8_counter);
}
#[test]
#[cfg(feature = "portable-atomic")]
fn test_portable_atomic_u8_counter_overflow() {
let sync_u8_counter = portable_atomic::AtomicU8::new(0);
common_overflow_test_u8(&sync_u8_counter);
}
#[test]
fn test_atomic_ref_counters_overflow_custom_max_val() {
let sync_u8_counter = SequenceCounterSyncCustomWrapU8::new(128);
for _ in 0..129 {
sync_u8_counter.increment();
}
assert_eq!(sync_u8_counter.get(), 0);
}
#[test]
fn test_dyn_compatible() {
let counter: Box<dyn SequenceCounter<Raw = u16>> =
Box::new(SequenceCounterCcsdsSimple::default());
assert_eq!(counter.get(), 0);
assert_eq!(counter.max_bit_width(), 14);
counter.increment();
assert_eq!(counter.get(), 1);
assert_eq!(counter.get_and_increment(), 1);
assert_eq!(counter.get(), 2);
}
#[test]
fn test_persistent_counter() {
let tempdir = tempfile::tempdir().expect("failed to create temp dir");
let path = tempdir.path().join("seq_count.txt");
let mut persistent_counter =
crate::seq_count::std_mod::SequenceCounterCcsdsOnFile::new_ccsds_counter(&path)
.unwrap();
assert_eq!(persistent_counter.get(), 0);
assert_eq!(persistent_counter.get_and_increment(), 0);
drop(persistent_counter);
assert!(path.exists());
persistent_counter =
crate::seq_count::std_mod::SequenceCounterCcsdsOnFile::new_ccsds_counter(
tempdir.path().join("seq_count.txt"),
)
.unwrap();
assert_eq!(persistent_counter.get(), 1);
}
#[test]
fn test_persistent_counter_manual_save() {
let tempdir = tempfile::tempdir().expect("failed to create temp dir");
let path = tempdir.path().join("seq_count.txt");
let mut persistent_counter =
crate::seq_count::std_mod::SequenceCounterCcsdsOnFile::new_ccsds_counter(&path)
.unwrap();
assert_eq!(persistent_counter.get(), 0);
assert_eq!(persistent_counter.get_and_increment(), 0);
persistent_counter.save().unwrap();
assert!(path.exists());
std::mem::forget(persistent_counter);
persistent_counter =
crate::seq_count::std_mod::SequenceCounterCcsdsOnFile::new_ccsds_counter(
tempdir.path().join("seq_count.txt"),
)
.unwrap();
assert_eq!(persistent_counter.get(), 1);
}
}


@@ -2,11 +2,8 @@
//! [CCSDS 301.0-B-4](https://public.ccsds.org/Pubs/301x0b4e1.pdf) section 3.5.
//! See [chrono::DateTime::format] for a usage example of the generated
//! [chrono::format::DelayedFormat] structs.
#[cfg(all(feature = "alloc", feature = "chrono"))]
pub use alloc_mod_chrono::*;

/// Tuple of format string and formatted size for time code A.
///
@@ -34,36 +31,38 @@ pub const FMT_STR_CODE_B_WITH_SIZE: (&str, usize) = ("%Y-%jT%T%.3f", 21);
/// Three digits are used for the decimal fraction and a terminator is added at the end.
pub const FMT_STR_CODE_B_TERMINATED_WITH_SIZE: (&str, usize) = ("%Y-%jT%T%.3fZ", 22);

/// Functions requiring both [chrono] and [alloc] support.
#[cfg(all(feature = "alloc", feature = "chrono"))]
pub mod alloc_mod_chrono {
    use super::*;
    use chrono::{
        format::{DelayedFormat, StrftimeItems},
        DateTime, Utc,
    };

    /// Generates a time code formatter using the [FMT_STR_CODE_A_WITH_SIZE] format.
    pub fn generate_time_code_a(date: &DateTime<Utc>) -> DelayedFormat<StrftimeItems<'static>> {
        date.format(FMT_STR_CODE_A_WITH_SIZE.0)
    }

    /// Generates a time code formatter using the [FMT_STR_CODE_A_TERMINATED_WITH_SIZE] format.
    pub fn generate_time_code_a_terminated(
        date: &DateTime<Utc>,
    ) -> DelayedFormat<StrftimeItems<'static>> {
        date.format(FMT_STR_CODE_A_TERMINATED_WITH_SIZE.0)
    }

    /// Generates a time code formatter using the [FMT_STR_CODE_B_WITH_SIZE] format.
    pub fn generate_time_code_b(date: &DateTime<Utc>) -> DelayedFormat<StrftimeItems<'static>> {
        date.format(FMT_STR_CODE_B_WITH_SIZE.0)
    }

    /// Generates a time code formatter using the [FMT_STR_CODE_B_TERMINATED_WITH_SIZE] format.
    pub fn generate_time_code_b_terminated(
        date: &DateTime<Utc>,
    ) -> DelayedFormat<StrftimeItems<'static>> {
        date.format(FMT_STR_CODE_B_TERMINATED_WITH_SIZE.0)
    }
}
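A sketch of the day-of-year (code B) format applied to the UNIX epoch as a reproducible input; it assumes the alloc and chrono features are enabled:

fn ascii_time_code_b_example() {
    let epoch = chrono::DateTime::UNIX_EPOCH;
    let stamp = generate_time_code_b_terminated(&epoch).to_string();
    assert_eq!(stamp, "1970-001T00:00:00.000Z");
    assert_eq!(stamp.len(), FMT_STR_CODE_B_TERMINATED_WITH_SIZE.1);
}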
#[cfg(test)]
@@ -73,25 +72,54 @@ mod tests {
    use std::format;

    #[test]
    fn test_ascii_timestamp_a_unterminated_epoch() {
        let date = chrono::DateTime::UNIX_EPOCH;
        let stamp_formatter = generate_time_code_a(&date);
        let stamp = format!("{}", stamp_formatter);
        let t_sep = stamp.find('T');
        assert!(t_sep.is_some());
        assert_eq!(t_sep.unwrap(), 10);
        assert_eq!(stamp.len(), FMT_STR_CODE_A_WITH_SIZE.1);
    }

    #[test]
    #[cfg_attr(miri, ignore)]
    fn test_ascii_timestamp_a_unterminated_now() {
        let date = Utc::now();
        let stamp_formatter = generate_time_code_a(&date);
        let stamp = format!("{}", stamp_formatter);
        let t_sep = stamp.find('T');
        assert!(t_sep.is_some());
        assert_eq!(t_sep.unwrap(), 10);
        assert_eq!(stamp.len(), FMT_STR_CODE_A_WITH_SIZE.1);
    }

    #[test]
    fn test_ascii_timestamp_a_terminated_epoch() {
        let date = chrono::DateTime::UNIX_EPOCH;
        let stamp_formatter = generate_time_code_a_terminated(&date);
        let stamp = format!("{}", stamp_formatter);
        let t_sep = stamp.find('T');
        assert!(t_sep.is_some());
        assert_eq!(t_sep.unwrap(), 10);
        let z_terminator = stamp.find('Z');
        assert!(z_terminator.is_some());
        assert_eq!(
            z_terminator.unwrap(),
            FMT_STR_CODE_A_TERMINATED_WITH_SIZE.1 - 1
        );
        assert_eq!(stamp.len(), FMT_STR_CODE_A_TERMINATED_WITH_SIZE.1);
    }

    #[test]
    #[cfg_attr(miri, ignore)]
    fn test_ascii_timestamp_a_terminated_now() {
        let date = Utc::now();
        let stamp_formatter = generate_time_code_a_terminated(&date);
        let stamp = format!("{}", stamp_formatter);
        let t_sep = stamp.find('T');
        assert!(t_sep.is_some());
        assert_eq!(t_sep.unwrap(), 10);
        let z_terminator = stamp.find('Z');
        assert!(z_terminator.is_some());
        assert_eq!(
            z_terminator.unwrap(),
@@ -101,25 +129,55 @@ mod tests {
    }

    #[test]
    fn test_ascii_timestamp_b_unterminated_epoch() {
        let date = chrono::DateTime::UNIX_EPOCH;
        let stamp_formatter = generate_time_code_b(&date);
        let stamp = format!("{}", stamp_formatter);
        let t_sep = stamp.find('T');
        assert!(t_sep.is_some());
        assert_eq!(t_sep.unwrap(), 8);
        assert_eq!(stamp.len(), FMT_STR_CODE_B_WITH_SIZE.1);
    }

    #[test]
    #[cfg_attr(miri, ignore)]
    fn test_ascii_timestamp_b_unterminated_now() {
        let date = Utc::now();
        let stamp_formatter = generate_time_code_b(&date);
        let stamp = format!("{}", stamp_formatter);
        let t_sep = stamp.find('T');
        assert!(t_sep.is_some());
        assert_eq!(t_sep.unwrap(), 8);
        assert_eq!(stamp.len(), FMT_STR_CODE_B_WITH_SIZE.1);
    }

    #[test]
    fn test_ascii_timestamp_b_terminated_epoch() {
        let date = chrono::DateTime::UNIX_EPOCH;
        let stamp_formatter = generate_time_code_b_terminated(&date);
        let stamp = format!("{}", stamp_formatter);
        let t_sep = stamp.find('T');
        assert!(t_sep.is_some());
        assert_eq!(t_sep.unwrap(), 8);
        let z_terminator = stamp.find('Z');
        assert!(z_terminator.is_some());
        assert_eq!(
            z_terminator.unwrap(),
            FMT_STR_CODE_B_TERMINATED_WITH_SIZE.1 - 1
        );
        assert_eq!(stamp.len(), FMT_STR_CODE_B_TERMINATED_WITH_SIZE.1);
    }

    #[test]
    #[cfg_attr(miri, ignore)]
    fn test_ascii_timestamp_b_terminated_now() {
        let date = Utc::now();
        let stamp_formatter = generate_time_code_b_terminated(&date);
        let stamp = format!("{}", stamp_formatter);
        let t_sep = stamp.find('T');
        assert!(t_sep.is_some());
        assert_eq!(t_sep.unwrap(), 8);
        let z_terminator = stamp.find('Z');
        assert!(z_terminator.is_some());
        assert_eq!(
            z_terminator.unwrap(),

Two further file diffs suppressed because they are too large.

@@ -1,11 +1,10 @@
//! CCSDS Time Code Formats according to [CCSDS 301.0-B-4](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
use crate::ByteConversionError;
#[cfg(feature = "chrono")]
use chrono::{TimeZone, Utc};
use core::cmp::Ordering;
use core::ops::{Add, AddAssign, Sub};
use core::time::Duration;

#[allow(unused_imports)]
#[cfg(not(feature = "std"))]
@@ -13,8 +12,7 @@ use num_traits::float::FloatCore;
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
#[cfg(feature = "std")] #[cfg(feature = "std")]
use std::time::{SystemTime, SystemTimeError}; use std::time::{SystemTime, SystemTimeError};
#[cfg(feature = "std")] #[cfg(feature = "std")]
@@ -24,30 +22,42 @@ pub mod ascii;
pub mod cds;
pub mod cuc;

/// Conversion constant for converting CCSDS days to UNIX days.
pub const DAYS_CCSDS_TO_UNIX: i32 = -4383;
/// Seconds per day.
pub const SECONDS_PER_DAY: u32 = 86400;
/// Milliseconds per day.
pub const MS_PER_DAY: u32 = SECONDS_PER_DAY * 1000;
/// Nanoseconds per second.
pub const NANOS_PER_SECOND: u32 = 1_000_000_000;

/// CCSDS time code identifiers.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum CcsdsTimeCode {
    /// CUC with a CCSDS epoch (1958-01-01T00:00:00+00:00).
    CucCcsdsEpoch = 0b001,
    /// CUC with a custom agency epoch.
    CucAgencyEpoch = 0b010,
    /// CDS time code.
    Cds = 0b100,
    /// CCS time code.
    Ccs = 0b101,
    /// Agency defined time code.
    AgencyDefined = 0b110,
}

impl TryFrom<u8> for CcsdsTimeCode {
    type Error = ();

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            x if x == CcsdsTimeCode::CucCcsdsEpoch as u8 => Ok(CcsdsTimeCode::CucCcsdsEpoch),
            x if x == CcsdsTimeCode::CucAgencyEpoch as u8 => Ok(CcsdsTimeCode::CucAgencyEpoch),
            x if x == CcsdsTimeCode::Cds as u8 => Ok(CcsdsTimeCode::Cds),
            x if x == CcsdsTimeCode::Ccs as u8 => Ok(CcsdsTimeCode::Ccs),
            x if x == CcsdsTimeCode::AgencyDefined as u8 => Ok(CcsdsTimeCode::AgencyDefined),
            _ => Err(()),
        }
    }
}
@@ -55,92 +65,67 @@ impl TryFrom<u8> for CcsdsTimeCodes {
/// Retrieve the CCSDS time code from the p-field. If no valid time code identifier is found, the
/// value of the raw time code identification field is returned.
pub fn ccsds_time_code_from_p_field(pfield: u8) -> Result<CcsdsTimeCode, u8> {
    let raw_bits = (pfield >> 4) & 0b111;
    CcsdsTimeCode::try_from(raw_bits).map_err(|_| raw_bits)
}
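A sketch of the p-field decoding: bits 4..=6 carry the time code identification.

fn p_field_example() {
    // CDS time code: identification bits 0b100.
    assert_eq!(
        ccsds_time_code_from_p_field(0b0100_0000),
        Ok(CcsdsTimeCode::Cds)
    );
    // Unknown patterns are handed back as the raw identification value.
    assert_eq!(ccsds_time_code_from_p_field(0b0111_0000), Err(0b111));
}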
/// Date is before the CCSDS epoch.
#[derive(Debug, PartialEq, Eq, Copy, Clone, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[error("date before ccsds epoch: {0:?}")]
pub struct DateBeforeCcsdsEpochError(UnixTime);

/// Generic timestamp error.
#[derive(Debug, PartialEq, Eq, Copy, Clone, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum TimestampError {
    /// Invalid time code.
    #[error("invalid time code, expected {expected:?}, found {found}")]
    InvalidTimeCode {
        /// Expected time code.
        expected: CcsdsTimeCode,
        /// Found raw time code.
        found: u8,
    },
    /// Byte conversion error.
    #[error("time stamp: byte conversion error: {0}")]
    ByteConversion(#[from] ByteConversionError),
    /// CDS timestamp error.
    #[error("CDS error: {0}")]
    Cds(#[from] cds::CdsError),
    /// CUC timestamp error.
    #[error("CUC error: {0}")]
    Cuc(#[from] cuc::CucError),
    /// Custom epoch is not supported.
    #[error("custom epoch not supported")]
    CustomEpochNotSupported,
}

/// [std] module.
#[cfg(feature = "std")]
pub mod std_mod {
    use crate::time::TimestampError;
    use std::time::SystemTimeError;
    use thiserror::Error;

    /// [std] timestamp error.
    #[derive(Debug, Clone, Error)]
    pub enum StdTimestampError {
        /// System time error.
        #[error("system time error: {0:?}")]
        SystemTime(#[from] SystemTimeError),
        /// Generic timestamp error.
        #[error("timestamp error: {0}")]
        Timestamp(#[from] TimestampError),
    }
}

/// Seconds since epoch for the current system time.
#[cfg(feature = "std")]
pub fn seconds_since_epoch() -> f64 {
    SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
@@ -152,6 +137,7 @@ pub fn seconds_since_epoch() -> f64 {
/// ///
/// - CCSDS epoch: 1958-01-01T00:00:00+00:00 /// - CCSDS epoch: 1958-01-01T00:00:00+00:00
/// - UNIX Epoch: 1970-01-01T00:00:00+00:00 /// - UNIX Epoch: 1970-01-01T00:00:00+00:00
#[inline]
pub const fn unix_to_ccsds_days(unix_days: i64) -> i64 { pub const fn unix_to_ccsds_days(unix_days: i64) -> i64 {
unix_days - DAYS_CCSDS_TO_UNIX as i64 unix_days - DAYS_CCSDS_TO_UNIX as i64
} }
@@ -160,26 +146,31 @@ pub const fn unix_to_ccsds_days(unix_days: i64) -> i64 {
///
/// - CCSDS epoch: 1958-01-01T00:00:00+00:00
/// - UNIX Epoch: 1970-01-01T00:00:00+00:00
#[inline]
pub const fn ccsds_to_unix_days(ccsds_days: i64) -> i64 {
ccsds_days + DAYS_CCSDS_TO_UNIX as i64
}
/// Similar to [unix_to_ccsds_days] but converts the epoch instead, which is the number of elapsed
/// seconds since the CCSDS and UNIX epoch times.
#[inline]
pub const fn unix_epoch_to_ccsds_epoch(unix_epoch: i64) -> i64 {
unix_epoch - (DAYS_CCSDS_TO_UNIX as i64 * SECONDS_PER_DAY as i64)
}
/// Convert CCSDS epoch to UNIX epoch.
#[inline]
pub const fn ccsds_epoch_to_unix_epoch(ccsds_epoch: i64) -> i64 {
ccsds_epoch + (DAYS_CCSDS_TO_UNIX as i64 * SECONDS_PER_DAY as i64)
}
/// Milliseconds of day for the current system time.
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub fn ms_of_day_using_sysclock() -> u32 {
ms_of_day(seconds_since_epoch())
}
/// Milliseconds for the given seconds since epoch.
pub fn ms_of_day(seconds_since_epoch: f64) -> u32 {
let fraction_ms = seconds_since_epoch - seconds_since_epoch.floor();
let ms_of_day: u32 = (((seconds_since_epoch.floor() as u32 % SECONDS_PER_DAY) * 1000) as f64
@@ -188,23 +179,37 @@ pub fn ms_of_day(seconds_since_epoch: f64) -> u32 {
ms_of_day
}
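The body of ms_of_day is partly cut off by the hunk above. The following is a minimal standalone sketch of the intended computation; the handling of the sub-second fraction is an assumption based on the visible lines.

const SECONDS_PER_DAY: u32 = 86400;

// Sketch: whole milliseconds elapsed in the current day plus the sub-second fraction.
fn ms_of_day(seconds_since_epoch: f64) -> u32 {
    let fraction_ms = seconds_since_epoch - seconds_since_epoch.floor();
    (((seconds_since_epoch.floor() as u32 % SECONDS_PER_DAY) * 1000) as f64 + fraction_ms * 1000.0)
        as u32
}

fn main() {
    // 1970-01-01T00:00:01.500 is 1500 ms into the day.
    assert_eq!(ms_of_day(1.5), 1500);
}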
/// Generic writable timestamp trait.
pub trait TimeWriter {
/// Written length.
fn len_written(&self) -> usize;
/// Generic function to write a timestamp into a raw buffer.
/// Returns the number of written bytes on success.
fn write_to_bytes(&self, bytes: &mut [u8]) -> Result<usize, TimestampError>;
/// Convert to an owned [alloc::vec::Vec].
#[cfg(feature = "alloc")]
fn to_vec(&self) -> Result<alloc::vec::Vec<u8>, TimestampError> {
let mut vec = alloc::vec![0; self.len_written()];
self.write_to_bytes(&mut vec)?;
Ok(vec)
}
}
/// Generic readable timestamp trait.
pub trait TimeReader: Sized {
/// Create a timestamp from a raw byte buffer.
fn from_bytes(buf: &[u8]) -> Result<Self, TimestampError>;
}
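A hypothetical implementation sketch for the two traits above; it is not part of the crate. It assumes the crate is pulled in under the name spacepackets and that the ByteConversionError variants carry found/expected fields, as used elsewhere in this diff.

use spacepackets::time::{TimeReader, TimeWriter, TimestampError};
use spacepackets::ByteConversionError;

// Hypothetical stamp type: a plain 4-byte big-endian seconds counter.
struct RawSecondsStamp(u32);

impl TimeWriter for RawSecondsStamp {
    fn len_written(&self) -> usize {
        4
    }

    fn write_to_bytes(&self, bytes: &mut [u8]) -> Result<usize, TimestampError> {
        if bytes.len() < self.len_written() {
            // Field names assumed to match the variants used elsewhere in this diff.
            return Err(ByteConversionError::ToSliceTooSmall {
                found: bytes.len(),
                expected: self.len_written(),
            }
            .into());
        }
        bytes[0..4].copy_from_slice(&self.0.to_be_bytes());
        Ok(4)
    }
}

impl TimeReader for RawSecondsStamp {
    fn from_bytes(buf: &[u8]) -> Result<Self, TimestampError> {
        if buf.len() < 4 {
            return Err(ByteConversionError::FromSliceTooSmall {
                found: buf.len(),
                expected: 4,
            }
            .into());
        }
        Ok(Self(u32::from_be_bytes(buf[0..4].try_into().unwrap())))
    }
}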
/// Trait for generic CCSDS time providers.
///
/// The UNIX helper methods and the date/time helper methods are not strictly necessary but extremely
/// practical because they are a very common and simple exchange format for time information.
/// Therefore, it was decided to keep them in this trait as well.
pub trait CcsdsTimeProvider {
/// Length when written to bytes.
fn len_as_bytes(&self) -> usize;
/// Returns the pfield of the time provider. The pfield can have one or two bytes depending
@@ -212,130 +217,224 @@ pub trait CcsdsTimeProvider {
/// entry denotes the length of the pfield and the second entry is the value of the pfield
/// in big endian format.
fn p_field(&self) -> (usize, [u8; 2]);
/// CCSDS time code field.
fn ccdsd_time_code(&self) -> CcsdsTimeCode;
/// UNIX time as seconds.
fn unix_secs(&self) -> i64 {
self.unix_time().secs
}
/// Subsecond nanoseconds.
fn subsec_nanos(&self) -> u32 {
self.unix_time().subsec_nanos
}
/// Subsecond milliseconds.
fn subsec_millis(&self) -> u16 {
(self.subsec_nanos() / 1_000_000) as u16
}
/// UNIX time.
fn unix_time(&self) -> UnixTime {
UnixTime::new(self.unix_secs(), self.subsec_nanos())
}
/// [chrono] date time.
#[cfg(feature = "chrono")]
fn chrono_date_time(&self) -> chrono::LocalResult<chrono::DateTime<chrono::Utc>> {
chrono::Utc.timestamp_opt(self.unix_secs(), self.subsec_nanos())
}
/// [time] library date time.
#[cfg(feature = "timelib")]
fn timelib_date_time(&self) -> Result<time::OffsetDateTime, time::error::ComponentRange> {
Ok(time::OffsetDateTime::from_unix_timestamp(self.unix_secs())?
+ time::Duration::nanoseconds(self.subsec_nanos().into()))
}
}
/// UNIX time: Elapsed non-leap seconds since 1970-01-01T00:00:00+00:00 UTC.
///
/// This is a commonly used time format and can therefore also be used as a generic format to
/// convert other CCSDS time formats to and from. The subsecond precision is in nanoseconds
/// similarly to other common time formats and libraries.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct UnixTime {
secs: i64,
subsec_nanos: u32,
}
impl UnixTime {
/// The UNIX epoch time: 1970-01-01T00:00:00+00:00 UTC.
pub const EPOCH: Self = Self {
secs: 0,
subsec_nanos: 0,
};
/// The minimum possible `UnixTime`.
pub const MIN: Self = Self {
secs: i64::MIN,
subsec_nanos: 0,
};
/// The maximum possible `UnixTime`.
pub const MAX: Self = Self {
secs: i64::MAX,
subsec_nanos: NANOS_PER_SECOND - 1,
};
/// Returns [None] if the subsecond nanosecond value is invalid (larger than fraction of a
/// second)
pub fn new_checked(unix_seconds: i64, subsec_nanos: u32) -> Option<Self> {
if subsec_nanos >= NANOS_PER_SECOND {
return None;
}
Some(Self::new(unix_seconds, subsec_nanos))
}
/// Returns [None] if the subsecond millisecond value is invalid (larger than fraction of a
/// second)
pub fn new_subsec_millis_checked(unix_seconds: i64, subsec_millis: u16) -> Option<Self> {
if subsec_millis >= 1000 {
return None;
}
Self::new_checked(unix_seconds, subsec_millis as u32 * 1_000_000)
}
/// This function will panic if the subsecond value is larger than the fraction of a second.
/// Use [Self::new_checked] if you want to handle this case without a panic.
pub const fn new(unix_seconds: i64, subsecond_nanos: u32) -> Self {
if subsecond_nanos >= NANOS_PER_SECOND {
panic!("invalid subsecond nanos value");
}
Self {
secs: unix_seconds,
subsec_nanos: subsecond_nanos,
}
}
/// This function will panic if the subsecond value is larger than the fraction of a second.
/// Use [Self::new_subsec_millis_checked] if you want to handle this case without a panic.
pub const fn new_subsec_millis(unix_seconds: i64, subsecond_millis: u16) -> Self {
if subsecond_millis >= 1000 {
panic!("invalid subsecond millisecond value");
}
Self {
secs: unix_seconds,
subsec_nanos: subsecond_millis as u32 * 1_000_000,
}
}
/// New UNIX time with only seconds, subseconds set to zero.
pub fn new_only_secs(unix_seconds: i64) -> Self {
Self {
secs: unix_seconds,
subsec_nanos: 0,
}
}
/// Sub-second milliseconds.
#[inline]
pub fn subsec_millis(&self) -> u16 {
(self.subsec_nanos / 1_000_000) as u16
}
/// Sub-second nanoseconds.
pub fn subsec_nanos(&self) -> u32 {
self.subsec_nanos
}
/// Create a UNIX timestamp from the current system time.
#[cfg(feature = "std")]
pub fn now() -> Result<Self, SystemTimeError> {
let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?;
let epoch = now.as_secs();
Ok(Self::new(epoch as i64, now.subsec_nanos()))
}
/// UNIX timestamp as a floating point number in seconds.
#[inline]
pub fn unix_secs_f64(&self) -> f64 {
self.secs as f64 + (self.subsec_nanos as f64 / 1_000_000_000.0)
}
/// UNIX timestamp as seconds, discards the sub-second part.
pub fn as_secs(&self) -> i64 {
self.secs
}
/// UNIX timestamp as [chrono] date time.
#[cfg(feature = "chrono")]
pub fn chrono_date_time(&self) -> chrono::LocalResult<chrono::DateTime<chrono::Utc>> {
Utc.timestamp_opt(self.secs, self.subsec_nanos)
}
/// UNIX timestamp as [time] library date time.
#[cfg(feature = "timelib")]
pub fn timelib_date_time(&self) -> Result<time::OffsetDateTime, time::error::ComponentRange> {
Ok(time::OffsetDateTime::from_unix_timestamp(self.as_secs())?
+ time::Duration::nanoseconds(self.subsec_nanos().into()))
}
/// Calculate the difference in milliseconds between two UnixTimestamps
pub fn diff_in_millis(&self, other: &UnixTime) -> Option<i64> {
let seconds_difference = self.secs.checked_sub(other.secs)?;
// Convert seconds difference to milliseconds
let milliseconds_difference = seconds_difference.checked_mul(1000)?;
// Calculate the difference in subsecond milliseconds directly
let subsecond_difference_nanos = self.subsec_nanos as i64 - other.subsec_nanos as i64;
// Combine the differences
Some(milliseconds_difference + (subsecond_difference_nanos / 1_000_000))
}
}
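A usage sketch for the constructors and getters shown above; this is not part of the diff and assumes the crate is used under the name spacepackets.

use spacepackets::time::UnixTime;

fn main() {
    let t0 = UnixTime::new_only_secs(100);
    // Panicking constructor, nanoseconds must stay below one second.
    let t1 = UnixTime::new(100, 250_000_000);
    // Checked constructor based on subsecond milliseconds.
    let t2 = UnixTime::new_subsec_millis_checked(101, 750).expect("invalid subsecond millis");

    assert_eq!(t1.subsec_millis(), 250);
    assert!(t0 < t1 && t1 < t2);
    // Millisecond difference, negative if the argument is the later stamp.
    assert_eq!(t2.diff_in_millis(&t1), Some(1500));
}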
#[cfg(feature = "chrono")]
impl From<chrono::DateTime<chrono::Utc>> for UnixTime {
fn from(value: chrono::DateTime<chrono::Utc>) -> Self {
Self::new(value.timestamp(), value.timestamp_subsec_nanos())
}
}
#[cfg(feature = "timelib")]
impl From<time::OffsetDateTime> for UnixTime {
fn from(value: time::OffsetDateTime) -> Self {
Self::new(value.unix_timestamp(), value.nanosecond())
}
}
impl PartialOrd for UnixTime {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for UnixTime {
fn cmp(&self, other: &Self) -> Ordering {
if self == other {
return Ordering::Equal;
}
match self.secs.cmp(&other.secs) {
Ordering::Less => return Ordering::Less,
Ordering::Greater => return Ordering::Greater,
_ => (),
}
match self.subsec_millis().cmp(&other.subsec_millis()) {
Ordering::Less => {
return if self.secs < 0 {
Ordering::Greater
} else {
Ordering::Less
}
}
Ordering::Greater => {
return if self.secs < 0 {
Ordering::Less
} else {
Ordering::Greater
@@ -347,13 +446,38 @@ impl Ord for UnixTimestamp {
}
}
/// Difference between two UNIX timestamps. The [Duration] type cannot contain negative durations,
/// so the sign information is supplied separately.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct StampDiff {
/// Positive duration flag.
pub positive_duration: bool,
/// Absolute duration.
pub duration_absolute: Duration,
}
impl Sub for UnixTime {
type Output = Option<StampDiff>;
fn sub(self, rhs: Self) -> Self::Output {
let difference = self.diff_in_millis(&rhs)?;
Some(if difference < 0 {
StampDiff {
positive_duration: false,
duration_absolute: Duration::from_millis(-difference as u64),
}
} else {
StampDiff {
positive_duration: true,
duration_absolute: Duration::from_millis(difference as u64),
}
})
}
}
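A small sketch of the subtraction operator defined above; crate name again assumed to be spacepackets.

use core::time::Duration;
use spacepackets::time::{StampDiff, UnixTime};

fn main() {
    let earlier = UnixTime::new_subsec_millis(5, 500);
    let later = UnixTime::new_subsec_millis(7, 250);
    // Subtraction yields an Option<StampDiff> with the sign carried separately.
    let diff: StampDiff = (later - earlier).expect("stamps too far apart");
    assert!(diff.positive_duration);
    assert_eq!(diff.duration_absolute, Duration::from_millis(1750));
}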
fn get_new_stamp_after_addition(current_stamp: &UnixTime, duration: Duration) -> UnixTime {
let mut new_subsec_nanos = current_stamp.subsec_nanos() + duration.subsec_nanos();
let mut new_unix_seconds = current_stamp.secs;
let mut increment_seconds = |value: u32| {
if new_unix_seconds < 0 {
new_unix_seconds = new_unix_seconds
@@ -365,8 +489,8 @@ fn get_new_stamp_after_addition(
.expect("new unix seconds would exceed i64::MAX");
}
};
if new_subsec_nanos >= 1_000_000_000 {
new_subsec_nanos -= 1_000_000_000;
increment_seconds(1);
}
increment_seconds(
@@ -375,7 +499,7 @@ fn get_new_stamp_after_addition(
.try_into()
.expect("duration seconds exceeds u32::MAX"),
);
UnixTime::new(new_unix_seconds, new_subsec_nanos)
}
/// Please note that this operation will panic on the following conditions:
@@ -383,7 +507,7 @@ fn get_new_stamp_after_addition(
/// - Unix seconds after subtraction for stamps before the unix epoch exceeds [i64::MIN].
/// - Unix seconds after addition exceeds [i64::MAX].
/// - Seconds from duration to add exceeds [u32::MAX].
impl AddAssign<Duration> for UnixTime {
fn add_assign(&mut self, duration: Duration) {
*self = get_new_stamp_after_addition(self, duration);
}
@@ -394,7 +518,7 @@ impl AddAssign<Duration> for UnixTimestamp {
/// - Unix seconds after subtraction for stamps before the unix epoch exceeds [i64::MIN].
/// - Unix seconds after addition exceeds [i64::MAX].
/// - Unix seconds exceeds [u32::MAX].
impl Add<Duration> for UnixTime {
type Output = Self;
fn add(self, duration: Duration) -> Self::Output {
@@ -402,8 +526,8 @@ impl Add<Duration> for UnixTimestamp {
}
}
impl Add<Duration> for &UnixTime {
type Output = UnixTime;
fn add(self, duration: Duration) -> Self::Output {
get_new_stamp_after_addition(self, duration)
@@ -414,10 +538,15 @@ impl Add<Duration> for &UnixTimestamp {
mod tests {
use alloc::string::ToString;
use chrono::{Datelike, Timelike};
use std::{format, println};
use super::{cuc::CucError, *};
#[allow(dead_code)]
const UNIX_STAMP_CONST: UnixTime = UnixTime::new(5, 999_999_999);
#[allow(dead_code)]
const UNIX_STAMP_CONST_2: UnixTime = UnixTime::new_subsec_millis(5, 999);
#[test]
fn test_days_conversion() {
assert_eq!(unix_to_ccsds_days(DAYS_CCSDS_TO_UNIX.into()), 0);
@@ -425,6 +554,7 @@ mod tests {
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_get_current_time() {
let sec_floats = seconds_since_epoch();
assert!(sec_floats > 0.0);
@@ -439,6 +569,7 @@ mod tests {
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_ccsds_epoch() {
let now = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
@@ -453,29 +584,29 @@ mod tests {
#[test]
fn basic_unix_stamp_test() {
let stamp = UnixTime::new_only_secs(-200);
assert_eq!(stamp.secs, -200);
assert_eq!(stamp.subsec_millis(), 0);
let stamp = UnixTime::new_only_secs(250);
assert_eq!(stamp.secs, 250);
assert_eq!(stamp.subsec_millis(), 0);
}
#[test]
fn basic_float_unix_stamp_test() {
let stamp = UnixTime::new_subsec_millis_checked(500, 600).unwrap();
assert_eq!(stamp.secs, 500);
let subsec_millis = stamp.subsec_millis();
assert_eq!(subsec_millis, 600);
println!("{:?}", (500.6 - stamp.unix_secs_f64()).to_string());
assert!((500.6 - stamp.unix_secs_f64()).abs() < 0.0001);
}
#[test]
fn test_ord_larger() {
let stamp0 = UnixTime::new_only_secs(5);
let stamp1 = UnixTime::new_subsec_millis_checked(5, 500).unwrap();
let stamp2 = UnixTime::new_only_secs(6);
assert!(stamp1 > stamp0);
assert!(stamp2 > stamp0);
assert!(stamp2 > stamp1);
@@ -483,9 +614,9 @@ mod tests {
#[test]
fn test_ord_smaller() {
let stamp0 = UnixTime::new_only_secs(5);
let stamp1 = UnixTime::new_subsec_millis_checked(5, 500).unwrap();
let stamp2 = UnixTime::new_only_secs(6);
assert!(stamp0 < stamp1);
assert!(stamp0 < stamp2);
assert!(stamp1 < stamp2);
@@ -493,9 +624,9 @@ mod tests {
#[test]
fn test_ord_larger_neg_numbers() {
let stamp0 = UnixTime::new_only_secs(-5);
let stamp1 = UnixTime::new_subsec_millis_checked(-5, 500).unwrap();
let stamp2 = UnixTime::new_only_secs(-6);
assert!(stamp0 > stamp1);
assert!(stamp0 > stamp2);
assert!(stamp1 > stamp2);
@@ -505,9 +636,9 @@ mod tests {
#[test]
fn test_ord_smaller_neg_numbers() {
let stamp0 = UnixTime::new_only_secs(-5);
let stamp1 = UnixTime::new_subsec_millis_checked(-5, 500).unwrap();
let stamp2 = UnixTime::new_only_secs(-6);
assert!(stamp2 < stamp1);
assert!(stamp2 < stamp0);
assert!(stamp1 < stamp0);
@@ -515,10 +646,11 @@ mod tests {
assert!(stamp2 <= stamp1);
}
#[allow(clippy::nonminimal_bool)]
#[test]
fn test_eq() {
let stamp0 = UnixTime::new(5, 0);
let stamp1 = UnixTime::new_only_secs(5);
assert_eq!(stamp0, stamp1);
assert!(stamp0 <= stamp1);
assert!(stamp0 >= stamp1);
@@ -528,28 +660,27 @@ mod tests {
#[test]
fn test_addition() {
let mut stamp0 = UnixTime::new_only_secs(1);
stamp0 += Duration::from_secs(5);
assert_eq!(stamp0.as_secs(), 6);
assert_eq!(stamp0.subsec_millis(), 0);
let stamp1 = stamp0 + Duration::from_millis(500);
assert_eq!(stamp1.secs, 6);
assert_eq!(stamp1.subsec_millis(), 500);
}
#[test]
fn test_addition_on_ref() {
let stamp0 = &UnixTime::new_subsec_millis_checked(20, 500).unwrap();
let stamp1 = stamp0 + Duration::from_millis(2500);
assert_eq!(stamp1.secs, 23);
assert_eq!(stamp1.subsec_millis(), 0);
}
#[test]
fn test_as_dt() {
let stamp = UnixTime::new_only_secs(0);
let dt = stamp.chrono_date_time().unwrap();
assert_eq!(dt.year(), 1970);
assert_eq!(dt.month(), 1);
assert_eq!(dt.day(), 1);
@@ -559,27 +690,85 @@ mod tests {
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_from_now() {
let stamp_now = UnixTime::now().unwrap();
let dt_now = stamp_now.chrono_date_time().unwrap();
assert!(dt_now.year() >= 2020);
}
#[test]
fn test_stamp_diff_positive_0() {
let stamp_later = UnixTime::new(2, 0);
let StampDiff {
positive_duration,
duration_absolute,
} = (stamp_later - UnixTime::new(1, 0)).expect("stamp diff error");
assert!(positive_duration);
assert_eq!(duration_absolute, Duration::from_secs(1));
}
#[test]
fn test_stamp_diff_positive_1() {
let stamp_later = UnixTime::new(3, 800 * 1_000_000);
let stamp_earlier = UnixTime::new_subsec_millis_checked(1, 900).unwrap();
let StampDiff {
positive_duration,
duration_absolute,
} = (stamp_later - stamp_earlier).expect("stamp diff error");
assert!(positive_duration);
assert_eq!(duration_absolute, Duration::from_millis(1900));
}
#[test]
fn test_stamp_diff_negative() {
let stamp_later = UnixTime::new_subsec_millis_checked(3, 800).unwrap();
let stamp_earlier = UnixTime::new_subsec_millis_checked(1, 900).unwrap();
let StampDiff {
positive_duration,
duration_absolute,
} = (stamp_earlier - stamp_later).expect("stamp diff error");
assert!(!positive_duration);
assert_eq!(duration_absolute, Duration::from_millis(1900));
}
#[test]
fn test_addition_spillover() {
let mut stamp0 = UnixTime::new_subsec_millis_checked(1, 900).unwrap();
stamp0 += Duration::from_millis(100);
assert_eq!(stamp0.secs, 2);
assert_eq!(stamp0.subsec_millis(), 0);
stamp0 += Duration::from_millis(1100);
assert_eq!(stamp0.secs, 3);
assert_eq!(stamp0.subsec_millis(), 100);
}
#[test]
fn test_cuc_error_printout() {
let cuc_error = CucError::InvalidCounterWidth(12);
let stamp_error = TimestampError::from(cuc_error);
assert_eq!(stamp_error.to_string(), format!("CUC error: {cuc_error}"));
}
#[test]
#[cfg(feature = "timelib")]
fn test_unix_stamp_as_timelib_datetime() {
let stamp_epoch = UnixTime::EPOCH;
let timelib_dt = stamp_epoch.timelib_date_time().unwrap();
assert_eq!(timelib_dt.year(), 1970);
assert_eq!(timelib_dt.month(), time::Month::January);
assert_eq!(timelib_dt.day(), 1);
assert_eq!(timelib_dt.hour(), 0);
assert_eq!(timelib_dt.minute(), 0);
assert_eq!(timelib_dt.second(), 0);
}
#[test]
#[cfg(feature = "timelib")]
fn test_unix_stamp_from_timelib_datetime() {
let timelib_dt = time::OffsetDateTime::UNIX_EPOCH;
let unix_time = UnixTime::from(timelib_dt);
let timelib_converted_back = unix_time.timelib_date_time().unwrap();
assert_eq!(timelib_dt, timelib_converted_back);
}
}

src/uslp/mod.rs (new file, 1005 lines): file diff suppressed because it is too large.

@@ -1,24 +1,28 @@
//! # Utility module.
use crate::ByteConversionError;
use core::fmt::Debug;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
/// Helper trait for types which can be converted to a byte array.
pub trait ToBeBytes {
/// Concrete byte array type.
type ByteArray: AsRef<[u8]>;
/// Length when written to big endian bytes.
fn written_len(&self) -> usize;
/// Convert to big endian byte array.
fn to_be_bytes(&self) -> Self::ByteArray;
}
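A hypothetical helper, not part of the crate, showing how the ToBeBytes trait above can be used generically over the integer implementations that follow; crate name assumed to be spacepackets.

use spacepackets::util::ToBeBytes;

// Serializes any ToBeBytes implementor into an owned big-endian buffer.
fn dump_be<T: ToBeBytes>(value: &T) -> Vec<u8> {
    value.to_be_bytes().as_ref().to_vec()
}

fn main() {
    assert_eq!(dump_be(&0xABCDu16), vec![0xAB, 0xCD]);
    assert_eq!(dump_be(&1u8), vec![1]);
    assert_eq!(dump_be(&0x01020304u32), vec![1, 2, 3, 4]);
}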
impl ToBeBytes for () {
type ByteArray = [u8; 0];
#[inline]
fn written_len(&self) -> usize {
0
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
[]
}
@@ -27,9 +31,12 @@ impl ToBeBytes for () {
impl ToBeBytes for u8 {
type ByteArray = [u8; 1];
#[inline]
fn written_len(&self) -> usize {
1
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
u8::to_be_bytes(*self)
}
@@ -38,9 +45,12 @@ impl ToBeBytes for u8 {
impl ToBeBytes for u16 {
type ByteArray = [u8; 2];
#[inline]
fn written_len(&self) -> usize {
2
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
u16::to_be_bytes(*self)
}
@@ -49,9 +59,12 @@ impl ToBeBytes for u16 {
impl ToBeBytes for u32 {
type ByteArray = [u8; 4];
#[inline]
fn written_len(&self) -> usize {
4
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
u32::to_be_bytes(*self)
}
@@ -60,82 +73,90 @@ impl ToBeBytes for u32 {
impl ToBeBytes for u64 {
type ByteArray = [u8; 8];
#[inline]
fn written_len(&self) -> usize {
8
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
u64::to_be_bytes(*self)
}
}
/// Helper trait for unsigned enumerations.
pub trait UnsignedEnum {
/// Size of the unsigned enumeration in bytes.
fn size(&self) -> usize;
/// Write the unsigned enumeration to a raw buffer. Returns the written size on success.
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
/// Type-erased raw value.
fn value_raw(&self) -> u64;
/// Convert to a [alloc::vec::Vec].
#[cfg(feature = "alloc")]
fn to_vec(&self) -> alloc::vec::Vec<u8> {
let mut buf = alloc::vec![0; self.size()];
self.write_to_be_bytes(&mut buf).unwrap();
buf
}
}
/// Extension trait for unsigned enumerations.
pub trait UnsignedEnumExt: UnsignedEnum + Debug + Copy + Clone + PartialEq + Eq {}
/// Unsigned byte field errors.
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum UnsignedByteFieldError {
/// Value is too large for specified width of byte field.
#[error("value {value} too large for width {width}")]
ValueTooLargeForWidth {
/// Width in bytes.
width: usize,
/// Value.
value: u64,
},
/// Only 1, 2, 4 and 8 are allowed width values. Optionally contains the expected width if
/// applicable, for example for conversions.
#[error("invalid width {found}, expected {expected:?}")]
InvalidWidth {
/// Found width.
found: usize,
/// Expected width.
expected: Option<usize>,
},
/// Error during byte conversion.
#[error("byte conversion error: {0}")]
ByteConversionError(#[from] ByteConversionError),
}
impl From<ByteConversionError> for UnsignedByteFieldError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value)
}
}
impl Display for UnsignedByteFieldError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
Self::ByteConversionError(e) => {
write!(f, "low level byte conversion error: {e}")
}
Self::InvalidWidth { found, .. } => {
write!(f, "invalid width {found}, only 1, 2, 4 and 8 are allowed.")
}
Self::ValueTooLargeForWidth { width, value } => {
write!(f, "value {value} too large for width {width}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for UnsignedByteFieldError {}
/// Type erased variant.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct UnsignedByteField {
width: usize,
value: u64,
}
impl UnsignedByteField {
/// Generic constructor.
#[inline]
pub const fn new(width: usize, value: u64) -> Self {
Self { width, value }
}
/// Type-erased raw value.
#[inline]
pub const fn value(&self) -> u64 {
self.value
}
/// Construct from raw bytes, assuming big-endian byte order.
#[inline]
pub fn new_from_be_bytes(width: usize, buf: &[u8]) -> Result<Self, UnsignedByteFieldError> {
if width > buf.len() {
return Err(ByteConversionError::FromSliceTooSmall {
@@ -168,10 +189,16 @@ impl UnsignedByteField {
}
impl UnsignedEnum for UnsignedByteField {
#[inline]
fn size(&self) -> usize {
self.width
}
#[inline]
fn value_raw(&self) -> u64 {
self.value()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.size() {
return Err(ByteConversionError::ToSliceTooSmall {
@@ -205,23 +232,28 @@ impl UnsignedEnum for UnsignedByteField {
}
}
/// Generic type erased unsigned byte field.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct GenericUnsignedByteField<TYPE: Copy + Into<u64>> {
value: TYPE,
}
impl<TYPE: Copy + Into<u64>> GenericUnsignedByteField<TYPE> {
/// Generic constructor.
pub const fn new(val: TYPE) -> Self {
Self { value: val }
}
/// Raw value.
pub const fn value(&self) -> TYPE {
self.value
}
}
impl<TYPE: Copy + ToBeBytes + Into<u64>> UnsignedEnum for GenericUnsignedByteField<TYPE> {
#[inline]
fn size(&self) -> usize {
self.value.written_len()
}
@@ -233,20 +265,34 @@ impl<TYPE: Copy + ToBeBytes> UnsignedEnum for GenericUnsignedByteField<TYPE> {
expected: self.size(),
});
}
buf[..self.size()].copy_from_slice(self.value.to_be_bytes().as_ref());
Ok(self.value.written_len())
}
#[inline]
fn value_raw(&self) -> u64 {
self.value().into()
}
}
/// Alias for [GenericUnsignedByteField] with [()] generic.
pub type UnsignedByteFieldEmpty = GenericUnsignedByteField<()>;
/// Alias for [GenericUnsignedByteField] with [u8] generic.
pub type UnsignedByteFieldU8 = GenericUnsignedByteField<u8>;
/// Alias for [GenericUnsignedByteField] with [u16] generic.
pub type UnsignedByteFieldU16 = GenericUnsignedByteField<u16>;
/// Alias for [GenericUnsignedByteField] with [u32] generic.
pub type UnsignedByteFieldU32 = GenericUnsignedByteField<u32>;
/// Alias for [GenericUnsignedByteField] with [u64] generic.
pub type UnsignedByteFieldU64 = GenericUnsignedByteField<u64>;
/// Alias for [UnsignedByteFieldU8]
pub type UbfU8 = UnsignedByteFieldU8;
/// Alias for [UnsignedByteFieldU16]
pub type UbfU16 = UnsignedByteFieldU16;
/// Alias for [UnsignedByteFieldU32]
pub type UbfU32 = UnsignedByteFieldU32;
/// Alias for [UnsignedByteFieldU64]
pub type UbfU64 = UnsignedByteFieldU64;
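A usage sketch for the byte field aliases and the UnsignedEnum trait above, not part of the diff; crate name assumed to be spacepackets, and to_vec requires the alloc feature.

use spacepackets::util::{UbfU16, UnsignedByteField, UnsignedEnum};

fn main() {
    let field = UbfU16::new(0x1F2E);
    assert_eq!(field.size(), 2);
    assert_eq!(field.value_raw(), 0x1F2E);

    // Write the field into a raw buffer in big-endian byte order.
    let mut buf = [0u8; 2];
    field.write_to_be_bytes(&mut buf).unwrap();
    assert_eq!(buf, [0x1F, 0x2E]);
    // With the alloc feature, the same bytes as an owned vector.
    assert_eq!(field.to_vec(), vec![0x1F, 0x2E]);

    // Convert into the type-erased variant and back.
    let erased = UnsignedByteField::from(field);
    assert_eq!(erased.value(), 0x1F2E);
    let back = UbfU16::try_from(erased).unwrap();
    assert_eq!(back.value(), 0x1F2E);
}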
impl From<UnsignedByteFieldU8> for UnsignedByteField {
@@ -258,6 +304,7 @@ impl From<UnsignedByteFieldU8> for UnsignedByteField {
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU8 {
type Error = UnsignedByteFieldError;
#[inline]
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 1 {
return Err(UnsignedByteFieldError::InvalidWidth {
@@ -270,6 +317,7 @@ impl TryFrom<UnsignedByteField> for UnsignedByteFieldU8 {
}
impl From<UnsignedByteFieldU16> for UnsignedByteField {
#[inline]
fn from(value: UnsignedByteFieldU16) -> Self {
Self::new(2, value.value as u64)
}
@@ -278,6 +326,7 @@ impl From<UnsignedByteFieldU16> for UnsignedByteField {
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU16 {
type Error = UnsignedByteFieldError;
#[inline]
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 2 {
return Err(UnsignedByteFieldError::InvalidWidth {
@@ -290,6 +339,7 @@ impl TryFrom<UnsignedByteField> for UnsignedByteFieldU16 {
}
impl From<UnsignedByteFieldU32> for UnsignedByteField {
#[inline]
fn from(value: UnsignedByteFieldU32) -> Self {
Self::new(4, value.value as u64)
}
@@ -298,6 +348,7 @@ impl From<UnsignedByteFieldU32> for UnsignedByteField {
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU32 {
type Error = UnsignedByteFieldError;
#[inline]
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 4 {
return Err(UnsignedByteFieldError::InvalidWidth {
@@ -310,6 +361,7 @@ impl TryFrom<UnsignedByteField> for UnsignedByteFieldU32 {
}
impl From<UnsignedByteFieldU64> for UnsignedByteField {
#[inline]
fn from(value: UnsignedByteFieldU64) -> Self {
Self::new(8, value.value)
}
@@ -318,6 +370,7 @@ impl From<UnsignedByteFieldU64> for UnsignedByteField {
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU64 {
type Error = UnsignedByteFieldError;
#[inline]
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 8 {
return Err(UnsignedByteFieldError::InvalidWidth {
@@ -330,7 +383,7 @@ impl TryFrom<UnsignedByteField> for UnsignedByteFieldU64 {
}
#[cfg(test)]
mod tests {
use crate::util::{
UnsignedByteField, UnsignedByteFieldError, UnsignedByteFieldU16, UnsignedByteFieldU32,
UnsignedByteFieldU64, UnsignedByteFieldU8, UnsignedEnum,
@@ -351,6 +404,8 @@ pub mod tests {
for val in buf.iter().skip(1) {
assert_eq!(*val, 0);
}
assert_eq!(u8.value_raw(), 5);
assert_eq!(u8.value(), 5);
}
#[test]
@@ -367,6 +422,8 @@ pub mod tests {
for val in buf.iter().skip(2) {
assert_eq!(*val, 0);
}
assert_eq!(u16.value_raw(), 3823);
assert_eq!(u16.value(), 3823);
}
#[test]
@@ -383,6 +440,8 @@ pub mod tests {
(4..8).for_each(|i| {
assert_eq!(buf[i], 0);
});
assert_eq!(u32.value_raw(), 80932);
assert_eq!(u32.value(), 80932);
}
#[test]
@@ -396,6 +455,8 @@ pub mod tests {
assert_eq!(len, 8);
let raw_val = u64::from_be_bytes(buf[0..8].try_into().unwrap());
assert_eq!(raw_val, 5999999);
assert_eq!(u64.value_raw(), 5999999);
assert_eq!(u64.value(), 5999999);
}
#[test]
@@ -534,9 +595,9 @@ pub mod tests {
u8.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(buf[0], 5);
(1..8).for_each(|i| {
assert_eq!(buf[i], 0);
});
}
#[test]
@@ -562,9 +623,9 @@ pub mod tests {
.expect("writing to raw buffer failed");
let raw_val = u32::from_be_bytes(buf[0..4].try_into().unwrap());
assert_eq!(raw_val, 80932);
(4..8).for_each(|i| {
assert_eq!(buf[i], 0);
});
}
#[test]