204 Commits

Author SHA1 Message Date
f001234025 Merge pull request 'clippy' (#190) from clippy into main
Reviewed-on: #190
2025-11-06 13:43:48 +01:00
Robin Mueller
e5a7839901 clippy 2025-11-06 13:43:20 +01:00
5707c6322a Merge pull request 'changelog' (#189) from prep-v0.17.0 into main
Reviewed-on: #189
2025-11-06 13:27:09 +01:00
Robin Mueller
093f82ae86 changelog 2025-11-06 13:26:46 +01:00
e68d1ade48 Merge pull request 'Finish full crate docs' (#188) from renaming-docs-for-ecss into main
Reviewed-on: #188
2025-11-05 20:20:35 +01:00
Robin Mueller
fbdc325d0d Finish full crate docs 2025-11-05 20:18:31 +01:00
4bc0219cb2 Merge pull request 'docs and minor cfdp change' (#187) from cfdp-update-docs into main
Reviewed-on: #187
2025-11-04 18:54:33 +01:00
3f4f76849f docs and minor cfdp change 2025-11-04 18:50:53 +01:00
fb1e2fc583 Merge pull request 'added missing derives' (#186) from add-missing-derives into main
Reviewed-on: #186
2025-11-04 15:57:46 +01:00
Robin Mueller
96e5851864 added missing derives 2025-11-04 15:57:09 +01:00
b4d00c26c5 Merge pull request 'add direct APID getter' (#185) from add-direct-ccsds-apid-getter into main
Reviewed-on: #185
2025-11-04 15:40:10 +01:00
Robin Mueller
a8b64f2fef add direct APID getter 2025-11-04 15:39:37 +01:00
e7cb6f2a7a Merge pull request 'error reporting bugfix' (#184) from ccsds-packet-reader-error-reporting-fix into main
Reviewed-on: #184
2025-11-04 15:34:54 +01:00
Robin Mueller
973ba4d3c4 error reporting bugfix 2025-11-04 15:34:15 +01:00
8789e34c14 Merge pull request 'added missing function to reader' (#183) from add-ccsds-id-function-to-reader into main
Reviewed-on: #183
2025-10-31 16:56:12 +01:00
Robin Mueller
a68e82a825 added missing function to reader 2025-10-31 16:55:45 +01:00
0b46fa785b Merge pull request 'better naming' (#182) from naming-improvement into main
Reviewed-on: #182
2025-10-31 16:03:40 +01:00
Robin Mueller
c57ee3e131 better naming 2025-10-31 16:03:08 +01:00
6ac84c3dca Merge pull request 'add option to ignore checksum for CCSDS' (#181) from add-option-to-ignore-checksum into main
Reviewed-on: #181
2025-10-31 16:01:11 +01:00
Robin Mueller
374f39f13b add option to ignore checksum for CCSDS 2025-10-31 15:59:41 +01:00
2bc6167710 Merge pull request 'less confusing naming' (#180) from less-confusing-naming into main
Reviewed-on: #180
2025-10-31 12:55:53 +01:00
Robin Mueller
cfe0937afe less confusing naming 2025-10-31 12:55:07 +01:00
e1c693cb29 Merge pull request 'update ECSS PUS naming convention' (#179) from update-ecss-pus-naming-convention into main
Reviewed-on: #179
2025-10-31 12:39:39 +01:00
Robin Mueller
38165420b7 update ECSS PUS naming convention 2025-10-31 12:38:59 +01:00
0d09ff7825 Merge pull request 'add docs and minor changes' (#178) from add-docs-minor-changes into main
Reviewed-on: #178
2025-10-31 11:45:06 +01:00
Robin Mueller
8f2096ca35 add docs and minor changes 2025-10-31 11:39:23 +01:00
3f35e9dba9 Merge pull request 'add owned CCSDS packet creator' (#177) from add-owned-ccsds-packet-creator into main
Reviewed-on: #177
2025-10-31 10:10:50 +01:00
Robin Mueller
ea96099f55 add owned CCSDS packet creator 2025-10-31 10:10:11 +01:00
e117239852 Merge pull request 'add useful functions' (#176) from add-ccsds-id-functions into main
Reviewed-on: #176
2025-10-30 18:58:27 +01:00
Robin Mueller
844c517a94 add useful functions 2025-10-30 18:58:10 +01:00
0ae2ac149b Merge pull request 'add CCSDS packet ID' (#175) from add-ccsds-packet-id into main
Reviewed-on: #175
2025-10-29 21:58:56 +01:00
Robin Mueller
2b41f9754d add CCSDS packet ID 2025-10-29 21:45:07 +01:00
8e2e0ce632 Merge pull request 'fix portable atomic support' (#174) from fix-portable-atomic-support into main
Reviewed-on: #174
2025-10-29 16:13:09 +01:00
Robin Mueller
14d935ac2a fix portable atomic support 2025-10-29 16:05:11 +01:00
756a803213 Merge pull request 'prepare v0.17.0' (#173) from prepare-v0.17.0 into main
Reviewed-on: #173
2025-10-29 16:04:19 +01:00
Robin Mueller
937bdeaf54 prepare v0.17.0 2025-10-29 15:48:29 +01:00
bc30143d61 Merge pull request 'start adding improved CCSDS packet support' (#172) from add-better-ccsds-packet-support into main
Reviewed-on: #172
2025-10-29 15:44:38 +01:00
Robin Mueller
549e323211 start adding improved CCSDS packet support 2025-10-29 15:28:25 +01:00
82c3e06ac0 Merge pull request 'feature gate all core atomics' (#171) from feature-gate-all-core-atomics into main
Reviewed-on: #171
2025-10-29 11:26:57 +01:00
Robin Mueller
750add26ef feature gate all core atomics 2025-10-29 11:24:05 +01:00
c3ff947fb0 Merge pull request 'move some modules' (#170) from clean-up-cds-time-mod into main
Reviewed-on: #170
2025-10-15 15:26:39 +02:00
Robin Mueller
8d86ecc8ee move some modules 2025-10-15 15:26:06 +02:00
4b2bebb8cb Merge pull request 'simplified CDS short impl' (#169) from simplify-cds-timestamp-impl into main
Reviewed-on: #169
2025-10-15 15:01:06 +02:00
Robin Mueller
e0b7a6a6bb simplified CDS short impl 2025-10-15 11:57:44 +02:00
49983a5d6c Merge pull request 'update for docs generation' (#168) from doc-generation-update into main
Reviewed-on: #168
2025-10-02 09:45:45 +02:00
Robin Mueller
04c864d6a2 update for docs generation 2025-10-01 00:20:47 +02:00
922801cc74 Merge pull request 'try to fix CI' (#167) from ci-fix into main
Reviewed-on: #167
2025-09-26 15:14:20 +02:00
Robin Mueller
f5717d98cd try to fix CI 2025-09-26 15:10:27 +02:00
6ea7b8902a Merge pull request 'SpHeader::packet_len is pub now' (#166) from sp-packet-len-pub into main
Reviewed-on: #166
2025-09-26 15:09:07 +02:00
Robin Mueller
f6ac9ee918 SpHeader::packet_len is pub now 2025-09-26 15:08:29 +02:00
0aa41fee92 Merge pull request 'prepare v0.16.0' (#165) from prep-v0.16.0 into main
Reviewed-on: #165
2025-09-24 19:58:02 +02:00
Robin Mueller
d1516d669d prepare v0.16.0 2025-09-24 19:56:52 +02:00
b1ebb4d7c4 Merge pull request 'update docs on coverage' (#164) from update-coverage-docs into main
Reviewed-on: #164
2025-09-24 19:55:47 +02:00
Robin Mueller
cd79af4440 update docs on coverage 2025-09-24 19:54:45 +02:00
6a760c8585 Merge pull request 'improve backwards compatibility' (#163) from improve-backwards-compat into main
Reviewed-on: #163
2025-09-24 19:54:11 +02:00
Robin Mueller
5eb409f1ec improve backwards compatibility 2025-09-24 19:49:51 +02:00
69d416d6ff Merge pull request 'improvement for NAK API' (#162) from nak-api-improvement into main
Reviewed-on: #162
2025-09-23 17:08:15 +02:00
Robin Mueller
e2b239ae61 improvement for NAK API 2025-09-23 17:06:45 +02:00
b06d7c1a87 Merge pull request 'better error handling' (#161) from better-nak-error into main
Reviewed-on: #161
2025-09-18 17:37:00 +02:00
Robin Mueller
ec1ddbde81 better error handling 2025-09-18 17:36:51 +02:00
7f4ada1734 Merge pull request 'NAK constructor is pub' (#160) from nak-new-pub into main
Reviewed-on: #160
2025-09-18 17:35:48 +02:00
Robin Mueller
15f97e960b NAK constructor is pub 2025-09-18 17:32:11 +02:00
49b7c2d072 Merge pull request 'PDU header improvements' (#159) from pdu-header-improvements into main
Reviewed-on: #159
2025-09-18 16:56:26 +02:00
1ed23bd7ef PDU header improvements 2025-09-18 16:54:28 +02:00
a82cdb1e82 Merge pull request 'nak docs' (#158) from nak-docs into main
Reviewed-on: #158
2025-09-17 13:42:04 +02:00
12e7062075 nak docs 2025-09-17 13:40:49 +02:00
a1e40834f5 Merge pull request 'improve ACK PDU' (#157) from improve-ack-pdu into main
Reviewed-on: #157
2025-09-15 13:02:30 +02:00
Robin Mueller
3f6a5df8e7 improve ACK PDU 2025-09-15 13:02:16 +02:00
a8d5fdf8d3 Merge pull request 'extend NAK PDU' (#156) from extend-nak-pdu into main
Reviewed-on: #156
2025-09-15 10:30:02 +02:00
Robin Mueller
62326da276 extend NAK PDU 2025-09-15 10:16:07 +02:00
477890346a Merge pull request 'improve CFDP module' (#154) from cfdp-module-improvements into main
Reviewed-on: #154
2025-09-11 16:10:47 +02:00
Robin Mueller
9394beea38 improve CFDP module 2025-09-11 16:03:58 +02:00
6c425e137a Merge pull request 'add coverage to justfile' (#155) from update-justfile into main
Reviewed-on: #155
2025-09-11 16:03:41 +02:00
24b91a7a83 add coverage to justfile 2025-09-11 13:22:27 +02:00
a7c6ce7d44 Merge pull request 'improve CFDP module' (#153) from cfdp-module-improvements into main
Reviewed-on: #153
2025-09-11 09:12:59 +02:00
Robin Mueller
c68e71a25e improve CFDP module 2025-09-11 09:09:41 +02:00
272a961a70 Merge pull request 'add packet_len direct method for SpHeader' (#152) from sp-header-tweak into main
Reviewed-on: #152
2025-09-10 21:05:56 +02:00
Robin Mueller
6f4df7e3c2 add packet_len direct method for SpHeader 2025-09-10 19:04:47 +02:00
15c477e810 Merge pull request 'prepare v0.16.0' (#151) from prep-v0.16.0 into main
Reviewed-on: #151
2025-09-10 18:08:10 +02:00
Robin Mueller
e5b10920a0 prepare v0.16.0 2025-09-10 18:03:35 +02:00
3f8434e1fa Merge pull request 'add missing Error impls' (#150) from add-missing-error-impls into main
Reviewed-on: #150
2025-09-10 17:54:46 +02:00
Robin Mueller
ec3f462931 add missing Error impls 2025-09-10 17:52:49 +02:00
e6686caba1 Merge pull request 'add-missing-defmt-impls' (#149) from add-missing-defmt-impls into main
Reviewed-on: #149
2025-09-10 17:52:39 +02:00
Robin Mueller
2a0b21983e add some missing defmt impls 2025-09-10 17:48:49 +02:00
4e153e0b68 Merge pull request 'Add TM builder API' (#148) from add-tm-builder-api into main
Reviewed-on: #148
2025-09-10 17:39:05 +02:00
Robin Mueller
aaac15e3d0 Add TM builder API 2025-09-10 17:36:39 +02:00
89788c1341 Merge pull request 'add first builder API' (#147) from add-tc-builder-api into main
Reviewed-on: #147
2025-09-10 16:38:25 +02:00
Robin Mueller
578be2da8f add first TC builder API 2025-09-10 16:12:06 +02:00
3a21daf8de Merge pull request 'refactor and improve ECSS module' (#146) from refactor-improve-ecss-module into main
Reviewed-on: #146
2025-09-10 15:37:27 +02:00
Robin Mueller
8fd46f6a30 refactor and improve ECSS module 2025-09-10 15:28:58 +02:00
c6b74fecbd Merge pull request 'start making ECSS checksum optional' (#144) from ecss-checksum-optional into main
Reviewed-on: #144
2025-09-09 16:14:45 +02:00
Robin Mueller
60e35559e5 start making ECSS checksum optional 2025-09-09 16:14:11 +02:00
e708f1b861 Merge pull request 'some more tests' (#145) from add-some-more-tests into main
Reviewed-on: #145
2025-09-09 15:57:11 +02:00
Robin Mueller
91490b5dd6 some more tests 2025-09-09 15:56:44 +02:00
e151b8e761 Merge pull request 'fix for embedded systems, introduce portable atomic seq counters' (#143) from portable-atomic-seq-counters-embedded-fix into main
Reviewed-on: #143
2025-09-09 13:49:23 +02:00
Robin Mueller
2839174e5f fix for embedded systems, introduce portable atomic seq counters 2025-09-09 13:34:12 +02:00
6e2db87fa9 Merge pull request 'improve sequence counters' (#141) from improve-seq-counters into main
Reviewed-on: #141
2025-09-09 11:53:31 +02:00
Robin Mueller
e8a01dc6b2 improve sequence counters 2025-09-09 11:51:59 +02:00
20403bda32 Merge pull request 'sequence counter improvements' (#140) from seq-counter-improvements into main
Reviewed-on: #140
2025-09-09 10:27:08 +02:00
Robin Mueller
2cbd48331c sequence counter improvements 2025-09-09 10:24:20 +02:00
c1346f2b12 Merge pull request 'add some more tests' (#138) from some-more-tests into main
Reviewed-on: #138
2025-09-08 17:01:45 +02:00
2e3a7849a7 add some more tests 2025-09-08 16:59:41 +02:00
86ebea8eb8 Merge pull request 'Add basic USLP support' (#137) from add-basic-uslp-support into main
Reviewed-on: #137
2025-09-08 16:59:21 +02:00
2c8c77acb8 add basic USLP support 2025-09-08 16:51:33 +02:00
63d74aa58b Merge pull request 'PUS version fixes' (#136) from small-bugfix-pus-tm-a into main
Reviewed-on: #136
2025-08-26 16:41:13 +02:00
5a86f89c83 version fixes 2025-08-26 16:40:44 +02:00
b8ae26c302 Merge pull request 'improvement for naming' (#135) from naming-improvement into main
Reviewed-on: #135
2025-08-26 16:22:19 +02:00
160b1dedf9 improvement for naming 2025-08-26 16:16:54 +02:00
8eccf1fa29 Merge pull request 'NAK PDU reader update' (#134) from nak-pdu-reader-refactoring into main
Reviewed-on: #134
2025-08-20 17:53:32 +02:00
Robin Mueller
8445b7cc31 NAK PDU reader update 2025-08-20 16:02:08 +02:00
a2971f8f73 Merge pull request 'add badge' (#133) from add-chat-badge into main
Reviewed-on: #133
2025-08-14 14:22:09 +02:00
Robin Mueller
ba3b66326d add badge 2025-08-14 14:21:37 +02:00
de2675e602 Merge pull request 'add PUS A support' (#132) from add-pus-a-support into main
Reviewed-on: #132
2025-08-13 17:24:50 +02:00
Robin Mueller
3d344c11cc add PUS A support 2025-08-13 17:04:39 +02:00
6e2c35e0c0 Merge pull request 'prepare next release' (#131) from prep-v0.15.0 into main
Reviewed-on: #131
2025-07-18 19:32:28 +02:00
Robin Mueller
026e1a50b9 prepare next release 2025-07-18 19:31:55 +02:00
440b836b70 Merge pull request 'allow arbitrary crc minor version' (#130) from allow-arbitrary-crc-minor-version into main
Reviewed-on: #130
2025-07-18 19:28:31 +02:00
Robin Mueller
00e28e4a96 allow arbitrary crc minor version 2025-07-18 19:27:59 +02:00
4c1cad5b72 Merge pull request 'reserved data variants for ECSS TM and TC' (#129) from ecss-tm-tc-reserved-data-variants into main
Reviewed-on: #129
2025-05-16 19:06:08 +02:00
5cd5c1ce6d reserved data variants for ECSS TM and TC 2025-05-16 19:04:23 +02:00
de99bb926a Merge pull request 'small changelog tweak' (#128) from small-changelog-tweak into main
Reviewed-on: #128
2025-05-10 15:08:18 +02:00
167f53cac7 small changelog tweak 2025-05-10 15:07:58 +02:00
172227b843 Merge pull request 'update MSRV check' (#127) from update-msrv-check into main
Reviewed-on: #127
2025-05-10 15:04:21 +02:00
1bbca6866b update MSRV check 2025-05-10 15:03:05 +02:00
b569208d45 Merge pull request 'prepare v0.14.0' (#126) from prepare-release into main
Reviewed-on: #126
2025-05-10 14:58:45 +02:00
d9709ffd6c prepare v0.14.0 2025-05-10 14:54:27 +02:00
243dc64a78 Merge pull request 'remove badge' (#125) from remove-badge into main
Reviewed-on: #125
2025-05-10 14:38:23 +02:00
a6dc173f7f Merge branch 'main' into remove-badge 2025-05-10 14:38:19 +02:00
86dddbeef5 remove badge 2025-05-10 14:36:00 +02:00
17d112e838 Merge pull request 'one more test fix' (#124) from one-more-test-fix into main
Reviewed-on: #124
2025-05-10 14:31:53 +02:00
9c8467ccfe one more test fix 2025-05-10 14:30:00 +02:00
217a8c2cc7 Merge pull request 'formatting' (#123) from formatting into main
Reviewed-on: #123
2025-05-10 14:26:27 +02:00
349e34bed6 formatting 2025-05-10 14:25:44 +02:00
d6a76ca360 Merge pull request 'CRC handling and dependency update' (#122) from msp430-tweak into main
Reviewed-on: #122
2025-05-10 14:24:54 +02:00
8f4351771b API variants which use table-less CRC 2025-05-10 13:58:10 +02:00
b08c3329f4 Merge pull request 'bump patch release' (#120) from prep-v0.13.1 into main
Reviewed-on: #120
2025-03-21 14:53:01 +01:00
08e0d39154 bump patch release 2025-03-21 14:50:10 +01:00
ab97607024 Merge pull request 'clippy fixes' (#119) from clippy-fixes into main
Reviewed-on: #119
2025-03-21 14:47:06 +01:00
60d1f77844 bugfix due to operator precendence and clippy fixes 2025-03-21 14:46:13 +01:00
5a112b7f39 Merge pull request 'add funding file' (#118) from add-funding-file into main
Reviewed-on: #118
2025-03-17 16:33:46 +01:00
e774dd69d4 add funding file 2025-03-17 16:32:43 +01:00
a03d26a49c Merge pull request 'prep v0.13.0' (#117) from prep-v0.13.0 into main
Reviewed-on: #117
2024-11-08 16:55:52 +01:00
026173514f prep v0.13.0 2024-11-08 16:54:53 +01:00
2d7ccc0909 Merge pull request 'Add back API which was deleted accidently' (#116) from add-back-api into main
Reviewed-on: #116
2024-11-08 15:50:46 +01:00
05d3bac927 Add back API which was deleted accidently 2024-11-08 15:46:42 +01:00
d58df5fee2 Merge pull request 'Switch to thiserror' (#115) from switch-to-thiserror into main
Reviewed-on: #115
2024-11-08 15:42:37 +01:00
9d23ac5b9b switch to thiserror completely 2024-11-08 15:26:40 +01:00
c0b4653c01 Merge pull request 'bump CI msrv check' (#114) from bump-msrv-check into main
Reviewed-on: #114
2024-11-08 11:27:52 +01:00
f156833985 bump CI msrv check 2024-11-08 11:26:51 +01:00
9aea3dba00 Merge pull request 'bump dependencies' (#113) from bump-dependencies into main
Reviewed-on: #113
2024-11-08 11:14:04 +01:00
48247a0a87 bump thiserror and zerocopy 2024-11-08 11:13:41 +01:00
f70b957d9a Merge pull request 'docs fixes' (#112) from smaller-doc-fixes into main
Reviewed-on: #112
2024-11-07 23:28:41 +01:00
fbf953df0e docs fixes 2024-11-04 11:42:51 +01:00
f135d54364 Merge pull request 'prepare v0.12.0' (#111) from prepare-v0.12.0 into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #111
2024-09-10 17:58:03 +02:00
d8b2a3dfea prepare v0.12.0
Some checks are pending
Rust/spacepackets/pipeline/head Build started...
2024-09-10 17:51:31 +02:00
448b76be91 Merge pull request 'condition code bugfix' (#110) from cfdp-cond-code-bugfix into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #110
2024-08-29 09:47:27 +02:00
027b01f00f condition code bugfix
Some checks are pending
Rust/spacepackets/pipeline/head Build queued...
2024-08-29 09:46:40 +02:00
bf15b22889 Merge pull request 'added max file segment length calculator' (#109) from file-segment-calculator into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #109
2024-08-21 14:29:16 +02:00
16f91b562d added max file segment length calculator
Some checks are pending
Rust/spacepackets/pipeline/head Build started...
2024-08-21 14:26:11 +02:00
cd77b806fe Merge pull request 'Added additional converter method' (#108) from msgs-to-user-converter-method into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #108
2024-08-21 11:20:33 +02:00
43c88da3f2 Added additional converter method
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-08-20 17:24:53 +02:00
b19a61b859 Merge pull request 'update msg to user module' (#107) from cfdp-msg-to-user-update into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #107
2024-08-20 17:17:11 +02:00
8aa957b8bb update msg to user module
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-08-20 16:56:25 +02:00
190fa1befc Merge pull request 'Added generic sequence counter module' (#106) from seq-count-module into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #106
2024-08-20 11:20:07 +02:00
175b61deca Added generic sequence counter module
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-08-20 10:57:53 +02:00
51c28b5cc6 Merge pull request 'Github MSRV version update' (#105) from github-msrv into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #105
2024-08-19 10:58:31 +02:00
45cc74daa7 Github MSRV version update
Some checks are pending
Rust/spacepackets/pipeline/head Build started...
2024-08-19 10:44:33 +02:00
191c6f8146 Merge pull request 'Bump MSRV and delegate version' (#104) from bump-msrv-delegate-version into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #104
2024-08-19 10:42:29 +02:00
5449884b2e Bump MSRV and delegate version
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-08-19 02:23:34 -06:00
9c93c76193 Merge pull request 'Update EOF PDU API' (#103) from eof-pdu-update into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #103
2024-08-19 10:18:19 +02:00
043927c7ef Update EOF PDU API
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-07-21 10:14:41 -07:00
f4dc5a0302 Merge pull request 'added new API for file data PDU' (#102) from file-data-pdu-update into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #102
2024-07-21 18:25:08 +02:00
9166faa4ae optimization
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-07-19 11:29:37 -07:00
ed808e69d4 added new API for file data PDU
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-07-19 10:41:31 -07:00
d146b6cf57 Merge pull request 'Metadata PDU creator update' (#101) from metadata-pdu-creator-update into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #101
2024-07-14 17:08:46 +02:00
ff0c9d8c70 Update and simplify Metadata PDU creator API
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-07-09 16:30:48 +02:00
c40bc855a2 Merge pull request 'add owned TLV type' (#98) from cfdp-tlv-owned-type into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #98
2024-07-09 16:08:53 +02:00
81423fc6e8 add owned TLV type
Some checks are pending
Rust/spacepackets/pipeline/head Build queued...
Rust/spacepackets/pipeline/pr-main Build queued...
2024-07-09 16:04:08 +02:00
a399b11a8e Merge pull request 'update documentation build' (#99) from update-docs-build into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #99
2024-07-03 16:14:46 +02:00
9d4c7446a3 Merge branch 'main' into update-docs-build
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-06-25 16:20:08 +02:00
b87f7d73b1 Merge pull request 'clippy fix' (#100) from clippy-fix into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #100
2024-06-25 16:20:01 +02:00
80744eea16 clippy fix
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
2024-06-25 16:19:30 +02:00
a5918bfd4a update documentation build
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-06-25 16:07:07 +02:00
0e347b0e37 Merge pull request 'Bump MSRV' (#97) from bump-msrv into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #97
2024-05-19 13:07:12 +02:00
58dabb6f2f specify exact required version
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-05-19 09:13:12 +02:00
7fd65aa592 bumped MSRV
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-05-19 09:12:39 +02:00
0024afc83e Merge pull request 'prep patch release' (#96) from prep-v0.11.2 into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #96
2024-05-19 09:02:46 +02:00
c48bd848d3 prep patch release
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-05-19 08:49:03 +02:00
b8be9ae641 Merge pull request 'Fixes for Miri' (#95) from fixes-for-miri into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #95
2024-05-15 13:03:24 +02:00
c2506dbba9 Merge branch 'main' into fixes-for-miri
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Rust/spacepackets/pipeline/pr-main This commit looks good
2024-05-14 19:25:07 +02:00
b842b9d11a Merge pull request 'remove defmt::Format impl for MetadataPduCreator' (#94) from fix-defmt-derives into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #94
2024-05-14 19:24:57 +02:00
374c034e92 add miri chapter in README
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-05-14 15:37:20 +02:00
791c7f6e02 it is now possible to run cargo miri 2024-05-14 15:34:40 +02:00
8001938507 remove defmt::Format impl for MetadataPduCreator
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
2024-05-14 15:01:26 +02:00
73ab7ff148 Merge pull request 'add doctests to github CI' (#93) from github-ci-doctest into main
Some checks failed
Rust/spacepackets/pipeline/head There was a failure building this commit
Reviewed-on: #93
2024-05-02 14:56:13 +02:00
c59d01174f add doctests to github CI
Some checks are pending
Rust/spacepackets/pipeline/head Build queued...
2024-05-02 14:48:31 +02:00
eb49bff0c9 Merge pull request 'update github CI' (#92) from update-github-ci into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #92
2024-05-02 14:29:53 +02:00
af392d40d0 this might work
Some checks are pending
Rust/spacepackets/pipeline/head Build queued...
Rust/spacepackets/pipeline/pr-main Build queued...
2024-05-02 14:22:03 +02:00
b78bfe2114 some fixes
Some checks are pending
Rust/spacepackets/pipeline/head Build queued...
Rust/spacepackets/pipeline/pr-main Build queued...
2024-05-02 14:16:20 +02:00
69a3b1d8f3 update github CI
Some checks are pending
Rust/spacepackets/pipeline/pr-main Build queued...
Rust/spacepackets/pipeline/head Build started...
2024-05-02 14:12:26 +02:00
e7b3ba9575 Merge pull request 'date correction' (#91) from date-correction into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #91
2024-04-22 10:19:19 +02:00
c515535ccd date correction
Some checks are pending
Rust/spacepackets/pipeline/head Build queued...
2024-04-22 10:18:35 +02:00
95158a8cd2 Merge pull request 'prepare next patch version' (#90) from small-improvements-and-fixes into main
All checks were successful
Rust/spacepackets/pipeline/head This commit looks good
Reviewed-on: #90
2024-04-22 10:15:21 +02:00
8b1ccb0cd0 prepare next patch version 2024-04-20 10:42:36 +02:00
38 changed files with 13209 additions and 2826 deletions

View File

@@ -1,113 +1,77 @@
on: [push]
name: ci
on: [push, pull_request]
jobs:
check:
name: Check
name: Check build
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: actions-rs/cargo@v1
with:
command: check
args: --release
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- run: cargo check --release
msrv:
name: Check with MSRV
test:
name: Run Tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: 1.65.0
override: true
profile: minimal
- uses: actions-rs/cargo@v1
with:
command: check
args: --release
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Install nextest
uses: taiki-e/install-action@nextest
- run: cargo nextest run --all-features
- run: cargo test --doc
msrv:
name: Check MSRV
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.83
- run: cargo check --release
cross-check:
name: Check Cross
name: Check Cross-Compilation
runs-on: ubuntu-latest
strategy:
matrix:
target:
- armv7-unknown-linux-gnueabihf
- thumbv6m-none-eabi
- thumbv7em-none-eabihf
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
with:
profile: minimal
toolchain: stable
target: ${{ matrix.target }}
override: true
- uses: actions-rs/cargo@v1
with:
use-cross: true
command: check
args: --release --target=${{ matrix.target }} --no-default-features
targets: "armv7-unknown-linux-gnueabihf, thumbv7em-none-eabihf, thumbv6m-none-eabi"
- run: cargo check --release --target=${{matrix.target}} --no-default-features
fmt:
name: Rustfmt
name: Check formatting
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
with:
profile: minimal
toolchain: stable
override: true
- run: rustup component add rustfmt
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
components: rustfmt
- run: cargo fmt --all -- --check
check-doc:
docs:
name: Check Documentation Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
profile: minimal
- uses: actions-rs/cargo@v1
with:
command: doc
args: --all-features
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@nightly
- run: RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options" cargo +nightly doc --all-features
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
with:
profile: minimal
toolchain: stable
- run: rustup component add clippy
- uses: actions-rs/cargo@v1
with:
command: clippy
args: -- -D warnings
ci:
if: ${{ success() }}
# all new jobs must be added to this list
needs: [check, fmt, clippy]
runs-on: ubuntu-latest
steps:
- name: CI succeeded
run: exit 0
components: clippy
- run: cargo clippy -- -D warnings

View File

@@ -8,6 +8,189 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.17.0] 2025-11-06
## Changed
- `CdsCommon` renamed to `CdsBase`
- cfdp: Removed `FileDirectiveType` variant `*Pdu` suffix
- ecss: Renamed `Subservice` to `MessageSubtypeId`
- Simplified CDS short timestamp: it contains one field fewer, which reduces the serialization length.
- Renamed `UnsignedEnum::value` to `UnsignedEnum::value_raw`, `value` is reserved for the `const`
value getter.
- Renamed `CcsdsPrimaryHeader::from_composite_fields` to
`CcsdsPrimaryHeader::new_from_composite_fields`
- Renamed `PusPacket::service` to `PusPacket::service_type_id` and `PusPacket::subservice` to
`PusPacket::message_subtype_id`. Also added `PusPacket::message_type_id`. Performed the same
change for the ECSS PUS C secondary header traits.
## Added
- Added `CcsdsPacketCreator`, `CcsdsPacketReader`, `CcsdsPacketCreatorWithReservedData` and
`CcsdsPacketCreatorOwned` which simplify the process of creating full CCSDS space packets.
- Added a new optional `portable-atomic` feature. It is optional because portable atomics might not
work on every architecture and additionally require atomic CAS support to be enabled for the crate.
## Fixed
- All `core::sync::Atomic?` usages are feature gated properly to allow compilation on systems
without atomic CAS.
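
The feature gating and `portable-atomic` fallback described above typically look like the following
minimal sketch. This is an illustration only, assuming an optional `portable-atomic` dependency with
its `require-cas` feature; it is not the crate's actual implementation.

```rust
// Sketch only: selects between core atomics and portable-atomic via a feature flag.
#[cfg(not(feature = "portable-atomic"))]
use core::sync::atomic::{AtomicU16, Ordering};
#[cfg(feature = "portable-atomic")]
use portable_atomic::{AtomicU16, Ordering};

/// Wrapping sequence counter that uses core atomics on targets with atomic CAS
/// support and falls back to `portable-atomic` elsewhere.
pub struct AtomicSeqCounter(AtomicU16);

impl AtomicSeqCounter {
    pub const fn new() -> Self {
        Self(AtomicU16::new(0))
    }

    /// Returns the current value and increments it, masked to the 14-bit CCSDS
    /// sequence count range.
    pub fn get_and_increment(&self) -> u16 {
        self.0.fetch_add(1, Ordering::Relaxed) & 0x3FFF
    }
}
```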
# [v0.16.1] 2025-09-26
## Fixed
- `SpHeader::packet_len` is public now.
# [v0.16.0] 2025-09-24
- Bump Rust MSRV to v1.83
## Changed
- `PusTcCreator` has its own `service`, `subservice` and `apid` methods and does not require trait
imports anymore.
- CFDP NAK PDU `SegmentRequestIter` is not generic over the file size anymore. Instead, the
iterator returns pairs of `u64` for both large and normal file size.
- `PusVersion::VersionNotSupported` contains raw version number instead of `PusVersion` enum now
to make it more flexible.
- `pus_version` API now returns a `Result<PusVersion, u8>` instead of a `PusVersion` to allow
modelling invalid version numbers properly.
- Renamed `CcsdsPacket::total_len` to `CcsdsPacket::packet_len`
- Renamed `SequenceCountProvider` to `SequenceCounter`
- Renamed `SeqCountProviderSimple` to `SequenceCounterSimple`
- Renamed `CcsdsSimpleSeqCountProvider` to `SequenceCounterCcsdsSimple`
- Renamed `SeqCountProviderSync` to `SequenceCounterSync`
- Renamed `PusPacket::opt_crc16` to `PusPacket::checksum`
- Renamed `PacketSequenceCtrl` to `PacketSequenceControl`
- ECSS checksum generation is now optional as specified in the standard. Added `has_checksum`
parameters for ECSS TM/TC creators and readers to reflect this.
- APID is represented by `arbitrary-int::u11` while the sequence count is represented by
`arbitrary-int::u14`. A lot of corresponding checks were removed because the types now ensure
value validity.
- ACK field changed from `u8` to `AckFlags` structure.
- PUS version raw representation is `u4` now.
- SC time reference status representation is `u4` now.
- Renamed `ptype` to `packet_type`
- Renamed `PduHeader::new_no_file_data` to `PduHeader::new_for_file_directive`
- Renamed `FinishedPduCreator::new_generic` to `new` and `new_default` to `new_no_error`
## Removed
- `PusVersion::Invalid`, which will be modelled with `Result<PusVersion, u8>` now.
## Added
- `cfdp::pdu::ack::InvalidAckedDirectiveCodeError` which is returned by the `AckPdu` constructor.
- `cfdp::pdu::nak::NakPduCreatorWithReservedSegReqsBuf` constructor which exposes the segment
request buffer mutably to avoid the need for a separate segment request buffer.
- `SpHeader::packet_len` direct method.
- `AckFlags` which is implemented with `bitbybit::bitfield`
- `ApidOutOfRangeError` and `SequenceCountOutOfRangeError`
- Added PUS A legacy support for telecommands inside the `ecss.tc_pus_a` module
- Added `SequenceCounter::increment_mut` and `SequenceCounter::get_and_increment_mut`
- Implemented `SequenceCounter` for `Atomic` unsigned types and references of them
- `PusPacket::has_checksum` and `WritablePusPacket::has_checksum`
- PUS TC builder API, either via `PusTcBuilder::new`, or `PusTcCreator::builder`
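
As a rough illustration of the builder-style construction named in the last item, here is a minimal,
self-contained sketch. Apart from the `PusTcBuilder::new` entry point listed above, every field and
method name is hypothetical and does not reflect the crate's real signatures.

```rust
// Hypothetical builder sketch; names and defaults are illustrative only.
#[derive(Debug, Default)]
struct PusTcBuilder {
    apid: u16,
    service: u8,
    subservice: u8,
    app_data: Vec<u8>,
}

impl PusTcBuilder {
    fn new(apid: u16) -> Self {
        Self { apid, ..Self::default() }
    }
    fn service(mut self, service: u8) -> Self {
        self.service = service;
        self
    }
    fn subservice(mut self, subservice: u8) -> Self {
        self.subservice = subservice;
        self
    }
    fn app_data(mut self, data: &[u8]) -> Self {
        self.app_data = data.to_vec();
        self
    }
}

fn main() {
    // Chain the setters, then hand the finished builder to whatever performs serialization.
    let tc = PusTcBuilder::new(0x02).service(17).subservice(1).app_data(&[]);
    println!("{tc:?}");
}
```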
# [v0.15.0] 2025-07-18
## Added
- `PusTcCreatorWithReservedAppData` and `PusTmCreatorWithReservedSourceData` constructor variants
which allow writing source/app data into the serialization buffer directly without
requiring an extra buffer.
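
The following is a conceptual sketch of the reserved source/app data idea described above, not the
crate's actual API: space for the user data is reserved inside the destination buffer and handed out
as a mutable slice, so the payload can be produced in place without an intermediate buffer.

```rust
// Conceptual sketch: reserve app data space in the target buffer and expose it mutably.
fn write_with_reserved_app_data<'a>(
    buf: &'a mut [u8],
    header: &[u8],
    app_data_len: usize,
) -> (&'a mut [u8], usize) {
    let total_len = header.len() + app_data_len;
    buf[..header.len()].copy_from_slice(header);
    // The caller fills this slice directly, e.g. from a file system read.
    (&mut buf[header.len()..total_len], total_len)
}

fn main() {
    let mut tx_buf = [0u8; 64];
    // Illustrative header bytes only.
    let header = [0x18, 0x01, 0xC0, 0x00, 0x00, 0x03];
    let (app_data, total_len) = write_with_reserved_app_data(&mut tx_buf, &header, 4);
    app_data.copy_from_slice(&[1, 2, 3, 4]);
    assert_eq!(total_len, 10);
}
```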
# [v0.14.0] 2025-05-10
## Changed
- Moved CRC constants/implementations to dedicated `crc` module.
- `crc::CRC_CCITT_FALSE_NO_TABLE` and `crc::CRC_CCITT_FALSE_BIG_TABLE` variants.
- Renamed `PusPacket::crc16` to `PusPacket::opt_crc16`.
## Added
- `WritablePusPacket::write_to_bytes_crc_no_table` and `WritablePusPacket::write_to_bytes_no_crc`
variants.
- `PusTmReader::new_crc_no_table` and `PusTcReader::new_crc_no_table` variants.
- `crc16` methods for PUS TM and PUS TC reader.
- PUS TM and PUS TC reader now return the reader instance directly instead of a tuple of the reader
and the read size. The instance `total_len` method can be used to retrieve the read length.
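
A hedged sketch of the new call shape, assuming the `spacepackets::ecss::tc::PusTcReader` path and
that the reader's error type can be boxed:

```rust
// Sketch of the reader API change described above: the constructor now returns the
// reader directly instead of a (reader, size) tuple; the read size comes from total_len().
use spacepackets::ecss::tc::PusTcReader;

fn handle_tc(raw: &[u8]) -> Result<(), Box<dyn std::error::Error>> {
    let reader = PusTcReader::new(raw)?; // previously: let (reader, read_len) = ...
    let read_len = reader.total_len();
    println!("read {read_len} bytes of PUS TC data");
    Ok(())
}
```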
# [v0.13.1] 2025-03-21
- Bugfix due to operator precedence for `PusTcSecondaryHeader::pus_version`,
`PusTcSecondaryHeaderWithoutTimestamp::pus_version`, `CdsTime::from_bytes_with_u16_days` and
`CdsTime::from_bytes_with_u24_days`
# [v0.13.0] 2024-11-08
- Bumped MSRV to 1.81.0
- Bump `zerocopy` to v0.8.0
- Bump `thiserror` to v2.0.0
## Changed
- Migrated all Error implementations to thiserror, improved some naming and error handling in
general
# [v0.12.0] 2024-09-10
- Bumped MSRV to 1.70.0
## Added
- Added new `cfdp::tlv::TlvOwned` type which erases the lifetime and is clonable (see the conceptual sketch after this list).
- Dedicated `cfdp::tlv::TlvLvDataTooLarge` error struct for APIs where this is the only possible
API error.
- Added a File Data PDU API which takes the expected file data size and then exposes the unwritten
file data field as a mutable slice. This allows reading data from the virtual file system
API into the file data buffer without an intermediate buffer.
- Generic `EofPdu::new` constructor.
- Added generic sequence counter module.
- Added `MsgToUserTlv::to_tlv` converter which converts the specific type into a generic `Tlv`.
- Implemented `From<MsgToUserTlv> for Tlv` converter trait.
- Added CFDP maximum file segment length calculator method `calculate_max_file_seg_len_for_max_packet_len_and_pdu_header`
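
To illustrate the lifetime-erasure idea behind `TlvOwned` (see the first item of this list), here is
a small self-contained sketch; the types are simplified stand-ins, not the crate's actual definitions.

```rust
// Toy illustration: a borrowed TLV vs. an owned, lifetime-erased and clonable variant.
struct Tlv<'data> {
    tlv_type: u8,
    data: &'data [u8],
}

#[derive(Debug, Clone)]
struct TlvOwned {
    tlv_type: u8,
    data: Vec<u8>,
}

impl Tlv<'_> {
    /// Erase the lifetime by copying the value field into an owned buffer.
    fn to_owned_tlv(&self) -> TlvOwned {
        TlvOwned {
            tlv_type: self.tlv_type,
            data: self.data.to_vec(),
        }
    }
}

fn main() {
    let raw = [1, 2, 3];
    let borrowed = Tlv { tlv_type: 0x05, data: &raw };
    // The owned variant no longer borrows `raw` and can be cloned and stored freely.
    let owned = borrowed.to_owned_tlv();
    println!("{:?}", owned.clone());
}
```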
## Added and Changed
- Added new `ReadableTlv` to avoid some boilerplate code and have a common abstraction implemented
for both `Tlv` and `TlvOwned` to read the raw TLV data field and its length.
- Replaced `cfdp::tlv::TlvLvError` by `cfdp::tlv::TlvLvDataTooLarge` where applicable.
## Fixed
- Fixed an error in the EOF writer which wrote the fault location to the wrong buffer position.
- cfdp `ConditionCode::CheckLimitReached` previously had the wrong numerical value of `0b1001` (9)
and now has the correct value of `0b1010` (10).
## Changed
- Minor documentation build updates.
- Increased delegate version range to v0.13
# [v0.11.2] 2024-05-19
- Bumped MSRV to 1.68.2
## Fixed
- Removed `defmt::Format` impl for `MetadataPduCreator` which seems to be problematic.
# [v0.11.1] 2024-04-22
## Fixed
- The default data length for `SpHeader` constructors where the data field length is not
specified is now 0.
- The `SpHeader::new_from_fields` is public now.
## Added
- `SpHeader::to_vec` method.
# [v0.11.0] 2024-04-16
## Changed
@@ -494,3 +677,13 @@ The timestamp of `PusTm` is now optional. See Added and Changed section for deta
Initial release with CCSDS Space Packet Primary Header implementation and basic PUS TC and TM
implementations.
[unreleased]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.17.0...HEAD
[v0.17.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.16.1...v0.17.0
[v0.16.1]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.16.0...v0.16.1
[v0.16.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.15.0...v0.16.0
[v0.15.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.14.0...v0.15.0
[v0.14.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.13.1...v0.14.0
[v0.13.1]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.13.0...v0.13.1
[v0.13.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.12.0...v0.13.0
[v0.12.0]: https://egit.irs.uni-stuttgart.de/rust/spacepackets/compare/v0.11.2...v0.12.0

View File

@@ -1,8 +1,8 @@
[package]
name = "spacepackets"
version = "0.11.0"
version = "0.17.0"
edition = "2021"
rust-version = "1.65"
rust-version = "1.83"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
description = "Generic implementations for various CCSDS and ECSS packet standards"
homepage = "https://egit.irs.uni-stuttgart.de/rust/spacepackets"
@@ -14,57 +14,34 @@ categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-sup
[dependencies]
crc = "3"
delegate = ">=0.8, <0.11"
delegate = "0.13"
paste = "1"
zerocopy = { version = "0.8", features = ["derive"] }
thiserror = { version = "2", default-features = false }
num_enum = { version = "0.7", default-features = false }
num-traits = { version = "0.2", default-features = false }
serde = { version = "1", optional = true, default-features = false, features = ["derive"] }
arbitrary-int = { version = "2" }
portable-atomic = { version = "1", optional = true }
bitbybit = "1.4"
[dependencies.zerocopy]
version = "0.7"
features = ["derive"]
[dependencies.thiserror]
version = "1"
optional = true
[dependencies.num_enum]
version = ">0.5, <=0.7"
default-features = false
[dependencies.serde]
version = "1"
optional = true
default-features = false
features = ["derive"]
[dependencies.time]
version = "0.3"
default-features = false
optional = true
[dependencies.chrono]
version = "0.4"
default-features = false
optional = true
[dependencies.num-traits]
version = "0.2"
default-features = false
[dependencies.defmt]
version = "0.3"
optional = true
[dev-dependencies]
postcard = "1"
chrono = "0.4"
time = { version = "0.3", default-features = false, optional = true }
chrono = { version = "0.4", default-features = false, optional = true }
defmt = { version = "1", default-features = false, optional = true }
[features]
default = ["std"]
std = ["chrono/std", "chrono/clock", "alloc", "thiserror"]
serde = ["dep:serde", "chrono/serde"]
alloc = ["postcard/alloc", "chrono/alloc"]
chrono = ["dep:chrono"]
std = ["alloc", "chrono/std", "chrono/clock", "thiserror/std"]
portable-atomic = ["dep:portable-atomic", "portable-atomic/require-cas"]
defmt = ["dep:defmt", "arbitrary-int/defmt"]
serde = ["dep:serde", "chrono?/serde", "arbitrary-int/serde"]
alloc = ["chrono?/alloc", "defmt?/alloc", "serde?/alloc"]
timelib = ["dep:time"]
defmt = ["dep:defmt"]
[dev-dependencies]
postcard = { version = "1", features = ["alloc"] }
chrono = "0.4"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docs_rs", "--generate-link-to-definition"]
rustdoc-args = ["--generate-link-to-definition"]

FUNDING.yml Normal file
View File

@@ -0,0 +1 @@
github: robamu

View File

@@ -1,7 +1,7 @@
[![Crates.io](https://img.shields.io/crates/v/spacepackets)](https://crates.io/crates/spacepackets)
[![docs.rs](https://img.shields.io/docsrs/spacepackets)](https://docs.rs/spacepackets)
[![ci](https://github.com/us-irs/spacepackets-rs/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/us-irs/spacepackets-rs/actions/workflows/ci.yml)
[![coverage](https://shields.io/endpoint?url=https://absatsw.irs.uni-stuttgart.de/projects/spacepackets/coverage-rs/latest/coverage.json)](https://absatsw.irs.uni-stuttgart.de/projects/spacepackets/coverage-rs/latest/index.html)
[![matrix chat](https://img.shields.io/matrix/sat-rs%3Amatrix.org)](https://matrix.to/#/#sat-rs:matrix.org)
ECSS and CCSDS Spacepackets
======
@@ -43,6 +43,9 @@ Currently, this includes the following components:
- [`timelib`](https://crates.io/crates/time): Add basic support for the `time` time library.
- [`defmt`](https://defmt.ferrous-systems.com/): Add support for `defmt` by adding the
[`defmt::Format`](https://defmt.ferrous-systems.com/format) derive on many types.
- [`portable-atomic`](https://github.com/taiki-e/portable-atomic): Basic support for the `portable-atomic`
crate in addition to the support for core atomic types. This requires atomic CAS support to be
enabled in the `portable-atomic` crate.
# Examples
@@ -51,13 +54,21 @@ usage examples.
# Coverage
Coverage was generated using [`grcov`](https://github.com/mozilla/grcov). If you have not done so
already, install the `llvm-tools-preview`:
Coverage can be generated using [`llvm-cov`](https://github.com/taiki-e/cargo-llvm-cov). If you have not done so
already, install the tool:
```sh
rustup component add llvm-tools-preview
cargo install grcov --locked
cargo +stable install cargo-llvm-cov --locked
```
After that, you can simply run `coverage.py` to test the project with coverage. You can optionally
supply the `--open` flag to open the coverage report in your webbrowser.
After this, you can run `cargo llvm-cov nextest` to run all the tests and display coverage.
# Miri
You can run the [`miri`](https://github.com/rust-lang/miri) tool on this library to check for
undefined behaviour (UB). This library does not use any `unsafe` code blocks, but `miri` could
still catch UB from used libraries.
```sh
cargo +nightly miri nextest run --all-features
```

View File

@@ -21,7 +21,9 @@ pipeline {
}
stage('Docs') {
steps {
sh 'cargo +nightly doc --all-features'
sh """
RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options" cargo +nightly doc --all-features
"""
}
}
stage('Rustfmt') {

View File

@@ -1,54 +0,0 @@
#!/usr/bin/env python3
import os
import logging
import argparse
import webbrowser
_LOGGER = logging.getLogger()
def generate_cov_report(open_report: bool, format: str):
logging.basicConfig(level=logging.INFO)
os.environ["RUSTFLAGS"] = "-Cinstrument-coverage"
os.environ["LLVM_PROFILE_FILE"] = "target/coverage/%p-%m.profraw"
_LOGGER.info("Executing tests with coverage")
os.system("cargo test --all-features")
out_path = "./target/debug/coverage"
if format == "lcov":
out_path = "./target/debug/lcov.info"
os.system(
f"grcov . -s . --binary-path ./target/debug/ -t {format} --branch --ignore-not-existing "
f"-o {out_path}"
)
if format == "lcov":
os.system(
"genhtml -o ./target/debug/coverage/ --show-details --highlight --ignore-errors source "
"--legend ./target/debug/lcov.info"
)
if open_report:
coverage_report_path = os.path.abspath("./target/debug/coverage/index.html")
webbrowser.open_new_tab(coverage_report_path)
_LOGGER.info("Done")
def main():
parser = argparse.ArgumentParser(
description="Generate coverage report and optionally open it in a browser"
)
parser.add_argument(
"--open", action="store_true", help="Open the coverage report in a browser"
)
parser.add_argument(
"--format",
choices=["html", "lcov"],
default="html",
help="Choose report format (html or lcov)",
)
args = parser.parse_args()
generate_cov_report(args.open, args.format)
if __name__ == "__main__":
main()

justfile Normal file
View File

@@ -0,0 +1,36 @@
all: check build embedded test clippy check-fmt docs coverage
clippy:
cargo clippy -- -D warnings
fmt:
cargo fmt --all
check-fmt:
cargo fmt --all -- --check
check:
cargo check --all-features
embedded:
cargo build --target thumbv7em-none-eabihf --no-default-features
cargo build --target thumbv6m-none-eabi --no-default-features
test:
cargo nextest r --all-features
cargo test --doc
build:
cargo build --all-features
docs:
RUSTDOCFLAGS="--cfg docsrs -Z unstable-options --generate-link-to-definition" cargo +nightly doc --all-features
docs-html:
RUSTDOCFLAGS="--cfg docsrs -Z unstable-options --generate-link-to-definition" cargo +nightly doc --all-features --open
coverage:
cargo llvm-cov nextest
coverage-html:
cargo llvm-cov nextest --html --open

View File

@@ -4,7 +4,9 @@ Checklist for new releases
# Pre-Release
1. Make sure any new modules are documented sufficiently and check docs with
`cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docs_rs"]' --open`.
`RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options" cargo +nightly doc --all-features --open`
or `cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docsrs", "--generate-link-to-definition"]' --open`
(was problematic on more recent nightly versions).
2. Bump version specifier in `Cargo.toml`.
3. Update `CHANGELOG.md`: Convert `unreleased` section into version section with date and add new
`unreleased` section.

View File

@@ -1,5 +1,4 @@
//! Generic CFDP length-value (LV) abstraction as specified in CFDP 5.1.8.
use crate::cfdp::TlvLvError;
use crate::ByteConversionError;
use core::str::Utf8Error;
#[cfg(feature = "serde")]
@@ -7,6 +6,9 @@ use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::string::String;
use super::TlvLvDataTooLargeError;
/// Minimum length of a CFDP length-value structure in bytes.
pub const MIN_LV_LEN: usize = 1;
/// Generic CFDP length-value (LV) abstraction as specified in CFDP 5.1.8.
@@ -62,10 +64,14 @@ pub(crate) fn generic_len_check_deserialization(
}
impl<'data> Lv<'data> {
/// Minimum length of a LV structure in bytes.
pub const MIN_LEN: usize = MIN_LV_LEN;
/// Generic constructor.
#[inline]
pub fn new(data: &[u8]) -> Result<Lv, TlvLvError> {
pub fn new(data: &[u8]) -> Result<Lv<'_>, TlvLvDataTooLargeError> {
if data.len() > u8::MAX as usize {
return Err(TlvLvError::DataTooLarge(data.len()));
return Err(TlvLvDataTooLargeError(data.len()));
}
Ok(Lv {
data,
@@ -85,7 +91,7 @@ impl<'data> Lv<'data> {
/// Helper function to build a string LV. This is especially useful for the file or directory
/// path LVs
#[inline]
pub fn new_from_str(str_slice: &str) -> Result<Lv, TlvLvError> {
pub fn new_from_str(str_slice: &str) -> Result<Lv<'_>, TlvLvDataTooLargeError> {
Self::new(str_slice.as_bytes())
}
@@ -93,7 +99,7 @@ impl<'data> Lv<'data> {
/// path LVs
#[cfg(feature = "std")]
#[inline]
pub fn new_from_string(string: &'data String) -> Result<Lv<'data>, TlvLvError> {
pub fn new_from_string(string: &'data String) -> Result<Lv<'data>, TlvLvDataTooLargeError> {
Self::new(string.as_bytes())
}
@@ -115,6 +121,7 @@ impl<'data> Lv<'data> {
self.data.len() == 0
}
/// Raw value part of the LV.
#[inline]
pub fn value(&self) -> &[u8] {
self.data
@@ -176,11 +183,11 @@ impl<'data> Lv<'data> {
}
#[cfg(test)]
pub mod tests {
use super::*;
mod tests {
use alloc::string::ToString;
use crate::cfdp::TlvLvError;
use super::*;
use crate::ByteConversionError;
use std::string::String;
@@ -271,15 +278,11 @@ pub mod tests {
let lv = Lv::new(&data_big);
assert!(lv.is_err());
let error = lv.unwrap_err();
if let TlvLvError::DataTooLarge(size) = error {
assert_eq!(size, u8::MAX as usize + 1);
assert_eq!(
error.to_string(),
"data with size 256 larger than allowed 255 bytes"
);
} else {
panic!("invalid exception {:?}", error)
}
assert_eq!(error.0, u8::MAX as usize + 1);
assert_eq!(
error.to_string(),
"data with size 256 larger than allowed 255 bytes"
);
}
#[test]

View File

@@ -1,11 +1,8 @@
//! Low-level CCSDS File Delivery Protocol (CFDP) support according to [CCSDS 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf).
use crate::ByteConversionError;
use core::fmt::{Display, Formatter};
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
pub mod lv;
pub mod pdu;
@@ -16,39 +13,55 @@ pub const CFDP_VERSION_2_NAME: &str = "CCSDS 727.0-B-5";
/// Currently, only this version is supported.
pub const CFDP_VERSION_2: u8 = 0b001;
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
/// PDU type.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum PduType {
/// File directive PDU.
FileDirective = 0,
/// File data PDU.
FileData = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
/// PDU direction.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum Direction {
/// Going towards the file receiver.
TowardsReceiver = 0,
/// Going towards the file sender.
TowardsSender = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
/// PDU transmission mode.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum TransmissionMode {
/// Acknowledged (class 1) transfer.
Acknowledged = 0,
/// Unacknowledged (class 2) transfer.
Unacknowledged = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
/// CRC flag.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum CrcFlag {
/// No CRC for the packet.
NoCrc = 0,
/// Packet has CRC.
WithCrc = 1,
}
@@ -71,52 +84,76 @@ impl From<CrcFlag> for bool {
}
/// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum SegmentMetadataFlag {
/// Segment metadata not present.
NotPresent = 0,
/// Segment metadata present.
Present = 1,
}
/// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum SegmentationControl {
/// No record boundary preservation.
NoRecordBoundaryPreservation = 0,
/// With record boundary preservation.
WithRecordBoundaryPreservation = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
/// Fault handler codes according to the CFDP standard.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u3, exhaustive = false)]
#[repr(u8)]
pub enum FaultHandlerCode {
/// Notice of cancellation fault handler code.
NoticeOfCancellation = 0b0001,
/// Notice of suspension fault handler code.
NoticeOfSuspension = 0b0010,
/// Ignore error fault handler code.
IgnoreError = 0b0011,
/// Abandon transaction fault handler code.
AbandonTransaction = 0b0100,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
/// CFDP condition codes.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u4, exhaustive = false)]
#[repr(u8)]
pub enum ConditionCode {
/// This is not an error condition for which a faulty handler override can be specified
NoError = 0b0000,
/// Positive acknowledgement limit reached.
PositiveAckLimitReached = 0b0001,
/// Keep-alive limit reached.
KeepAliveLimitReached = 0b0010,
/// Invalid transmission mode.
InvalidTransmissionMode = 0b0011,
/// Filestore rejection.
FilestoreRejection = 0b0100,
/// File checksum error.
FileChecksumFailure = 0b0101,
/// File size error.
FileSizeError = 0b0110,
/// NAK limit reached.
NakLimitReached = 0b0111,
/// Inactivity detected.
InactivityDetected = 0b1000,
CheckLimitReached = 0b1001,
/// Check limit reached.
CheckLimitReached = 0b1010,
/// Unsupported checksum type.
UnsupportedChecksumType = 0b1011,
/// Not an actual fault condition for which fault handler overrides can be specified
SuspendRequestReceived = 0b1110,
@@ -124,9 +161,11 @@ pub enum ConditionCode {
CancelRequestReceived = 0b1111,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
/// Large file flag.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u1, exhaustive = true)]
#[repr(u8)]
pub enum LargeFileFlag {
/// 32 bit maximum file size and FSS size
@@ -136,14 +175,16 @@ pub enum LargeFileFlag {
}
/// Transaction status for the ACK PDU field according to chapter 5.2.4 of the CFDP standard.
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u2, exhaustive = true)]
#[repr(u8)]
pub enum TransactionStatus {
/// Transaction is not currently active and the CFDP implementation does not retain a
/// transaction history.
Undefined = 0b00,
/// Transaction is currently active.
Active = 0b01,
/// Transaction was active in the past and was terminated.
Terminated = 0b10,
@@ -154,98 +195,70 @@ pub enum TransactionStatus {
/// Checksum types according to the
/// [SANA Checksum Types registry](https://sanaregistry.org/r/checksum_identifiers/)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum ChecksumType {
/// Modular legacy checksum
Modular = 0,
/// CRC32 Proximity-1.
Crc32Proximity1 = 1,
/// CRC32C.
Crc32C = 2,
/// Polynomial: 0x4C11DB7. Preferred checksum for now.
/// CRC32. Polynomial: 0x4C11DB7. Preferred checksum for now.
Crc32 = 3,
/// Null checksum (no checksum).
#[default]
NullChecksum = 15,
}
impl Default for ChecksumType {
fn default() -> Self {
Self::NullChecksum
}
}
/// Raw null checksum.
pub const NULL_CHECKSUM_U32: [u8; 4] = [0; 4];
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
/// TLV or LV data larger than allowed [u8::MAX].
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[error("data with size {0} larger than allowed {max} bytes", max = u8::MAX)]
pub struct TlvLvDataTooLargeError(pub usize);
/// First value: Found value. Second value: Expected value if there is one.
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[error("invalid TLV type field, found {found}, expected {expected:?}")]
pub struct InvalidTlvTypeFieldError {
found: u8,
expected: Option<u8>,
}
/// Generic TLV/LV error.
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum TlvLvError {
DataTooLarge(usize),
ByteConversion(ByteConversionError),
/// First value: Found value. Second value: Expected value if there is one.
InvalidTlvTypeField {
found: u8,
expected: Option<u8>,
},
/// Logically invalid value length detected. The value length may not exceed 255 bytes.
/// Depending on the concrete TLV type, the value length may also be logically invalid.
/// Data too large error.
#[error("{0}")]
DataTooLarge(#[from] TlvLvDataTooLargeError),
/// Byte conversion error.
#[error("byte conversion error: {0}")]
ByteConversion(#[from] ByteConversionError),
/// Invalid TLV type field error.
#[error("{0}")]
InvalidTlvTypeField(#[from] InvalidTlvTypeFieldError),
/// Invalid value length.
#[error("invalid value length {0}")]
InvalidValueLength(usize),
/// Only applies to filestore requests and responses. Second name was missing where one is
/// expected.
#[error("second name missing for filestore request or response")]
SecondNameMissing,
/// Invalid action code for filestore requests or responses.
#[error("invalid action code {0}")]
InvalidFilestoreActionCode(u8),
}
impl From<ByteConversionError> for TlvLvError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversion(value)
}
}
impl Display for TlvLvError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
TlvLvError::DataTooLarge(data_len) => {
write!(
f,
"data with size {} larger than allowed {} bytes",
data_len,
u8::MAX
)
}
TlvLvError::ByteConversion(e) => {
write!(f, "tlv or lv byte conversion: {}", e)
}
TlvLvError::InvalidTlvTypeField { found, expected } => {
write!(
f,
"invalid TLV type field, found {found}, expected {expected:?}"
)
}
TlvLvError::InvalidValueLength(len) => {
write!(f, "invalid value length {len}")
}
TlvLvError::SecondNameMissing => {
write!(f, "second name missing for filestore request or response")
}
TlvLvError::InvalidFilestoreActionCode(raw) => {
write!(f, "invalid filestore action code with raw value {raw}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for TlvLvError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
TlvLvError::ByteConversion(e) => Some(e),
_ => None,
}
}
}
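// A minimal sketch of how the thiserror-based error types above compose; check_value_len is a
// hypothetical helper, not part of the crate. A caller which detects an oversized value can
// construct the dedicated TlvLvDataTooLargeError and propagate it as a TlvLvError through the
// From impl generated by #[from].
fn check_value_len(value_len: usize) -> Result<(), TlvLvError> {
    if value_len > u8::MAX as usize {
        // The #[from] attribute on the DataTooLarge variant provides this conversion.
        return Err(TlvLvDataTooLargeError(value_len).into());
    }
    Ok(())
}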
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -1,3 +1,4 @@
//! # Acknowledgement (ACK) PDU packet implementation.
use crate::{
cfdp::{ConditionCode, CrcFlag, Direction, TransactionStatus},
ByteConversionError,
@@ -10,6 +11,11 @@ use super::{
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// Invalid [FileDirectiveType] of the acknowledged PDU error.
#[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)]
#[error("invalid directive code of acknowledged PDU")]
pub struct InvalidAckedDirectiveCodeError(pub FileDirectiveType);
/// ACK PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.2.4.
@@ -24,21 +30,19 @@ pub struct AckPdu {
}
impl AckPdu {
/// Constructor.
pub fn new(
mut pdu_header: PduHeader,
directive_code_of_acked_pdu: FileDirectiveType,
condition_code: ConditionCode,
transaction_status: TransactionStatus,
) -> Result<Self, PduError> {
if directive_code_of_acked_pdu == FileDirectiveType::EofPdu {
) -> Result<Self, InvalidAckedDirectiveCodeError> {
if directive_code_of_acked_pdu == FileDirectiveType::Eof {
pdu_header.pdu_conf.direction = Direction::TowardsSender;
} else if directive_code_of_acked_pdu == FileDirectiveType::FinishedPdu {
} else if directive_code_of_acked_pdu == FileDirectiveType::Finished {
pdu_header.pdu_conf.direction = Direction::TowardsReceiver;
} else {
return Err(PduError::InvalidDirectiveType {
found: directive_code_of_acked_pdu as u8,
expected: None,
});
return Err(InvalidAckedDirectiveCodeError(directive_code_of_acked_pdu));
}
// Force correct direction flag.
let mut ack_pdu = Self {
@@ -51,6 +55,9 @@ impl AckPdu {
Ok(ack_pdu)
}
/// Constructor for an ACK PDU acknowledging an EOF PDU.
///
/// Relevant for the file receiver.
pub fn new_for_eof_pdu(
pdu_header: PduHeader,
condition_code: ConditionCode,
@@ -59,13 +66,16 @@ impl AckPdu {
// Unwrap okay here, [new] can only fail on invalid directive codes.
Self::new(
pdu_header,
FileDirectiveType::EofPdu,
FileDirectiveType::Eof,
condition_code,
transaction_status,
)
.unwrap()
}
/// Constructor for an ACK PDU acknowledging a Finished PDU.
///
/// Relevant for the file sender.
pub fn new_for_finished_pdu(
pdu_header: PduHeader,
condition_code: ConditionCode,
@@ -74,29 +84,38 @@ impl AckPdu {
// Unwrap okay here, [new] can only fail on invalid directive codes.
Self::new(
pdu_header,
FileDirectiveType::FinishedPdu,
FileDirectiveType::Finished,
condition_code,
transaction_status,
)
.unwrap()
}
/// PDU header.
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
/// Directive code of the acknowledged PDU.
#[inline]
pub fn directive_code_of_acked_pdu(&self) -> FileDirectiveType {
self.directive_code_of_acked_pdu
}
/// Condition code.
#[inline]
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
/// Transaction status.
#[inline]
pub fn transaction_status(&self) -> TransactionStatus {
self.transaction_status
}
#[inline]
fn calc_pdu_datafield_len(&self) -> usize {
if self.crc_flag() == CrcFlag::WithCrc {
return 5;
@@ -104,6 +123,7 @@ impl AckPdu {
3
}
/// Construct [Self] from the provided byte slice.
pub fn from_bytes(buf: &[u8]) -> Result<AckPdu, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
@@ -111,13 +131,13 @@ impl AckPdu {
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::AckPdu),
expected: Some(FileDirectiveType::Ack),
}
})?;
if directive_type != FileDirectiveType::AckPdu {
if directive_type != FileDirectiveType::Ack {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::AckPdu,
expected: FileDirectiveType::Ack,
});
}
current_idx += 1;
@@ -128,8 +148,8 @@ impl AckPdu {
expected: None,
}
})?;
if acked_directive_type != FileDirectiveType::EofPdu
&& acked_directive_type != FileDirectiveType::FinishedPdu
if acked_directive_type != FileDirectiveType::Eof
&& acked_directive_type != FileDirectiveType::Finished
{
return Err(PduError::InvalidDirectiveType {
found: acked_directive_type as u8,
@@ -140,27 +160,18 @@ impl AckPdu {
let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111)
.map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?;
let transaction_status = TransactionStatus::try_from(buf[current_idx] & 0b11).unwrap();
Self::new(
// Unwrap okay, validity of acked directive code was checked.
Ok(Self::new(
pdu_header,
acked_directive_type,
condition_code,
transaction_status,
)
}
}
impl CfdpPdu for AckPdu {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
.unwrap())
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::AckPdu)
}
}
impl WritablePduPacket for AckPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
/// Write [Self] to the provided buffer and return the written size.
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
@@ -170,11 +181,11 @@ impl WritablePduPacket for AckPdu {
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::AckPdu as u8;
buf[current_idx] = FileDirectiveType::Ack as u8;
current_idx += 1;
buf[current_idx] = (self.directive_code_of_acked_pdu as u8) << 4;
if self.directive_code_of_acked_pdu == FileDirectiveType::FinishedPdu {
if self.directive_code_of_acked_pdu == FileDirectiveType::Finished {
// This is the directive subtype code. It needs to be set to 0b0001 if the ACK PDU
// acknowledges a Finished PDU, and to 0b0000 otherwise.
buf[current_idx] |= 0b0001;
@@ -188,11 +199,34 @@ impl WritablePduPacket for AckPdu {
Ok(current_idx)
}
fn len_written(&self) -> usize {
/// Length of the written PDU in bytes.
pub fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
impl CfdpPdu for AckPdu {
#[inline]
fn pdu_header(&self) -> &PduHeader {
self.pdu_header()
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::Ack)
}
}
impl WritablePduPacket for AckPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.write_to_bytes(buf)
}
fn len_written(&self) -> usize {
self.len_written()
}
}
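// A minimal usage sketch of the AckPdu API above; the helper is hypothetical and pdu_conf stands
// in for a CommonPduConfig built elsewhere. It only illustrates the write and read-back cycle.
fn ack_pdu_roundtrip_sketch(pdu_conf: CommonPduConfig) -> Result<(), PduError> {
    // File directive PDUs use the dedicated header constructor.
    let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
    // Acknowledge a Finished PDU; the constructor forces the direction flag towards the receiver.
    let ack_pdu =
        AckPdu::new_for_finished_pdu(pdu_header, ConditionCode::NoError, TransactionStatus::Active);
    let mut buf: [u8; 64] = [0; 64];
    let written = ack_pdu.write_to_bytes(&mut buf)?;
    let read_back = AckPdu::from_bytes(&buf[..written])?;
    assert_eq!(read_back.directive_code_of_acked_pdu(), FileDirectiveType::Finished);
    Ok(())
}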
#[cfg(test)]
mod tests {
use crate::cfdp::{
@@ -211,10 +245,7 @@ mod tests {
assert_eq!(ack_pdu.crc_flag(), expected_crc_flag);
assert_eq!(ack_pdu.file_flag(), LargeFileFlag::Normal);
assert_eq!(ack_pdu.pdu_type(), PduType::FileDirective);
assert_eq!(
ack_pdu.file_directive_type(),
Some(FileDirectiveType::AckPdu)
);
assert_eq!(ack_pdu.file_directive_type(), Some(FileDirectiveType::Ack));
assert_eq!(ack_pdu.transmission_mode(), TransmissionMode::Acknowledged);
assert_eq!(ack_pdu.direction(), expected_dir);
assert_eq!(ack_pdu.source_id(), TEST_SRC_ID.into());
@@ -225,17 +256,17 @@ mod tests {
#[test]
fn test_basic() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new(
pdu_header,
FileDirectiveType::FinishedPdu,
FileDirectiveType::Finished,
ConditionCode::NoError,
TransactionStatus::Active,
)
.expect("creating ACK PDU failed");
assert_eq!(
ack_pdu.directive_code_of_acked_pdu(),
FileDirectiveType::FinishedPdu
FileDirectiveType::Finished
);
verify_state(&ack_pdu, CrcFlag::NoCrc, Direction::TowardsReceiver);
}
@@ -245,7 +276,7 @@ mod tests {
transaction_status: TransactionStatus,
) {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu(pdu_header, condition_code, transaction_status);
let mut buf: [u8; 64] = [0; 64];
let res = ack_pdu.write_to_bytes(&mut buf);
@@ -254,8 +285,8 @@ mod tests {
assert_eq!(written, ack_pdu.len_written());
verify_raw_header(ack_pdu.pdu_header(), &buf);
assert_eq!(buf[7], FileDirectiveType::AckPdu as u8);
assert_eq!((buf[8] >> 4) & 0b1111, FileDirectiveType::FinishedPdu as u8);
assert_eq!(buf[7], FileDirectiveType::Ack as u8);
assert_eq!((buf[8] >> 4) & 0b1111, FileDirectiveType::Finished as u8);
assert_eq!(buf[8] & 0b1111, 0b0001);
assert_eq!(buf[9] >> 4 & 0b1111, condition_code as u8);
assert_eq!(buf[9] & 0b11, transaction_status as u8);
@@ -267,15 +298,53 @@ mod tests {
generic_serialization_test(ConditionCode::NoError, TransactionStatus::Active);
}
#[test]
fn test_serialization_too_small() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new(
pdu_header,
FileDirectiveType::Finished,
ConditionCode::NoError,
TransactionStatus::Active,
)
.expect("creating ACK PDU failed");
if let Err(PduError::ByteConversion(ByteConversionError::ToSliceTooSmall {
found,
expected,
})) = ack_pdu.write_to_bytes(&mut [0; 5])
{
assert_eq!(found, 5);
assert_eq!(expected, ack_pdu.len_written());
} else {
panic!("serialization should have failed");
}
}
#[test]
fn test_serialization_fs_error() {
generic_serialization_test(ConditionCode::FileSizeError, TransactionStatus::Terminated);
}
#[test]
fn test_invalid_directive_code_of_acked_pdu() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
assert_eq!(
AckPdu::new(
pdu_header,
FileDirectiveType::Metadata,
ConditionCode::NoError,
TransactionStatus::Active,
)
.unwrap_err(),
InvalidAckedDirectiveCodeError(FileDirectiveType::Metadata)
);
}
#[test]
fn test_deserialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu(
pdu_header,
ConditionCode::NoError,
@@ -290,7 +359,7 @@ mod tests {
#[test]
fn test_with_crc() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu(
pdu_header,
ConditionCode::NoError,
@@ -307,7 +376,7 @@ mod tests {
#[test]
fn test_for_eof_pdu() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_eof_pdu(
pdu_header,
ConditionCode::NoError,
@@ -315,7 +384,7 @@ mod tests {
);
assert_eq!(
ack_pdu.directive_code_of_acked_pdu(),
FileDirectiveType::EofPdu
FileDirectiveType::Eof
);
verify_state(&ack_pdu, CrcFlag::WithCrc, Direction::TowardsSender);
}
@@ -324,7 +393,7 @@ mod tests {
#[cfg(feature = "serde")]
fn test_ack_pdu_serialization() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_eof_pdu(
pdu_header,
ConditionCode::NoError,

View File

@@ -1,3 +1,4 @@
//! # End-of-File (EOF) PDU packet implementation.
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
FileDirectiveType, PduError, PduHeader,
@@ -25,32 +26,58 @@ pub struct EofPdu {
}
impl EofPdu {
pub fn new_no_error(mut pdu_header: PduHeader, file_checksum: u32, file_size: u64) -> Self {
/// Constructor.
pub fn new(
mut pdu_header: PduHeader,
condition_code: ConditionCode,
file_checksum: u32,
file_size: u64,
fault_location: Option<EntityIdTlv>,
) -> Self {
// Force correct direction flag.
pdu_header.pdu_conf.direction = Direction::TowardsReceiver;
let mut eof_pdu = Self {
pdu_header,
condition_code: ConditionCode::NoError,
condition_code,
file_checksum,
file_size,
fault_location: None,
fault_location,
};
eof_pdu.pdu_header.pdu_datafield_len = eof_pdu.calc_pdu_datafield_len() as u16;
eof_pdu
}
/// Constructor for no error EOF PDUs.
pub fn new_no_error(pdu_header: PduHeader, file_checksum: u32, file_size: u64) -> Self {
Self::new(
pdu_header,
ConditionCode::NoError,
file_checksum,
file_size,
None,
)
}
/// PDU header.
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
/// Condition code.
#[inline]
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
/// File checksum.
#[inline]
pub fn file_checksum(&self) -> u32 {
self.file_checksum
}
/// File size.
#[inline]
pub fn file_size(&self) -> u64 {
self.file_size
}
@@ -70,6 +97,7 @@ impl EofPdu {
len
}
/// Construct [Self] from the provided byte slice.
pub fn from_bytes(buf: &[u8]) -> Result<EofPdu, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
@@ -82,13 +110,13 @@ impl EofPdu {
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::EofPdu),
expected: Some(FileDirectiveType::Eof),
}
})?;
if directive_type != FileDirectiveType::EofPdu {
if directive_type != FileDirectiveType::Eof {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::EofPdu,
expected: FileDirectiveType::Eof,
});
}
current_idx += 1;
@@ -113,20 +141,9 @@ impl EofPdu {
fault_location,
})
}
}
impl CfdpPdu for EofPdu {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::EofPdu)
}
}
impl WritablePduPacket for EofPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
/// Write [Self] to the provided buffer and return the written size.
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
@@ -136,7 +153,7 @@ impl WritablePduPacket for EofPdu {
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::EofPdu as u8;
buf[current_idx] = FileDirectiveType::Eof as u8;
current_idx += 1;
buf[current_idx] = (self.condition_code as u8) << 4;
current_idx += 1;
@@ -148,7 +165,7 @@ impl WritablePduPacket for EofPdu {
&mut buf[current_idx..],
)?;
if let Some(fault_location) = self.fault_location {
current_idx += fault_location.write_to_bytes(buf)?;
current_idx += fault_location.write_to_bytes(&mut buf[current_idx..])?;
}
if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
@@ -156,11 +173,34 @@ impl WritablePduPacket for EofPdu {
Ok(current_idx)
}
fn len_written(&self) -> usize {
/// Length of the written PDU in bytes.
pub fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
impl CfdpPdu for EofPdu {
#[inline]
fn pdu_header(&self) -> &PduHeader {
self.pdu_header()
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::Eof)
}
}
impl WritablePduPacket for EofPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.write_to_bytes(buf)
}
fn len_written(&self) -> usize {
self.len_written()
}
}
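// A minimal usage sketch of the EofPdu API above; the helper is hypothetical, pdu_conf stands in
// for a CommonPduConfig built elsewhere, and the checksum and file size values are illustrative.
fn eof_pdu_roundtrip_sketch(pdu_conf: CommonPduConfig) -> Result<(), PduError> {
    let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
    // No error condition, so no fault location TLV is attached.
    let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
    let mut buf: [u8; 64] = [0; 64];
    let written = eof_pdu.write_to_bytes(&mut buf)?;
    let read_back = EofPdu::from_bytes(&buf[..written])?;
    assert_eq!(read_back.file_checksum(), 0x01020304);
    assert_eq!(read_back.file_size(), 12);
    Ok(())
}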
#[cfg(test)]
mod tests {
use super::*;
@@ -171,19 +211,26 @@ mod tests {
use crate::cfdp::{ConditionCode, CrcFlag, LargeFileFlag, PduType, TransmissionMode};
#[cfg(feature = "serde")]
use crate::tests::generic_serde_test;
use crate::util::{UnsignedByteFieldU16, UnsignedEnum};
fn verify_state(&eof_pdu: &EofPdu, file_flag: LargeFileFlag) {
fn verify_state_no_error_no_crc(eof_pdu: &EofPdu, file_flag: LargeFileFlag) {
verify_state(eof_pdu, CrcFlag::NoCrc, file_flag, ConditionCode::NoError);
}
fn verify_state(
eof_pdu: &EofPdu,
crc_flag: CrcFlag,
file_flag: LargeFileFlag,
cond_code: ConditionCode,
) {
assert_eq!(eof_pdu.file_checksum(), 0x01020304);
assert_eq!(eof_pdu.file_size(), 12);
assert_eq!(eof_pdu.condition_code(), ConditionCode::NoError);
assert_eq!(eof_pdu.condition_code(), cond_code);
assert_eq!(eof_pdu.crc_flag(), CrcFlag::NoCrc);
assert_eq!(eof_pdu.crc_flag(), crc_flag);
assert_eq!(eof_pdu.file_flag(), file_flag);
assert_eq!(eof_pdu.pdu_type(), PduType::FileDirective);
assert_eq!(
eof_pdu.file_directive_type(),
Some(FileDirectiveType::EofPdu)
);
assert_eq!(eof_pdu.file_directive_type(), Some(FileDirectiveType::Eof));
assert_eq!(eof_pdu.transmission_mode(), TransmissionMode::Acknowledged);
assert_eq!(eof_pdu.direction(), Direction::TowardsReceiver);
assert_eq!(eof_pdu.source_id(), TEST_SRC_ID.into());
@@ -194,16 +241,16 @@ mod tests {
#[test]
fn test_basic() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
assert_eq!(eof_pdu.len_written(), pdu_header.header_len() + 2 + 4 + 4);
verify_state(&eof_pdu, LargeFileFlag::Normal);
verify_state_no_error_no_crc(&eof_pdu, LargeFileFlag::Normal);
}
#[test]
fn test_serialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
let res = eof_pdu.write_to_bytes(&mut buf);
@@ -212,7 +259,7 @@ mod tests {
assert_eq!(written, eof_pdu.len_written());
verify_raw_header(eof_pdu.pdu_header(), &buf);
let mut current_idx = eof_pdu.pdu_header().header_len();
buf[current_idx] = FileDirectiveType::EofPdu as u8;
buf[current_idx] = FileDirectiveType::Eof as u8;
current_idx += 1;
assert_eq!(
(buf[current_idx] >> 4) & 0b1111,
@@ -235,7 +282,7 @@ mod tests {
#[test]
fn test_deserialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
eof_pdu.write_to_bytes(&mut buf).unwrap();
@@ -250,7 +297,7 @@ mod tests {
#[test]
fn test_write_to_vec() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
let written = eof_pdu.write_to_bytes(&mut buf).unwrap();
@@ -261,7 +308,7 @@ mod tests {
#[test]
fn test_with_crc() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
let written = eof_pdu.write_to_bytes(&mut buf).unwrap();
@@ -271,7 +318,7 @@ mod tests {
buf[written - 1] -= 1;
let crc: u16 = ((buf[written - 2] as u16) << 8) as u16 | buf[written - 1] as u16;
let error = EofPdu::from_bytes(&buf).unwrap_err();
if let PduError::ChecksumError(e) = error {
if let PduError::Checksum(e) = error {
assert_eq!(e, crc);
} else {
panic!("expected crc error");
@@ -281,9 +328,9 @@ mod tests {
#[test]
fn test_with_large_file_flag() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Large);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
verify_state(&eof_pdu, LargeFileFlag::Large);
verify_state_no_error_no_crc(&eof_pdu, LargeFileFlag::Large);
assert_eq!(eof_pdu.len_written(), pdu_header.header_len() + 2 + 8 + 4);
}
@@ -291,8 +338,52 @@ mod tests {
#[cfg(feature = "serde")]
fn test_eof_serde() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
generic_serde_test(eof_pdu);
}
fn generic_test_with_fault_location_and_error(crc: CrcFlag) {
let pdu_conf = common_pdu_conf(crc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
let eof_pdu = EofPdu::new(
pdu_header,
ConditionCode::FileChecksumFailure,
0x01020304,
12,
Some(EntityIdTlv::new(UnsignedByteFieldU16::new(5).into())),
);
let mut expected_len = pdu_header.header_len() + 2 + 4 + 4 + 4;
if crc == CrcFlag::WithCrc {
expected_len += 2;
}
// Entity ID TLV increases the length by 4.
assert_eq!(eof_pdu.len_written(), expected_len);
verify_state(
&eof_pdu,
crc,
LargeFileFlag::Normal,
ConditionCode::FileChecksumFailure,
);
let eof_vec = eof_pdu.to_vec().unwrap();
let eof_read_back = EofPdu::from_bytes(&eof_vec);
if let Err(e) = eof_read_back {
panic!("deserialization failed with: {e}")
}
let eof_read_back = eof_read_back.unwrap();
assert_eq!(eof_read_back, eof_pdu);
assert!(eof_read_back.fault_location.is_some());
assert_eq!(eof_read_back.fault_location.unwrap().entity_id().value(), 5);
assert_eq!(eof_read_back.fault_location.unwrap().entity_id().size(), 2);
}
#[test]
fn test_with_fault_location_and_error() {
generic_test_with_fault_location_and_error(CrcFlag::NoCrc);
}
#[test]
fn test_with_fault_location_and_error_and_crc() {
generic_test_with_fault_location_and_error(CrcFlag::WithCrc);
}
}

View File

@@ -1,3 +1,4 @@
//! # File Data PDU packet implementation
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
PduError, PduHeader,
@@ -10,16 +11,24 @@ use serde::{Deserialize, Serialize};
use super::{CfdpPdu, FileDirectiveType, WritablePduPacket};
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
/// Record continuation state for segment metadata.
#[derive(Debug, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u2, exhaustive = true)]
#[repr(u8)]
pub enum RecordContinuationState {
/// No start and no end.
NoStartNoEnd = 0b00,
/// Start without end.
StartWithoutEnd = 0b01,
/// End without start.
EndWithoutStart = 0b10,
/// Start and end.
StartAndEnd = 0b11,
}
/// Segment metadata structure.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct SegmentMetadata<'seg_meta> {
@@ -28,6 +37,7 @@ pub struct SegmentMetadata<'seg_meta> {
}
impl<'seg_meta> SegmentMetadata<'seg_meta> {
/// Constructor.
pub fn new(
record_continuation_state: RecordContinuationState,
metadata: Option<&'seg_meta [u8]>,
@@ -43,24 +53,30 @@ impl<'seg_meta> SegmentMetadata<'seg_meta> {
})
}
/// Record continuation state.
#[inline]
pub fn record_continuation_state(&self) -> RecordContinuationState {
self.record_continuation_state
}
/// Raw metadata slice.
#[inline]
pub fn metadata(&self) -> Option<&'seg_meta [u8]> {
self.metadata
}
pub fn written_len(&self) -> usize {
/// Length of the written segment metadata structure.
#[inline]
pub fn len_written(&self) -> usize {
// Map empty metadata to 0 and slice to its length.
1 + self.metadata.map_or(0, |meta| meta.len())
}
pub(crate) fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.written_len() {
if buf.len() < self.len_written() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.written_len(),
expected: self.len_written(),
});
}
buf[0] = ((self.record_continuation_state as u8) << 6)
@@ -68,7 +84,7 @@ impl<'seg_meta> SegmentMetadata<'seg_meta> {
if let Some(metadata) = self.metadata {
buf[1..1 + metadata.len()].copy_from_slice(metadata)
}
Ok(self.written_len())
Ok(self.len_written())
}
pub(crate) fn from_bytes(buf: &'seg_meta [u8]) -> Result<Self, ByteConversionError> {
@@ -92,130 +108,44 @@ impl<'seg_meta> SegmentMetadata<'seg_meta> {
}
}
/// File Data PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.3.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FileDataPdu<'seg_meta, 'file_data> {
struct FdPduBase<'seg_meta> {
pdu_header: PduHeader,
#[cfg_attr(feature = "serde", serde(borrow))]
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data: &'file_data [u8],
}
impl<'seg_meta, 'file_data> FileDataPdu<'seg_meta, 'file_data> {
pub fn new_with_seg_metadata(
pdu_header: PduHeader,
segment_metadata: SegmentMetadata<'seg_meta>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new_generic(pdu_header, Some(segment_metadata), offset, file_data)
impl CfdpPdu for FdPduBase<'_> {
#[inline]
fn pdu_header(&self) -> &PduHeader {
self.pdu_header()
}
pub fn new_no_seg_metadata(
pdu_header: PduHeader,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new_generic(pdu_header, None, offset, file_data)
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
None
}
}
pub fn new_generic(
mut pdu_header: PduHeader,
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
pdu_header.pdu_type = PduType::FileData;
if segment_metadata.is_some() {
pdu_header.seg_metadata_flag = SegmentMetadataFlag::Present;
}
let mut pdu = Self {
pdu_header,
segment_metadata,
offset,
file_data,
};
pdu.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
fn calc_pdu_datafield_len(&self) -> usize {
impl FdPduBase<'_> {
fn calc_pdu_datafield_len(&self, file_data_len: u64) -> usize {
let mut len = core::mem::size_of::<u32>();
if self.pdu_header.pdu_conf.file_flag == LargeFileFlag::Large {
len += 4;
}
if self.segment_metadata.is_some() {
len += self.segment_metadata.as_ref().unwrap().written_len()
len += self.segment_metadata.as_ref().unwrap().len_written()
}
len += self.file_data.len();
len += file_data_len as usize;
if self.crc_flag() == CrcFlag::WithCrc {
len += 2;
}
len
}
pub fn offset(&self) -> u64 {
self.offset
}
pub fn file_data(&self) -> &'file_data [u8] {
self.file_data
}
pub fn segment_metadata(&self) -> Option<&SegmentMetadata> {
self.segment_metadata.as_ref()
}
pub fn from_bytes<'buf: 'seg_meta + 'file_data>(buf: &'buf [u8]) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let min_expected_len = current_idx + core::mem::size_of::<u32>();
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let mut segment_metadata = None;
if pdu_header.seg_metadata_flag == SegmentMetadataFlag::Present {
segment_metadata = Some(SegmentMetadata::from_bytes(&buf[current_idx..])?);
current_idx += segment_metadata.as_ref().unwrap().written_len();
}
let (fss, offset) = read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx..]);
current_idx += fss;
if current_idx > full_len_without_crc {
return Err(ByteConversionError::FromSliceTooSmall {
found: current_idx,
expected: full_len_without_crc,
}
.into());
}
Ok(Self {
pdu_header,
segment_metadata,
offset,
file_data: &buf[current_idx..full_len_without_crc],
})
}
}
impl CfdpPdu for FileDataPdu<'_, '_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
None
}
}
impl WritablePduPacket for FileDataPdu<'_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
if buf.len() < self.len_written() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.len_written(),
}
.into());
}
fn write_common_fields_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
if self.segment_metadata.is_some() {
current_idx += self
@@ -229,6 +159,139 @@ impl WritablePduPacket for FileDataPdu<'_, '_> {
self.offset,
&mut buf[current_idx..],
)?;
Ok(current_idx)
}
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
}
/// File Data PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.3.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FileDataPdu<'seg_meta, 'file_data> {
#[cfg_attr(feature = "serde", serde(borrow))]
common: FdPduBase<'seg_meta>,
file_data: &'file_data [u8],
}
impl<'seg_meta, 'file_data> FileDataPdu<'seg_meta, 'file_data> {
/// Constructor for a file data PDU including segment metadata.
pub fn new_with_seg_metadata(
pdu_header: PduHeader,
segment_metadata: SegmentMetadata<'seg_meta>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new(pdu_header, Some(segment_metadata), offset, file_data)
}
/// Constructor for a file data PDU without segment metadata.
pub fn new_no_seg_metadata(
pdu_header: PduHeader,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new(pdu_header, None, offset, file_data)
}
/// Generic constructor for a file data PDU.
pub fn new(
mut pdu_header: PduHeader,
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
pdu_header.pdu_type = PduType::FileData;
if segment_metadata.is_some() {
pdu_header.seg_metadata_flag = SegmentMetadataFlag::Present;
}
let mut pdu = Self {
common: FdPduBase {
pdu_header,
segment_metadata,
offset,
},
file_data,
};
pdu.common.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
fn calc_pdu_datafield_len(&self) -> usize {
self.common
.calc_pdu_datafield_len(self.file_data.len() as u64)
}
/// Optional segment metadata.
#[inline]
pub fn segment_metadata(&self) -> Option<&SegmentMetadata<'_>> {
self.common.segment_metadata.as_ref()
}
/// PDU header.
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
self.common.pdu_header()
}
/// File data offset.
#[inline]
pub fn offset(&self) -> u64 {
self.common.offset
}
/// File data.
#[inline]
pub fn file_data(&self) -> &'file_data [u8] {
self.file_data
}
/// Read [Self] from the provided buffer.
pub fn from_bytes<'buf: 'seg_meta + 'file_data>(buf: &'buf [u8]) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let min_expected_len = current_idx + core::mem::size_of::<u32>();
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let mut segment_metadata = None;
if pdu_header.seg_metadata_flag == SegmentMetadataFlag::Present {
segment_metadata = Some(SegmentMetadata::from_bytes(&buf[current_idx..])?);
current_idx += segment_metadata.as_ref().unwrap().len_written();
}
let (fss, offset) = read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx..]);
current_idx += fss;
if current_idx > full_len_without_crc {
return Err(ByteConversionError::FromSliceTooSmall {
found: current_idx,
expected: full_len_without_crc,
}
.into());
}
Ok(Self {
common: FdPduBase {
pdu_header,
segment_metadata,
offset,
},
file_data: &buf[current_idx..full_len_without_crc],
})
}
/// Write [Self] to the provided buffer and return the written size.
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
if buf.len() < self.len_written() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.len_written(),
}
.into());
}
let mut current_idx = self.common.write_common_fields_to_bytes(buf)?;
buf[current_idx..current_idx + self.file_data.len()].copy_from_slice(self.file_data);
current_idx += self.file_data.len();
if self.crc_flag() == CrcFlag::WithCrc {
@@ -237,10 +300,194 @@ impl WritablePduPacket for FileDataPdu<'_, '_> {
Ok(current_idx)
}
fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
/// Length of the written PDU.
pub fn len_written(&self) -> usize {
self.common.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
impl CfdpPdu for FileDataPdu<'_, '_> {
#[inline]
fn pdu_header(&self) -> &PduHeader {
&self.common.pdu_header
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
None
}
}
impl WritablePduPacket for FileDataPdu<'_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.write_to_bytes(buf)
}
fn len_written(&self) -> usize {
self.len_written()
}
}
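// A minimal usage sketch of the FileDataPdu API above; the helper is hypothetical, pdu_conf
// stands in for a CommonPduConfig built elsewhere, and offset and file data are illustrative.
fn file_data_pdu_roundtrip_sketch(pdu_conf: CommonPduConfig) -> Result<(), PduError> {
    let pdu_header = PduHeader::new_for_file_data_default(pdu_conf, 0);
    let file_data: &[u8] = &[1, 2, 3, 4];
    // File data PDU without segment metadata, starting at file offset 10.
    let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, file_data);
    let mut buf: [u8; 64] = [0; 64];
    let written = fd_pdu.write_to_bytes(&mut buf)?;
    let read_back = FileDataPdu::from_bytes(&buf[..written])?;
    assert_eq!(read_back.offset(), 10);
    assert_eq!(read_back.file_data(), file_data);
    Ok(())
}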
/// File Data PDU creator abstraction.
///
/// This special creator object allows reading file data into the PDU buffer directly, which
/// avoids the need for an additional intermediate buffer when creating a file data PDU. The
/// structure therefore does not implement the regular [WritablePduPacket] trait.
///
/// For more information, refer to CFDP chapter 5.3.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FileDataPduCreatorWithReservedDatafield<'seg_meta> {
#[cfg_attr(feature = "serde", serde(borrow))]
common: FdPduBase<'seg_meta>,
file_data_len: u64,
}
impl<'seg_meta> FileDataPduCreatorWithReservedDatafield<'seg_meta> {
/// Constructor for a file data PDU including segment metadata.
pub fn new_with_seg_metadata(
pdu_header: PduHeader,
segment_metadata: SegmentMetadata<'seg_meta>,
offset: u64,
file_data_len: u64,
) -> Self {
Self::new(pdu_header, Some(segment_metadata), offset, file_data_len)
}
/// Constructor for a file data PDU without segment metadata.
pub fn new_no_seg_metadata(pdu_header: PduHeader, offset: u64, file_data_len: u64) -> Self {
Self::new(pdu_header, None, offset, file_data_len)
}
/// Generic constructor.
pub fn new(
mut pdu_header: PduHeader,
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data_len: u64,
) -> Self {
pdu_header.pdu_type = PduType::FileData;
if segment_metadata.is_some() {
pdu_header.seg_metadata_flag = SegmentMetadataFlag::Present;
}
let mut pdu = Self {
common: FdPduBase {
pdu_header,
segment_metadata,
offset,
},
file_data_len,
};
pdu.common.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
fn calc_pdu_datafield_len(&self) -> usize {
self.common.calc_pdu_datafield_len(self.file_data_len)
}
/// Length of the written PDU.
pub fn len_written(&self) -> usize {
self.common.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
/// This function performs a partial write by writing all data except the file data
/// and the CRC.
///
/// It returns a [FileDataPduCreatorWithUnwrittenData] which provides a mutable slice to
/// the reserved file data field. The user can read file data into this field directly and
/// then finish the PDU creation using the [FileDataPduCreatorWithUnwrittenData::finish] call.
pub fn write_to_bytes_partially<'buf>(
&self,
buf: &'buf mut [u8],
) -> Result<FileDataPduCreatorWithUnwrittenData<'buf>, PduError> {
if buf.len() < self.len_written() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.len_written(),
}
.into());
}
let mut current_idx = self.common.write_common_fields_to_bytes(buf)?;
let file_data_offset = current_idx as u64;
current_idx += self.file_data_len as usize;
if self.crc_flag() == CrcFlag::WithCrc {
current_idx += 2;
}
Ok(FileDataPduCreatorWithUnwrittenData {
write_buf: &mut buf[0..current_idx],
file_data_offset,
file_data_len: self.file_data_len,
needs_crc: self.crc_flag() == CrcFlag::WithCrc,
})
}
}
impl CfdpPdu for FileDataPduCreatorWithReservedDatafield<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.common.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
None
}
}
/// This structure is created with [FileDataPduCreatorWithReservedDatafield::write_to_bytes_partially]
/// and provides an API to read file data from the virtual filesystem into the file data PDU buffer
/// directly.
///
/// This structure provides a mutable slice to the reserved file data field. The user can read
/// file data into this field directly and then finish the PDU creation using the
/// [FileDataPduCreatorWithUnwrittenData::finish] call.
pub struct FileDataPduCreatorWithUnwrittenData<'buf> {
write_buf: &'buf mut [u8],
file_data_offset: u64,
file_data_len: u64,
needs_crc: bool,
}
impl FileDataPduCreatorWithUnwrittenData<'_> {
/// Mutable access to the file data field.
pub fn file_data_field_mut(&mut self) -> &mut [u8] {
&mut self.write_buf[self.file_data_offset as usize
..self.file_data_offset as usize + self.file_data_len as usize]
}
/// This function needs to be called to add a CRC to the file data PDU where applicable.
///
/// It returns the full written size of the PDU.
pub fn finish(self) -> usize {
if self.needs_crc {
add_pdu_crc(
self.write_buf,
self.file_data_offset as usize + self.file_data_len as usize,
);
}
self.write_buf.len()
}
}
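// A minimal usage sketch of the partial-write workflow above, mirroring the creator tests below;
// the helper is hypothetical, pdu_conf stands in for a CommonPduConfig built elsewhere, and the
// segment buffer stands in for data read from a filesystem.
fn fd_pdu_partial_write_sketch(pdu_conf: CommonPduConfig) -> Result<(), PduError> {
    let pdu_header = PduHeader::new_for_file_data_default(pdu_conf, 0);
    let segment: &[u8] = b"segment data";
    let creator = FileDataPduCreatorWithReservedDatafield::new_no_seg_metadata(
        pdu_header,
        0,
        segment.len() as u64,
    );
    let mut buf: [u8; 64] = [0; 64];
    // Write everything except the file data field (and the CRC, if one is enabled).
    let mut pdu_unwritten = creator.write_to_bytes_partially(&mut buf)?;
    // Read the file data directly into the reserved field, avoiding an intermediate buffer.
    pdu_unwritten.file_data_field_mut().copy_from_slice(segment);
    // finish() appends the CRC where applicable and returns the full written size.
    let written = pdu_unwritten.finish();
    assert_eq!(written, creator.len_written());
    Ok(())
}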
/// This function can be used to calculate the maximum allowed file segment size for a given
/// maximum packet length, based on the PDU header and the optional segment metadata.
pub fn calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(
pdu_header: &PduHeader,
max_packet_len: usize,
segment_metadata: Option<&SegmentMetadata>,
) -> usize {
let mut subtract = pdu_header.header_len();
if let Some(segment_metadata) = segment_metadata {
subtract += 1 + segment_metadata.metadata().unwrap().len();
}
if pdu_header.common_pdu_conf().file_flag == LargeFileFlag::Large {
subtract += 8;
} else {
subtract += 4;
}
if pdu_header.common_pdu_conf().crc_flag == CrcFlag::WithCrc {
subtract += 2;
}
max_packet_len.saturating_sub(subtract)
}
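// Worked example for the helper above, mirroring test_max_file_seg_calculator_0 below: with a
// default CommonPduConfig the header takes 7 bytes and the offset (FSS) field 4 bytes, with no
// CRC and no segment metadata, so a 64 byte packet leaves 64 - 7 - 4 = 53 bytes of file data.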
#[cfg(test)]
mod tests {
@@ -263,7 +510,7 @@ mod tests {
assert!(fd_pdu.segment_metadata().is_none());
assert_eq!(
fd_pdu.len_written(),
fd_pdu.pdu_header.header_len() + core::mem::size_of::<u32>() + 4
fd_pdu.pdu_header().header_len() + core::mem::size_of::<u32>() + 4
);
assert_eq!(fd_pdu.crc_flag(), CrcFlag::NoCrc);
@@ -290,11 +537,11 @@ mod tests {
let written = res.unwrap();
assert_eq!(
written,
fd_pdu.pdu_header.header_len() + core::mem::size_of::<u32>() + 4
fd_pdu.pdu_header().header_len() + core::mem::size_of::<u32>() + 4
);
let mut current_idx = fd_pdu.pdu_header.header_len();
let mut current_idx = fd_pdu.pdu_header().header_len();
let file_size = u32::from_be_bytes(
buf[fd_pdu.pdu_header.header_len()..fd_pdu.pdu_header.header_len() + 4]
buf[fd_pdu.pdu_header().header_len()..fd_pdu.pdu_header().header_len() + 4]
.try_into()
.unwrap(),
);
@@ -353,7 +600,7 @@ mod tests {
buf[written - 1] -= 1;
let crc: u16 = ((buf[written - 2] as u16) << 8) | buf[written - 1] as u16;
let error = FileDataPdu::from_bytes(&buf).unwrap_err();
if let PduError::ChecksumError(e) = error {
if let PduError::Checksum(e) = error {
assert_eq!(e, crc);
} else {
panic!("expected crc error");
@@ -380,7 +627,7 @@ mod tests {
assert_eq!(*fd_pdu.segment_metadata().unwrap(), segment_meta);
assert_eq!(
fd_pdu.len_written(),
fd_pdu.pdu_header.header_len()
fd_pdu.pdu_header().header_len()
+ 1
+ seg_metadata.len()
+ core::mem::size_of::<u32>()
@@ -390,7 +637,7 @@ mod tests {
fd_pdu
.write_to_bytes(&mut buf)
.expect("writing FD PDU failed");
let mut current_idx = fd_pdu.pdu_header.header_len();
let mut current_idx = fd_pdu.pdu_header().header_len();
assert_eq!(
RecordContinuationState::try_from((buf[current_idx] >> 6) & 0b11).unwrap(),
RecordContinuationState::StartAndEnd
@@ -482,4 +729,142 @@ mod tests {
let output_converted_back: FileDataPdu = from_bytes(&output).unwrap();
assert_eq!(output_converted_back, fd_pdu);
}
#[test]
fn test_fd_pdu_creator_with_reserved_field_no_crc() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let test_str = "hello world!";
let fd_pdu = FileDataPduCreatorWithReservedDatafield::new_no_seg_metadata(
pdu_header,
10,
test_str.len() as u64,
);
let mut write_buf: [u8; 64] = [0; 64];
let mut pdu_unwritten = fd_pdu
.write_to_bytes_partially(&mut write_buf)
.expect("partial write failed");
pdu_unwritten
.file_data_field_mut()
.copy_from_slice(test_str.as_bytes());
pdu_unwritten.finish();
let pdu_reader = FileDataPdu::from_bytes(&write_buf).expect("reading file data PDU failed");
assert_eq!(
core::str::from_utf8(pdu_reader.file_data()).expect("reading utf8 string failed"),
"hello world!"
);
}
#[test]
fn test_fd_pdu_creator_with_reserved_field_with_crc() {
let mut common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
common_conf.crc_flag = true.into();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let test_str = "hello world!";
let fd_pdu = FileDataPduCreatorWithReservedDatafield::new_no_seg_metadata(
pdu_header,
10,
test_str.len() as u64,
);
let mut write_buf: [u8; 64] = [0; 64];
let mut pdu_unwritten = fd_pdu
.write_to_bytes_partially(&mut write_buf)
.expect("partial write failed");
pdu_unwritten
.file_data_field_mut()
.copy_from_slice(test_str.as_bytes());
pdu_unwritten.finish();
let pdu_reader = FileDataPdu::from_bytes(&write_buf).expect("reading file data PDU failed");
assert_eq!(
core::str::from_utf8(pdu_reader.file_data()).expect("reading utf8 string failed"),
"hello world!"
);
}
#[test]
fn test_fd_pdu_creator_with_reserved_field_with_crc_without_finish_fails() {
let mut common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
common_conf.crc_flag = true.into();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let test_str = "hello world!";
let fd_pdu = FileDataPduCreatorWithReservedDatafield::new_no_seg_metadata(
pdu_header,
10,
test_str.len() as u64,
);
let mut write_buf: [u8; 64] = [0; 64];
let mut pdu_unwritten = fd_pdu
.write_to_bytes_partially(&mut write_buf)
.expect("partial write failed");
pdu_unwritten
.file_data_field_mut()
.copy_from_slice(test_str.as_bytes());
let pdu_reader_error = FileDataPdu::from_bytes(&write_buf);
assert!(pdu_reader_error.is_err());
let error = pdu_reader_error.unwrap_err();
match error {
PduError::Checksum(_) => (),
_ => {
panic!("unexpected PDU error {}", error)
}
}
}
#[test]
fn test_max_file_seg_calculator_0() {
let pdu_header = PduHeader::new_for_file_data_default(CommonPduConfig::default(), 0);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 64, None),
53
);
}
#[test]
fn test_max_file_seg_calculator_1() {
let common_conf = CommonPduConfig {
crc_flag: CrcFlag::WithCrc,
..Default::default()
};
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 64, None),
51
);
}
#[test]
fn test_max_file_seg_calculator_2() {
let common_conf = CommonPduConfig {
file_flag: LargeFileFlag::Large,
..Default::default()
};
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 64, None),
49
);
}
#[test]
fn test_max_file_seg_calculator_saturating_sub() {
let common_conf = CommonPduConfig {
file_flag: LargeFileFlag::Large,
..Default::default()
};
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 15, None),
0
);
assert_eq!(
calculate_max_file_seg_len_for_max_packet_len_and_pdu_header(&pdu_header, 14, None),
0
);
}
}

View File

@@ -1,34 +1,44 @@
//! # Finished PDU packet implementation.
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, FileDirectiveType, PduError, PduHeader,
};
use crate::cfdp::tlv::{
EntityIdTlv, FilestoreResponseTlv, GenericTlv, Tlv, TlvType, TlvTypeField, WritableTlv,
};
use crate::cfdp::{ConditionCode, CrcFlag, Direction, PduType, TlvLvError};
use crate::cfdp::{ConditionCode, CrcFlag, Direction, PduType};
use crate::ByteConversionError;
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use super::{CfdpPdu, WritablePduPacket};
use super::tlv::ReadableTlv;
use super::{CfdpPdu, InvalidTlvTypeFieldError, WritablePduPacket};
/// Delivery code enumeration.
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum DeliveryCode {
/// Completed delivery.
Complete = 0,
/// Incomplete delivery.
Incomplete = 1,
}
/// File status enumeration.
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum FileStatus {
/// File was discarded deliberately.
DiscardDeliberately = 0b00,
/// File was rejected by the filestore.
DiscardedFsRejection = 0b01,
/// File was retained (but is not necessarily complete).
Retained = 0b10,
/// Unreported file status.
Unreported = 0b11,
}
@@ -49,12 +59,12 @@ pub struct FinishedPduCreator<'fs_responses> {
impl<'fs_responses> FinishedPduCreator<'fs_responses> {
/// Default finished PDU: No error (no fault location field) and no filestore responses.
pub fn new_default(
pub fn new_no_error(
pdu_header: PduHeader,
delivery_code: DeliveryCode,
file_status: FileStatus,
) -> Self {
Self::new_generic(
Self::new(
pdu_header,
ConditionCode::NoError,
delivery_code,
@@ -64,6 +74,7 @@ impl<'fs_responses> FinishedPduCreator<'fs_responses> {
)
}
/// Constructor where the fault location is provided.
pub fn new_with_error(
pdu_header: PduHeader,
condition_code: ConditionCode,
@@ -71,7 +82,7 @@ impl<'fs_responses> FinishedPduCreator<'fs_responses> {
file_status: FileStatus,
fault_location: EntityIdTlv,
) -> Self {
Self::new_generic(
Self::new(
pdu_header,
condition_code,
delivery_code,
@@ -81,7 +92,8 @@ impl<'fs_responses> FinishedPduCreator<'fs_responses> {
)
}
pub fn new_generic(
/// Generic constructor.
pub fn new(
mut pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
@@ -108,23 +120,38 @@ impl<'fs_responses> FinishedPduCreator<'fs_responses> {
finished_pdu
}
/// PDU header.
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
/// Condition code.
#[inline]
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
/// Delivery code.
#[inline]
pub fn delivery_code(&self) -> DeliveryCode {
self.delivery_code
}
/// File status.
#[inline]
pub fn file_status(&self) -> FileStatus {
self.file_status
}
// If there are no filestore responses, an empty slice will be returned.
/// Filestore responses as a slice.
#[inline]
pub fn filestore_responses(&self) -> &[FilestoreResponseTlv<'_, '_, '_>] {
self.fs_responses
}
/// Optional fault location [EntityIdTlv].
#[inline]
pub fn fault_location(&self) -> Option<EntityIdTlv> {
self.fault_location
}
@@ -142,20 +169,9 @@ impl<'fs_responses> FinishedPduCreator<'fs_responses> {
}
datafield_len
}
}
impl CfdpPdu for FinishedPduCreator<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::FinishedPdu)
}
}
impl WritablePduPacket for FinishedPduCreator<'_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
/// Write [Self] to the provided buffer and return the written size.
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
@@ -166,7 +182,7 @@ impl WritablePduPacket for FinishedPduCreator<'_> {
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::FinishedPdu as u8;
buf[current_idx] = FileDirectiveType::Finished as u8;
current_idx += 1;
buf[current_idx] = ((self.condition_code as u8) << 4)
| ((self.delivery_code as u8) << 2)
@@ -184,11 +200,34 @@ impl WritablePduPacket for FinishedPduCreator<'_> {
Ok(current_idx)
}
fn len_written(&self) -> usize {
/// Length of the written PDU in bytes.
pub fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
impl CfdpPdu for FinishedPduCreator<'_> {
#[inline]
fn pdu_header(&self) -> &PduHeader {
self.pdu_header()
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::Finished)
}
}
impl WritablePduPacket for FinishedPduCreator<'_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.write_to_bytes(buf)
}
fn len_written(&self) -> usize {
self.len_written()
}
}
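// A minimal usage sketch of the FinishedPduCreator API above; the helper is hypothetical,
// pdu_conf stands in for a CommonPduConfig built elsewhere, and the delivery code and file
// status values are illustrative.
fn finished_pdu_write_sketch(pdu_conf: CommonPduConfig) -> Result<(), PduError> {
    let pdu_header = PduHeader::new_for_file_directive(pdu_conf, 0);
    // No error: no fault location TLV and no filestore responses are attached.
    let finished_pdu =
        FinishedPduCreator::new_no_error(pdu_header, DeliveryCode::Complete, FileStatus::Retained);
    let mut buf: [u8; 64] = [0; 64];
    let written = finished_pdu.write_to_bytes(&mut buf)?;
    assert_eq!(written, finished_pdu.len_written());
    Ok(())
}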
/// Helper structure to loop through all filestore responses of a read Finished PDU. It should be
/// noted that iterators in Rust are not fallible, but the TLV creation can fail, for example if
/// the raw TLV data is invalid for some reason. In that case, the iterator will yield [None]
@@ -220,6 +259,7 @@ impl<'buf> Iterator for FilestoreResponseIterator<'buf> {
}
}
/// Finished PDU reader structure.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
@@ -247,13 +287,13 @@ impl<'buf> FinishedPduReader<'buf> {
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::FinishedPdu),
expected: Some(FileDirectiveType::Finished),
}
})?;
if directive_type != FileDirectiveType::FinishedPdu {
if directive_type != FileDirectiveType::Finished {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::FinishedPdu,
expected: FileDirectiveType::Finished,
});
}
current_idx += 1;
@@ -275,10 +315,14 @@ impl<'buf> FinishedPduReader<'buf> {
})
}
/// Raw filestore responses.
#[inline]
pub fn fs_responses_raw(&self) -> &[u8] {
self.fs_responses_raw
}
/// Iterator over the filestore responses.
#[inline]
pub fn fs_responses_iter(&self) -> FilestoreResponseIterator<'_> {
FilestoreResponseIterator {
responses_buf: self.fs_responses_raw,
@@ -286,22 +330,36 @@ impl<'buf> FinishedPduReader<'buf> {
}
}
/// Condition code.
#[inline]
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
/// Delivery code.
#[inline]
pub fn delivery_code(&self) -> DeliveryCode {
self.delivery_code
}
/// File status.
#[inline]
pub fn file_status(&self) -> FileStatus {
self.file_status
}
/// Optional fault location [EntityIdTlv].
#[inline]
pub fn fault_location(&self) -> Option<EntityIdTlv> {
self.fault_location
}
/// PDU header.
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn parse_tlv_fields(
mut current_idx: usize,
full_len_without_crc: usize,
@@ -331,22 +389,26 @@ impl<'buf> FinishedPduReader<'buf> {
// last TLV, everything else would break the whole handling of the packet
// TLVs.
if current_idx != full_len_without_crc {
return Err(PduError::FormatError);
return Err(PduError::Format);
}
} else {
return Err(TlvLvError::InvalidTlvTypeField {
found: tlv_type.into(),
expected: Some(TlvType::FilestoreResponse.into()),
}
.into());
return Err(PduError::TlvLv(
InvalidTlvTypeFieldError {
found: tlv_type.into(),
expected: Some(TlvType::FilestoreResponse.into()),
}
.into(),
));
}
}
TlvTypeField::Custom(raw) => {
return Err(TlvLvError::InvalidTlvTypeField {
found: raw,
expected: None,
}
.into());
return Err(PduError::TlvLv(
InvalidTlvTypeFieldError {
found: raw,
expected: None,
}
.into(),
));
}
}
}
@@ -355,12 +417,14 @@ impl<'buf> FinishedPduReader<'buf> {
}
impl CfdpPdu for FinishedPduReader<'_> {
#[inline]
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
self.pdu_header()
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::FinishedPdu)
Some(FileDirectiveType::Finished)
}
}
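// A minimal usage sketch of the FinishedPduReader API above; the helper is hypothetical and
// raw_pdu is assumed to hold a serialized Finished PDU, for example one produced by
// FinishedPduCreator::write_to_bytes.
fn finished_pdu_read_sketch(raw_pdu: &[u8]) -> Result<(), PduError> {
    let finished_pdu = FinishedPduReader::new(raw_pdu)?;
    // The filestore responses are parsed lazily through the iterator helper.
    for fs_response in finished_pdu.fs_responses_iter() {
        // Inspect or act on each filestore response here.
        let _ = fs_response;
    }
    Ok(())
}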
@@ -401,8 +465,8 @@ mod tests {
delivery_code: DeliveryCode,
file_status: FileStatus,
) -> FinishedPduCreator<'static> {
let pdu_header = PduHeader::new_no_file_data(common_pdu_conf(crc_flag, fss), 0);
FinishedPduCreator::new_default(pdu_header, delivery_code, file_status)
let pdu_header = PduHeader::new_for_file_directive(common_pdu_conf(crc_flag, fss), 0);
FinishedPduCreator::new_no_error(pdu_header, delivery_code, file_status)
}
#[test]
@@ -429,7 +493,7 @@ mod tests {
assert_eq!(finished_pdu.pdu_type(), PduType::FileDirective);
assert_eq!(
finished_pdu.file_directive_type(),
Some(FileDirectiveType::FinishedPdu)
Some(FileDirectiveType::Finished)
);
assert_eq!(
finished_pdu.transmission_mode(),
@@ -461,7 +525,7 @@ mod tests {
);
verify_raw_header(finished_pdu.pdu_header(), &buf);
let mut current_idx = finished_pdu.pdu_header().header_len();
assert_eq!(buf[current_idx], FileDirectiveType::FinishedPdu as u8);
assert_eq!(buf[current_idx], FileDirectiveType::Finished as u8);
current_idx += 1;
assert_eq!(
(buf[current_idx] >> 4) & 0b1111,
@@ -563,7 +627,7 @@ mod tests {
buf[written - 1] -= 1;
let crc: u16 = ((buf[written - 2] as u16) << 8) as u16 | buf[written - 1] as u16;
let error = FinishedPduReader::new(&buf).unwrap_err();
if let PduError::ChecksumError(e) = error {
if let PduError::Checksum(e) = error {
assert_eq!(e, crc);
} else {
panic!("expected crc error");
@@ -572,8 +636,10 @@ mod tests {
#[test]
fn test_with_fault_location() {
let pdu_header =
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
let pdu_header = PduHeader::new_for_file_directive(
common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
0,
);
let finished_pdu = FinishedPduCreator::new_with_error(
pdu_header,
ConditionCode::NakLimitReached,
@@ -585,7 +651,7 @@ mod tests {
assert_eq!(finished_pdu_vec.len(), 12);
assert_eq!(finished_pdu_vec[9], TlvType::EntityId.into());
assert_eq!(finished_pdu_vec[10], 1);
assert_eq!(finished_pdu_vec[11], TEST_DEST_ID.value_typed());
assert_eq!(finished_pdu_vec[11], TEST_DEST_ID.value());
assert_eq!(
finished_pdu.fault_location().unwrap().entity_id(),
&TEST_DEST_ID.into()
@@ -594,8 +660,10 @@ mod tests {
#[test]
fn test_deserialization_with_fault_location() {
let pdu_header =
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
let pdu_header = PduHeader::new_for_file_directive(
common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
0,
);
let entity_id_tlv = EntityIdTlv::new(TEST_DEST_ID.into());
let finished_pdu = FinishedPduCreator::new_with_error(
pdu_header,
@@ -630,9 +698,11 @@ mod tests {
.unwrap();
let fs_responses = &[fs_response_0, fs_response_1];
let pdu_header =
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
let finished_pdu = FinishedPduCreator::new_generic(
let pdu_header = PduHeader::new_for_file_directive(
common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
0,
);
let finished_pdu = FinishedPduCreator::new(
pdu_header,
ConditionCode::NakLimitReached,
DeliveryCode::Incomplete,
@@ -665,9 +735,11 @@ mod tests {
.unwrap();
let fs_responses = &[fs_response_0, fs_response_1];
let pdu_header =
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
let finished_pdu = FinishedPduCreator::new_generic(
let pdu_header = PduHeader::new_for_file_directive(
common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
0,
);
let finished_pdu = FinishedPduCreator::new(
pdu_header,
ConditionCode::NakLimitReached,
DeliveryCode::Incomplete,

View File

@@ -1,3 +1,6 @@
//! # Metadata PDU packet implementation.
#[cfg(feature = "alloc")]
use super::tlv::TlvOwned;
use crate::cfdp::lv::Lv;
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
@@ -11,18 +14,24 @@ use alloc::vec::Vec;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use super::tlv::ReadableTlv;
use super::{CfdpPdu, WritablePduPacket};
/// Generic metadata parameters.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct MetadataGenericParams {
/// Closure requested flag.
pub closure_requested: bool,
/// Checksum type.
pub checksum_type: ChecksumType,
/// File size.
pub file_size: u64,
}
impl MetadataGenericParams {
/// Constructor.
pub fn new(closure_requested: bool, checksum_type: ChecksumType, file_size: u64) -> Self {
Self {
closure_requested,
@@ -32,6 +41,7 @@ impl MetadataGenericParams {
}
}
/// Build the metadata options from a slice of [Tlv]s
pub fn build_metadata_opts_from_slice(
buf: &mut [u8],
tlvs: &[Tlv],
@@ -43,6 +53,7 @@ pub fn build_metadata_opts_from_slice(
Ok(written)
}
/// Build the metadata options from a vector of [Tlv]s
#[cfg(feature = "alloc")]
pub fn build_metadata_opts_from_vec(
buf: &mut [u8],
@@ -51,21 +62,31 @@ pub fn build_metadata_opts_from_vec(
build_metadata_opts_from_slice(buf, tlvs.as_slice())
}
/// Build the metadata options from a slice of [TlvOwned]s
#[cfg(feature = "alloc")]
pub fn build_metadata_opts_from_owned_slice(tlvs: &[TlvOwned]) -> Vec<u8> {
let mut sum_vec = Vec::new();
for tlv in tlvs {
sum_vec.extend(tlv.to_vec());
}
sum_vec
}
/// Metadata PDU creator abstraction.
///
/// This abstraction exposes a specialized API for creating metadata PDUs as specified in
/// CFDP chapter 5.2.5.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct MetadataPduCreator<'src_name, 'dest_name, 'opts> {
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
options: &'opts [Tlv<'opts>],
options: &'opts [u8],
}
impl<'src_name, 'dest_name, 'opts> MetadataPduCreator<'src_name, 'dest_name, 'opts> {
/// Constructor for a metadata PDU without options.
pub fn new_no_opts(
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
@@ -81,12 +102,13 @@ impl<'src_name, 'dest_name, 'opts> MetadataPduCreator<'src_name, 'dest_name, 'op
)
}
/// Constructor for a metadata PDU with options.
pub fn new_with_opts(
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
options: &'opts [Tlv<'opts>],
options: &'opts [u8],
) -> Self {
Self::new(
pdu_header,
@@ -97,12 +119,13 @@ impl<'src_name, 'dest_name, 'opts> MetadataPduCreator<'src_name, 'dest_name, 'op
)
}
/// Generic constructor for a metadata PDU.
pub fn new(
mut pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
options: &'opts [Tlv<'opts>],
options: &'opts [u8],
) -> Self {
pdu_header.pdu_type = PduType::FileDirective;
pdu_header.pdu_conf.direction = Direction::TowardsReceiver;
@@ -117,22 +140,45 @@ impl<'src_name, 'dest_name, 'opts> MetadataPduCreator<'src_name, 'dest_name, 'op
pdu
}
/// Metadata generic parameters.
#[inline]
pub fn metadata_params(&self) -> &MetadataGenericParams {
&self.metadata_params
}
/// Source file name as a [Lv].
#[inline]
pub fn src_file_name(&self) -> Lv<'src_name> {
self.src_file_name
}
/// Destination file name as a [Lv].
#[inline]
pub fn dest_file_name(&self) -> Lv<'dest_name> {
self.dest_file_name
}
pub fn options(&self) -> &'opts [Tlv<'opts>] {
/// PDU header.
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
/// Raw options.
#[inline]
pub fn options(&self) -> &'opts [u8] {
self.options
}
/// Yield an iterator which can be used to loop through all options contained in the raw
/// options buffer.
pub fn options_iter(&self) -> OptionsIter<'_> {
OptionsIter {
opt_buf: self.options,
current_idx: 0,
}
}
fn calc_pdu_datafield_len(&self) -> usize {
// One directive type octet and one byte of the directive parameter field.
let mut len = 2;
@@ -143,28 +189,15 @@ impl<'src_name, 'dest_name, 'opts> MetadataPduCreator<'src_name, 'dest_name, 'op
}
len += self.src_file_name.len_full();
len += self.dest_file_name.len_full();
for tlv in self.options() {
len += tlv.len_full()
}
len += self.options().len();
if self.crc_flag() == CrcFlag::WithCrc {
len += 2;
}
len
}
}
impl CfdpPdu for MetadataPduCreator<'_, '_, '_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::MetadataPdu)
}
}
impl WritablePduPacket for MetadataPduCreator<'_, '_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
/// Write [Self] to the provided buffer, returning the written size.
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
@@ -175,7 +208,7 @@ impl WritablePduPacket for MetadataPduCreator<'_, '_, '_> {
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::MetadataPdu as u8;
buf[current_idx] = FileDirectiveType::Metadata as u8;
current_idx += 1;
buf[current_idx] = ((self.metadata_params.closure_requested as u8) << 6)
| (self.metadata_params.checksum_type as u8);
@@ -191,21 +224,42 @@ impl WritablePduPacket for MetadataPduCreator<'_, '_, '_> {
current_idx += self
.dest_file_name
.write_to_be_bytes(&mut buf[current_idx..])?;
for opt in self.options() {
opt.write_to_bytes(&mut buf[current_idx..current_idx + opt.len_full()])?;
current_idx += opt.len_full();
}
buf[current_idx..current_idx + self.options.len()].copy_from_slice(self.options);
current_idx += self.options.len();
if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
fn len_written(&self) -> usize {
/// Length of the written PDU in bytes.
pub fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
impl CfdpPdu for MetadataPduCreator<'_, '_, '_> {
#[inline]
fn pdu_header(&self) -> &PduHeader {
self.pdu_header()
}
#[inline]
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::Metadata)
}
}
impl WritablePduPacket for MetadataPduCreator<'_, '_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
self.write_to_bytes(buf)
}
fn len_written(&self) -> usize {
self.len_written()
}
}
/// Helper structure to loop through all options of a metadata PDU. It should be noted that
/// iterators in Rust are not fallible, but the TLV creation can fail, for example if the raw TLV
/// data is invalid for some reason. In that case, the iterator will yield [None] because there
@@ -255,10 +309,12 @@ pub struct MetadataPduReader<'buf> {
}
impl<'raw> MetadataPduReader<'raw> {
/// Constructor from raw bytes.
pub fn new(buf: &'raw [u8]) -> Result<Self, PduError> {
Self::from_bytes(buf)
}
/// Constructor from raw bytes.
pub fn from_bytes(buf: &'raw [u8]) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
@@ -272,13 +328,13 @@ impl<'raw> MetadataPduReader<'raw> {
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::MetadataPdu),
expected: Some(FileDirectiveType::Metadata),
}
})?;
if directive_type != FileDirectiveType::MetadataPdu {
if directive_type != FileDirectiveType::Metadata {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::MetadataPdu,
expected: FileDirectiveType::Metadata,
});
}
current_idx += 1;
@@ -314,35 +370,50 @@ impl<'raw> MetadataPduReader<'raw> {
})
}
/// PDU header.
#[inline]
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
/// Raw options.
#[inline]
pub fn options(&self) -> &'raw [u8] {
self.options
}
/// Generic metadata parameters.
#[inline]
pub fn metadata_params(&self) -> &MetadataGenericParams {
&self.metadata_params
}
pub fn src_file_name(&self) -> Lv {
/// Source file name as a [Lv].
#[inline]
pub fn src_file_name(&self) -> Lv<'_> {
self.src_file_name
}
pub fn dest_file_name(&self) -> Lv {
/// Destination file name as a [Lv].
#[inline]
pub fn dest_file_name(&self) -> Lv<'_> {
self.dest_file_name
}
}
impl CfdpPdu for MetadataPduReader<'_> {
#[inline]
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
self.pdu_header()
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::MetadataPdu)
Some(FileDirectiveType::Metadata)
}
}
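Since the options field is now a raw &[u8] instead of a TLV slice, callers serialize their TLVs first and hand the resulting byte buffer to the creator. A minimal sketch of that workflow in the crate-internal test style (import paths as used in the tests below, alloc feature assumed for to_vec()):
use crate::cfdp::lv::Lv;
use crate::cfdp::pdu::metadata::{
    build_metadata_opts_from_slice, MetadataGenericParams, MetadataPduCreator, MetadataPduReader,
};
use crate::cfdp::pdu::{CommonPduConfig, PduHeader, WritablePduPacket};
use crate::cfdp::tlv::{ReadableTlv, Tlv, TlvType};
use crate::cfdp::ChecksumType;
fn metadata_pdu_with_raw_opts() {
    // Serialize the options into a raw byte buffer first.
    let msg = [1, 2, 3, 4];
    let tlvs = [
        Tlv::new_empty(TlvType::FlowLabel),
        Tlv::new(TlvType::MsgToUser, &msg).unwrap(),
    ];
    let mut opts_buf: [u8; 64] = [0; 64];
    let opts_len = build_metadata_opts_from_slice(&mut opts_buf, &tlvs).unwrap();
    let pdu_header = PduHeader::new_for_file_directive(CommonPduConfig::default(), 0);
    let params = MetadataGenericParams::new(false, ChecksumType::Crc32, 0x1010);
    let creator = MetadataPduCreator::new_with_opts(
        pdu_header,
        params,
        Lv::new_from_str("hello-world.txt").unwrap(),
        Lv::new_from_str("hello-world2.txt").unwrap(),
        &opts_buf[0..opts_len],
    );
    let raw = creator.to_vec().unwrap();
    let reader = MetadataPduReader::from_bytes(&raw).unwrap();
    // The reader exposes the options both as raw bytes and as a TLV iterator.
    assert_eq!(reader.options(), &opts_buf[0..opts_len]);
    let mut accumulated_len = 0;
    for tlv in reader.options_iter().unwrap() {
        accumulated_len += tlv.len_full();
    }
    assert_eq!(accumulated_len, opts_len);
}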
#[cfg(test)]
pub mod tests {
mod tests {
use alloc::string::ToString;
use crate::cfdp::lv::Lv;
@@ -355,7 +426,7 @@ pub mod tests {
};
use crate::cfdp::pdu::{CfdpPdu, PduError, WritablePduPacket};
use crate::cfdp::pdu::{FileDirectiveType, PduHeader};
use crate::cfdp::tlv::{Tlv, TlvType};
use crate::cfdp::tlv::{ReadableTlv, Tlv, TlvOwned, TlvType, WritableTlv};
use crate::cfdp::{
ChecksumType, CrcFlag, Direction, LargeFileFlag, PduType, SegmentMetadataFlag,
SegmentationControl, TransmissionMode,
@@ -365,18 +436,18 @@ pub mod tests {
const SRC_FILENAME: &str = "hello-world.txt";
const DEST_FILENAME: &str = "hello-world2.txt";
fn generic_metadata_pdu<'opts>(
fn generic_metadata_pdu(
crc_flag: CrcFlag,
checksum_type: ChecksumType,
closure_requested: bool,
fss: LargeFileFlag,
opts: &'opts [Tlv],
opts: &[u8],
) -> (
Lv<'static>,
Lv<'static>,
MetadataPduCreator<'static, 'static, 'opts>,
MetadataPduCreator<'static, 'static, '_>,
) {
let pdu_header = PduHeader::new_no_file_data(common_pdu_conf(crc_flag, fss), 0);
let pdu_header = PduHeader::new_for_file_directive(common_pdu_conf(crc_flag, fss), 0);
let metadata_params = MetadataGenericParams::new(closure_requested, checksum_type, 0x1010);
let src_filename = Lv::new_from_str(SRC_FILENAME).expect("Generating string LV failed");
let dest_filename =
@@ -425,7 +496,7 @@ pub mod tests {
);
assert_eq!(
metadata_pdu.file_directive_type(),
Some(FileDirectiveType::MetadataPdu)
Some(FileDirectiveType::Metadata)
);
assert_eq!(
metadata_pdu.transmission_mode(),
@@ -456,7 +527,7 @@ pub mod tests {
+ expected_src_filename.len_full()
+ expected_dest_filename.len_full()
);
assert_eq!(buf[7], FileDirectiveType::MetadataPdu as u8);
assert_eq!(buf[7], FileDirectiveType::Metadata as u8);
assert_eq!(buf[8] >> 6, closure_requested as u8);
assert_eq!(buf[8] & 0b1111, checksum_type as u8);
assert_eq!(u32::from_be_bytes(buf[9..13].try_into().unwrap()), 0x1010);
@@ -544,9 +615,9 @@ pub mod tests {
assert_eq!(written.metadata_params(), read.metadata_params());
assert_eq!(written.src_file_name(), read.src_file_name());
assert_eq!(written.dest_file_name(), read.dest_file_name());
let opts = written.options();
for (tlv_written, tlv_read) in opts.iter().zip(read.options_iter().unwrap()) {
assert_eq!(tlv_written, &tlv_read);
let opts = written.options_iter();
for (tlv_written, tlv_read) in opts.zip(read.options_iter().unwrap()) {
assert_eq!(&tlv_written, &tlv_read);
}
}
@@ -661,14 +732,14 @@ pub mod tests {
let tlv1 = Tlv::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap();
let tlv_vec = vec![tlv1, tlv2];
let opts_len = tlv1.len_full() + tlv2.len_full();
let mut tlv_buf: [u8; 64] = [0; 64];
let opts_len = build_metadata_opts_from_slice(&mut tlv_buf, &[tlv1, tlv2]).unwrap();
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Normal,
&tlv_vec,
&tlv_buf[0..opts_len],
);
let mut buf: [u8; 128] = [0; 128];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
@@ -691,7 +762,55 @@ pub mod tests {
let opts_iter = opts_iter.unwrap();
let mut accumulated_len = 0;
for (idx, opt) in opts_iter.enumerate() {
assert_eq!(tlv_vec[idx], opt);
if idx == 0 {
assert_eq!(tlv1, opt);
} else if idx == 1 {
assert_eq!(tlv2, opt);
}
accumulated_len += opt.len_full();
}
assert_eq!(accumulated_len, pdu_read_back.options().len());
}
#[test]
fn test_with_owned_opts() {
let tlv1 = TlvOwned::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = TlvOwned::new(TlvType::MsgToUser, &msg_to_user);
let mut all_tlvs = tlv1.to_vec();
all_tlvs.extend(tlv2.to_vec());
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Normal,
&all_tlvs,
);
let mut buf: [u8; 128] = [0; 128];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header.header_len()
+ 1
+ 1
+ 4
+ src_filename.len_full()
+ dest_filename.len_full()
+ all_tlvs.len()
);
let pdu_read_back = MetadataPduReader::from_bytes(&buf).unwrap();
compare_read_pdu_to_written_pdu(&metadata_pdu, &pdu_read_back);
let opts_iter = pdu_read_back.options_iter();
assert!(opts_iter.is_some());
let opts_iter = opts_iter.unwrap();
let mut accumulated_len = 0;
for (idx, opt) in opts_iter.enumerate() {
if idx == 0 {
assert_eq!(tlv1, opt);
} else if idx == 1 {
assert_eq!(tlv2, opt);
}
accumulated_len += opt.len_full();
}
assert_eq!(accumulated_len, pdu_read_back.options().len());
@@ -713,10 +832,10 @@ pub mod tests {
let error = metadata_error.unwrap_err();
if let PduError::InvalidDirectiveType { found, expected } = error {
assert_eq!(found, 0xff);
assert_eq!(expected, Some(FileDirectiveType::MetadataPdu));
assert_eq!(expected, Some(FileDirectiveType::Metadata));
assert_eq!(
error.to_string(),
"invalid directive type value 255, expected Some(MetadataPdu)"
"invalid directive type, found 255, expected Some(Metadata)"
);
} else {
panic!("Expected InvalidDirectiveType error, got {:?}", error);
@@ -733,16 +852,16 @@ pub mod tests {
&[],
);
let mut metadata_vec = metadata_pdu.to_vec().unwrap();
metadata_vec[7] = FileDirectiveType::EofPdu as u8;
metadata_vec[7] = FileDirectiveType::Eof as u8;
let metadata_error = MetadataPduReader::from_bytes(&metadata_vec);
assert!(metadata_error.is_err());
let error = metadata_error.unwrap_err();
if let PduError::WrongDirectiveType { found, expected } = error {
assert_eq!(found, FileDirectiveType::EofPdu);
assert_eq!(expected, FileDirectiveType::MetadataPdu);
assert_eq!(found, FileDirectiveType::Eof);
assert_eq!(expected, FileDirectiveType::Metadata);
assert_eq!(
error.to_string(),
"found directive type EofPdu, expected MetadataPdu"
"wrong directive type, found Eof, expected Metadata"
);
} else {
panic!("Expected InvalidDirectiveType error, got {:?}", error);

View File

@@ -1,13 +1,12 @@
//! CFDP Packet Data Unit (PDU) support.
use crate::cfdp::pdu::ack::InvalidAckedDirectiveCodeError;
use crate::cfdp::pdu::nak::InvalidStartOrEndOfScopeError;
use crate::cfdp::*;
use crate::crc::CRC_CCITT_FALSE;
use crate::util::{UnsignedByteField, UnsignedByteFieldU8, UnsignedEnum};
use crate::ByteConversionError;
use crate::CRC_CCITT_FALSE;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use core::fmt::{Display, Formatter};
#[cfg(feature = "std")]
use std::error::Error;
pub mod ack;
pub mod eof;
@@ -16,156 +15,115 @@ pub mod finished;
pub mod metadata;
pub mod nak;
/// File directive type.
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum FileDirectiveType {
EofPdu = 0x04,
FinishedPdu = 0x05,
AckPdu = 0x06,
MetadataPdu = 0x07,
NakPdu = 0x08,
PromptPdu = 0x09,
KeepAlivePdu = 0x0c,
/// EOF.
Eof = 0x04,
/// Finished.
Finished = 0x05,
/// ACK.
Ack = 0x06,
/// Metadata.
Metadata = 0x07,
/// NAK.
Nak = 0x08,
/// Prompt.
Prompt = 0x09,
/// Keep Alive.
KeepAlive = 0x0c,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
/// PDU error.
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum PduError {
ByteConversion(ByteConversionError),
/// Found version ID invalid, not equal to [CFDP_VERSION_2].
/// Byte conversion error.
#[error("byte conversion error: {0}")]
ByteConversion(#[from] ByteConversionError),
/// Found version ID invalid, not equal to [super::CFDP_VERSION_2].
#[error("CFDP version missmatch, found {0}, expected {ver}", ver = super::CFDP_VERSION_2)]
CfdpVersionMissmatch(u8),
/// Invalid length for the entity ID detected. Only the values 1, 2, 4 and 8 are supported.
#[error("invalid PDU entity ID length {0}, only [1, 2, 4, 8] are allowed")]
InvalidEntityLen(u8),
/// Invalid length for the transaction sequence number detected. Only the values 1, 2, 4 and 8 are supported.
#[error("invalid transaction ID length {0}")]
InvalidTransactionSeqNumLen(u8),
/// Source and destination entity ID lengths do not match.
#[error(
"missmatch of PDU source ID length {src_id_len} and destination ID length {dest_id_len}"
)]
SourceDestIdLenMissmatch {
/// Source ID length.
src_id_len: usize,
/// Destination ID length.
dest_id_len: usize,
},
/// Wrong directive type, for example when parsing the directive field for a file directive
/// PDU.
#[error("wrong directive type, found {found:?}, expected {expected:?}")]
WrongDirectiveType {
/// Found directive type.
found: FileDirectiveType,
/// Expected directive type.
expected: FileDirectiveType,
},
/// The directive type field contained a value not in the range of permitted values. This can
/// also happen if an invalid value is passed to the ACK PDU constructor.
/// also happen if an invalid value is passed to the ACK PDU reader.
#[error("invalid directive type, found {found:?}, expected {expected:?}")]
InvalidDirectiveType {
/// Found raw directive type.
found: u8,
/// Expected raw directive type if applicable.
expected: Option<FileDirectiveType>,
},
InvalidStartOrEndOfScopeValue,
/// Invalid start or end of scope for a NAK PDU.
#[error("nak pdu: {0}")]
InvalidStartOrEndOfScope(#[from] InvalidStartOrEndOfScopeError),
/// Invalid condition code. Contains the raw detected value.
#[error("invalid condition code {0}")]
InvalidConditionCode(u8),
/// Invalid checksum type which is not part of the checksums listed in the
/// [SANA Checksum Types registry](https://sanaregistry.org/r/checksum_identifiers/).
#[error("invalid checksum type {0}")]
InvalidChecksumType(u8),
/// File size is too large.
#[error("file size {0} too large")]
FileSizeTooLarge(u64),
/// If the CRC flag for a PDU is enabled and the checksum check fails. Contains raw 16-bit CRC.
ChecksumError(u16),
#[error("checksum error for checksum {0}")]
Checksum(u16),
/// Generic error for invalid PDU formats.
FormatError,
#[error("generic PDU format error")]
Format,
/// Error handling a TLV field.
TlvLvError(TlvLvError),
#[error("PDU error: {0}")]
TlvLv(#[from] TlvLvError),
}
impl Display for PduError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
PduError::InvalidEntityLen(raw_id) => {
write!(
f,
"invalid PDU entity ID length {raw_id}, only [1, 2, 4, 8] are allowed"
)
}
PduError::InvalidStartOrEndOfScopeValue => {
write!(f, "invalid start or end of scope for NAK PDU")
}
PduError::InvalidTransactionSeqNumLen(raw_id) => {
write!(
f,
"invalid PDUtransaction seq num length {raw_id}, only [1, 2, 4, 8] are allowed"
)
}
PduError::CfdpVersionMissmatch(raw) => {
write!(
f,
"cfdp version missmatch, found {raw}, expected {CFDP_VERSION_2}"
)
}
PduError::SourceDestIdLenMissmatch {
src_id_len,
dest_id_len,
} => {
write!(
f,
"missmatch of PDU source length {src_id_len} and destination length {dest_id_len}"
)
}
PduError::ByteConversion(e) => {
write!(f, "{}", e)
}
PduError::FileSizeTooLarge(value) => {
write!(f, "file size value {value} exceeds allowed 32 bit width")
}
PduError::WrongDirectiveType { found, expected } => {
write!(f, "found directive type {found:?}, expected {expected:?}")
}
PduError::InvalidConditionCode(raw_code) => {
write!(f, "found invalid condition code with raw value {raw_code}")
}
PduError::InvalidDirectiveType { found, expected } => {
write!(
f,
"invalid directive type value {found}, expected {expected:?}"
)
}
PduError::InvalidChecksumType(checksum_type) => {
write!(f, "invalid checksum type {checksum_type}")
}
PduError::ChecksumError(checksum) => {
write!(f, "checksum error for CRC {checksum:#04x}")
}
PduError::TlvLvError(error) => {
write!(f, "pdu tlv error: {error}")
}
PduError::FormatError => {
write!(f, "generic PDU format error")
}
impl From<InvalidAckedDirectiveCodeError> for PduError {
fn from(value: InvalidAckedDirectiveCodeError) -> Self {
Self::InvalidDirectiveType {
found: value.0 as u8,
expected: None,
}
}
}
#[cfg(feature = "std")]
impl Error for PduError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
PduError::ByteConversion(e) => Some(e),
PduError::TlvLvError(e) => Some(e),
_ => None,
}
}
}
impl From<ByteConversionError> for PduError {
#[inline]
fn from(value: ByteConversionError) -> Self {
Self::ByteConversion(value)
}
}
impl From<TlvLvError> for PduError {
#[inline]
fn from(e: TlvLvError) -> Self {
Self::TlvLvError(e)
}
}
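With the thiserror rework, the hand-written Display and From impls go away: Display text comes from the #[error] attributes, ? conversions come from the #[from] attributes, and calling code matches on the renamed variants (Checksum, Format, TlvLv). A small, hypothetical helper sketching this, using only paths shown in this diff:
use crate::cfdp::pdu::metadata::MetadataPduReader;
use crate::cfdp::pdu::PduError;
/// Returns true if the buffer holds a Metadata PDU that parses cleanly.
fn is_valid_metadata_pdu(buf: &[u8]) -> bool {
    match MetadataPduReader::from_bytes(buf) {
        Ok(_) => true,
        // CRC flag was set and the checksum over the full PDU did not verify.
        Err(PduError::Checksum(_raw_crc)) => false,
        // Byte conversion errors are converted automatically via #[from].
        Err(PduError::ByteConversion(_)) => false,
        Err(_) => false,
    }
}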
/// Generic trait for a PDU which can be written to bytes.
pub trait WritablePduPacket {
/// Length when written to bytes.
fn len_written(&self) -> usize;
/// Write the PDU to a raw buffer, returning the written length.
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError>;
/// Convert the PDU to an owned vector of bytes.
#[cfg(feature = "alloc")]
fn to_vec(&self) -> Result<Vec<u8>, PduError> {
// This is the correct way to do this. See
@@ -179,48 +137,58 @@ pub trait WritablePduPacket {
/// Abstraction trait for fields and properties common for all PDUs.
pub trait CfdpPdu {
/// PDU header.
fn pdu_header(&self) -> &PduHeader;
/// Source ID (file sender).
#[inline]
fn source_id(&self) -> UnsignedByteField {
self.pdu_header().common_pdu_conf().source_entity_id
}
/// Destination ID (file receiver).
#[inline]
fn dest_id(&self) -> UnsignedByteField {
self.pdu_header().common_pdu_conf().dest_entity_id
}
/// Transaction sequence number.
#[inline]
fn transaction_seq_num(&self) -> UnsignedByteField {
self.pdu_header().common_pdu_conf().transaction_seq_num
}
/// Transmission mode.
#[inline]
fn transmission_mode(&self) -> TransmissionMode {
self.pdu_header().common_pdu_conf().trans_mode
}
/// Direction.
#[inline]
fn direction(&self) -> Direction {
self.pdu_header().common_pdu_conf().direction
}
/// CRC flag.
#[inline]
fn crc_flag(&self) -> CrcFlag {
self.pdu_header().common_pdu_conf().crc_flag
}
/// File flag.
#[inline]
fn file_flag(&self) -> LargeFileFlag {
self.pdu_header().common_pdu_conf().file_flag
}
/// PDU type.
#[inline]
fn pdu_type(&self) -> PduType {
self.pdu_header().pdu_type()
}
/// File directive type when applicable.
fn file_directive_type(&self) -> Option<FileDirectiveType>;
}
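Because every creator and reader implements CfdpPdu, generic code can inspect the common header fields without naming the concrete PDU type. A minimal sketch of such a helper (hypothetical name, paths as used in the surrounding module and tests):
use crate::cfdp::pdu::{CfdpPdu, FileDirectiveType};
use crate::cfdp::PduType;
/// Returns true if the PDU is a file directive of the expected directive type.
fn is_directive_of_type(pdu: &impl CfdpPdu, expected: FileDirectiveType) -> bool {
    pdu.pdu_type() == PduType::FileDirective && pdu.file_directive_type() == Some(expected)
}
A call like is_directive_of_type(&finished_pdu, FileDirectiveType::Finished) then mirrors the assertions used in the PDU tests above.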
@@ -236,15 +204,21 @@ pub trait CfdpPdu {
pub struct CommonPduConfig {
source_entity_id: UnsignedByteField,
dest_entity_id: UnsignedByteField,
/// Transaction sequence number.
pub transaction_seq_num: UnsignedByteField,
/// Transmission mode.
pub trans_mode: TransmissionMode,
/// File flag.
pub file_flag: LargeFileFlag,
/// CRC flag.
pub crc_flag: CrcFlag,
/// Direction.
pub direction: Direction,
}
// TODO: Builder pattern might be applicable here.
impl CommonPduConfig {
/// Generic constructor.
#[inline]
pub fn new(
source_id: impl Into<UnsignedByteField>,
@@ -277,6 +251,7 @@ impl CommonPduConfig {
})
}
/// Constructor for custom byte field with default field values for the other fields.
#[inline]
pub fn new_with_byte_fields(
source_id: impl Into<UnsignedByteField>,
@@ -294,6 +269,7 @@ impl CommonPduConfig {
)
}
/// Source ID (file sender).
#[inline]
pub fn source_id(&self) -> UnsignedByteField {
self.source_entity_id
@@ -322,6 +298,7 @@ impl CommonPduConfig {
Ok((source_id, dest_id))
}
/// Set the source and destination ID field.
#[inline]
pub fn set_source_and_dest_id(
&mut self,
@@ -334,6 +311,7 @@ impl CommonPduConfig {
Ok(())
}
/// Destination ID (file receiver).
#[inline]
pub fn dest_id(&self) -> UnsignedByteField {
self.dest_entity_id
@@ -372,6 +350,7 @@ impl PartialEq for CommonPduConfig {
}
}
/// Fixed header length of the PDU header.
pub const FIXED_HEADER_LEN: usize = 4;
/// Abstraction for the PDU header common to all CFDP PDUs.
@@ -389,6 +368,10 @@ pub struct PduHeader {
}
impl PduHeader {
/// Fixed length of the PDU header when written to a raw buffer.
pub const FIXED_LEN: usize = FIXED_HEADER_LEN;
/// Constructor for a File Data PDU header.
#[inline]
pub fn new_for_file_data(
pdu_conf: CommonPduConfig,
@@ -405,6 +388,7 @@ impl PduHeader {
)
}
/// Constructor for a file data PDU with default segmentation settings.
#[inline]
pub fn new_for_file_data_default(pdu_conf: CommonPduConfig, pdu_datafield_len: u16) -> Self {
Self::new_generic(
@@ -415,8 +399,10 @@ impl PduHeader {
SegmentationControl::NoRecordBoundaryPreservation,
)
}
/// Constructor for a file directive PDU.
#[inline]
pub fn new_no_file_data(pdu_conf: CommonPduConfig, pdu_datafield_len: u16) -> Self {
pub fn new_for_file_directive(pdu_conf: CommonPduConfig, pdu_datafield_len: u16) -> Self {
Self::new_generic(
PduType::FileDirective,
pdu_conf,
@@ -426,6 +412,19 @@ impl PduHeader {
)
}
/// Constructor for a file directive PDU from a given [CommonPduConfig].
#[inline]
pub fn from_pdu_conf_for_file_directive(pdu_conf: CommonPduConfig) -> Self {
Self::new_generic(
PduType::FileDirective,
pdu_conf,
0,
SegmentMetadataFlag::NotPresent,
SegmentationControl::NoRecordBoundaryPreservation,
)
}
/// Generic constructor.
#[inline]
pub fn new_generic(
pdu_type: PduType,
@@ -452,6 +451,7 @@ impl PduHeader {
+ self.pdu_conf.dest_entity_id.size()
}
/// PDU data field length.
#[inline]
pub fn pdu_datafield_len(&self) -> usize {
self.pdu_datafield_len.into()
@@ -464,15 +464,15 @@ impl PduHeader {
self.header_len() + self.pdu_datafield_len as usize
}
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
// Internal note: There is currently no way to pass a PDU configuration like this, but
// this check is still kept for defensive programming.
if self.pdu_conf.source_entity_id.size() != self.pdu_conf.dest_entity_id.size() {
return Err(PduError::SourceDestIdLenMissmatch {
src_id_len: self.pdu_conf.source_entity_id.size(),
dest_id_len: self.pdu_conf.dest_entity_id.size(),
});
}
/// Write the header to a raw buffer, returning the written length on success.
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
// The API does not allow passing entity IDs with different sizes, so this should
// never happen.
assert_eq!(
self.pdu_conf.source_entity_id.size(),
self.pdu_conf.dest_entity_id.size(),
"unexpected missmatch of source and destination entity ID length"
);
if buf.len()
< FIXED_HEADER_LEN
+ self.pdu_conf.source_entity_id.size()
@@ -481,8 +481,7 @@ impl PduHeader {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: FIXED_HEADER_LEN,
}
.into());
});
}
let mut current_idx = 0;
buf[current_idx] = (CFDP_VERSION_2 << 5)
@@ -532,7 +531,7 @@ impl PduHeader {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(&buf[..self.pdu_len()]);
if digest.finalize() != 0 {
return Err(PduError::ChecksumError(u16::from_be_bytes(
return Err(PduError::Checksum(u16::from_be_bytes(
buf[self.pdu_len() - 2..self.pdu_len()].try_into().unwrap(),
)));
}
@@ -633,20 +632,25 @@ impl PduHeader {
))
}
/// PDU type.
#[inline]
pub fn pdu_type(&self) -> PduType {
self.pdu_type
}
/// Common PDU configuration fields.
#[inline]
pub fn common_pdu_conf(&self) -> &CommonPduConfig {
&self.pdu_conf
}
/// Segment metadata flag.
#[inline]
pub fn seg_metadata_flag(&self) -> SegmentMetadataFlag {
self.seg_metadata_flag
}
/// Segmentation Control.
#[inline]
pub fn seg_ctrl(&self) -> SegmentationControl {
self.seg_ctrl
@@ -812,7 +816,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
assert_eq!(pdu_header.pdu_type(), PduType::FileDirective);
let common_conf_ref = pdu_header.common_pdu_conf();
assert_eq!(*common_conf_ref, common_pdu_cfg);
@@ -878,7 +882,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
@@ -895,7 +899,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
@@ -968,7 +972,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
@@ -981,7 +985,7 @@ mod tests {
assert_eq!(raw_version, CFDP_VERSION_2 + 1);
assert_eq!(
error.to_string(),
"cfdp version missmatch, found 2, expected 1"
"CFDP version missmatch, found 2, expected 1"
);
} else {
panic!("invalid exception: {}", error);
@@ -1013,7 +1017,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
@@ -1029,7 +1033,7 @@ mod tests {
assert_eq!(expected, 7);
assert_eq!(
error.to_string(),
"source slice with size 6 too small, expected at least 7 bytes"
"byte conversion error: source slice with size 6 too small, expected at least 7 bytes"
);
}
}
@@ -1084,7 +1088,7 @@ mod tests {
assert_eq!(dest_id_len, 2);
assert_eq!(
error.to_string(),
"missmatch of PDU source length 1 and destination length 2"
"missmatch of PDU source ID length 1 and destination ID length 2"
);
}
}
@@ -1096,7 +1100,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
@@ -1120,7 +1124,7 @@ mod tests {
let transaction_id = UnsignedByteFieldU8::new(3);
let common_pdu_cfg = CommonPduConfig::new_with_byte_fields(src_id, dest_id, transaction_id)
.expect("common config creation failed");
let pdu_header = PduHeader::new_no_file_data(common_pdu_cfg, 5);
let pdu_header = PduHeader::new_for_file_directive(common_pdu_cfg, 5);
let mut buf: [u8; 7] = [0; 7];
let res = pdu_header.write_to_bytes(&mut buf);
assert!(res.is_ok());
@@ -1152,4 +1156,12 @@ mod tests {
let common_pdu_cfg_1 = common_pdu_cfg_0;
assert_eq!(common_pdu_cfg_0, common_pdu_cfg_1);
}
#[test]
fn test_ctor_from_pdu_conf() {
assert_eq!(
PduHeader::from_pdu_conf_for_file_directive(CommonPduConfig::default()),
PduHeader::new_for_file_directive(CommonPduConfig::default(), 0)
);
}
}

File diff suppressed because it is too large

View File

@@ -9,19 +9,27 @@ use crate::ByteConversionError;
use alloc::vec;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use super::{InvalidTlvTypeFieldError, TlvLvDataTooLargeError};
pub mod msg_to_user;
/// Minimum length of a type-length-value structure, including type and length fields.
pub const MIN_TLV_LEN: usize = 2;
/// Trait for generic TLV structures.
pub trait GenericTlv {
/// TLV type field.
fn tlv_type_field(&self) -> TlvTypeField;
/// Checks whether the type field contains one of the standard types specified in the CFDP
/// standard and is part of the [TlvType] enum.
#[inline]
fn is_standard_tlv(&self) -> bool {
if let TlvTypeField::Standard(_) = self.tlv_type_field() {
return true;
@@ -30,6 +38,7 @@ pub trait GenericTlv {
}
/// Returns the standard TLV type if the TLV field is not a custom field
#[inline]
fn tlv_type(&self) -> Option<TlvType> {
if let TlvTypeField::Standard(tlv_type) = self.tlv_type_field() {
Some(tlv_type)
@@ -39,9 +48,40 @@ pub trait GenericTlv {
}
}
/// Readable TLV structure trait.
pub trait ReadableTlv {
/// Value field of the TLV.
fn value(&self) -> &[u8];
/// Checks whether the value field is empty.
#[inline]
fn is_empty(&self) -> bool {
self.value().is_empty()
}
/// Helper method to retrieve the length of the value. Simply calls the [slice::len] method of
/// [Self::value]
#[inline]
fn len_value(&self) -> usize {
self.value().len()
}
/// Returns the full raw length, including the length byte.
#[inline]
fn len_full(&self) -> usize {
self.len_value() + 2
}
}
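The split into GenericTlv, ReadableTlv and WritableTlv lets a single helper operate on both the borrowed Tlv and the owned TlvOwned. A small sketch (hypothetical helper name, paths as in the tests of this module):
use crate::cfdp::tlv::{GenericTlv, ReadableTlv, TlvType};
/// Returns the value length if the TLV is a standard Message To User TLV, otherwise None.
fn msg_to_user_len(tlv: &(impl GenericTlv + ReadableTlv)) -> Option<usize> {
    if tlv.tlv_type() == Some(TlvType::MsgToUser) {
        return Some(tlv.len_value());
    }
    None
}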
/// Writable TLV structure trait.
pub trait WritableTlv {
/// Write the TLV to bytes.
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
/// Length of the written TLV.
fn len_written(&self) -> usize;
/// Convenience method to write the TLV to an owned [alloc::vec::Vec].
#[cfg(feature = "alloc")]
fn to_vec(&self) -> Vec<u8> {
let mut buf = vec![0; self.len_written()];
@@ -50,34 +90,50 @@ pub trait WritableTlv {
}
}
/// TLV type.
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum TlvType {
/// Filestore request.
FilestoreRequest = 0x00,
/// Filestore response.
FilestoreResponse = 0x01,
/// Message to user.
MsgToUser = 0x02,
/// Fault handler.
FaultHandler = 0x04,
/// Flow label.
FlowLabel = 0x05,
/// Entity ID.
EntityId = 0x06,
}
/// TLV type field variants.
///
/// This allows specifying custom variants as well.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum TlvTypeField {
/// Standard TLV types.
Standard(TlvType),
/// Custom TLV type.
Custom(u8),
}
/// Filestore action codes as specified in the standard.
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum FilestoreActionCode {
/// Create file.
CreateFile = 0b0000,
/// Delete file.
DeleteFile = 0b0001,
/// Rename file.
RenameFile = 0b0010,
/// This operation appends one file to another. The first specified name will form the first
/// part of the new file and the name of the new file. This function can be used to get
@@ -86,9 +142,13 @@ pub enum FilestoreActionCode {
/// This operation replaces the content of the first specified file with the content of
/// the secondly specified file.
ReplaceFile = 0b0100,
/// Create directory.
CreateDirectory = 0b0101,
/// Remove directory.
RemoveDirectory = 0b0110,
/// Deny file.
DenyFile = 0b0111,
/// Deny directory.
DenyDirectory = 0b1000,
}
@@ -129,14 +189,22 @@ pub struct Tlv<'data> {
}
impl<'data> Tlv<'data> {
pub fn new(tlv_type: TlvType, data: &[u8]) -> Result<Tlv, TlvLvError> {
/// Minimum length of a TLV structure, including type and length fields.
pub const MIN_LEN: usize = MIN_TLV_LEN;
/// Generic constructor for a TLV structure.
pub fn new(tlv_type: TlvType, data: &[u8]) -> Result<Tlv<'_>, TlvLvDataTooLargeError> {
Ok(Tlv {
tlv_type_field: TlvTypeField::Standard(tlv_type),
lv: Lv::new(data)?,
})
}
pub fn new_with_custom_type(tlv_type: u8, data: &[u8]) -> Result<Tlv, TlvLvError> {
/// Constructor for a TLV with a custom type field.
pub fn new_with_custom_type(
tlv_type: u8,
data: &[u8],
) -> Result<Tlv<'_>, TlvLvDataTooLargeError> {
Ok(Tlv {
tlv_type_field: TlvTypeField::Custom(tlv_type),
lv: Lv::new(data)?,
@@ -151,31 +219,11 @@ impl<'data> Tlv<'data> {
}
}
pub fn value(&self) -> &[u8] {
self.lv.value()
}
/// Checks whether the value field is empty.
pub fn is_empty(&self) -> bool {
self.value().is_empty()
}
/// Helper method to retrieve the length of the value. Simply calls the [slice::len] method of
/// [Self::value]
pub fn len_value(&self) -> usize {
self.value().len()
}
/// Returns the full raw length, including the length byte.
pub fn len_full(&self) -> usize {
self.len_value() + 2
}
/// Creates a TLV given a raw bytestream. Please note that it is not necessary to pass the
/// bytestream with the exact size of the expected TLV. This function will take care
/// of parsing the length byte, and the length of the parsed TLV can be retrieved using
/// [Self::len_full].
pub fn from_bytes(buf: &'data [u8]) -> Result<Tlv<'data>, TlvLvError> {
pub fn from_bytes(buf: &'data [u8]) -> Result<Tlv<'data>, ByteConversionError> {
generic_len_check_deserialization(buf, MIN_TLV_LEN)?;
let mut tlv = Self {
tlv_type_field: TlvTypeField::from(buf[0]),
@@ -189,9 +237,32 @@ impl<'data> Tlv<'data> {
/// If the TLV was generated from a raw bytestream using [Self::from_bytes], the raw start
/// of the TLV can be retrieved with this method.
#[inline]
pub fn raw_data(&self) -> Option<&[u8]> {
self.lv.raw_data()
}
/// Converts to an owned TLV variant, allocating memory for the value field.
#[cfg(feature = "alloc")]
pub fn to_owned(&self) -> TlvOwned {
TlvOwned {
tlv_type_field: self.tlv_type_field,
data: self.value().to_vec(),
}
}
}
#[cfg(feature = "alloc")]
impl PartialEq<TlvOwned> for Tlv<'_> {
fn eq(&self, other: &TlvOwned) -> bool {
self.tlv_type_field == other.tlv_type_field && self.value() == other.value()
}
}
impl ReadableTlv for Tlv<'_> {
fn value(&self) -> &[u8] {
self.lv.value()
}
}
impl WritableTlv for Tlv<'_> {
@@ -201,31 +272,124 @@ impl WritableTlv for Tlv<'_> {
self.lv.write_to_be_bytes_no_len_check(&mut buf[1..]);
Ok(self.len_full())
}
#[inline]
fn len_written(&self) -> usize {
self.len_full()
}
}
impl GenericTlv for Tlv<'_> {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField {
self.tlv_type_field
}
}
pub(crate) fn verify_tlv_type(raw_type: u8, expected_tlv_type: TlvType) -> Result<(), TlvLvError> {
let tlv_type = TlvType::try_from(raw_type).map_err(|_| TlvLvError::InvalidTlvTypeField {
found: raw_type,
expected: Some(expected_tlv_type.into()),
})?;
if tlv_type != expected_tlv_type {
return Err(TlvLvError::InvalidTlvTypeField {
found: tlv_type as u8,
expected: Some(expected_tlv_type as u8),
});
/// Components of the TLV module which require [alloc] support.
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use super::*;
/// Owned variant of [Tlv] which is consequently [Clone]able and does not have a lifetime
/// associated with a data slice.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct TlvOwned {
pub(crate) tlv_type_field: TlvTypeField,
pub(crate) data: Vec<u8>,
}
impl TlvOwned {
/// Generic constructor.
pub fn new(tlv_type: TlvType, data: &[u8]) -> Self {
Self {
tlv_type_field: TlvTypeField::Standard(tlv_type),
data: data.to_vec(),
}
}
/// Generic constructor with a custom TLV type.
pub fn new_with_custom_type(tlv_type: u8, data: &[u8]) -> Self {
Self {
tlv_type_field: TlvTypeField::Custom(tlv_type),
data: data.to_vec(),
}
}
/// Creates a TLV with an empty value field.
pub fn new_empty(tlv_type: TlvType) -> Self {
Self {
tlv_type_field: TlvTypeField::Standard(tlv_type),
data: Vec::new(),
}
}
/// Write to a byte slice.
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
generic_len_check_data_serialization(buf, self.data.len(), MIN_TLV_LEN)?;
buf[0] = self.tlv_type_field.into();
buf[1] = self.data.len() as u8;
buf[2..2 + self.data.len()].copy_from_slice(&self.data);
Ok(self.len_written())
}
#[inline]
fn len_written(&self) -> usize {
self.data.len() + 2
}
/// Convert to [Tlv]
pub fn as_tlv(&self) -> Tlv<'_> {
Tlv {
tlv_type_field: self.tlv_type_field,
// The API should ensure that the data length is never too large, so the unwrap for the
// LV creation should never be an issue.
lv: Lv::new(&self.data).expect("lv creation failed unexpectedly"),
}
}
}
impl ReadableTlv for TlvOwned {
#[inline]
fn value(&self) -> &[u8] {
&self.data
}
}
impl WritableTlv for TlvOwned {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.write_to_bytes(buf)
}
#[inline]
fn len_written(&self) -> usize {
self.len_written()
}
}
impl GenericTlv for TlvOwned {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField {
self.tlv_type_field
}
}
impl From<Tlv<'_>> for TlvOwned {
fn from(value: Tlv<'_>) -> Self {
value.to_owned()
}
}
impl PartialEq<Tlv<'_>> for TlvOwned {
fn eq(&self, other: &Tlv) -> bool {
self.tlv_type_field == other.tlv_type_field && self.data == other.value()
}
}
Ok(())
}
/// Entity ID TLV.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
@@ -234,6 +398,8 @@ pub struct EntityIdTlv {
}
impl EntityIdTlv {
/// Constructor.
#[inline]
pub fn new(entity_id: UnsignedByteField) -> Self {
Self { entity_id }
}
@@ -248,18 +414,25 @@ impl EntityIdTlv {
Ok(())
}
/// Entity ID.
#[inline]
pub fn entity_id(&self) -> &UnsignedByteField {
&self.entity_id
}
/// Length of the value field.
#[inline]
pub fn len_value(&self) -> usize {
self.entity_id.size()
}
/// Full length of the TLV, including type and length fields.
#[inline]
pub fn len_full(&self) -> usize {
2 + self.entity_id.size()
}
/// Create from a raw bytestream.
pub fn from_bytes(buf: &[u8]) -> Result<Self, TlvLvError> {
Self::len_check(buf)?;
verify_tlv_type(buf[0], TlvType::EntityId)?;
@@ -273,19 +446,14 @@ impl EntityIdTlv {
}
/// Convert to a generic [Tlv], which also erases the programmatic type information.
pub fn to_tlv(self, buf: &mut [u8]) -> Result<Tlv, ByteConversionError> {
pub fn to_tlv(self, buf: &mut [u8]) -> Result<Tlv<'_>, ByteConversionError> {
Self::len_check(buf)?;
self.entity_id
.write_to_be_bytes(&mut buf[2..2 + self.entity_id.size()])?;
Tlv::new(TlvType::EntityId, &buf[2..2 + self.entity_id.size()]).map_err(|e| match e {
TlvLvError::ByteConversion(e) => e,
// All other errors are impossible.
_ => panic!("unexpected TLV error"),
})
// Can't fail.
Ok(Tlv::new(TlvType::EntityId, &buf[2..2 + self.entity_id.size()]).unwrap())
}
}
impl WritableTlv for EntityIdTlv {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
Self::len_check(buf)?;
buf[0] = TlvType::EntityId as u8;
@@ -293,35 +461,50 @@ impl WritableTlv for EntityIdTlv {
Ok(2 + self.entity_id.write_to_be_bytes(&mut buf[2..])?)
}
#[inline]
fn len_written(&self) -> usize {
self.len_full()
}
}
impl WritableTlv for EntityIdTlv {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.write_to_bytes(buf)
}
#[inline]
fn len_written(&self) -> usize {
self.len_written()
}
}
impl GenericTlv for EntityIdTlv {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField {
TlvTypeField::Standard(TlvType::EntityId)
}
}
impl<'data> TryFrom<Tlv<'data>> for EntityIdTlv {
impl TryFrom<Tlv<'_>> for EntityIdTlv {
type Error = TlvLvError;
fn try_from(value: Tlv) -> Result<Self, Self::Error> {
fn try_from(value: Tlv) -> Result<Self, TlvLvError> {
match value.tlv_type_field {
TlvTypeField::Standard(tlv_type) => {
if tlv_type != TlvType::EntityId {
return Err(TlvLvError::InvalidTlvTypeField {
return Err(InvalidTlvTypeFieldError {
found: tlv_type as u8,
expected: Some(TlvType::EntityId as u8),
});
}
.into());
}
}
TlvTypeField::Custom(val) => {
return Err(TlvLvError::InvalidTlvTypeField {
return Err(InvalidTlvTypeFieldError {
found: val,
expected: Some(TlvType::EntityId as u8),
});
}
.into());
}
}
let len_value = value.value().len();
@@ -341,6 +524,8 @@ impl<'data> TryFrom<Tlv<'data>> for EntityIdTlv {
}
}
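A short round-trip sketch for EntityIdTlv with the reworked traits, again in the crate-internal test style (buffer size chosen arbitrarily):
use crate::cfdp::tlv::EntityIdTlv;
use crate::util::UnsignedByteFieldU8;
fn entity_id_tlv_roundtrip() {
    let entity_id_tlv = EntityIdTlv::new(UnsignedByteFieldU8::new(5).into());
    let mut buf: [u8; 16] = [0; 16];
    // Writes the type, length and entity ID fields and returns the full TLV length.
    let written = entity_id_tlv.write_to_bytes(&mut buf).unwrap();
    assert_eq!(written, entity_id_tlv.len_full());
    let read_back = EntityIdTlv::from_bytes(&buf).unwrap();
    assert_eq!(read_back, entity_id_tlv);
}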
/// Does the [FilestoreActionCode] have a second filename?
#[inline]
pub fn fs_request_has_second_filename(action_code: FilestoreActionCode) -> bool {
if action_code == FilestoreActionCode::RenameFile
|| action_code == FilestoreActionCode::AppendFile
@@ -363,6 +548,7 @@ struct FilestoreTlvBase<'first_name, 'second_name> {
}
impl FilestoreTlvBase<'_, '_> {
#[inline]
fn base_len_value(&self) -> usize {
let mut len = 1 + self.first_name.len_full();
if let Some(second_name) = self.second_name {
@@ -372,6 +558,7 @@ impl FilestoreTlvBase<'_, '_> {
}
}
/// Filestore request TLV.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FilestoreRequestTlv<'first_name, 'second_name> {
@@ -380,14 +567,17 @@ pub struct FilestoreRequestTlv<'first_name, 'second_name> {
}
impl<'first_name, 'second_name> FilestoreRequestTlv<'first_name, 'second_name> {
/// Constructor for file creation.
pub fn new_create_file(file_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::CreateFile, file_name, None)
}
/// Constructor for file deletion.
pub fn new_delete_file(file_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::DeleteFile, file_name, None)
}
/// Constructor for file renaming.
pub fn new_rename_file(
source_name: Lv<'first_name>,
target_name: Lv<'second_name>,
@@ -427,18 +617,22 @@ impl<'first_name, 'second_name> FilestoreRequestTlv<'first_name, 'second_name> {
)
}
/// Constructor for directory creation.
pub fn new_create_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::CreateDirectory, dir_name, None)
}
/// Constructor for directory removal.
pub fn new_remove_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::RemoveDirectory, dir_name, None)
}
/// Constructor for file denial.
pub fn new_deny_file(file_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::DenyFile, file_name, None)
}
/// Constructor for directory denial.
pub fn new_deny_directory(dir_name: Lv<'first_name>) -> Result<Self, TlvLvError> {
Self::new(FilestoreActionCode::DenyDirectory, dir_name, None)
}
@@ -472,26 +666,37 @@ impl<'first_name, 'second_name> FilestoreRequestTlv<'first_name, 'second_name> {
})
}
/// Action code.
#[inline]
pub fn action_code(&self) -> FilestoreActionCode {
self.base.action_code
}
/// First name as [Lv].
#[inline]
pub fn first_name(&self) -> Lv<'first_name> {
self.base.first_name
}
/// Second name as optional [Lv].
#[inline]
pub fn second_name(&self) -> Option<Lv<'second_name>> {
self.base.second_name
}
/// Length of the value field.
#[inline]
pub fn len_value(&self) -> usize {
self.base.base_len_value()
}
/// Full TLV length.
#[inline]
pub fn len_full(&self) -> usize {
2 + self.len_value()
}
/// Construct from a raw bytestream.
pub fn from_bytes<'longest: 'first_name + 'second_name>(
buf: &'longest [u8],
) -> Result<Self, TlvLvError> {
@@ -526,9 +731,7 @@ impl<'first_name, 'second_name> FilestoreRequestTlv<'first_name, 'second_name> {
},
})
}
}
impl WritableTlv for FilestoreRequestTlv<'_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.len_full() {
return Err(ByteConversionError::ToSliceTooSmall {
@@ -554,17 +757,31 @@ impl WritableTlv for FilestoreRequestTlv<'_, '_> {
Ok(current_idx)
}
#[inline]
fn len_written(&self) -> usize {
self.len_full()
}
}
impl WritableTlv for FilestoreRequestTlv<'_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.write_to_bytes(buf)
}
#[inline]
fn len_written(&self) -> usize {
self.len_written()
}
}
impl GenericTlv for FilestoreRequestTlv<'_, '_> {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField {
TlvTypeField::Standard(TlvType::FilestoreRequest)
}
}
/// Filestore response TLV.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
@@ -596,6 +813,8 @@ impl<'first_name, 'second_name, 'fs_msg> FilestoreResponseTlv<'first_name, 'seco
Lv::new_empty(),
)
}
/// Generic constructor.
pub fn new(
action_code: FilestoreActionCode,
status_code: u8,
@@ -624,6 +843,7 @@ impl<'first_name, 'second_name, 'fs_msg> FilestoreResponseTlv<'first_name, 'seco
})
}
/// Check whether this response has a second filename.
pub fn has_second_filename(action_code: FilestoreActionCode) -> bool {
if action_code == FilestoreActionCode::RenameFile
|| action_code == FilestoreActionCode::AppendFile
@@ -634,30 +854,43 @@ impl<'first_name, 'second_name, 'fs_msg> FilestoreResponseTlv<'first_name, 'seco
false
}
/// Action code.
#[inline]
pub fn action_code(&self) -> FilestoreActionCode {
self.base.action_code
}
/// Status code.
#[inline]
pub fn status_code(&self) -> u8 {
self.status_code
}
/// First name as [Lv].
#[inline]
pub fn first_name(&self) -> Lv<'first_name> {
self.base.first_name
}
/// Optional second name as [Lv].
#[inline]
pub fn second_name(&self) -> Option<Lv<'second_name>> {
self.base.second_name
}
/// Length of the value field.
#[inline]
pub fn len_value(&self) -> usize {
self.base.base_len_value() + self.filestore_message.len_full()
}
/// Full length of the TLV.
#[inline]
pub fn len_full(&self) -> usize {
2 + self.len_value()
}
/// Construct from a raw bytestream.
pub fn from_bytes<'buf: 'first_name + 'second_name + 'fs_msg>(
buf: &'buf [u8],
) -> Result<Self, TlvLvError> {
@@ -711,9 +944,7 @@ impl<'first_name, 'second_name, 'fs_msg> FilestoreResponseTlv<'first_name, 'seco
filestore_message,
})
}
}
impl WritableTlv for FilestoreResponseTlv<'_, '_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.len_full() {
return Err(ByteConversionError::ToSliceTooSmall {
@@ -746,12 +977,41 @@ impl WritableTlv for FilestoreResponseTlv<'_, '_, '_> {
}
}
impl WritableTlv for FilestoreResponseTlv<'_, '_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.write_to_bytes(buf)
}
#[inline]
fn len_written(&self) -> usize {
self.len_written()
}
}
impl GenericTlv for FilestoreResponseTlv<'_, '_, '_> {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField {
TlvTypeField::Standard(TlvType::FilestoreResponse)
}
}
pub(crate) fn verify_tlv_type(
raw_type: u8,
expected_tlv_type: TlvType,
) -> Result<(), InvalidTlvTypeFieldError> {
let tlv_type = TlvType::try_from(raw_type).map_err(|_| InvalidTlvTypeFieldError {
found: raw_type,
expected: Some(expected_tlv_type.into()),
})?;
if tlv_type != expected_tlv_type {
return Err(InvalidTlvTypeFieldError {
found: tlv_type as u8,
expected: Some(expected_tlv_type as u8),
});
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
@@ -939,14 +1199,14 @@ mod tests {
let tlv_res = Tlv::new(TlvType::MsgToUser, &buf_too_large);
assert!(tlv_res.is_err());
let error = tlv_res.unwrap_err();
if let TlvLvError::DataTooLarge(size) = error {
assert_eq!(size, u8::MAX as usize + 1);
assert_eq!(
error.to_string(),
"data with size 256 larger than allowed 255 bytes"
);
} else {
panic!("unexpected error {:?}", error);
match error {
TlvLvDataTooLargeError(size) => {
assert_eq!(size, u8::MAX as usize + 1);
assert_eq!(
error.to_string(),
"data with size 256 larger than allowed 255 bytes"
);
}
}
}
@@ -1256,7 +1516,8 @@ mod tests {
let error = EntityIdTlv::try_from(msg_to_user_tlv);
assert!(error.is_err());
let error = error.unwrap_err();
if let TlvLvError::InvalidTlvTypeField { found, expected } = error {
if let TlvLvError::InvalidTlvTypeField(InvalidTlvTypeFieldError { found, expected }) = error
{
assert_eq!(found, TlvType::MsgToUser as u8);
assert_eq!(expected, Some(TlvType::EntityId as u8));
assert_eq!(
@@ -1300,4 +1561,71 @@ mod tests {
assert_eq!(tlv_as_vec[0], 20);
assert_eq!(tlv_as_vec[1], 0);
}
#[test]
fn test_tlv_to_owned() {
let entity_id = UbfU8::new(5);
let mut buf: [u8; 4] = [0; 4];
assert!(entity_id.write_to_be_bytes(&mut buf).is_ok());
let tlv_res = Tlv::new(TlvType::EntityId, &buf[0..1]);
assert!(tlv_res.is_ok());
let tlv_res = tlv_res.unwrap();
let tlv_owned = tlv_res.to_owned();
assert_eq!(tlv_res, tlv_owned);
let tlv_owned_from_conversion: TlvOwned = tlv_res.into();
assert_eq!(tlv_owned_from_conversion, tlv_owned);
assert_eq!(tlv_owned_from_conversion, tlv_res);
}
#[test]
fn test_owned_tlv() {
let entity_id = UbfU8::new(5);
let mut buf: [u8; 4] = [0; 4];
assert!(entity_id.write_to_be_bytes(&mut buf).is_ok());
let tlv_res = TlvOwned::new(TlvType::EntityId, &buf[0..1]);
assert_eq!(
tlv_res.tlv_type_field(),
TlvTypeField::Standard(TlvType::EntityId)
);
assert_eq!(tlv_res.len_full(), 3);
assert_eq!(tlv_res.value().len(), 1);
assert_eq!(tlv_res.len_value(), 1);
assert!(!tlv_res.is_empty());
assert_eq!(tlv_res.value()[0], 5);
}
#[test]
fn test_owned_tlv_empty() {
let tlv_res = TlvOwned::new_empty(TlvType::FlowLabel);
assert_eq!(
tlv_res.tlv_type_field(),
TlvTypeField::Standard(TlvType::FlowLabel)
);
assert_eq!(tlv_res.len_full(), 2);
assert_eq!(tlv_res.value().len(), 0);
assert_eq!(tlv_res.len_value(), 0);
assert!(tlv_res.is_empty());
}
#[test]
fn test_owned_tlv_custom_type() {
let tlv_res = TlvOwned::new_with_custom_type(32, &[]);
assert_eq!(tlv_res.tlv_type_field(), TlvTypeField::Custom(32));
assert_eq!(tlv_res.len_full(), 2);
assert_eq!(tlv_res.value().len(), 0);
assert_eq!(tlv_res.len_value(), 0);
assert!(tlv_res.is_empty());
}
#[test]
fn test_owned_tlv_conversion_to_bytes() {
let entity_id = UbfU8::new(5);
let mut buf: [u8; 4] = [0; 4];
assert!(entity_id.write_to_be_bytes(&mut buf).is_ok());
let tlv_res = Tlv::new(TlvType::EntityId, &buf[0..1]);
assert!(tlv_res.is_ok());
let tlv_res = tlv_res.unwrap();
let tlv_owned_from_conversion: TlvOwned = tlv_res.into();
assert_eq!(tlv_res.to_vec(), tlv_owned_from_conversion.to_vec());
}
}
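To summarize the owned/borrowed split introduced above: Tlv borrows its value, TlvOwned owns it, and the two compare equal across the boundary. A minimal sketch, assuming the alloc feature is enabled:
use crate::cfdp::tlv::{ReadableTlv, Tlv, TlvOwned, TlvType, WritableTlv};
fn owned_and_borrowed_tlvs() {
    let msg = [1, 2, 3, 4];
    // Borrowed variant referencing the slice above.
    let borrowed = Tlv::new(TlvType::MsgToUser, &msg).unwrap();
    // Owned variant, no lifetime attached.
    let owned: TlvOwned = borrowed.to_owned();
    assert_eq!(borrowed, owned);
    assert_eq!(owned.as_tlv(), borrowed);
    // Both serialize to the same raw bytes.
    let raw_owned = owned.to_vec();
    assert_eq!(borrowed.to_vec(), raw_owned);
    // The raw form can be parsed back into a borrowed TLV.
    let read_back = Tlv::from_bytes(&raw_owned).unwrap();
    assert_eq!(read_back.value(), &msg);
}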

View File

@@ -1,16 +1,23 @@
//! Abstractions for the Message to User CFDP TLV subtype.
use super::{GenericTlv, Tlv, TlvLvError, TlvType, TlvTypeField, WritableTlv};
use crate::ByteConversionError;
#[cfg(feature = "alloc")]
use super::TlvOwned;
use super::{GenericTlv, ReadableTlv, Tlv, TlvLvError, TlvType, TlvTypeField, WritableTlv};
use crate::{
cfdp::{InvalidTlvTypeFieldError, TlvLvDataTooLargeError},
ByteConversionError,
};
use delegate::delegate;
/// Message To User TLV structure.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct MsgToUserTlv<'data> {
/// Wrapped generic TLV structure.
pub tlv: Tlv<'data>,
}
impl<'data> MsgToUserTlv<'data> {
/// Create a new message to user TLV where the type field is set correctly.
pub fn new(value: &'data [u8]) -> Result<MsgToUserTlv<'data>, TlvLvError> {
pub fn new(value: &'data [u8]) -> Result<MsgToUserTlv<'data>, TlvLvDataTooLargeError> {
Ok(Self {
tlv: Tlv::new(TlvType::MsgToUser, value)?,
})
@@ -18,7 +25,9 @@ impl<'data> MsgToUserTlv<'data> {
delegate! {
to self.tlv {
/// Value field of the TLV.
pub fn value(&self) -> &[u8];
/// Helper method to retrieve the length of the value. Simply calls the [slice::len] method of
/// [Self::value]
pub fn len_value(&self) -> usize;
@@ -32,12 +41,16 @@ impl<'data> MsgToUserTlv<'data> {
}
}
/// Is this a standard TLV?
#[inline]
pub fn is_standard_tlv(&self) -> bool {
true
}
pub fn tlv_type(&self) -> Option<TlvType> {
Some(TlvType::MsgToUser)
/// TLV type field.
#[inline]
pub fn tlv_type(&self) -> TlvType {
TlvType::MsgToUser
}
/// Check whether this message is a reserved CFDP message like a Proxy Operation Message.
@@ -60,36 +73,68 @@ impl<'data> MsgToUserTlv<'data> {
match msg_to_user.tlv.tlv_type_field() {
TlvTypeField::Standard(tlv_type) => {
if tlv_type != TlvType::MsgToUser {
return Err(TlvLvError::InvalidTlvTypeField {
return Err(InvalidTlvTypeFieldError {
found: tlv_type as u8,
expected: Some(TlvType::MsgToUser as u8),
});
}
.into());
}
}
TlvTypeField::Custom(raw) => {
return Err(TlvLvError::InvalidTlvTypeField {
return Err(InvalidTlvTypeFieldError {
found: raw,
expected: Some(TlvType::MsgToUser as u8),
});
}
.into());
}
}
Ok(msg_to_user)
}
}
impl WritableTlv for MsgToUserTlv<'_> {
/// Convert to a generic [Tlv].
#[inline]
pub fn to_tlv(&self) -> Tlv<'data> {
self.tlv
}
/// Convert to a [TlvOwned].
#[cfg(feature = "alloc")]
pub fn to_owned(&self) -> TlvOwned {
self.tlv.to_owned()
}
#[inline]
fn len_written(&self) -> usize {
self.len_full()
}
delegate!(
to self.tlv {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
/// Write the TLV to a byte buffer.
pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
}
);
}
impl<'a> From<MsgToUserTlv<'a>> for Tlv<'a> {
fn from(value: MsgToUserTlv<'a>) -> Tlv<'a> {
value.to_tlv()
}
}
impl WritableTlv for MsgToUserTlv<'_> {
#[inline]
fn len_written(&self) -> usize {
self.len_written()
}
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.tlv.write_to_bytes(buf)
}
}
impl GenericTlv for MsgToUserTlv<'_> {
#[inline]
fn tlv_type_field(&self) -> TlvTypeField {
self.tlv.tlv_type_field()
}
@@ -106,7 +151,7 @@ mod tests {
assert!(msg_to_user.is_ok());
let msg_to_user = msg_to_user.unwrap();
assert!(msg_to_user.is_standard_tlv());
assert_eq!(msg_to_user.tlv_type().unwrap(), TlvType::MsgToUser);
assert_eq!(msg_to_user.tlv_type(), TlvType::MsgToUser);
assert_eq!(
msg_to_user.tlv_type_field(),
TlvTypeField::Standard(TlvType::MsgToUser)
@@ -139,6 +184,40 @@ mod tests {
);
}
#[test]
fn test_msg_to_user_type_reduction() {
let custom_value: [u8; 4] = [1, 2, 3, 4];
let msg_to_user = MsgToUserTlv::new(&custom_value).unwrap();
let tlv = msg_to_user.to_tlv();
assert_eq!(
tlv.tlv_type_field(),
TlvTypeField::Standard(TlvType::MsgToUser)
);
assert_eq!(tlv.value(), custom_value);
}
#[test]
fn test_msg_to_user_to_tlv() {
let custom_value: [u8; 4] = [1, 2, 3, 4];
let msg_to_user = MsgToUserTlv::new(&custom_value).unwrap();
let tlv: Tlv = msg_to_user.into();
assert_eq!(msg_to_user.to_tlv(), tlv);
}
#[test]
fn test_msg_to_user_owner_converter() {
let custom_value: [u8; 4] = [1, 2, 3, 4];
let msg_to_user = MsgToUserTlv::new(&custom_value).unwrap();
let tlv = msg_to_user.to_owned();
assert_eq!(
tlv.tlv_type_field(),
TlvTypeField::Standard(TlvType::MsgToUser)
);
assert_eq!(tlv.value(), custom_value);
}
#[test]
fn test_reserved_msg_deserialization() {
let custom_value: [u8; 3] = [1, 2, 3];
@@ -154,9 +233,9 @@ mod tests {
fn test_reserved_msg_deserialization_invalid_type() {
let trash: [u8; 5] = [TlvType::FlowLabel as u8, 3, 1, 2, 3];
let error = MsgToUserTlv::from_bytes(&trash).unwrap_err();
if let TlvLvError::InvalidTlvTypeField { found, expected } = error {
assert_eq!(found, TlvType::FlowLabel as u8);
assert_eq!(expected, Some(TlvType::MsgToUser as u8));
if let TlvLvError::InvalidTlvTypeField(inner) = error {
assert_eq!(inner.found, TlvType::FlowLabel as u8);
assert_eq!(inner.expected, Some(TlvType::MsgToUser as u8));
} else {
panic!("Wrong error type returned: {:?}", error);
}
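The message-to-user wrapper reuses the generic TLV machinery shown above. A minimal usage sketch, not taken from the diff and assuming MsgToUserTlv, TlvType and the WritableTlv trait are in scope:

fn msg_to_user_sketch() {
    let msg = [1, 2, 3, 4];
    let msg_to_user = MsgToUserTlv::new(&msg).expect("creating TLV failed");
    assert_eq!(msg_to_user.len_value(), 4);
    // Wire format: one type byte, one length byte, then the value.
    let mut buf = [0u8; 8];
    let written = msg_to_user.write_to_bytes(&mut buf).expect("write failed");
    assert_eq!(written, 6);
    assert_eq!(buf[0], TlvType::MsgToUser as u8);
    assert_eq!(buf[1], 4);
}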

15 src/crc.rs Normal file

@@ -0,0 +1,15 @@
//! # CRC checksum support.
//!
//! Thin wrapper around the [crc] crate.
/// CRC algorithm used by the PUS standard, the CCSDS TC standard and the CFDP standard, using
/// a [crc::NoTable] as the CRC implementation.
pub const CRC_CCITT_FALSE_NO_TABLE: crc::Crc<u16, crc::NoTable> =
crc::Crc::<u16, crc::NoTable>::new(&crc::CRC_16_IBM_3740);
/// CRC algorithm used by the PUS standard, the CCSDS TC standard and the CFDP standard, using
/// [crc::Table<1>] as the CRC implementation.
pub const CRC_CCITT_FALSE: crc::Crc<u16> = crc::Crc::<u16>::new(&crc::CRC_16_IBM_3740);
/// CRC algorithm used by the PUS standard, the CCSDS TC standard and the CFDP standard, using
/// a [crc::Table<16>] large table as the CRC implementation.
pub const CRC_CCITT_FALSE_BIG_TABLE: crc::Crc<u16, crc::Table<16>> =
crc::Crc::<u16, crc::Table<16>>::new(&crc::CRC_16_IBM_3740);
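A short usage sketch for these constants, assuming the digest API of the [crc] crate; the payload bytes are illustrative only:

fn crc_sketch() {
    let payload = [0x1A, 0x2B, 0x3C, 0x4D];
    // One-shot checksum over the payload.
    let checksum = CRC_CCITT_FALSE.checksum(&payload);
    // The table-less variant implements the same CRC-16/CCITT-FALSE algorithm,
    // trading speed for a smaller memory footprint, so the results match.
    let mut digest = CRC_CCITT_FALSE_NO_TABLE.digest();
    digest.update(&payload);
    assert_eq!(digest.finalize(), checksum);
}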


@@ -3,17 +3,26 @@ use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// Event service subtype ID.
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum Subservice {
pub enum MessageSubtypeId {
/// Telemetry - Info report.
TmInfoReport = 1,
/// Telemetry - Low severity report.
TmLowSeverityReport = 2,
/// Telemetry - Medium severity report.
TmMediumSeverityReport = 3,
/// Telemetry - High severity report.
TmHighSeverityReport = 4,
/// Telecommand - Enable event generation.
TcEnableEventGeneration = 5,
/// Telecommand - Disable event generation.
TcDisableEventGeneration = 6,
/// Telecommand - Report disabled list.
TcReportDisabledList = 7,
/// Telemetry - Disabled events report.
TmDisabledEventsReport = 8,
}
@@ -23,19 +32,19 @@ mod tests {
#[test]
fn test_conv_into_u8() {
let subservice: u8 = Subservice::TmLowSeverityReport.into();
let subservice: u8 = MessageSubtypeId::TmLowSeverityReport.into();
assert_eq!(subservice, 2);
}
#[test]
fn test_conv_from_u8() {
let subservice: Subservice = 2.try_into().unwrap();
assert_eq!(subservice, Subservice::TmLowSeverityReport);
let subservice: MessageSubtypeId = 2.try_into().unwrap();
assert_eq!(subservice, MessageSubtypeId::TmLowSeverityReport);
}
#[test]
fn test_conv_fails() {
let conversion = Subservice::try_from(9);
let conversion = MessageSubtypeId::try_from(9);
assert!(conversion.is_err());
let err = conversion.unwrap_err();
assert_eq!(err.number, 9);


@@ -3,31 +3,49 @@ use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// Housekeeping service subtype ID.
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum Subservice {
pub enum MessageSubtypeId {
// Regular HK
/// Telecommand - Create Housekeeping Report Structure.
TcCreateHkReportStructure = 1,
/// Telecommand - Delete HK report structures.
TcDeleteHkReportStructures = 3,
/// Telecommand - Enable HK generation.
TcEnableHkGeneration = 5,
/// Telecommand - Disable HK generation.
TcDisableHkGeneration = 6,
/// Telecommand - Report HK report structures.
TcReportHkReportStructures = 9,
/// Telemetry - HK report.
TmHkPacket = 25,
/// Telecommand - Generate one-shot report.
TcGenerateOneShotHk = 27,
/// Telecommand - Modify collection interval.
TcModifyHkCollectionInterval = 31,
// Diagnostics HK
/// Telecommand - Create diagnostics report structures.
TcCreateDiagReportStructure = 2,
/// Telecommand - Delete diagnostics report structures.
TcDeleteDiagReportStructures = 4,
/// Telecommand - Enable diagnostics generation.
TcEnableDiagGeneration = 7,
/// Telecommand - Disable diagnostics generation.
TcDisableDiagGeneration = 8,
/// Telemetry - HK structures report.
TmHkStructuresReport = 10,
/// Telecommand - Report diagnostics report structures.
TcReportDiagReportStructures = 11,
/// Telemetry - Diagnostics report structures.
TmDiagStructuresReport = 12,
/// Telemetry - Diagnostics packet.
TmDiagPacket = 26,
/// Telecommand - Generate one-shot diagnostics report.
TcGenerateOneShotDiag = 28,
/// Telecommand - Modify diagnostics collection interval.
TcModifyDiagCollectionInterval = 32,
}
@@ -37,25 +55,26 @@ mod tests {
#[test]
fn test_try_from_u8() {
let hk_report_subservice_raw = 25;
let hk_report: Subservice = Subservice::try_from(hk_report_subservice_raw).unwrap();
assert_eq!(hk_report, Subservice::TmHkPacket);
let hk_report: MessageSubtypeId =
MessageSubtypeId::try_from(hk_report_subservice_raw).unwrap();
assert_eq!(hk_report, MessageSubtypeId::TmHkPacket);
}
#[test]
fn test_into_u8() {
let hk_report_raw: u8 = Subservice::TmHkPacket.into();
let hk_report_raw: u8 = MessageSubtypeId::TmHkPacket.into();
assert_eq!(hk_report_raw, 25);
}
#[test]
fn test_partial_eq() {
let hk_report_raw = Subservice::TmHkPacket;
assert_ne!(hk_report_raw, Subservice::TcGenerateOneShotHk);
assert_eq!(hk_report_raw, Subservice::TmHkPacket);
let hk_report_raw = MessageSubtypeId::TmHkPacket;
assert_ne!(hk_report_raw, MessageSubtypeId::TcGenerateOneShotHk);
assert_eq!(hk_report_raw, MessageSubtypeId::TmHkPacket);
}
#[test]
fn test_copy_clone() {
let hk_report = Subservice::TmHkPacket;
let hk_report = MessageSubtypeId::TmHkPacket;
let hk_report_copy = hk_report;
assert_eq!(hk_report, hk_report_copy);
}


@@ -3,94 +3,105 @@
//!
//! You can find the PUS telecommand types in the [tc] module and the PUS telemetry
//! types inside the [tm] module.
use crate::{ByteConversionError, CcsdsPacket, CRC_CCITT_FALSE};
use crate::{
crc::{CRC_CCITT_FALSE, CRC_CCITT_FALSE_NO_TABLE},
ByteConversionError, CcsdsPacket,
};
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use core::fmt::{Debug, Display, Formatter};
use arbitrary_int::u4;
use core::fmt::Debug;
use core::mem::size_of;
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
pub mod event;
pub mod hk;
pub mod scheduling;
pub mod tc;
pub mod tc_pus_a;
pub mod tm;
pub mod tm_pus_a;
pub mod verification;
/// Type alias for the CRC16 type.
pub type CrcType = u16;
/// Standard PUS service IDs.
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
#[non_exhaustive]
pub enum PusServiceId {
/// Service 1
/// Service 1 Verification
Verification = 1,
/// Service 2
/// Service 2 Device Access
DeviceAccess = 2,
/// Service 3
/// Service 3 Housekeeping
Housekeeping = 3,
/// Service 4
/// Service 4 Parameter Statistics
ParameterStatistics = 4,
/// Service 5
/// Service 5 Event
Event = 5,
/// Service 6
/// Service 6 Memory Management
MemoryManagement = 6,
/// Service 8
/// Service 8 Action
Action = 8,
/// Service 9
/// Service 9 Time Management
TimeManagement = 9,
/// Service 11
/// Service 11 Scheduling
Scheduling = 11,
/// Service 12
/// Service 12 On-Board Monitoring
OnBoardMonitoring = 12,
/// Service 13
/// Service 13 Large Packet Transfer
LargePacketTransfer = 13,
/// Service 14
/// Service 14 Real-Time Forwarding Control
RealTimeForwardingControl = 14,
/// Service 15
/// Service 15 Storage And Retrieval
StorageAndRetrival = 15,
/// Service 17
/// Service 17 Test
Test = 17,
/// Service 18
/// Service 18 Operations And Procedures
OpsAndProcedures = 18,
/// Service 19
/// Service 19 Event Action
EventAction = 19,
/// Service 20
/// Service 20 Parameter
Parameter = 20,
/// Service 21
/// Service 21 Request Sequencing
RequestSequencing = 21,
/// Service 22
/// Service 22 Position Based Scheduling
PositionBasedScheduling = 22,
/// Service 23
/// Service 23 File Management
FileManagement = 23,
}
/// All PUS versions. Only PUS C is supported by this library.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[derive(PartialEq, Eq, Debug, num_enum::TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[bitbybit::bitenum(u4, exhaustive = false)]
#[repr(u8)]
#[non_exhaustive]
pub enum PusVersion {
/// ESA PUS
EsaPus = 0,
/// PUS A
PusA = 1,
/// PUS C
PusC = 2,
Invalid = 0b1111,
}
impl TryFrom<u8> for PusVersion {
type Error = ();
impl TryFrom<u4> for PusVersion {
type Error = u4;
fn try_from(value: u8) -> Result<Self, Self::Error> {
fn try_from(value: u4) -> Result<Self, Self::Error> {
match value {
x if x == PusVersion::EsaPus as u8 => Ok(PusVersion::EsaPus),
x if x == PusVersion::PusA as u8 => Ok(PusVersion::PusA),
x if x == PusVersion::PusC as u8 => Ok(PusVersion::PusC),
_ => Err(()),
x if x == PusVersion::EsaPus.raw_value() => Ok(PusVersion::EsaPus),
x if x == PusVersion::PusA.raw_value() => Ok(PusVersion::PusA),
x if x == PusVersion::PusC.raw_value() => Ok(PusVersion::PusC),
_ => Err(value),
}
}
}
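With the switch to an arbitrary_int based 4-bit field, the conversion now operates on u4 values and hands back the offending raw value on failure. A small sketch, assuming u4::new from the arbitrary_int crate:

fn pus_version_sketch() {
    use arbitrary_int::u4;
    assert_eq!(PusVersion::try_from(u4::new(2)), Ok(PusVersion::PusC));
    // Unknown raw values are returned as the error payload instead of a unit error.
    assert_eq!(PusVersion::try_from(u4::new(3)), Err(u4::new(3)));
}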
@@ -98,44 +109,70 @@ impl TryFrom<u8> for PusVersion {
/// ECSS Packet Type Codes (PTC)s.
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum PacketTypeCodes {
/// Boolean.
Boolean = 1,
/// Enumerated.
Enumerated = 2,
/// Unsigned Integer.
UnsignedInt = 3,
/// Signed Integer.
SignedInt = 4,
/// Real (floating point).
Real = 5,
/// Bit string.
BitString = 6,
/// Octet (byte) string.
OctetString = 7,
/// Character string.
CharString = 8,
/// Absolute time.
AbsoluteTime = 9,
/// Relative time.
RelativeTime = 10,
/// Deduced.
Deduced = 11,
/// Packet.
Packet = 12,
}
/// Type alias for the ECSS Packet Type Codes (PTC)s.
pub type Ptc = PacketTypeCodes;
/// ECSS Packet Field Codes (PFC)s for the unsigned [Ptc].
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum PfcUnsigned {
/// 1 byte.
OneByte = 4,
/// 12 bits.
TwelveBits = 8,
/// 2 bytes.
TwoBytes = 12,
/// 3 bytes.
ThreeBytes = 13,
/// 4 bytes.
FourBytes = 14,
/// 6 bytes.
SixBytes = 15,
/// 8 bytes.
EightBytes = 16,
/// 1 bit.
OneBit = 17,
/// 2 bits.
TwoBits = 18,
/// 3 bits.
ThreeBits = 19,
}
/// ECSS Packet Field Codes (PFC)s for the real (floating point) [Ptc].
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum PfcReal {
/// 4 octets simple precision format (IEEE)
@@ -148,62 +185,76 @@ pub enum PfcReal {
DoubleMilStd = 4,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
/// Generic PUS error.
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum PusError {
VersionNotSupported(PusVersion),
/// PUS version is not supported.
#[error("PUS version {0:?} not supported")]
VersionNotSupported(u4),
/// Checksum failure.
#[error("checksum verification for crc16 {0:#06x} failed")]
ChecksumFailure(u16),
/// CRC16 needs to be calculated first
CrcCalculationMissing,
ByteConversion(ByteConversionError),
//#[error("crc16 was not calculated")]
//CrcCalculationMissing,
#[error("pus error: {0}")]
ByteConversion(#[from] ByteConversionError),
}
impl Display for PusError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
PusError::VersionNotSupported(v) => {
write!(f, "PUS version {v:?} not supported")
}
PusError::ChecksumFailure(crc) => {
write!(f, "checksum verification for crc16 {crc:#06x} failed")
}
PusError::CrcCalculationMissing => {
write!(f, "crc16 was not calculated")
}
PusError::ByteConversion(e) => {
write!(f, "pus error: {e}")
}
/// Message type ID field.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct MessageTypeId {
/// Service type ID.
pub type_id: u8,
/// Subtype ID.
pub subtype_id: u8,
}
impl MessageTypeId {
/// Generic constructor.
pub const fn new(type_id: u8, subtype_id: u8) -> Self {
Self {
type_id,
subtype_id,
}
}
}
#[cfg(feature = "std")]
impl Error for PusError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
if let PusError::ByteConversion(e) = self {
return Some(e);
}
None
}
}
impl From<ByteConversionError> for PusError {
fn from(e: ByteConversionError) -> Self {
PusError::ByteConversion(e)
}
}
/// Generic trait to describe common attributes for both PUS Telecommands (TC) and PUS Telemetry
/// (TM) packets. All PUS packets are also a special type of [CcsdsPacket]s.
pub trait PusPacket: CcsdsPacket {
const PUS_VERSION: PusVersion = PusVersion::PusC;
/// PUS version.
fn pus_version(&self) -> Result<PusVersion, u4>;
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
/// Message type ID.
fn message_type_id(&self) -> MessageTypeId;
/// Service type ID.
#[inline]
fn service_type_id(&self) -> u8 {
self.message_type_id().type_id
}
/// Message subtype ID.
#[inline]
fn message_subtype_id(&self) -> u8 {
self.message_type_id().subtype_id
}
/// User data field.
fn user_data(&self) -> &[u8];
fn crc16(&self) -> Option<u16>;
/// CRC-16-CCITT checksum.
fn checksum(&self) -> Option<u16>;
/// Returns whether the optional CRC-16-CCITT checksum is present.
fn has_checksum(&self) -> bool {
self.checksum().is_some()
}
}
pub(crate) fn crc_from_raw_data(raw_data: &[u8]) -> Result<u16, ByteConversionError> {
@@ -230,17 +281,23 @@ pub(crate) fn user_data_from_raw(
current_idx: usize,
total_len: usize,
slice: &[u8],
has_checksum: bool,
) -> Result<&[u8], ByteConversionError> {
match current_idx {
_ if current_idx > total_len - 2 => Err(ByteConversionError::FromSliceTooSmall {
found: total_len - 2,
expected: current_idx,
}),
_ => Ok(&slice[current_idx..total_len - 2]),
if has_checksum {
if current_idx > total_len - 2 {
return Err(ByteConversionError::FromSliceTooSmall {
found: total_len - 2,
expected: current_idx,
});
}
Ok(&slice[current_idx..total_len - 2])
} else {
Ok(&slice[current_idx..total_len])
}
}
pub(crate) fn verify_crc16_ccitt_false_from_raw_to_pus_error(
/// Verify the CRC16 of a raw packet and return a [PusError] on failure.
pub fn verify_crc16_ccitt_false_from_raw_to_pus_error(
raw_data: &[u8],
crc16: u16,
) -> Result<(), PusError> {
@@ -249,7 +306,19 @@ pub(crate) fn verify_crc16_ccitt_false_from_raw_to_pus_error(
.ok_or(PusError::ChecksumFailure(crc16))
}
pub(crate) fn verify_crc16_ccitt_false_from_raw(raw_data: &[u8]) -> bool {
/// Verify the CRC16 of a raw packet using a table-less implementation and return a [PusError] on
/// failure.
pub fn verify_crc16_ccitt_false_from_raw_to_pus_error_no_table(
raw_data: &[u8],
crc16: u16,
) -> Result<(), PusError> {
verify_crc16_ccitt_false_from_raw_no_table(raw_data)
.then_some(())
.ok_or(PusError::ChecksumFailure(crc16))
}
/// Verify the CRC16 of a raw packet.
pub fn verify_crc16_ccitt_false_from_raw(raw_data: &[u8]) -> bool {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(raw_data);
if digest.finalize() == 0 {
@@ -258,28 +327,28 @@ pub(crate) fn verify_crc16_ccitt_false_from_raw(raw_data: &[u8]) -> bool {
false
}
macro_rules! ccsds_impl {
() => {
delegate!(to self.sp_header {
#[inline]
fn ccsds_version(&self) -> u8;
#[inline]
fn packet_id(&self) -> crate::PacketId;
#[inline]
fn psc(&self) -> crate::PacketSequenceCtrl;
#[inline]
fn data_len(&self) -> u16;
});
/// Verify the CRC16 of a raw packet, using the table-less implementation.
pub fn verify_crc16_ccitt_false_from_raw_no_table(raw_data: &[u8]) -> bool {
let mut digest = CRC_CCITT_FALSE_NO_TABLE.digest();
digest.update(raw_data);
if digest.finalize() == 0 {
return true;
}
false
}
macro_rules! sp_header_impls {
() => {
delegate!(to self.sp_header {
/// Set the CCSDS APID.
#[inline]
pub fn set_apid(&mut self, apid: u16) -> bool;
pub fn set_apid(&mut self, apid: u11);
/// Set the CCSDS sequence count.
#[inline]
pub fn set_seq_count(&mut self, seq_count: u16) -> bool;
pub fn set_seq_count(&mut self, seq_count: u14);
/// Set the CCSDS sequence flags.
#[inline]
pub fn set_seq_flags(&mut self, seq_flag: SequenceFlags);
});
@@ -287,7 +356,6 @@ macro_rules! sp_header_impls {
}
use crate::util::{GenericUnsignedByteField, ToBeBytes, UnsignedEnum};
pub(crate) use ccsds_impl;
pub(crate) use sp_header_impls;
/// Generic trait for ECSS enumeration which consist of a PFC field denoting their bit length
@@ -299,27 +367,28 @@ pub trait EcssEnumeration: UnsignedEnum {
fn pfc(&self) -> u8;
}
/// Extension trait for [EcssEnumeration] which adds common trait bounds.
pub trait EcssEnumerationExt: EcssEnumeration + Debug + Copy + Clone + PartialEq + Eq {}
/// ECSS enumerated type wrapper.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct GenericEcssEnumWrapper<TYPE: Copy + Into<u64>> {
field: GenericUnsignedByteField<TYPE>,
}
pub struct GenericEcssEnumWrapper<TYPE: Copy + Into<u64>>(GenericUnsignedByteField<TYPE>);
impl<TYPE: Copy + Into<u64>> GenericEcssEnumWrapper<TYPE> {
/// Returns [PacketTypeCodes::Enumerated].
pub const fn ptc() -> PacketTypeCodes {
PacketTypeCodes::Enumerated
}
pub const fn value_typed(&self) -> TYPE {
self.field.value_typed()
/// Value.
pub const fn value(&self) -> TYPE {
self.0.value()
}
pub fn new(val: TYPE) -> Self {
Self {
field: GenericUnsignedByteField::new(val),
}
/// Generic constructor.
pub const fn new(val: TYPE) -> Self {
Self(GenericUnsignedByteField::new(val))
}
}
@@ -329,11 +398,11 @@ impl<TYPE: Copy + ToBeBytes + Into<u64>> UnsignedEnum for GenericEcssEnumWrapper
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.field.write_to_be_bytes(buf)
self.0.write_to_be_bytes(buf)
}
fn value(&self) -> u64 {
self.field.value()
fn value_raw(&self) -> u64 {
self.0.value().into()
}
}
@@ -357,11 +426,12 @@ impl<T: Copy + Into<u64>> From<T> for GenericEcssEnumWrapper<T> {
macro_rules! generic_ecss_enum_typedefs_and_from_impls {
($($ty:ty => $Enum:ident),*) => {
$(
/// Type alias for ECSS enumeration wrapper around `$ty`
pub type $Enum = GenericEcssEnumWrapper<$ty>;
impl From<$Enum> for $ty {
fn from(value: $Enum) -> Self {
value.value_typed()
value.value()
}
}
)*
@@ -381,8 +451,48 @@ generic_ecss_enum_typedefs_and_from_impls! {
/// byte representation. This is especially useful for generic abstractions which depend only
/// on the serialization of those packets.
pub trait WritablePusPacket {
/// The length here also includes the CRC length.
fn len_written(&self) -> usize;
fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError>;
/// Returns whether checksum generation is enabled for the packet.
fn has_checksum(&self) -> bool;
/// Writes the packet to the given slice without writing the CRC checksum.
///
/// The returned size is the written size WITHOUT the CRC checksum.
/// If the checksum generation is disabled, this function is identical to the APIs which
/// generate a checksum.
fn write_to_bytes_no_checksum(&self, slice: &mut [u8]) -> Result<usize, PusError>;
/// First uses [Self::write_to_bytes_no_checksum] to write the packet to the given slice and
/// then uses the [CRC_CCITT_FALSE] to calculate the CRC and write it to the slice if the
/// packet is configured to include a checksum.
fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError> {
let mut curr_idx = self.write_to_bytes_no_checksum(slice)?;
if self.has_checksum() {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(&slice[0..curr_idx]);
slice[curr_idx..curr_idx + 2].copy_from_slice(&digest.finalize().to_be_bytes());
curr_idx += 2;
}
Ok(curr_idx)
}
/// First uses [Self::write_to_bytes_no_checksum] to write the packet to the given slice and then
/// uses the [CRC_CCITT_FALSE_NO_TABLE] to calculate the CRC and write it to the slice if
/// the packet is configured to include a checksum.
fn write_to_bytes_checksum_no_table(&self, slice: &mut [u8]) -> Result<usize, PusError> {
let mut curr_idx = self.write_to_bytes_no_checksum(slice)?;
if self.has_checksum() {
let mut digest = CRC_CCITT_FALSE_NO_TABLE.digest();
digest.update(&slice[0..curr_idx]);
slice[curr_idx..curr_idx + 2].copy_from_slice(&digest.finalize().to_be_bytes());
curr_idx += 2;
}
Ok(curr_idx)
}
/// Converts the packet into an owned [alloc::vec::Vec].
#[cfg(feature = "alloc")]
fn to_vec(&self) -> Result<Vec<u8>, PusError> {
// This is the correct way to do this. See
@@ -394,6 +504,26 @@ pub trait WritablePusPacket {
}
}
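A hedged sketch of how the trait composes, using only the methods defined above; this helper is illustrative and not part of the diff:

fn serialize_pus_packet(packet: &impl WritablePusPacket, buf: &mut [u8]) -> Result<usize, PusError> {
    // len_written() already accounts for the optional CRC16, so the caller can size
    // the buffer up front; write_to_bytes() then appends the checksum if enabled.
    debug_assert!(buf.len() >= packet.len_written());
    packet.write_to_bytes(buf)
}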
/// PUS packet creator configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct CreatorConfig {
/// Set the CCSDS data length field on construction.
pub set_ccsds_len: bool,
/// CRC-16-CCITT Checksum is present.
pub has_checksum: bool,
}
impl Default for CreatorConfig {
fn default() -> Self {
Self {
set_ccsds_len: true,
has_checksum: true,
}
}
}
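A brief sketch of the two common configurations, based entirely on the fields shown above:

fn creator_config_sketch() {
    // Default: set the CCSDS length field and append a CRC16.
    let default_cfg = CreatorConfig::default();
    assert!(default_cfg.set_ccsds_len && default_cfg.has_checksum);
    // Same, but without checksum generation.
    let no_checksum_cfg = CreatorConfig {
        has_checksum: false,
        ..CreatorConfig::default()
    };
    assert!(!no_checksum_cfg.has_checksum);
}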
#[cfg(test)]
mod tests {
use alloc::string::ToString;
@@ -417,7 +547,7 @@ mod tests {
.expect("To byte conversion of u8 failed");
assert_eq!(buf[1], 1);
assert_eq!(my_enum.value(), 1);
assert_eq!(my_enum.value_typed(), 1);
assert_eq!(my_enum.value(), 1);
let enum_as_u8: u8 = my_enum.into();
assert_eq!(enum_as_u8, 1);
let vec = my_enum.to_vec();
@@ -436,7 +566,7 @@ mod tests {
assert_eq!(buf[1], 0x1f);
assert_eq!(buf[2], 0x2f);
assert_eq!(my_enum.value(), 0x1f2f);
assert_eq!(my_enum.value_typed(), 0x1f2f);
assert_eq!(my_enum.value(), 0x1f2f);
let enum_as_raw: u16 = my_enum.into();
assert_eq!(enum_as_raw, 0x1f2f);
let vec = my_enum.to_vec();
@@ -473,7 +603,7 @@ mod tests {
assert_eq!(buf[3], 0x3f);
assert_eq!(buf[4], 0x4f);
assert_eq!(my_enum.value(), 0x1f2f3f4f);
assert_eq!(my_enum.value_typed(), 0x1f2f3f4f);
assert_eq!(my_enum.value(), 0x1f2f3f4f);
let enum_as_raw: u32 = my_enum.into();
assert_eq!(enum_as_raw, 0x1f2f3f4f);
let vec = my_enum.to_vec();
@@ -511,7 +641,7 @@ mod tests {
assert_eq!(buf[6], 0x4f);
assert_eq!(buf[7], 0x5f);
assert_eq!(my_enum.value(), 0x1f2f3f4f5f);
assert_eq!(my_enum.value_typed(), 0x1f2f3f4f5f);
assert_eq!(my_enum.value(), 0x1f2f3f4f5f);
let enum_as_raw: u64 = my_enum.into();
assert_eq!(enum_as_raw, 0x1f2f3f4f5f);
assert_eq!(u64::from_be_bytes(buf), 0x1f2f3f4f5f);
@@ -521,9 +651,10 @@ mod tests {
#[test]
fn test_pus_error_display() {
let unsupport_version = PusError::VersionNotSupported(super::PusVersion::EsaPus);
let unsupport_version =
PusError::VersionNotSupported(super::PusVersion::EsaPus.raw_value());
let write_str = unsupport_version.to_string();
assert_eq!(write_str, "PUS version EsaPus not supported")
assert_eq!(write_str, "PUS version 0 not supported")
}
#[test]
@@ -557,8 +688,8 @@ mod tests {
#[test]
fn test_pus_error_eq_impl() {
assert_eq!(
PusError::VersionNotSupported(PusVersion::EsaPus),
PusError::VersionNotSupported(PusVersion::EsaPus)
PusError::VersionNotSupported(PusVersion::EsaPus.raw_value()),
PusError::VersionNotSupported(PusVersion::EsaPus.raw_value())
);
}


@@ -3,53 +3,85 @@ use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// Scheduling service subtype ID.
#[derive(Debug, PartialEq, Eq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum Subservice {
pub enum MessageSubtypeId {
// Core subservices
/// Telecommand - Enable scheduling.
TcEnableScheduling = 1,
/// Telecommand - Disable scheduling.
TcDisableScheduling = 2,
/// Telecommand - Reset scheduling.
TcResetScheduling = 3,
/// Telecommand - Insert activity.
TcInsertActivity = 4,
/// Telecommand - Delete activity by request ID.
TcDeleteActivityByRequestId = 5,
/// Telecommand - Delete activity by filter.
TcDeleteActivitiesByFilter = 6,
// Time shift subservices
/// Telecommand - Time shift activity by request ID.
TcTimeShiftActivityWithRequestId = 7,
/// Telecommand - Time shift activity by filter.
TcTimeShiftActivitiesByFilter = 8,
/// Telecommand - Time shift all.
TcTimeShiftAll = 15,
// Reporting subservices
/// Telecommand - Detail report by request ID.
TcDetailReportByRequestId = 9,
/// Telemetry - Detail report.
TmDetailReport = 10,
/// Telecommand - Detail report by filter.
TcDetailReportByFilter = 11,
/// Telecommand - Summary report by request ID.
TcSummaryReportByRequestId = 12,
/// Telemetry - Summary report.
TmSummaryReport = 13,
/// Telecommand - Summary report by filter.
TcSummaryReportByFilter = 14,
/// Telecommand - Detail report all.
TcDetailReportAll = 16,
/// Telecommand - Summary report all.
TcSummaryReportAll = 17,
// Subschedule subservices
/// Telecommand - Report subschedule status.
TcReportSubscheduleStatus = 18,
/// Telemetry - Subschedule status report.
TmReportSubscheduleStatus = 19,
/// Telecommand - Enable subschedule.
TcEnableSubschedule = 20,
/// Telecommand - Disable subschedule.
TcDisableSubschedule = 21,
// Group subservices
/// Telecommand - Create schedule group.
TcCreateScheduleGroup = 22,
/// Telecommand - Delete schedule group.
TcDeleteScheduleGroup = 23,
/// Telecommand - Enable schedule group.
TcEnableScheduleGroup = 24,
/// Telecommand - Disable schedule group.
TcDisableScheduleGroup = 25,
/// Telecommand - Report all group status.
TcReportAllGroupsStatus = 26,
/// Telemetry - All group status report.
TmReportAllGroupsStatus = 27,
}
/// This status applies to sub-schedules and groups as well, as specified in ECSS-E-ST-70-41C 8.11.3.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum SchedStatus {
/// Scheduling disabled.
Disabled = 0,
/// Scheduling enabled.
Enabled = 1,
}
@@ -66,11 +98,16 @@ impl From<bool> for SchedStatus {
/// Time window types as specified in ECSS-E-ST-70-41C 8.11.3
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum TimeWindowType {
/// Select all.
SelectAll = 0,
/// From time tag to time tag.
TimeTagToTimeTag = 1,
/// Starting from a time tag.
FromTimeTag = 2,
/// Until a time tag.
ToTimeTag = 3,
}
@@ -96,20 +133,20 @@ mod tests {
#[test]
fn test_conv_into_u8() {
let subservice: u8 = Subservice::TcCreateScheduleGroup.into();
let subservice: u8 = MessageSubtypeId::TcCreateScheduleGroup.into();
assert_eq!(subservice, 22);
}
#[test]
fn test_conv_from_u8() {
let subservice: Subservice = 22u8.try_into().unwrap();
assert_eq!(subservice, Subservice::TcCreateScheduleGroup);
let subservice: MessageSubtypeId = 22u8.try_into().unwrap();
assert_eq!(subservice, MessageSubtypeId::TcCreateScheduleGroup);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_subservice_id() {
generic_serde_test(Subservice::TcEnableScheduling);
generic_serde_test(MessageSubtypeId::TcEnableScheduling);
}
#[test]

File diff suppressed because it is too large

1440 src/ecss/tc_pus_a.rs Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

2081 src/ecss/tm_pus_a.rs Normal file

File diff suppressed because it is too large

@@ -3,17 +3,27 @@ use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// Verification service subtype ID.
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum Subservice {
pub enum MessageSubtypeId {
/// Telemetry - Acceptance success.
TmAcceptanceSuccess = 1,
/// Telemetry - Acceptance failure.
TmAcceptanceFailure = 2,
/// Telemetry - Start success.
TmStartSuccess = 3,
/// Telemetry - Start failure.
TmStartFailure = 4,
/// Telemetry - Step success.
TmStepSuccess = 5,
/// Telemetry - Step failure.
TmStepFailure = 6,
/// Telemetry - Completion success.
TmCompletionSuccess = 7,
/// Telemetry - Completion failure.
TmCompletionFailure = 8,
}
@@ -23,13 +33,13 @@ mod tests {
#[test]
fn test_conv_into_u8() {
let subservice: u8 = Subservice::TmCompletionSuccess.into();
let subservice: u8 = MessageSubtypeId::TmCompletionSuccess.into();
assert_eq!(subservice, 7);
}
#[test]
fn test_conv_from_u8() {
let subservice: Subservice = 7.try_into().unwrap();
assert_eq!(subservice, Subservice::TmCompletionSuccess);
let subservice: MessageSubtypeId = 7.try_into().unwrap();
assert_eq!(subservice, MessageSubtypeId::TmCompletionSuccess);
}
}

2483 src/lib.rs

File diff suppressed because it is too large

473 src/seq_count.rs Normal file

@@ -0,0 +1,473 @@
//! # Sequence counter module.
//!
//! The CCSDS and ECSS packet standards oftentimes use sequence counters, for example to allow detecting
//! packet gaps. This module provides basic abstractions and helper components to implement
//! sequence counters.
use crate::MAX_SEQ_COUNT;
use arbitrary_int::traits::Integer;
use core::cell::Cell;
use paste::paste;
/// Core trait for objects which can provide a sequence count.
///
/// The core functions are not mutable on purpose to allow easier usage with
/// static structs when using the interior mutability pattern. This can be achieved by using
/// [Cell], [core::cell::RefCell] or atomic types.
pub trait SequenceCounter {
/// Raw type of the counter.
type Raw: Into<u64>;
/// Bit width of the counter.
const MAX_BIT_WIDTH: usize;
/// Get the current sequence count value.
fn get(&self) -> Self::Raw;
/// Increment the sequence count by one.
fn increment(&self);
/// Increment the sequence count by one, mutable API.
fn increment_mut(&mut self) {
self.increment();
}
/// Get the current sequence count value and increment the counter by one.
fn get_and_increment(&self) -> Self::Raw {
let val = self.get();
self.increment();
val
}
/// Get the current sequence count value and increment the counter by one, mutable API.
fn get_and_increment_mut(&mut self) -> Self::Raw {
self.get_and_increment()
}
}
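Because the core API takes &self, counters can be driven through shared references, which is the point of the interior mutability note above. A minimal sketch (SequenceCounterSimple is defined further below):

fn next_seq_count(counter: &impl SequenceCounter) -> u64 {
    counter.get_and_increment().into()
}

fn seq_counter_sketch() {
    let counter = SequenceCounterSimple::<u16>::default();
    assert_eq!(next_seq_count(&counter), 0);
    assert_eq!(next_seq_count(&counter), 1);
}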
/// Simple sequence counter which wraps at `T::MAX`.
#[derive(Clone)]
pub struct SequenceCounterSimple<T: Copy> {
seq_count: Cell<T>,
// The maximum value at which the counter wraps back to 0.
max_val: T,
}
macro_rules! impl_for_primitives {
($($ty: ident,)+) => {
$(
paste! {
impl SequenceCounterSimple<$ty> {
/// Constructor with a custom maximum value.
pub fn [<new_custom_max_val_ $ty>](max_val: $ty) -> Self {
Self {
seq_count: Cell::new(0),
max_val,
}
}
/// Generic constructor.
pub fn [<new_ $ty>]() -> Self {
Self {
seq_count: Cell::new(0),
max_val: $ty::MAX
}
}
}
impl Default for SequenceCounterSimple<$ty> {
fn default() -> Self {
Self::[<new_ $ty>]()
}
}
impl SequenceCounter for SequenceCounterSimple<$ty> {
type Raw = $ty;
const MAX_BIT_WIDTH: usize = core::mem::size_of::<Self::Raw>() * 8;
fn get(&self) -> Self::Raw {
self.seq_count.get()
}
fn increment(&self) {
self.get_and_increment();
}
fn get_and_increment(&self) -> Self::Raw {
let curr_count = self.seq_count.get();
if curr_count == self.max_val {
self.seq_count.set(0);
} else {
self.seq_count.set(curr_count + 1);
}
curr_count
}
}
}
)+
}
}
impl_for_primitives!(u8, u16, u32, u64,);
/// This is a sequence count provider which wraps around at [MAX_SEQ_COUNT].
#[derive(Clone)]
pub struct SequenceCounterCcsdsSimple {
provider: SequenceCounterSimple<u16>,
}
impl Default for SequenceCounterCcsdsSimple {
fn default() -> Self {
Self {
provider: SequenceCounterSimple::new_custom_max_val_u16(MAX_SEQ_COUNT.as_u16()),
}
}
}
impl SequenceCounter for SequenceCounterCcsdsSimple {
type Raw = u16;
const MAX_BIT_WIDTH: usize = core::mem::size_of::<Self::Raw>() * 8;
delegate::delegate! {
to self.provider {
fn get(&self) -> u16;
fn increment(&self);
fn get_and_increment(&self) -> u16;
}
}
}
#[cfg(target_has_atomic = "8")]
impl SequenceCounter for core::sync::atomic::AtomicU8 {
type Raw = u8;
const MAX_BIT_WIDTH: usize = 8;
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
}
#[cfg(target_has_atomic = "16")]
impl SequenceCounter for core::sync::atomic::AtomicU16 {
type Raw = u16;
const MAX_BIT_WIDTH: usize = 16;
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
}
#[cfg(target_has_atomic = "32")]
impl SequenceCounter for core::sync::atomic::AtomicU32 {
type Raw = u32;
const MAX_BIT_WIDTH: usize = 32;
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
}
#[cfg(target_has_atomic = "64")]
impl SequenceCounter for core::sync::atomic::AtomicU64 {
type Raw = u64;
const MAX_BIT_WIDTH: usize = 64;
fn get(&self) -> Self::Raw {
self.load(core::sync::atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
}
}
#[cfg(feature = "portable-atomic")]
impl SequenceCounter for portable_atomic::AtomicU8 {
type Raw = u8;
const MAX_BIT_WIDTH: usize = 8;
fn get(&self) -> Self::Raw {
self.load(portable_atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, portable_atomic::Ordering::Relaxed);
}
}
#[cfg(feature = "portable-atomic")]
impl SequenceCounter for portable_atomic::AtomicU16 {
type Raw = u16;
const MAX_BIT_WIDTH: usize = 16;
fn get(&self) -> Self::Raw {
self.load(portable_atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, portable_atomic::Ordering::Relaxed);
}
}
#[cfg(feature = "portable-atomic")]
impl SequenceCounter for portable_atomic::AtomicU32 {
type Raw = u32;
const MAX_BIT_WIDTH: usize = 32;
fn get(&self) -> Self::Raw {
self.load(portable_atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, portable_atomic::Ordering::Relaxed);
}
}
#[cfg(feature = "portable-atomic")]
impl SequenceCounter for portable_atomic::AtomicU64 {
type Raw = u64;
const MAX_BIT_WIDTH: usize = 64;
fn get(&self) -> Self::Raw {
self.load(portable_atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.fetch_add(1, portable_atomic::Ordering::Relaxed);
}
}
impl<T: SequenceCounter + ?Sized> SequenceCounter for &T {
type Raw = T::Raw;
const MAX_BIT_WIDTH: usize = T::MAX_BIT_WIDTH;
fn get(&self) -> Self::Raw {
(**self).get()
}
fn increment(&self) {
(**self).increment()
}
}
#[cfg(any(
target_has_atomic = "8",
target_has_atomic = "16",
target_has_atomic = "32",
target_has_atomic = "64"
))]
macro_rules! sync_clonable_seq_counter_impl {
($ty: ident) => {
paste::paste! {
/// This can be used if a custom wrap value is required when using a thread-safe
/// atomic based sequence counter.
#[derive(Debug)]
pub struct [<SequenceCounterSyncCustomWrap $ty:upper>] {
seq_count: core::sync::atomic::[<Atomic $ty:upper>],
max_val: $ty,
}
impl [<SequenceCounterSyncCustomWrap $ty:upper>] {
/// Generic constructor.
pub fn new(max_val: $ty) -> Self {
Self {
seq_count: core::sync::atomic::[<Atomic $ty:upper>]::new(0),
max_val,
}
}
}
impl SequenceCounter for [<SequenceCounterSyncCustomWrap $ty:upper>] {
type Raw = $ty;
const MAX_BIT_WIDTH: usize = core::mem::size_of::<Self::Raw>() * 8;
fn get(&self) -> $ty {
self.seq_count.load(core::sync::atomic::Ordering::Relaxed)
}
fn increment(&self) {
self.get_and_increment();
}
fn get_and_increment(&self) -> $ty {
self.seq_count.fetch_update(
core::sync::atomic::Ordering::Relaxed,
core::sync::atomic::Ordering::Relaxed,
|cur| {
// compute the next value, wrapping at MAX_VAL
let next = if cur == self.max_val { 0 } else { cur + 1 };
Some(next)
},
).unwrap()
}
}
}
};
}
#[cfg(target_has_atomic = "8")]
sync_clonable_seq_counter_impl!(u8);
#[cfg(target_has_atomic = "16")]
sync_clonable_seq_counter_impl!(u16);
#[cfg(target_has_atomic = "32")]
sync_clonable_seq_counter_impl!(u32);
#[cfg(target_has_atomic = "64")]
sync_clonable_seq_counter_impl!(u64);
#[cfg(test)]
mod tests {
use core::sync::atomic::{AtomicU16, AtomicU32, AtomicU64, AtomicU8};
use crate::seq_count::{
SequenceCounter, SequenceCounterCcsdsSimple, SequenceCounterSimple,
SequenceCounterSyncCustomWrapU8,
};
use crate::MAX_SEQ_COUNT;
#[test]
fn test_u8_counter() {
let u8_counter = SequenceCounterSimple::<u8>::default();
assert_eq!(u8_counter.get(), 0);
assert_eq!(u8_counter.get_and_increment(), 0);
assert_eq!(u8_counter.get_and_increment(), 1);
assert_eq!(u8_counter.get(), 2);
}
#[test]
fn test_u8_counter_overflow() {
let u8_counter = SequenceCounterSimple::new_u8();
for _ in 0..256 {
u8_counter.increment();
}
assert_eq!(u8_counter.get(), 0);
}
#[test]
fn test_ccsds_counter() {
let ccsds_counter = SequenceCounterCcsdsSimple::default();
assert_eq!(ccsds_counter.get(), 0);
assert_eq!(ccsds_counter.get_and_increment(), 0);
assert_eq!(ccsds_counter.get_and_increment(), 1);
assert_eq!(ccsds_counter.get(), 2);
}
#[test]
fn test_ccsds_counter_overflow() {
let ccsds_counter = SequenceCounterCcsdsSimple::default();
for _ in 0..MAX_SEQ_COUNT.value() + 1 {
ccsds_counter.increment();
}
assert_eq!(ccsds_counter.get(), 0);
}
fn common_counter_test(seq_counter: &mut impl SequenceCounter) {
assert_eq!(seq_counter.get().into(), 0);
assert_eq!(seq_counter.get_and_increment().into(), 0);
assert_eq!(seq_counter.get_and_increment().into(), 1);
assert_eq!(seq_counter.get().into(), 2);
seq_counter.increment_mut();
assert_eq!(seq_counter.get().into(), 3);
assert_eq!(seq_counter.get_and_increment_mut().into(), 3);
assert_eq!(seq_counter.get().into(), 4);
}
#[test]
fn test_atomic_counter_u8() {
let mut sync_u8_counter = AtomicU8::new(0);
common_counter_test(&mut sync_u8_counter);
}
#[test]
fn test_atomic_counter_u16() {
let mut sync_u16_counter = AtomicU16::new(0);
common_counter_test(&mut sync_u16_counter);
}
#[test]
fn test_atomic_counter_u32() {
let mut sync_u32_counter = AtomicU32::new(0);
common_counter_test(&mut sync_u32_counter);
}
#[test]
fn test_atomic_counter_u64() {
let mut sync_u64_counter = AtomicU64::new(0);
common_counter_test(&mut sync_u64_counter);
}
#[test]
#[cfg(feature = "portable-atomic")]
fn test_portable_atomic_counter_u8() {
let mut sync_u8_counter = portable_atomic::AtomicU8::new(0);
common_counter_test(&mut sync_u8_counter);
}
#[test]
#[cfg(feature = "portable-atomic")]
fn test_portable_atomic_counter_u16() {
let mut sync_u16_counter = portable_atomic::AtomicU16::new(0);
common_counter_test(&mut sync_u16_counter);
}
#[test]
#[cfg(feature = "portable-atomic")]
fn test_portable_atomic_counter_u32() {
let mut sync_u32_counter = portable_atomic::AtomicU32::new(0);
common_counter_test(&mut sync_u32_counter);
}
#[test]
#[cfg(feature = "portable-atomic")]
fn test_portable_atomic_counter_u64() {
let mut sync_u64_counter = portable_atomic::AtomicU64::new(0);
common_counter_test(&mut sync_u64_counter);
}
fn common_overflow_test_u8(seq_counter: &impl SequenceCounter) {
for _ in 0..u8::MAX as u16 + 1 {
seq_counter.increment();
}
assert_eq!(seq_counter.get().into(), 0);
}
#[test]
fn test_atomic_u8_counter_overflow() {
let sync_u8_counter = AtomicU8::new(0);
common_overflow_test_u8(&sync_u8_counter);
}
#[test]
#[cfg(feature = "portable-atomic")]
fn test_portable_atomic_u8_counter_overflow() {
let sync_u8_counter = portable_atomic::AtomicU8::new(0);
common_overflow_test_u8(&sync_u8_counter);
}
#[test]
fn test_atomic_ref_counters_overflow_custom_max_val() {
let sync_u8_counter = SequenceCounterSyncCustomWrapU8::new(128);
for _ in 0..129 {
sync_u8_counter.increment();
}
assert_eq!(sync_u8_counter.get(), 0);
}
}


@@ -31,6 +31,7 @@ pub const FMT_STR_CODE_B_WITH_SIZE: (&str, usize) = ("%Y-%jT%T%.3f", 21);
/// Three digits are used for the decimal fraction and a terminator is added at the end.
pub const FMT_STR_CODE_B_TERMINATED_WITH_SIZE: (&str, usize) = ("%Y-%jT%T%.3fZ", 22);
/// Functions requiring both [chrono] and [alloc] support.
#[cfg(all(feature = "alloc", feature = "chrono"))]
pub mod alloc_mod_chrono {
use super::*;
@@ -71,7 +72,19 @@ mod tests {
use std::format;
#[test]
fn test_ascii_timestamp_a_unterminated() {
fn test_ascii_timestamp_a_unterminated_epoch() {
let date = chrono::DateTime::UNIX_EPOCH;
let stamp_formatter = generate_time_code_a(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 10);
assert_eq!(stamp.len(), FMT_STR_CODE_A_WITH_SIZE.1);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_ascii_timestamp_a_unterminated_now() {
let date = Utc::now();
let stamp_formatter = generate_time_code_a(&date);
let stamp = format!("{}", stamp_formatter);
@@ -82,7 +95,24 @@ mod tests {
}
#[test]
fn test_ascii_timestamp_a_terminated() {
fn test_ascii_timestamp_a_terminated_epoch() {
let date = chrono::DateTime::UNIX_EPOCH;
let stamp_formatter = generate_time_code_a_terminated(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 10);
let z_terminator = stamp.find('Z');
assert!(z_terminator.is_some());
assert_eq!(
z_terminator.unwrap(),
FMT_STR_CODE_A_TERMINATED_WITH_SIZE.1 - 1
);
assert_eq!(stamp.len(), FMT_STR_CODE_A_TERMINATED_WITH_SIZE.1);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_ascii_timestamp_a_terminated_now() {
let date = Utc::now();
let stamp_formatter = generate_time_code_a_terminated(&date);
let stamp = format!("{}", stamp_formatter);
@@ -99,7 +129,19 @@ mod tests {
}
#[test]
fn test_ascii_timestamp_b_unterminated() {
fn test_ascii_timestamp_b_unterminated_epoch() {
let date = chrono::DateTime::UNIX_EPOCH;
let stamp_formatter = generate_time_code_b(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 8);
assert_eq!(stamp.len(), FMT_STR_CODE_B_WITH_SIZE.1);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_ascii_timestamp_b_unterminated_now() {
let date = Utc::now();
let stamp_formatter = generate_time_code_b(&date);
let stamp = format!("{}", stamp_formatter);
@@ -110,7 +152,25 @@ mod tests {
}
#[test]
fn test_ascii_timestamp_b_terminated() {
fn test_ascii_timestamp_b_terminated_epoch() {
let date = chrono::DateTime::UNIX_EPOCH;
let stamp_formatter = generate_time_code_b_terminated(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 8);
let z_terminator = stamp.find('Z');
assert!(z_terminator.is_some());
assert_eq!(
z_terminator.unwrap(),
FMT_STR_CODE_B_TERMINATED_WITH_SIZE.1 - 1
);
assert_eq!(stamp.len(), FMT_STR_CODE_B_TERMINATED_WITH_SIZE.1);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_ascii_timestamp_b_terminated_now() {
let date = Utc::now();
let stamp_formatter = generate_time_code_b_terminated(&date);
let stamp = format!("{}", stamp_formatter);

File diff suppressed because it is too large

@@ -6,10 +6,9 @@
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use core::fmt::{Debug, Display, Formatter};
use core::fmt::Debug;
use core::ops::{Add, AddAssign};
use core::time::Duration;
use core::u64;
use crate::ByteConversionError;
@@ -21,8 +20,6 @@ use super::{
TimestampError, UnixTime,
};
#[cfg(feature = "std")]
use std::error::Error;
#[cfg(feature = "std")]
use std::time::SystemTime;
#[cfg(feature = "chrono")]
@@ -35,6 +32,7 @@ pub const P_FIELD_BASE: u8 = (CcsdsTimeCode::CucCcsdsEpoch as u8) << 4;
/// Maximum length if the preamble field is not extended.
pub const MAX_CUC_LEN_SMALL_PREAMBLE: usize = 8;
/// Fractional resolution for the fractional part of the CUC time code.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
@@ -73,13 +71,14 @@ pub fn convert_fractional_part_to_ns(fractional_part: FractionalPart) -> u64 {
10_u64.pow(9) * fractional_part.counter as u64 / div as u64
}
/// Convert the fractional resolution to the divisor used to calculate the fractional part.
#[inline(always)]
pub const fn fractional_res_to_div(res: FractionalResolution) -> u32 {
// We do not use the full possible range for a given resolution. This is because if we did
// that, the largest value would be equal to the counter being incremented by one. Thus, the
// smallest allowed fractions value is 0 while the largest allowed fractions value is the
// closest fractions value to the next counter increment.
2_u32.pow(8 * res as u32) - 1
(1u32 << (8 * res as u32)) - 1
}
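A worked example of the divisor and the nanosecond conversion above, assuming a resolution with one fractional byte:

// div = 2^8 - 1 = 255, so the counter ranges over 0..=255.
// convert_fractional_part_to_ns: ns = 10^9 * counter / div.
// counter = 128  ->  10^9 * 128 / 255 = 501_960_784 ns, roughly half a second.
// With three fractional bytes, div = 2^24 - 1 = 16_777_215, a resolution of about
// 59.6 ns, which lines up with the SixtyNs variant name.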
/// Calculate the fractional part for a given resolution and subsecond nanoseconds.
@@ -104,63 +103,36 @@ pub fn fractional_part_from_subsec_ns(res: FractionalResolution, ns: u64) -> Fra
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
/// CUC error.
#[derive(Copy, Clone, PartialEq, Eq, Debug, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum CucError {
/// Invalid CUC counter width.
#[error("invalid cuc counter byte width {0}")]
InvalidCounterWidth(u8),
/// Invalid counter supplied.
#[error("invalid cuc counter {counter} for width {width}")]
InvalidCounter {
/// Width.
width: u8,
/// Counter.
counter: u64,
},
/// Invalid fractions.
#[error("invalid cuc fractional part {value} for resolution {resolution:?}")]
InvalidFractions {
/// Resolution.
resolution: FractionalResolution,
/// Value.
value: u64,
},
/// Error while correcting for leap seconds.
#[error("error while correcting for leap seconds")]
LeapSecondCorrectionError,
DateBeforeCcsdsEpoch(DateBeforeCcsdsEpochError),
}
impl Display for CucError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
CucError::InvalidCounterWidth(w) => {
write!(f, "invalid cuc counter byte width {w}")
}
CucError::InvalidCounter { width, counter } => {
write!(f, "invalid cuc counter {counter} for width {width}")
}
CucError::InvalidFractions { resolution, value } => {
write!(
f,
"invalid cuc fractional part {value} for resolution {resolution:?}"
)
}
CucError::LeapSecondCorrectionError => {
write!(f, "error while correcting for leap seconds")
}
CucError::DateBeforeCcsdsEpoch(e) => {
write!(f, "date before ccsds epoch: {e}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for CucError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
CucError::DateBeforeCcsdsEpoch(e) => Some(e),
_ => None,
}
}
}
impl From<DateBeforeCcsdsEpochError> for CucError {
fn from(e: DateBeforeCcsdsEpochError) -> Self {
Self::DateBeforeCcsdsEpoch(e)
}
/// Date is before the CCSDS epoch.
#[error("date before ccsds epoch: {0}")]
DateBeforeCcsdsEpoch(#[from] DateBeforeCcsdsEpochError),
}
/// Tuple object where the first value is the width of the counter and the second value
@@ -169,14 +141,20 @@ impl From<DateBeforeCcsdsEpochError> for CucError {
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct WidthCounterPair(pub u8, pub u32);
/// Fractional part of the CUC time code.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FractionalPart {
/// Resolution.
pub resolution: FractionalResolution,
/// Counter.
pub counter: u32,
}
impl FractionalPart {
/// Generic constructor.
///
/// This function will panic if the counter is larger than the calculated divisor.
#[inline]
pub const fn new(resolution: FractionalResolution, counter: u32) -> Self {
let div = fractional_res_to_div(resolution);
@@ -199,6 +177,7 @@ impl FractionalPart {
Self::new_with_seconds_resolution()
}
/// Checked constructor which verifies that the counter is not larger than the divisor.
#[inline]
pub fn new_checked(resolution: FractionalResolution, counter: u32) -> Option<Self> {
let div = fractional_res_to_div(resolution);
@@ -211,16 +190,19 @@ impl FractionalPart {
})
}
/// Fractional resolution.
#[inline]
pub fn resolution(&self) -> FractionalResolution {
self.resolution
}
/// Counter value.
#[inline]
pub fn counter(&self) -> u32 {
self.counter
}
/// Check whether the timestamp does not have a fractional part.
#[inline]
pub fn no_fractional_part(&self) -> bool {
self.resolution == FractionalResolution::Seconds
@@ -287,17 +269,21 @@ pub struct CucTime {
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct CucTimeWithLeapSecs {
/// CUC time.
pub time: CucTime,
/// Leap seconds.
pub leap_seconds: u32,
}
impl CucTimeWithLeapSecs {
/// Generic constructor.
#[inline]
pub fn new(time: CucTime, leap_seconds: u32) -> Self {
Self { time, leap_seconds }
}
}
/// p-field length.
#[inline]
pub fn pfield_len(pfield: u8) -> usize {
if ((pfield >> 7) & 0b1) == 1 {
@@ -423,6 +409,7 @@ impl CucTime {
Ok(())
}
/// Creates a CUC timestamp from a Chrono DateTime object.
#[cfg(feature = "chrono")]
pub fn from_chrono_date_time(
dt: &chrono::DateTime<chrono::Utc>,
@@ -490,21 +477,25 @@ impl CucTime {
})
}
/// CCSDS time code.
#[inline]
pub fn ccsds_time_code(&self) -> CcsdsTimeCode {
CcsdsTimeCode::CucCcsdsEpoch
}
/// Width and counter pair.
#[inline]
pub fn width_counter_pair(&self) -> WidthCounterPair {
self.counter
}
/// Counter width.
#[inline]
pub fn counter_width(&self) -> u8 {
self.counter.0
}
/// Counter value.
#[inline]
pub fn counter(&self) -> u32 {
self.counter.1
@@ -516,11 +507,13 @@ impl CucTime {
self.fractions
}
/// Convert to the leap seconds helper.
#[inline]
pub fn to_leap_sec_helper(&self, leap_seconds: u32) -> CucTimeWithLeapSecs {
CucTimeWithLeapSecs::new(*self, leap_seconds)
}
/// Set the fractional part.
#[inline]
pub fn set_fractions(&mut self, fractions: FractionalPart) -> Result<(), CucError> {
Self::verify_fractions_value(fractions)?;
@@ -567,16 +560,19 @@ impl CucTime {
self.pfield |= self.fractions.resolution() as u8;
}
/// Length of the counter from the p-field.
#[inline]
pub fn len_cntr_from_pfield(pfield: u8) -> u8 {
((pfield >> 2) & 0b11) + 1
}
/// Length of the fractional part from the p-field.
#[inline]
pub fn len_fractions_from_pfield(pfield: u8) -> u8 {
pfield & 0b11
}
/// UNIX seconds.
#[inline]
pub fn unix_secs(&self, leap_seconds: u32) -> i64 {
ccsds_epoch_to_unix_epoch(self.counter.1 as i64)
@@ -584,6 +580,7 @@ impl CucTime {
.unwrap()
}
/// Subsecond milliseconds part of the CUC time.
#[inline]
pub fn subsec_millis(&self) -> u16 {
(self.subsec_nanos() / 1_000_000) as u16
@@ -606,6 +603,7 @@ impl CucTime {
)
}
/// Packed length from the raw p-field.
#[inline]
pub fn len_packed_from_pfield(pfield: u8) -> usize {
let mut base_len: usize = 1;
@@ -947,6 +945,7 @@ mod tests {
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_datetime_now() {
let now = chrono::Utc::now();
let cuc_now = CucTime::now(FractionalResolution::SixtyNs, LEAP_SECONDS);
@@ -1278,6 +1277,7 @@ mod tests {
}
#[test]
#[cfg_attr(miri, ignore)]
fn set_fract_resolution() {
let mut stamp = CucTime::new(2000);
stamp.set_fractional_resolution(FractionalResolution::SixtyNs);


@@ -3,10 +3,8 @@ use crate::ByteConversionError;
#[cfg(feature = "chrono")]
use chrono::{TimeZone, Utc};
use core::cmp::Ordering;
use core::fmt::{Display, Formatter};
use core::ops::{Add, AddAssign, Sub};
use core::time::Duration;
use core::u8;
#[allow(unused_imports)]
#[cfg(not(feature = "std"))]
@@ -15,8 +13,6 @@ use num_traits::float::FloatCore;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
#[cfg(feature = "std")]
use std::time::{SystemTime, SystemTimeError};
#[cfg(feature = "std")]
@@ -26,19 +22,29 @@ pub mod ascii;
pub mod cds;
pub mod cuc;
/// Conversion constant for converting CCSDS days to UNIX days.
pub const DAYS_CCSDS_TO_UNIX: i32 = -4383;
/// Seconds per day.
pub const SECONDS_PER_DAY: u32 = 86400;
/// Milliseconds per day.
pub const MS_PER_DAY: u32 = SECONDS_PER_DAY * 1000;
/// Nanoseconds per second.
pub const NANOS_PER_SECOND: u32 = 1_000_000_000;
/// CCSDS time code identifiers.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum CcsdsTimeCode {
/// CUC with a CCSDS epoch (1958-01-01T00:00:00+00:00).
CucCcsdsEpoch = 0b001,
/// CUC with a custom agency epoch.
CucAgencyEpoch = 0b010,
/// CDS time code.
Cds = 0b100,
/// CCS time code.
Ccs = 0b101,
/// Agency defined time code.
AgencyDefined = 0b110,
}
@@ -64,96 +70,61 @@ pub fn ccsds_time_code_from_p_field(pfield: u8) -> Result<CcsdsTimeCode, u8> {
CcsdsTimeCode::try_from(raw_bits).map_err(|_| raw_bits)
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
/// Date is before the CCSDS epoch.
#[derive(Debug, PartialEq, Eq, Copy, Clone, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[error("date before ccsds epoch: {0:?}")]
pub struct DateBeforeCcsdsEpochError(UnixTime);
impl Display for DateBeforeCcsdsEpochError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
write!(f, "date before ccsds epoch: {:?}", self.0)
}
}
#[cfg(feature = "std")]
impl Error for DateBeforeCcsdsEpochError {}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
/// Generic timestamp error.
#[derive(Debug, PartialEq, Eq, Copy, Clone, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum TimestampError {
InvalidTimeCode { expected: CcsdsTimeCode, found: u8 },
ByteConversion(ByteConversionError),
Cds(cds::CdsError),
Cuc(cuc::CucError),
/// Invalid time code.
#[error("invalid time code, expected {expected:?}, found {found}")]
InvalidTimeCode {
/// Expected time code.
expected: CcsdsTimeCode,
/// Found raw time code.
found: u8,
},
/// Byte conversion error.
#[error("time stamp: byte conversion error: {0}")]
ByteConversion(#[from] ByteConversionError),
/// CDS timestamp error.
#[error("CDS error: {0}")]
Cds(#[from] cds::CdsError),
/// CUC timestamp error.
#[error("CUC error: {0}")]
Cuc(#[from] cuc::CucError),
/// Custom epoch is not supported.
#[error("custom epoch not supported")]
CustomEpochNotSupported,
}
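// Illustrative sketch (not part of this diff): thanks to the #[from] attributes,
// the lower-level error types convert into TimestampError directly, so the ?
// operator works across them (the CucError variant is taken from the tests below).
fn demo_error_conversion() {
    let cuc_error = cuc::CucError::InvalidCounterWidth(12);
    let stamp_error = TimestampError::from(cuc_error);
    assert!(matches!(stamp_error, TimestampError::Cuc(_)));
}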
impl Display for TimestampError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
TimestampError::InvalidTimeCode { expected, found } => {
write!(
f,
"invalid raw time code value {found} for time code {expected:?}"
)
}
TimestampError::Cds(e) => {
write!(f, "cds error: {e}")
}
TimestampError::Cuc(e) => {
write!(f, "cuc error: {e}")
}
TimestampError::ByteConversion(e) => {
write!(f, "time stamp: {e}")
}
TimestampError::CustomEpochNotSupported => {
write!(f, "custom epochs are not supported")
}
}
}
}
#[cfg(feature = "std")]
impl Error for TimestampError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
TimestampError::ByteConversion(e) => Some(e),
TimestampError::Cds(e) => Some(e),
TimestampError::Cuc(e) => Some(e),
_ => None,
}
}
}
impl From<cds::CdsError> for TimestampError {
fn from(e: cds::CdsError) -> Self {
TimestampError::Cds(e)
}
}
impl From<cuc::CucError> for TimestampError {
fn from(e: cuc::CucError) -> Self {
TimestampError::Cuc(e)
}
}
/// [std] module.
#[cfg(feature = "std")]
pub mod std_mod {
use crate::time::TimestampError;
use std::time::SystemTimeError;
use thiserror::Error;
/// [std] timestamp error.
#[derive(Debug, Clone, Error)]
pub enum StdTimestampError {
/// System time error.
#[error("system time error: {0:?}")]
SystemTime(#[from] SystemTimeError),
/// Generic timestamp error.
#[error("timestamp error: {0}")]
Timestamp(#[from] TimestampError),
}
}
/// Seconds since epoch for the current system time.
#[cfg(feature = "std")]
pub fn seconds_since_epoch() -> f64 {
SystemTime::now()
@@ -187,16 +158,19 @@ pub const fn unix_epoch_to_ccsds_epoch(unix_epoch: i64) -> i64 {
unix_epoch - (DAYS_CCSDS_TO_UNIX as i64 * SECONDS_PER_DAY as i64)
}
/// Convert CCSDS epoch to UNIX epoch.
#[inline]
pub const fn ccsds_epoch_to_unix_epoch(ccsds_epoch: i64) -> i64 {
ccsds_epoch + (DAYS_CCSDS_TO_UNIX as i64 * SECONDS_PER_DAY as i64)
}
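// Illustrative sketch (not part of this diff): the CCSDS epoch (1958-01-01) lies
// 4383 days before the UNIX epoch (1970-01-01), which DAYS_CCSDS_TO_UNIX encodes
// as a negative day offset.
fn demo_epoch_conversion() {
    // 1970-01-01 expressed as seconds since the CCSDS epoch:
    let ccsds_secs: i64 = 4383 * 86400;
    assert_eq!(ccsds_epoch_to_unix_epoch(ccsds_secs), 0);
    assert_eq!(unix_epoch_to_ccsds_epoch(0), ccsds_secs);
}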
/// Milliseconds of day for the current system time.
#[cfg(feature = "std")]
pub fn ms_of_day_using_sysclock() -> u32 {
ms_of_day(seconds_since_epoch())
}
/// Milliseconds of day for the given seconds since epoch.
pub fn ms_of_day(seconds_since_epoch: f64) -> u32 {
let fraction_ms = seconds_since_epoch - seconds_since_epoch.floor();
let ms_of_day: u32 = (((seconds_since_epoch.floor() as u32 % SECONDS_PER_DAY) * 1000) as f64
@@ -205,13 +179,16 @@ pub fn ms_of_day(seconds_since_epoch: f64) -> u32 {
ms_of_day
}
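// Illustrative sketch (not part of this diff, values made up): ms_of_day keeps only
// the time-of-day portion, so noon plus 250 ms on an arbitrary day maps to 43_200_250.
fn demo_ms_of_day() {
    let seconds_since_epoch = 10_000.0 * 86_400.0 + 43_200.25;
    assert_eq!(ms_of_day(seconds_since_epoch), 43_200_250);
}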
/// Generic writable timestamp trait.
pub trait TimeWriter {
/// Written length.
fn len_written(&self) -> usize;
/// Generic function to write a timestamp into a raw buffer.
/// Returns the number of written bytes on success.
fn write_to_bytes(&self, bytes: &mut [u8]) -> Result<usize, TimestampError>;
/// Convert to an owned [alloc::vec::Vec].
#[cfg(feature = "alloc")]
fn to_vec(&self) -> Result<alloc::vec::Vec<u8>, TimestampError> {
let mut vec = alloc::vec![0; self.len_written()];
@@ -220,7 +197,9 @@ pub trait TimeWriter {
}
}
/// Generic readable timestamp trait.
pub trait TimeReader: Sized {
/// Create a timestamp from a raw byte buffer.
fn from_bytes(buf: &[u8]) -> Result<Self, TimestampError>;
}
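// Illustrative sketch (not part of this diff): a generic round-trip helper over the
// TimeWriter/TimeReader traits, assuming the stamp fits into a 32-byte stack buffer.
fn round_trip<T: TimeWriter + TimeReader + PartialEq + core::fmt::Debug>(
    stamp: &T,
) -> Result<(), TimestampError> {
    let mut buf = [0u8; 32];
    let written = stamp.write_to_bytes(&mut buf)?;
    assert_eq!(written, stamp.len_written());
    let read_back = T::from_bytes(&buf[..written])?;
    assert_eq!(&read_back, stamp);
    Ok(())
}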
@@ -230,6 +209,7 @@ pub trait TimeReader: Sized {
/// practical because they are a very common and simple exchange format for time information.
/// Therefore, it was decided to keep them in this trait as well.
pub trait CcsdsTimeProvider {
/// Length when written to bytes.
fn len_as_bytes(&self) -> usize;
/// Returns the pfield of the time provider. The pfield can have one or two bytes depending
@@ -237,24 +217,37 @@ pub trait CcsdsTimeProvider {
/// entry denotes the length of the pfield and the second entry is the value of the pfield
/// in big endian format.
fn p_field(&self) -> (usize, [u8; 2]);
/// CCSDS time code field.
fn ccdsd_time_code(&self) -> CcsdsTimeCode;
fn unix_secs(&self) -> i64;
fn subsec_nanos(&self) -> u32;
/// UNIX time as seconds.
fn unix_secs(&self) -> i64 {
self.unix_time().secs
}
/// Subsecond nanoseconds.
fn subsec_nanos(&self) -> u32 {
self.unix_time().subsec_nanos
}
/// Subsecond milliseconds.
fn subsec_millis(&self) -> u16 {
(self.subsec_nanos() / 1_000_000) as u16
}
/// UNIX time.
fn unix_time(&self) -> UnixTime {
UnixTime::new(self.unix_secs(), self.subsec_nanos())
}
/// [chrono] date time.
#[cfg(feature = "chrono")]
fn chrono_date_time(&self) -> chrono::LocalResult<chrono::DateTime<chrono::Utc>> {
chrono::Utc.timestamp_opt(self.unix_secs(), self.subsec_nanos())
}
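// Illustrative sketch (not part of this diff): the default methods are defined in
// terms of each other, so an implementor only supplies unix_time() or the
// unix_secs()/subsec_nanos() pair; either way the accessors stay consistent.
fn check_provider_defaults(provider: &impl CcsdsTimeProvider) {
    let unix = provider.unix_time();
    assert_eq!(provider.unix_secs(), unix.as_secs());
    assert_eq!(provider.subsec_nanos(), unix.subsec_nanos());
    assert_eq!(provider.subsec_millis() as u32, provider.subsec_nanos() / 1_000_000);
}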
/// [time] library date time.
#[cfg(feature = "timelib")]
fn timelib_date_time(&self) -> Result<time::OffsetDateTime, time::error::ComponentRange> {
Ok(time::OffsetDateTime::from_unix_timestamp(self.unix_secs())?
@@ -336,6 +329,7 @@ impl UnixTime {
}
}
/// New UNIX time with only seconds, subseconds set to zero.
pub fn new_only_secs(unix_seconds: i64) -> Self {
Self {
secs: unix_seconds,
@@ -343,15 +337,18 @@ impl UnixTime {
}
}
/// Sub-second milliseconds.
#[inline]
pub fn subsec_millis(&self) -> u16 {
(self.subsec_nanos / 1_000_000) as u16
}
/// Sub-second nanoseconds.
pub fn subsec_nanos(&self) -> u32 {
self.subsec_nanos
}
/// Create a UNIX timestamp from the current system time.
#[cfg(feature = "std")]
pub fn now() -> Result<Self, SystemTimeError> {
let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?;
@@ -359,27 +356,31 @@ impl UnixTime {
Ok(Self::new(epoch as i64, now.subsec_nanos()))
}
/// UNIX timestamp as a floating point number in seconds.
#[inline]
pub fn unix_secs_f64(&self) -> f64 {
self.secs as f64 + (self.subsec_nanos as f64 / 1_000_000_000.0)
}
/// UNIX timestamp as seconds, discarding the sub-second part.
pub fn as_secs(&self) -> i64 {
self.secs
}
/// UNIX timestamp as [chrono] date time.
#[cfg(feature = "chrono")]
pub fn chrono_date_time(&self) -> chrono::LocalResult<chrono::DateTime<chrono::Utc>> {
Utc.timestamp_opt(self.secs, self.subsec_nanos)
}
/// UNIX timestamp as [time] library date time.
#[cfg(feature = "timelib")]
pub fn timelib_date_time(&self) -> Result<time::OffsetDateTime, time::error::ComponentRange> {
Ok(time::OffsetDateTime::from_unix_timestamp(self.as_secs())?
+ time::Duration::nanoseconds(self.subsec_nanos().into()))
}
// Calculate the difference in milliseconds between two UnixTimestamps
/// Calculate the difference in milliseconds between two UnixTimestamps
pub fn diff_in_millis(&self, other: &UnixTime) -> Option<i64> {
let seconds_difference = self.secs.checked_sub(other.secs)?;
// Convert seconds difference to milliseconds
@@ -449,7 +450,9 @@ impl Ord for UnixTime {
/// so the sign information is supplied separately.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct StampDiff {
/// Positive duration flag.
pub positive_duration: bool,
/// Absolute duration.
pub duration_absolute: Duration,
}
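// Illustrative sketch (not part of this diff): a difference of -1.5 s is stored as a
// positive 1.5 s duration with the sign flag cleared.
fn demo_stamp_diff() -> StampDiff {
    StampDiff {
        positive_duration: false,
        duration_absolute: core::time::Duration::from_millis(1500),
    }
}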
@@ -551,6 +554,7 @@ mod tests {
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_get_current_time() {
let sec_floats = seconds_since_epoch();
assert!(sec_floats > 0.0);
@@ -565,6 +569,7 @@ mod tests {
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_ccsds_epoch() {
let now = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
@@ -685,6 +690,7 @@ mod tests {
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_from_now() {
let stamp_now = UnixTime::now().unwrap();
let dt_now = stamp_now.chrono_date_time().unwrap();
@@ -741,7 +747,7 @@ mod tests {
fn test_cuc_error_printout() {
let cuc_error = CucError::InvalidCounterWidth(12);
let stamp_error = TimestampError::from(cuc_error);
assert_eq!(stamp_error.to_string(), format!("cuc error: {cuc_error}"));
assert_eq!(stamp_error.to_string(), format!("CUC error: {cuc_error}"));
}
#[test]

src/uslp/mod.rs (new file, 1005 additions): diff suppressed because it is too large.


@@ -1,14 +1,16 @@
//! # Utility module.
use crate::ByteConversionError;
use core::fmt::{Debug, Display, Formatter};
use core::fmt::Debug;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
/// Helper trait for types which can be converted to a byte array.
pub trait ToBeBytes {
/// Concrete byte array type.
type ByteArray: AsRef<[u8]>;
/// Length when written to big endian bytes.
fn written_len(&self) -> usize;
/// Convert to big endian byte array.
fn to_be_bytes(&self) -> Self::ByteArray;
}
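// Illustrative sketch (not part of this diff), assuming the unsigned integer impls
// shown in this file report their native width and big-endian byte order.
fn demo_to_be_bytes() {
    let value: u32 = 0xAABBCCDD;
    assert_eq!(value.written_len(), 4);
    assert_eq!(ToBeBytes::to_be_bytes(&value).as_ref(), &[0xAA, 0xBB, 0xCC, 0xDD][..]);
}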
@@ -82,14 +84,17 @@ impl ToBeBytes for u64 {
}
}
/// Helper trait for unsigned enumerations.
pub trait UnsignedEnum {
/// Size of the unsigned enumeration in bytes.
fn size(&self) -> usize;
/// Write the unsigned enumeration to a raw buffer. Returns the written size on success.
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
fn value(&self) -> u64;
/// Type-erased raw value.
fn value_raw(&self) -> u64;
/// Convert to an [alloc::vec::Vec].
#[cfg(feature = "alloc")]
fn to_vec(&self) -> alloc::vec::Vec<u8> {
let mut buf = alloc::vec![0; self.size()];
@@ -98,51 +103,36 @@ pub trait UnsignedEnum {
}
}
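// Illustrative sketch (not part of this diff): writing a two-byte field through the
// UnsignedEnum trait, using the UnsignedByteFieldU16 alias defined further down and
// mirroring the unit tests at the bottom of this file.
fn demo_unsigned_enum() {
    let field = UnsignedByteFieldU16::new(0x1234);
    let mut buf = [0u8; 2];
    assert_eq!(field.size(), 2);
    field.write_to_be_bytes(&mut buf).unwrap();
    assert_eq!(buf, [0x12, 0x34]);
    assert_eq!(field.value_raw(), 0x1234);
}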
/// Extension trait for unsigned enumerations.
pub trait UnsignedEnumExt: UnsignedEnum + Debug + Copy + Clone + PartialEq + Eq {}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
/// Unsigned byte field errors.
#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum UnsignedByteFieldError {
/// Value is too large for specified width of byte field.
#[error("value {value} too large for width {width}")]
ValueTooLargeForWidth {
/// Width in bytes.
width: usize,
/// Value.
value: u64,
},
/// Only 1, 2, 4 and 8 are allowed width values. Optionally contains the expected width if
/// applicable, for example for conversions.
#[error("invalid width {found}, expected {expected:?}")]
InvalidWidth {
/// Found width.
found: usize,
/// Expected width.
expected: Option<usize>,
},
ByteConversionError(ByteConversionError),
/// Error during byte conversion.
#[error("byte conversion error: {0}")]
ByteConversionError(#[from] ByteConversionError),
}
impl From<ByteConversionError> for UnsignedByteFieldError {
#[inline]
fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value)
}
}
impl Display for UnsignedByteFieldError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
Self::ByteConversionError(e) => {
write!(f, "low level byte conversion error: {e}")
}
Self::InvalidWidth { found, .. } => {
write!(f, "invalid width {found}, only 1, 2, 4 and 8 are allowed.")
}
Self::ValueTooLargeForWidth { width, value } => {
write!(f, "value {value} too large for width {width}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for UnsignedByteFieldError {}
/// Type-erased variant.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
@@ -153,16 +143,19 @@ pub struct UnsignedByteField {
}
impl UnsignedByteField {
/// Generic constructor.
#[inline]
pub const fn new(width: usize, value: u64) -> Self {
Self { width, value }
}
/// Type-erased raw value.
#[inline]
pub const fn value_const(&self) -> u64 {
pub const fn value(&self) -> u64 {
self.value
}
/// Construct from raw bytes, assuming big-endian byte order.
#[inline]
pub fn new_from_be_bytes(width: usize, buf: &[u8]) -> Result<Self, UnsignedByteFieldError> {
if width > buf.len() {
@@ -202,8 +195,8 @@ impl UnsignedEnum for UnsignedByteField {
}
#[inline]
fn value(&self) -> u64 {
self.value_const()
fn value_raw(&self) -> u64 {
self.value()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
@@ -239,6 +232,7 @@ impl UnsignedEnum for UnsignedByteField {
}
}
/// Generic type-erased unsigned byte field.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
@@ -247,11 +241,13 @@ pub struct GenericUnsignedByteField<TYPE: Copy + Into<u64>> {
}
impl<TYPE: Copy + Into<u64>> GenericUnsignedByteField<TYPE> {
/// Generic constructor.
pub const fn new(val: TYPE) -> Self {
Self { value: val }
}
pub const fn value_typed(&self) -> TYPE {
/// Typed value.
pub const fn value(&self) -> TYPE {
self.value
}
}
@@ -274,20 +270,29 @@ impl<TYPE: Copy + ToBeBytes + Into<u64>> UnsignedEnum for GenericUnsignedByteFie
}
#[inline]
fn value(&self) -> u64 {
self.value_typed().into()
fn value_raw(&self) -> u64 {
self.value().into()
}
}
/// Alias for [GenericUnsignedByteField] with [()] generic.
pub type UnsignedByteFieldEmpty = GenericUnsignedByteField<()>;
/// Alias for [GenericUnsignedByteField] with [u8] generic.
pub type UnsignedByteFieldU8 = GenericUnsignedByteField<u8>;
/// Alias for [GenericUnsignedByteField] with [u16] generic.
pub type UnsignedByteFieldU16 = GenericUnsignedByteField<u16>;
/// Alias for [GenericUnsignedByteField] with [u32] generic.
pub type UnsignedByteFieldU32 = GenericUnsignedByteField<u32>;
/// Alias for [GenericUnsignedByteField] with [u64] generic.
pub type UnsignedByteFieldU64 = GenericUnsignedByteField<u64>;
/// Alias for [UnsignedByteFieldU8].
pub type UbfU8 = UnsignedByteFieldU8;
/// Alias for [UnsignedByteFieldU16].
pub type UbfU16 = UnsignedByteFieldU16;
/// Alias for [UnsignedByteFieldU32].
pub type UbfU32 = UnsignedByteFieldU32;
/// Alias for [UnsignedByteFieldU64].
pub type UbfU64 = UnsignedByteFieldU64;
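// Illustrative sketch (not part of this diff): the short aliases convert into the
// type-erased UnsignedByteField via the From impls below.
fn demo_alias_conversion() {
    let typed = UbfU8::new(5);
    let erased = UnsignedByteField::from(typed);
    assert_eq!(erased.value(), 5);
}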
impl From<UnsignedByteFieldU8> for UnsignedByteField {
@@ -378,7 +383,7 @@ impl TryFrom<UnsignedByteField> for UnsignedByteFieldU64 {
}
#[cfg(test)]
pub mod tests {
mod tests {
use crate::util::{
UnsignedByteField, UnsignedByteFieldError, UnsignedByteFieldU16, UnsignedByteFieldU32,
UnsignedByteFieldU64, UnsignedByteFieldU8, UnsignedEnum,
@@ -399,7 +404,7 @@ pub mod tests {
for val in buf.iter().skip(1) {
assert_eq!(*val, 0);
}
assert_eq!(u8.value_typed(), 5);
assert_eq!(u8.value_raw(), 5);
assert_eq!(u8.value(), 5);
}
@@ -417,7 +422,7 @@ pub mod tests {
for val in buf.iter().skip(2) {
assert_eq!(*val, 0);
}
assert_eq!(u16.value_typed(), 3823);
assert_eq!(u16.value_raw(), 3823);
assert_eq!(u16.value(), 3823);
}
@@ -435,7 +440,7 @@ pub mod tests {
(4..8).for_each(|i| {
assert_eq!(buf[i], 0);
});
assert_eq!(u32.value_typed(), 80932);
assert_eq!(u32.value_raw(), 80932);
assert_eq!(u32.value(), 80932);
}
@@ -450,7 +455,7 @@ pub mod tests {
assert_eq!(len, 8);
let raw_val = u64::from_be_bytes(buf[0..8].try_into().unwrap());
assert_eq!(raw_val, 5999999);
assert_eq!(u64.value_typed(), 5999999);
assert_eq!(u64.value_raw(), 5999999);
assert_eq!(u64.value(), 5999999);
}