Compare commits


395 Commits (v0.5.4 ... main)

Author SHA1 Message Date
Robin Müller b8be9ae641 Merge pull request 'Fixes for Miri' (#95) from fixes-for-miri into main
Reviewed-on: #95
2024-05-15 13:03:24 +02:00
Robin Müller c2506dbba9 Merge branch 'main' into fixes-for-miri
2024-05-14 19:25:07 +02:00
Robin Müller b842b9d11a Merge pull request 'remove defmt::Format impl for MetadataPduCreator' (#94) from fix-defmt-derives into main
Reviewed-on: #94
2024-05-14 19:24:57 +02:00
Robin Müller 374c034e92 add miri chapter in README
2024-05-14 15:37:20 +02:00
Robin Müller 791c7f6e02 it is now possible to run cargo miri 2024-05-14 15:34:40 +02:00
Robin Müller 8001938507 remove defmt::Format impl for MetadataPduCreator
2024-05-14 15:01:26 +02:00
Robin Müller 73ab7ff148 Merge pull request 'add doctests to github CI' (#93) from github-ci-doctest into main
Reviewed-on: #93
2024-05-02 14:56:13 +02:00
Robin Müller c59d01174f
add doctests to github CI
2024-05-02 14:48:31 +02:00
Robin Müller eb49bff0c9 Merge pull request 'update github CI' (#92) from update-github-ci into main
Reviewed-on: #92
2024-05-02 14:29:53 +02:00
Robin Müller af392d40d0
this might work
2024-05-02 14:22:03 +02:00
Robin Müller b78bfe2114
some fixes
2024-05-02 14:16:20 +02:00
Robin Müller 69a3b1d8f3
update github CI
2024-05-02 14:12:26 +02:00
Robin Müller e7b3ba9575 Merge pull request 'date correction' (#91) from date-correction into main
Reviewed-on: #91
2024-04-22 10:19:19 +02:00
Robin Müller c515535ccd
date correction
2024-04-22 10:18:35 +02:00
Robin Müller 95158a8cd2 Merge pull request 'prepare next patch version' (#90) from small-improvements-and-fixes into main
Reviewed-on: #90
2024-04-22 10:15:21 +02:00
Robin Müller 8b1ccb0cd0
prepare next patch version 2024-04-20 10:42:36 +02:00
Robin Müller 619b22e58f Merge pull request 'prepare v0.11.0' (#89) from prep_v0.11.0 into main
Reviewed-on: #89
2024-04-16 19:23:17 +02:00
Robin Müller 55222d92b3
small typo fix
2024-04-16 19:17:17 +02:00
Robin Müller 8e1934e604
prepare release v0.11.0
2024-04-16 19:15:04 +02:00
Robin Müller 5f37978c56 Merge pull request 'added small defmt test' (#88) from added-small-defmt-test into main
Reviewed-on: #88
2024-04-16 15:34:41 +02:00
Robin Müller 97bbb14168 Merge branch 'main' into added-small-defmt-test 2024-04-16 15:34:34 +02:00
Robin Müller a65a98f43f Merge pull request 'clippy and msrv fix' (#87) from ci-github-fixes into main
Reviewed-on: #87
2024-04-16 15:34:27 +02:00
Robin Müller e1a200e65b Merge branch 'main' into added-small-defmt-test
2024-04-16 15:14:23 +02:00
Robin Müller b55c7db3fc clippy and msrv fix
2024-04-16 15:13:43 +02:00
Robin Müller 944bcf1320 Merge pull request 'bump MSRV' (#86) from bump-msrv into main
Reviewed-on: #86
2024-04-13 18:48:45 +02:00
Robin Müller 8972dcbfc0
bump MSRV
2024-04-13 17:39:53 +02:00
Robin Müller 04b671fa6f Merge pull request 'moved CCSDS constant' (#85) from move-ccsds-constant into main
Reviewed-on: #85
2024-04-13 17:19:15 +02:00
Robin Müller 533afc33fa
moved CCSDS constant
2024-04-13 12:10:32 +02:00
Robin Müller 50c56f6504
added small defmt test
2024-04-04 16:35:22 +02:00
Robin Müller 9e02e00d1a Merge pull request 'prepare next release candidate' (#84) from prep_v0.11.0-rc.2 into main
Reviewed-on: #84
2024-04-04 14:21:40 +02:00
Robin Müller d8676ae711
prepare next release candidate
2024-04-04 14:12:33 +02:00
Robin Müller 9711159969 Merge pull request 'use cargo nextest in CI for testing' (#83) from use-nextest-as-test-runner-ci into main
Reviewed-on: #83
2024-04-04 13:20:27 +02:00
Robin Müller 57adb619b3
use cargo nextest in CI for testing
2024-04-04 13:13:43 +02:00
Robin Müller fe52657d11 Merge pull request 'ECSS Ctors: Expect SP header by copy' (#82) from ecss-take-sp-header-by-copy into main
Reviewed-on: #82
2024-04-04 12:20:45 +02:00
Robin Müller 50b86939a1
this API is a bit more ergonomic
2024-04-04 12:07:37 +02:00
Robin Müller 179984f258 Merge pull request 'More smaller tweaks' (#81) from more-smaller-tweaks into main
Reviewed-on: #81
2024-04-04 11:58:48 +02:00
Robin Müller deb89362a4 More smaller tweaks
2024-04-04 11:47:39 +02:00
Robin Müller 4cd40f37ce Merge pull request 'added additional ctors which only set the APID' (#80) from addition-sp-header-ctors into main
Reviewed-on: #80
2024-04-03 22:59:19 +02:00
Robin Müller bbd66a6a8b
added a lot of inline attrs
2024-04-03 21:56:26 +02:00
Robin Müller 0115461bb5
added additional ctors which only set the APID
2024-04-03 21:30:23 +02:00
Robin Müller ca90393d95 Merge pull request 'unify CCSDS API as well' (#79) from unify-ccsds-api into main
Reviewed-on: #79
2024-04-03 19:45:14 +02:00
Robin Müller 325e7d6ff3 unify CCSDS API as well
2024-04-03 18:47:00 +02:00
Robin Müller 228f198006 Merge pull request 'prepare next release candidate' (#78) from prep_v0.11.0-rc.1 into main
Reviewed-on: #78
2024-04-03 15:07:06 +02:00
Robin Müller 54f065ed74
small fix
2024-04-03 14:18:12 +02:00
Robin Müller 4ef65279ea
doc update
2024-04-03 14:16:42 +02:00
Robin Müller f0af16dc29
prepare next release candidate
2024-04-03 14:13:04 +02:00
Robin Müller d05a1077e8 Merge pull request 'consistent ECSS object constructors' (#77) from consistent-ecss-ctors into main
Reviewed-on: #77
2024-04-03 14:09:36 +02:00
Robin Müller fc684a42a8
consistent ECSS object constructors
2024-04-03 13:30:01 +02:00
Robin Müller e9ddc316c8 Merge pull request 'update ECSS code' (#75) from update-ecss-code into main
Reviewed-on: #75
2024-03-29 14:22:43 +01:00
Robin Müller 4da417dfd2
cargo fmt
2024-03-29 14:13:44 +01:00
Robin Müller cabb3a19ef
Update ECSS code
2024-03-29 14:06:52 +01:00
Robin Müller 5eef376351 Merge pull request 'Start adding defmt support' (#76) from start-adding-defmt-support into main
Reviewed-on: #76
2024-03-29 14:06:04 +01:00
Robin Müller 538548b05e Merge branch 'main' into start-adding-defmt-support
2024-03-29 13:50:01 +01:00
Robin Müller caaecdff0c Merge pull request 'Some more API adaptions' (#74) from api-name-updates into main
Reviewed-on: #74
2024-03-29 13:44:03 +01:00
Robin Müller 3045a27d8c
just add support for everything
2024-03-29 13:42:02 +01:00
Robin Müller c7cf83d468
some defmt support would be good
2024-03-28 22:48:58 +01:00
Robin Müller ef37a84edc
Make API more inline with other time API out there
2024-03-25 16:08:30 +01:00
Robin Müller c1b32bca21 Merge pull request 'More granular error handling' (#73) from more-granular-error-handling into main
Reviewed-on: #73
2024-03-25 14:18:37 +01:00
Robin Müller d9525674c3
doc fixes
2024-03-25 14:05:04 +01:00
Robin Müller 8b151d942d
Merge remote-tracking branch 'origin/main' into more-granular-error-handling
2024-03-25 13:43:19 +01:00
Robin Müller 85a8eb3f4a
more granular error handling
2024-03-25 13:42:18 +01:00
Robin Müller fb71185b4a Merge pull request 'Introduce automatic doc feature configuration' (#72) from update-docs into main
Reviewed-on: #72
2024-03-25 10:55:34 +01:00
Robin Müller 3e62d7d411
introduce doc_auto_cfg
2024-03-24 12:22:08 +01:00
Robin Müller 3faffd52fc Merge pull request 'Update Time API' (#71) from update-time-api into main
Reviewed-on: #71
2024-03-18 15:57:17 +01:00
Robin Müller 7476fc8096
some more fixes and cleaning up
2024-03-18 15:23:26 +01:00
Robin Müller 59c7ece126
Major refactoring of the time API
2024-03-18 15:14:40 +01:00
Robin Müller 6f5254bdbd Merge pull request 'More useful conversions' (#68) from missing-ecss-enum-conversion into main
Reviewed-on: #68
2024-03-11 14:57:51 +01:00
Robin Müller bd1927c5c2
use a more generic blanket impl
2024-03-11 14:35:09 +01:00
Robin Müller 77862868d5
these conversions are also useful
2024-03-11 14:28:18 +01:00
Robin Müller ea05a547ac Merge pull request 'add missing doc_cfg attr' (#67) from missing-doc-cfg-attr into main
Reviewed-on: #67
2024-03-04 12:58:28 +01:00
Robin Müller 0ab69b3ddc
add missing doc_cfg attr
2024-03-04 12:55:16 +01:00
Robin Müller 240f0bc267 Merge pull request 'CHANGELOG' (#66) from merge-conflict into main
Reviewed-on: #66
2024-03-04 12:52:49 +01:00
Robin Müller 00744a22fc
Merge branch 'main' of egit.irs.uni-stuttgart.de:rust/spacepackets
2024-03-04 12:26:29 +01:00
Robin Müller c5aeeec19f
prepare rc.0 2024-03-04 12:25:47 +01:00
Robin Müller d13cd28962 Merge pull request 'add from impls' (#65) from ecss-enum-from-impls into main
Reviewed-on: #65
2024-03-04 12:20:01 +01:00
Robin Müller 5641d9007e
Merge remote-tracking branch 'origin/main' into ecss-enum-from-impls
2024-03-01 17:55:16 +01:00
Robin Müller f39ea2f793 Merge pull request 'improve the time API' (#64) from improve-time-api into main
Reviewed-on: #64
2024-03-01 17:54:31 +01:00
Robin Müller e4730d4b8f
changelog
2024-03-01 17:54:02 +01:00
Robin Müller 64ea7e609d
better naming
2024-03-01 17:52:51 +01:00
Robin Müller ebaa6210a4
add from impls
2024-03-01 17:51:16 +01:00
Robin Müller d14f532f62 improve the time API
2024-02-27 15:59:04 +01:00
Robin Müller 6ea18d3715 Merge pull request 'added missing doc_cfg attribute' (#63) from add-missing-doc-cfg-attr into main
Reviewed-on: #63
2024-02-19 20:12:15 +01:00
Robin Müller 6056342334
added missing doc_cfg attribute
2024-02-17 21:01:11 +01:00
Robin Müller 4e6dcc5afa Merge pull request 'UnsignedEnum trait extensions' (#62) from unsigned-enum-vec-ext into main
Reviewed-on: #62
2024-02-17 13:41:51 +01:00
Robin Müller 200593bfb4
added tests for vec converters
2024-02-17 13:27:28 +01:00
Robin Müller 60bf876dd3
Extensions for UnsignedEnum trait
2024-02-17 13:24:25 +01:00
Robin Müller f47604346e Merge pull request 'update PusTmZeroCopyWriter' (#61) from update-pus-tm-zero-copy-writer into main
Reviewed-on: #61
2024-02-07 11:28:55 +01:00
Robin Müller 0d0d7a256a
update PusTmZeroCopyWriter
2024-02-07 11:07:20 +01:00
Robin Müller 2fd5860e18 Merge pull request 'v0.8.1' (#60) from prep-v0.8.1 into main
Reviewed-on: #60
2024-02-05 15:32:09 +01:00
Robin Müller 7e8b71db6d
prep patch release
2024-02-05 15:29:18 +01:00
Robin Müller c3cc6d5c73 Merge pull request 'extended time writer trait' (#58) from extend-time-writer-trait into main
Reviewed-on: #58
2024-02-05 15:07:14 +01:00
Robin Müller d01309cccf
extended time writer trait
2024-02-05 15:04:29 +01:00
Robin Müller 92403738ca Merge pull request 'v0.7.0' (#57) from prep_v0.7.0 into main
Reviewed-on: #57
2024-02-01 17:54:35 +01:00
Robin Müller 3353475261
prep next release
2024-02-01 17:52:45 +01:00
Robin Müller 84c1c47fe1
not sure what this does
2024-02-01 17:47:25 +01:00
Robin Müller c4bbf91be8
lets keep all features
2024-01-31 14:56:41 +01:00
Robin Müller 7200e10250
these flags are new
2024-01-31 14:55:19 +01:00
Robin Müller 66ae83c0ce Merge pull request 'prep next beta release' (#56) from prep_v0.7.0-beta.4 into main
Reviewed-on: #56
2024-01-23 18:39:48 +01:00
Robin Müller 2439c9e5fd prep next beta release
2024-01-23 17:59:56 +01:00
Robin Müller e992aad52c Merge pull request 'bugfix for metadata PDU creator' (#55) from metadata-pdu-creator-bugfix into main
Reviewed-on: #55
2024-01-23 17:57:52 +01:00
Robin Müller 77135af2bc bugfix for metadata PDU creator
2024-01-23 17:55:07 +01:00
Robin Müller b4e49fdecc Merge pull request 'fix badge link for new website' (#54) from second-badge-fix into main
Reviewed-on: #54
2023-12-13 15:40:58 +01:00
Robin Müller 9c1a98139a
fix badge link for new website
2023-12-13 15:39:41 +01:00
Robin Müller 5a773cc0be Merge pull request 'fix deploy path' (#53) from fix-deployment-path into main
Reviewed-on: #53
2023-12-12 11:53:32 +01:00
Robin Müller adab462539 fix deploy path
2023-12-12 11:48:00 +01:00
Robin Müller 297cfad226 Merge pull request 'Clean up ECSS API' (#52) from clean-up-ecss-api into main
Reviewed-on: #52
2023-12-07 13:53:59 +01:00
Robin Müller efba19db3e Merge remote-tracking branch 'origin/main' into clean-up-ecss-api
2023-12-07 13:50:33 +01:00
Robin Müller 6017de9ec7 Merge pull request 'This magically fixes the upload' (#51) from cov-deployment into main
Reviewed-on: #51
2023-12-07 13:50:21 +01:00
Robin Müller f57b84b862 clean up ECSS API a bit
2023-12-07 13:49:59 +01:00
Robin Müller 960835f99d it's the clean?!
2023-12-07 13:11:14 +01:00
Robin Müller 4b6b935b06 clean
2023-12-07 11:09:06 +01:00
Robin Müller c45846819b this is weird
2023-12-07 11:04:58 +01:00
Robin Müller d1b9f4a4d5 that did not help, let's try the clear command
2023-12-07 11:03:52 +01:00
Robin Müller 5112338263 Merge pull request 'Some more tests' (#50) from some-more-tests into main
Reviewed-on: #50
2023-12-07 11:00:03 +01:00
Robin Müller 4ee57c104b delete stray file
2023-12-07 01:05:40 +01:00
Robin Müller 6d0f71bc12 try with build command 2023-12-07 01:00:07 +01:00
Robin Müller 688174e23d Merge pull request 'let's try it with a trailing slash' (#49) from jenkinsfile-fix-possibly into main
Reviewed-on: #49
2023-12-06 20:14:20 +01:00
Robin Müller e4fab72745 let's try it with a trailing slash
2023-12-06 20:12:41 +01:00
Robin Müller cfeb74d4c2 Merge pull request 'prepare new beta version' (#48) from prep_v0.7.0-beta.3 into main
Reviewed-on: #48
2023-12-06 20:02:27 +01:00
Robin Müller 0ddb9c69f1
jenkinsfile fixes
2023-12-06 20:01:36 +01:00
Robin Müller 3c72328466
prepare new beta version
2023-12-06 18:13:22 +01:00
Robin Müller 47a9335495 Merge pull request 'Coverage Update' (#47) from coverage-update into main
Reviewed-on: #47
2023-12-06 18:05:56 +01:00
Robin Müller 044ce7a300
changelog update for CUC time
2023-12-06 17:53:35 +01:00
Robin Müller 8b0a5d1d2c
that should do the job
2023-12-06 17:50:33 +01:00
Robin Müller 9dbb7429e8
let's hope this was the last issue
2023-12-06 17:35:28 +01:00
Robin Müller d472c8476a
this can not really be deserialized
2023-12-06 17:33:28 +01:00
Robin Müller 28e9dd9b29
added missing alloc feature gate
2023-12-06 17:07:28 +01:00
Robin Müller 90cca0fd9e
cargo fmt
2023-12-06 17:06:30 +01:00
Robin Müller 56c3b7474d
better finished PDU API
2023-12-06 17:04:30 +01:00
Robin Müller 3818dcd46f
this is less confusing
2023-12-06 16:17:54 +01:00
Robin Müller 38f5e3ba5f
something is wrong with the function coverage..
2023-12-06 16:16:21 +01:00
Robin Müller 3650507715
some more serde tests
2023-12-06 15:17:03 +01:00
Robin Müller bf13a432b8
add some more serde test
2023-12-06 01:33:26 +01:00
Robin Müller c21ddf3cf0
that reduced coverage again.. 2023-12-06 01:12:33 +01:00
Robin Müller 5b7c500ee7
tests green again 2023-12-05 20:50:15 +01:00
Robin Müller 059f5ba5f5
simplified CDS impl 2023-12-05 20:47:13 +01:00
Robin Müller c19e8e6464
what is this
2023-12-05 18:35:08 +01:00
Robin Müller ed4c8af164 added FS response deserialization test
2023-12-05 17:06:12 +01:00
Robin Müller 9e40dcde95 more stuff to test yay
2023-12-05 16:58:57 +01:00
Robin Müller dc2b97b848 add filestore response abstraction
2023-12-05 16:29:30 +01:00
Robin Müller 4945ea804d small test for TC reader invalid input
2023-12-05 15:31:00 +01:00
Robin Müller c6c80edb84 added some more basic tests
2023-12-05 15:26:34 +01:00
Robin Müller f620304b3a fix that test
2023-12-05 15:08:03 +01:00
Robin Müller 71e043e159 more time module tests
2023-12-05 15:05:00 +01:00
Robin Müller 149b4d65a2 add seq count
2023-12-05 14:01:21 +01:00
Robin Müller fc18a01b4c more tests
2023-12-05 13:50:10 +01:00
Robin Müller 08b1ddc41d
oh shit gotta go
2023-12-05 10:08:19 +01:00
Robin Müller 13b9ca356c
added another finished PDU test
2023-12-04 17:55:56 +01:00
Robin Müller 299d37d894
introduce new TLV abstractions
2023-12-04 15:54:35 +01:00
Robin Müller 7965e71c49
continue coverage imrpvoements 2023-12-04 13:44:53 +01:00
Robin Müller 52063320be
more
2023-12-03 20:25:32 +01:00
Robin Müller a2535502ea
move coverage improvements
2023-12-03 19:55:09 +01:00
Robin Müller 44383c10a8
added some more tests 2023-12-03 19:45:31 +01:00
Robin Müller 175315e44e
improved coverage a bit 2023-12-03 16:00:28 +01:00
Robin Müller 4faf1c99d8
chhangelog
2023-12-03 15:46:07 +01:00
Robin Müller 7b66061625
remove duplicate error variant 2023-12-03 15:45:11 +01:00
Robin Müller da201a91e5
well this is annoying work
2023-12-03 13:05:55 +01:00
Robin Müller 834d56c9bd Merge pull request 'add segment metadata accessors' (#46) from seg-metadata-accessors into main
Reviewed-on: #46
2023-12-03 12:38:55 +01:00
Robin Müller f11a23c7c7 Merge branch 'main' into seg-metadata-accessors
2023-12-02 14:58:54 +01:00
Robin Müller 5205cc0758 Merge pull request 'maybe those links are secure' (#45) from smaller-improvements into main
Reviewed-on: #45
2023-12-02 14:58:33 +01:00
Robin Müller d5f945305d
add segment metadata accessors
2023-12-02 14:58:03 +01:00
Robin Müller 58ec20c629 Merge branch 'smaller-improvements' of egit.irs.uni-stuttgart.de:rust/spacepackets into smaller-improvements
2023-12-02 14:45:19 +01:00
Robin Müller 2fdf057305
even better 2023-12-02 14:45:12 +01:00
Robin Müller b357fba212 Merge branch 'main' into smaller-improvements
2023-12-02 14:43:39 +01:00
Robin Müller e4acb9fe4f
maybe those links are secure 2023-12-02 14:42:54 +01:00
Robin Müller 7e85ea7cd1 Merge pull request 'Small improvements' (#44) from smaller-improvements into main
Reviewed-on: #44
2023-12-01 18:33:45 +01:00
Robin Müller 60a00bae99
README update
2023-12-01 17:21:36 +01:00
Robin Müller 7a8c3784f5
improve coverage python script 2023-12-01 17:19:58 +01:00
Robin Müller 347d40bcf0 Merge pull request 'split up metadata PDU API' (#42) from metadata-pdu-split-api into main
Reviewed-on: #42
2023-12-01 16:54:23 +01:00
Robin Müller 94ed37d596
this should work
2023-12-01 16:53:21 +01:00
Robin Müller ec886ba83d
README update
2023-12-01 16:48:43 +01:00
Robin Müller 983d69140a
let's try this..
2023-12-01 16:42:26 +01:00
Robin Müller be86e3055e
I think this stays the same
2023-12-01 16:37:58 +01:00
Robin Müller c7e98a964a
that's odd..
2023-12-01 16:35:46 +01:00
Robin Müller 490b05e612
that should do the job
2023-12-01 16:28:22 +01:00
Robin Müller c4847850d9
this might work better
2023-12-01 16:20:38 +01:00
Robin Müller 2ea996b9d0
serde does not work here either..
2023-12-01 16:12:50 +01:00
Robin Müller 681271a53c
lets try to make serde work
2023-12-01 16:02:37 +01:00
Robin Müller 4d21a79a46 Merge branch 'metadata-pdu-split-api' of egit.irs.uni-stuttgart.de:rust/spacepackets into metadata-pdu-split-api
2023-12-01 16:00:36 +01:00
Robin Müller 0f3cf48c0e
update CI 2023-12-01 16:00:29 +01:00
Robin Müller 8a78f27d41 Merge branch 'main' into metadata-pdu-split-api
2023-12-01 15:35:38 +01:00
Robin Müller 48b0362dc1
split up metadata PDU API
2023-12-01 15:35:09 +01:00
Robin Müller 80b80f6777 Merge pull request 'Add ACK and NAK PDU abstractions' (#41) from add-ack-nak-pdus into main
Reviewed-on: #41
2023-12-01 15:33:35 +01:00
Robin Müller e355de3f10
fmt
2023-12-01 14:46:01 +01:00
Robin Müller 1f1aa68485
remove std usages
2023-12-01 14:34:16 +01:00
Robin Müller e2ae959d03
serde impl is tricky here..
2023-12-01 14:27:21 +01:00
Robin Müller e422f4f969
fix for serde
2023-12-01 14:22:35 +01:00
Robin Müller 56113ffbcb
NAK PDU crc check
2023-12-01 14:18:33 +01:00
Robin Müller ea1edfb3c1
improve coverage
2023-12-01 12:11:31 +01:00
Robin Müller 94ff4fbb51
added coverage section in README
2023-12-01 10:33:12 +01:00
Robin Müller c99b6fddec
added coverage helper script
2023-12-01 10:22:44 +01:00
Robin Müller 2ba2998426 add more tests
2023-11-30 22:59:35 +01:00
Robin Müller 80aa963226 that was complicated but it works
2023-11-30 20:20:30 +01:00
Robin Müller 5ae86619b4 last push
2023-11-28 22:33:34 +01:00
Robin Müller b8dacc12b5 add test
2023-11-28 22:28:49 +01:00
Robin Müller 6ed023d50d
split up NAK PDU API
2023-11-28 01:19:03 +01:00
Robin Müller 65d3b4e4e5
added NAK PDU impl
2023-11-27 17:48:05 +01:00
Robin Müller dcb697bf6f
added some tests for new ACK PDU
2023-11-27 16:25:55 +01:00
Robin Müller c0bedac058
Serialize/Deserialize
2023-11-27 15:52:47 +01:00
Robin Müller c0805db137
first test 2023-11-26 23:52:34 +01:00
Robin Müller fdf6e1de90
added ACK PDU impl
2023-11-26 21:24:24 +01:00
Robin Müller 9976b53f65 added basic state tests for FD and Finished PDU
2023-11-26 15:04:44 +01:00
Robin Müller 478f8aa216 Merge pull request 'custom impl for CommonPduConfig PartialEq' (#40) from common-pdu-abstractions into main
Reviewed-on: #40
2023-11-25 19:21:22 +01:00
Robin Müller a23e5107f2
fmt
2023-11-25 18:16:21 +01:00
Robin Müller 7650429c5b custom impl for CommonPduConfig PartialEq
2023-11-24 17:09:23 +01:00
Robin Müller 5e892f86b3 Merge pull request 'Improvements for writable abstractions' (#39) from improvements-for-writable-abstractions into main
Reviewed-on: #39
2023-11-24 16:36:35 +01:00
Robin Müller b82a93757c add unittests for vec conversion
2023-11-24 16:34:06 +01:00
Robin Müller 4e90bbdc04 fix doctest
2023-11-24 16:19:59 +01:00
Robin Müller 48c9b12ee2 changelog fix
2023-11-24 16:16:30 +01:00
Robin Müller b8d6cf9d85 improvements for writable abstractions
2023-11-24 16:15:46 +01:00
Robin Müller 9e74266b76 Merge pull request 'introduce new writable PDU packet abstraction' (#38) from writable_pdu_packet into main
Reviewed-on: #38
2023-11-12 17:48:35 +01:00
Robin Müller e6408f74c1
new trait is public now
2023-11-11 18:48:48 +01:00
Robin Müller 18d650316c
introduce new writable PDU packet abstraction
2023-11-11 18:17:41 +01:00
Robin Müller 016e0d8673 Merge pull request 'clippy fix' (#37) from clippy-fix into main
Reviewed-on: #37
2023-10-10 10:03:17 +02:00
Robin Müller 8cbfef4a1c
clippy fix
2023-10-10 10:00:14 +02:00
Robin Müller 57c1d037df Merge pull request 'set direction field correctly' (#36) from pdu-set-direction-field-correctly into main
Reviewed-on: #36
2023-10-06 15:04:17 +02:00
Robin Müller 422b0107e5
changelog
2023-10-06 15:02:12 +02:00
Robin Müller 6c201206cc
set direction field correctly
2023-10-06 14:54:11 +02:00
Robin Müller 5ebf0a5f4c Merge pull request 'update release checklist' (#35) from small-tweak into main
Reviewed-on: #35
2023-09-26 18:15:32 +02:00
Robin Müller f1cf3802b5
update release checklist
2023-09-26 18:15:05 +02:00
Robin Müller 419acb19c4 Merge pull request 'oopsie' (#34) from oopsie into main
Reviewed-on: #34
2023-09-26 17:17:22 +02:00
Robin Müller 686b8eaaec
oopsie
2023-09-26 17:17:01 +02:00
Robin Müller 34b58fe9cc Merge pull request 'bump version specifier' (#33) from prep_v0.7.0-beta2 into main
Reviewed-on: #33
2023-09-26 17:15:29 +02:00
Robin Müller 393c73cedf
re-enable failing docs builds
2023-09-26 17:11:39 +02:00
Robin Müller 3e97bf0c15
bump version specifier
2023-09-26 17:10:21 +02:00
Robin Müller 7839fb3776 Merge pull request 'use non-deprecated API' (#32) from test-tweaks into main
Reviewed-on: #32
2023-09-26 17:09:24 +02:00
Robin Müller 55ad24db34
use non-deprecated API
2023-09-26 17:08:58 +02:00
Robin Müller 3b4a909ce1 Merge pull request 'Added to_vec method for SerializablePusPacket' (#31) from serializable-pus-to-vec into main
Reviewed-on: #31
2023-09-26 16:59:41 +02:00
Robin Müller 76ad1c7ead Added to_vec method for SerializablePusPacket
2023-09-26 16:55:07 +02:00
Robin Müller 79d26e1a67 Merge pull request 'Packet ID trait implementations' (#30) from packet-id-trait-impls into main
Reviewed-on: #30
2023-09-18 18:19:31 +02:00
Robin Müller be37c15478
docs failure should not fail the whole build
2023-09-18 18:00:14 +02:00
Robin Müller a6bced7983
this is okay
2023-09-18 17:50:50 +02:00
Robin Müller 5d8b5ce370
please stop
2023-09-18 17:42:10 +02:00
Robin Müller b94d07f6c9
try 2
2023-09-18 17:38:56 +02:00
Robin Müller 90e48483bb
next try
2023-09-18 17:36:10 +02:00
Robin Müller 963b9dbb5f
inline PacketId raw call
2023-09-18 17:34:49 +02:00
Robin Müller 2a0db6b21c
maybe this fixes the issue? 2023-09-18 17:34:04 +02:00
Robin Müller a4b14250c2
add stage to display toolchain info
2023-09-18 17:32:54 +02:00
Robin Müller 6116cdb27c
add some tests
2023-09-18 17:13:22 +02:00
Robin Müller 6ebdf7e330
added packet ID trait impls
2023-09-18 16:59:38 +02:00
Robin Müller e935b3825a Merge pull request 'release-checklist: missing bullet point' (#29) from release-checklist-note into main
Reviewed-on: #29
2023-08-28 18:56:55 +02:00
Robin Müller a49737fc34
release-checklist: missing bullet point
2023-08-28 18:56:08 +02:00
Robin Müller 3081539bb2 Merge pull request 'prep next beta release' (#28) from prep_v0.7.0-beta.1 into main
Reviewed-on: #28
2023-08-28 18:54:32 +02:00
Robin Müller 1b01c8bb0b
small changelog note
2023-08-28 18:54:01 +02:00
Robin Müller 2ee3eee32e
only allow zerocopy v0.7.0
2023-08-28 18:47:41 +02:00
Robin Müller 406d731bbe
fix zerocopy usage
2023-08-28 18:45:24 +02:00
Robin Müller 49b50ec682
prep next beta release
2023-08-28 18:36:00 +02:00
Robin Müller 00fdfde015 Merge pull request 'invalid time code struct variant' (#26) from invalid-time-code-struct-variant into main
Reviewed-on: #26
2023-08-28 17:42:46 +02:00
Robin Müller 491b03c701
Merge remote-tracking branch 'origin/main' into invalid-time-code-struct-variant
2023-08-28 17:32:10 +02:00
Robin Müller e090beedde
CHANGELOG
2023-08-28 17:31:41 +02:00
Robin Müller 6f2ed3003f Merge pull request 'UnsignedByteFieldError: Use struct variants' (#27) from ubf-error-struct-variants into main
Reviewed-on: #27
2023-08-28 17:27:34 +02:00
Robin Müller 0b5a384743
fix tests
2023-08-28 17:24:54 +02:00
Robin Müller 925b2aa8d8
remove obsolete comment
2023-08-28 17:22:36 +02:00
Robin Müller d98d4b55c8
convert UnsigedByteFieldError variants to struct variants
2023-08-28 17:21:48 +02:00
Robin Müller 94c378fa3b Merge branch 'main' into invalid-time-code-struct-variant
2023-08-28 17:11:32 +02:00
Robin Müller 1fc15230fa Merge pull request 'Size missmatch struct variant' (#25) from size-missmatch-struct-variant into main
Reviewed-on: #25
2023-08-28 17:10:57 +02:00
Robin Müller e78f196a42
invalid time code struct variant
2023-08-28 17:10:45 +02:00
Robin Müller 2a11359a81 Merge branch 'main' into size-missmatch-struct-variant
2023-08-28 17:01:14 +02:00
Robin Müller c226c5ea0f
changelog
2023-08-28 17:00:29 +02:00
Robin Müller ab65845573 Merge pull request 'PDU improvements and additions' (#24) from pdu-additions into main
Reviewed-on: #24
2023-08-28 16:52:25 +02:00
Robin Müller 3206af690c
well that was a lot
2023-08-18 10:09:32 +02:00
Robin Müller 805065a7b9
cargo fmt
2023-08-17 22:13:00 +02:00
Robin Müller 62533bb91c Merge remote-tracking branch 'origin/main' into pdu-additions
2023-08-17 21:34:38 +02:00
Robin Müller c085f9ab32 Merge pull request 'update LV and TLV code' (#22) from update-lv-tlv into main
Reviewed-on: #22
2023-08-17 21:33:48 +02:00
Robin Müller f208a9b0f0
fixed a test
2023-08-17 21:24:12 +02:00
Robin Müller c96a86a994
this should be better 2023-08-17 21:22:49 +02:00
Robin Müller 9dfc593d2a
fixes for pdu error enum 2023-08-17 21:04:27 +02:00
Robin Müller 990b8de519
changelog
2023-08-17 20:42:45 +02:00
Robin Müller 965541e422
getter function for datafield len 2023-08-17 20:41:45 +02:00
Robin Müller 9a52066314
fmt
2023-08-16 18:19:41 +02:00
Robin Müller 6ab05e2d83
fix for docs
2023-08-16 18:19:15 +02:00
Robin Müller 2d81a79321
Merge remote-tracking branch 'origin/main' into update-lv-tlv
2023-08-16 18:17:51 +02:00
Robin Müller fc7bee342c Merge pull request 'Const UBF' (#23) from const-ubf into main
Reviewed-on: #23
2023-08-16 18:17:13 +02:00
Robin Müller 1789cff2b8
why is this still not a test
2023-08-16 18:16:43 +02:00
Robin Müller 5ae5abe09a
make UnsignedByteField helpers const
2023-08-16 18:15:49 +02:00
Robin Müller 407d1e1154
additional test for new method
2023-08-16 18:10:39 +02:00
Robin Müller 3ba575aac1
changelog
2023-08-16 18:01:54 +02:00
Robin Müller 81db36d159
additional docs
2023-08-16 18:01:09 +02:00
Robin Müller 081f6e840f
added additional API
2023-08-16 17:58:19 +02:00
Robin Müller 3cb19298c8
some restructuring
2023-08-16 17:27:02 +02:00
Robin Müller 4e2c0f1aa7
added a few additional tests
2023-08-16 16:36:15 +02:00
Robin Müller 83db710950
update LV and TLV code
2023-08-16 16:27:10 +02:00
Robin Müller 0f49672829 Merge pull request 'v0.7.0 beta' (#21) from prep_v0.7.0-beta into main
Reviewed-on: #21
2023-08-16 14:22:40 +02:00
Robin Müller ffd1bf3769
add missing doc_cfg
2023-08-16 14:11:26 +02:00
Robin Müller ccf7592284
doc fix
2023-08-16 14:02:33 +02:00
Robin Müller 2b7fcbdd8e
changelog 2023-08-16 14:00:38 +02:00
Robin Müller e389f77063
lets release a beta version 2023-08-16 13:59:34 +02:00
Robin Müller fcf4449c14 Merge pull request 'gitlab ci fix' (#20) from gitlab-ci-fix into main
Reviewed-on: #20
2023-08-14 14:06:56 +02:00
Robin Müller 05916130a8
gitlab ci fix
2023-08-14 14:03:25 +02:00
Robin Müller 4b7f2b4817 Merge pull request 'bump msrv to v1.61' (#19) from bump_msrv into main
Reviewed-on: #19
2023-08-11 14:01:43 +02:00
Robin Müller 91e7d8549c
bump msrv to v1.61
2023-08-11 14:00:46 +02:00
Robin Müller 3430275638 Merge pull request 'PDU config tweak' (#17) from pdu-conf-tweaK into main
Reviewed-on: #17
2023-08-10 21:40:43 +02:00
Robin Müller a3bab2619c
compile fix
2023-08-10 21:28:27 +02:00
Robin Müller 70815fa1e3
Merge remote-tracking branch 'origin/main' into pdu-conf-tweaK
2023-08-10 21:06:56 +02:00
Robin Müller ebfd3c636d Merge pull request 'Improve CommonPduConfig' (#18) from improve-pdu-conf into main
Reviewed-on: #18
2023-08-10 20:25:33 +02:00
Robin Müller a3da71668f
also added a unittest
2023-08-10 20:22:07 +02:00
Robin Müller fd893fbf89
changelog fix
2023-08-10 20:08:25 +02:00
Robin Müller 17296ade19
fmt
2023-08-10 20:04:50 +02:00
Robin Müller 85476162cf
missing changelog entry
2023-08-10 20:03:50 +02:00
Robin Müller 837b412ef0
changelog
2023-08-10 20:01:13 +02:00
Robin Müller 02098977a5
added pdu conf src and dest id setter 2023-08-10 19:55:42 +02:00
Robin Müller 20584b45ca
changelog
2023-08-10 18:17:32 +02:00
Robin Müller 105c598c53
some more extensions for PDU impl
2023-07-26 23:27:40 +02:00
Robin Müller c65a024d97
make metadata params accessible
2023-07-26 22:53:05 +02:00
Robin Müller 041959e546
no error handling necessary here
2023-07-26 22:00:03 +02:00
Robin Müller cc1ec56b33
change for common pdu conf
2023-07-25 00:42:31 +02:00
Robin Müller 0a59f3258a Merge pull request 'Refactor ECSS packet modules' (#16) from refactoring-ecss-packet-mods into main
Reviewed-on: #16
2023-07-11 22:26:26 +02:00
Robin Müller 62df510147
new TM simple constructor
2023-07-11 22:12:17 +02:00
Robin Müller 5b8cc30012
oh boy
Rust/spacepackets/pipeline/pr-main There was a failure building this commit Details
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-11 14:16:17 +02:00
Robin Müller 3c8f9f9f07
added some more deprecation warnings
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-07-11 00:41:35 +02:00
Robin Müller 46eab35290
added trait impls to legacy classes
Rust/spacepackets/pipeline/head This commit looks good Details
2023-07-11 00:07:38 +02:00
Robin Müller 7db2190ec3
more improvements
Rust/spacepackets/pipeline/head This commit looks good Details
2023-07-11 00:03:07 +02:00
Robin Müller 77be96e8de
changelog
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-10 23:37:23 +02:00
Robin Müller 6eb3cbbd84
added new marker traits for PUS TC and PUS TM
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-10 23:36:27 +02:00
Robin Müller c0e70daf58
some tweaks and test update
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-10 23:29:55 +02:00
Robin Müller eca7e09d19
changelog
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-10 23:18:09 +02:00
Robin Müller 98e2a73aa2
move deprecation blocks
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-10 11:59:45 +02:00
Robin Müller 05eb9d44ef
how do you deal with these deprecation warnings?
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-10 11:20:55 +02:00
Robin Müller ba22618449 Merge remote-tracking branch 'origin/main' into refactoring-ecss-packet-mods
Rust/spacepackets/pipeline/head This commit looks good Details
2023-07-10 10:30:26 +02:00
Robin Müller 9ecc1f1ff2 Merge pull request 'try fixing CI' (#15) from try-fix-ci into main
Rust/spacepackets/pipeline/head This commit looks good Details
Reviewed-on: #15
2023-07-10 10:27:31 +02:00
Robin Müller 240004b814
make thiserror optional
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-07-10 10:25:11 +02:00
Robin Müller 79791a75bd
try fixing CI
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-10 10:15:37 +02:00
Robin Müller b553cdc2ec
Added PusTmCreator and PusTmReader
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-10 01:11:42 +02:00
Robin Müller e46de3421e
resolve merge conflict
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-10 00:38:34 +02:00
Robin Müller fd95c86294
Merge remote-tracking branch 'origin/main' into refactoring-ecss-packet-mods 2023-07-10 00:38:07 +02:00
Robin Müller f117c8c4de
changelog correction
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-10 00:37:16 +02:00
Robin Müller 784564a20e
now its a proper re-export
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-10 00:00:20 +02:00
Robin Müller e3d2d88538
remove another error suffix
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-09 19:31:32 +02:00
Robin Müller 4969d6c33c
moved re-export
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-09 16:48:53 +02:00
Robin Müller 581b51c61c
improve std timestamp error further
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-09 16:46:25 +02:00
Robin Müller b5bea3e1c6
implement thiserror::Error for StdTimestampError 2023-07-09 16:43:45 +02:00
Robin Müller defd7609e7
I am not sure this is required..
Rust/spacepackets/pipeline/head This commit looks good Details
2023-07-09 12:39:00 +02:00
Robin Müller b9774c4c9f
doc fixes and tweaks 2023-07-09 12:33:34 +02:00
Robin Müller 4fdfb20946
refactoring ECSS packet modules 2023-07-09 12:27:44 +02:00
Robin Müller 2704c589be
prep v0.6.0
Rust/spacepackets/pipeline/head This commit looks good Details
2023-07-06 01:26:53 +02:00
Robin Müller 32cbbb1c19
some minor fixes and tweaks 2023-07-06 01:24:40 +02:00
Robin Müller 4485ed2669
this API is useful for funnels
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-05 19:28:43 +02:00
Robin Müller 2deac938bb
better API name
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-05 19:26:22 +02:00
Robin Müller 929590ecb0
add additional API to set APID
Rust/spacepackets/pipeline/head This commit looks good Details
2023-07-05 19:25:05 +02:00
Robin Müller c85177ece9
API update
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-05 19:07:31 +02:00
Robin Müller f406957752
docs
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-03 00:57:38 +02:00
Robin Müller 28cd8c02ac
move additional function to trait
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-03 00:54:21 +02:00
Robin Müller ef4244c8cb
better name for function
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-02 20:57:50 +02:00
Robin Müller 94cfe59235
allow delegate update
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-02 18:43:05 +02:00
Robin Müller ec5d98a9b5
require this for less duplicate code
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-02 18:38:18 +02:00
Robin Müller 1ddfc432f3 Merge pull request 'CFDP initial packet support' (#14) from cfdp_first_support into main
Rust/spacepackets/pipeline/head This commit looks good Details
Reviewed-on: #14
2023-07-02 17:31:16 +02:00
Robin Müller ae8fb8ee14
core compatibility fix
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-07-02 17:25:10 +02:00
Robin Müller 188651c4a4
typos
Rust/spacepackets/pipeline/head There was a failure building this commit Details
Rust/spacepackets/pipeline/pr-main There was a failure building this commit Details
2023-07-02 17:22:07 +02:00
Robin Müller ad64957342
some documentation
Rust/spacepackets/pipeline/pr-main There was a failure building this commit Details
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-02 17:20:57 +02:00
Robin Müller a313a784ff
finish FS request unittests
Rust/spacepackets/pipeline/pr-main There was a failure building this commit Details
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-07-02 17:18:33 +02:00
Robin Müller 3727e7e668
added more FS request tests
Rust/spacepackets/pipeline/pr-main This commit looks good Details
Rust/spacepackets/pipeline/head This commit looks good Details
2023-06-12 16:19:20 +02:00
Robin Müller 0e87039010
added first basic state test
Rust/spacepackets/pipeline/pr-main This commit looks good Details
Rust/spacepackets/pipeline/head This commit looks good Details
2023-06-12 16:04:32 +02:00
Robin Müller d9028d21da
start adding tests
Rust/spacepackets/pipeline/pr-main This commit looks good Details
Rust/spacepackets/pipeline/head This commit looks good Details
2023-06-12 15:52:10 +02:00
Robin Müller 99dbf9dc85
finished basic FS request impl
Rust/spacepackets/pipeline/pr-main This commit looks good Details
Rust/spacepackets/pipeline/head This commit looks good Details
2023-06-12 14:26:40 +02:00
Robin Müller 895080bbc0
add crc serialization for all packets
Rust/spacepackets/pipeline/pr-main This commit looks good Details
Rust/spacepackets/pipeline/head This commit looks good Details
2023-06-12 13:00:58 +02:00
Robin Müller e48c2fe368
some docs 2023-06-12 04:13:41 +02:00
Robin Müller d217a669b2
add more docs 2023-06-12 04:04:52 +02:00
Robin Müller 15bc12aede
that should be sufficient for the first FSM approach 2023-06-12 04:02:18 +02:00
Robin Müller 02675ba086
first basic Finished PDU impl 2023-06-12 03:57:38 +02:00
Robin Müller eb6bc4b8a8
added some basic API for finished PDU
Rust/spacepackets/pipeline/pr-main This commit looks good Details
Rust/spacepackets/pipeline/head This commit looks good Details
2023-06-08 20:55:46 +02:00
Robin Müller 006bc39ff6
finished basic EOF unittests
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-06-08 20:50:18 +02:00
Robin Müller bce16a6018 Merge branch 'cfdp_first_support' of https://egit.irs.uni-stuttgart.de/rust/spacepackets into cfdp_first_support
Rust/spacepackets/pipeline/pr-main There was a failure building this commit Details
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-06-08 19:25:22 +02:00
Robin Müller 84b909b722
continue EOF 2023-06-08 19:24:55 +02:00
Robin Müller ab5c28d304 finish PDU
Rust/spacepackets/pipeline/pr-main There was a failure building this commit Details
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-06-08 16:51:22 +02:00
Robin Müller 912c03b5c7
base line EOF model
Rust/spacepackets/pipeline/pr-main This commit looks good Details
Rust/spacepackets/pipeline/head This commit looks good Details
2023-06-07 01:12:07 +02:00
Robin Müller 0b714b7426
start EOF
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-06-06 08:59:18 +02:00
Robin Müller bb1ecb29b6
fmt and clippy
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-05-30 19:35:59 +02:00
Robin Müller 9f574ff443
continue file data test 2023-05-30 19:35:38 +02:00
Robin Müller 0ad8dd6eef
add test with segment metadata
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-05-30 17:39:33 +02:00
Robin Müller 81eb8e7887
continue file data PDU
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-05-30 15:36:02 +02:00
Robin Müller 5c3c9a9bde
add test run config
Rust/spacepackets/pipeline/pr-main This commit looks good Details
Rust/spacepackets/pipeline/head This commit looks good Details
2023-05-30 11:09:41 +02:00
Robin Müller 44223c1c0f
add file data test harness
Rust/spacepackets/pipeline/pr-main There was a failure building this commit Details
Rust/spacepackets/pipeline/head There was a failure building this commit Details
2023-05-30 08:33:03 +02:00
Robin Müller 9d758cce45
first basic fd PDU impl
Rust/spacepackets/pipeline/pr-main This commit looks good Details
Rust/spacepackets/pipeline/head This commit looks good Details
2023-05-30 00:16:16 +02:00
Robin Müller 3166a280bc
metadata PDU done 2023-05-29 23:38:07 +02:00
Robin Müller bf4e841499
added correct PDU datafield length handling
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-05-29 14:28:15 +02:00
Robin Müller ce0848dc28
base line metadata PDU impl done 2023-05-29 13:46:19 +02:00
Robin Müller e13183764e
continue metadata PDU tests 2023-05-29 01:29:04 +02:00
Robin Müller 6865898102
add first test
Rust/spacepackets/pipeline/head There was a failure building this commit Details
Rust/spacepackets/pipeline/pr-main There was a failure building this commit Details
2023-05-28 23:50:12 +02:00
Robin Müller e343faa1c5
clippy fixes
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-05-21 20:30:16 +02:00
Robin Müller f6e309d2ee
docs and stuff
Rust/spacepackets/pipeline/pr-main This commit looks good Details
Rust/spacepackets/pipeline/head This commit looks good Details
2023-05-19 00:42:31 +02:00
Robin Müller 9a9694981a
start adding LV tests
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-05-18 20:37:41 +02:00
Robin Müller b37d932e4f
TLV and TV abstractions complete
Rust/spacepackets/pipeline/head There was a failure building this commit Details
Rust/spacepackets/pipeline/pr-main There was a failure building this commit Details
2023-05-18 15:01:08 +02:00
Robin Müller 0c085ef27b
added basic TLV impl 2023-05-18 14:05:51 +02:00
Robin Müller d5a3e7c0d4
PDU header base impl done
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-05-18 13:32:45 +02:00
Robin Müller 02bae5de6c
almost completed PDU header impl 2023-05-18 11:08:46 +02:00
Robin Müller 0a3848d0a2
add more tests 2023-05-18 00:46:58 +02:00
Robin Müller a67718cff2
bugfix
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-05-15 01:03:15 +02:00
Robin Müller 53eb184534
add first PDU header tests 2023-05-15 01:01:46 +02:00
Robin Müller 074882c160
tests done 2023-05-15 00:11:41 +02:00
Robin Müller 8bb4d6d32e
tests almost complete 2023-05-14 23:56:26 +02:00
Robin Müller 8d15091b42
added more to changelog
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-05-14 20:11:31 +02:00
Robin Müller d2f944580c
thats a lot
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-05-14 20:10:34 +02:00
Robin Müller 4bbf38916a
completed base definitions
Rust/spacepackets/pipeline/head This commit looks good Details
Rust/spacepackets/pipeline/pr-main This commit looks good Details
2023-05-14 16:55:25 +02:00
Robin Müller 3457b3a8f9
add first definitions
Rust/spacepackets/pipeline/head This commit looks good Details
2023-02-19 20:52:42 +01:00
Robin Müller 0304f132e3
add cfdp mod
Rust/spacepackets/pipeline/head This commit looks good Details
2023-02-19 18:45:14 +01:00
33 changed files with 12526 additions and 2682 deletions

View File

@ -1,42 +1,39 @@
on: [push]
name: ci
on: [push, pull_request]
jobs:
check:
name: Check
name: Check build
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: actions-rs/cargo@v1
with:
command: check
args: --release
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- run: cargo check --release
msrv:
name: Check with MSRV
test:
name: Run Tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: 1.60.0
override: true
profile: minimal
- uses: actions-rs/cargo@v1
with:
command: check
args: --release
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Install nextest
uses: taiki-e/install-action@nextest
- run: cargo nextest run --all-features
- run: cargo test --doc
msrv:
name: Check MSRV
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.65.0
- run: cargo check --release
cross-check:
name: Check Cross
name: Check Cross-Compilation
runs-on: ubuntu-latest
strategy:
matrix:
@ -44,70 +41,32 @@ jobs:
- armv7-unknown-linux-gnueabihf
- thumbv7em-none-eabihf
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
with:
profile: minimal
toolchain: stable
target: ${{ matrix.target }}
override: true
- uses: actions-rs/cargo@v1
with:
use-cross: true
command: check
args: --release --target=${{ matrix.target }} --no-default-features
targets: "armv7-unknown-linux-gnueabihf, thumbv7em-none-eabihf"
- run: cargo check --release --target=${{matrix.target}} --no-default-features
fmt:
name: Rustfmt
name: Check formatting
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- run: rustup component add rustfmt
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- run: cargo fmt --all -- --check
check-doc:
docs:
name: Check Documentation Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
profile: minimal
- uses: actions-rs/cargo@v1
with:
command: doc
args: --all-features
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@nightly
- run: cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docs_rs"]'
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- run: rustup component add clippy
- uses: actions-rs/cargo@v1
with:
command: clippy
args: -- -D warnings
ci:
if: ${{ success() }}
# all new jobs must be added to this list
needs: [check, fmt, clippy]
runs-on: ubuntu-latest
steps:
- name: CI succeeded
run: exit 0
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- run: cargo clippy -- -D warnings

View File

@ -0,0 +1,19 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Check" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
<option name="command" value="check --all-features" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="emulateTerminal" value="false" />
<option name="channel" value="DEFAULT" />
<option name="requiredFeatures" value="true" />
<option name="allFeatures" value="true" />
<option name="withSudo" value="false" />
<option name="buildTarget" value="REMOTE" />
<option name="backtrace" value="SHORT" />
<envs />
<option name="isRedirectInput" value="false" />
<option name="redirectInputPath" value="" />
<method v="2">
<option name="CARGO.BUILD_TASK_PROVIDER" enabled="true" />
</method>
</configuration>
</component>

View File

@ -0,0 +1,19 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="Test" type="CargoCommandRunConfiguration" factoryName="Cargo Command" nameIsGenerated="true">
<option name="command" value="test" />
<option name="workingDirectory" value="file://$PROJECT_DIR$" />
<option name="emulateTerminal" value="false" />
<option name="channel" value="DEFAULT" />
<option name="requiredFeatures" value="true" />
<option name="allFeatures" value="true" />
<option name="withSudo" value="false" />
<option name="buildTarget" value="REMOTE" />
<option name="backtrace" value="SHORT" />
<envs />
<option name="isRedirectInput" value="false" />
<option name="redirectInputPath" value="" />
<method v="2">
<option name="CARGO.BUILD_TASK_PROVIDER" enabled="true" />
</method>
</configuration>
</component>

View File

@ -8,6 +8,296 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.11.2] 2024-05-14
## Fixed
- Removed `defmt::Format` impl for `MetadataPduCreator` which seems to be problematic.
# [v0.11.1] 2024-04-22
## Fixed
- The default data length for `SpHeader` constructors where the data field length is not
specified is now 0.
- `SpHeader::new_from_fields` is now public.
## Added
- `SpHeader::to_vec` method.
# [v0.11.0] 2024-04-16
## Changed
- Moved `CCSDS_HEADER_LEN` constant to the crate root.
## Added
- Added `SpacePacketHeader` type alias for `SpHeader` type.
# [v0.11.0-rc.2] 2024-04-04
## Changed
- Renamed `PacketId` and `PacketSequenceCtrl` `new` method to `new_checked` and former
`new_const` method to `new`.
- Renamed `tc`, `tm`, `tc_unseg` and `tm_unseg` variants for `PacketId` and `SpHeader`
to `new_for_tc_checked`, `new_for_tm_checked`, `new_for_unseg_tc_checked` and
`new_for_unseg_tm_checked`.
- `PusTmCreator` and `PusTcCreator` now expect a regular instance of `SpHeader` instead of
a mutable reference.
## Added
- `SpHeader::new_from_apid` and `SpHeader::new_from_apid_checked` constructor.
- `#[inline]` attribute for a lot of small functions.
# [v0.11.0-rc.1] 2024-04-03
Major API changes for the time API. If you are using the time API, it is strongly recommended
to check all the API changes in the **Changed** chapter.
## Fixed
- CUC timestamp was fixed to include leap second corrections because it is based on the TAI
time reference. The default CUC time object does not implement `CcsdsTimeProvider` anymore
because the trait methods require cached leap second information. This task is now performed
by `cuc::CucTimeWithLeapSecs`, which implements the trait.
## Added
- `From<$EcssEnum$TY> for $TY` for the ECSS enum type definitions.
- Added basic conversion support for the `time` library. Introduced new `chrono` and `timelib`
feature gates.
- Added `CcsdsTimeProvider::timelib_date_time`.
- Optional support for `defmt` by adding optional `defmt::Format` derives for common types.
## Changed
- `PusTcCreator::new_simple` now expects a valid slice for the source data instead of an optional
slice. For telecommands without application data, `&[]` can be passed.
- `PusTmSecondaryHeader` constructors now expect a valid slice for the time stamp instead of an
optional slice.
- Renamed `CcsdsTimeProvider::date_time` to `CcsdsTimeProvider::chrono_date_time`
- Renamed `CcsdsTimeCodes` to `CcsdsTimeCode`
- Renamed `cds::TimeProvider` to `cds::CdsTime`
- Renamed `cuc::TimeProviderCcsdsEpoch` to `cuc::CucTime`
- `UnixTimestamp` renamed to `UnixTime`
- `UnixTime` seconds are now private and can be retrieved using the `secs` member method.
- `UnixTime::new` renamed to `UnixTime::new_checked`.
- `UnixTime::secs` renamed to `UnixTime::as_secs`.
- `UnixTime` now has nanosecond subsecond precision. The `new` constructor now expects
nanoseconds as the second argument.
- Added new `UnixTime::new_subsec_millis` and `UnixTime::new_subsec_millis_checked` API
to still allow creating a timestamp with only millisecond subsecond resolution.
- `CcsdsTimeProvider` now has a new `subsec_nanos` method in addition to a default
implementation for the `subsec_millis` method.
- `CcsdsTimeProvider::date_time` renamed to `CcsdsTimeProvider::chrono_date_time`.
- Added `UnixTime::MIN`, `UnixTime::MAX` and `UnixTime::EPOCH`.
- Added `UnixTime::timelib_date_time`.
- Error handling for ECSS and time module is more granular now, with a new
`DateBeforeCcsdsEpochError` error and a `DateBeforeCcsdsEpoch` enum variant for both
`CdsError` and `CucError`.
- `PusTmCreator` now has two lifetimes: One for the raw source data buffer and one for the
raw timestamp.
- Time API `from_now*` API renamed to `now*`.
## Removed
- Legacy `PusTm` and `PusTc` objects.
# [v0.11.0-rc.0] 2024-03-04
## Added
- `From<$TY>` for the `EcssEnum$TY` ECSS enum type definitions.
- `Sub` implementation for `UnixTimestamp` to calculate the duration between two timestamps.
## Changed
- `CcsdsTimeProvider` `subsecond_millis` function now returns `u16` instead of `Option<u16>`.
- `UnixTimestamp` `subsecond_millis` function now returns `u16` instead of `Option<u16>`.
# [v0.10.0] 2024-02-17
## Added
- Added `value` and `to_vec` methods for the `UnsignedEnum` trait. The value is returned
as `u64`. Renamed former `value` method on `GenericUnsignedByteField` to `value_typed`.
- Added `value_const` const function for `UnsignedByteField` type.
- Added `value_typed` const functions for `GenericUnsignedByteField` and `GenericEcssEnumWrapper`.
# [v0.9.0] 2024-02-07
## Added
- `CcsdsPacket`, `PusPacket` and `GenericPusTmSecondaryHeader` implementation for
`PusTmZeroCopyWriter`.
- Additional length checks for `PusTmZeroCopyWriter`.
## Changed
- `PusTmZeroCopyWriter`: Added additional timestamp length argument for `new` constructor.
## Fixed
- Typo: `PUC_TM_MIN_HEADER_LEN` -> `PUS_TM_MIN_HEADER_LEN`
# [v0.8.1] 2024-02-05
## Fixed
- Added `pub` visibility for `PacketSequenceCtrl::const_new`.
# [v0.8.0] 2024-02-05
## Added
- Added `len_written` and `to_vec` methods to the `TimeWriter` trait.
# [v0.7.0] 2024-02-01
# [v0.7.0-beta.4] 2024-01-23
## Fixed
- `MetadataPduCreator`: The serialization function shifted the closure requested information
to the wrong position (first reserved bit) inside the raw content field.
# [v0.7.0-beta.3] 2023-12-06
## Added
- Add `WritablePduPacket` trait which is a common trait of all CFDP PDU implementations.
- Add `CfdpPdu` trait which exposes fields and attributes common to all CFDP PDUs.
- Add `GenericTlv` and `WritableTlv` trait as abstractions for the various TLV types.
## Fixed
- Set the direction field inside the PDU header correctly and explicitly for all CFDP PDU
packets.
## Changed
- Split up `FinishedPdu` into `FinishedPduCreator` and `FinishedPduReader` to expose specialized
APIs.
- Split up `MetadataPdu` into `MetadataPduCreator` and `MetadataPduReader` to expose specialized
APIs.
- Cleaned up CUC time implementation. Added `width` and `counter` getter methods.
- Renamed `SerializablePusPacket` to `WritablePusPacket`.
- Renamed `UnsignedPfc` to `PfcUnsigned` and `RealPfc` to `PfcReal`.
- Renamed `WritablePduPacket.written_len` and `SerializablePusPacket.len_packed` to `len_written`.
- Introduce custom implementation of `PartialEq` for `CommonPduConfig` which only compares the
values for the source entity ID, destination entity ID and transaction sequence number field to
allow those fields to have different widths.
- Removed the `PusError::RawDataTooShort` variant which is already covered by
`PusError::ByteConversionError` variant.
- Renamed `TlvLvError::ByteConversionError` to `TlvLvError::ByteConversion`.
- Renamed `PusError::IncorrectCrc` to `PusError::ChecksumFailure`.
- Some more struct variant changes for error enumerations.
## Removed
- `PusError::NoRawData` variant.
- `cfdp::LenInBytes` which was not used.
# [v0.7.0-beta.2] 2023-09-26
## Added
- `PacketId` trait impls: `Ord`, `PartialOrd` and `Hash`
- `SerializablePusPacket` trait: Add `to_vec` method with default implementation.
# [v0.7.0-beta.1] 2023-08-28
- Bump `zerocopy` dependency to v0.7.0
## Changed
- The `Tlv` and `Lv` API return `&[u8]` instead of `Option<&[u8]>`.
- `ByteConversionError` error variants `ToSliceTooSmall` and `FromSliceTooSmall` are struct
variants now. `SizeMissmatch` was removed appropriately.
- `UnsignedByteFieldError` error variants `ValueTooLargeForWidth` and `InvalidWidth` are struct
variants now.
- `TimestampError` error variant `InvalidTimeCode` is a struct variant now.
## Added
- Added `raw_data` API for `Tlv` and `Lv` to retrieve the whole `Lv`/`Tlv` slice if the object
was created from a raw bytestream.
- Added `MsgToUserTlv` helper class which wraps a regular `Tlv` and adds some useful functionality.
- `UnsignedByteField` and `GenericUnsignedByteField` `new` methods are `const` now.
- `PduError` variants which were tuple variants with multiple fields were converted to
struct variants.
## Added
- Added `pdu_datafield_len` getter function for `PduHeader`
## Removed
- `SizeMissmatch` because it is not required for the `ByteConversionError` error enumeration
anymore.
# [v0.7.0-beta.0] 2023-08-16
- Moved MSRV from v1.60 to v1.61.
## Changed
- `PusPacket` trait: `user_data` now returns `&[u8]` instead of `Option<&[u8]>`. Empty user data
can simply be an empty slice.
- Moved ECSS TC components from `tc` to `ecss.tc`.
- Moved ECSS TM components from `tm` to `ecss.tm`.
- Converted `PusTc` class to more specialized `PusTcCreator` and `PusTcReader`
classes. The old `PusTc` class is deprecated now.
- Converted `PusTm` class to more specialized `PusTmCreator` and `PusTmReader`
classes. The old `PusTm` class is deprecated now.
- Implement `Display` and `Error` for `StdTimestampError` properly.
- Remove some redundant `Error` suffixes for enum error variants.
- `CommonPduConfig`: `new_with_defaults` replaced by `new_with_byte_fields`.
## Added
- `source_data` and `app_data` API provided for PUS TM and PUS TC reader classes. These simply
call `user_data` but are also in line with the PUS packet standard names for those fields.
- Added new marker trait `IsPusTelemetry` implemented by `PusTmCreator` and `PusTmReader`.
- Added new marker trait `IsPusTelecommand` implemented by `PusTcCreator` and `PusTcReader`.
- `metadata_param` getter method for the `MetadataPdu` object.
- `Default` impl for CFDP `ChecksumType`
- `Default` impl for CFDP `CommonPduConfig`
## Fixed
- All `MetadataGenericParam` fields are now public.
- New setter method `set_source_and_dest_id` for `CommonPduConfig`.
# [v0.6.0] 2023-07-06
## Added
- Added new `util` module which contains the following (new) helper modules:
- `UnsignedEnum` trait as an abstraction for unsigned byte fields with variable lengths. It is
not tied to the ECSS PFC value like the `EcssEnumeration` trait. The method to retrieve
the size of the unsigned enumeration in bytes is now called `size`.
- `GenericUnsignedByteField<TYPE>` and helper typedefs `UnsignedU8`, `UnsignedU16`, `UnsignedU32`
and `UnsignedU64` as helper types implementing `UnsignedEnum`
- `UnsignedByteField` as a type-erased helper.
- Initial CFDP support: Added PDU packet implementation.
- Added `SerializablePusPacket` as a generic abstraction for PUS packets which are
writable.
- Added new `PusTmZeroCopyWriter` class which allows setting fields on a raw TM packet,
which might be more efficient than modifying and re-writing a packet with the
`PusTm` object.
## Changed
- The `EcssEnumeration` now requires the `UnsignedEnum` trait and only adds the `pfc` method to it.
- Renamed `byte_width` usages to `size` (part of new `UnsignedEnum` trait)
- Moved `ecss::CRC_CCITT_FALSE` CRC constant to the root module. This CRC type is not just used by
the PUS standard, but by the CCSDS Telecommand standard and the CFDP standard as well.
# [v0.5.4] 2023-02-12
## Added

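As a quick, hedged illustration of the time API changes documented in the changelog above (v0.11.0-rc.1): the names below (`UnixTime`, `new`, `new_subsec_millis`, `as_secs`) are taken from the changelog entries, while the module path and exact signatures are assumptions and are not verified against the crate documentation.

```rust
// Hypothetical sketch based on the changelog entries above; the
// `spacepackets::time` path and the exact signatures are assumptions.
use spacepackets::time::UnixTime;

fn main() {
    // `UnixTime::new` now takes nanoseconds as the second argument.
    let stamp = UnixTime::new(1_700_000_000, 500_000_000);
    // Millisecond subsecond resolution is still available via the new helper.
    let stamp_millis = UnixTime::new_subsec_millis(1_700_000_000, 500);
    // `secs` was renamed to `as_secs`.
    assert_eq!(stamp.as_secs(), stamp_millis.as_secs());
}
```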
View File

@ -1,8 +1,8 @@
[package]
name = "spacepackets"
version = "0.5.4"
version = "0.11.1"
edition = "2021"
rust-version = "1.60"
rust-version = "1.65"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
description = "Generic implementations for various CCSDS and ECSS packet standards"
homepage = "https://egit.irs.uni-stuttgart.de/rust/spacepackets"
@ -13,12 +13,19 @@ categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-sup
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
zerocopy = "0.6"
crc = "3"
delegate = ">=0.8, <0.10"
delegate = ">=0.8, <0.11"
[dependencies.zerocopy]
version = "0.7"
features = ["derive"]
[dependencies.thiserror]
version = "1"
optional = true
[dependencies.num_enum]
version = "0.5"
version = ">0.5, <=0.7"
default-features = false
[dependencies.serde]
@ -27,23 +34,37 @@ optional = true
default-features = false
features = ["derive"]
[dependencies.time]
version = "0.3"
default-features = false
optional = true
[dependencies.chrono]
version = "0.4"
default-features = false
optional = true
[dependencies.num-traits]
version = "0.2"
default-features = false
[dev-dependencies.postcard]
version = "1.0"
[dependencies.defmt]
version = "0.3"
optional = true
[dev-dependencies]
postcard = "1"
chrono = "0.4"
[features]
default = ["std"]
std = ["chrono/std", "chrono/clock", "alloc"]
std = ["chrono/std", "chrono/clock", "alloc", "thiserror"]
serde = ["dep:serde", "chrono/serde"]
alloc = ["postcard/alloc", "chrono/alloc"]
chrono = ["dep:chrono"]
timelib = ["dep:time"]
defmt = ["dep:defmt"]
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "doc_cfg"]
rustdoc-args = ["--cfg", "docs_rs", "--generate-link-to-definition"]

View File

@ -1,6 +1,7 @@
[![Crates.io](https://img.shields.io/crates/v/spacepackets)](https://crates.io/crates/spacepackets)
[![docs.rs](https://img.shields.io/docsrs/spacepackets)](https://docs.rs/spacepackets)
[![ci](https://github.com/us-irs/spacepackets-rs/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/us-irs/spacepackets-rs/actions/workflows/ci.yml)
[![coverage](https://shields.io/endpoint?url=https://absatsw.irs.uni-stuttgart.de/projects/spacepackets/coverage-rs/latest/coverage.json)](https://absatsw.irs.uni-stuttgart.de/projects/spacepackets/coverage-rs/latest/index.html)
ECSS and CCSDS Spacepackets
======
@ -13,6 +14,8 @@ Currently, this includes the following components:
- Space Packet implementation according to
[CCSDS Blue Book 133.0-B-2](https://public.ccsds.org/Pubs/133x0b2e1.pdf)
- CCSDS File Delivery Protocol (CFDP) packet implementations according to
[CCSDS Blue Book 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf)
- PUS Telecommand and PUS Telemetry implementation according to the
[ECSS-E-ST-70-41C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
- CUC (CCSDS Unsegmented Time Code) implementation according to
@ -26,10 +29,6 @@ Currently, this includes the following components:
`spacepackets` supports various runtime environments and is also suitable for `no_std` environments.
It also offers optional support for [`serde`](https://serde.rs/). This allows serializing and
deserializing them with an appropriate `serde` provider like
[`postcard`](https://github.com/jamesmunns/postcard).
## Default features
- [`std`](https://doc.rust-lang.org/std/): Enables functionality relying on the standard library.
@ -40,8 +39,35 @@ deserializing them with an appropriate `serde` provider like
## Optional Features
- [`serde`](https://serde.rs/): Adds `serde` support for most types by adding `Serialize` and `Deserialize` `derive`s
- [`chrono`](https://crates.io/crates/chrono): Add basic support for the `chrono` time library.
- [`timelib`](https://crates.io/crates/time): Add basic support for the `time` time library.
- [`defmt`](https://defmt.ferrous-systems.com/): Add support for `defmt` by adding the
[`defmt::Format`](https://defmt.ferrous-systems.com/format) derive on many types.
# Examples
You can check the [documentation](https://docs.rs/spacepackets) of individual modules for various
usage examples.
# Coverage
Coverage was generated using [`grcov`](https://github.com/mozilla/grcov). If you have not done so
already, install the `llvm-tools-preview`:
```sh
rustup component add llvm-tools-preview
cargo install grcov --locked
```
After that, you can simply run `coverage.py` to test the project with coverage. You can optionally
supply the `--open` flag to open the coverage report in your web browser.
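For reference, a typical invocation might look like the following; both flags come from the `coverage.py` script added later in this changeset.

```sh
# Generate the HTML report and open it in the default browser.
./coverage.py --open
# Alternatively, emit an lcov report (converted to HTML via genhtml).
./coverage.py --format lcov
```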
# Miri
You can run the [`miri`](https://github.com/rust-lang/miri) tool on this library to check for
undefined behaviour (UB). This library does not use any `unsafe` code blocks, but `miri` could
still catch UB from used libraries.
```sh
cargo +nightly miri nextest run --all-features
```

View File

@ -6,10 +6,23 @@ RUN apt-get update
RUN apt-get --yes upgrade
# tzdata is a dependency, won't install otherwise
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get --yes install rsync curl
# set CROSS_CONTAINER_IN_CONTAINER to inform `cross` that it is executed from within a container
ENV CROSS_CONTAINER_IN_CONTAINER=true
RUN rustup install nightly && \
rustup target add thumbv7em-none-eabihf armv7-unknown-linux-gnueabihf && \
rustup component add rustfmt clippy
rustup component add rustfmt clippy llvm-tools-preview
# Get grcov
RUN curl -sSL https://github.com/mozilla/grcov/releases/download/v0.8.19/grcov-x86_64-unknown-linux-gnu.tar.bz2 | tar -xj --directory /usr/local/bin
# Get nextest
RUN curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin
# SSH stuff to allow deployment to doc server
RUN adduser --uid 114 jenkins
# Add documentation server to known hosts
RUN echo "|1|/LzCV4BuTmTb2wKnD146l9fTKgQ=|NJJtVjvWbtRt8OYqFgcYRnMQyVw= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNL8ssTonYtgiR/6RRlSIK9WU1ywOcJmxFTLcEblAwH7oifZzmYq3XRfwXrgfMpylEfMFYfCU8JRqtmi19xc21A=" >> /etc/ssh/ssh_known_hosts
RUN echo "|1|CcBvBc3EG03G+XM5rqRHs6gK/Gg=|oGeJQ+1I8NGI2THIkJsW92DpTzs= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNL8ssTonYtgiR/6RRlSIK9WU1ywOcJmxFTLcEblAwH7oifZzmYq3XRfwXrgfMpylEfMFYfCU8JRqtmi19xc21A=" >> /etc/ssh/ssh_known_hosts

117
automation/Jenkinsfile vendored
View File

@ -1,52 +1,79 @@
pipeline {
agent {
dockerfile {
dir 'automation'
reuseNode true
}
agent {
dockerfile {
dir 'automation'
reuseNode true
args '--network host'
}
}
stages {
stage('Clippy') {
steps {
sh 'cargo clippy'
}
}
stage('Docs') {
steps {
sh 'cargo +nightly doc --all-features'
}
}
stage('Rustfmt') {
steps {
sh 'cargo fmt --all --check'
}
}
stage('Test') {
steps {
sh 'cargo test --all-features'
}
}
stage('Check with all features') {
steps {
sh 'cargo check --all-features'
}
}
stage('Check with no features') {
steps {
sh 'cargo check --no-default-features'
}
}
stage('Check Cross Embedded Bare Metal') {
steps {
sh 'cargo check --target thumbv7em-none-eabihf --no-default-features'
}
}
stage('Check Cross Embedded Linux') {
steps {
sh 'cargo check --target armv7-unknown-linux-gnueabihf'
}
stages {
stage('Rust Toolchain Info') {
steps {
sh 'rustc --version'
}
}
stage('Clippy') {
steps {
sh 'cargo clippy'
}
}
stage('Docs') {
steps {
sh 'cargo +nightly doc --all-features'
}
}
stage('Rustfmt') {
steps {
sh 'cargo fmt --all --check'
}
}
stage('Test') {
steps {
sh 'cargo nextest r --all-features'
sh 'cargo test --doc'
}
}
stage('Check with all features') {
steps {
sh 'cargo check --all-features'
}
}
stage('Check with no features') {
steps {
sh 'cargo check --no-default-features'
}
}
stage('Check Cross Embedded Bare Metal') {
steps {
sh 'cargo check --target thumbv7em-none-eabihf --no-default-features'
}
}
stage('Check Cross Embedded Linux') {
steps {
sh 'cargo check --target armv7-unknown-linux-gnueabihf'
}
}
stage('Run test with Coverage') {
when {
anyOf {
branch 'main';
branch pattern: 'cov-deployment*'
}
}
steps {
withEnv(['RUSTFLAGS=-Cinstrument-coverage', 'LLVM_PROFILE_FILE=target/coverage/%p-%m.profraw']) {
echo "Executing tests with coverage"
sh 'cargo clean'
sh 'cargo test --all-features'
sh 'grcov . -s . --binary-path ./target/debug -t html --branch --ignore-not-existing -o ./target/debug/coverage/'
sshagent(credentials: ['documentation-buildfix']) {
// Deploy to Apache webserver
sh 'rsync --mkpath -r --delete ./target/debug/coverage/ buildfix@documentation.irs.uni-stuttgart.de:/projects/spacepackets/coverage-rs/latest/'
}
}
}
}
}
}

54
coverage.py Executable file
View File

@ -0,0 +1,54 @@
#!/usr/bin/env python3
import os
import logging
import argparse
import webbrowser
_LOGGER = logging.getLogger()
def generate_cov_report(open_report: bool, format: str):
logging.basicConfig(level=logging.INFO)
os.environ["RUSTFLAGS"] = "-Cinstrument-coverage"
os.environ["LLVM_PROFILE_FILE"] = "target/coverage/%p-%m.profraw"
_LOGGER.info("Executing tests with coverage")
os.system("cargo test --all-features")
out_path = "./target/debug/coverage"
if format == "lcov":
out_path = "./target/debug/lcov.info"
os.system(
f"grcov . -s . --binary-path ./target/debug/ -t {format} --branch --ignore-not-existing "
f"-o {out_path}"
)
if format == "lcov":
os.system(
"genhtml -o ./target/debug/coverage/ --show-details --highlight --ignore-errors source "
"--legend ./target/debug/lcov.info"
)
if open_report:
coverage_report_path = os.path.abspath("./target/debug/coverage/index.html")
webbrowser.open_new_tab(coverage_report_path)
_LOGGER.info("Done")
def main():
parser = argparse.ArgumentParser(
description="Generate coverage report and optionally open it in a browser"
)
parser.add_argument(
"--open", action="store_true", help="Open the coverage report in a browser"
)
parser.add_argument(
"--format",
choices=["html", "lcov"],
default="html",
help="Choose report format (html or lcov)",
)
args = parser.parse_args()
generate_cov_report(args.open, args.format)
if __name__ == "__main__":
main()

View File

@ -4,15 +4,20 @@ Checklist for new releases
# Pre-Release
1. Make sure any new modules are documented sufficiently and check docs with
`cargo doc --all-features --open`.
`cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docs_rs"]' --open`.
2. Bump version specifier in `Cargo.toml`.
3. Update `CHANGELOG.md`: Convert `unreleased` section into version section with date and add new
`unreleased` section.
4. Run `cargo test --all-features`.
4. Run `cargo test --all-features` or `cargo nextest r --all-features` together with
`cargo test --doc`.
5. Run `cargo fmt` and `cargo clippy`. Check `cargo msrv` against MSRV in `Cargo.toml`.
6. Wait for CI/CD results for EGit and Github. These also check cross-compilation for bare-metal
targets.
# Release
1. `cargo publish`
# Post-Release
1. Create a new release on `EGit` based on the release branch.

347
src/cfdp/lv.rs Normal file
View File

@ -0,0 +1,347 @@
//! Generic CFDP length-value (LV) abstraction as specified in CFDP 5.1.8.
use crate::cfdp::TlvLvError;
use crate::ByteConversionError;
use core::str::Utf8Error;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::string::String;
pub const MIN_LV_LEN: usize = 1;
/// Generic CFDP length-value (LV) abstraction as specified in CFDP 5.1.8.
///
/// Please note that this class is zero-copy and does not generate a copy of the value data for
/// both the regular [Self::new] constructor and the [Self::from_bytes] constructor.
///
/// # Lifetimes
/// * `data`: If the LV is generated from a raw bytestream, this will be the lifetime of
/// the raw bytestream. If the LV is generated from a raw slice or a similar data reference,
/// this will be the lifetime of that data reference.
#[derive(Debug, Copy, Clone, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct Lv<'data> {
data: &'data [u8],
// If the LV was generated from a raw bytestream, this will contain the start of the
// full LV.
pub(crate) raw_data: Option<&'data [u8]>,
}
impl PartialEq for Lv<'_> {
fn eq(&self, other: &Self) -> bool {
self.data == other.data
}
}
pub(crate) fn generic_len_check_data_serialization(
buf: &[u8],
data_len: usize,
min_overhead: usize,
) -> Result<(), ByteConversionError> {
if buf.len() < data_len + min_overhead {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: data_len + min_overhead,
});
}
Ok(())
}
pub(crate) fn generic_len_check_deserialization(
buf: &[u8],
min_overhead: usize,
) -> Result<(), ByteConversionError> {
if buf.len() < min_overhead {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: min_overhead,
});
}
Ok(())
}
impl<'data> Lv<'data> {
#[inline]
pub fn new(data: &[u8]) -> Result<Lv, TlvLvError> {
if data.len() > u8::MAX as usize {
return Err(TlvLvError::DataTooLarge(data.len()));
}
Ok(Lv {
data,
raw_data: None,
})
}
/// Creates a LV with an empty value field.
#[inline]
pub fn new_empty() -> Lv<'data> {
Lv {
data: &[],
raw_data: None,
}
}
/// Helper function to build a string LV. This is especially useful for the file or directory
/// path LVs
#[inline]
pub fn new_from_str(str_slice: &str) -> Result<Lv, TlvLvError> {
Self::new(str_slice.as_bytes())
}
/// Helper function to build a string LV. This is especially useful for the file or directory
/// path LVs
#[cfg(feature = "std")]
#[inline]
pub fn new_from_string(string: &'data String) -> Result<Lv<'data>, TlvLvError> {
Self::new(string.as_bytes())
}
/// Returns the length of the value part, not including the length byte.
#[inline]
pub fn len_value(&self) -> usize {
self.data.len()
}
/// Returns the full raw length, including the length byte.
#[inline]
pub fn len_full(&self) -> usize {
self.len_value() + 1
}
/// Checks whether the value field is empty.
#[inline]
pub fn is_empty(&self) -> bool {
self.data.len() == 0
}
#[inline]
pub fn value(&self) -> &[u8] {
self.data
}
/// If the LV was generated from a raw bytestream using [Self::from_bytes], the raw start
/// of the LV can be retrieved with this method.
#[inline]
pub fn raw_data(&self) -> Option<&[u8]> {
self.raw_data
}
/// Convenience function to extract the value as a [str]. This is useful if the LV is
/// known to contain a [str], for example a file name.
#[inline]
pub fn value_as_str(&self) -> Option<Result<&'data str, Utf8Error>> {
if self.is_empty() {
return None;
}
Some(core::str::from_utf8(self.data))
}
/// Writes the LV to a raw buffer. Please note that the first byte will contain the length
/// of the value, but the value may not exceed a length of [u8::MAX].
pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
generic_len_check_data_serialization(buf, self.len_value(), MIN_LV_LEN)?;
Ok(self.write_to_be_bytes_no_len_check(buf))
}
/// Reads a LV from a raw buffer.
#[inline]
pub fn from_bytes(buf: &'data [u8]) -> Result<Lv<'data>, ByteConversionError> {
generic_len_check_deserialization(buf, MIN_LV_LEN)?;
Self::from_be_bytes_no_len_check(buf)
}
pub(crate) fn write_to_be_bytes_no_len_check(&self, buf: &mut [u8]) -> usize {
if self.is_empty() {
buf[0] = 0;
return MIN_LV_LEN;
}
// Length check in constructor ensures the length always has a valid value.
buf[0] = self.data.len() as u8;
buf[MIN_LV_LEN..self.data.len() + MIN_LV_LEN].copy_from_slice(self.data);
MIN_LV_LEN + self.data.len()
}
#[inline]
pub(crate) fn from_be_bytes_no_len_check(
buf: &'data [u8],
) -> Result<Lv<'data>, ByteConversionError> {
let value_len = buf[0] as usize;
generic_len_check_deserialization(buf, value_len + MIN_LV_LEN)?;
Ok(Self {
data: &buf[MIN_LV_LEN..MIN_LV_LEN + value_len],
raw_data: Some(buf),
})
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use alloc::string::ToString;
use crate::cfdp::TlvLvError;
use crate::ByteConversionError;
use std::string::String;
#[test]
fn test_basic() {
let lv_data: [u8; 4] = [1, 2, 3, 4];
let lv_res = Lv::new(&lv_data);
assert!(lv_res.is_ok());
let lv = lv_res.unwrap();
assert!(!lv.value().is_empty());
let val = lv.value();
assert_eq!(val[0], 1);
assert_eq!(val[1], 2);
assert_eq!(val[2], 3);
assert_eq!(val[3], 4);
assert!(!lv.is_empty());
assert_eq!(lv.len_full(), 5);
assert_eq!(lv.len_value(), 4);
}
#[test]
fn test_empty() {
let lv_empty = Lv::new_empty();
assert_eq!(lv_empty.len_value(), 0);
assert_eq!(lv_empty.len_full(), 1);
assert!(lv_empty.is_empty());
let mut buf: [u8; 4] = [0xff; 4];
let res = lv_empty.write_to_be_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, 1);
assert_eq!(buf[0], 0);
}
#[test]
fn test_serialization() {
let lv_data: [u8; 4] = [1, 2, 3, 4];
let lv_res = Lv::new(&lv_data);
assert!(lv_res.is_ok());
let lv = lv_res.unwrap();
let mut buf: [u8; 16] = [0; 16];
let res = lv.write_to_be_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, 5);
assert_eq!(buf[0], 4);
assert_eq!(buf[1], 1);
assert_eq!(buf[2], 2);
assert_eq!(buf[3], 3);
assert_eq!(buf[4], 4);
}
#[test]
fn test_deserialization() {
let mut buf: [u8; 16] = [0; 16];
buf[0] = 4;
buf[1] = 1;
buf[2] = 2;
buf[3] = 3;
buf[4] = 4;
let lv = Lv::from_bytes(&buf);
assert!(lv.is_ok());
let lv = lv.unwrap();
assert!(!lv.is_empty());
assert_eq!(lv.len_value(), 4);
assert_eq!(lv.len_full(), 5);
assert!(lv.raw_data().is_some());
assert_eq!(lv.raw_data().unwrap(), buf);
let val = lv.value();
assert_eq!(val[0], 1);
assert_eq!(val[1], 2);
assert_eq!(val[2], 3);
assert_eq!(val[3], 4);
}
#[test]
fn test_deserialization_empty() {
let buf: [u8; 2] = [0; 2];
let lv_empty = Lv::from_bytes(&buf);
assert!(lv_empty.is_ok());
let lv_empty = lv_empty.unwrap();
assert!(lv_empty.is_empty());
}
#[test]
fn test_data_too_large() {
let data_big: [u8; u8::MAX as usize + 1] = [0; u8::MAX as usize + 1];
let lv = Lv::new(&data_big);
assert!(lv.is_err());
let error = lv.unwrap_err();
if let TlvLvError::DataTooLarge(size) = error {
assert_eq!(size, u8::MAX as usize + 1);
assert_eq!(
error.to_string(),
"data with size 256 larger than allowed 255 bytes"
);
} else {
panic!("invalid exception {:?}", error)
}
}
#[test]
fn test_serialization_buf_too_small() {
let mut buf: [u8; 3] = [0; 3];
let lv_data: [u8; 4] = [1, 2, 3, 4];
let lv = Lv::new(&lv_data).unwrap();
let res = lv.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let error = res.unwrap_err();
if let ByteConversionError::ToSliceTooSmall { found, expected } = error {
assert_eq!(expected, 5);
assert_eq!(found, 3);
} else {
panic!("invalid error {}", error);
}
}
#[test]
fn test_deserialization_buf_too_small() {
let mut buf: [u8; 3] = [0; 3];
buf[0] = 4;
let res = Lv::from_bytes(&buf);
assert!(res.is_err());
let error = res.unwrap_err();
if let ByteConversionError::FromSliceTooSmall { found, expected } = error {
assert_eq!(found, 3);
assert_eq!(expected, 5);
} else {
panic!("invalid error {}", error);
}
}
fn verify_test_str_lv(lv: Lv) {
let mut buf: [u8; 16] = [0; 16];
let res = lv.write_to_be_bytes(&mut buf);
assert!(res.is_ok());
let res = res.unwrap();
assert_eq!(res, 8 + 1);
assert_eq!(buf[0], 8);
assert_eq!(buf[1], b't');
assert_eq!(buf[2], b'e');
assert_eq!(buf[3], b's');
assert_eq!(buf[4], b't');
assert_eq!(buf[5], b'.');
assert_eq!(buf[6], b'b');
assert_eq!(buf[7], b'i');
assert_eq!(buf[8], b'n');
}
#[test]
fn test_str_helper() {
let test_str = "test.bin";
let str_lv = Lv::new_from_str(test_str);
assert!(str_lv.is_ok());
verify_test_str_lv(str_lv.unwrap());
}
#[test]
fn test_string_helper() {
let string = String::from("test.bin");
let str_lv = Lv::new_from_string(&string);
assert!(str_lv.is_ok());
verify_test_str_lv(str_lv.unwrap());
}
}
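A minimal usage sketch for the `Lv` type shown above: every call (`new_from_str`, `write_to_be_bytes`, `len_full`, `from_bytes`, `value_as_str`) appears in the file, while the `spacepackets::cfdp::lv` import path is inferred from the file location in this diff and is an assumption.

```rust
// Usage sketch for the Lv type from src/cfdp/lv.rs above; the import path is
// inferred from the file location and is an assumption.
use spacepackets::cfdp::lv::Lv;

fn main() {
    // Build a string LV, e.g. for a file path, and write it to a buffer.
    let lv = Lv::new_from_str("test.bin").expect("value fits into 255 bytes");
    let mut buf = [0u8; 16];
    let written = lv
        .write_to_be_bytes(&mut buf)
        .expect("buffer is large enough");
    // The full length is the value length plus one length byte.
    assert_eq!(written, lv.len_full());

    // Read the LV back; the first byte holds the value length.
    let read_back = Lv::from_bytes(&buf).expect("buffer contains a valid LV");
    assert_eq!(read_back.value_as_str().unwrap().unwrap(), "test.bin");
}
```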

304
src/cfdp/mod.rs Normal file
View File

@ -0,0 +1,304 @@
//! Low-level CCSDS File Delivery Protocol (CFDP) support according to [CCSDS 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf).
use crate::ByteConversionError;
use core::fmt::{Display, Formatter};
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
pub mod lv;
pub mod pdu;
pub mod tlv;
/// This is the name of the standard this module is based on.
pub const CFDP_VERSION_2_NAME: &str = "CCSDS 727.0-B-5";
/// Currently, only this version is supported.
pub const CFDP_VERSION_2: u8 = 0b001;
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum PduType {
FileDirective = 0,
FileData = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum Direction {
TowardsReceiver = 0,
TowardsSender = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum TransmissionMode {
Acknowledged = 0,
Unacknowledged = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum CrcFlag {
NoCrc = 0,
WithCrc = 1,
}
impl From<bool> for CrcFlag {
fn from(value: bool) -> Self {
if value {
return CrcFlag::WithCrc;
}
CrcFlag::NoCrc
}
}
impl From<CrcFlag> for bool {
fn from(value: CrcFlag) -> Self {
if value == CrcFlag::WithCrc {
return true;
}
false
}
}
/// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum SegmentMetadataFlag {
NotPresent = 0,
Present = 1,
}
/// Always 0 and ignored for File Directive PDUs (CCSDS 727.0-B-5 P.75)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum SegmentationControl {
NoRecordBoundaryPreservation = 0,
WithRecordBoundaryPreservation = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum FaultHandlerCode {
NoticeOfCancellation = 0b0001,
NoticeOfSuspension = 0b0010,
IgnoreError = 0b0011,
AbandonTransaction = 0b0100,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum ConditionCode {
/// This is not an error condition for which a fault handler override can be specified
NoError = 0b0000,
PositiveAckLimitReached = 0b0001,
KeepAliveLimitReached = 0b0010,
InvalidTransmissionMode = 0b0011,
FilestoreRejection = 0b0100,
FileChecksumFailure = 0b0101,
FileSizeError = 0b0110,
NakLimitReached = 0b0111,
InactivityDetected = 0b1000,
CheckLimitReached = 0b1001,
UnsupportedChecksumType = 0b1011,
/// Not an actual fault condition for which fault handler overrides can be specified
SuspendRequestReceived = 0b1110,
/// Not an actual fault condition for which fault handler overrides can be specified
CancelRequestReceived = 0b1111,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum LargeFileFlag {
/// 32 bit maximum file size and FSS size
Normal = 0,
/// 64 bit maximum file size and FSS size
Large = 1,
}
/// Transaction status for the ACK PDU field according to chapter 5.2.4 of the CFDP standard.
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum TransactionStatus {
/// Transaction is not currently active and the CFDP implementation does not retain a
/// transaction history.
Undefined = 0b00,
Active = 0b01,
/// Transaction was active in the past and was terminated.
Terminated = 0b10,
/// The CFDP implementation does retain a transaction history, and the transaction is not and
/// never was active at this entity.
Unrecognized = 0b11,
}
/// Checksum types according to the
/// [SANA Checksum Types registry](https://sanaregistry.org/r/checksum_identifiers/)
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum ChecksumType {
/// Modular legacy checksum
Modular = 0,
Crc32Proximity1 = 1,
Crc32C = 2,
/// Polynomial: 0x4C11DB7. Preferred checksum for now.
Crc32 = 3,
NullChecksum = 15,
}
impl Default for ChecksumType {
fn default() -> Self {
Self::NullChecksum
}
}
pub const NULL_CHECKSUM_U32: [u8; 4] = [0; 4];
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum TlvLvError {
DataTooLarge(usize),
ByteConversion(ByteConversionError),
/// First value: Found value. Second value: Expected value if there is one.
InvalidTlvTypeField {
found: u8,
expected: Option<u8>,
},
/// Logically invalid value length detected. The value length may not exceed 255 bytes.
/// Depending on the concrete TLV type, the value length may also be logically invalid.
InvalidValueLength(usize),
/// Only applies to filestore requests and responses. Second name was missing where one is
/// expected.
SecondNameMissing,
/// Invalid action code for filestore requests or responses.
InvalidFilestoreActionCode(u8),
}
impl From<ByteConversionError> for TlvLvError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversion(value)
}
}
impl Display for TlvLvError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
TlvLvError::DataTooLarge(data_len) => {
write!(
f,
"data with size {} larger than allowed {} bytes",
data_len,
u8::MAX
)
}
TlvLvError::ByteConversion(e) => {
write!(f, "tlv or lv byte conversion: {}", e)
}
TlvLvError::InvalidTlvTypeField { found, expected } => {
write!(
f,
"invalid TLV type field, found {found}, expected {expected:?}"
)
}
TlvLvError::InvalidValueLength(len) => {
write!(f, "invalid value length {len}")
}
TlvLvError::SecondNameMissing => {
write!(f, "second name missing for filestore request or response")
}
TlvLvError::InvalidFilestoreActionCode(raw) => {
write!(f, "invalid filestore action code with raw value {raw}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for TlvLvError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
TlvLvError::ByteConversion(e) => Some(e),
_ => None,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[cfg(feature = "serde")]
use crate::tests::generic_serde_test;
#[test]
fn test_crc_from_bool() {
assert_eq!(CrcFlag::from(false), CrcFlag::NoCrc);
}
#[test]
fn test_crc_flag_to_bool() {
let is_true: bool = CrcFlag::WithCrc.into();
assert!(is_true);
let is_false: bool = CrcFlag::NoCrc.into();
assert!(!is_false);
}
#[test]
fn test_default_checksum_type() {
let checksum = ChecksumType::default();
assert_eq!(checksum, ChecksumType::NullChecksum);
}
#[test]
fn test_fault_handler_code_from_u8() {
let fault_handler_code_raw = FaultHandlerCode::NoticeOfSuspension as u8;
let fault_handler_code = FaultHandlerCode::try_from(fault_handler_code_raw).unwrap();
assert_eq!(fault_handler_code, FaultHandlerCode::NoticeOfSuspension);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_impl_pdu_type() {
generic_serde_test(PduType::FileData);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_impl_direction() {
generic_serde_test(Direction::TowardsReceiver);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_impl_transmission_mode() {
generic_serde_test(TransmissionMode::Unacknowledged);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_fault_handler_code() {
generic_serde_test(FaultHandlerCode::NoticeOfCancellation);
}
}

335
src/cfdp/pdu/ack.rs Normal file

@ -0,0 +1,335 @@
use crate::{
cfdp::{ConditionCode, CrcFlag, Direction, TransactionStatus},
ByteConversionError,
};
use super::{
add_pdu_crc, generic_length_checks_pdu_deserialization, CfdpPdu, FileDirectiveType, PduError,
PduHeader, WritablePduPacket,
};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// ACK PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.2.4.
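///
/// # Example
///
/// A minimal creation and serialization sketch based on the constructors and tests below.
/// The `spacepackets` import paths and the byte-sized entity ID and sequence number
/// arguments are assumptions made for illustration, which is why the example is not compiled.
///
/// ```ignore
/// use spacepackets::cfdp::pdu::{CommonPduConfig, PduHeader, WritablePduPacket};
/// use spacepackets::cfdp::pdu::ack::AckPdu;
/// use spacepackets::cfdp::{ConditionCode, TransactionStatus};
///
/// // Hypothetical source ID, destination ID and transaction sequence number.
/// let pdu_conf = CommonPduConfig::new_with_byte_fields(1, 2, 3).unwrap();
/// let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
/// // Acknowledge a Finished PDU: no error, transaction still active.
/// let ack_pdu = AckPdu::new_for_finished_pdu(
///     pdu_header,
///     ConditionCode::NoError,
///     TransactionStatus::Active,
/// );
/// let mut buf: [u8; 32] = [0; 32];
/// let written = ack_pdu.write_to_bytes(&mut buf).expect("writing ACK PDU failed");
/// assert_eq!(written, ack_pdu.len_written());
/// ```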
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct AckPdu {
pdu_header: PduHeader,
directive_code_of_acked_pdu: FileDirectiveType,
condition_code: ConditionCode,
transaction_status: TransactionStatus,
}
impl AckPdu {
pub fn new(
mut pdu_header: PduHeader,
directive_code_of_acked_pdu: FileDirectiveType,
condition_code: ConditionCode,
transaction_status: TransactionStatus,
) -> Result<Self, PduError> {
if directive_code_of_acked_pdu == FileDirectiveType::EofPdu {
pdu_header.pdu_conf.direction = Direction::TowardsSender;
} else if directive_code_of_acked_pdu == FileDirectiveType::FinishedPdu {
pdu_header.pdu_conf.direction = Direction::TowardsReceiver;
} else {
return Err(PduError::InvalidDirectiveType {
found: directive_code_of_acked_pdu as u8,
expected: None,
});
}
// Force correct direction flag.
let mut ack_pdu = Self {
pdu_header,
directive_code_of_acked_pdu,
condition_code,
transaction_status,
};
ack_pdu.pdu_header.pdu_datafield_len = ack_pdu.calc_pdu_datafield_len() as u16;
Ok(ack_pdu)
}
pub fn new_for_eof_pdu(
pdu_header: PduHeader,
condition_code: ConditionCode,
transaction_status: TransactionStatus,
) -> Self {
// Unwrap okay here, [new] can only fail on invalid directive codes.
Self::new(
pdu_header,
FileDirectiveType::EofPdu,
condition_code,
transaction_status,
)
.unwrap()
}
pub fn new_for_finished_pdu(
pdu_header: PduHeader,
condition_code: ConditionCode,
transaction_status: TransactionStatus,
) -> Self {
// Unwrap okay here, [new] can only fail on invalid directive codes.
Self::new(
pdu_header,
FileDirectiveType::FinishedPdu,
condition_code,
transaction_status,
)
.unwrap()
}
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
pub fn directive_code_of_acked_pdu(&self) -> FileDirectiveType {
self.directive_code_of_acked_pdu
}
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
pub fn transaction_status(&self) -> TransactionStatus {
self.transaction_status
}
fn calc_pdu_datafield_len(&self) -> usize {
if self.crc_flag() == CrcFlag::WithCrc {
return 5;
}
3
}
pub fn from_bytes(buf: &[u8]) -> Result<AckPdu, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
generic_length_checks_pdu_deserialization(buf, current_idx + 3, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::AckPdu),
}
})?;
if directive_type != FileDirectiveType::AckPdu {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::AckPdu,
});
}
current_idx += 1;
let acked_directive_type =
FileDirectiveType::try_from(buf[current_idx] >> 4).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: None,
}
})?;
if acked_directive_type != FileDirectiveType::EofPdu
&& acked_directive_type != FileDirectiveType::FinishedPdu
{
return Err(PduError::InvalidDirectiveType {
found: acked_directive_type as u8,
expected: None,
});
}
current_idx += 1;
let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111)
.map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?;
let transaction_status = TransactionStatus::try_from(buf[current_idx] & 0b11).unwrap();
Self::new(
pdu_header,
acked_directive_type,
condition_code,
transaction_status,
)
}
}
impl CfdpPdu for AckPdu {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::AckPdu)
}
}
impl WritablePduPacket for AckPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: expected_len,
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::AckPdu as u8;
current_idx += 1;
buf[current_idx] = (self.directive_code_of_acked_pdu as u8) << 4;
if self.directive_code_of_acked_pdu == FileDirectiveType::FinishedPdu {
// This is the directive subtype code. It needs to be set to 0b0001 if the ACK PDU
// acknowledges a Finished PDU, and to 0b0000 otherwise.
buf[current_idx] |= 0b0001;
}
current_idx += 1;
buf[current_idx] = ((self.condition_code as u8) << 4) | (self.transaction_status as u8);
current_idx += 1;
if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
#[cfg(test)]
mod tests {
use crate::cfdp::{
pdu::tests::{common_pdu_conf, verify_raw_header, TEST_DEST_ID, TEST_SEQ_NUM, TEST_SRC_ID},
LargeFileFlag, PduType, TransmissionMode,
};
use super::*;
#[cfg(feature = "serde")]
use crate::tests::generic_serde_test;
fn verify_state(ack_pdu: &AckPdu, expected_crc_flag: CrcFlag, expected_dir: Direction) {
assert_eq!(ack_pdu.condition_code(), ConditionCode::NoError);
assert_eq!(ack_pdu.transaction_status(), TransactionStatus::Active);
assert_eq!(ack_pdu.crc_flag(), expected_crc_flag);
assert_eq!(ack_pdu.file_flag(), LargeFileFlag::Normal);
assert_eq!(ack_pdu.pdu_type(), PduType::FileDirective);
assert_eq!(
ack_pdu.file_directive_type(),
Some(FileDirectiveType::AckPdu)
);
assert_eq!(ack_pdu.transmission_mode(), TransmissionMode::Acknowledged);
assert_eq!(ack_pdu.direction(), expected_dir);
assert_eq!(ack_pdu.source_id(), TEST_SRC_ID.into());
assert_eq!(ack_pdu.dest_id(), TEST_DEST_ID.into());
assert_eq!(ack_pdu.transaction_seq_num(), TEST_SEQ_NUM.into());
}
#[test]
fn test_basic() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let ack_pdu = AckPdu::new(
pdu_header,
FileDirectiveType::FinishedPdu,
ConditionCode::NoError,
TransactionStatus::Active,
)
.expect("creating ACK PDU failed");
assert_eq!(
ack_pdu.directive_code_of_acked_pdu(),
FileDirectiveType::FinishedPdu
);
verify_state(&ack_pdu, CrcFlag::NoCrc, Direction::TowardsReceiver);
}
fn generic_serialization_test(
condition_code: ConditionCode,
transaction_status: TransactionStatus,
) {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu(pdu_header, condition_code, transaction_status);
let mut buf: [u8; 64] = [0; 64];
let res = ack_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, ack_pdu.len_written());
verify_raw_header(ack_pdu.pdu_header(), &buf);
assert_eq!(buf[7], FileDirectiveType::AckPdu as u8);
assert_eq!((buf[8] >> 4) & 0b1111, FileDirectiveType::FinishedPdu as u8);
assert_eq!(buf[8] & 0b1111, 0b0001);
assert_eq!((buf[9] >> 4) & 0b1111, condition_code as u8);
assert_eq!(buf[9] & 0b11, transaction_status as u8);
assert_eq!(written, 10);
}
#[test]
fn test_serialization_no_error() {
generic_serialization_test(ConditionCode::NoError, TransactionStatus::Active);
}
#[test]
fn test_serialization_fs_error() {
generic_serialization_test(ConditionCode::FileSizeError, TransactionStatus::Terminated);
}
#[test]
fn test_deserialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu(
pdu_header,
ConditionCode::NoError,
TransactionStatus::Active,
);
let ack_vec = ack_pdu.to_vec().unwrap();
let ack_deserialized =
AckPdu::from_bytes(&ack_vec).expect("ACK PDU deserialization failed");
assert_eq!(ack_deserialized, ack_pdu);
}
#[test]
fn test_with_crc() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_finished_pdu(
pdu_header,
ConditionCode::NoError,
TransactionStatus::Active,
);
let ack_vec = ack_pdu.to_vec().unwrap();
assert_eq!(ack_vec.len(), ack_pdu.len_written());
assert_eq!(ack_vec.len(), 12);
let ack_deserialized =
AckPdu::from_bytes(&ack_vec).expect("ACK PDU deserialization failed");
assert_eq!(ack_deserialized, ack_pdu);
}
#[test]
fn test_for_eof_pdu() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_eof_pdu(
pdu_header,
ConditionCode::NoError,
TransactionStatus::Active,
);
assert_eq!(
ack_pdu.directive_code_of_acked_pdu(),
FileDirectiveType::EofPdu
);
verify_state(&ack_pdu, CrcFlag::WithCrc, Direction::TowardsSender);
}
#[test]
#[cfg(feature = "serde")]
fn test_ack_pdu_serialization() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let ack_pdu = AckPdu::new_for_eof_pdu(
pdu_header,
ConditionCode::NoError,
TransactionStatus::Active,
);
generic_serde_test(ack_pdu);
}
}

298
src/cfdp/pdu/eof.rs Normal file

@ -0,0 +1,298 @@
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
FileDirectiveType, PduError, PduHeader,
};
use crate::cfdp::tlv::{EntityIdTlv, WritableTlv};
use crate::cfdp::{ConditionCode, CrcFlag, Direction, LargeFileFlag};
use crate::ByteConversionError;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use super::{CfdpPdu, WritablePduPacket};
/// EOF PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.2.2.
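///
/// # Example
///
/// A minimal creation and serialization sketch based on the constructor and tests below.
/// The `spacepackets` import paths and the byte field arguments are assumptions made for
/// illustration, which is why the example is not compiled.
///
/// ```ignore
/// use spacepackets::cfdp::pdu::{CommonPduConfig, PduHeader, WritablePduPacket};
/// use spacepackets::cfdp::pdu::eof::EofPdu;
///
/// let pdu_conf = CommonPduConfig::new_with_byte_fields(1, 2, 3).unwrap();
/// let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
/// // EOF (No error) PDU with a file checksum of 0x01020304 and a file size of 12 bytes.
/// let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
/// let eof_vec = eof_pdu.to_vec().expect("writing EOF PDU failed");
/// assert_eq!(eof_vec.len(), eof_pdu.len_written());
/// ```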
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct EofPdu {
pdu_header: PduHeader,
condition_code: ConditionCode,
file_checksum: u32,
file_size: u64,
fault_location: Option<EntityIdTlv>,
}
impl EofPdu {
pub fn new_no_error(mut pdu_header: PduHeader, file_checksum: u32, file_size: u64) -> Self {
// Force correct direction flag.
pdu_header.pdu_conf.direction = Direction::TowardsReceiver;
let mut eof_pdu = Self {
pdu_header,
condition_code: ConditionCode::NoError,
file_checksum,
file_size,
fault_location: None,
};
eof_pdu.pdu_header.pdu_datafield_len = eof_pdu.calc_pdu_datafield_len() as u16;
eof_pdu
}
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
pub fn file_checksum(&self) -> u32 {
self.file_checksum
}
pub fn file_size(&self) -> u64 {
self.file_size
}
fn calc_pdu_datafield_len(&self) -> usize {
// One directive type octet, one octet with the 4 bit condition code and 4 spare bits,
// the 4 byte file checksum and at least 4 bytes for the FSS field.
let mut len = 2 + core::mem::size_of::<u32>() + 4;
if self.pdu_header.pdu_conf.file_flag == LargeFileFlag::Large {
len += 4;
}
if let Some(fault_location) = self.fault_location {
len += fault_location.len_full();
}
if self.crc_flag() == CrcFlag::WithCrc {
len += 2;
}
len
}
pub fn from_bytes(buf: &[u8]) -> Result<EofPdu, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let is_large_file = pdu_header.pdu_conf.file_flag == LargeFileFlag::Large;
let mut min_expected_len = 2 + 4 + 4;
if is_large_file {
min_expected_len += 4;
}
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::EofPdu),
}
})?;
if directive_type != FileDirectiveType::EofPdu {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::EofPdu,
});
}
current_idx += 1;
let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111)
.map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?;
current_idx += 1;
let file_checksum =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
current_idx += 4;
let (fss_field_len, file_size) =
read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx..]);
current_idx += fss_field_len;
let mut fault_location = None;
if condition_code != ConditionCode::NoError && current_idx < full_len_without_crc {
fault_location = Some(EntityIdTlv::from_bytes(&buf[current_idx..])?);
}
Ok(Self {
pdu_header,
condition_code,
file_checksum,
file_size,
fault_location,
})
}
}
impl CfdpPdu for EofPdu {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::EofPdu)
}
}
impl WritablePduPacket for EofPdu {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: expected_len,
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::EofPdu as u8;
current_idx += 1;
buf[current_idx] = (self.condition_code as u8) << 4;
current_idx += 1;
buf[current_idx..current_idx + 4].copy_from_slice(&self.file_checksum.to_be_bytes());
current_idx += 4;
current_idx += write_fss_field(
self.pdu_header.pdu_conf.file_flag,
self.file_size,
&mut buf[current_idx..],
)?;
if let Some(fault_location) = self.fault_location {
current_idx += fault_location.write_to_bytes(&mut buf[current_idx..])?;
}
if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cfdp::pdu::tests::{
common_pdu_conf, verify_raw_header, TEST_DEST_ID, TEST_SEQ_NUM, TEST_SRC_ID,
};
use crate::cfdp::pdu::{FileDirectiveType, PduHeader};
use crate::cfdp::{ConditionCode, CrcFlag, LargeFileFlag, PduType, TransmissionMode};
#[cfg(feature = "serde")]
use crate::tests::generic_serde_test;
fn verify_state(eof_pdu: &EofPdu, file_flag: LargeFileFlag) {
assert_eq!(eof_pdu.file_checksum(), 0x01020304);
assert_eq!(eof_pdu.file_size(), 12);
assert_eq!(eof_pdu.condition_code(), ConditionCode::NoError);
assert_eq!(eof_pdu.crc_flag(), CrcFlag::NoCrc);
assert_eq!(eof_pdu.file_flag(), file_flag);
assert_eq!(eof_pdu.pdu_type(), PduType::FileDirective);
assert_eq!(
eof_pdu.file_directive_type(),
Some(FileDirectiveType::EofPdu)
);
assert_eq!(eof_pdu.transmission_mode(), TransmissionMode::Acknowledged);
assert_eq!(eof_pdu.direction(), Direction::TowardsReceiver);
assert_eq!(eof_pdu.source_id(), TEST_SRC_ID.into());
assert_eq!(eof_pdu.dest_id(), TEST_DEST_ID.into());
assert_eq!(eof_pdu.transaction_seq_num(), TEST_SEQ_NUM.into());
}
#[test]
fn test_basic() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
assert_eq!(eof_pdu.len_written(), pdu_header.header_len() + 2 + 4 + 4);
verify_state(&eof_pdu, LargeFileFlag::Normal);
}
#[test]
fn test_serialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
let res = eof_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(written, eof_pdu.len_written());
verify_raw_header(eof_pdu.pdu_header(), &buf);
let mut current_idx = eof_pdu.pdu_header().header_len();
assert_eq!(buf[current_idx], FileDirectiveType::EofPdu as u8);
current_idx += 1;
assert_eq!(
(buf[current_idx] >> 4) & 0b1111,
ConditionCode::NoError as u8
);
current_idx += 1;
assert_eq!(
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()),
0x01020304
);
current_idx += 4;
assert_eq!(
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()),
12
);
current_idx += 4;
assert_eq!(current_idx, written);
}
#[test]
fn test_deserialization() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
eof_pdu.write_to_bytes(&mut buf).unwrap();
let eof_read_back = EofPdu::from_bytes(&buf);
if let Err(e) = eof_read_back {
panic!("deserialization failed with: {e}")
}
let eof_read_back = eof_read_back.unwrap();
assert_eq!(eof_read_back, eof_pdu);
}
#[test]
fn test_write_to_vec() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
let written = eof_pdu.write_to_bytes(&mut buf).unwrap();
let pdu_vec = eof_pdu.to_vec().unwrap();
assert_eq!(buf[0..written], pdu_vec);
}
#[test]
fn test_with_crc() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
let mut buf: [u8; 64] = [0; 64];
let written = eof_pdu.write_to_bytes(&mut buf).unwrap();
assert_eq!(written, eof_pdu.len_written());
let eof_from_raw = EofPdu::from_bytes(&buf).expect("creating EOF PDU failed");
assert_eq!(eof_from_raw, eof_pdu);
buf[written - 1] -= 1;
let crc: u16 = ((buf[written - 2] as u16) << 8) | buf[written - 1] as u16;
let error = EofPdu::from_bytes(&buf).unwrap_err();
if let PduError::ChecksumError(e) = error {
assert_eq!(e, crc);
} else {
panic!("expected crc error");
}
}
#[test]
fn test_with_large_file_flag() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Large);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
verify_state(&eof_pdu, LargeFileFlag::Large);
assert_eq!(eof_pdu.len_written(), pdu_header.header_len() + 2 + 8 + 4);
}
#[test]
#[cfg(feature = "serde")]
fn test_eof_serde() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let eof_pdu = EofPdu::new_no_error(pdu_header, 0x01020304, 12);
generic_serde_test(eof_pdu);
}
}

485
src/cfdp/pdu/file_data.rs Normal file

@ -0,0 +1,485 @@
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
PduError, PduHeader,
};
use crate::cfdp::{CrcFlag, LargeFileFlag, PduType, SegmentMetadataFlag};
use crate::ByteConversionError;
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use super::{CfdpPdu, FileDirectiveType, WritablePduPacket};
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum RecordContinuationState {
NoStartNoEnd = 0b00,
StartWithoutEnd = 0b01,
EndWithoutStart = 0b10,
StartAndEnd = 0b11,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct SegmentMetadata<'seg_meta> {
record_continuation_state: RecordContinuationState,
metadata: Option<&'seg_meta [u8]>,
}
impl<'seg_meta> SegmentMetadata<'seg_meta> {
pub fn new(
record_continuation_state: RecordContinuationState,
metadata: Option<&'seg_meta [u8]>,
) -> Option<Self> {
if let Some(metadata) = metadata {
if metadata.len() > 2_usize.pow(6) - 1 {
return None;
}
}
Some(Self {
record_continuation_state,
metadata,
})
}
pub fn record_continuation_state(&self) -> RecordContinuationState {
self.record_continuation_state
}
pub fn metadata(&self) -> Option<&'seg_meta [u8]> {
self.metadata
}
pub fn written_len(&self) -> usize {
// A missing metadata field maps to a length of 0, a present one to the length of its slice.
1 + self.metadata.map_or(0, |meta| meta.len())
}
pub(crate) fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.written_len() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.written_len(),
});
}
buf[0] = ((self.record_continuation_state as u8) << 6)
| self.metadata.map_or(0, |meta| meta.len() as u8);
if let Some(metadata) = self.metadata {
buf[1..1 + metadata.len()].copy_from_slice(metadata)
}
Ok(self.written_len())
}
pub(crate) fn from_bytes(buf: &'seg_meta [u8]) -> Result<Self, ByteConversionError> {
if buf.is_empty() {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: 2,
});
}
let mut metadata = None;
let seg_metadata_len = (buf[0] & 0b111111) as usize;
if seg_metadata_len > 0 {
metadata = Some(&buf[1..1 + seg_metadata_len]);
}
Ok(Self {
// Can't fail, only 2 bits
record_continuation_state: RecordContinuationState::try_from((buf[0] >> 6) & 0b11)
.unwrap(),
metadata,
})
}
}
/// File Data PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.3.
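///
/// # Example
///
/// A minimal sketch which wraps a small chunk of file data at file offset 0, based on the
/// constructors and tests below. The `spacepackets` import paths and the byte field arguments
/// are assumptions made for illustration, which is why the example is not compiled.
///
/// ```ignore
/// use spacepackets::cfdp::pdu::{CommonPduConfig, PduHeader, WritablePduPacket};
/// use spacepackets::cfdp::pdu::file_data::FileDataPdu;
///
/// let pdu_conf = CommonPduConfig::new_with_byte_fields(1, 2, 3).unwrap();
/// let pdu_header = PduHeader::new_for_file_data_default(pdu_conf, 0);
/// let file_data: [u8; 4] = [1, 2, 3, 4];
/// // File data PDU without segment metadata, carrying 4 bytes at file offset 0.
/// let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 0, &file_data);
/// let mut buf: [u8; 32] = [0; 32];
/// let written = fd_pdu.write_to_bytes(&mut buf).expect("writing file data PDU failed");
/// assert_eq!(written, fd_pdu.len_written());
/// ```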
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct FileDataPdu<'seg_meta, 'file_data> {
pdu_header: PduHeader,
#[cfg_attr(feature = "serde", serde(borrow))]
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data: &'file_data [u8],
}
impl<'seg_meta, 'file_data> FileDataPdu<'seg_meta, 'file_data> {
pub fn new_with_seg_metadata(
pdu_header: PduHeader,
segment_metadata: SegmentMetadata<'seg_meta>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new_generic(pdu_header, Some(segment_metadata), offset, file_data)
}
pub fn new_no_seg_metadata(
pdu_header: PduHeader,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
Self::new_generic(pdu_header, None, offset, file_data)
}
pub fn new_generic(
mut pdu_header: PduHeader,
segment_metadata: Option<SegmentMetadata<'seg_meta>>,
offset: u64,
file_data: &'file_data [u8],
) -> Self {
pdu_header.pdu_type = PduType::FileData;
if segment_metadata.is_some() {
pdu_header.seg_metadata_flag = SegmentMetadataFlag::Present;
}
let mut pdu = Self {
pdu_header,
segment_metadata,
offset,
file_data,
};
pdu.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
fn calc_pdu_datafield_len(&self) -> usize {
let mut len = core::mem::size_of::<u32>();
if self.pdu_header.pdu_conf.file_flag == LargeFileFlag::Large {
len += 4;
}
if self.segment_metadata.is_some() {
len += self.segment_metadata.as_ref().unwrap().written_len()
}
len += self.file_data.len();
if self.crc_flag() == CrcFlag::WithCrc {
len += 2;
}
len
}
pub fn offset(&self) -> u64 {
self.offset
}
pub fn file_data(&self) -> &'file_data [u8] {
self.file_data
}
pub fn segment_metadata(&self) -> Option<&SegmentMetadata> {
self.segment_metadata.as_ref()
}
pub fn from_bytes<'buf: 'seg_meta + 'file_data>(buf: &'buf [u8]) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let min_expected_len = current_idx + core::mem::size_of::<u32>();
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let mut segment_metadata = None;
if pdu_header.seg_metadata_flag == SegmentMetadataFlag::Present {
segment_metadata = Some(SegmentMetadata::from_bytes(&buf[current_idx..])?);
current_idx += segment_metadata.as_ref().unwrap().written_len();
}
let (fss, offset) = read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx..]);
current_idx += fss;
if current_idx > full_len_without_crc {
return Err(ByteConversionError::FromSliceTooSmall {
found: current_idx,
expected: full_len_without_crc,
}
.into());
}
Ok(Self {
pdu_header,
segment_metadata,
offset,
file_data: &buf[current_idx..full_len_without_crc],
})
}
}
impl CfdpPdu for FileDataPdu<'_, '_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
None
}
}
impl WritablePduPacket for FileDataPdu<'_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
if buf.len() < self.len_written() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.len_written(),
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
if self.segment_metadata.is_some() {
current_idx += self
.segment_metadata
.as_ref()
.unwrap()
.write_to_bytes(&mut buf[current_idx..])?;
}
current_idx += write_fss_field(
self.pdu_header.common_pdu_conf().file_flag,
self.offset,
&mut buf[current_idx..],
)?;
buf[current_idx..current_idx + self.file_data.len()].copy_from_slice(self.file_data);
current_idx += self.file_data.len();
if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cfdp::pdu::tests::{TEST_DEST_ID, TEST_SEQ_NUM, TEST_SRC_ID};
use crate::cfdp::pdu::{CommonPduConfig, PduHeader};
use crate::cfdp::{Direction, SegmentMetadataFlag, SegmentationControl, TransmissionMode};
#[cfg(feature = "serde")]
use postcard::{from_bytes, to_allocvec};
#[test]
fn test_basic() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
assert_eq!(fd_pdu.file_data(), file_data);
assert_eq!(fd_pdu.offset(), 10);
assert!(fd_pdu.segment_metadata().is_none());
assert_eq!(
fd_pdu.len_written(),
fd_pdu.pdu_header.header_len() + core::mem::size_of::<u32>() + 4
);
assert_eq!(fd_pdu.crc_flag(), CrcFlag::NoCrc);
assert_eq!(fd_pdu.file_flag(), LargeFileFlag::Normal);
assert_eq!(fd_pdu.pdu_type(), PduType::FileData);
assert_eq!(fd_pdu.file_directive_type(), None);
assert_eq!(fd_pdu.transmission_mode(), TransmissionMode::Acknowledged);
assert_eq!(fd_pdu.direction(), Direction::TowardsReceiver);
assert_eq!(fd_pdu.source_id(), TEST_SRC_ID.into());
assert_eq!(fd_pdu.dest_id(), TEST_DEST_ID.into());
assert_eq!(fd_pdu.transaction_seq_num(), TEST_SEQ_NUM.into());
}
#[test]
fn test_serialization() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
let mut buf: [u8; 32] = [0; 32];
let res = fd_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
assert_eq!(
written,
fd_pdu.pdu_header.header_len() + core::mem::size_of::<u32>() + 4
);
let mut current_idx = fd_pdu.pdu_header.header_len();
let file_size = u32::from_be_bytes(
buf[fd_pdu.pdu_header.header_len()..fd_pdu.pdu_header.header_len() + 4]
.try_into()
.unwrap(),
);
current_idx += 4;
assert_eq!(file_size, 10);
assert_eq!(buf[current_idx], 1);
current_idx += 1;
assert_eq!(buf[current_idx], 2);
current_idx += 1;
assert_eq!(buf[current_idx], 3);
current_idx += 1;
assert_eq!(buf[current_idx], 4);
}
#[test]
fn test_write_to_vec() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
let mut buf: [u8; 64] = [0; 64];
let written = fd_pdu.write_to_bytes(&mut buf).unwrap();
let pdu_vec = fd_pdu.to_vec().unwrap();
assert_eq!(buf[0..written], pdu_vec);
}
#[test]
fn test_deserialization() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
let mut buf: [u8; 32] = [0; 32];
fd_pdu.write_to_bytes(&mut buf).unwrap();
let fd_pdu_read_back = FileDataPdu::from_bytes(&buf);
assert!(fd_pdu_read_back.is_ok());
let fd_pdu_read_back = fd_pdu_read_back.unwrap();
assert_eq!(fd_pdu_read_back, fd_pdu);
}
#[test]
fn test_with_crc() {
let mut common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
common_conf.crc_flag = true.into();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
let mut buf: [u8; 64] = [0; 64];
let written = fd_pdu.write_to_bytes(&mut buf).unwrap();
assert_eq!(written, fd_pdu.len_written());
let finished_pdu_from_raw = FileDataPdu::from_bytes(&buf).unwrap();
assert_eq!(finished_pdu_from_raw, fd_pdu);
buf[written - 1] -= 1;
let crc: u16 = ((buf[written - 2] as u16) << 8) | buf[written - 1] as u16;
let error = FileDataPdu::from_bytes(&buf).unwrap_err();
if let PduError::ChecksumError(e) = error {
assert_eq!(e, crc);
} else {
panic!("expected crc error");
}
}
#[test]
fn test_with_seg_metadata_serialization() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data(
common_conf,
0,
SegmentMetadataFlag::Present,
SegmentationControl::WithRecordBoundaryPreservation,
);
let file_data: [u8; 4] = [1, 2, 3, 4];
let seg_metadata: [u8; 4] = [4, 3, 2, 1];
let segment_meta =
SegmentMetadata::new(RecordContinuationState::StartAndEnd, Some(&seg_metadata))
.unwrap();
let fd_pdu = FileDataPdu::new_with_seg_metadata(pdu_header, segment_meta, 10, &file_data);
assert!(fd_pdu.segment_metadata().is_some());
assert_eq!(*fd_pdu.segment_metadata().unwrap(), segment_meta);
assert_eq!(
fd_pdu.len_written(),
fd_pdu.pdu_header.header_len()
+ 1
+ seg_metadata.len()
+ core::mem::size_of::<u32>()
+ 4
);
let mut buf: [u8; 32] = [0; 32];
fd_pdu
.write_to_bytes(&mut buf)
.expect("writing FD PDU failed");
let mut current_idx = fd_pdu.pdu_header.header_len();
assert_eq!(
RecordContinuationState::try_from((buf[current_idx] >> 6) & 0b11).unwrap(),
RecordContinuationState::StartAndEnd
);
assert_eq!((buf[current_idx] & 0b111111) as usize, seg_metadata.len());
current_idx += 1;
assert_eq!(buf[current_idx], 4);
current_idx += 1;
assert_eq!(buf[current_idx], 3);
current_idx += 1;
assert_eq!(buf[current_idx], 2);
current_idx += 1;
assert_eq!(buf[current_idx], 1);
current_idx += 1;
// Still verify that the rest is written correctly.
assert_eq!(
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()),
10
);
current_idx += 4;
assert_eq!(buf[current_idx], 1);
current_idx += 1;
assert_eq!(buf[current_idx], 2);
current_idx += 1;
assert_eq!(buf[current_idx], 3);
current_idx += 1;
assert_eq!(buf[current_idx], 4);
current_idx += 1;
assert_eq!(current_idx, fd_pdu.len_written());
}
#[test]
fn test_with_seg_metadata_deserialization() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data(
common_conf,
0,
SegmentMetadataFlag::Present,
SegmentationControl::WithRecordBoundaryPreservation,
);
let file_data: [u8; 4] = [1, 2, 3, 4];
let seg_metadata: [u8; 4] = [4, 3, 2, 1];
let segment_meta =
SegmentMetadata::new(RecordContinuationState::StartAndEnd, Some(&seg_metadata))
.unwrap();
let fd_pdu = FileDataPdu::new_with_seg_metadata(pdu_header, segment_meta, 10, &file_data);
let mut buf: [u8; 32] = [0; 32];
fd_pdu
.write_to_bytes(&mut buf)
.expect("writing FD PDU failed");
let fd_pdu_read_back = FileDataPdu::from_bytes(&buf);
assert!(fd_pdu_read_back.is_ok());
let fd_pdu_read_back = fd_pdu_read_back.unwrap();
assert_eq!(fd_pdu_read_back, fd_pdu);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_serialization() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data_default(common_conf, 0);
let file_data: [u8; 4] = [1, 2, 3, 4];
let fd_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 10, &file_data);
let output = to_allocvec(&fd_pdu).unwrap();
let output_converted_back: FileDataPdu = from_bytes(&output).unwrap();
assert_eq!(output_converted_back, fd_pdu);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_serialization_with_seg_metadata() {
let common_conf =
CommonPduConfig::new_with_byte_fields(TEST_SRC_ID, TEST_DEST_ID, TEST_SEQ_NUM).unwrap();
let pdu_header = PduHeader::new_for_file_data(
common_conf,
0,
SegmentMetadataFlag::Present,
SegmentationControl::WithRecordBoundaryPreservation,
);
let file_data: [u8; 4] = [1, 2, 3, 4];
let seg_metadata: [u8; 4] = [4, 3, 2, 1];
let segment_meta =
SegmentMetadata::new(RecordContinuationState::StartAndEnd, Some(&seg_metadata))
.unwrap();
let fd_pdu = FileDataPdu::new_with_seg_metadata(pdu_header, segment_meta, 10, &file_data);
let output = to_allocvec(&fd_pdu).unwrap();
let output_converted_back: FileDataPdu = from_bytes(&output).unwrap();
assert_eq!(output_converted_back, fd_pdu);
}
}

682
src/cfdp/pdu/finished.rs Normal file

@ -0,0 +1,682 @@
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, FileDirectiveType, PduError, PduHeader,
};
use crate::cfdp::tlv::{
EntityIdTlv, FilestoreResponseTlv, GenericTlv, Tlv, TlvType, TlvTypeField, WritableTlv,
};
use crate::cfdp::{ConditionCode, CrcFlag, Direction, PduType, TlvLvError};
use crate::ByteConversionError;
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use super::{CfdpPdu, WritablePduPacket};
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum DeliveryCode {
Complete = 0,
Incomplete = 1,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, TryFromPrimitive, IntoPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum FileStatus {
DiscardDeliberately = 0b00,
DiscardedFsRejection = 0b01,
Retained = 0b10,
Unreported = 0b11,
}
/// Finished PDU abstraction.
///
/// For more information, refer to CFDP chapter 5.2.3.
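///
/// # Example
///
/// A minimal creation sketch for a successfully completed transaction, based on the
/// constructors and tests below. The `spacepackets` import paths and the byte field arguments
/// are assumptions made for illustration, which is why the example is not compiled.
///
/// ```ignore
/// use spacepackets::cfdp::pdu::{CommonPduConfig, PduHeader, WritablePduPacket};
/// use spacepackets::cfdp::pdu::finished::{DeliveryCode, FileStatus, FinishedPduCreator};
///
/// let pdu_conf = CommonPduConfig::new_with_byte_fields(1, 2, 3).unwrap();
/// let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
/// // No error: complete delivery, file retained, no filestore responses or fault location.
/// let finished_pdu = FinishedPduCreator::new_default(
///     pdu_header,
///     DeliveryCode::Complete,
///     FileStatus::Retained,
/// );
/// let finished_vec = finished_pdu.to_vec().expect("writing Finished PDU failed");
/// assert_eq!(finished_vec.len(), finished_pdu.len_written());
/// ```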
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct FinishedPduCreator<'fs_responses> {
pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
file_status: FileStatus,
fs_responses:
&'fs_responses [FilestoreResponseTlv<'fs_responses, 'fs_responses, 'fs_responses>],
fault_location: Option<EntityIdTlv>,
}
impl<'fs_responses> FinishedPduCreator<'fs_responses> {
/// Default finished PDU: No error (no fault location field) and no filestore responses.
pub fn new_default(
pdu_header: PduHeader,
delivery_code: DeliveryCode,
file_status: FileStatus,
) -> Self {
Self::new_generic(
pdu_header,
ConditionCode::NoError,
delivery_code,
file_status,
&[],
None,
)
}
pub fn new_with_error(
pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
file_status: FileStatus,
fault_location: EntityIdTlv,
) -> Self {
Self::new_generic(
pdu_header,
condition_code,
delivery_code,
file_status,
&[],
Some(fault_location),
)
}
pub fn new_generic(
mut pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
file_status: FileStatus,
fs_responses: &'fs_responses [FilestoreResponseTlv<
'fs_responses,
'fs_responses,
'fs_responses,
>],
fault_location: Option<EntityIdTlv>,
) -> Self {
pdu_header.pdu_type = PduType::FileDirective;
// Enforce correct direction bit.
pdu_header.pdu_conf.direction = Direction::TowardsSender;
let mut finished_pdu = Self {
pdu_header,
condition_code,
delivery_code,
file_status,
fs_responses,
fault_location,
};
finished_pdu.pdu_header.pdu_datafield_len = finished_pdu.calc_pdu_datafield_len() as u16;
finished_pdu
}
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
pub fn delivery_code(&self) -> DeliveryCode {
self.delivery_code
}
pub fn file_status(&self) -> FileStatus {
self.file_status
}
/// If there are no filestore responses, an empty slice will be returned.
pub fn filestore_responses(&self) -> &[FilestoreResponseTlv<'_, '_, '_>] {
self.fs_responses
}
pub fn fault_location(&self) -> Option<EntityIdTlv> {
self.fault_location
}
fn calc_pdu_datafield_len(&self) -> usize {
let mut datafield_len = 2;
for fs_response in self.fs_responses {
datafield_len += fs_response.len_full();
}
if let Some(fault_location) = self.fault_location {
datafield_len += fault_location.len_full();
}
if self.crc_flag() == CrcFlag::WithCrc {
datafield_len += 2;
}
datafield_len
}
}
impl CfdpPdu for FinishedPduCreator<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::FinishedPdu)
}
}
impl WritablePduPacket for FinishedPduCreator<'_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: expected_len,
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::FinishedPdu as u8;
current_idx += 1;
buf[current_idx] = ((self.condition_code as u8) << 4)
| ((self.delivery_code as u8) << 2)
| self.file_status as u8;
current_idx += 1;
for fs_responses in self.fs_responses {
current_idx += fs_responses.write_to_bytes(&mut buf[current_idx..])?;
}
if let Some(fault_location) = self.fault_location {
current_idx += fault_location.write_to_bytes(&mut buf[current_idx..])?;
}
if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
/// Helper structure to loop through all filestore responses of a read Finished PDU. It should be
/// noted that iterators in Rust are not fallible, but the TLV creation can fail, for example if
/// the raw TLV data is invalid for some reason. In that case, the iterator will yield [None]
/// because there is no way to recover from this.
///
/// The user can accumulate the length of all TLVs yielded by the iterator and compare it against
/// the full length of the options to check whether the iterator was able to parse all TLVs
/// successfully.
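///
/// # Example
///
/// A short sketch of the accumulation check described above. It assumes that
/// `finished_pdu_buf` holds a valid raw Finished PDU and that the required trait imports for
/// `len_full` are in scope; it is therefore not compiled.
///
/// ```ignore
/// use spacepackets::cfdp::pdu::finished::FinishedPduReader;
///
/// let finished_pdu = FinishedPduReader::from_bytes(&finished_pdu_buf)
///     .expect("reading Finished PDU failed");
/// let mut accumulated_len = 0;
/// for fs_response in finished_pdu.fs_responses_iter() {
///     accumulated_len += fs_response.len_full();
/// }
/// // If this check fails, the iterator stopped early on an invalid TLV.
/// assert_eq!(accumulated_len, finished_pdu.fs_responses_raw().len());
/// ```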
pub struct FilestoreResponseIterator<'buf> {
responses_buf: &'buf [u8],
current_idx: usize,
}
impl<'buf> Iterator for FilestoreResponseIterator<'buf> {
type Item = FilestoreResponseTlv<'buf, 'buf, 'buf>;
fn next(&mut self) -> Option<Self::Item> {
if self.current_idx == self.responses_buf.len() {
return None;
}
let tlv = FilestoreResponseTlv::from_bytes(&self.responses_buf[self.current_idx..]);
// Iterators in Rust are not fallible, so there is no way to propagate the error here.
if tlv.is_err() {
return None;
}
let tlv = tlv.unwrap();
self.current_idx += tlv.len_full();
Some(tlv)
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct FinishedPduReader<'buf> {
pdu_header: PduHeader,
condition_code: ConditionCode,
delivery_code: DeliveryCode,
file_status: FileStatus,
fs_responses_raw: &'buf [u8],
fault_location: Option<EntityIdTlv>,
}
impl<'buf> FinishedPduReader<'buf> {
/// Generates [Self] from a raw bytestream.
pub fn new(buf: &'buf [u8]) -> Result<Self, PduError> {
Self::from_bytes(buf)
}
/// Generates [Self] from a raw bytestream.
pub fn from_bytes(buf: &'buf [u8]) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let min_expected_len = current_idx + 2;
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::FinishedPdu),
}
})?;
if directive_type != FileDirectiveType::FinishedPdu {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::FinishedPdu,
});
}
current_idx += 1;
let condition_code = ConditionCode::try_from((buf[current_idx] >> 4) & 0b1111)
.map_err(|_| PduError::InvalidConditionCode((buf[current_idx] >> 4) & 0b1111))?;
// Unwrap is okay here for both of the following operations, which cannot fail.
let delivery_code = DeliveryCode::try_from((buf[current_idx] >> 2) & 0b1).unwrap();
let file_status = FileStatus::try_from(buf[current_idx] & 0b11).unwrap();
current_idx += 1;
let (fs_responses_raw, fault_location) =
Self::parse_tlv_fields(current_idx, full_len_without_crc, buf)?;
Ok(Self {
pdu_header,
condition_code,
delivery_code,
file_status,
fs_responses_raw,
fault_location,
})
}
pub fn fs_responses_raw(&self) -> &[u8] {
self.fs_responses_raw
}
pub fn fs_responses_iter(&self) -> FilestoreResponseIterator<'_> {
FilestoreResponseIterator {
responses_buf: self.fs_responses_raw,
current_idx: 0,
}
}
pub fn condition_code(&self) -> ConditionCode {
self.condition_code
}
pub fn delivery_code(&self) -> DeliveryCode {
self.delivery_code
}
pub fn file_status(&self) -> FileStatus {
self.file_status
}
pub fn fault_location(&self) -> Option<EntityIdTlv> {
self.fault_location
}
fn parse_tlv_fields(
mut current_idx: usize,
full_len_without_crc: usize,
buf: &[u8],
) -> Result<(&[u8], Option<EntityIdTlv>), PduError> {
let mut fs_responses: &[u8] = &[];
let mut fault_location = None;
let start_of_fs_responses = current_idx;
// There are leftover filestore response(s) and/or a fault location field.
while current_idx < full_len_without_crc {
let next_tlv = Tlv::from_bytes(&buf[current_idx..])?;
match next_tlv.tlv_type_field() {
TlvTypeField::Standard(tlv_type) => {
if tlv_type == TlvType::FilestoreResponse {
current_idx += next_tlv.len_full();
if current_idx == full_len_without_crc {
fs_responses = &buf[start_of_fs_responses..current_idx];
}
} else if tlv_type == TlvType::EntityId {
// At least one FS response is included.
if current_idx > start_of_fs_responses {
fs_responses = &buf[start_of_fs_responses..current_idx];
}
fault_location = Some(EntityIdTlv::from_bytes(&buf[current_idx..])?);
current_idx += fault_location.as_ref().unwrap().len_full();
// This is considered a configuration error: The entity ID has to be the
// last TLV, everything else would break the whole handling of the packet
// TLVs.
if current_idx != full_len_without_crc {
return Err(PduError::FormatError);
}
} else {
return Err(TlvLvError::InvalidTlvTypeField {
found: tlv_type.into(),
expected: Some(TlvType::FilestoreResponse.into()),
}
.into());
}
}
TlvTypeField::Custom(raw) => {
return Err(TlvLvError::InvalidTlvTypeField {
found: raw,
expected: None,
}
.into());
}
}
}
Ok((fs_responses, fault_location))
}
}
impl CfdpPdu for FinishedPduReader<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::FinishedPdu)
}
}
impl PartialEq<FinishedPduCreator<'_>> for FinishedPduReader<'_> {
fn eq(&self, other: &FinishedPduCreator<'_>) -> bool {
self.pdu_header == other.pdu_header
&& self.condition_code == other.condition_code
&& self.delivery_code == other.delivery_code
&& self.file_status == other.file_status
&& self.fault_location == other.fault_location
&& self
.fs_responses_iter()
.zip(other.filestore_responses().iter())
.all(|(a, b)| a == *b)
}
}
impl PartialEq<FinishedPduReader<'_>> for FinishedPduCreator<'_> {
fn eq(&self, other: &FinishedPduReader<'_>) -> bool {
other.eq(self)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cfdp::lv::Lv;
use crate::cfdp::pdu::tests::{
common_pdu_conf, verify_raw_header, TEST_DEST_ID, TEST_SEQ_NUM, TEST_SRC_ID,
};
use crate::cfdp::pdu::{FileDirectiveType, PduHeader};
use crate::cfdp::tlv::FilestoreResponseTlv;
use crate::cfdp::{ConditionCode, CrcFlag, Direction, LargeFileFlag, TransmissionMode};
fn generic_finished_pdu(
crc_flag: CrcFlag,
fss: LargeFileFlag,
delivery_code: DeliveryCode,
file_status: FileStatus,
) -> FinishedPduCreator<'static> {
let pdu_header = PduHeader::new_no_file_data(common_pdu_conf(crc_flag, fss), 0);
FinishedPduCreator::new_default(pdu_header, delivery_code, file_status)
}
#[test]
fn test_basic() {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
DeliveryCode::Complete,
FileStatus::Retained,
);
assert_eq!(finished_pdu.condition_code(), ConditionCode::NoError);
assert_eq!(
finished_pdu.pdu_header().pdu_conf.direction,
Direction::TowardsSender
);
assert_eq!(finished_pdu.delivery_code(), DeliveryCode::Complete);
assert_eq!(finished_pdu.file_status(), FileStatus::Retained);
assert_eq!(finished_pdu.filestore_responses(), &[]);
assert_eq!(finished_pdu.fault_location(), None);
assert_eq!(finished_pdu.pdu_header().pdu_datafield_len, 2);
assert_eq!(finished_pdu.crc_flag(), CrcFlag::NoCrc);
assert_eq!(finished_pdu.file_flag(), LargeFileFlag::Normal);
assert_eq!(finished_pdu.pdu_type(), PduType::FileDirective);
assert_eq!(
finished_pdu.file_directive_type(),
Some(FileDirectiveType::FinishedPdu)
);
assert_eq!(
finished_pdu.transmission_mode(),
TransmissionMode::Acknowledged
);
assert_eq!(finished_pdu.direction(), Direction::TowardsSender);
assert_eq!(finished_pdu.source_id(), TEST_SRC_ID.into());
assert_eq!(finished_pdu.dest_id(), TEST_DEST_ID.into());
assert_eq!(finished_pdu.transaction_seq_num(), TEST_SEQ_NUM.into());
}
fn generic_serialization_test_no_error(delivery_code: DeliveryCode, file_status: FileStatus) {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
delivery_code,
file_status,
);
let mut buf: [u8; 64] = [0; 64];
let written = finished_pdu.write_to_bytes(&mut buf);
assert!(written.is_ok());
let written = written.unwrap();
assert_eq!(written, 9);
assert_eq!(written, finished_pdu.len_written());
assert_eq!(written, finished_pdu.pdu_header().header_len() + 2);
assert_eq!(
finished_pdu.pdu_header().pdu_conf.direction,
Direction::TowardsSender
);
verify_raw_header(finished_pdu.pdu_header(), &buf);
let mut current_idx = finished_pdu.pdu_header().header_len();
assert_eq!(buf[current_idx], FileDirectiveType::FinishedPdu as u8);
current_idx += 1;
assert_eq!(
(buf[current_idx] >> 4) & 0b1111,
ConditionCode::NoError as u8
);
assert_eq!((buf[current_idx] >> 2) & 0b1, delivery_code as u8);
assert_eq!(buf[current_idx] & 0b11, file_status as u8);
assert_eq!(current_idx + 1, written);
}
#[test]
fn test_serialization_simple() {
generic_serialization_test_no_error(DeliveryCode::Complete, FileStatus::Retained);
}
#[test]
fn test_serialization_simple_2() {
generic_serialization_test_no_error(
DeliveryCode::Incomplete,
FileStatus::DiscardDeliberately,
);
}
#[test]
fn test_serialization_simple_3() {
generic_serialization_test_no_error(DeliveryCode::Incomplete, FileStatus::Unreported);
}
#[test]
fn test_write_to_vec() {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
DeliveryCode::Complete,
FileStatus::Retained,
);
let mut buf: [u8; 64] = [0; 64];
let written = finished_pdu.write_to_bytes(&mut buf).unwrap();
let pdu_vec = finished_pdu.to_vec().unwrap();
assert_eq!(buf[0..written], pdu_vec);
}
#[test]
fn test_deserialization_simple() {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
DeliveryCode::Complete,
FileStatus::Retained,
);
let mut buf: [u8; 64] = [0; 64];
finished_pdu.write_to_bytes(&mut buf).unwrap();
let read_back = FinishedPduReader::from_bytes(&buf);
assert!(read_back.is_ok());
let read_back = read_back.unwrap();
assert_eq!(finished_pdu, read_back);
// Use all getter functions explicitly once here.
assert_eq!(finished_pdu.pdu_header(), read_back.pdu_header());
assert_eq!(finished_pdu.condition_code(), read_back.condition_code());
assert_eq!(finished_pdu.fault_location(), read_back.fault_location());
assert_eq!(finished_pdu.file_status(), read_back.file_status());
assert_eq!(finished_pdu.delivery_code(), read_back.delivery_code());
}
#[test]
fn test_serialization_buf_too_small() {
let finished_pdu = generic_finished_pdu(
CrcFlag::NoCrc,
LargeFileFlag::Normal,
DeliveryCode::Complete,
FileStatus::Retained,
);
let mut buf: [u8; 8] = [0; 8];
let error = finished_pdu.write_to_bytes(&mut buf);
assert!(error.is_err());
if let PduError::ByteConversion(ByteConversionError::ToSliceTooSmall { found, expected }) =
error.unwrap_err()
{
assert_eq!(found, 8);
assert_eq!(expected, 9);
} else {
panic!("expected to_slice_too_small error");
}
}
#[test]
fn test_with_crc() {
let finished_pdu = generic_finished_pdu(
CrcFlag::WithCrc,
LargeFileFlag::Normal,
DeliveryCode::Complete,
FileStatus::Retained,
);
let mut buf: [u8; 64] = [0; 64];
let written = finished_pdu.write_to_bytes(&mut buf).unwrap();
assert_eq!(written, finished_pdu.len_written());
let finished_pdu_from_raw = FinishedPduReader::new(&buf).unwrap();
assert_eq!(finished_pdu, finished_pdu_from_raw);
buf[written - 1] -= 1;
let crc: u16 = ((buf[written - 2] as u16) << 8) | buf[written - 1] as u16;
let error = FinishedPduReader::new(&buf).unwrap_err();
if let PduError::ChecksumError(e) = error {
assert_eq!(e, crc);
} else {
panic!("expected crc error");
}
}
#[test]
fn test_with_fault_location() {
let pdu_header =
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
let finished_pdu = FinishedPduCreator::new_with_error(
pdu_header,
ConditionCode::NakLimitReached,
DeliveryCode::Incomplete,
FileStatus::DiscardDeliberately,
EntityIdTlv::new(TEST_DEST_ID.into()),
);
let finished_pdu_vec = finished_pdu.to_vec().unwrap();
assert_eq!(finished_pdu_vec.len(), 12);
assert_eq!(finished_pdu_vec[9], TlvType::EntityId.into());
assert_eq!(finished_pdu_vec[10], 1);
assert_eq!(finished_pdu_vec[11], TEST_DEST_ID.value_typed());
assert_eq!(
finished_pdu.fault_location().unwrap().entity_id(),
&TEST_DEST_ID.into()
);
}
#[test]
fn test_deserialization_with_fault_location() {
let pdu_header =
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
let entity_id_tlv = EntityIdTlv::new(TEST_DEST_ID.into());
let finished_pdu = FinishedPduCreator::new_with_error(
pdu_header,
ConditionCode::NakLimitReached,
DeliveryCode::Incomplete,
FileStatus::DiscardDeliberately,
entity_id_tlv,
);
let finished_pdu_vec = finished_pdu.to_vec().unwrap();
let finished_pdu_deserialized = FinishedPduReader::from_bytes(&finished_pdu_vec).unwrap();
assert_eq!(finished_pdu, finished_pdu_deserialized);
}
#[test]
fn test_deserialization_with_fs_responses() {
let entity_id_tlv = EntityIdTlv::new(TEST_DEST_ID.into());
let first_name = "first.txt";
let first_name_lv = Lv::new_from_str(first_name).unwrap();
let fs_response_0 = FilestoreResponseTlv::new_no_filestore_message(
crate::cfdp::tlv::FilestoreActionCode::CreateFile,
0,
first_name_lv,
None,
)
.unwrap();
let fs_response_1 = FilestoreResponseTlv::new_no_filestore_message(
crate::cfdp::tlv::FilestoreActionCode::DeleteFile,
0,
first_name_lv,
None,
)
.unwrap();
let fs_responses = &[fs_response_0, fs_response_1];
let pdu_header =
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
let finished_pdu = FinishedPduCreator::new_generic(
pdu_header,
ConditionCode::NakLimitReached,
DeliveryCode::Incomplete,
FileStatus::DiscardDeliberately,
fs_responses,
Some(entity_id_tlv),
);
let finished_pdu_vec = finished_pdu.to_vec().unwrap();
let finished_pdu_deserialized = FinishedPduReader::from_bytes(&finished_pdu_vec).unwrap();
assert_eq!(finished_pdu_deserialized, finished_pdu);
}
#[test]
fn test_deserialization_with_fs_responses_and_fault_location() {
let first_name = "first.txt";
let first_name_lv = Lv::new_from_str(first_name).unwrap();
let fs_response_0 = FilestoreResponseTlv::new_no_filestore_message(
crate::cfdp::tlv::FilestoreActionCode::CreateFile,
0,
first_name_lv,
None,
)
.unwrap();
let fs_response_1 = FilestoreResponseTlv::new_no_filestore_message(
crate::cfdp::tlv::FilestoreActionCode::DeleteFile,
0,
first_name_lv,
None,
)
.unwrap();
let fs_responses = &[fs_response_0, fs_response_1];
let pdu_header =
PduHeader::new_no_file_data(common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal), 0);
let finished_pdu = FinishedPduCreator::new_generic(
pdu_header,
ConditionCode::NakLimitReached,
DeliveryCode::Incomplete,
FileStatus::DiscardDeliberately,
fs_responses,
None,
);
let finished_pdu_vec = finished_pdu.to_vec().unwrap();
let finished_pdu_deserialized = FinishedPduReader::from_bytes(&finished_pdu_vec).unwrap();
assert_eq!(finished_pdu_deserialized, finished_pdu);
}
}

770
src/cfdp/pdu/metadata.rs Normal file

@ -0,0 +1,770 @@
use crate::cfdp::lv::Lv;
use crate::cfdp::pdu::{
add_pdu_crc, generic_length_checks_pdu_deserialization, read_fss_field, write_fss_field,
FileDirectiveType, PduError, PduHeader,
};
use crate::cfdp::tlv::{Tlv, WritableTlv};
use crate::cfdp::{ChecksumType, CrcFlag, Direction, LargeFileFlag, PduType};
use crate::ByteConversionError;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use super::{CfdpPdu, WritablePduPacket};
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct MetadataGenericParams {
pub closure_requested: bool,
pub checksum_type: ChecksumType,
pub file_size: u64,
}
impl MetadataGenericParams {
pub fn new(closure_requested: bool, checksum_type: ChecksumType, file_size: u64) -> Self {
Self {
closure_requested,
checksum_type,
file_size,
}
}
}
pub fn build_metadata_opts_from_slice(
buf: &mut [u8],
tlvs: &[Tlv],
) -> Result<usize, ByteConversionError> {
let mut written = 0;
for tlv in tlvs {
written += tlv.write_to_bytes(&mut buf[written..])?;
}
Ok(written)
}
#[cfg(feature = "alloc")]
pub fn build_metadata_opts_from_vec(
buf: &mut [u8],
tlvs: &Vec<Tlv>,
) -> Result<usize, ByteConversionError> {
build_metadata_opts_from_slice(buf, tlvs.as_slice())
}
/// Metadata PDU creator abstraction.
///
/// This abstraction exposes a specialized API for creating metadata PDUs as specified in
/// CFDP chapter 5.2.5.
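///
/// # Example
///
/// A minimal creation sketch without options, based on the constructors below. The
/// `spacepackets` import paths and the byte field arguments are assumptions made for
/// illustration, which is why the example is not compiled.
///
/// ```ignore
/// use spacepackets::cfdp::lv::Lv;
/// use spacepackets::cfdp::pdu::{CommonPduConfig, PduHeader, WritablePduPacket};
/// use spacepackets::cfdp::pdu::metadata::{MetadataGenericParams, MetadataPduCreator};
/// use spacepackets::cfdp::ChecksumType;
///
/// let pdu_conf = CommonPduConfig::new_with_byte_fields(1, 2, 3).unwrap();
/// let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
/// // No closure requested, CRC32 file checksum, 12 byte file.
/// let metadata_params = MetadataGenericParams::new(false, ChecksumType::Crc32, 12);
/// let metadata_pdu = MetadataPduCreator::new_no_opts(
///     pdu_header,
///     metadata_params,
///     Lv::new_from_str("source.txt").unwrap(),
///     Lv::new_from_str("dest.txt").unwrap(),
/// );
/// let metadata_vec = metadata_pdu.to_vec().expect("writing Metadata PDU failed");
/// assert_eq!(metadata_vec.len(), metadata_pdu.len_written());
/// ```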
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct MetadataPduCreator<'src_name, 'dest_name, 'opts> {
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
options: &'opts [Tlv<'opts>],
}
impl<'src_name, 'dest_name, 'opts> MetadataPduCreator<'src_name, 'dest_name, 'opts> {
pub fn new_no_opts(
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
) -> Self {
Self::new(
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
&[],
)
}
pub fn new_with_opts(
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
options: &'opts [Tlv<'opts>],
) -> Self {
Self::new(
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
options,
)
}
pub fn new(
mut pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
src_file_name: Lv<'src_name>,
dest_file_name: Lv<'dest_name>,
options: &'opts [Tlv<'opts>],
) -> Self {
pdu_header.pdu_type = PduType::FileDirective;
pdu_header.pdu_conf.direction = Direction::TowardsReceiver;
let mut pdu = Self {
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
options,
};
pdu.pdu_header.pdu_datafield_len = pdu.calc_pdu_datafield_len() as u16;
pdu
}
pub fn metadata_params(&self) -> &MetadataGenericParams {
&self.metadata_params
}
pub fn src_file_name(&self) -> Lv<'src_name> {
self.src_file_name
}
pub fn dest_file_name(&self) -> Lv<'dest_name> {
self.dest_file_name
}
pub fn options(&self) -> &'opts [Tlv<'opts>] {
self.options
}
fn calc_pdu_datafield_len(&self) -> usize {
// One directive type octet and one byte of the directive parameter field.
let mut len = 2;
if self.pdu_header.common_pdu_conf().file_flag == LargeFileFlag::Large {
len += 8;
} else {
len += 4;
}
len += self.src_file_name.len_full();
len += self.dest_file_name.len_full();
for tlv in self.options() {
len += tlv.len_full()
}
if self.crc_flag() == CrcFlag::WithCrc {
len += 2;
}
len
}
}
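// A minimal usage sketch for the creator abstraction above, assuming a valid `PduHeader`
// is available (e.g. one built with `PduHeader::new_no_file_data`); the file names and the
// file size are placeholders.
#[allow(dead_code)]
fn metadata_creator_sketch(pdu_header: PduHeader) -> Result<usize, PduError> {
    let params = MetadataGenericParams::new(false, ChecksumType::Crc32, 0x1000);
    let src_file = Lv::new_from_str("source.txt").unwrap();
    let dest_file = Lv::new_from_str("dest.txt").unwrap();
    let creator = MetadataPduCreator::new_no_opts(pdu_header, params, src_file, dest_file);
    // The constructor corrects the PDU type, direction and data field length, so the PDU
    // can be serialized right away.
    let mut buf: [u8; 128] = [0; 128];
    creator.write_to_bytes(&mut buf)
}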
impl CfdpPdu for MetadataPduCreator<'_, '_, '_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::MetadataPdu)
}
}
impl WritablePduPacket for MetadataPduCreator<'_, '_, '_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: expected_len,
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::MetadataPdu as u8;
current_idx += 1;
buf[current_idx] = ((self.metadata_params.closure_requested as u8) << 6)
| (self.metadata_params.checksum_type as u8);
current_idx += 1;
current_idx += write_fss_field(
self.pdu_header.common_pdu_conf().file_flag,
self.metadata_params.file_size,
&mut buf[current_idx..],
)?;
current_idx += self
.src_file_name
.write_to_be_bytes(&mut buf[current_idx..])?;
current_idx += self
.dest_file_name
.write_to_be_bytes(&mut buf[current_idx..])?;
for opt in self.options() {
opt.write_to_bytes(&mut buf[current_idx..current_idx + opt.len_full()])?;
current_idx += opt.len_full();
}
if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
/// Helper structure to loop through all options of a metadata PDU. It should be noted that
/// iterators in Rust are not fallible, but the TLV creation from the raw bytestream can fail,
/// for example if the raw TLV data is invalid. In that case, the iterator will simply yield
/// [None] because there is no way to signal the error.
///
/// The user can accumulate the length of all TLVs yielded by the iterator and compare it against
/// the full length of the options to check whether the iterator was able to parse all TLVs
/// successfully.
pub struct OptionsIter<'opts> {
opt_buf: &'opts [u8],
current_idx: usize,
}
impl<'opts> Iterator for OptionsIter<'opts> {
type Item = Tlv<'opts>;
fn next(&mut self) -> Option<Self::Item> {
if self.current_idx == self.opt_buf.len() {
return None;
}
let tlv = Tlv::from_bytes(&self.opt_buf[self.current_idx..]);
// There are no fallible iterators in Rust, so we simply stop the iteration here.
if tlv.is_err() {
return None;
}
let tlv = tlv.unwrap();
self.current_idx += tlv.len_full();
Some(tlv)
}
}
/// Metadata PDU reader abstraction.
///
/// This abstraction exposes a specialized API for reading a metadata PDU with minimal copying
/// involved.
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct MetadataPduReader<'buf> {
pdu_header: PduHeader,
metadata_params: MetadataGenericParams,
#[cfg_attr(feature = "serde", serde(borrow))]
src_file_name: Lv<'buf>,
#[cfg_attr(feature = "serde", serde(borrow))]
dest_file_name: Lv<'buf>,
options: &'buf [u8],
}
impl<'raw> MetadataPduReader<'raw> {
pub fn new(buf: &'raw [u8]) -> Result<Self, PduError> {
Self::from_bytes(buf)
}
pub fn from_bytes(buf: &'raw [u8]) -> Result<Self, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
let is_large_file = pdu_header.pdu_conf.file_flag == LargeFileFlag::Large;
// Minimal length: 1 byte + FSS (4 bytes) + 2 empty LVs (1 byte each)
let mut min_expected_len = current_idx + 7;
if is_large_file {
min_expected_len += 4;
}
generic_length_checks_pdu_deserialization(buf, min_expected_len, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::MetadataPdu),
}
})?;
if directive_type != FileDirectiveType::MetadataPdu {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::MetadataPdu,
});
}
current_idx += 1;
let (fss_len, file_size) =
read_fss_field(pdu_header.pdu_conf.file_flag, &buf[current_idx + 1..]);
let metadata_params = MetadataGenericParams {
closure_requested: ((buf[current_idx] >> 6) & 0b1) != 0,
checksum_type: ChecksumType::try_from(buf[current_idx] & 0b1111)
.map_err(|_| PduError::InvalidChecksumType(buf[current_idx] & 0b1111))?,
file_size,
};
current_idx += 1 + fss_len;
let src_file_name = Lv::from_bytes(&buf[current_idx..])?;
current_idx += src_file_name.len_full();
let dest_file_name = Lv::from_bytes(&buf[current_idx..])?;
current_idx += dest_file_name.len_full();
// All left-over bytes are options.
Ok(Self {
pdu_header,
metadata_params,
src_file_name,
dest_file_name,
options: &buf[current_idx..full_len_without_crc],
})
}
/// Yield an iterator which can be used to loop through all options. The iterator will simply
/// yield no elements if the options field is empty.
pub fn options_iter(&self) -> Option<OptionsIter<'_>> {
Some(OptionsIter {
opt_buf: self.options,
current_idx: 0,
})
}
pub fn options(&self) -> &'raw [u8] {
self.options
}
pub fn metadata_params(&self) -> &MetadataGenericParams {
&self.metadata_params
}
pub fn src_file_name(&self) -> Lv {
self.src_file_name
}
pub fn dest_file_name(&self) -> Lv {
self.dest_file_name
}
}
impl CfdpPdu for MetadataPduReader<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::MetadataPdu)
}
}
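// A minimal reading sketch for the reader abstraction above, assuming `raw` contains a full
// metadata PDU. It also shows the accumulated length check described in the [OptionsIter]
// documentation: if the accumulated TLV length is smaller than the length of the options
// field, at least one TLV could not be parsed.
#[allow(dead_code)]
fn metadata_reader_sketch(raw: &[u8]) -> Result<bool, PduError> {
    let reader = MetadataPduReader::from_bytes(raw)?;
    let mut accumulated_len = 0;
    if let Some(options_iter) = reader.options_iter() {
        for option in options_iter {
            accumulated_len += option.len_full();
        }
    }
    // true if all options were parsed successfully.
    Ok(accumulated_len == reader.options().len())
}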
#[cfg(test)]
pub mod tests {
use alloc::string::ToString;
use crate::cfdp::lv::Lv;
use crate::cfdp::pdu::metadata::{
build_metadata_opts_from_slice, build_metadata_opts_from_vec, MetadataGenericParams,
MetadataPduCreator, MetadataPduReader,
};
use crate::cfdp::pdu::tests::{
common_pdu_conf, verify_raw_header, TEST_DEST_ID, TEST_SEQ_NUM, TEST_SRC_ID,
};
use crate::cfdp::pdu::{CfdpPdu, PduError, WritablePduPacket};
use crate::cfdp::pdu::{FileDirectiveType, PduHeader};
use crate::cfdp::tlv::{Tlv, TlvType};
use crate::cfdp::{
ChecksumType, CrcFlag, Direction, LargeFileFlag, PduType, SegmentMetadataFlag,
SegmentationControl, TransmissionMode,
};
use std::vec;
const SRC_FILENAME: &str = "hello-world.txt";
const DEST_FILENAME: &str = "hello-world2.txt";
fn generic_metadata_pdu<'opts>(
crc_flag: CrcFlag,
checksum_type: ChecksumType,
closure_requested: bool,
fss: LargeFileFlag,
opts: &'opts [Tlv],
) -> (
Lv<'static>,
Lv<'static>,
MetadataPduCreator<'static, 'static, 'opts>,
) {
let pdu_header = PduHeader::new_no_file_data(common_pdu_conf(crc_flag, fss), 0);
let metadata_params = MetadataGenericParams::new(closure_requested, checksum_type, 0x1010);
let src_filename = Lv::new_from_str(SRC_FILENAME).expect("Generating string LV failed");
let dest_filename =
Lv::new_from_str(DEST_FILENAME).expect("Generating destination LV failed");
(
src_filename,
dest_filename,
MetadataPduCreator::new(
pdu_header,
metadata_params,
src_filename,
dest_filename,
opts,
),
)
}
#[test]
fn test_basic() {
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Normal,
&[],
);
assert_eq!(
metadata_pdu.len_written(),
metadata_pdu.pdu_header().header_len()
+ 1
+ 1
+ 4
+ src_filename.len_full()
+ dest_filename.len_full()
);
assert_eq!(metadata_pdu.src_file_name(), src_filename);
assert_eq!(metadata_pdu.dest_file_name(), dest_filename);
assert!(metadata_pdu.options().is_empty());
assert_eq!(metadata_pdu.crc_flag(), CrcFlag::NoCrc);
assert_eq!(metadata_pdu.file_flag(), LargeFileFlag::Normal);
assert_eq!(metadata_pdu.pdu_type(), PduType::FileDirective);
assert!(!metadata_pdu.metadata_params().closure_requested);
assert_eq!(
metadata_pdu.metadata_params().checksum_type,
ChecksumType::Crc32
);
assert_eq!(
metadata_pdu.file_directive_type(),
Some(FileDirectiveType::MetadataPdu)
);
assert_eq!(
metadata_pdu.transmission_mode(),
TransmissionMode::Acknowledged
);
assert_eq!(metadata_pdu.direction(), Direction::TowardsReceiver);
assert_eq!(metadata_pdu.source_id(), TEST_SRC_ID.into());
assert_eq!(metadata_pdu.dest_id(), TEST_DEST_ID.into());
assert_eq!(metadata_pdu.transaction_seq_num(), TEST_SEQ_NUM.into());
}
fn check_metadata_raw_fields(
metadata_pdu: &MetadataPduCreator,
buf: &[u8],
written_bytes: usize,
checksum_type: ChecksumType,
closure_requested: bool,
expected_src_filename: &Lv,
expected_dest_filename: &Lv,
) {
verify_raw_header(metadata_pdu.pdu_header(), buf);
assert_eq!(
written_bytes,
metadata_pdu.pdu_header.header_len()
+ 1
+ 1
+ 4
+ expected_src_filename.len_full()
+ expected_dest_filename.len_full()
);
assert_eq!(buf[7], FileDirectiveType::MetadataPdu as u8);
assert_eq!(buf[8] >> 6, closure_requested as u8);
assert_eq!(buf[8] & 0b1111, checksum_type as u8);
assert_eq!(u32::from_be_bytes(buf[9..13].try_into().unwrap()), 0x1010);
let mut current_idx = 13;
let src_name_from_raw =
Lv::from_bytes(&buf[current_idx..]).expect("Creating source name LV failed");
assert_eq!(src_name_from_raw, *expected_src_filename);
current_idx += src_name_from_raw.len_full();
let dest_name_from_raw =
Lv::from_bytes(&buf[current_idx..]).expect("Creating dest name LV failed");
assert_eq!(dest_name_from_raw, *expected_dest_filename);
current_idx += dest_name_from_raw.len_full();
// No options, so no additional data here.
assert_eq!(current_idx, written_bytes);
}
#[test]
fn test_serialization_0() {
let checksum_type = ChecksumType::Crc32;
let closure_requested = false;
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
checksum_type,
closure_requested,
LargeFileFlag::Normal,
&[],
);
let mut buf: [u8; 64] = [0; 64];
let res = metadata_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
check_metadata_raw_fields(
&metadata_pdu,
&buf,
written,
checksum_type,
closure_requested,
&src_filename,
&dest_filename,
);
}
#[test]
fn test_serialization_1() {
let checksum_type = ChecksumType::Modular;
let closure_requested = true;
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
checksum_type,
closure_requested,
LargeFileFlag::Normal,
&[],
);
let mut buf: [u8; 64] = [0; 64];
let res = metadata_pdu.write_to_bytes(&mut buf);
assert!(res.is_ok());
let written = res.unwrap();
check_metadata_raw_fields(
&metadata_pdu,
&buf,
written,
checksum_type,
closure_requested,
&src_filename,
&dest_filename,
);
}
#[test]
fn test_write_to_vec() {
let (_, _, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Normal,
&[],
);
let mut buf: [u8; 64] = [0; 64];
let pdu_vec = metadata_pdu.to_vec().unwrap();
let written = metadata_pdu.write_to_bytes(&mut buf).unwrap();
assert_eq!(buf[0..written], pdu_vec);
}
fn compare_read_pdu_to_written_pdu(written: &MetadataPduCreator, read: &MetadataPduReader) {
assert_eq!(written.metadata_params(), read.metadata_params());
assert_eq!(written.src_file_name(), read.src_file_name());
assert_eq!(written.dest_file_name(), read.dest_file_name());
let opts = written.options();
for (tlv_written, tlv_read) in opts.iter().zip(read.options_iter().unwrap()) {
assert_eq!(tlv_written, &tlv_read);
}
}
#[test]
fn test_deserialization() {
let (_, _, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
true,
LargeFileFlag::Normal,
&[],
);
let mut buf: [u8; 64] = [0; 64];
metadata_pdu.write_to_bytes(&mut buf).unwrap();
let pdu_read_back = MetadataPduReader::from_bytes(&buf);
assert!(pdu_read_back.is_ok());
let pdu_read_back = pdu_read_back.unwrap();
compare_read_pdu_to_written_pdu(&metadata_pdu, &pdu_read_back);
}
#[test]
fn test_with_crc_flag() {
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::WithCrc,
ChecksumType::Crc32,
true,
LargeFileFlag::Normal,
&[],
);
assert_eq!(metadata_pdu.crc_flag(), CrcFlag::WithCrc);
let mut buf: [u8; 64] = [0; 64];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header().header_len()
+ 1
+ 1
+ core::mem::size_of::<u32>()
+ src_filename.len_full()
+ dest_filename.len_full()
+ 2
);
assert_eq!(written, metadata_pdu.len_written());
let pdu_read_back = MetadataPduReader::new(&buf).unwrap();
compare_read_pdu_to_written_pdu(&metadata_pdu, &pdu_read_back);
}
#[test]
fn test_with_large_file_flag() {
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Large,
&[],
);
let mut buf: [u8; 64] = [0; 64];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header().header_len()
+ 1
+ 1
+ core::mem::size_of::<u64>()
+ src_filename.len_full()
+ dest_filename.len_full()
);
let pdu_read_back = MetadataPduReader::new(&buf).unwrap();
compare_read_pdu_to_written_pdu(&metadata_pdu, &pdu_read_back);
}
#[test]
fn test_opts_builders() {
let tlv1 = Tlv::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap();
let tlv_slice = [tlv1, tlv2];
let mut buf: [u8; 32] = [0; 32];
let opts = build_metadata_opts_from_slice(&mut buf, &tlv_slice);
assert!(opts.is_ok());
let opts_len = opts.unwrap();
assert_eq!(opts_len, tlv1.len_full() + tlv2.len_full());
let tlv1_conv_back = Tlv::from_bytes(&buf).unwrap();
assert_eq!(tlv1_conv_back, tlv1);
let tlv2_conv_back = Tlv::from_bytes(&buf[tlv1_conv_back.len_full()..]).unwrap();
assert_eq!(tlv2_conv_back, tlv2);
}
#[test]
fn test_opts_builders_from_vec() {
let tlv1 = Tlv::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap();
let tlv_vec = vec![tlv1, tlv2];
let mut buf: [u8; 32] = [0; 32];
let opts = build_metadata_opts_from_vec(&mut buf, &tlv_vec);
assert!(opts.is_ok());
let opts_len = opts.unwrap();
assert_eq!(opts_len, tlv1.len_full() + tlv2.len_full());
let tlv1_conv_back = Tlv::from_bytes(&buf).unwrap();
assert_eq!(tlv1_conv_back, tlv1);
let tlv2_conv_back = Tlv::from_bytes(&buf[tlv1_conv_back.len_full()..]).unwrap();
assert_eq!(tlv2_conv_back, tlv2);
}
#[test]
fn test_with_opts() {
let tlv1 = Tlv::new_empty(TlvType::FlowLabel);
let msg_to_user: [u8; 4] = [1, 2, 3, 4];
let tlv2 = Tlv::new(TlvType::MsgToUser, &msg_to_user).unwrap();
let tlv_vec = vec![tlv1, tlv2];
let opts_len = tlv1.len_full() + tlv2.len_full();
let (src_filename, dest_filename, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Normal,
&tlv_vec,
);
let mut buf: [u8; 128] = [0; 128];
let write_res = metadata_pdu.write_to_bytes(&mut buf);
assert!(write_res.is_ok());
let written = write_res.unwrap();
assert_eq!(
written,
metadata_pdu.pdu_header.header_len()
+ 1
+ 1
+ 4
+ src_filename.len_full()
+ dest_filename.len_full()
+ opts_len
);
let pdu_read_back = MetadataPduReader::from_bytes(&buf).unwrap();
compare_read_pdu_to_written_pdu(&metadata_pdu, &pdu_read_back);
let opts_iter = pdu_read_back.options_iter();
assert!(opts_iter.is_some());
let opts_iter = opts_iter.unwrap();
let mut accumulated_len = 0;
for (idx, opt) in opts_iter.enumerate() {
assert_eq!(tlv_vec[idx], opt);
accumulated_len += opt.len_full();
}
assert_eq!(accumulated_len, pdu_read_back.options().len());
}
#[test]
fn test_invalid_directive_code() {
let (_, _, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
true,
LargeFileFlag::Large,
&[],
);
let mut metadata_vec = metadata_pdu.to_vec().unwrap();
metadata_vec[7] = 0xff;
let metadata_error = MetadataPduReader::from_bytes(&metadata_vec);
assert!(metadata_error.is_err());
let error = metadata_error.unwrap_err();
if let PduError::InvalidDirectiveType { found, expected } = error {
assert_eq!(found, 0xff);
assert_eq!(expected, Some(FileDirectiveType::MetadataPdu));
assert_eq!(
error.to_string(),
"invalid directive type value 255, expected Some(MetadataPdu)"
);
} else {
panic!("Expected InvalidDirectiveType error, got {:?}", error);
}
}
#[test]
fn test_wrong_directive_code() {
let (_, _, metadata_pdu) = generic_metadata_pdu(
CrcFlag::NoCrc,
ChecksumType::Crc32,
false,
LargeFileFlag::Large,
&[],
);
let mut metadata_vec = metadata_pdu.to_vec().unwrap();
metadata_vec[7] = FileDirectiveType::EofPdu as u8;
let metadata_error = MetadataPduReader::from_bytes(&metadata_vec);
assert!(metadata_error.is_err());
let error = metadata_error.unwrap_err();
if let PduError::WrongDirectiveType { found, expected } = error {
assert_eq!(found, FileDirectiveType::EofPdu);
assert_eq!(expected, FileDirectiveType::MetadataPdu);
assert_eq!(
error.to_string(),
"found directive type EofPdu, expected MetadataPdu"
);
} else {
panic!("Expected InvalidDirectiveType error, got {:?}", error);
}
}
#[test]
fn test_corrects_pdu_header() {
let pdu_header = PduHeader::new_for_file_data(
common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal),
0,
SegmentMetadataFlag::NotPresent,
SegmentationControl::NoRecordBoundaryPreservation,
);
let metadata_params = MetadataGenericParams::new(false, ChecksumType::Crc32, 10);
let src_filename = Lv::new_from_str(SRC_FILENAME).expect("Generating string LV failed");
let dest_filename =
Lv::new_from_str(DEST_FILENAME).expect("Generating destination LV failed");
let metadata_pdu = MetadataPduCreator::new_no_opts(
pdu_header,
metadata_params,
src_filename,
dest_filename,
);
assert_eq!(metadata_pdu.pdu_header().pdu_type(), PduType::FileDirective);
}
}

1155
src/cfdp/pdu/mod.rs Normal file

File diff suppressed because it is too large

806
src/cfdp/pdu/nak.rs Normal file

@ -0,0 +1,806 @@
use crate::{
cfdp::{CrcFlag, Direction, LargeFileFlag},
ByteConversionError,
};
use core::{marker::PhantomData, mem::size_of};
use super::{
add_pdu_crc, generic_length_checks_pdu_deserialization, CfdpPdu, FileDirectiveType, PduError,
PduHeader, WritablePduPacket,
};
/// Helper type to encapsulate both normal file size segment requests and large file size segment
/// requests.
#[derive(Debug, PartialEq, Eq, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum SegmentRequests<'a> {
U32Pairs(&'a [(u32, u32)]),
U64Pairs(&'a [(u64, u64)]),
}
impl SegmentRequests<'_> {
pub fn is_empty(&self) -> bool {
match self {
SegmentRequests::U32Pairs(pairs) => pairs.is_empty(),
SegmentRequests::U64Pairs(pairs) => pairs.is_empty(),
}
}
}
/// NAK PDU abstraction specialized in the creation of NAK PDUs.
///
/// It exposes a specialized API which simplifies generating these NAK PDUs with the
/// format specified in CFDP chapter 5.2.6.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct NakPduCreator<'seg_reqs> {
pdu_header: PduHeader,
start_of_scope: u64,
end_of_scope: u64,
segment_requests: Option<SegmentRequests<'seg_reqs>>,
}
impl<'seg_reqs> NakPduCreator<'seg_reqs> {
/// Please note that the start of scope and the end of scope need to be smaller than or equal
/// to [u32::MAX] if the large file flag of the passed PDU configuration is
/// [LargeFileFlag::Normal].
///
/// ## Errors
///
pub fn new_no_segment_requests(
pdu_header: PduHeader,
start_of_scope: u64,
end_of_scope: u64,
) -> Result<NakPduCreator<'seg_reqs>, PduError> {
Self::new_generic(pdu_header, start_of_scope, end_of_scope, None)
}
/// Default constructor for normal file sizes.
pub fn new(
pdu_header: PduHeader,
start_of_scope: u32,
end_of_scope: u32,
segment_requests: &'seg_reqs [(u32, u32)],
) -> Result<NakPduCreator, PduError> {
let mut passed_segment_requests = None;
if !segment_requests.is_empty() {
passed_segment_requests = Some(SegmentRequests::U32Pairs(segment_requests));
}
Self::new_generic(
pdu_header,
start_of_scope.into(),
end_of_scope.into(),
passed_segment_requests,
)
}
pub fn new_large_file_size(
pdu_header: PduHeader,
start_of_scope: u64,
end_of_scope: u64,
segment_requests: &'seg_reqs [(u64, u64)],
) -> Result<NakPduCreator, PduError> {
let mut passed_segment_requests = None;
if !segment_requests.is_empty() {
passed_segment_requests = Some(SegmentRequests::U64Pairs(segment_requests));
}
Self::new_generic(
pdu_header,
start_of_scope,
end_of_scope,
passed_segment_requests,
)
}
fn new_generic(
mut pdu_header: PduHeader,
start_of_scope: u64,
end_of_scope: u64,
segment_requests: Option<SegmentRequests<'seg_reqs>>,
) -> Result<NakPduCreator, PduError> {
// Force correct direction flag.
pdu_header.pdu_conf.direction = Direction::TowardsSender;
if let Some(ref segment_requests) = segment_requests {
match segment_requests {
SegmentRequests::U32Pairs(_) => {
if start_of_scope > u32::MAX as u64 || end_of_scope > u32::MAX as u64 {
return Err(PduError::InvalidStartOrEndOfScopeValue);
}
pdu_header.pdu_conf.file_flag = LargeFileFlag::Normal;
}
SegmentRequests::U64Pairs(_) => {
pdu_header.pdu_conf.file_flag = LargeFileFlag::Large;
}
}
};
let mut nak_pdu = Self {
pdu_header,
start_of_scope,
end_of_scope,
segment_requests,
};
nak_pdu.pdu_header.pdu_datafield_len = nak_pdu.calc_pdu_datafield_len() as u16;
Ok(nak_pdu)
}
pub fn start_of_scope(&self) -> u64 {
self.start_of_scope
}
pub fn end_of_scope(&self) -> u64 {
self.end_of_scope
}
pub fn segment_requests(&self) -> Option<&SegmentRequests> {
self.segment_requests.as_ref()
}
pub fn num_segment_reqs(&self) -> usize {
match &self.segment_requests {
Some(seg_reqs) => match seg_reqs {
SegmentRequests::U32Pairs(pairs) => pairs.len(),
SegmentRequests::U64Pairs(pairs) => pairs.len(),
},
None => 0,
}
}
pub fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn calc_pdu_datafield_len(&self) -> usize {
let mut datafield_len = 1;
if self.file_flag() == LargeFileFlag::Normal {
datafield_len += 8;
datafield_len += self.num_segment_reqs() * 8;
} else {
datafield_len += 16;
datafield_len += self.num_segment_reqs() * 16;
}
if self.crc_flag() == CrcFlag::WithCrc {
datafield_len += 2;
}
datafield_len
}
}
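// A minimal creation sketch for the NAK PDU creator above, assuming a valid `PduHeader`
// configured for normal file sizes; the start/end of scope values and the segment request
// pairs are placeholders.
#[allow(dead_code)]
fn nak_creator_sketch(pdu_header: PduHeader, buf: &mut [u8]) -> Result<usize, PduError> {
    let seg_reqs = [(0_u32, 500_u32), (1000, 1500)];
    let nak_pdu = NakPduCreator::new(pdu_header, 0, 1500, &seg_reqs)?;
    // The CRC is appended automatically if the CRC flag of the PDU configuration is set.
    nak_pdu.write_to_bytes(buf)
}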
impl CfdpPdu for NakPduCreator<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::NakPdu)
}
}
impl WritablePduPacket for NakPduCreator<'_> {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, PduError> {
let expected_len = self.len_written();
if buf.len() < expected_len {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: expected_len,
}
.into());
}
let mut current_idx = self.pdu_header.write_to_bytes(buf)?;
buf[current_idx] = FileDirectiveType::NakPdu as u8;
current_idx += 1;
let mut write_start_end_of_scope_normal = || {
let start_of_scope = u32::try_from(self.start_of_scope).unwrap();
let end_of_scope = u32::try_from(self.end_of_scope).unwrap();
buf[current_idx..current_idx + 4].copy_from_slice(&start_of_scope.to_be_bytes());
current_idx += 4;
buf[current_idx..current_idx + 4].copy_from_slice(&end_of_scope.to_be_bytes());
current_idx += 4;
};
if let Some(ref seg_reqs) = self.segment_requests {
match seg_reqs {
SegmentRequests::U32Pairs(pairs) => {
// Unwrap is okay here, the API should prevent invalid values which would trigger a
// panic here.
write_start_end_of_scope_normal();
for (next_start_offset, next_end_offset) in *pairs {
buf[current_idx..current_idx + 4]
.copy_from_slice(&next_start_offset.to_be_bytes());
current_idx += 4;
buf[current_idx..current_idx + 4]
.copy_from_slice(&next_end_offset.to_be_bytes());
current_idx += 4;
}
}
SegmentRequests::U64Pairs(pairs) => {
buf[current_idx..current_idx + 8]
.copy_from_slice(&self.start_of_scope.to_be_bytes());
current_idx += 8;
buf[current_idx..current_idx + 8]
.copy_from_slice(&self.end_of_scope.to_be_bytes());
current_idx += 8;
for (next_start_offset, next_end_offset) in *pairs {
buf[current_idx..current_idx + 8]
.copy_from_slice(&next_start_offset.to_be_bytes());
current_idx += 8;
buf[current_idx..current_idx + 8]
.copy_from_slice(&next_end_offset.to_be_bytes());
current_idx += 8;
}
}
}
} else {
write_start_end_of_scope_normal();
}
if self.crc_flag() == CrcFlag::WithCrc {
current_idx = add_pdu_crc(buf, current_idx);
}
Ok(current_idx)
}
fn len_written(&self) -> usize {
self.pdu_header.header_len() + self.calc_pdu_datafield_len()
}
}
/// Special iterator type for the NAK PDU which allows iterating over both normal and large
/// file segment requests.
#[derive(Debug)]
pub struct SegmentRequestIter<'a, T> {
seq_req_raw: &'a [u8],
current_idx: usize,
phantom: core::marker::PhantomData<T>,
}
pub trait SegReqFromBytes {
fn from_bytes(bytes: &[u8]) -> Self;
}
impl SegReqFromBytes for u32 {
fn from_bytes(bytes: &[u8]) -> u32 {
u32::from_be_bytes(bytes.try_into().unwrap())
}
}
impl SegReqFromBytes for u64 {
fn from_bytes(bytes: &[u8]) -> u64 {
u64::from_be_bytes(bytes.try_into().unwrap())
}
}
impl<'a, T> Iterator for SegmentRequestIter<'a, T>
where
T: SegReqFromBytes,
{
type Item = (T, T);
fn next(&mut self) -> Option<Self::Item> {
let value = self.next_at_offset(self.current_idx);
self.current_idx += 2 * size_of::<T>();
value
}
}
impl<'a, 'b> PartialEq<SegmentRequests<'a>> for SegmentRequestIter<'b, u32> {
fn eq(&self, other: &SegmentRequests) -> bool {
match other {
SegmentRequests::U32Pairs(pairs) => self.compare_pairs(pairs),
SegmentRequests::U64Pairs(pairs) => {
if pairs.is_empty() && self.seq_req_raw.is_empty() {
return true;
}
false
}
}
}
}
impl<'a, 'b> PartialEq<SegmentRequests<'a>> for SegmentRequestIter<'b, u64> {
fn eq(&self, other: &SegmentRequests) -> bool {
match other {
SegmentRequests::U32Pairs(pairs) => {
if pairs.is_empty() && self.seq_req_raw.is_empty() {
return true;
}
false
}
SegmentRequests::U64Pairs(pairs) => self.compare_pairs(pairs),
}
}
}
impl<'a, T> SegmentRequestIter<'a, T>
where
T: SegReqFromBytes + PartialEq,
{
fn compare_pairs(&self, pairs: &[(T, T)]) -> bool {
if pairs.is_empty() && self.seq_req_raw.is_empty() {
return true;
}
let size = size_of::<T>();
if pairs.len() * 2 * size != self.seq_req_raw.len() {
return false;
}
for (i, pair) in pairs.iter().enumerate() {
let next_val = self.next_at_offset(i * 2 * size).unwrap();
if next_val != *pair {
return false;
}
}
true
}
}
impl<T: SegReqFromBytes> SegmentRequestIter<'_, T> {
fn next_at_offset(&self, mut offset: usize) -> Option<(T, T)> {
if offset + size_of::<T>() * 2 > self.seq_req_raw.len() {
return None;
}
let start_offset = T::from_bytes(&self.seq_req_raw[offset..offset + size_of::<T>()]);
offset += size_of::<T>();
let end_offset = T::from_bytes(&self.seq_req_raw[offset..offset + size_of::<T>()]);
Some((start_offset, end_offset))
}
}
/// NAK PDU abstraction specialized in reading NAK PDUs from a raw bytestream.
///
/// This is a zero-copy abstraction where the segment requests can be read using a special
/// iterator API without the need to copy them.
///
/// The NAK format is expected to conform to CFDP chapter 5.2.6.
#[derive(Debug, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct NakPduReader<'seg_reqs> {
pdu_header: PduHeader,
start_of_scope: u64,
end_of_scope: u64,
seg_reqs_raw: &'seg_reqs [u8],
}
impl CfdpPdu for NakPduReader<'_> {
fn pdu_header(&self) -> &PduHeader {
&self.pdu_header
}
fn file_directive_type(&self) -> Option<FileDirectiveType> {
Some(FileDirectiveType::NakPdu)
}
}
impl<'seg_reqs> NakPduReader<'seg_reqs> {
pub fn new(buf: &'seg_reqs [u8]) -> Result<NakPduReader, PduError> {
Self::from_bytes(buf)
}
pub fn from_bytes(buf: &'seg_reqs [u8]) -> Result<NakPduReader, PduError> {
let (pdu_header, mut current_idx) = PduHeader::from_bytes(buf)?;
let full_len_without_crc = pdu_header.verify_length_and_checksum(buf)?;
// Minimum length of 9: 1 byte directive field plus start and end of scope (4 bytes each)
// for normal file size.
generic_length_checks_pdu_deserialization(buf, 9, full_len_without_crc)?;
let directive_type = FileDirectiveType::try_from(buf[current_idx]).map_err(|_| {
PduError::InvalidDirectiveType {
found: buf[current_idx],
expected: Some(FileDirectiveType::NakPdu),
}
})?;
if directive_type != FileDirectiveType::NakPdu {
return Err(PduError::WrongDirectiveType {
found: directive_type,
expected: FileDirectiveType::NakPdu,
});
}
current_idx += 1;
let start_of_scope;
let end_of_scope;
if pdu_header.common_pdu_conf().file_flag == LargeFileFlag::Large {
if current_idx + 16 > buf.len() {
return Err(PduError::ByteConversion(
ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: current_idx + 16,
},
));
}
start_of_scope =
u64::from_be_bytes(buf[current_idx..current_idx + 8].try_into().unwrap());
current_idx += 8;
end_of_scope =
u64::from_be_bytes(buf[current_idx..current_idx + 8].try_into().unwrap());
current_idx += 8;
} else {
start_of_scope =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()) as u64;
current_idx += 4;
end_of_scope =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap()) as u64;
current_idx += 4;
}
Ok(Self {
pdu_header,
start_of_scope,
end_of_scope,
seg_reqs_raw: &buf[current_idx..full_len_without_crc],
})
}
pub fn start_of_scope(&self) -> u64 {
self.start_of_scope
}
pub fn end_of_scope(&self) -> u64 {
self.end_of_scope
}
pub fn num_segment_reqs(&self) -> usize {
if self.seg_reqs_raw.is_empty() {
return 0;
}
if self.file_flag() == LargeFileFlag::Normal {
self.seg_reqs_raw.len() / 8
} else {
self.seg_reqs_raw.len() / 16
}
}
/// This function returns [None] if this NAK PDU contains segment requests for a large file.
pub fn get_normal_segment_requests_iterator(&self) -> Option<SegmentRequestIter<'_, u32>> {
if self.file_flag() == LargeFileFlag::Large {
return None;
}
Some(SegmentRequestIter {
seq_req_raw: self.seg_reqs_raw,
current_idx: 0,
phantom: PhantomData,
})
}
/// This function returns [None] if this NAK PDU contains segment requests for a normal file.
pub fn get_large_segment_requests_iterator(&self) -> Option<SegmentRequestIter<'_, u64>> {
if self.file_flag() == LargeFileFlag::Normal {
return None;
}
Some(SegmentRequestIter {
seq_req_raw: self.seg_reqs_raw,
current_idx: 0,
phantom: PhantomData,
})
}
}
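// A minimal reading sketch for the zero-copy reader above, assuming `raw` contains a full
// NAK PDU for a normal (u32) file size transfer; a large file transfer would use
// [NakPduReader::get_large_segment_requests_iterator] instead. The sketch sums up the total
// amount of re-requested file data.
#[allow(dead_code)]
fn nak_reader_sketch(raw: &[u8]) -> Result<u64, PduError> {
    let reader = NakPduReader::from_bytes(raw)?;
    let mut requested_bytes = 0_u64;
    if let Some(seg_req_iter) = reader.get_normal_segment_requests_iterator() {
        for (start_offset, end_offset) in seg_req_iter {
            // Each pair denotes the start and end offset of one missing file segment.
            requested_bytes += u64::from(end_offset.saturating_sub(start_offset));
        }
    }
    Ok(requested_bytes)
}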
impl<'a, 'b> PartialEq<NakPduCreator<'a>> for NakPduReader<'b> {
fn eq(&self, other: &NakPduCreator<'a>) -> bool {
if self.pdu_header() != other.pdu_header()
|| self.end_of_scope() != other.end_of_scope()
|| self.start_of_scope() != other.start_of_scope()
{
return false;
}
// Check if both segment requests are empty or None
match (self.seg_reqs_raw.is_empty(), other.segment_requests()) {
(true, None) => true,
(true, Some(seg_reqs)) => seg_reqs.is_empty(),
(false, None) => false,
_ => {
// Compare based on file_flag
if self.file_flag() == LargeFileFlag::Normal {
let normal_iter = self.get_normal_segment_requests_iterator().unwrap();
normal_iter == *other.segment_requests().unwrap()
} else {
let large_iter = self.get_large_segment_requests_iterator().unwrap();
large_iter == *other.segment_requests().unwrap()
}
}
}
}
}
#[cfg(test)]
mod tests {
use alloc::string::ToString;
use crate::cfdp::{
pdu::tests::{common_pdu_conf, verify_raw_header, TEST_DEST_ID, TEST_SEQ_NUM, TEST_SRC_ID},
PduType, TransmissionMode,
};
use super::*;
fn check_generic_fields(nak_pdu: &impl CfdpPdu) {
assert_eq!(nak_pdu.crc_flag(), CrcFlag::NoCrc);
assert_eq!(nak_pdu.file_flag(), LargeFileFlag::Normal);
assert_eq!(nak_pdu.pdu_type(), PduType::FileDirective);
assert_eq!(
nak_pdu.file_directive_type(),
Some(FileDirectiveType::NakPdu),
);
assert_eq!(nak_pdu.transmission_mode(), TransmissionMode::Acknowledged);
assert_eq!(nak_pdu.direction(), Direction::TowardsSender);
assert_eq!(nak_pdu.source_id(), TEST_SRC_ID.into());
assert_eq!(nak_pdu.dest_id(), TEST_DEST_ID.into());
assert_eq!(nak_pdu.transaction_seq_num(), TEST_SEQ_NUM.into());
}
#[test]
fn test_seg_request_api() {
let seg_req = SegmentRequests::U32Pairs(&[]);
assert!(seg_req.is_empty());
let seg_req = SegmentRequests::U64Pairs(&[]);
assert!(seg_req.is_empty());
}
#[test]
fn test_basic_creator() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu = NakPduCreator::new_no_segment_requests(pdu_header, 0, 0)
.expect("creating NAK PDU creator failed");
assert_eq!(nak_pdu.start_of_scope(), 0);
assert_eq!(nak_pdu.end_of_scope(), 0);
assert_eq!(nak_pdu.segment_requests(), None);
assert_eq!(nak_pdu.num_segment_reqs(), 0);
check_generic_fields(&nak_pdu);
}
#[test]
fn test_serialization_empty() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu = NakPduCreator::new_no_segment_requests(pdu_header, 100, 300)
.expect("creating NAK PDU creator failed");
assert_eq!(nak_pdu.start_of_scope(), 100);
assert_eq!(nak_pdu.end_of_scope(), 300);
let mut buf: [u8; 64] = [0; 64];
nak_pdu
.write_to_bytes(&mut buf)
.expect("writing NAK PDU to buffer failed");
verify_raw_header(nak_pdu.pdu_header(), &buf);
let mut current_idx = nak_pdu.pdu_header().header_len();
assert_eq!(current_idx + 9, nak_pdu.len_written());
assert_eq!(buf[current_idx], FileDirectiveType::NakPdu as u8);
current_idx += 1;
let start_of_scope =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(start_of_scope, 100);
current_idx += 4;
let end_of_scope =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(end_of_scope, 300);
current_idx += 4;
assert_eq!(current_idx, nak_pdu.len_written());
}
#[test]
fn test_serialization_two_segments() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu = NakPduCreator::new(pdu_header, 100, 300, &[(0, 0), (32, 64)])
.expect("creating NAK PDU creator failed");
let mut buf: [u8; 64] = [0; 64];
nak_pdu
.write_to_bytes(&mut buf)
.expect("writing NAK PDU to buffer failed");
verify_raw_header(nak_pdu.pdu_header(), &buf);
let mut current_idx = nak_pdu.pdu_header().header_len();
assert_eq!(current_idx + 9 + 16, nak_pdu.len_written());
assert_eq!(buf[current_idx], FileDirectiveType::NakPdu as u8);
current_idx += 1;
let start_of_scope =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(start_of_scope, 100);
current_idx += 4;
let end_of_scope =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(end_of_scope, 300);
current_idx += 4;
let first_seg_start =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(first_seg_start, 0);
current_idx += 4;
let first_seg_end =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(first_seg_end, 0);
current_idx += 4;
let second_seg_start =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(second_seg_start, 32);
current_idx += 4;
let second_seg_end =
u32::from_be_bytes(buf[current_idx..current_idx + 4].try_into().unwrap());
assert_eq!(second_seg_end, 64);
current_idx += 4;
assert_eq!(current_idx, nak_pdu.len_written());
}
#[test]
fn test_deserialization_empty() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu = NakPduCreator::new_no_segment_requests(pdu_header, 100, 300)
.expect("creating NAK PDU creator failed");
let mut buf: [u8; 64] = [0; 64];
nak_pdu
.write_to_bytes(&mut buf)
.expect("writing NAK PDU to buffer failed");
let nak_pdu_deser = NakPduReader::from_bytes(&buf).expect("deserializing NAK PDU failed");
assert_eq!(nak_pdu_deser, nak_pdu);
check_generic_fields(&nak_pdu_deser);
}
#[test]
fn test_deserialization_large_segments() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Large);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu =
NakPduCreator::new_large_file_size(pdu_header, 100, 300, &[(50, 100), (200, 300)])
.expect("creating NAK PDU creator failed");
let mut buf: [u8; 128] = [0; 128];
nak_pdu
.write_to_bytes(&mut buf)
.expect("writing NAK PDU to buffer failed");
let nak_pdu_deser = NakPduReader::from_bytes(&buf).expect("deserializing NAK PDU failed");
assert_eq!(nak_pdu_deser, nak_pdu);
assert_eq!(nak_pdu_deser.start_of_scope(), 100);
assert_eq!(nak_pdu_deser.end_of_scope(), 300);
assert_eq!(nak_pdu_deser.num_segment_reqs(), 2);
assert!(nak_pdu_deser
.get_large_segment_requests_iterator()
.is_some());
assert!(nak_pdu_deser
.get_normal_segment_requests_iterator()
.is_none());
assert_eq!(
nak_pdu_deser
.get_large_segment_requests_iterator()
.unwrap()
.count(),
2
);
for (idx, large_segments) in nak_pdu_deser
.get_large_segment_requests_iterator()
.unwrap()
.enumerate()
{
if idx == 0 {
assert_eq!(large_segments.0, 50);
assert_eq!(large_segments.1, 100);
} else {
assert_eq!(large_segments.0, 200);
assert_eq!(large_segments.1, 300);
}
}
}
#[test]
fn test_deserialization_normal_segments() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu = NakPduCreator::new(pdu_header, 100, 300, &[(50, 100), (200, 300)])
.expect("creating NAK PDU creator failed");
let mut buf: [u8; 128] = [0; 128];
nak_pdu
.write_to_bytes(&mut buf)
.expect("writing NAK PDU to buffer failed");
let nak_pdu_deser = NakPduReader::from_bytes(&buf).expect("deserializing NAK PDU failed");
assert_eq!(nak_pdu_deser, nak_pdu);
assert_eq!(nak_pdu_deser.start_of_scope(), 100);
assert_eq!(nak_pdu_deser.end_of_scope(), 300);
assert_eq!(nak_pdu_deser.num_segment_reqs(), 2);
assert!(nak_pdu_deser
.get_normal_segment_requests_iterator()
.is_some());
assert!(nak_pdu_deser
.get_large_segment_requests_iterator()
.is_none());
assert_eq!(
nak_pdu_deser
.get_normal_segment_requests_iterator()
.unwrap()
.count(),
2
);
for (idx, large_segments) in nak_pdu_deser
.get_normal_segment_requests_iterator()
.unwrap()
.enumerate()
{
if idx == 0 {
assert_eq!(large_segments.0, 50);
assert_eq!(large_segments.1, 100);
} else {
assert_eq!(large_segments.0, 200);
assert_eq!(large_segments.1, 300);
}
}
}
#[test]
fn test_empty_is_empty() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu_0 =
NakPduCreator::new(pdu_header, 100, 300, &[]).expect("creating NAK PDU creator failed");
let nak_pdu_1 = NakPduCreator::new_no_segment_requests(pdu_header, 100, 300)
.expect("creating NAK PDU creator failed");
assert_eq!(nak_pdu_0, nak_pdu_1);
// Assert the segment request is mapped to None.
assert!(nak_pdu_0.segment_requests().is_none());
}
#[test]
fn test_new_generic_invalid_input() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let u32_list = SegmentRequests::U32Pairs(&[(0, 50), (50, 100)]);
//let error = NakPduCreator::new_generic(pdu_header, 100, 300, Some(u32_list));
let error = NakPduCreator::new_generic(
pdu_header,
u32::MAX as u64 + 1,
u32::MAX as u64 + 2,
Some(u32_list),
);
assert!(error.is_err());
let error = error.unwrap_err();
if let PduError::InvalidStartOrEndOfScopeValue = error {
assert_eq!(
error.to_string(),
"invalid start or end of scope for NAK PDU"
);
} else {
panic!("unexpected error {error}");
}
}
#[test]
fn test_target_buf_too_small() {
let pdu_conf = common_pdu_conf(CrcFlag::NoCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu = NakPduCreator::new_no_segment_requests(pdu_header, 100, 300)
.expect("creating NAK PDU creator failed");
assert_eq!(nak_pdu.start_of_scope(), 100);
assert_eq!(nak_pdu.end_of_scope(), 300);
let mut buf: [u8; 5] = [0; 5];
let error = nak_pdu.write_to_bytes(&mut buf);
assert!(error.is_err());
let e = error.unwrap_err();
match e {
PduError::ByteConversion(conv_error) => match conv_error {
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(expected, nak_pdu.len_written());
assert_eq!(found, 5);
}
_ => panic!("unexpected error {conv_error}"),
},
_ => panic!("unexpected error {e}"),
}
}
#[test]
fn test_with_crc() {
let pdu_conf = common_pdu_conf(CrcFlag::WithCrc, LargeFileFlag::Normal);
let pdu_header = PduHeader::new_no_file_data(pdu_conf, 0);
let nak_pdu = NakPduCreator::new_no_segment_requests(pdu_header, 0, 0)
.expect("creating NAK PDU creator failed");
let mut nak_vec = nak_pdu.to_vec().expect("writing NAK to vector failed");
assert_eq!(nak_vec.len(), pdu_header.header_len() + 9 + 2);
assert_eq!(nak_vec.len(), nak_pdu.len_written());
let nak_pdu_deser = NakPduReader::new(&nak_vec).expect("reading NAK PDU failed");
assert_eq!(nak_pdu_deser, nak_pdu);
nak_vec[nak_pdu.len_written() - 1] -= 1;
let nak_pdu_deser = NakPduReader::new(&nak_vec);
assert!(nak_pdu_deser.is_err());
if let Err(PduError::ChecksumError(raw)) = nak_pdu_deser {
assert_eq!(
raw,
u16::from_be_bytes(nak_vec[nak_pdu.len_written() - 2..].try_into().unwrap())
);
}
}
}

1303
src/cfdp/tlv/mod.rs Normal file

File diff suppressed because it is too large

173
src/cfdp/tlv/msg_to_user.rs Normal file

@ -0,0 +1,173 @@
//! Abstractions for the Message to User CFDP TLV subtype.
use super::{GenericTlv, Tlv, TlvLvError, TlvType, TlvTypeField, WritableTlv};
use crate::ByteConversionError;
use delegate::delegate;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct MsgToUserTlv<'data> {
pub tlv: Tlv<'data>,
}
impl<'data> MsgToUserTlv<'data> {
/// Create a new message to user TLV where the type field is set correctly.
pub fn new(value: &'data [u8]) -> Result<MsgToUserTlv<'data>, TlvLvError> {
Ok(Self {
tlv: Tlv::new(TlvType::MsgToUser, value)?,
})
}
delegate! {
to self.tlv {
pub fn value(&self) -> &[u8];
/// Helper method to retrieve the length of the value. Simply calls the [slice::len] method of
/// [Self::value].
pub fn len_value(&self) -> usize;
/// Returns the full raw length, including the type and length bytes.
pub fn len_full(&self) -> usize;
/// Checks whether the value field is empty.
pub fn is_empty(&self) -> bool;
/// If the TLV was generated from a raw bytestream using [Self::from_bytes], the raw start
/// of the TLV can be retrieved with this method.
pub fn raw_data(&self) -> Option<&[u8]>;
}
}
pub fn is_standard_tlv(&self) -> bool {
true
}
pub fn tlv_type(&self) -> Option<TlvType> {
Some(TlvType::MsgToUser)
}
/// Check whether this message is a reserved CFDP message like a Proxy Operation Message.
pub fn is_reserved_cfdp_msg(&self) -> bool {
if self.value().len() < 4 {
return false;
}
let value = self.value();
if value[0] == b'c' && value[1] == b'f' && value[2] == b'd' && value[3] == b'p' {
return true;
}
false
}
/// This is a thin wrapper around [Tlv::from_bytes] with the additional type check.
pub fn from_bytes(buf: &'data [u8]) -> Result<MsgToUserTlv<'data>, TlvLvError> {
let msg_to_user = Self {
tlv: Tlv::from_bytes(buf)?,
};
match msg_to_user.tlv.tlv_type_field() {
TlvTypeField::Standard(tlv_type) => {
if tlv_type != TlvType::MsgToUser {
return Err(TlvLvError::InvalidTlvTypeField {
found: tlv_type as u8,
expected: Some(TlvType::MsgToUser as u8),
});
}
}
TlvTypeField::Custom(raw) => {
return Err(TlvLvError::InvalidTlvTypeField {
found: raw,
expected: Some(TlvType::MsgToUser as u8),
});
}
}
Ok(msg_to_user)
}
}
impl WritableTlv for MsgToUserTlv<'_> {
fn len_written(&self) -> usize {
self.len_full()
}
delegate!(
to self.tlv {
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
}
);
}
impl GenericTlv for MsgToUserTlv<'_> {
fn tlv_type_field(&self) -> TlvTypeField {
self.tlv.tlv_type_field()
}
}
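// A minimal usage sketch for the Message to User TLV abstraction above. The payload bytes are
// placeholders; only payloads starting with the "cfdp" identifier are treated as reserved
// CFDP messages.
#[allow(dead_code)]
fn msg_to_user_sketch() -> Result<(), TlvLvError> {
    let msg_to_user = MsgToUserTlv::new(&[1, 2, 3, 4])?;
    debug_assert!(!msg_to_user.is_reserved_cfdp_msg());
    let mut buf: [u8; 16] = [0; 16];
    let written = msg_to_user.write_to_bytes(&mut buf).unwrap();
    debug_assert_eq!(written, msg_to_user.len_full());
    let read_back = MsgToUserTlv::from_bytes(&buf)?;
    debug_assert_eq!(read_back, msg_to_user);
    Ok(())
}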
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_basic() {
let custom_value: [u8; 4] = [1, 2, 3, 4];
let msg_to_user = MsgToUserTlv::new(&custom_value);
assert!(msg_to_user.is_ok());
let msg_to_user = msg_to_user.unwrap();
assert!(msg_to_user.is_standard_tlv());
assert_eq!(msg_to_user.tlv_type().unwrap(), TlvType::MsgToUser);
assert_eq!(
msg_to_user.tlv_type_field(),
TlvTypeField::Standard(TlvType::MsgToUser)
);
assert_eq!(msg_to_user.value(), custom_value);
assert_eq!(msg_to_user.value().len(), 4);
assert_eq!(msg_to_user.len_value(), 4);
assert_eq!(msg_to_user.len_full(), 6);
assert!(!msg_to_user.is_empty());
assert!(msg_to_user.raw_data().is_none());
assert!(!msg_to_user.is_reserved_cfdp_msg());
}
#[test]
fn test_reserved_msg_serialization() {
let custom_value: [u8; 4] = [1, 2, 3, 4];
let msg_to_user = MsgToUserTlv::new(&custom_value).unwrap();
let mut buf: [u8; 6] = [0; 6];
msg_to_user.write_to_bytes(&mut buf).unwrap();
assert_eq!(
buf,
[
TlvType::MsgToUser as u8,
custom_value.len() as u8,
1,
2,
3,
4
]
);
}
#[test]
fn test_reserved_msg_deserialization() {
let custom_value: [u8; 3] = [1, 2, 3];
let msg_to_user = MsgToUserTlv::new(&custom_value).unwrap();
let msg_to_user_vec = msg_to_user.to_vec();
let msg_to_user_from_bytes = MsgToUserTlv::from_bytes(&msg_to_user_vec).unwrap();
assert!(!msg_to_user.is_reserved_cfdp_msg());
assert_eq!(msg_to_user_from_bytes, msg_to_user);
assert_eq!(msg_to_user_from_bytes.value(), msg_to_user.value());
assert_eq!(msg_to_user_from_bytes.tlv_type(), msg_to_user.tlv_type());
}
#[test]
fn test_reserved_msg_deserialization_invalid_type() {
let trash: [u8; 5] = [TlvType::FlowLabel as u8, 3, 1, 2, 3];
let error = MsgToUserTlv::from_bytes(&trash).unwrap_err();
if let TlvLvError::InvalidTlvTypeField { found, expected } = error {
assert_eq!(found, TlvType::FlowLabel as u8);
assert_eq!(expected, Some(TlvType::MsgToUser as u8));
} else {
panic!("Wrong error type returned: {:?}", error);
}
}
#[test]
fn test_reserved_msg() {
let reserved_str = "cfdp";
let msg_to_user = MsgToUserTlv::new(reserved_str.as_bytes());
assert!(msg_to_user.is_ok());
let msg_to_user = msg_to_user.unwrap();
assert!(msg_to_user.is_reserved_cfdp_msg());
}
}


@ -5,6 +5,7 @@ use serde::{Deserialize, Serialize};
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum Subservice {
// Regular HK
@ -29,3 +30,33 @@ pub enum Subservice {
TcGenerateOneShotDiag = 28,
TcModifyDiagCollectionInterval = 32,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_try_from_u8() {
let hk_report_subservice_raw = 25;
let hk_report: Subservice = Subservice::try_from(hk_report_subservice_raw).unwrap();
assert_eq!(hk_report, Subservice::TmHkPacket);
}
#[test]
fn test_into_u8() {
let hk_report_raw: u8 = Subservice::TmHkPacket.into();
assert_eq!(hk_report_raw, 25);
}
#[test]
fn test_partial_eq() {
let hk_report_raw = Subservice::TmHkPacket;
assert_ne!(hk_report_raw, Subservice::TcGenerateOneShotHk);
assert_eq!(hk_report_raw, Subservice::TmHkPacket);
}
#[test]
fn test_copy_clone() {
let hk_report = Subservice::TmHkPacket;
let hk_report_copy = hk_report;
assert_eq!(hk_report, hk_report_copy);
}
}


@ -1,12 +1,13 @@
//! Common definitions and helpers required to create PUS TMTC packets according to
//! [ECSS-E-ST-70-41C](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/)
//!
//! You can find the PUS telecommand definitions in the [crate::tc] module and the PUS telemetry definitions
//! inside the [crate::tm] module.
use crate::{ByteConversionError, CcsdsPacket, SizeMissmatch};
//! You can find the PUS telecommand types in the [tc] module and the PUS telemetry
//! types inside the [tm] module.
use crate::{ByteConversionError, CcsdsPacket, CRC_CCITT_FALSE};
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use core::fmt::{Debug, Display, Formatter};
use core::mem::size_of;
use crc::{Crc, CRC_16_IBM_3740};
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
@ -16,14 +17,12 @@ use std::error::Error;
pub mod event;
pub mod hk;
pub mod scheduling;
pub mod tc;
pub mod tm;
pub mod verification;
pub type CrcType = u16;
/// CRC algorithm used by the PUS standard.
pub const CRC_CCITT_FALSE: Crc<u16> = Crc::<u16>::new(&CRC_16_IBM_3740);
pub const CCSDS_HEADER_LEN: usize = size_of::<crate::zc::SpHeader>();
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
@ -74,6 +73,8 @@ pub enum PusServiceId {
/// All PUS versions. Only PUS C is supported by this library.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum PusVersion {
EsaPus = 0,
PusA = 1,
@ -95,8 +96,9 @@ impl TryFrom<u8> for PusVersion {
}
/// ECSS Packet Type Codes (PTC)s.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum PacketTypeCodes {
Boolean = 1,
Enumerated = 2,
@ -115,9 +117,10 @@ pub enum PacketTypeCodes {
pub type Ptc = PacketTypeCodes;
/// ECSS Packet Field Codes (PFC)s for the unsigned [Ptc].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum UnsignedPfc {
#[repr(u8)]
pub enum PfcUnsigned {
OneByte = 4,
TwelveBits = 8,
TwoBytes = 12,
@ -131,9 +134,10 @@ pub enum UnsignedPfc {
}
/// ECSS Packet Field Codes (PFC)s for the real (floating point) [Ptc].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[derive(Debug, Copy, Clone, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum RealPfc {
#[repr(u8)]
pub enum PfcReal {
/// 4 octets simple precision format (IEEE)
Float = 1,
/// 8 octets simple precision format (IEEE)
@ -146,14 +150,13 @@ pub enum RealPfc {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum PusError {
VersionNotSupported(PusVersion),
IncorrectCrc(u16),
RawDataTooShort(usize),
NoRawData,
ChecksumFailure(u16),
/// CRC16 needs to be calculated first
CrcCalculationMissing,
ByteConversionError(ByteConversionError),
ByteConversion(ByteConversionError),
}
impl Display for PusError {
@ -162,23 +165,14 @@ impl Display for PusError {
PusError::VersionNotSupported(v) => {
write!(f, "PUS version {v:?} not supported")
}
PusError::IncorrectCrc(crc) => {
write!(f, "crc16 {crc:#04x} is incorrect")
}
PusError::RawDataTooShort(size) => {
write!(
f,
"deserialization error, provided raw data with size {size} too short"
)
}
PusError::NoRawData => {
write!(f, "no raw data provided")
PusError::ChecksumFailure(crc) => {
write!(f, "checksum verification for crc16 {crc:#06x} failed")
}
PusError::CrcCalculationMissing => {
write!(f, "crc16 was not calculated")
}
PusError::ByteConversionError(e) => {
write!(f, "low level byte conversion error: {e}")
PusError::ByteConversion(e) => {
write!(f, "pus error: {e}")
}
}
}
@ -187,7 +181,7 @@ impl Display for PusError {
#[cfg(feature = "std")]
impl Error for PusError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
if let PusError::ByteConversionError(e) = self {
if let PusError::ByteConversion(e) = self {
return Some(e);
}
None
@ -196,7 +190,7 @@ impl Error for PusError {
impl From<ByteConversionError> for PusError {
fn from(e: ByteConversionError) -> Self {
PusError::ByteConversionError(e)
PusError::ByteConversion(e)
}
}
@ -208,14 +202,16 @@ pub trait PusPacket: CcsdsPacket {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn user_data(&self) -> Option<&[u8]>;
fn user_data(&self) -> &[u8];
fn crc16(&self) -> Option<u16>;
}
pub(crate) fn crc_from_raw_data(raw_data: &[u8]) -> Result<u16, PusError> {
pub(crate) fn crc_from_raw_data(raw_data: &[u8]) -> Result<u16, ByteConversionError> {
if raw_data.len() < 2 {
return Err(PusError::RawDataTooShort(raw_data.len()));
return Err(ByteConversionError::FromSliceTooSmall {
found: raw_data.len(),
expected: 2,
});
}
Ok(u16::from_be_bytes(
raw_data[raw_data.len() - 2..raw_data.len()]
@ -230,52 +226,48 @@ pub(crate) fn calc_pus_crc16(bytes: &[u8]) -> u16 {
digest.finalize()
}
pub(crate) fn crc_procedure(
calc_on_serialization: bool,
cached_crc16: &Option<u16>,
start_idx: usize,
curr_idx: usize,
slice: &[u8],
) -> Result<u16, PusError> {
let crc16;
if calc_on_serialization {
crc16 = calc_pus_crc16(&slice[start_idx..curr_idx])
} else if cached_crc16.is_none() {
return Err(PusError::CrcCalculationMissing);
} else {
crc16 = cached_crc16.unwrap();
}
Ok(crc16)
}
pub(crate) fn user_data_from_raw(
current_idx: usize,
total_len: usize,
raw_data_len: usize,
slice: &[u8],
) -> Result<Option<&[u8]>, PusError> {
) -> Result<&[u8], ByteConversionError> {
match current_idx {
_ if current_idx == total_len - 2 => Ok(None),
_ if current_idx > total_len - 2 => Err(PusError::RawDataTooShort(raw_data_len)),
_ => Ok(Some(&slice[current_idx..total_len - 2])),
_ if current_idx > total_len - 2 => Err(ByteConversionError::FromSliceTooSmall {
found: total_len - 2,
expected: current_idx,
}),
_ => Ok(&slice[current_idx..total_len - 2]),
}
}
pub(crate) fn verify_crc16_from_raw(raw_data: &[u8], crc16: u16) -> Result<(), PusError> {
pub(crate) fn verify_crc16_ccitt_false_from_raw_to_pus_error(
raw_data: &[u8],
crc16: u16,
) -> Result<(), PusError> {
verify_crc16_ccitt_false_from_raw(raw_data)
.then_some(())
.ok_or(PusError::ChecksumFailure(crc16))
}
pub(crate) fn verify_crc16_ccitt_false_from_raw(raw_data: &[u8]) -> bool {
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(raw_data);
if digest.finalize() == 0 {
return Ok(());
return true;
}
Err(PusError::IncorrectCrc(crc16))
false
}
macro_rules! ccsds_impl {
() => {
delegate!(to self.sp_header {
#[inline]
fn ccsds_version(&self) -> u8;
#[inline]
fn packet_id(&self) -> crate::PacketId;
#[inline]
fn psc(&self) -> crate::PacketSequenceCtrl;
#[inline]
fn data_len(&self) -> u16;
});
}
@ -284,13 +276,17 @@ macro_rules! ccsds_impl {
macro_rules! sp_header_impls {
() => {
delegate!(to self.sp_header {
#[inline]
pub fn set_apid(&mut self, apid: u16) -> bool;
#[inline]
pub fn set_seq_count(&mut self, seq_count: u16) -> bool;
#[inline]
pub fn set_seq_flags(&mut self, seq_flag: SequenceFlags);
});
}
}
use crate::util::{GenericUnsignedByteField, ToBeBytes, UnsignedEnum};
pub(crate) use ccsds_impl;
pub(crate) use sp_header_impls;
@ -298,118 +294,134 @@ pub(crate) use sp_header_impls;
/// and an unsigned value. The trait makes no assumptions about the actual type of the unsigned
/// value and only requires implementors to implement a function which writes the enumeration into
/// a raw byte format.
pub trait EcssEnumeration {
pub trait EcssEnumeration: UnsignedEnum {
/// Packet Format Code, which denotes the number of bits of the enumeration
fn pfc(&self) -> u8;
fn byte_width(&self) -> usize {
(self.pfc() / 8) as usize
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<(), ByteConversionError>;
}
pub trait EcssEnumerationExt: EcssEnumeration + Debug + Copy + Clone + PartialEq + Eq {}
pub trait ToBeBytes {
type ByteArray: AsRef<[u8]>;
fn to_be_bytes(&self) -> Self::ByteArray;
}
impl ToBeBytes for () {
type ByteArray = [u8; 0];
fn to_be_bytes(&self) -> Self::ByteArray {
[]
}
}
impl ToBeBytes for u8 {
type ByteArray = [u8; 1];
fn to_be_bytes(&self) -> Self::ByteArray {
u8::to_be_bytes(*self)
}
}
impl ToBeBytes for u16 {
type ByteArray = [u8; 2];
fn to_be_bytes(&self) -> Self::ByteArray {
u16::to_be_bytes(*self)
}
}
impl ToBeBytes for u32 {
type ByteArray = [u8; 4];
fn to_be_bytes(&self) -> Self::ByteArray {
u32::to_be_bytes(*self)
}
}
impl ToBeBytes for u64 {
type ByteArray = [u8; 8];
fn to_be_bytes(&self) -> Self::ByteArray {
u64::to_be_bytes(*self)
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct GenericEcssEnumWrapper<TYPE> {
val: TYPE,
pub struct GenericEcssEnumWrapper<TYPE: Copy + Into<u64>> {
field: GenericUnsignedByteField<TYPE>,
}
impl<TYPE> GenericEcssEnumWrapper<TYPE> {
impl<TYPE: Copy + Into<u64>> GenericEcssEnumWrapper<TYPE> {
pub const fn ptc() -> PacketTypeCodes {
PacketTypeCodes::Enumerated
}
pub const fn value_typed(&self) -> TYPE {
self.field.value_typed()
}
pub fn new(val: TYPE) -> Self {
Self { val }
Self {
field: GenericUnsignedByteField::new(val),
}
}
}
impl<TYPE: ToBeBytes> EcssEnumeration for GenericEcssEnumWrapper<TYPE> {
impl<TYPE: Copy + ToBeBytes + Into<u64>> UnsignedEnum for GenericEcssEnumWrapper<TYPE> {
fn size(&self) -> usize {
(self.pfc() / 8) as usize
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.field.write_to_be_bytes(buf)
}
fn value(&self) -> u64 {
self.field.value()
}
}
impl<TYPE: Copy + ToBeBytes + Into<u64>> EcssEnumeration for GenericEcssEnumWrapper<TYPE> {
fn pfc(&self) -> u8 {
size_of::<TYPE>() as u8 * 8_u8
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<(), ByteConversionError> {
if buf.len() < self.byte_width() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: self.byte_width(),
}));
}
buf[0..self.byte_width()].copy_from_slice(self.val.to_be_bytes().as_ref());
Ok(())
}
}
impl<TYPE: Debug + Copy + Clone + PartialEq + Eq + ToBeBytes> EcssEnumerationExt
impl<TYPE: Debug + Copy + Clone + PartialEq + Eq + ToBeBytes + Into<u64>> EcssEnumerationExt
for GenericEcssEnumWrapper<TYPE>
{
}
pub type EcssEnumU8 = GenericEcssEnumWrapper<u8>;
pub type EcssEnumU16 = GenericEcssEnumWrapper<u16>;
pub type EcssEnumU32 = GenericEcssEnumWrapper<u32>;
pub type EcssEnumU64 = GenericEcssEnumWrapper<u64>;
impl<T: Copy + Into<u64>> From<T> for GenericEcssEnumWrapper<T> {
fn from(value: T) -> Self {
Self::new(value)
}
}
macro_rules! generic_ecss_enum_typedefs_and_from_impls {
($($ty:ty => $Enum:ident),*) => {
$(
pub type $Enum = GenericEcssEnumWrapper<$ty>;
impl From<$Enum> for $ty {
fn from(value: $Enum) -> Self {
value.value_typed()
}
}
)*
};
}
// Generates EcssEnum<$TY> type definitions as well as a From<$TY> for EcssEnum<$TY>
// implementation.
generic_ecss_enum_typedefs_and_from_impls! {
u8 => EcssEnumU8,
u16 => EcssEnumU16,
u32 => EcssEnumU32,
u64 => EcssEnumU64
}
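As a quick usage sketch for the generated type aliases (import paths as used within this diff, so treat them as assumptions), an enumeration value can be serialized into a buffer and converted back into its plain integer type:

```rust
use spacepackets::ecss::EcssEnumU16;
use spacepackets::util::UnsignedEnum;

fn main() {
    let mode = EcssEnumU16::new(0x1f2f);
    let mut buf = [0u8; 2];
    // Serialize the enumeration value in big-endian byte order.
    mode.write_to_be_bytes(&mut buf).expect("buffer too small");
    assert_eq!(buf, [0x1f, 0x2f]);
    // The From impl generated by the macro converts back to the plain integer type.
    let raw: u16 = mode.into();
    assert_eq!(raw, 0x1f2f);
}
```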
/// Generic trait for PUS packet abstractions which can be written to a raw slice as their raw
/// byte representation. This is especially useful for generic abstractions which depend only
/// on the serialization of those packets.
pub trait WritablePusPacket {
fn len_written(&self) -> usize;
fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError>;
#[cfg(feature = "alloc")]
fn to_vec(&self) -> Result<Vec<u8>, PusError> {
// This is the correct way to do this. See
// [this issue](https://github.com/rust-lang/rust-clippy/issues/4483) for caveats of more
// "efficient" implementations.
let mut vec = alloc::vec![0; self.len_written()];
self.write_to_bytes(&mut vec)?;
Ok(vec)
}
}
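Generic code can rely on this trait alone to serialize any PUS packet. A hypothetical helper, sketched here purely for illustration:

```rust
use spacepackets::ecss::{PusError, WritablePusPacket};

// Hypothetical helper: serializes any writable PUS packet into a caller-provided
// buffer and returns the slice which was actually written.
fn serialize_packet<'a>(
    packet: &impl WritablePusPacket,
    buf: &'a mut [u8],
) -> Result<&'a [u8], PusError> {
    let written = packet.write_to_bytes(buf)?;
    Ok(&buf[0..written])
}
```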
#[cfg(test)]
mod tests {
use crate::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU8, EcssEnumeration};
use alloc::string::ToString;
use crate::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU8, UnsignedEnum};
use crate::ByteConversionError;
use super::*;
#[cfg(feature = "serde")]
use crate::tests::generic_serde_test;
#[test]
fn test_enum_u8() {
let mut buf = [0, 0, 0];
let my_enum = EcssEnumU8::new(1);
assert_eq!(EcssEnumU8::ptc(), Ptc::Enumerated);
assert_eq!(my_enum.size(), 1);
assert_eq!(my_enum.pfc(), 8);
my_enum
.write_to_be_bytes(&mut buf[1..2])
.expect("To byte conversion of u8 failed");
assert_eq!(buf[1], 1);
assert_eq!(my_enum.value(), 1);
assert_eq!(my_enum.value_typed(), 1);
let enum_as_u8: u8 = my_enum.into();
assert_eq!(enum_as_u8, 1);
let vec = my_enum.to_vec();
assert_eq!(vec, buf[1..2]);
}
#[test]
@ -419,8 +431,16 @@ mod tests {
my_enum
.write_to_be_bytes(&mut buf[1..3])
.expect("To byte conversion of u8 failed");
assert_eq!(my_enum.size(), 2);
assert_eq!(my_enum.pfc(), 16);
assert_eq!(buf[1], 0x1f);
assert_eq!(buf[2], 0x2f);
assert_eq!(my_enum.value(), 0x1f2f);
assert_eq!(my_enum.value_typed(), 0x1f2f);
let enum_as_raw: u16 = my_enum.into();
assert_eq!(enum_as_raw, 0x1f2f);
let vec = my_enum.to_vec();
assert_eq!(vec, buf[1..3]);
}
#[test]
@ -431,9 +451,9 @@ mod tests {
assert!(res.is_err());
let error = res.unwrap_err();
match error {
ByteConversionError::ToSliceTooSmall(missmatch) => {
assert_eq!(missmatch.expected, 2);
assert_eq!(missmatch.found, 1);
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(expected, 2);
assert_eq!(found, 1);
}
_ => {
panic!("Unexpected error {:?}", error);
@ -452,6 +472,12 @@ mod tests {
assert_eq!(buf[2], 0x2f);
assert_eq!(buf[3], 0x3f);
assert_eq!(buf[4], 0x4f);
assert_eq!(my_enum.value(), 0x1f2f3f4f);
assert_eq!(my_enum.value_typed(), 0x1f2f3f4f);
let enum_as_raw: u32 = my_enum.into();
assert_eq!(enum_as_raw, 0x1f2f3f4f);
let vec = my_enum.to_vec();
assert_eq!(vec, buf[1..5]);
}
#[test]
@ -462,13 +488,108 @@ mod tests {
assert!(res.is_err());
let error = res.unwrap_err();
match error {
ByteConversionError::ToSliceTooSmall(missmatch) => {
assert_eq!(missmatch.expected, 4);
assert_eq!(missmatch.found, 3);
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(expected, 4);
assert_eq!(found, 3);
}
_ => {
panic!("Unexpected error {:?}", error);
}
}
}
#[test]
fn test_enum_u64() {
let mut buf = [0; 8];
let my_enum = EcssEnumU64::new(0x1f2f3f4f5f);
my_enum
.write_to_be_bytes(&mut buf)
.expect("To byte conversion of u64 failed");
assert_eq!(buf[3], 0x1f);
assert_eq!(buf[4], 0x2f);
assert_eq!(buf[5], 0x3f);
assert_eq!(buf[6], 0x4f);
assert_eq!(buf[7], 0x5f);
assert_eq!(my_enum.value(), 0x1f2f3f4f5f);
assert_eq!(my_enum.value_typed(), 0x1f2f3f4f5f);
let enum_as_raw: u64 = my_enum.into();
assert_eq!(enum_as_raw, 0x1f2f3f4f5f);
assert_eq!(u64::from_be_bytes(buf), 0x1f2f3f4f5f);
let vec = my_enum.to_vec();
assert_eq!(vec, buf);
}
#[test]
fn test_pus_error_display() {
let unsupport_version = PusError::VersionNotSupported(super::PusVersion::EsaPus);
let write_str = unsupport_version.to_string();
assert_eq!(write_str, "PUS version EsaPus not supported")
}
#[test]
fn test_service_id_from_u8() {
let verification_id_raw = 1;
let verification_id = PusServiceId::try_from(verification_id_raw).unwrap();
assert_eq!(verification_id, PusServiceId::Verification);
}
#[test]
fn test_ptc_from_u8() {
let ptc_raw = Ptc::AbsoluteTime as u8;
let ptc = Ptc::try_from(ptc_raw).unwrap();
assert_eq!(ptc, Ptc::AbsoluteTime);
}
#[test]
fn test_unsigned_pfc_from_u8() {
let pfc_raw = PfcUnsigned::OneByte as u8;
let pfc = PfcUnsigned::try_from(pfc_raw).unwrap();
assert_eq!(pfc, PfcUnsigned::OneByte);
}
#[test]
fn test_real_pfc_from_u8() {
let pfc_raw = PfcReal::Double as u8;
let pfc = PfcReal::try_from(pfc_raw).unwrap();
assert_eq!(pfc, PfcReal::Double);
}
#[test]
fn test_pus_error_eq_impl() {
assert_eq!(
PusError::VersionNotSupported(PusVersion::EsaPus),
PusError::VersionNotSupported(PusVersion::EsaPus)
);
}
#[test]
fn test_pus_error_clonable() {
let pus_error = PusError::ChecksumFailure(0x0101);
let cloned = pus_error;
assert_eq!(pus_error, cloned);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_pus_service_id() {
generic_serde_test(PusServiceId::Verification);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_ptc() {
generic_serde_test(Ptc::AbsoluteTime);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_pfc_unsigned() {
generic_serde_test(PfcUnsigned::EightBytes);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_pfc_real() {
generic_serde_test(PfcReal::Double);
}
}


@ -54,6 +54,7 @@ pub enum SchedStatus {
}
impl From<bool> for SchedStatus {
#[inline]
fn from(value: bool) -> Self {
if value {
SchedStatus::Enabled
@ -76,6 +77,8 @@ pub enum TimeWindowType {
#[cfg(test)]
mod tests {
use super::*;
#[cfg(feature = "serde")]
use crate::tests::generic_serde_test;
#[test]
fn test_bool_conv_0() {
@ -102,4 +105,22 @@ mod tests {
let subservice: Subservice = 22u8.try_into().unwrap();
assert_eq!(subservice, Subservice::TcCreateScheduleGroup);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_subservice_id() {
generic_serde_test(Subservice::TcEnableScheduling);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_sched_status() {
generic_serde_test(SchedStatus::Enabled);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_time_window_type() {
generic_serde_test(TimeWindowType::SelectAll);
}
}

File diff suppressed because it is too large

src/ecss/tm.rs (new file, 1331 lines)

File diff suppressed because it is too large


@ -7,6 +7,8 @@
//!
//! - Space Packet implementation according to
//! [CCSDS Blue Book 133.0-B-2](https://public.ccsds.org/Pubs/133x0b2e1.pdf)
//! - CCSDS File Delivery Protocol (CFDP) packet implementations according to
//! [CCSDS Blue Book 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf)
//! - PUS Telecommand and PUS Telemetry implementation according to the
//! [ECSS-E-ST-70-41C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
//! - CUC (CCSDS Unsegmented Time Code) implementation according to
@ -20,10 +22,6 @@
//!
//! `spacepackets` supports various runtime environments and is also suitable for `no_std` environments.
//!
//! It also offers optional support for [`serde`](https://serde.rs/). This allows serializing and
//! deserializing them with an appropriate `serde` provider like
//! [`postcard`](https://github.com/jamesmunns/postcard).
//!
//! ### Default features
//!
//! - [`std`](https://doc.rust-lang.org/std/): Enables functionality relying on the standard library.
@ -34,7 +32,11 @@
//! ### Optional features
//!
//! - [`serde`](https://serde.rs/): Adds `serde` support for most types by adding `Serialize` and
//! `Deserialize` `derive`s
//! `Deserialize` `derive`s.
//! - [`chrono`](https://crates.io/crates/chrono): Add basic support for the `chrono` time library.
//! - [`timelib`](https://crates.io/crates/time): Add basic support for the `time` time library.
//! - [`defmt`](https://defmt.ferrous-systems.com/): Add support for `defmt` by adding the
//! [`defmt::Format`](https://defmt.ferrous-systems.com/format) derive on many types.
//!
//! ## Module
//!
@ -46,54 +48,64 @@
//!
//! ```rust
//! use spacepackets::SpHeader;
//! let sp_header = SpHeader::tc_unseg(0x42, 12, 1).expect("Error creating CCSDS TC header");
//! let sp_header = SpHeader::new_for_unseg_tc_checked(0x42, 12, 1).expect("error creating CCSDS TC header");
//! println!("{:?}", sp_header);
//! let mut ccsds_buf: [u8; 32] = [0; 32];
//! sp_header.write_to_be_bytes(&mut ccsds_buf).expect("Writing CCSDS TC header failed");
//! println!("{:x?}", &ccsds_buf[0..6]);
//! ```
#![no_std]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
#![cfg_attr(docs_rs, feature(doc_auto_cfg))]
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(any(feature = "std", test))]
extern crate std;
use crate::ecss::CCSDS_HEADER_LEN;
use core::fmt::{Display, Formatter};
use core::{
fmt::{Debug, Display, Formatter},
hash::Hash,
};
use crc::{Crc, CRC_16_IBM_3740};
use delegate::delegate;
#[cfg(feature = "std")]
use std::error::Error;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
pub mod cfdp;
pub mod ecss;
pub mod tc;
pub mod time;
pub mod tm;
pub mod util;
mod private {
pub trait Sealed {}
}
pub const CCSDS_HEADER_LEN: usize = core::mem::size_of::<crate::zc::SpHeader>();
/// CRC algorithm used by the PUS standard, the CCSDS TC standard and the CFDP standard.
pub const CRC_CCITT_FALSE: Crc<u16> = Crc::<u16>::new(&CRC_16_IBM_3740);
pub const MAX_APID: u16 = 2u16.pow(11) - 1;
pub const MAX_SEQ_COUNT: u16 = 2u16.pow(14) - 1;
/// Generic error type when converting to and from raw byte slices.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct SizeMissmatch {
pub found: usize,
pub expected: usize,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum ByteConversionError {
/// The passed slice is too small. Returns the passed slice length and expected minimum size
ToSliceTooSmall(SizeMissmatch),
ToSliceTooSmall {
found: usize,
expected: usize,
},
/// The provided buffer is too small. Returns the passed slice length and expected minimum size
FromSliceTooSmall(SizeMissmatch),
FromSliceTooSmall {
found: usize,
expected: usize,
},
/// The [zerocopy] library failed to write to bytes
ZeroCopyToError,
ZeroCopyFromError,
@ -102,18 +114,18 @@ pub enum ByteConversionError {
impl Display for ByteConversionError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
ByteConversionError::ToSliceTooSmall(missmatch) => {
ByteConversionError::ToSliceTooSmall { found, expected } => {
write!(
f,
"target slice with size {} is too small, expected size of at least {}",
missmatch.found, missmatch.expected
found, expected
)
}
ByteConversionError::FromSliceTooSmall(missmatch) => {
ByteConversionError::FromSliceTooSmall { found, expected } => {
write!(
f,
"source slice with size {} too small, expected at least {} bytes",
missmatch.found, missmatch.expected
found, expected
)
}
ByteConversionError::ZeroCopyToError => {
@ -132,6 +144,7 @@ impl Error for ByteConversionError {}
/// CCSDS packet type enumeration.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum PacketType {
Tm = 0,
Tc = 1,
@ -155,6 +168,7 @@ pub fn packet_type_in_raw_packet_id(packet_id: u16) -> PacketType {
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum SequenceFlags {
ContinuationSegment = 0b00,
FirstSegment = 0b01,
@ -180,15 +194,45 @@ impl TryFrom<u8> for SequenceFlags {
/// Abstraction for the CCSDS Packet ID, which forms the last thirteen bits
/// of the first two bytes in the CCSDS primary header.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[derive(Debug, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct PacketId {
pub ptype: PacketType,
pub sec_header_flag: bool,
apid: u16,
}
impl PartialEq for PacketId {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.raw().eq(&other.raw())
}
}
impl PartialOrd for PacketId {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for PacketId {
#[inline]
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.raw().cmp(&other.raw())
}
}
impl Hash for PacketId {
fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
let raw = self.raw();
raw.hash(state);
}
}
impl Default for PacketId {
#[inline]
fn default() -> Self {
PacketId {
ptype: PacketType::Tm,
@ -199,23 +243,34 @@ impl Default for PacketId {
}
impl PacketId {
pub const fn const_tc(sec_header: bool, apid: u16) -> Self {
Self::const_new(PacketType::Tc, sec_header, apid)
}
pub const fn const_tm(sec_header: bool, apid: u16) -> Self {
Self::const_new(PacketType::Tm, sec_header, apid)
}
pub fn tc(sec_header: bool, apid: u16) -> Option<Self> {
/// This constructor will panic if the passed APID exceeds [MAX_APID].
/// Use the checked constructor variants to avoid panics.
#[inline]
pub const fn new_for_tc(sec_header: bool, apid: u16) -> Self {
Self::new(PacketType::Tc, sec_header, apid)
}
pub fn tm(sec_header: bool, apid: u16) -> Option<Self> {
/// This constructor will panic if the passed APID exceeds [MAX_APID].
/// Use the checked constructor variants to avoid panics.
#[inline]
pub const fn new_for_tm(sec_header: bool, apid: u16) -> Self {
Self::new(PacketType::Tm, sec_header, apid)
}
pub const fn const_new(ptype: PacketType, sec_header: bool, apid: u16) -> Self {
#[inline]
pub fn new_for_tc_checked(sec_header: bool, apid: u16) -> Option<Self> {
Self::new_checked(PacketType::Tc, sec_header, apid)
}
#[inline]
pub fn new_for_tm_checked(sec_header: bool, apid: u16) -> Option<Self> {
Self::new_checked(PacketType::Tm, sec_header, apid)
}
/// This constructor will panic if the passed APID exceeds [MAX_APID].
/// Use the checked variants to avoid panics.
#[inline]
pub const fn new(ptype: PacketType, sec_header: bool, apid: u16) -> Self {
if apid > MAX_APID {
panic!("APID too large");
}
@ -226,16 +281,18 @@ impl PacketId {
}
}
pub fn new(ptype: PacketType, sec_header_flag: bool, apid: u16) -> Option<PacketId> {
#[inline]
pub fn new_checked(ptype: PacketType, sec_header_flag: bool, apid: u16) -> Option<PacketId> {
if apid > MAX_APID {
return None;
}
Some(PacketId::const_new(ptype, sec_header_flag, apid))
Some(PacketId::new(ptype, sec_header_flag, apid))
}
/// Set a new Application Process ID (APID). If the passed number is invalid, the APID will
/// not be set and false will be returned. The maximum allowed value for the 11-bit field is
/// 2047
#[inline]
pub fn set_apid(&mut self, apid: u16) -> bool {
if apid > MAX_APID {
return false;
@ -244,10 +301,12 @@ impl PacketId {
true
}
#[inline]
pub fn apid(&self) -> u16 {
self.apid
}
#[inline]
pub fn raw(&self) -> u16 {
((self.ptype as u16) << 12) | ((self.sec_header_flag as u16) << 11) | self.apid
}
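The raw value simply packs the packet type, the secondary header flag and the APID into the 13-bit field plus the two flag bits. A small worked sketch using the panicking constructor introduced above:

```rust
use spacepackets::{PacketId, PacketType};

fn main() {
    // Packet type TC (1) << 12 | secondary header flag (1) << 11 | APID 0x42.
    let packet_id = PacketId::new(PacketType::Tc, true, 0x42);
    assert_eq!(packet_id.raw(), (1 << 12) | (1 << 11) | 0x42); // 0x1842
}
```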
@ -267,15 +326,17 @@ impl From<u16> for PacketId {
/// third and the fourth byte in the CCSDS primary header.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct PacketSequenceCtrl {
pub seq_flags: SequenceFlags,
seq_count: u16,
}
impl PacketSequenceCtrl {
/// const variant of [PacketSequenceCtrl::new], but panics if the sequence count exceeds
/// [MAX_SEQ_COUNT].
const fn const_new(seq_flags: SequenceFlags, seq_count: u16) -> PacketSequenceCtrl {
/// This constructor panics if the sequence count exceeds [MAX_SEQ_COUNT].
/// Use [Self::new_checked] to avoid panics.
#[inline]
pub const fn new(seq_flags: SequenceFlags, seq_count: u16) -> PacketSequenceCtrl {
if seq_count > MAX_SEQ_COUNT {
panic!("Sequence count too large");
}
@ -286,18 +347,22 @@ impl PacketSequenceCtrl {
}
/// Returns [None] if the passed sequence count exceeds [MAX_SEQ_COUNT].
pub fn new(seq_flags: SequenceFlags, seq_count: u16) -> Option<PacketSequenceCtrl> {
#[inline]
pub fn new_checked(seq_flags: SequenceFlags, seq_count: u16) -> Option<PacketSequenceCtrl> {
if seq_count > MAX_SEQ_COUNT {
return None;
}
Some(PacketSequenceCtrl::const_new(seq_flags, seq_count))
Some(PacketSequenceCtrl::new(seq_flags, seq_count))
}
#[inline]
pub fn raw(&self) -> u16 {
((self.seq_flags as u16) << 14) | self.seq_count
}
/// Set a new sequence count. If the passed number is invalid, the sequence count will not be
/// set and false will be returned. The maximum allowed value for the 14-bit field is 16383.
#[inline]
pub fn set_seq_count(&mut self, ssc: u16) -> bool {
if ssc > MAX_SEQ_COUNT {
return false;
@ -306,6 +371,7 @@ impl PacketSequenceCtrl {
true
}
#[inline]
pub fn seq_count(&self) -> u16 {
self.seq_count
}
@ -347,6 +413,7 @@ pub trait CcsdsPacket {
/// Retrieve data length field
fn data_len(&self) -> u16;
/// Retrieve the total packet size based on the data length field
#[inline]
fn total_len(&self) -> usize {
usize::from(self.data_len()) + CCSDS_HEADER_LEN + 1
}
@ -427,6 +494,7 @@ pub trait CcsdsPrimaryHeader {
/// * `data_len` - Data length field occupies the fifth and the sixth byte of the raw header
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct SpHeader {
pub version: u8,
pub packet_id: PacketId,
@ -434,9 +502,12 @@ pub struct SpHeader {
pub data_len: u16,
}
pub type SpacePacketHeader = SpHeader;
impl Default for SpHeader {
/// The default function sets the sequence flag field to [SequenceFlags::Unsegmented]. The data
/// length field is set to 1, which denotes an empty space packet.
/// The default function sets the sequence flag field to [SequenceFlags::Unsegmented] and the
/// data length to 0.
#[inline]
fn default() -> Self {
SpHeader {
version: 0,
@ -445,12 +516,13 @@ impl Default for SpHeader {
seq_flags: SequenceFlags::Unsegmented,
seq_count: 0,
},
data_len: 1,
data_len: 0,
}
}
}
impl SpHeader {
#[inline]
pub const fn new(packet_id: PacketId, psc: PacketSequenceCtrl, data_len: u16) -> Self {
Self {
version: 0,
@ -460,9 +532,43 @@ impl SpHeader {
}
}
/// const variant of the [SpHeader::new_from_single_fields] function. Panics if the passed
/// APID exceeds [MAX_APID] or the passed packet sequence count exceeds [MAX_SEQ_COUNT].
const fn const_new_from_single_fields(
/// This constructor sets the sequence flag field to [SequenceFlags::Unsegmented] and the data
/// length to 0.
///
/// This constructor will panic if the APID exceeds [MAX_APID].
#[inline]
pub const fn new_from_apid(apid: u16) -> Self {
SpHeader {
version: 0,
packet_id: PacketId::new(PacketType::Tm, false, apid),
psc: PacketSequenceCtrl {
seq_flags: SequenceFlags::Unsegmented,
seq_count: 0,
},
data_len: 0,
}
}
/// Checked variant of [Self::new_from_apid].
#[inline]
pub fn new_from_apid_checked(apid: u16) -> Option<Self> {
Some(SpHeader {
version: 0,
packet_id: PacketId::new_checked(PacketType::Tm, false, apid)?,
psc: PacketSequenceCtrl {
seq_flags: SequenceFlags::Unsegmented,
seq_count: 0,
},
data_len: 0,
})
}
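For the common case of only needing a header for a given APID, the new convenience constructors pre-fill the remaining fields. A short usage sketch (the trait import is assumed to bring the getters into scope):

```rust
use spacepackets::{CcsdsPacket, SequenceFlags, SpHeader};

fn main() {
    let header = SpHeader::new_from_apid(0x42);
    assert_eq!(header.apid(), 0x42);
    assert_eq!(header.sequence_flags(), SequenceFlags::Unsegmented);
    assert_eq!(header.data_len(), 0);
    // The checked variant returns None for an APID which does not fit into 11 bits.
    assert!(SpHeader::new_from_apid_checked(0x1000).is_none());
}
```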
/// This constructor panics if the passed APID exceeds [MAX_APID] or the passed packet sequence
/// count exceeds [MAX_SEQ_COUNT].
///
/// The checked constructor variants can be used to avoid panics.
#[inline]
pub const fn new_from_fields(
ptype: PacketType,
sec_header: bool,
apid: u16,
@ -477,17 +583,20 @@ impl SpHeader {
panic!("APID is too large");
}
Self {
psc: PacketSequenceCtrl::const_new(seq_flags, seq_count),
packet_id: PacketId::const_new(ptype, sec_header, apid),
psc: PacketSequenceCtrl::new(seq_flags, seq_count),
packet_id: PacketId::new(ptype, sec_header, apid),
data_len,
version: 0,
}
}
/// Create a new Space Packet Header instance which can be used to create generic
/// Space Packets. This will return [None] if the APID or sequence count argument
/// Space Packets.
///
/// This will return [None] if the APID or sequence count argument
/// exceed [MAX_APID] or [MAX_SEQ_COUNT] respectively. The version field is set to 0b000.
pub fn new_from_single_fields(
#[inline]
pub fn new_from_fields_checked(
ptype: PacketType,
sec_header: bool,
apid: u16,
@ -498,56 +607,117 @@ impl SpHeader {
if seq_count > MAX_SEQ_COUNT || apid > MAX_APID {
return None;
}
Some(SpHeader::const_new_from_single_fields(
Some(SpHeader::new_from_fields(
ptype, sec_header, apid, seq_flags, seq_count, data_len,
))
}
/// Helper function for telemetry space packet headers. The packet type field will be
/// set accordingly. The secondary header flag field is set to false.
pub fn tm(apid: u16, seq_flags: SequenceFlags, seq_count: u16, data_len: u16) -> Option<Self> {
Self::new_from_single_fields(PacketType::Tm, false, apid, seq_flags, seq_count, data_len)
#[inline]
pub fn new_for_tm_checked(
apid: u16,
seq_flags: SequenceFlags,
seq_count: u16,
data_len: u16,
) -> Option<Self> {
Self::new_from_fields_checked(PacketType::Tm, false, apid, seq_flags, seq_count, data_len)
}
/// Helper function for telecommand space packet headers. The packet type field will be
/// set accordingly. The secondary header flag field is set to false.
pub fn tc(apid: u16, seq_flags: SequenceFlags, seq_count: u16, data_len: u16) -> Option<Self> {
Self::new_from_single_fields(PacketType::Tc, false, apid, seq_flags, seq_count, data_len)
#[inline]
pub fn new_for_tc_checked(
apid: u16,
seq_flags: SequenceFlags,
seq_count: u16,
data_len: u16,
) -> Option<Self> {
Self::new_from_fields_checked(PacketType::Tc, false, apid, seq_flags, seq_count, data_len)
}
/// Variant of [SpHeader::tm] which sets the sequence flag field to [SequenceFlags::Unsegmented]
pub fn tm_unseg(apid: u16, seq_count: u16, data_len: u16) -> Option<Self> {
Self::tm(apid, SequenceFlags::Unsegmented, seq_count, data_len)
/// This is an unchecked constructor which can panic on invalid input.
#[inline]
pub const fn new_for_tm(
apid: u16,
seq_flags: SequenceFlags,
seq_count: u16,
data_len: u16,
) -> Self {
Self::new_from_fields(PacketType::Tm, false, apid, seq_flags, seq_count, data_len)
}
/// Variant of [SpHeader::tc] which sets the sequence flag field to [SequenceFlags::Unsegmented]
pub fn tc_unseg(apid: u16, seq_count: u16, data_len: u16) -> Option<Self> {
Self::tc(apid, SequenceFlags::Unsegmented, seq_count, data_len)
/// This is an unchecked constructor which can panic on invalid input.
#[inline]
pub const fn new_for_tc(
apid: u16,
seq_flags: SequenceFlags,
seq_count: u16,
data_len: u16,
) -> Self {
Self::new_from_fields(PacketType::Tc, false, apid, seq_flags, seq_count, data_len)
}
//noinspection RsTraitImplementation
delegate!(to self.packet_id {
/// Returns [false] and fails if the APID exceeds [MAX_APID]
pub fn set_apid(&mut self, apid: u16) -> bool;
});
/// Variant of [SpHeader::new_for_tm_checked] which sets the sequence flag field to [SequenceFlags::Unsegmented]
#[inline]
pub fn new_for_unseg_tm_checked(apid: u16, seq_count: u16, data_len: u16) -> Option<Self> {
Self::new_for_tm_checked(apid, SequenceFlags::Unsegmented, seq_count, data_len)
}
delegate!(to self.psc {
/// Returns [false] and fails if the sequence count exceeds [MAX_SEQ_COUNT]
pub fn set_seq_count(&mut self, seq_count: u16) -> bool;
});
/// Variant of [SpHeader::new_for_tc_checked] which sets the sequence flag field to [SequenceFlags::Unsegmented]
#[inline]
pub fn new_for_unseg_tc_checked(apid: u16, seq_count: u16, data_len: u16) -> Option<Self> {
Self::new_for_tc_checked(apid, SequenceFlags::Unsegmented, seq_count, data_len)
}
/// Variant of [SpHeader::new_for_tc] which sets the sequence flag field to [SequenceFlags::Unsegmented].
///
/// This is an unchecked constructor which can panic on invalid input.
#[inline]
pub const fn new_for_unseg_tc(apid: u16, seq_count: u16, data_len: u16) -> Self {
Self::new_for_tc(apid, SequenceFlags::Unsegmented, seq_count, data_len)
}
/// Variant of [SpHeader::new_for_tm] which sets the sequence flag field to [SequenceFlags::Unsegmented].
///
/// This is an unchecked constructor which can panic on invalid input.
#[inline]
pub const fn new_for_unseg_tm(apid: u16, seq_count: u16, data_len: u16) -> Self {
Self::new_for_tm(apid, SequenceFlags::Unsegmented, seq_count, data_len)
}
delegate! {
to self.packet_id {
/// Returns [false] and fails if the APID exceeds [MAX_APID]
#[inline]
pub fn set_apid(&mut self, apid: u16) -> bool;
}
}
delegate! {
to self.psc {
/// Returns [false] and fails if the sequence count exceeds [MAX_SEQ_COUNT]
#[inline]
pub fn set_seq_count(&mut self, seq_count: u16) -> bool;
}
}
#[inline]
pub fn set_seq_flags(&mut self, seq_flags: SequenceFlags) {
self.psc.seq_flags = seq_flags;
}
#[inline]
pub fn set_sec_header_flag(&mut self) {
self.packet_id.sec_header_flag = true;
}
#[inline]
pub fn clear_sec_header_flag(&mut self) {
self.packet_id.sec_header_flag = false;
}
#[inline]
pub fn set_packet_type(&mut self, packet_type: PacketType) {
self.packet_id.ptype = packet_type;
}
@ -557,10 +727,10 @@ impl SpHeader {
/// CCSDS header.
pub fn from_be_bytes(buf: &[u8]) -> Result<(Self, &[u8]), ByteConversionError> {
if buf.len() < CCSDS_HEADER_LEN {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: CCSDS_HEADER_LEN,
}));
});
}
let zc_header = zc::SpHeader::from_bytes(&buf[0..CCSDS_HEADER_LEN])
.ok_or(ByteConversionError::ZeroCopyFromError)?;
@ -574,10 +744,10 @@ impl SpHeader {
buf: &'a mut [u8],
) -> Result<&'a mut [u8], ByteConversionError> {
if buf.len() < CCSDS_HEADER_LEN {
return Err(ByteConversionError::FromSliceTooSmall(SizeMissmatch {
return Err(ByteConversionError::FromSliceTooSmall {
found: buf.len(),
expected: CCSDS_HEADER_LEN,
}));
});
}
let zc_header: zc::SpHeader = zc::SpHeader::from(*self);
zc_header
@ -585,6 +755,15 @@ impl SpHeader {
.ok_or(ByteConversionError::ZeroCopyToError)?;
Ok(&mut buf[CCSDS_HEADER_LEN..])
}
/// Create a vector containing the CCSDS header.
#[cfg(feature = "alloc")]
pub fn to_vec(&self) -> alloc::vec::Vec<u8> {
let mut vec = alloc::vec![0; CCSDS_HEADER_LEN];
// This can not fail.
self.write_to_be_bytes(&mut vec[..]).unwrap();
vec
}
}
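A round trip through the byte-level API, sketched with the renamed constructors from this change set:

```rust
use spacepackets::{CcsdsPacket, SpHeader};

fn main() {
    let sp_header = SpHeader::new_for_unseg_tc(0x42, 25, 0);
    let mut buf = [0u8; 16];
    sp_header
        .write_to_be_bytes(&mut buf)
        .expect("writing CCSDS header failed");
    // from_be_bytes returns the parsed header and the remaining slice.
    let (read_back, _rest) =
        SpHeader::from_be_bytes(&buf).expect("parsing CCSDS header failed");
    assert_eq!(read_back, sp_header);
    assert_eq!(read_back.apid(), 0x42);
}
```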
impl CcsdsPacket for SpHeader {
@ -610,6 +789,7 @@ impl CcsdsPacket for SpHeader {
}
impl CcsdsPrimaryHeader for SpHeader {
#[inline]
fn from_composite_fields(
packet_id: PacketId,
psc: PacketSequenceCtrl,
@ -634,9 +814,9 @@ sph_from_other!(SpHeader, crate::zc::SpHeader);
pub mod zc {
use crate::{CcsdsPacket, CcsdsPrimaryHeader, PacketId, PacketSequenceCtrl, VERSION_MASK};
use zerocopy::byteorder::NetworkEndian;
use zerocopy::{AsBytes, FromBytes, Unaligned, U16};
use zerocopy::{AsBytes, FromBytes, FromZeroes, Unaligned, U16};
#[derive(FromBytes, AsBytes, Unaligned, Debug)]
#[derive(FromBytes, FromZeroes, AsBytes, Unaligned, Debug)]
#[repr(C)]
pub struct SpHeader {
version_packet_id: U16<NetworkEndian>,
@ -677,9 +857,12 @@ pub mod zc {
((self.version_packet_id.get() >> 13) as u8) & 0b111
}
#[inline]
fn packet_id(&self) -> PacketId {
PacketId::from(self.packet_id_raw())
}
#[inline]
fn psc(&self) -> PacketSequenceCtrl {
PacketSequenceCtrl::from(self.psc_raw())
}
@ -689,10 +872,12 @@ pub mod zc {
self.data_len.get()
}
#[inline]
fn packet_id_raw(&self) -> u16 {
self.version_packet_id.get() & (!VERSION_MASK)
}
#[inline]
fn psc_raw(&self) -> u16 {
self.psc.get()
}
@ -713,7 +898,11 @@ pub mod zc {
}
#[cfg(all(test, feature = "std"))]
mod tests {
pub(crate) mod tests {
use std::collections::HashSet;
#[allow(unused_imports)]
use crate::ByteConversionError;
#[cfg(feature = "serde")]
use crate::CcsdsPrimaryHeader;
use crate::{
@ -721,24 +910,38 @@ mod tests {
};
use crate::{SequenceFlags, SpHeader};
use alloc::vec;
#[cfg(feature = "serde")]
use core::fmt::Debug;
use num_traits::pow;
#[cfg(feature = "serde")]
use postcard::{from_bytes, to_allocvec};
#[cfg(feature = "serde")]
use serde::{de::DeserializeOwned, Serialize};
const CONST_SP: SpHeader = SpHeader::new(
PacketId::const_tc(true, 0x36),
PacketSequenceCtrl::const_new(SequenceFlags::ContinuationSegment, 0x88),
PacketId::new_for_tc(true, 0x36),
PacketSequenceCtrl::new(SequenceFlags::ContinuationSegment, 0x88),
0x90,
);
const PACKET_ID_TM: PacketId = PacketId::const_tm(true, 0x22);
const PACKET_ID_TM: PacketId = PacketId::new_for_tm(true, 0x22);
#[cfg(feature = "serde")]
pub(crate) fn generic_serde_test<T: Serialize + DeserializeOwned + PartialEq + Debug>(
value: T,
) {
let output: alloc::vec::Vec<u8> = to_allocvec(&value).unwrap();
let output_converted_back: T = from_bytes(&output).unwrap();
assert_eq!(output_converted_back, value);
}
#[test]
#[allow(clippy::assertions_on_constants)]
fn verify_const_packet_id() {
assert_eq!(PACKET_ID_TM.apid(), 0x22);
assert_eq!(PACKET_ID_TM.sec_header_flag, true);
assert!(PACKET_ID_TM.sec_header_flag);
assert_eq!(PACKET_ID_TM.ptype, PacketType::Tm);
let const_tc_id = PacketId::const_tc(true, 0x23);
let const_tc_id = PacketId::new_for_tc(true, 0x23);
assert_eq!(const_tc_id.ptype, PacketType::Tc);
}
@ -747,32 +950,32 @@ mod tests {
let id_default = PacketId::default();
assert_eq!(id_default.ptype, PacketType::Tm);
assert_eq!(id_default.apid, 0x000);
assert_eq!(id_default.sec_header_flag, false);
assert!(!id_default.sec_header_flag);
}
#[test]
fn test_packet_id_ctors() {
let packet_id = PacketId::new(PacketType::Tc, true, 0x1ff);
let packet_id = PacketId::new_checked(PacketType::Tc, true, 0x1ff);
assert!(packet_id.is_some());
let packet_id = packet_id.unwrap();
assert_eq!(packet_id.apid(), 0x1ff);
assert_eq!(packet_id.ptype, PacketType::Tc);
assert_eq!(packet_id.sec_header_flag, true);
let packet_id_tc = PacketId::tc(true, 0x1ff);
assert!(packet_id.sec_header_flag);
let packet_id_tc = PacketId::new_for_tc_checked(true, 0x1ff);
assert!(packet_id_tc.is_some());
let packet_id_tc = packet_id_tc.unwrap();
assert_eq!(packet_id_tc, packet_id);
let packet_id_tm = PacketId::tm(true, 0x2ff);
let packet_id_tm = PacketId::new_for_tm_checked(true, 0x2ff);
assert!(packet_id_tm.is_some());
let packet_id_tm = packet_id_tm.unwrap();
assert_eq!(packet_id_tm.sec_header_flag, true);
assert!(packet_id_tm.sec_header_flag);
assert_eq!(packet_id_tm.ptype, PacketType::Tm);
assert_eq!(packet_id_tm.apid, 0x2ff);
}
#[test]
fn verify_const_sp_header() {
assert_eq!(CONST_SP.sec_header_flag(), true);
assert!(CONST_SP.sec_header_flag());
assert_eq!(CONST_SP.apid(), 0x36);
assert_eq!(
CONST_SP.sequence_flags(),
@ -813,7 +1016,7 @@ mod tests {
#[test]
fn test_packet_id() {
let packet_id =
PacketId::new(PacketType::Tm, false, 0x42).expect("Packet ID creation failed");
PacketId::new_checked(PacketType::Tm, false, 0x42).expect("Packet ID creation failed");
assert_eq!(packet_id.raw(), 0x0042);
let packet_id_from_raw = PacketId::from(packet_id.raw());
assert_eq!(
@ -821,33 +1024,34 @@ mod tests {
PacketType::Tm
);
assert_eq!(packet_id_from_raw, packet_id);
let packet_id_from_new = PacketId::new(PacketType::Tm, false, 0x42).unwrap();
let packet_id_from_new = PacketId::new_checked(PacketType::Tm, false, 0x42).unwrap();
assert_eq!(packet_id_from_new, packet_id);
}
#[test]
fn test_invalid_packet_id() {
let packet_id_invalid = PacketId::new(PacketType::Tc, true, 0xFFFF);
let packet_id_invalid = PacketId::new_checked(PacketType::Tc, true, 0xFFFF);
assert!(packet_id_invalid.is_none());
}
#[test]
fn test_invalid_apid_setter() {
let mut packet_id =
PacketId::new(PacketType::Tm, false, 0x42).expect("Packet ID creation failed");
PacketId::new_checked(PacketType::Tm, false, 0x42).expect("Packet ID creation failed");
assert!(!packet_id.set_apid(0xffff));
}
#[test]
fn test_invalid_seq_count() {
let mut psc = PacketSequenceCtrl::new(SequenceFlags::ContinuationSegment, 77)
let mut psc = PacketSequenceCtrl::new_checked(SequenceFlags::ContinuationSegment, 77)
.expect("PSC creation failed");
assert_eq!(psc.seq_count(), 77);
assert!(!psc.set_seq_count(0xffff));
}
#[test]
fn test_packet_seq_ctrl() {
let mut psc = PacketSequenceCtrl::new(SequenceFlags::ContinuationSegment, 77)
let mut psc = PacketSequenceCtrl::new_checked(SequenceFlags::ContinuationSegment, 77)
.expect("PSC creation failed");
assert_eq!(psc.raw(), 77);
let psc_from_raw = PacketSequenceCtrl::from(psc.raw());
@ -856,16 +1060,18 @@ mod tests {
assert!(!psc.set_seq_count(2u16.pow(15)));
assert_eq!(psc.raw(), 77);
let psc_invalid = PacketSequenceCtrl::new(SequenceFlags::FirstSegment, 0xFFFF);
let psc_invalid = PacketSequenceCtrl::new_checked(SequenceFlags::FirstSegment, 0xFFFF);
assert!(psc_invalid.is_none());
let psc_from_new = PacketSequenceCtrl::new(SequenceFlags::ContinuationSegment, 77).unwrap();
let psc_from_new =
PacketSequenceCtrl::new_checked(SequenceFlags::ContinuationSegment, 77).unwrap();
assert_eq!(psc_from_new, psc);
}
#[test]
#[cfg(feature = "serde")]
fn test_serde_sph() {
let sp_header = SpHeader::tc_unseg(0x42, 12, 0).expect("Error creating SP header");
let sp_header =
SpHeader::new_for_unseg_tc_checked(0x42, 12, 0).expect("Error creating SP header");
assert_eq!(sp_header.ccsds_version(), 0b000);
assert!(sp_header.is_tc());
assert!(!sp_header.sec_header_flag());
@ -887,7 +1093,8 @@ mod tests {
assert_eq!(sp_header.ccsds_version(), 0b000);
assert_eq!(sp_header.data_len, 0);
let sp_header = SpHeader::tm_unseg(0x7, 22, 36).expect("Error creating SP header");
let sp_header =
SpHeader::new_for_unseg_tm_checked(0x7, 22, 36).expect("Error creating SP header");
assert_eq!(sp_header.ccsds_version(), 0b000);
assert!(sp_header.is_tm());
assert!(!sp_header.sec_header_flag());
@ -901,8 +1108,8 @@ mod tests {
assert_eq!(sp_header.ccsds_version(), 0b000);
let from_comp_fields = SpHeader::from_composite_fields(
PacketId::new(PacketType::Tc, true, 0x42).unwrap(),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 0x7).unwrap(),
PacketId::new(PacketType::Tc, true, 0x42),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 0x7),
0,
None,
);
@ -919,7 +1126,7 @@ mod tests {
#[test]
fn test_setters() {
let sp_header = SpHeader::tc(0x42, SequenceFlags::Unsegmented, 25, 0);
let sp_header = SpHeader::new_for_tc_checked(0x42, SequenceFlags::Unsegmented, 25, 0);
assert!(sp_header.is_some());
let mut sp_header = sp_header.unwrap();
sp_header.set_apid(0x12);
@ -937,7 +1144,7 @@ mod tests {
#[test]
fn test_tc_ctor() {
let sp_header = SpHeader::tc(0x42, SequenceFlags::Unsegmented, 25, 0);
let sp_header = SpHeader::new_for_tc_checked(0x42, SequenceFlags::Unsegmented, 25, 0);
assert!(sp_header.is_some());
let sp_header = sp_header.unwrap();
verify_sp_fields(PacketType::Tc, &sp_header);
@ -945,23 +1152,35 @@ mod tests {
#[test]
fn test_tc_ctor_unseg() {
let sp_header = SpHeader::tc_unseg(0x42, 25, 0);
let sp_header = SpHeader::new_for_unseg_tc_checked(0x42, 25, 0);
assert!(sp_header.is_some());
let sp_header = sp_header.unwrap();
verify_sp_fields(PacketType::Tc, &sp_header);
}
#[test]
fn test_tc_ctor_unseg_const() {
let sp_header = SpHeader::new_for_unseg_tc(0x42, 25, 0);
verify_sp_fields(PacketType::Tc, &sp_header);
}
#[test]
fn test_tm_ctor() {
let sp_header = SpHeader::tm(0x42, SequenceFlags::Unsegmented, 25, 0);
let sp_header = SpHeader::new_for_tm_checked(0x42, SequenceFlags::Unsegmented, 25, 0);
assert!(sp_header.is_some());
let sp_header = sp_header.unwrap();
verify_sp_fields(PacketType::Tm, &sp_header);
}
#[test]
fn test_tm_ctor_const() {
let sp_header = SpHeader::new_for_tm(0x42, SequenceFlags::Unsegmented, 25, 0);
verify_sp_fields(PacketType::Tm, &sp_header);
}
#[test]
fn test_tm_ctor_unseg() {
let sp_header = SpHeader::tm_unseg(0x42, 25, 0);
let sp_header = SpHeader::new_for_unseg_tm_checked(0x42, 25, 0);
assert!(sp_header.is_some());
let sp_header = sp_header.unwrap();
verify_sp_fields(PacketType::Tm, &sp_header);
@ -979,8 +1198,8 @@ mod tests {
fn test_zc_sph() {
use zerocopy::AsBytes;
let sp_header =
SpHeader::tc_unseg(0x7FF, pow(2, 14) - 1, 0).expect("Error creating SP header");
let sp_header = SpHeader::new_for_unseg_tc_checked(0x7FF, pow(2, 14) - 1, 0)
.expect("Error creating SP header");
assert_eq!(sp_header.ptype(), PacketType::Tc);
assert_eq!(sp_header.apid(), 0x7FF);
assert_eq!(sp_header.data_len(), 0);
@ -1027,4 +1246,58 @@ mod tests {
assert_eq!(sp_header.ptype(), PacketType::Tc);
assert_eq!(sp_header.data_len(), 0);
}
#[test]
fn packet_id_ord_partial_ord() {
let packet_id_small = PacketId::from(1_u16);
let packet_id_larger = PacketId::from(2_u16);
assert!(packet_id_small < packet_id_larger);
assert!(packet_id_larger > packet_id_small);
assert_eq!(
packet_id_small.cmp(&packet_id_larger),
core::cmp::Ordering::Less
);
}
#[test]
fn packet_id_hashable() {
let mut id_set = HashSet::new();
id_set.insert(PacketId::from(1_u16));
}
#[test]
fn sp_header_from_apid() {
let sp_header = SpHeader::new_from_apid(0x03);
assert_eq!(sp_header.apid(), 0x03);
assert_eq!(sp_header.data_len(), 0);
}
#[test]
fn sp_header_from_apid_checked() {
let sp_header = SpHeader::new_from_apid_checked(0x03).unwrap();
assert_eq!(sp_header.apid(), 0x03);
assert_eq!(sp_header.data_len(), 0);
}
#[cfg(feature = "defmt")]
fn is_defmt_format<T: defmt::Format>(_t: T) {}
#[test]
#[cfg(feature = "defmt")]
fn test_defmt_format() {
is_defmt_format(ByteConversionError::ToSliceTooSmall {
found: 1,
expected: 2,
});
}
#[test]
fn test_sp_header_as_vec() {
let sp_header = SpHeader::new_for_unseg_tc(0x42, 25, 1);
let sp_header_as_vec = sp_header.to_vec();
let sp_header_read_back = SpHeader::from_be_bytes(&sp_header_as_vec)
.expect("Error reading back SP header")
.0;
assert_eq!(sp_header, sp_header_read_back);
}
}


@ -2,11 +2,8 @@
//! [CCSDS 301.0-B-4](https://public.ccsds.org/Pubs/301x0b4e1.pdf) section 3.5 .
//! See [chrono::DateTime::format] for a usage example of the generated
//! [chrono::format::DelayedFormat] structs.
#[cfg(feature = "alloc")]
use chrono::{
format::{DelayedFormat, StrftimeItems},
DateTime, Utc,
};
#[cfg(all(feature = "alloc", feature = "chrono"))]
pub use alloc_mod_chrono::*;
/// Tuple of format string and formatted size for time code A.
///
@ -34,36 +31,37 @@ pub const FMT_STR_CODE_B_WITH_SIZE: (&str, usize) = ("%Y-%jT%T%.3f", 21);
/// Three digits are used for the decimal fraction and a terminator is added at the end.
pub const FMT_STR_CODE_B_TERMINATED_WITH_SIZE: (&str, usize) = ("%Y-%jT%T%.3fZ", 22);
/// Generates a time code formatter using the [FMT_STR_CODE_A_WITH_SIZE] format.
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn generate_time_code_a(date: &DateTime<Utc>) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_A_WITH_SIZE.0)
}
#[cfg(all(feature = "alloc", feature = "chrono"))]
pub mod alloc_mod_chrono {
use super::*;
use chrono::{
format::{DelayedFormat, StrftimeItems},
DateTime, Utc,
};
/// Generates a time code formatter using the [FMT_STR_CODE_A_TERMINATED_WITH_SIZE] format.
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn generate_time_code_a_terminated(
date: &DateTime<Utc>,
) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_A_TERMINATED_WITH_SIZE.0)
}
/// Generates a time code formatter using the [FMT_STR_CODE_A_WITH_SIZE] format.
pub fn generate_time_code_a(date: &DateTime<Utc>) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_A_WITH_SIZE.0)
}
/// Generates a time code formatter using the [FMT_STR_CODE_B_WITH_SIZE] format.
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn generate_time_code_b(date: &DateTime<Utc>) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_B_WITH_SIZE.0)
}
/// Generates a time code formatter using the [FMT_STR_CODE_A_TERMINATED_WITH_SIZE] format.
pub fn generate_time_code_a_terminated(
date: &DateTime<Utc>,
) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_A_TERMINATED_WITH_SIZE.0)
}
/// Generates a time code formatter using the [FMT_STR_CODE_B_TERMINATED_WITH_SIZE] format.
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn generate_time_code_b_terminated(
date: &DateTime<Utc>,
) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_B_TERMINATED_WITH_SIZE.0)
/// Generates a time code formatter using the [FMT_STR_CODE_B_WITH_SIZE] format.
pub fn generate_time_code_b(date: &DateTime<Utc>) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_B_WITH_SIZE.0)
}
/// Generates a time code formatter using the [FMT_STR_CODE_B_TERMINATED_WITH_SIZE] format.
pub fn generate_time_code_b_terminated(
date: &DateTime<Utc>,
) -> DelayedFormat<StrftimeItems<'static>> {
date.format(FMT_STR_CODE_B_TERMINATED_WITH_SIZE.0)
}
}
#[cfg(test)]
@ -73,25 +71,54 @@ mod tests {
use std::format;
#[test]
fn test_ascii_timestamp_a_unterminated() {
let date = Utc::now();
fn test_ascii_timestamp_a_unterminated_epoch() {
let date = chrono::DateTime::UNIX_EPOCH;
let stamp_formatter = generate_time_code_a(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find("T");
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 10);
assert_eq!(stamp.len(), FMT_STR_CODE_A_WITH_SIZE.1);
}
#[test]
fn test_ascii_timestamp_a_terminated() {
#[cfg_attr(miri, ignore)]
fn test_ascii_timestamp_a_unterminated_now() {
let date = Utc::now();
let stamp_formatter = generate_time_code_a(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 10);
assert_eq!(stamp.len(), FMT_STR_CODE_A_WITH_SIZE.1);
}
#[test]
fn test_ascii_timestamp_a_terminated_epoch() {
let date = chrono::DateTime::UNIX_EPOCH;
let stamp_formatter = generate_time_code_a_terminated(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 10);
let z_terminator = stamp.find('Z');
assert!(z_terminator.is_some());
assert_eq!(
z_terminator.unwrap(),
FMT_STR_CODE_A_TERMINATED_WITH_SIZE.1 - 1
);
assert_eq!(stamp.len(), FMT_STR_CODE_A_TERMINATED_WITH_SIZE.1);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_ascii_timestamp_a_terminated_now() {
let date = Utc::now();
let stamp_formatter = generate_time_code_a_terminated(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find("T");
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 10);
let z_terminator = stamp.find("Z");
let z_terminator = stamp.find('Z');
assert!(z_terminator.is_some());
assert_eq!(
z_terminator.unwrap(),
@ -101,25 +128,55 @@ mod tests {
}
#[test]
fn test_ascii_timestamp_b_unterminated() {
let date = Utc::now();
fn test_ascii_timestamp_b_unterminated_epoch() {
let date = chrono::DateTime::UNIX_EPOCH;
let stamp_formatter = generate_time_code_b(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find("T");
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 8);
assert_eq!(stamp.len(), FMT_STR_CODE_B_WITH_SIZE.1);
}
#[test]
fn test_ascii_timestamp_b_terminated() {
#[cfg_attr(miri, ignore)]
fn test_ascii_timestamp_b_unterminated_now() {
let date = Utc::now();
let stamp_formatter = generate_time_code_b(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 8);
assert_eq!(stamp.len(), FMT_STR_CODE_B_WITH_SIZE.1);
}
#[test]
fn test_ascii_timestamp_b_terminated_epoch() {
let date = chrono::DateTime::UNIX_EPOCH;
let stamp_formatter = generate_time_code_b_terminated(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 8);
let z_terminator = stamp.find('Z');
assert!(z_terminator.is_some());
assert_eq!(
z_terminator.unwrap(),
FMT_STR_CODE_B_TERMINATED_WITH_SIZE.1 - 1
);
assert_eq!(stamp.len(), FMT_STR_CODE_B_TERMINATED_WITH_SIZE.1);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_ascii_timestamp_b_terminated_now() {
let date = Utc::now();
let stamp_formatter = generate_time_code_b_terminated(&date);
let stamp = format!("{}", stamp_formatter);
let t_sep = stamp.find("T");
let t_sep = stamp.find('T');
assert!(t_sep.is_some());
assert_eq!(t_sep.unwrap(), 8);
let z_terminator = stamp.find("Z");
let z_terminator = stamp.find('Z');
assert!(z_terminator.is_some());
assert_eq!(
z_terminator.unwrap(),

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,10 +1,12 @@
//! CCSDS Time Code Formats according to [CCSDS 301.0-B-4](https://public.ccsds.org/Pubs/301x0b4e1.pdf)
use crate::{ByteConversionError, SizeMissmatch};
use chrono::{DateTime, LocalResult, TimeZone, Utc};
use crate::ByteConversionError;
#[cfg(feature = "chrono")]
use chrono::{TimeZone, Utc};
use core::cmp::Ordering;
use core::fmt::{Display, Formatter};
use core::ops::{Add, AddAssign};
use core::ops::{Add, AddAssign, Sub};
use core::time::Duration;
use core::u8;
#[allow(unused_imports)]
#[cfg(not(feature = "std"))]
@ -12,10 +14,13 @@ use num_traits::float::FloatCore;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
#[cfg(feature = "std")]
use std::time::{SystemTime, SystemTimeError};
#[cfg(feature = "std")]
pub use std_mod::*;
pub mod ascii;
pub mod cds;
@ -24,10 +29,12 @@ pub mod cuc;
pub const DAYS_CCSDS_TO_UNIX: i32 = -4383;
pub const SECONDS_PER_DAY: u32 = 86400;
pub const MS_PER_DAY: u32 = SECONDS_PER_DAY * 1000;
pub const NANOS_PER_SECOND: u32 = 1_000_000_000;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum CcsdsTimeCodes {
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum CcsdsTimeCode {
CucCcsdsEpoch = 0b001,
CucAgencyEpoch = 0b010,
Cds = 0b100,
@ -35,16 +42,16 @@ pub enum CcsdsTimeCodes {
AgencyDefined = 0b110,
}
impl TryFrom<u8> for CcsdsTimeCodes {
impl TryFrom<u8> for CcsdsTimeCode {
type Error = ();
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
x if x == CcsdsTimeCodes::CucCcsdsEpoch as u8 => Ok(CcsdsTimeCodes::CucCcsdsEpoch),
x if x == CcsdsTimeCodes::CucAgencyEpoch as u8 => Ok(CcsdsTimeCodes::CucAgencyEpoch),
x if x == CcsdsTimeCodes::Cds as u8 => Ok(CcsdsTimeCodes::Cds),
x if x == CcsdsTimeCodes::Ccs as u8 => Ok(CcsdsTimeCodes::Ccs),
x if x == CcsdsTimeCodes::AgencyDefined as u8 => Ok(CcsdsTimeCodes::AgencyDefined),
x if x == CcsdsTimeCode::CucCcsdsEpoch as u8 => Ok(CcsdsTimeCode::CucCcsdsEpoch),
x if x == CcsdsTimeCode::CucAgencyEpoch as u8 => Ok(CcsdsTimeCode::CucAgencyEpoch),
x if x == CcsdsTimeCode::Cds as u8 => Ok(CcsdsTimeCode::Cds),
x if x == CcsdsTimeCode::Ccs as u8 => Ok(CcsdsTimeCode::Ccs),
x if x == CcsdsTimeCode::AgencyDefined as u8 => Ok(CcsdsTimeCode::AgencyDefined),
_ => Err(()),
}
}
@ -52,79 +59,54 @@ impl TryFrom<u8> for CcsdsTimeCodes {
/// Retrieve the CCSDS time code from the p-field. If no valid time code identifier is found, the
/// value of the raw time code identification field is returned.
pub fn ccsds_time_code_from_p_field(pfield: u8) -> Result<CcsdsTimeCodes, u8> {
pub fn ccsds_time_code_from_p_field(pfield: u8) -> Result<CcsdsTimeCode, u8> {
let raw_bits = (pfield >> 4) & 0b111;
CcsdsTimeCodes::try_from(raw_bits).map_err(|_| raw_bits)
CcsdsTimeCode::try_from(raw_bits).map_err(|_| raw_bits)
}
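The time code identifier occupies bits 4 to 6 of the p-field, so a CDS p-field with the extension bit cleared decodes as shown in this sketch:

```rust
use spacepackets::time::{ccsds_time_code_from_p_field, CcsdsTimeCode};

fn main() {
    // CDS identifier 0b100 shifted into bits 4..=6 of the p-field.
    let pfield = (CcsdsTimeCode::Cds as u8) << 4;
    assert_eq!(ccsds_time_code_from_p_field(pfield), Ok(CcsdsTimeCode::Cds));
    // An unknown identifier is handed back as the raw 3-bit value.
    assert_eq!(ccsds_time_code_from_p_field(0b0111_0000), Err(0b111));
}
```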
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct DateBeforeCcsdsEpochError(UnixTime);
impl Display for DateBeforeCcsdsEpochError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
write!(f, "date before ccsds epoch: {:?}", self.0)
}
}
#[cfg(feature = "std")]
impl Error for DateBeforeCcsdsEpochError {}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum TimestampError {
/// Contains tuple where first value is the expected time code and the second
/// value is the found raw value
InvalidTimeCode(CcsdsTimeCodes, u8),
ByteConversionError(ByteConversionError),
CdsError(cds::CdsError),
CucError(cuc::CucError),
DateBeforeCcsdsEpoch(DateTime<Utc>),
InvalidTimeCode { expected: CcsdsTimeCode, found: u8 },
ByteConversion(ByteConversionError),
Cds(cds::CdsError),
Cuc(cuc::CucError),
CustomEpochNotSupported,
}
impl From<cds::CdsError> for TimestampError {
fn from(e: cds::CdsError) -> Self {
TimestampError::CdsError(e)
}
}
impl From<cuc::CucError> for TimestampError {
fn from(e: cuc::CucError) -> Self {
TimestampError::CucError(e)
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[derive(Debug, Clone)]
pub enum StdTimestampError {
SystemTimeError(SystemTimeError),
TimestampError(TimestampError),
}
#[cfg(feature = "std")]
impl From<TimestampError> for StdTimestampError {
fn from(v: TimestampError) -> Self {
Self::TimestampError(v)
}
}
#[cfg(feature = "std")]
impl From<SystemTimeError> for StdTimestampError {
fn from(v: SystemTimeError) -> Self {
Self::SystemTimeError(v)
}
}
impl Display for TimestampError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
TimestampError::InvalidTimeCode(time_code, raw_val) => {
TimestampError::InvalidTimeCode { expected, found } => {
write!(
f,
"invalid raw time code value {raw_val} for time code {time_code:?}"
"invalid raw time code value {found} for time code {expected:?}"
)
}
TimestampError::CdsError(e) => {
write!(f, "cds error {e}")
TimestampError::Cds(e) => {
write!(f, "cds error: {e}")
}
TimestampError::CucError(e) => {
write!(f, "cuc error {e}")
TimestampError::Cuc(e) => {
write!(f, "cuc error: {e}")
}
TimestampError::ByteConversionError(e) => {
write!(f, "byte conversion error {e}")
}
TimestampError::DateBeforeCcsdsEpoch(e) => {
write!(f, "datetime with date before ccsds epoch: {e}")
TimestampError::ByteConversion(e) => {
write!(f, "time stamp: {e}")
}
TimestampError::CustomEpochNotSupported => {
write!(f, "custom epochs are not supported")
@ -137,16 +119,42 @@ impl Display for TimestampError {
impl Error for TimestampError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
TimestampError::ByteConversionError(e) => Some(e),
TimestampError::CdsError(e) => Some(e),
TimestampError::CucError(e) => Some(e),
TimestampError::ByteConversion(e) => Some(e),
TimestampError::Cds(e) => Some(e),
TimestampError::Cuc(e) => Some(e),
_ => None,
}
}
}
impl From<cds::CdsError> for TimestampError {
fn from(e: cds::CdsError) -> Self {
TimestampError::Cds(e)
}
}
impl From<cuc::CucError> for TimestampError {
fn from(e: cuc::CucError) -> Self {
TimestampError::Cuc(e)
}
}
#[cfg(feature = "std")]
pub mod std_mod {
use crate::time::TimestampError;
use std::time::SystemTimeError;
use thiserror::Error;
#[derive(Debug, Clone, Error)]
pub enum StdTimestampError {
#[error("system time error: {0:?}")]
SystemTime(#[from] SystemTimeError),
#[error("timestamp error: {0}")]
Timestamp(#[from] TimestampError),
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub fn seconds_since_epoch() -> f64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
@ -158,6 +166,7 @@ pub fn seconds_since_epoch() -> f64 {
///
/// - CCSDS epoch: 1958-01-01T00:00:00+00:00
/// - UNIX Epoch: 1970-01-01T00:00:00+00:00
#[inline]
pub const fn unix_to_ccsds_days(unix_days: i64) -> i64 {
unix_days - DAYS_CCSDS_TO_UNIX as i64
}
@ -166,22 +175,24 @@ pub const fn unix_to_ccsds_days(unix_days: i64) -> i64 {
///
/// - CCSDS epoch: 1958-01-01T00:00:00+00:00
/// - UNIX Epoch: 1970-01-01T00:00:00+00:00
#[inline]
pub const fn ccsds_to_unix_days(ccsds_days: i64) -> i64 {
ccsds_days + DAYS_CCSDS_TO_UNIX as i64
}
/// Similar to [unix_to_ccsds_days] but converts the epoch instead, which is the number of elapsed
/// seconds since the CCSDS and UNIX epoch times.
#[inline]
pub const fn unix_epoch_to_ccsds_epoch(unix_epoch: i64) -> i64 {
unix_epoch - (DAYS_CCSDS_TO_UNIX as i64 * SECONDS_PER_DAY as i64)
}
#[inline]
pub const fn ccsds_epoch_to_unix_epoch(ccsds_epoch: i64) -> i64 {
ccsds_epoch + (DAYS_CCSDS_TO_UNIX as i64 * SECONDS_PER_DAY as i64)
}
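With `DAYS_CCSDS_TO_UNIX = -4383`, the day and epoch conversions work out as in this small sketch:

```rust
use spacepackets::time::{
    ccsds_epoch_to_unix_epoch, unix_epoch_to_ccsds_epoch, unix_to_ccsds_days, SECONDS_PER_DAY,
};

fn main() {
    // The UNIX epoch (1970-01-01) lies 4383 days after the CCSDS epoch (1958-01-01).
    assert_eq!(unix_to_ccsds_days(0), 4383);
    // The same offset expressed in seconds: 4383 * 86400.
    assert_eq!(unix_epoch_to_ccsds_epoch(0), 4383 * SECONDS_PER_DAY as i64);
    assert_eq!(unix_epoch_to_ccsds_epoch(0), 378_691_200);
    // Both epoch conversions are inverses of each other.
    assert_eq!(ccsds_epoch_to_unix_epoch(unix_epoch_to_ccsds_epoch(42)), 42);
}
```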
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub fn ms_of_day_using_sysclock() -> u32 {
ms_of_day(seconds_since_epoch())
}
@ -195,21 +206,29 @@ pub fn ms_of_day(seconds_since_epoch: f64) -> u32 {
}
pub trait TimeWriter {
fn len_written(&self) -> usize;
/// Generic function to convert write a timestamp into a raw buffer.
/// Returns the number of written bytes on success.
fn write_to_bytes(&self, bytes: &mut [u8]) -> Result<usize, TimestampError>;
#[cfg(feature = "alloc")]
fn to_vec(&self) -> Result<alloc::vec::Vec<u8>, TimestampError> {
let mut vec = alloc::vec![0; self.len_written()];
self.write_to_bytes(&mut vec)?;
Ok(vec)
}
}
pub trait TimeReader {
fn from_bytes(buf: &[u8]) -> Result<Self, TimestampError>
where
Self: Sized;
pub trait TimeReader: Sized {
fn from_bytes(buf: &[u8]) -> Result<Self, TimestampError>;
}
/// Trait for generic CCSDS time providers.
///
/// The UNIX helper methods and the [Self::date_time] method are not strictly necessary but extremely
/// The UNIX helper methods and the date/time conversion helpers are not strictly necessary but extremely
/// practical because they are a very common and simple exchange format for time information.
/// Therefore, it was decided to keep them in this trait as well.
pub trait CcsdsTimeProvider {
fn len_as_bytes(&self) -> usize;
@ -218,148 +237,244 @@ pub trait CcsdsTimeProvider {
/// entry denotes the length of the pfield and the second entry is the value of the pfield
/// in big endian format.
fn p_field(&self) -> (usize, [u8; 2]);
fn ccdsd_time_code(&self) -> CcsdsTimeCodes;
fn ccdsd_time_code(&self) -> CcsdsTimeCode;
fn unix_seconds(&self) -> i64;
fn subsecond_millis(&self) -> Option<u16>;
fn unix_stamp(&self) -> UnixTimestamp {
if self.subsecond_millis().is_none() {
return UnixTimestamp::new_only_seconds(self.unix_seconds());
}
UnixTimestamp::const_new(self.unix_seconds(), self.subsecond_millis().unwrap())
fn unix_secs(&self) -> i64;
fn subsec_nanos(&self) -> u32;
fn subsec_millis(&self) -> u16 {
(self.subsec_nanos() / 1_000_000) as u16
}
fn date_time(&self) -> Option<DateTime<Utc>>;
fn unix_time(&self) -> UnixTime {
UnixTime::new(self.unix_secs(), self.subsec_nanos())
}
#[cfg(feature = "chrono")]
fn chrono_date_time(&self) -> chrono::LocalResult<chrono::DateTime<chrono::Utc>> {
chrono::Utc.timestamp_opt(self.unix_secs(), self.subsec_nanos())
}
#[cfg(feature = "timelib")]
fn timelib_date_time(&self) -> Result<time::OffsetDateTime, time::error::ComponentRange> {
Ok(time::OffsetDateTime::from_unix_timestamp(self.unix_secs())?
+ time::Duration::nanoseconds(self.subsec_nanos().into()))
}
}
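// Minimal sketch showing what the provided helper methods above give every implementor: a
// uniform UNIX view of the underlying CCSDS time code.
fn _provider_helpers_sketch(provider: &impl CcsdsTimeProvider) {
    let unix = provider.unix_time();
    assert_eq!(unix.as_secs(), provider.unix_secs());
    assert_eq!(provider.subsec_millis() as u32, provider.subsec_nanos() / 1_000_000);
}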
/// UNIX timestamp: Elapsed seconds since 1970-01-01T00:00:00+00:00.
/// UNIX time: Elapsed non-leap seconds since 1970-01-01T00:00:00+00:00 UTC.
///
/// Also can optionally include subsecond millisecond for greater accuracy. Please note that a
/// subsecond millisecond value of 0 gets converted to [None].
/// This is a commonly used time format and can therefore also be used as a generic format to
/// convert other CCSDS time formats to and from. The subsecond precision is in nanoseconds
/// similarly to other common time formats and libraries.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct UnixTimestamp {
pub unix_seconds: i64,
subsecond_millis: Option<u16>,
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct UnixTime {
secs: i64,
subsec_nanos: u32,
}
impl UnixTimestamp {
/// Returns none if the subsecond millisecond value is larger than 999. 0 is converted to
/// a [None] value.
pub fn new(unix_seconds: i64, subsec_millis: u16) -> Option<Self> {
if subsec_millis > 999 {
impl UnixTime {
/// The UNIX epoch time: 1970-01-01T00:00:00+00:00 UTC.
pub const EPOCH: Self = Self {
secs: 0,
subsec_nanos: 0,
};
/// The minimum possible `UnixTime`.
pub const MIN: Self = Self {
secs: i64::MIN,
subsec_nanos: 0,
};
/// The maximum possible `UnixTime`.
pub const MAX: Self = Self {
secs: i64::MAX,
subsec_nanos: NANOS_PER_SECOND - 1,
};
/// Returns [None] if the subsecond nanosecond value is invalid (not a valid fraction of a
/// second).
pub fn new_checked(unix_seconds: i64, subsec_nanos: u32) -> Option<Self> {
if subsec_nanos >= NANOS_PER_SECOND {
return None;
}
Some(Self::const_new(unix_seconds, subsec_millis))
Some(Self::new(unix_seconds, subsec_nanos))
}
/// Like [Self::new] but const. Panics if the subsecond value is larger than 999.
pub const fn const_new(unix_seconds: i64, subsec_millis: u16) -> Self {
if subsec_millis > 999 {
panic!("subsec milliseconds exceeds 999");
/// Returns [None] if the subsecond millisecond value is invalid (not a valid fraction of a
/// second).
pub fn new_subsec_millis_checked(unix_seconds: i64, subsec_millis: u16) -> Option<Self> {
if subsec_millis >= 1000 {
return None;
}
Self::new_checked(unix_seconds, subsec_millis as u32 * 1_000_000)
}
/// This function will panic if the subsecond value is not a valid fraction of a second.
/// Use [Self::new_checked] if you want to handle this case without a panic.
pub const fn new(unix_seconds: i64, subsecond_nanos: u32) -> Self {
if subsecond_nanos >= NANOS_PER_SECOND {
panic!("invalid subsecond nanos value");
}
let subsecond_millis = if subsec_millis == 0 {
None
} else {
Some(subsec_millis)
};
Self {
unix_seconds,
subsecond_millis,
secs: unix_seconds,
subsec_nanos: subsecond_nanos,
}
}
pub fn new_only_seconds(unix_seconds: i64) -> Self {
/// This function will panic if the subsecond value is not a valid fraction of a second.
/// Use [Self::new_subsec_millis_checked] if you want to handle this case without a panic.
pub const fn new_subsec_millis(unix_seconds: i64, subsecond_millis: u16) -> Self {
if subsecond_millis >= 1000 {
panic!("invalid subsecond millisecond value");
}
Self {
unix_seconds,
subsecond_millis: None,
secs: unix_seconds,
subsec_nanos: subsecond_millis as u32 * 1_000_000,
}
}
pub fn subsecond_millis(&self) -> Option<u16> {
self.subsecond_millis
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub fn from_now() -> Result<Self, SystemTimeError> {
let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?;
let epoch = now.as_secs();
Ok(Self::const_new(epoch as i64, now.subsec_millis() as u16))
pub fn new_only_secs(unix_seconds: i64) -> Self {
Self {
secs: unix_seconds,
subsec_nanos: 0,
}
}
#[inline]
pub fn unix_seconds_f64(&self) -> f64 {
let mut secs = self.unix_seconds as f64;
if let Some(subsec_millis) = self.subsecond_millis {
secs += subsec_millis as f64 / 1000.0;
}
secs
pub fn subsec_millis(&self) -> u16 {
(self.subsec_nanos / 1_000_000) as u16
}
pub fn as_date_time(&self) -> LocalResult<DateTime<Utc>> {
Utc.timestamp_opt(
self.unix_seconds,
self.subsecond_millis.unwrap_or(0) as u32 * 10_u32.pow(6),
)
pub fn subsec_nanos(&self) -> u32 {
self.subsec_nanos
}
#[cfg(feature = "std")]
pub fn now() -> Result<Self, SystemTimeError> {
let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?;
let epoch = now.as_secs();
Ok(Self::new(epoch as i64, now.subsec_nanos()))
}
#[inline]
pub fn unix_secs_f64(&self) -> f64 {
self.secs as f64 + (self.subsec_nanos as f64 / 1_000_000_000.0)
}
pub fn as_secs(&self) -> i64 {
self.secs
}
#[cfg(feature = "chrono")]
pub fn chrono_date_time(&self) -> chrono::LocalResult<chrono::DateTime<chrono::Utc>> {
Utc.timestamp_opt(self.secs, self.subsec_nanos)
}
#[cfg(feature = "timelib")]
pub fn timelib_date_time(&self) -> Result<time::OffsetDateTime, time::error::ComponentRange> {
Ok(time::OffsetDateTime::from_unix_timestamp(self.as_secs())?
+ time::Duration::nanoseconds(self.subsec_nanos().into()))
}
// Calculates the difference in milliseconds between two [UnixTime] instances.
pub fn diff_in_millis(&self, other: &UnixTime) -> Option<i64> {
let seconds_difference = self.secs.checked_sub(other.secs)?;
// Convert seconds difference to milliseconds
let milliseconds_difference = seconds_difference.checked_mul(1000)?;
// Calculate the difference in subsecond milliseconds directly
let subsecond_difference_nanos = self.subsec_nanos as i64 - other.subsec_nanos as i64;
// Combine the differences
Some(milliseconds_difference + (subsecond_difference_nanos / 1_000_000))
}
}
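// Minimal sketch of the constructor variants above: one full second (or more) of subsecond
// nanoseconds or milliseconds is rejected by the checked constructors and panics in the
// unchecked ones.
fn _unix_time_ctor_sketch() {
    let a = UnixTime::new(5, 500_000_000); // 5.5 s, nanosecond API
    let b = UnixTime::new_subsec_millis(5, 500); // same instant, millisecond API
    assert_eq!(a, b);
    assert!(UnixTime::new_checked(5, 1_000_000_000).is_none());
    assert!(UnixTime::new_subsec_millis_checked(5, 1000).is_none());
    assert_eq!(UnixTime::new_only_secs(0), UnixTime::EPOCH);
}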
impl From<DateTime<Utc>> for UnixTimestamp {
fn from(value: DateTime<Utc>) -> Self {
Self::const_new(value.timestamp(), value.timestamp_subsec_millis() as u16)
#[cfg(feature = "chrono")]
impl From<chrono::DateTime<chrono::Utc>> for UnixTime {
fn from(value: chrono::DateTime<chrono::Utc>) -> Self {
Self::new(value.timestamp(), value.timestamp_subsec_nanos())
}
}
impl PartialOrd for UnixTimestamp {
#[cfg(feature = "timelib")]
impl From<time::OffsetDateTime> for UnixTime {
fn from(value: time::OffsetDateTime) -> Self {
Self::new(value.unix_timestamp(), value.nanosecond())
}
}
impl PartialOrd for UnixTime {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for UnixTime {
fn cmp(&self, other: &Self) -> Ordering {
if self == other {
return Some(Ordering::Equal);
return Ordering::Equal;
}
match self.unix_seconds.cmp(&other.unix_seconds) {
Ordering::Less => return Some(Ordering::Less),
Ordering::Greater => return Some(Ordering::Greater),
match self.secs.cmp(&other.secs) {
Ordering::Less => return Ordering::Less,
Ordering::Greater => return Ordering::Greater,
_ => (),
}
match self
.subsecond_millis()
.unwrap_or(0)
.cmp(&other.subsecond_millis().unwrap_or(0))
{
match self.subsec_millis().cmp(&other.subsec_millis()) {
Ordering::Less => {
return if self.unix_seconds < 0 {
Some(Ordering::Greater)
return if self.secs < 0 {
Ordering::Greater
} else {
Some(Ordering::Less)
Ordering::Less
}
}
Ordering::Greater => {
return if self.unix_seconds < 0 {
Some(Ordering::Less)
return if self.secs < 0 {
Ordering::Less
} else {
Some(Ordering::Greater)
Ordering::Greater
}
}
Ordering::Equal => (),
}
Some(Ordering::Equal)
Ordering::Equal
}
}
impl Ord for UnixTimestamp {
fn cmp(&self, other: &Self) -> Ordering {
PartialOrd::partial_cmp(self, other).unwrap()
/// Difference between two UNIX timestamps. The [Duration] type cannot contain negative durations,
/// so the sign information is supplied separately.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct StampDiff {
pub positive_duration: bool,
pub duration_absolute: Duration,
}
impl Sub for UnixTime {
type Output = Option<StampDiff>;
fn sub(self, rhs: Self) -> Self::Output {
let difference = self.diff_in_millis(&rhs)?;
Some(if difference < 0 {
StampDiff {
positive_duration: false,
duration_absolute: Duration::from_millis(-difference as u64),
}
} else {
StampDiff {
positive_duration: true,
duration_absolute: Duration::from_millis(difference as u64),
}
})
}
}
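// Minimal sketch of the subtraction operator defined above: the result carries the absolute
// difference as a [Duration] plus a separate sign flag. This mirrors the stamp diff unit tests
// further below.
fn _stamp_diff_sketch() {
    let later = UnixTime::new_subsec_millis(3, 800);
    let earlier = UnixTime::new_subsec_millis(1, 900);
    let diff = (later - earlier).expect("stamp diff overflowed");
    assert!(diff.positive_duration);
    assert_eq!(diff.duration_absolute, Duration::from_millis(1900));
}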
fn get_new_stamp_after_addition(
current_stamp: &UnixTimestamp,
duration: Duration,
) -> UnixTimestamp {
let mut new_subsec_millis =
current_stamp.subsecond_millis().unwrap_or(0) + duration.subsec_millis() as u16;
let mut new_unix_seconds = current_stamp.unix_seconds;
fn get_new_stamp_after_addition(current_stamp: &UnixTime, duration: Duration) -> UnixTime {
let mut new_subsec_nanos = current_stamp.subsec_nanos() + duration.subsec_nanos();
let mut new_unix_seconds = current_stamp.secs;
let mut increment_seconds = |value: u32| {
if new_unix_seconds < 0 {
new_unix_seconds = new_unix_seconds
@ -371,8 +486,8 @@ fn get_new_stamp_after_addition(
.expect("new unix seconds would exceed i64::MAX");
}
};
if new_subsec_millis >= 1000 {
new_subsec_millis -= 1000;
if new_subsec_nanos >= 1_000_000_000 {
new_subsec_nanos -= 1_000_000_000;
increment_seconds(1);
}
increment_seconds(
@ -381,7 +496,7 @@ fn get_new_stamp_after_addition(
.try_into()
.expect("duration seconds exceeds u32::MAX"),
);
UnixTimestamp::const_new(new_unix_seconds, new_subsec_millis)
UnixTime::new(new_unix_seconds, new_subsec_nanos)
}
/// Please note that this operation will panic on the following conditions:
@ -389,7 +504,7 @@ fn get_new_stamp_after_addition(
/// - Unix seconds after subtraction for stamps before the unix epoch exceeds [i64::MIN].
/// - Unix seconds after addition exceeds [i64::MAX].
/// - Seconds from duration to add exceeds [u32::MAX].
impl AddAssign<Duration> for UnixTimestamp {
impl AddAssign<Duration> for UnixTime {
fn add_assign(&mut self, duration: Duration) {
*self = get_new_stamp_after_addition(self, duration);
}
@ -400,7 +515,7 @@ impl AddAssign<Duration> for UnixTimestamp {
/// - Unix seconds after subtraction for stamps before the unix epoch exceeds [i64::MIN].
/// - Unix seconds after addition exceeds [i64::MAX].
/// - Unix seconds exceeds [u32::MAX].
impl Add<Duration> for UnixTimestamp {
impl Add<Duration> for UnixTime {
type Output = Self;
fn add(self, duration: Duration) -> Self::Output {
@ -408,8 +523,8 @@ impl Add<Duration> for UnixTimestamp {
}
}
impl Add<Duration> for &UnixTimestamp {
type Output = UnixTimestamp;
impl Add<Duration> for &UnixTime {
type Output = UnixTime;
fn add(self, duration: Duration) -> Self::Output {
get_new_stamp_after_addition(self, duration)
@ -418,7 +533,16 @@ impl Add<Duration> for &UnixTimestamp {
#[cfg(all(test, feature = "std"))]
mod tests {
use super::*;
use alloc::string::ToString;
use chrono::{Datelike, Timelike};
use std::{format, println};
use super::{cuc::CucError, *};
#[allow(dead_code)]
const UNIX_STAMP_CONST: UnixTime = UnixTime::new(5, 999_999_999);
#[allow(dead_code)]
const UNIX_STAMP_CONST_2: UnixTime = UnixTime::new_subsec_millis(5, 999);
#[test]
fn test_days_conversion() {
@ -427,12 +551,22 @@ mod tests {
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_get_current_time() {
let sec_floats = seconds_since_epoch();
assert!(sec_floats > 0.0);
}
#[test]
fn test_ms_of_day() {
let ms = ms_of_day(0.0);
assert_eq!(ms, 0);
let ms = ms_of_day(5.0);
assert_eq!(ms, 5000);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_ccsds_epoch() {
let now = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
@ -447,29 +581,29 @@ mod tests {
#[test]
fn basic_unix_stamp_test() {
let stamp = UnixTimestamp::new_only_seconds(-200);
assert_eq!(stamp.unix_seconds, -200);
assert!(stamp.subsecond_millis().is_none());
let stamp = UnixTimestamp::new_only_seconds(250);
assert_eq!(stamp.unix_seconds, 250);
assert!(stamp.subsecond_millis().is_none());
let stamp = UnixTime::new_only_secs(-200);
assert_eq!(stamp.secs, -200);
assert_eq!(stamp.subsec_millis(), 0);
let stamp = UnixTime::new_only_secs(250);
assert_eq!(stamp.secs, 250);
assert_eq!(stamp.subsec_millis(), 0);
}
#[test]
fn basic_float_unix_stamp_test() {
let stamp = UnixTimestamp::new(500, 600).unwrap();
assert!(stamp.subsecond_millis.is_some());
assert_eq!(stamp.unix_seconds, 500);
let subsec_millis = stamp.subsecond_millis().unwrap();
let stamp = UnixTime::new_subsec_millis_checked(500, 600).unwrap();
assert_eq!(stamp.secs, 500);
let subsec_millis = stamp.subsec_millis();
assert_eq!(subsec_millis, 600);
assert!((500.6 - stamp.unix_seconds_f64()).abs() < 0.0001);
println!("{:?}", (500.6 - stamp.unix_secs_f64()).to_string());
assert!((500.6 - stamp.unix_secs_f64()).abs() < 0.0001);
}
#[test]
fn test_ord_larger() {
let stamp0 = UnixTimestamp::new_only_seconds(5);
let stamp1 = UnixTimestamp::new(5, 500).unwrap();
let stamp2 = UnixTimestamp::new_only_seconds(6);
let stamp0 = UnixTime::new_only_secs(5);
let stamp1 = UnixTime::new_subsec_millis_checked(5, 500).unwrap();
let stamp2 = UnixTime::new_only_secs(6);
assert!(stamp1 > stamp0);
assert!(stamp2 > stamp0);
assert!(stamp2 > stamp1);
@ -477,9 +611,9 @@ mod tests {
#[test]
fn test_ord_smaller() {
let stamp0 = UnixTimestamp::new_only_seconds(5);
let stamp1 = UnixTimestamp::new(5, 500).unwrap();
let stamp2 = UnixTimestamp::new_only_seconds(6);
let stamp0 = UnixTime::new_only_secs(5);
let stamp1 = UnixTime::new_subsec_millis_checked(5, 500).unwrap();
let stamp2 = UnixTime::new_only_secs(6);
assert!(stamp0 < stamp1);
assert!(stamp0 < stamp2);
assert!(stamp1 < stamp2);
@ -487,9 +621,9 @@ mod tests {
#[test]
fn test_ord_larger_neg_numbers() {
let stamp0 = UnixTimestamp::new_only_seconds(-5);
let stamp1 = UnixTimestamp::new(-5, 500).unwrap();
let stamp2 = UnixTimestamp::new_only_seconds(-6);
let stamp0 = UnixTime::new_only_secs(-5);
let stamp1 = UnixTime::new_subsec_millis_checked(-5, 500).unwrap();
let stamp2 = UnixTime::new_only_secs(-6);
assert!(stamp0 > stamp1);
assert!(stamp0 > stamp2);
assert!(stamp1 > stamp2);
@ -499,9 +633,9 @@ mod tests {
#[test]
fn test_ord_smaller_neg_numbers() {
let stamp0 = UnixTimestamp::new_only_seconds(-5);
let stamp1 = UnixTimestamp::new(-5, 500).unwrap();
let stamp2 = UnixTimestamp::new_only_seconds(-6);
let stamp0 = UnixTime::new_only_secs(-5);
let stamp1 = UnixTime::new_subsec_millis_checked(-5, 500).unwrap();
let stamp2 = UnixTime::new_only_secs(-6);
assert!(stamp2 < stamp1);
assert!(stamp2 < stamp0);
assert!(stamp1 < stamp0);
@ -509,10 +643,11 @@ mod tests {
assert!(stamp2 <= stamp1);
}
#[allow(clippy::nonminimal_bool)]
#[test]
fn test_eq() {
let stamp0 = UnixTimestamp::new(5, 0).unwrap();
let stamp1 = UnixTimestamp::new_only_seconds(5);
let stamp0 = UnixTime::new(5, 0);
let stamp1 = UnixTime::new_only_secs(5);
assert_eq!(stamp0, stamp1);
assert!(stamp0 <= stamp1);
assert!(stamp0 >= stamp1);
@ -522,32 +657,115 @@ mod tests {
#[test]
fn test_addition() {
let mut stamp0 = UnixTimestamp::new_only_seconds(1);
let mut stamp0 = UnixTime::new_only_secs(1);
stamp0 += Duration::from_secs(5);
assert_eq!(stamp0.unix_seconds, 6);
assert!(stamp0.subsecond_millis().is_none());
assert_eq!(stamp0.as_secs(), 6);
assert_eq!(stamp0.subsec_millis(), 0);
let stamp1 = stamp0 + Duration::from_millis(500);
assert_eq!(stamp1.unix_seconds, 6);
assert!(stamp1.subsecond_millis().is_some());
assert_eq!(stamp1.subsecond_millis().unwrap(), 500);
assert_eq!(stamp1.secs, 6);
assert_eq!(stamp1.subsec_millis(), 500);
}
#[test]
fn test_addition_on_ref() {
let stamp0 = &UnixTimestamp::new(20, 500).unwrap();
let stamp0 = &UnixTime::new_subsec_millis_checked(20, 500).unwrap();
let stamp1 = stamp0 + Duration::from_millis(2500);
assert_eq!(stamp1.unix_seconds, 23);
assert!(stamp1.subsecond_millis().is_none());
assert_eq!(stamp1.secs, 23);
assert_eq!(stamp1.subsec_millis(), 0);
}
#[test]
fn test_as_dt() {
let stamp = UnixTime::new_only_secs(0);
let dt = stamp.chrono_date_time().unwrap();
assert_eq!(dt.year(), 1970);
assert_eq!(dt.month(), 1);
assert_eq!(dt.day(), 1);
assert_eq!(dt.hour(), 0);
assert_eq!(dt.minute(), 0);
assert_eq!(dt.second(), 0);
}
#[test]
#[cfg_attr(miri, ignore)]
fn test_from_now() {
let stamp_now = UnixTime::now().unwrap();
let dt_now = stamp_now.chrono_date_time().unwrap();
assert!(dt_now.year() >= 2020);
}
#[test]
fn test_stamp_diff_positive_0() {
let stamp_later = UnixTime::new(2, 0);
let StampDiff {
positive_duration,
duration_absolute,
} = (stamp_later - UnixTime::new(1, 0)).expect("stamp diff error");
assert!(positive_duration);
assert_eq!(duration_absolute, Duration::from_secs(1));
}
#[test]
fn test_stamp_diff_positive_1() {
let stamp_later = UnixTime::new(3, 800 * 1_000_000);
let stamp_earlier = UnixTime::new_subsec_millis_checked(1, 900).unwrap();
let StampDiff {
positive_duration,
duration_absolute,
} = (stamp_later - stamp_earlier).expect("stamp diff error");
assert!(positive_duration);
assert_eq!(duration_absolute, Duration::from_millis(1900));
}
#[test]
fn test_stamp_diff_negative() {
let stamp_later = UnixTime::new_subsec_millis_checked(3, 800).unwrap();
let stamp_earlier = UnixTime::new_subsec_millis_checked(1, 900).unwrap();
let StampDiff {
positive_duration,
duration_absolute,
} = (stamp_earlier - stamp_later).expect("stamp diff error");
assert!(!positive_duration);
assert_eq!(duration_absolute, Duration::from_millis(1900));
}
#[test]
fn test_addition_spillover() {
let mut stamp0 = UnixTimestamp::new(1, 900).unwrap();
let mut stamp0 = UnixTime::new_subsec_millis_checked(1, 900).unwrap();
stamp0 += Duration::from_millis(100);
assert_eq!(stamp0.unix_seconds, 2);
assert!(stamp0.subsecond_millis().is_none());
assert_eq!(stamp0.secs, 2);
assert_eq!(stamp0.subsec_millis(), 0);
stamp0 += Duration::from_millis(1100);
assert_eq!(stamp0.unix_seconds, 3);
assert_eq!(stamp0.subsecond_millis().unwrap(), 100);
assert_eq!(stamp0.secs, 3);
assert_eq!(stamp0.subsec_millis(), 100);
}
#[test]
fn test_cuc_error_printout() {
let cuc_error = CucError::InvalidCounterWidth(12);
let stamp_error = TimestampError::from(cuc_error);
assert_eq!(stamp_error.to_string(), format!("cuc error: {cuc_error}"));
}
#[test]
#[cfg(feature = "timelib")]
fn test_unix_stamp_as_timelib_datetime() {
let stamp_epoch = UnixTime::EPOCH;
let timelib_dt = stamp_epoch.timelib_date_time().unwrap();
assert_eq!(timelib_dt.year(), 1970);
assert_eq!(timelib_dt.month(), time::Month::January);
assert_eq!(timelib_dt.day(), 1);
assert_eq!(timelib_dt.hour(), 0);
assert_eq!(timelib_dt.minute(), 0);
assert_eq!(timelib_dt.second(), 0);
}
#[test]
#[cfg(feature = "timelib")]
fn test_unix_stamp_from_timelib_datetime() {
let timelib_dt = time::OffsetDateTime::UNIX_EPOCH;
let unix_time = UnixTime::from(timelib_dt);
let timelib_converted_back = unix_time.timelib_date_time().unwrap();
assert_eq!(timelib_dt, timelib_converted_back);
}
}

715
src/tm.rs

@ -1,715 +0,0 @@
//! This module contains all components required to create ECSS PUS C telemetry packets according
//! to [ECSS-E-ST-70-41C](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
use crate::ecss::{
ccsds_impl, crc_from_raw_data, crc_procedure, sp_header_impls, user_data_from_raw,
verify_crc16_from_raw, CrcType, PusError, PusPacket, PusVersion, CRC_CCITT_FALSE,
};
use crate::{
ByteConversionError, CcsdsPacket, PacketType, SequenceFlags, SizeMissmatch, SpHeader,
CCSDS_HEADER_LEN,
};
use core::mem::size_of;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use zerocopy::AsBytes;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use delegate::delegate;
/// Length without timestamp
pub const PUC_TM_MIN_SEC_HEADER_LEN: usize = 7;
pub const PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA: usize =
CCSDS_HEADER_LEN + PUC_TM_MIN_SEC_HEADER_LEN + size_of::<CrcType>();
pub trait GenericPusTmSecondaryHeader {
fn pus_version(&self) -> PusVersion;
fn sc_time_ref_status(&self) -> u8;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn msg_counter(&self) -> u16;
fn dest_id(&self) -> u16;
}
pub mod zc {
use super::GenericPusTmSecondaryHeader;
use crate::ecss::{PusError, PusVersion};
use zerocopy::{AsBytes, FromBytes, NetworkEndian, Unaligned, U16};
#[derive(FromBytes, AsBytes, Unaligned)]
#[repr(C)]
pub struct PusTmSecHeaderWithoutTimestamp {
pus_version_and_sc_time_ref_status: u8,
service: u8,
subservice: u8,
msg_counter: U16<NetworkEndian>,
dest_id: U16<NetworkEndian>,
}
pub struct PusTmSecHeader<'slice> {
pub(crate) zc_header: PusTmSecHeaderWithoutTimestamp,
pub(crate) timestamp: Option<&'slice [u8]>,
}
impl TryFrom<crate::tm::PusTmSecondaryHeader<'_>> for PusTmSecHeaderWithoutTimestamp {
type Error = PusError;
fn try_from(header: crate::tm::PusTmSecondaryHeader) -> Result<Self, Self::Error> {
if header.pus_version != PusVersion::PusC {
return Err(PusError::VersionNotSupported(header.pus_version));
}
Ok(PusTmSecHeaderWithoutTimestamp {
pus_version_and_sc_time_ref_status: ((header.pus_version as u8) << 4)
| header.sc_time_ref_status,
service: header.service,
subservice: header.subservice,
msg_counter: U16::from(header.msg_counter),
dest_id: U16::from(header.dest_id),
})
}
}
impl PusTmSecHeaderWithoutTimestamp {
pub fn write_to_bytes(&self, slice: &mut [u8]) -> Option<()> {
self.write_to(slice)
}
pub fn from_bytes(slice: &[u8]) -> Option<Self> {
Self::read_from(slice)
}
}
impl GenericPusTmSecondaryHeader for PusTmSecHeaderWithoutTimestamp {
fn pus_version(&self) -> PusVersion {
PusVersion::try_from(self.pus_version_and_sc_time_ref_status >> 4 & 0b1111)
.unwrap_or(PusVersion::Invalid)
}
fn sc_time_ref_status(&self) -> u8 {
self.pus_version_and_sc_time_ref_status & 0b1111
}
fn service(&self) -> u8 {
self.service
}
fn subservice(&self) -> u8 {
self.subservice
}
fn msg_counter(&self) -> u16 {
self.msg_counter.get()
}
fn dest_id(&self) -> u16 {
self.dest_id.get()
}
}
}
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PusTmSecondaryHeader<'stamp> {
pus_version: PusVersion,
pub sc_time_ref_status: u8,
pub service: u8,
pub subservice: u8,
pub msg_counter: u16,
pub dest_id: u16,
pub timestamp: Option<&'stamp [u8]>,
}
impl<'stamp> PusTmSecondaryHeader<'stamp> {
pub fn new_simple(service: u8, subservice: u8, timestamp: &'stamp [u8]) -> Self {
Self::new(service, subservice, 0, 0, Some(timestamp))
}
/// Like [Self::new_simple] but without a timestamp.
pub fn new_simple_no_timestamp(service: u8, subservice: u8) -> Self {
Self::new(service, subservice, 0, 0, None)
}
pub fn new(
service: u8,
subservice: u8,
msg_counter: u16,
dest_id: u16,
timestamp: Option<&'stamp [u8]>,
) -> Self {
PusTmSecondaryHeader {
pus_version: PusVersion::PusC,
sc_time_ref_status: 0,
service,
subservice,
msg_counter,
dest_id,
timestamp,
}
}
}
impl GenericPusTmSecondaryHeader for PusTmSecondaryHeader<'_> {
fn pus_version(&self) -> PusVersion {
self.pus_version
}
fn sc_time_ref_status(&self) -> u8 {
self.sc_time_ref_status
}
fn service(&self) -> u8 {
self.service
}
fn subservice(&self) -> u8 {
self.subservice
}
fn msg_counter(&self) -> u16 {
self.msg_counter
}
fn dest_id(&self) -> u16 {
self.dest_id
}
}
impl<'slice> TryFrom<zc::PusTmSecHeader<'slice>> for PusTmSecondaryHeader<'slice> {
type Error = ();
fn try_from(sec_header: zc::PusTmSecHeader<'slice>) -> Result<Self, Self::Error> {
Ok(PusTmSecondaryHeader {
pus_version: sec_header.zc_header.pus_version(),
sc_time_ref_status: sec_header.zc_header.sc_time_ref_status(),
service: sec_header.zc_header.service(),
subservice: sec_header.zc_header.subservice(),
msg_counter: sec_header.zc_header.msg_counter(),
dest_id: sec_header.zc_header.dest_id(),
timestamp: sec_header.timestamp,
})
}
}
/// This class models the PUS C telemetry packet. It is the primary data structure to generate the
/// raw byte representation of PUS telemetry or to deserialize one from raw bytes.
///
/// This class also derives the [serde::Serialize] and [serde::Deserialize] traits if the [serde]
/// feature is used, which allows sending TM packets around in a raw byte format using a serde
/// provider like [postcard](https://docs.rs/postcard/latest/postcard/).
///
/// There is no spare bytes support yet.
///
/// # Lifetimes
///
/// * `'raw_data` - If the TM is not constructed from a raw slice, this will be the lifetime of
/// the buffer into which the user provided time stamp and source data are serialized. If it
/// is, this is the lifetime of the raw byte slice it is constructed from.
#[derive(Eq, Debug, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PusTm<'raw_data> {
pub sp_header: SpHeader,
pub sec_header: PusTmSecondaryHeader<'raw_data>,
/// If this is set to false, a manual call to [PusTm::calc_own_crc16] or
/// [PusTm::update_packet_fields] is necessary for the serialized or cached CRC16 to be valid.
pub calc_crc_on_serialization: bool,
#[cfg_attr(feature = "serde", serde(skip))]
raw_data: Option<&'raw_data [u8]>,
source_data: Option<&'raw_data [u8]>,
crc16: Option<u16>,
}
impl<'raw_data> PusTm<'raw_data> {
/// Generates a new struct instance.
///
/// # Arguments
///
/// * `sp_header` - Space packet header information. The correct packet type will be set
/// automatically
/// * `sec_header` - Information contained in the secondary header, including the service
/// and subservice type
/// * `app_data` - Custom application data
/// * `set_ccsds_len` - Can be used to automatically update the CCSDS space packet data length
/// field. If this is not set to true, [PusTm::update_ccsds_data_len] can be called to set
/// the correct value to this field manually
pub fn new(
sp_header: &mut SpHeader,
sec_header: PusTmSecondaryHeader<'raw_data>,
source_data: Option<&'raw_data [u8]>,
set_ccsds_len: bool,
) -> Self {
sp_header.set_packet_type(PacketType::Tm);
sp_header.set_sec_header_flag();
let mut pus_tm = PusTm {
sp_header: *sp_header,
raw_data: None,
source_data,
sec_header,
calc_crc_on_serialization: true,
crc16: None,
};
if set_ccsds_len {
pus_tm.update_ccsds_data_len();
}
pus_tm
}
pub fn len_packed(&self) -> usize {
let mut length = PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA;
if let Some(timestamp) = self.sec_header.timestamp {
length += timestamp.len();
}
if let Some(src_data) = self.source_data {
length += src_data.len();
}
length
}
pub fn timestamp(&self) -> Option<&'raw_data [u8]> {
self.sec_header.timestamp
}
pub fn source_data(&self) -> Option<&'raw_data [u8]> {
self.source_data
}
pub fn set_dest_id(&mut self, dest_id: u16) {
self.sec_header.dest_id = dest_id;
}
pub fn set_msg_counter(&mut self, msg_counter: u16) {
self.sec_header.msg_counter = msg_counter
}
pub fn set_sc_time_ref_status(&mut self, sc_time_ref_status: u8) {
self.sec_header.sc_time_ref_status = sc_time_ref_status & 0b1111;
}
sp_header_impls!();
/// This is called automatically if the `set_ccsds_len` argument in the [PusTm::new] call was
/// used.
/// If this was not done or the time stamp or source data is set or changed after construction,
/// this function needs to be called to ensure that the data length field of the CCSDS header
/// is set correctly
pub fn update_ccsds_data_len(&mut self) {
self.sp_header.data_len =
self.len_packed() as u16 - size_of::<crate::zc::SpHeader>() as u16 - 1;
}
/// This function should be called before the TM packet is serialized if
/// [PusTm.calc_crc_on_serialization] is set to false. It will calculate and cache the CRC16.
pub fn calc_own_crc16(&mut self) {
let mut digest = CRC_CCITT_FALSE.digest();
let sph_zc = crate::zc::SpHeader::from(self.sp_header);
digest.update(sph_zc.as_bytes());
let pus_tc_header = zc::PusTmSecHeaderWithoutTimestamp::try_from(self.sec_header).unwrap();
digest.update(pus_tc_header.as_bytes());
if let Some(stamp) = self.sec_header.timestamp {
digest.update(stamp);
}
if let Some(src_data) = self.source_data {
digest.update(src_data);
}
self.crc16 = Some(digest.finalize())
}
/// This helper function calls both [PusTm.update_ccsds_data_len] and [PusTm.calc_own_crc16]
pub fn update_packet_fields(&mut self) {
self.update_ccsds_data_len();
self.calc_own_crc16();
}
/// Write the raw PUS byte representation to a provided buffer.
pub fn write_to_bytes(&self, slice: &mut [u8]) -> Result<usize, PusError> {
let mut curr_idx = 0;
let total_size = self.len_packed();
if total_size > slice.len() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: slice.len(),
expected: total_size,
})
.into());
}
self.sp_header
.write_to_be_bytes(&mut slice[0..CCSDS_HEADER_LEN])?;
curr_idx += CCSDS_HEADER_LEN;
let sec_header_len = size_of::<zc::PusTmSecHeaderWithoutTimestamp>();
let sec_header = zc::PusTmSecHeaderWithoutTimestamp::try_from(self.sec_header).unwrap();
sec_header
.write_to_bytes(&mut slice[curr_idx..curr_idx + sec_header_len])
.ok_or(ByteConversionError::ZeroCopyToError)?;
curr_idx += sec_header_len;
if let Some(timestamp) = self.sec_header.timestamp {
let timestamp_len = timestamp.len();
slice[curr_idx..curr_idx + timestamp_len].copy_from_slice(timestamp);
curr_idx += timestamp_len;
}
if let Some(src_data) = self.source_data {
slice[curr_idx..curr_idx + src_data.len()].copy_from_slice(src_data);
curr_idx += src_data.len();
}
let crc16 = crc_procedure(
self.calc_crc_on_serialization,
&self.crc16,
0,
curr_idx,
slice,
)?;
slice[curr_idx..curr_idx + 2].copy_from_slice(crc16.to_be_bytes().as_slice());
curr_idx += 2;
Ok(curr_idx)
}
/// Append the raw PUS byte representation to a provided [alloc::vec::Vec]
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub fn append_to_vec(&self, vec: &mut Vec<u8>) -> Result<usize, PusError> {
let sph_zc = crate::zc::SpHeader::from(self.sp_header);
let mut appended_len = PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA;
if let Some(timestamp) = self.sec_header.timestamp {
appended_len += timestamp.len();
}
if let Some(src_data) = self.source_data {
appended_len += src_data.len();
};
let start_idx = vec.len();
let mut ser_len = 0;
vec.extend_from_slice(sph_zc.as_bytes());
ser_len += sph_zc.as_bytes().len();
// The PUS version is hardcoded to PUS C
let sec_header = zc::PusTmSecHeaderWithoutTimestamp::try_from(self.sec_header).unwrap();
vec.extend_from_slice(sec_header.as_bytes());
ser_len += sec_header.as_bytes().len();
if let Some(timestamp) = self.sec_header.timestamp {
ser_len += timestamp.len();
vec.extend_from_slice(timestamp);
}
if let Some(src_data) = self.source_data {
vec.extend_from_slice(src_data);
ser_len += src_data.len();
}
let crc16 = crc_procedure(
self.calc_crc_on_serialization,
&self.crc16,
start_idx,
ser_len,
&vec[start_idx..start_idx + ser_len],
)?;
vec.extend_from_slice(crc16.to_be_bytes().as_slice());
Ok(appended_len)
}
/// Create a [PusTm] instance from a raw slice. On success, it returns a tuple containing
/// the instance and the found byte length of the packet. The timestamp length needs to be
/// known beforehand.
pub fn from_bytes(
slice: &'raw_data [u8],
timestamp_len: usize,
) -> Result<(Self, usize), PusError> {
let raw_data_len = slice.len();
if raw_data_len < PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA {
return Err(PusError::RawDataTooShort(raw_data_len));
}
let mut current_idx = 0;
let (sp_header, _) = SpHeader::from_be_bytes(&slice[0..CCSDS_HEADER_LEN])?;
current_idx += 6;
let total_len = sp_header.total_len();
if raw_data_len < total_len || total_len < PUS_TM_MIN_LEN_WITHOUT_SOURCE_DATA {
return Err(PusError::RawDataTooShort(raw_data_len));
}
let sec_header_zc = zc::PusTmSecHeaderWithoutTimestamp::from_bytes(
&slice[current_idx..current_idx + PUC_TM_MIN_SEC_HEADER_LEN],
)
.ok_or(ByteConversionError::ZeroCopyFromError)?;
current_idx += PUC_TM_MIN_SEC_HEADER_LEN;
let mut timestamp = None;
if timestamp_len > 0 {
timestamp = Some(&slice[current_idx..current_idx + timestamp_len]);
}
let zc_sec_header_wrapper = zc::PusTmSecHeader {
zc_header: sec_header_zc,
timestamp,
};
current_idx += timestamp_len;
let raw_data = &slice[0..total_len];
let pus_tm = PusTm {
sp_header,
sec_header: PusTmSecondaryHeader::try_from(zc_sec_header_wrapper).unwrap(),
raw_data: Some(&slice[0..total_len]),
source_data: user_data_from_raw(current_idx, total_len, raw_data_len, slice)?,
calc_crc_on_serialization: false,
crc16: Some(crc_from_raw_data(raw_data)?),
};
verify_crc16_from_raw(raw_data, pus_tm.crc16.expect("CRC16 invalid"))?;
Ok((pus_tm, total_len))
}
/// If [Self] was constructed using [Self::from_bytes], this function will return the slice it was
/// constructed from. Otherwise, [None] will be returned.
pub fn raw_bytes(&self) -> Option<&'raw_data [u8]> {
self.raw_data
}
}
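// Minimal usage sketch of the PusTm API above, mirroring the unit tests below: build a TM[17,2]
// ping reply with a 7 byte dummy timestamp and serialize it into a raw buffer.
fn _pus_tm_usage_sketch() {
    let timestamp = [0u8; 7];
    let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
    let sec_header = PusTmSecondaryHeader::new_simple(17, 2, &timestamp);
    let tm = PusTm::new(&mut sph, sec_header, None, true);
    let mut buf = [0u8; 32];
    let written = tm.write_to_bytes(&mut buf).expect("serialization failed");
    assert_eq!(written, 22); // 6 byte CCSDS header + 7 byte sec. header + 7 byte stamp + CRC16
}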
impl PartialEq for PusTm<'_> {
fn eq(&self, other: &Self) -> bool {
self.sp_header == other.sp_header
&& self.sec_header == other.sec_header
&& self.source_data == other.source_data
}
}
//noinspection RsTraitImplementation
impl CcsdsPacket for PusTm<'_> {
ccsds_impl!();
}
//noinspection RsTraitImplementation
impl PusPacket for PusTm<'_> {
delegate!(to self.sec_header {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
});
fn user_data(&self) -> Option<&[u8]> {
self.source_data
}
fn crc16(&self) -> Option<u16> {
self.crc16
}
}
//noinspection RsTraitImplementation
impl GenericPusTmSecondaryHeader for PusTm<'_> {
delegate!(to self.sec_header {
fn pus_version(&self) -> PusVersion;
fn service(&self) -> u8;
fn subservice(&self) -> u8;
fn dest_id(&self) -> u16;
fn msg_counter(&self) -> u16;
fn sc_time_ref_status(&self) -> u8;
});
}
#[cfg(test)]
mod tests {
use super::*;
use crate::ecss::PusVersion::PusC;
use crate::SpHeader;
fn base_ping_reply_full_ctor(timestamp: &[u8]) -> PusTm {
let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
let tm_header = PusTmSecondaryHeader::new_simple(17, 2, &timestamp);
PusTm::new(&mut sph, tm_header, None, true)
}
fn base_hk_reply<'a>(timestamp: &'a [u8], src_data: &'a [u8]) -> PusTm<'a> {
let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
let tc_header = PusTmSecondaryHeader::new_simple(3, 5, &timestamp);
PusTm::new(&mut sph, tc_header, Some(src_data), true)
}
fn dummy_timestamp() -> &'static [u8] {
return &[0, 1, 2, 3, 4, 5, 6];
}
#[test]
fn test_basic() {
let timestamp = dummy_timestamp();
let pus_tm = base_ping_reply_full_ctor(&timestamp);
verify_ping_reply(&pus_tm, false, 22, dummy_timestamp());
}
#[test]
fn test_serialization_no_source_data() {
let timestamp = dummy_timestamp();
let pus_tm = base_ping_reply_full_ctor(&timestamp);
let mut buf: [u8; 32] = [0; 32];
let ser_len = pus_tm
.write_to_bytes(&mut buf)
.expect("Serialization failed");
assert_eq!(ser_len, 22);
verify_raw_ping_reply(&buf);
}
#[test]
fn test_serialization_with_source_data() {
let src_data = [1, 2, 3];
let hk_reply = base_hk_reply(dummy_timestamp(), &src_data);
let mut buf: [u8; 32] = [0; 32];
let ser_len = hk_reply
.write_to_bytes(&mut buf)
.expect("Serialization failed");
assert_eq!(ser_len, 25);
assert_eq!(buf[20], 1);
assert_eq!(buf[21], 2);
assert_eq!(buf[22], 3);
}
#[test]
fn test_setters() {
let timestamp = dummy_timestamp();
let mut pus_tm = base_ping_reply_full_ctor(&timestamp);
pus_tm.set_sc_time_ref_status(0b1010);
pus_tm.set_dest_id(0x7fff);
pus_tm.set_msg_counter(0x1f1f);
assert_eq!(pus_tm.sc_time_ref_status(), 0b1010);
assert_eq!(pus_tm.dest_id(), 0x7fff);
assert_eq!(pus_tm.msg_counter(), 0x1f1f);
assert!(pus_tm.set_apid(0x7ff));
assert_eq!(pus_tm.apid(), 0x7ff);
}
#[test]
fn test_deserialization_no_source_data() {
let timestamp = dummy_timestamp();
let pus_tm = base_ping_reply_full_ctor(&timestamp);
let mut buf: [u8; 32] = [0; 32];
let ser_len = pus_tm
.write_to_bytes(&mut buf)
.expect("Serialization failed");
assert_eq!(ser_len, 22);
let (tm_deserialized, size) = PusTm::from_bytes(&buf, 7).expect("Deserialization failed");
assert_eq!(ser_len, size);
verify_ping_reply(&tm_deserialized, false, 22, dummy_timestamp());
}
#[test]
fn test_manual_field_update() {
let mut sph = SpHeader::tm_unseg(0x123, 0x234, 0).unwrap();
let tc_header = PusTmSecondaryHeader::new_simple(17, 2, dummy_timestamp());
let mut tm = PusTm::new(&mut sph, tc_header, None, false);
tm.calc_crc_on_serialization = false;
assert_eq!(tm.data_len(), 0x00);
let mut buf: [u8; 32] = [0; 32];
let res = tm.write_to_bytes(&mut buf);
assert!(res.is_err());
assert!(matches!(res.unwrap_err(), PusError::CrcCalculationMissing));
tm.update_ccsds_data_len();
assert_eq!(tm.data_len(), 15);
tm.calc_own_crc16();
let res = tm.write_to_bytes(&mut buf);
assert!(res.is_ok());
tm.sp_header.data_len = 0;
tm.update_packet_fields();
assert_eq!(tm.data_len(), 15);
}
#[test]
fn test_target_buf_too_small() {
let timestamp = dummy_timestamp();
let pus_tm = base_ping_reply_full_ctor(&timestamp);
let mut buf: [u8; 16] = [0; 16];
let res = pus_tm.write_to_bytes(&mut buf);
assert!(res.is_err());
let error = res.unwrap_err();
assert!(matches!(error, PusError::ByteConversionError { .. }));
match error {
PusError::ByteConversionError(err) => match err {
ByteConversionError::ToSliceTooSmall(size_missmatch) => {
assert_eq!(size_missmatch.expected, 22);
assert_eq!(size_missmatch.found, 16);
}
_ => panic!("Invalid PUS error {:?}", err),
},
_ => {
panic!("Invalid error {:?}", error);
}
}
}
#[test]
#[cfg(feature = "alloc")]
fn test_append_to_vec() {
let timestamp = dummy_timestamp();
let pus_tm = base_ping_reply_full_ctor(&timestamp);
let mut vec = Vec::new();
let res = pus_tm.append_to_vec(&mut vec);
assert!(res.is_ok());
assert_eq!(res.unwrap(), 22);
verify_raw_ping_reply(vec.as_slice());
}
#[test]
#[cfg(feature = "alloc")]
fn test_append_to_vec_with_src_data() {
let src_data = [1, 2, 3];
let hk_reply = base_hk_reply(dummy_timestamp(), &src_data);
let mut vec = Vec::new();
vec.push(4);
let res = hk_reply.append_to_vec(&mut vec);
assert!(res.is_ok());
assert_eq!(res.unwrap(), 25);
assert_eq!(vec.len(), 26);
}
fn verify_raw_ping_reply(buf: &[u8]) {
// Secondary header is set -> 0b0000_1001 , APID occupies last bit of first byte
assert_eq!(buf[0], 0x09);
// Rest of APID 0x123
assert_eq!(buf[1], 0x23);
// Unsegmented is the default, and first byte of 0x234 occupies this byte as well
assert_eq!(buf[2], 0xc2);
assert_eq!(buf[3], 0x34);
assert_eq!(((buf[4] as u16) << 8) | buf[5] as u16, 15);
// SC time ref status is 0
assert_eq!(buf[6], (PusC as u8) << 4);
assert_eq!(buf[7], 17);
assert_eq!(buf[8], 2);
// MSG counter 0
assert_eq!(buf[9], 0x00);
assert_eq!(buf[10], 0x00);
// Destination ID
assert_eq!(buf[11], 0x00);
assert_eq!(buf[12], 0x00);
// Timestamp
assert_eq!(&buf[13..20], dummy_timestamp());
let mut digest = CRC_CCITT_FALSE.digest();
digest.update(&buf[0..20]);
let crc16 = digest.finalize();
assert_eq!(((crc16 >> 8) & 0xff) as u8, buf[20]);
assert_eq!((crc16 & 0xff) as u8, buf[21]);
}
fn verify_ping_reply(
tm: &PusTm,
has_user_data: bool,
exp_full_len: usize,
exp_timestamp: &[u8],
) {
assert!(tm.is_tm());
assert_eq!(PusPacket::service(tm), 17);
assert_eq!(PusPacket::subservice(tm), 2);
assert!(tm.sec_header_flag());
assert_eq!(tm.len_packed(), exp_full_len);
assert_eq!(tm.timestamp().unwrap(), exp_timestamp);
if has_user_data {
assert!(!tm.user_data().is_none());
}
assert_eq!(PusPacket::pus_version(tm), PusC);
assert_eq!(tm.apid(), 0x123);
assert_eq!(tm.seq_count(), 0x234);
assert_eq!(tm.data_len(), exp_full_len as u16 - 7);
assert_eq!(tm.dest_id(), 0x0000);
assert_eq!(tm.msg_counter(), 0x0000);
assert_eq!(tm.sc_time_ref_status(), 0b0000);
}
#[test]
fn partial_eq_pus_tm() {
let timestamp = dummy_timestamp();
let pus_tm_1 = base_ping_reply_full_ctor(timestamp);
let pus_tm_2 = base_ping_reply_full_ctor(timestamp);
assert_eq!(pus_tm_1, pus_tm_2);
}
#[test]
fn partial_eq_serialized_vs_derialized() {
let timestamp = dummy_timestamp();
let pus_tm = base_ping_reply_full_ctor(timestamp);
let mut buf = [0; 32];
pus_tm.write_to_bytes(&mut buf).unwrap();
assert_eq!(pus_tm, PusTm::from_bytes(&buf, timestamp.len()).unwrap().0);
}
}

734
src/util.rs Normal file

@ -0,0 +1,734 @@
use crate::ByteConversionError;
use core::fmt::{Debug, Display, Formatter};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::error::Error;
pub trait ToBeBytes {
type ByteArray: AsRef<[u8]>;
/// Length when written to big endian bytes.
fn written_len(&self) -> usize;
fn to_be_bytes(&self) -> Self::ByteArray;
}
impl ToBeBytes for () {
type ByteArray = [u8; 0];
#[inline]
fn written_len(&self) -> usize {
0
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
[]
}
}
impl ToBeBytes for u8 {
type ByteArray = [u8; 1];
#[inline]
fn written_len(&self) -> usize {
1
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
u8::to_be_bytes(*self)
}
}
impl ToBeBytes for u16 {
type ByteArray = [u8; 2];
#[inline]
fn written_len(&self) -> usize {
2
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
u16::to_be_bytes(*self)
}
}
impl ToBeBytes for u32 {
type ByteArray = [u8; 4];
#[inline]
fn written_len(&self) -> usize {
4
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
u32::to_be_bytes(*self)
}
}
impl ToBeBytes for u64 {
type ByteArray = [u8; 8];
#[inline]
fn written_len(&self) -> usize {
8
}
#[inline]
fn to_be_bytes(&self) -> Self::ByteArray {
u64::to_be_bytes(*self)
}
}
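// Minimal sketch of how the ToBeBytes trait is consumed by generic code such as
// GenericUnsignedByteField further below. The caller must supply a buffer of at least
// `written_len()` bytes, otherwise the slice operation panics.
fn _write_be_sketch<T: ToBeBytes>(value: &T, buf: &mut [u8]) -> usize {
    let len = value.written_len();
    buf[..len].copy_from_slice(value.to_be_bytes().as_ref());
    len
}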
pub trait UnsignedEnum {
/// Size of the unsigned enumeration in bytes.
fn size(&self) -> usize;
/// Write the unsigned enumeration to a raw buffer. Returns the written size on success.
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
fn value(&self) -> u64;
#[cfg(feature = "alloc")]
fn to_vec(&self) -> alloc::vec::Vec<u8> {
let mut buf = alloc::vec![0; self.size()];
self.write_to_be_bytes(&mut buf).unwrap();
buf
}
}
pub trait UnsignedEnumExt: UnsignedEnum + Debug + Copy + Clone + PartialEq + Eq {}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum UnsignedByteFieldError {
/// Value is too large for specified width of byte field.
ValueTooLargeForWidth {
width: usize,
value: u64,
},
/// Only 1, 2, 4 and 8 are allowed width values. Optionally contains the expected width if
/// applicable, for example for conversions.
InvalidWidth {
found: usize,
expected: Option<usize>,
},
ByteConversionError(ByteConversionError),
}
impl From<ByteConversionError> for UnsignedByteFieldError {
#[inline]
fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value)
}
}
impl Display for UnsignedByteFieldError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
Self::ByteConversionError(e) => {
write!(f, "low level byte conversion error: {e}")
}
Self::InvalidWidth { found, .. } => {
write!(f, "invalid width {found}, only 1, 2, 4 and 8 are allowed.")
}
Self::ValueTooLargeForWidth { width, value } => {
write!(f, "value {value} too large for width {width}")
}
}
}
}
#[cfg(feature = "std")]
impl Error for UnsignedByteFieldError {}
/// Type erased variant.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct UnsignedByteField {
width: usize,
value: u64,
}
impl UnsignedByteField {
#[inline]
pub const fn new(width: usize, value: u64) -> Self {
Self { width, value }
}
#[inline]
pub const fn value_const(&self) -> u64 {
self.value
}
#[inline]
pub fn new_from_be_bytes(width: usize, buf: &[u8]) -> Result<Self, UnsignedByteFieldError> {
if width > buf.len() {
return Err(ByteConversionError::FromSliceTooSmall {
expected: width,
found: buf.len(),
}
.into());
}
match width {
0 => Ok(Self::new(width, 0)),
1 => Ok(Self::new(width, buf[0] as u64)),
2 => Ok(Self::new(
width,
u16::from_be_bytes(buf[0..2].try_into().unwrap()) as u64,
)),
4 => Ok(Self::new(
width,
u32::from_be_bytes(buf[0..4].try_into().unwrap()) as u64,
)),
8 => Ok(Self::new(
width,
u64::from_be_bytes(buf[0..8].try_into().unwrap()),
)),
_ => Err(UnsignedByteFieldError::InvalidWidth {
found: width,
expected: None,
}),
}
}
}
impl UnsignedEnum for UnsignedByteField {
#[inline]
fn size(&self) -> usize {
self.width
}
#[inline]
fn value(&self) -> u64 {
self.value_const()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.size() {
return Err(ByteConversionError::ToSliceTooSmall {
expected: self.size(),
found: buf.len(),
});
}
match self.size() {
0 => Ok(0),
1 => {
let u8 = UnsignedByteFieldU8::try_from(*self).unwrap();
u8.write_to_be_bytes(buf)
}
2 => {
let u16 = UnsignedByteFieldU16::try_from(*self).unwrap();
u16.write_to_be_bytes(buf)
}
4 => {
let u32 = UnsignedByteFieldU32::try_from(*self).unwrap();
u32.write_to_be_bytes(buf)
}
8 => {
let u64 = UnsignedByteFieldU64::try_from(*self).unwrap();
u64.write_to_be_bytes(buf)
}
_ => {
// The API does not allow this.
panic!("unexpected written length");
}
}
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct GenericUnsignedByteField<TYPE: Copy + Into<u64>> {
value: TYPE,
}
impl<TYPE: Copy + Into<u64>> GenericUnsignedByteField<TYPE> {
pub const fn new(val: TYPE) -> Self {
Self { value: val }
}
pub const fn value_typed(&self) -> TYPE {
self.value
}
}
impl<TYPE: Copy + ToBeBytes + Into<u64>> UnsignedEnum for GenericUnsignedByteField<TYPE> {
#[inline]
fn size(&self) -> usize {
self.value.written_len()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < self.size() {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: self.size(),
});
}
buf[..self.size()].copy_from_slice(self.value.to_be_bytes().as_ref());
Ok(self.value.written_len())
}
#[inline]
fn value(&self) -> u64 {
self.value_typed().into()
}
}
pub type UnsignedByteFieldEmpty = GenericUnsignedByteField<()>;
pub type UnsignedByteFieldU8 = GenericUnsignedByteField<u8>;
pub type UnsignedByteFieldU16 = GenericUnsignedByteField<u16>;
pub type UnsignedByteFieldU32 = GenericUnsignedByteField<u32>;
pub type UnsignedByteFieldU64 = GenericUnsignedByteField<u64>;
pub type UbfU8 = UnsignedByteFieldU8;
pub type UbfU16 = UnsignedByteFieldU16;
pub type UbfU32 = UnsignedByteFieldU32;
pub type UbfU64 = UnsignedByteFieldU64;
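// Minimal sketch tying the typed aliases above to the type-erased UnsignedByteField: both
// serialize to the same big-endian representation and convert into each other via the
// From/TryFrom impls below.
fn _unsigned_byte_field_sketch() {
    let typed = UbfU16::new(3823);
    let erased = UnsignedByteField::from(typed);
    assert_eq!(erased.size(), 2);
    let (mut buf_typed, mut buf_erased) = ([0u8; 2], [0u8; 2]);
    typed.write_to_be_bytes(&mut buf_typed).unwrap();
    erased.write_to_be_bytes(&mut buf_erased).unwrap();
    assert_eq!(buf_typed, buf_erased);
    assert_eq!(UbfU16::try_from(erased).unwrap(), typed);
}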
impl From<UnsignedByteFieldU8> for UnsignedByteField {
fn from(value: UnsignedByteFieldU8) -> Self {
Self::new(1, value.value as u64)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU8 {
type Error = UnsignedByteFieldError;
#[inline]
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 1 {
return Err(UnsignedByteFieldError::InvalidWidth {
found: value.width,
expected: Some(1),
});
}
Ok(Self::new(value.value as u8))
}
}
impl From<UnsignedByteFieldU16> for UnsignedByteField {
#[inline]
fn from(value: UnsignedByteFieldU16) -> Self {
Self::new(2, value.value as u64)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU16 {
type Error = UnsignedByteFieldError;
#[inline]
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 2 {
return Err(UnsignedByteFieldError::InvalidWidth {
found: value.width,
expected: Some(2),
});
}
Ok(Self::new(value.value as u16))
}
}
impl From<UnsignedByteFieldU32> for UnsignedByteField {
#[inline]
fn from(value: UnsignedByteFieldU32) -> Self {
Self::new(4, value.value as u64)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU32 {
type Error = UnsignedByteFieldError;
#[inline]
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 4 {
return Err(UnsignedByteFieldError::InvalidWidth {
found: value.width,
expected: Some(4),
});
}
Ok(Self::new(value.value as u32))
}
}
impl From<UnsignedByteFieldU64> for UnsignedByteField {
#[inline]
fn from(value: UnsignedByteFieldU64) -> Self {
Self::new(8, value.value)
}
}
impl TryFrom<UnsignedByteField> for UnsignedByteFieldU64 {
type Error = UnsignedByteFieldError;
#[inline]
fn try_from(value: UnsignedByteField) -> Result<Self, Self::Error> {
if value.width != 8 {
return Err(UnsignedByteFieldError::InvalidWidth {
found: value.width,
expected: Some(8),
});
}
Ok(Self::new(value.value))
}
}
#[cfg(test)]
pub mod tests {
use crate::util::{
UnsignedByteField, UnsignedByteFieldError, UnsignedByteFieldU16, UnsignedByteFieldU32,
UnsignedByteFieldU64, UnsignedByteFieldU8, UnsignedEnum,
};
use crate::ByteConversionError;
use std::format;
#[test]
fn test_simple_u8() {
let u8 = UnsignedByteFieldU8::new(5);
assert_eq!(u8.size(), 1);
let mut buf: [u8; 8] = [0; 8];
let len = u8
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 1);
assert_eq!(buf[0], 5);
for val in buf.iter().skip(1) {
assert_eq!(*val, 0);
}
assert_eq!(u8.value_typed(), 5);
assert_eq!(u8.value(), 5);
}
#[test]
fn test_simple_u16() {
let u16 = UnsignedByteFieldU16::new(3823);
assert_eq!(u16.size(), 2);
let mut buf: [u8; 8] = [0; 8];
let len = u16
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 2);
let raw_val = u16::from_be_bytes(buf[0..2].try_into().unwrap());
assert_eq!(raw_val, 3823);
for val in buf.iter().skip(2) {
assert_eq!(*val, 0);
}
assert_eq!(u16.value_typed(), 3823);
assert_eq!(u16.value(), 3823);
}
#[test]
fn test_simple_u32() {
let u32 = UnsignedByteFieldU32::new(80932);
assert_eq!(u32.size(), 4);
let mut buf: [u8; 8] = [0; 8];
let len = u32
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 4);
let raw_val = u32::from_be_bytes(buf[0..4].try_into().unwrap());
assert_eq!(raw_val, 80932);
(4..8).for_each(|i| {
assert_eq!(buf[i], 0);
});
assert_eq!(u32.value_typed(), 80932);
assert_eq!(u32.value(), 80932);
}
#[test]
fn test_simple_u64() {
let u64 = UnsignedByteFieldU64::new(5999999);
assert_eq!(u64.size(), 8);
let mut buf: [u8; 8] = [0; 8];
let len = u64
.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(len, 8);
let raw_val = u64::from_be_bytes(buf[0..8].try_into().unwrap());
assert_eq!(raw_val, 5999999);
assert_eq!(u64.value_typed(), 5999999);
assert_eq!(u64.value(), 5999999);
}
#[test]
fn conversions_u8() {
let u8 = UnsignedByteFieldU8::new(5);
let u8_type_erased = UnsignedByteField::from(u8);
assert_eq!(u8_type_erased.width, 1);
assert_eq!(u8_type_erased.value, 5);
let u8_conv_back =
UnsignedByteFieldU8::try_from(u8_type_erased).expect("conversion failed for u8");
assert_eq!(u8, u8_conv_back);
assert_eq!(u8_conv_back.value, 5);
}
#[test]
fn conversion_u8_fails() {
let field = UnsignedByteField::new(2, 60000);
let conv_fails = UnsignedByteFieldU8::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth {
found,
expected: Some(expected),
} => {
assert_eq!(found, 2);
assert_eq!(expected, 1);
}
_ => {
panic!("{}", format!("invalid error {err}"))
}
}
}
#[test]
fn conversions_u16() {
let u16 = UnsignedByteFieldU16::new(64444);
let u16_type_erased = UnsignedByteField::from(u16);
assert_eq!(u16_type_erased.width, 2);
assert_eq!(u16_type_erased.value, 64444);
let u16_conv_back =
UnsignedByteFieldU16::try_from(u16_type_erased).expect("conversion failed for u16");
assert_eq!(u16, u16_conv_back);
assert_eq!(u16_conv_back.value, 64444);
}
#[test]
fn conversion_u16_fails() {
let field = UnsignedByteField::new(4, 75000);
let conv_fails = UnsignedByteFieldU16::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth {
found,
expected: Some(expected),
} => {
assert_eq!(found, 4);
assert_eq!(expected, 2);
}
_ => {
panic!("{}", format!("invalid error {err}"))
}
}
}
#[test]
fn conversions_u32() {
let u32 = UnsignedByteFieldU32::new(75000);
let u32_type_erased = UnsignedByteField::from(u32);
assert_eq!(u32_type_erased.width, 4);
assert_eq!(u32_type_erased.value, 75000);
let u32_conv_back =
UnsignedByteFieldU32::try_from(u32_type_erased).expect("conversion failed for u32");
assert_eq!(u32, u32_conv_back);
assert_eq!(u32_conv_back.value, 75000);
}
#[test]
fn conversion_u32_fails() {
let field = UnsignedByteField::new(8, 75000);
let conv_fails = UnsignedByteFieldU32::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth {
found,
expected: Some(expected),
} => {
assert_eq!(found, 8);
assert_eq!(expected, 4);
}
_ => {
panic!("{}", format!("invalid error {err}"))
}
}
}
#[test]
fn conversions_u64() {
let u64 = UnsignedByteFieldU64::new(5999999);
let u64_type_erased = UnsignedByteField::from(u64);
assert_eq!(u64_type_erased.width, 8);
assert_eq!(u64_type_erased.value, 5999999);
let u64_conv_back =
UnsignedByteFieldU64::try_from(u64_type_erased).expect("conversion failed for u64");
assert_eq!(u64, u64_conv_back);
assert_eq!(u64_conv_back.value, 5999999);
}
#[test]
fn conversion_u64_fails() {
let field = UnsignedByteField::new(4, 60000);
let conv_fails = UnsignedByteFieldU64::try_from(field);
assert!(conv_fails.is_err());
let err = conv_fails.unwrap_err();
match err {
UnsignedByteFieldError::InvalidWidth {
found,
expected: Some(expected),
} => {
assert_eq!(found, 4);
assert_eq!(expected, 8);
}
_ => {
panic!("{}", format!("invalid error {err}"))
}
}
}
#[test]
fn type_erased_u8_write() {
let u8 = UnsignedByteField::new(1, 5);
assert_eq!(u8.size(), 1);
let mut buf: [u8; 8] = [0; 8];
u8.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
assert_eq!(buf[0], 5);
(1..8).for_each(|i| {
assert_eq!(buf[i], 0);
});
}
#[test]
fn type_erased_u16_write() {
let u16 = UnsignedByteField::new(2, 3823);
assert_eq!(u16.size(), 2);
let mut buf: [u8; 8] = [0; 8];
u16.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
let raw_val = u16::from_be_bytes(buf[0..2].try_into().unwrap());
assert_eq!(raw_val, 3823);
for val in buf.iter().skip(2) {
assert_eq!(*val, 0);
}
}
#[test]
fn type_erased_u32_write() {
let u32 = UnsignedByteField::new(4, 80932);
assert_eq!(u32.size(), 4);
let mut buf: [u8; 8] = [0; 8];
u32.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
let raw_val = u32::from_be_bytes(buf[0..4].try_into().unwrap());
assert_eq!(raw_val, 80932);
(4..8).for_each(|i| {
assert_eq!(buf[i], 0);
});
}
#[test]
fn type_erased_u64_write() {
let u64 = UnsignedByteField::new(8, 5999999);
assert_eq!(u64.size(), 8);
let mut buf: [u8; 8] = [0; 8];
u64.write_to_be_bytes(&mut buf)
.expect("writing to raw buffer failed");
let raw_val = u64::from_be_bytes(buf[0..8].try_into().unwrap());
assert_eq!(raw_val, 5999999);
}
#[test]
fn type_erased_u8_construction() {
let buf: [u8; 2] = [5, 10];
let u8 = UnsignedByteField::new_from_be_bytes(1, &buf).expect("construction failed");
assert_eq!(u8.width, 1);
assert_eq!(u8.value, 5);
}
#[test]
fn type_erased_u16_construction() {
let buf: [u8; 2] = [0x10, 0x15];
let u16 = UnsignedByteField::new_from_be_bytes(2, &buf).expect("construction failed");
assert_eq!(u16.width, 2);
assert_eq!(u16.value, 0x1015);
}
#[test]
fn type_erased_u32_construction() {
let buf: [u8; 4] = [0x01, 0x02, 0x03, 0x04];
let u32 = UnsignedByteField::new_from_be_bytes(4, &buf).expect("construction failed");
assert_eq!(u32.width, 4);
assert_eq!(u32.value, 0x01020304);
}
#[test]
fn type_erased_u64_construction() {
let buf: [u8; 8] = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08];
let u64 = UnsignedByteField::new_from_be_bytes(8, &buf).expect("construction failed");
assert_eq!(u64.width, 8);
assert_eq!(u64.value, 0x0102030405060708);
}
#[test]
fn type_u16_target_buf_too_small() {
let u16 = UnsignedByteFieldU16::new(500);
let mut buf: [u8; 1] = [0; 1];
let res = u16.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let err = res.unwrap_err();
match err {
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(found, 1);
assert_eq!(expected, 2);
}
_ => {
panic!("invalid exception")
}
}
}
#[test]
fn type_erased_u16_target_buf_too_small() {
let u16 = UnsignedByteField::new(2, 500);
let mut buf: [u8; 1] = [0; 1];
let res = u16.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let err = res.unwrap_err();
match err {
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(found, 1);
assert_eq!(expected, 2);
}
_ => {
panic!("invalid exception {}", err)
}
}
let u16 = UnsignedByteField::new_from_be_bytes(2, &buf);
assert!(u16.is_err());
let err = u16.unwrap_err();
if let UnsignedByteFieldError::ByteConversionError(
ByteConversionError::FromSliceTooSmall { found, expected },
) = err
{
assert_eq!(expected, 2);
assert_eq!(found, 1);
} else {
panic!("unexpected exception {}", err);
}
}
#[test]
fn type_u32_target_buf_too_small() {
let u16 = UnsignedByteFieldU32::new(500);
let mut buf: [u8; 3] = [0; 3];
let res = u16.write_to_be_bytes(&mut buf);
assert!(res.is_err());
let err = res.unwrap_err();
match err {
ByteConversionError::ToSliceTooSmall { found, expected } => {
assert_eq!(found, 3);
assert_eq!(expected, 4);
}
_ => {
panic!("invalid exception")
}
}
}
}