Compare commits

...

155 Commits
v0.0.1 ... main

Author SHA1 Message Date
0a5457a6d4
add license files and pass log 2024-05-22 18:55:33 +02:00
51fea9f2a2
bump some package versions 2024-05-19 13:42:07 +02:00
2f11c614b4 Merge pull request 'add TM DB handling' (#34) from update-pytmtc into main
Reviewed-on: #34
2024-05-19 08:06:10 +02:00
9acc34b86a
add TM DB handling 2024-05-19 08:05:18 +02:00
7396b4bdc5 Merge pull request 'smaller fix and TODO' (#33) from smaller-fixes-and-todo into main
Reviewed-on: #33
2024-05-02 15:26:39 +02:00
0883da5763
smaller fix and TODO 2024-05-02 15:25:37 +02:00
778ae06bbd Merge pull request 'prepare v0.2.0' (#32) from prep_v0.2.0 into main
Reviewed-on: #32
2024-05-02 15:16:46 +02:00
078ac459f8
update README 2024-05-02 15:13:34 +02:00
51fbb2a46b
prepare v0.2.0 2024-05-02 15:11:05 +02:00
d37da28efb Merge pull request 'Some bugfixes and docs' (#31) from some-bugfixes into main
Reviewed-on: #31
2024-05-02 15:08:32 +02:00
adc262c57f
bump sat-rs 2024-05-02 15:08:01 +02:00
fd025da4b8
some documentation 2024-05-02 13:39:20 +02:00
9282526392
some bugfixes 2024-05-02 13:35:58 +02:00
f4d0a86d7d Merge pull request 'take image metadata generation' (#30) from take-img-metadata-impl into main
Reviewed-on: #30
2024-05-02 13:15:17 +02:00
be261da778
metadata test works 2024-05-02 13:14:51 +02:00
a350e96fc4
Merge remote-tracking branch 'origin/main' into take-img-metadata-impl 2024-05-02 12:37:00 +02:00
ebb58e4fd4 Merge pull request 'bump sat-rs and improve PUS stack' (#29) from bump-satrs-improve-pus-stack into main
Reviewed-on: #29
2024-05-02 12:35:37 +02:00
66a18e08e5
bump sat-rs and improve PUS stack 2024-05-02 12:32:57 +02:00
c4fffdfe2d
take image metadata generation 2024-05-02 11:15:06 +02:00
62cc933f88 Merge pull request 'Move Images Commad and Bugfixes' (#28) from move-images-command into main
Reviewed-on: #28
Reviewed-by: lkoester <st167799@stud.uni-stuttgart.de>
2024-05-01 11:14:07 +02:00
6e4abc44d2 this should be it 2024-04-30 19:48:58 +02:00
ecea83fc4b implement move images command
- Also implement various importantr bugfixes for shutdown handling
2024-04-30 19:45:00 +02:00
36a42f95a5 move images command 2024-04-30 18:35:46 +02:00
7770347f4d Merge pull request 'Fixes and optimizations for camera' (#27) from fixes-and-optimizations-camera into main
Reviewed-on: #27
2024-04-30 17:34:27 +02:00
3bad422046 this is stupid 2024-04-30 15:54:34 +02:00
5b1392af4f update cargo.lock 2024-04-30 15:43:13 +02:00
087aed7f78 Merge remote-tracking branch 'origin/main' into fixes-and-optimizations-camera 2024-04-30 13:37:38 +02:00
26ecb6ee33 Merge pull request 'move generic struct' (#26) from move-generic-struct into main
Reviewed-on: #26
2024-04-30 13:33:16 +02:00
3173b18ceb
updated sat-rs 2024-04-29 23:46:21 +02:00
e47523a734 move generic struct 2024-04-29 21:18:32 +02:00
9d8104be40
a lot of bugfixes 2024-04-29 18:52:11 +02:00
c9e5b9ffdb Merge pull request 'some more optimizations' (#25) from some-more-optimizations into main
Reviewed-on: #25
2024-04-29 16:47:24 +02:00
a095132d57
some more optimizations 2024-04-29 16:45:08 +02:00
e227fa1d01 Merge pull request 'tweaks and fixes' (#24) from some-tweaks-and-fixes into main
Reviewed-on: #24
2024-04-29 16:23:53 +02:00
ec4b16ed9e
tweaks and fixes 2024-04-29 16:21:45 +02:00
d5ea52f9bf Merge pull request 'Use pydantic' (#23) from use-pydantic-for-python-serialization into main
Reviewed-on: #23
Reviewed-by: lkoester <st167799@stud.uni-stuttgart.de>
2024-04-29 15:24:49 +02:00
c184a5f0b3 Merge branch 'main' into use-pydantic-for-python-serialization 2024-04-29 15:24:38 +02:00
2cda1011f7 improvements for deployment script 2024-04-28 13:14:19 +02:00
2c34f46eca update dependencies 2024-04-28 12:47:08 +02:00
b4bf834c39 minor tweak 2024-04-28 12:45:01 +02:00
56b5076230 use pydantic instead of serde in Python 2024-04-28 12:39:34 +02:00
14aa2f39a5 bump minor version 2024-04-26 19:25:31 +02:00
ff1fa9c8fa changelog 2024-04-26 19:25:08 +02:00
d86172c436 Merge pull request 'added host feature' (#22) from introduce-host-feature into main
Reviewed-on: #22
2024-04-26 19:20:36 +02:00
fab1859d78 added host feature 2024-04-26 19:18:37 +02:00
ec69c7d581 now only some tests are missing 2024-04-26 18:53:11 +02:00
lkoester
c89db2e2d7 removed weird clone 2024-04-26 14:24:46 +02:00
4cb9124ab7 Merge pull request 'add blocking shell cmd execution' (#20) from shell-cmd-executor into main
Reviewed-on: #20
Reviewed-by: lkoester <st167799@stud.uni-stuttgart.de>
2024-04-25 20:23:41 +02:00
a25b55baed Merge remote-tracking branch 'origin/main' into shell-cmd-executor 2024-04-25 20:23:23 +02:00
2992829ccf Merge pull request 'downlink_logfile' (#21) from downlink_logfile into main
Reviewed-on: #21
Reviewed-by: Robin Müller <muellerr@irs.uni-stuttgart.de>
2024-04-25 17:49:18 +02:00
lkoester
f8f3bc73ac added minor logging changes 2024-04-25 17:48:44 +02:00
b4a84dbf20
that should do the job 2024-04-25 17:11:52 +02:00
lkoester
279fa42f31 added commander actions for logfile and image downlink 2024-04-25 16:50:08 +02:00
eea2f76b9f
remove stray printouts 2024-04-25 16:49:15 +02:00
c5eddcb292
add unittest for shell cmd executor 2024-04-25 16:45:00 +02:00
lkoester
2566050b3b added get latest image function 2024-04-25 16:31:05 +02:00
lkoester
df556acbf5 added moving images into downlink lp folder 2024-04-25 16:12:22 +02:00
lkoester
eeba6fab44 added low priority downlink folder and downlinking logs 2024-04-25 15:17:44 +02:00
lkoester
cbdb017fe2 added logging directories with date 2024-04-25 14:38:51 +02:00
1e57b1f978
add blocking shell cmd execution 2024-04-25 01:20:54 +02:00
60e4af435a
print version str at program start 2024-04-24 20:55:14 +02:00
17c9b8694d Merge pull request 'prep first full release' (#19) from prepare-init-version into main
Reviewed-on: #19
2024-04-24 20:46:37 +02:00
7468fe9845
prep first full release 2024-04-24 20:43:23 +02:00
404d1c1c1e Merge pull request 'clean up python code a bit' (#18) from some-python-client-cleanup into main
Reviewed-on: #18
2024-04-24 20:41:02 +02:00
597f4ca977
some more cleaning up 2024-04-24 20:40:51 +02:00
879a50f79e
pytmtc is a package now 2024-04-24 20:35:59 +02:00
380b36f1de
clean up python code a bit 2024-04-24 20:15:32 +02:00
03c1dc8d64 Merge pull request 'add event management, small fix for CAM handler loop' (#17) from events-small-bugfix into main
Reviewed-on: #17
2024-04-24 20:04:01 +02:00
dcaf67d6d0
add event management, small fix for CAM handler loop 2024-04-24 20:02:42 +02:00
707843ec9f Merge pull request 'camera_tests' (#13) from camera_tests into main
Reviewed-on: #13
2024-04-24 17:43:41 +02:00
9b047ac0d5
Merge remote-tracking branch 'origin/main' into camera_tests 2024-04-24 17:39:23 +02:00
638abf4259 Merge pull request 'Add HK and mode service' (#16) from add-hk-and-mode-service into main
Reviewed-on: #16
Reviewed-by: lkoester <st167799@stud.uni-stuttgart.de>
2024-04-24 17:34:03 +02:00
b6d3bb7712 Merge branch 'main' into add-hk-and-mode-service 2024-04-24 17:10:56 +02:00
4af249612d Merge pull request 'add scheduler service' (#15) from add-scheduler-service into main
Reviewed-on: #15
Reviewed-by: lkoester <st167799@stud.uni-stuttgart.de>
2024-04-24 17:10:44 +02:00
8c6f100c06 Merge branch 'main' into add-scheduler-service 2024-04-24 17:10:33 +02:00
ccf8048ce0 Merge pull request 'updated pyclient' (#14) from update-pyclient into main
Reviewed-on: #14
Reviewed-by: lkoester <st167799@stud.uni-stuttgart.de>
2024-04-24 17:10:27 +02:00
lkoester
6f3e14af3b fmt and clippy 2024-04-24 16:45:38 +02:00
lkoester
83322ae415 added debug output in action reply handler and tm handling in tmtcpy, ready to merge into main 2024-04-24 16:43:59 +02:00
lkoester
09bef401c0 added data reply to camera handler, now only missing tmtcpy counterpart 2024-04-24 16:26:54 +02:00
e3ad841d04 added HK and mode service 2024-04-23 16:49:06 +02:00
511214f903 add scheduler service 2024-04-23 16:14:16 +02:00
f6a6b005af updated pyclient 2024-04-23 15:00:00 +02:00
lkoester
88d4384beb this should be good for now 2024-04-22 16:56:49 +02:00
lkoester
1e867a51f5 merge main 2024-04-22 15:55:47 +02:00
lkoester
028de494e4 got basic action stuff running, now to make error handling better 2024-04-22 15:47:25 +02:00
ffeb7951a8 Merge pull request 'Client tests' (#12) from add-tcp-client-unittests into main
Reviewed-on: #12
2024-04-22 15:41:56 +02:00
e82139ac91 README 2024-04-22 15:41:28 +02:00
51473e7060 that should suffice 2024-04-22 15:38:53 +02:00
9c74246eb3
added test stub 2024-04-22 10:39:44 +02:00
96d5802c4f
added tmtc test 2024-04-22 10:38:18 +02:00
627dd64eca Merge branch 'main' into add-tcp-client-unittests 2024-04-20 11:23:08 +02:00
cd428577fa Merge pull request 'Commanding docs' (#11) from update-commanding-docs into main
Reviewed-on: #11
2024-04-20 11:22:28 +02:00
ee29961f62
add first client unittests 2024-04-20 11:21:02 +02:00
1501e5a421
fnish README update 2024-04-20 00:31:53 +02:00
lkoester
0f391c2087 some minor cleaning up plus initial image file handling 2024-04-19 22:05:57 +02:00
dd831fb1b6 larger img 2024-04-19 18:10:17 +02:00
0573d81e57 docs 2024-04-19 18:09:52 +02:00
fae2e90a65 add some docs 2024-04-19 18:04:27 +02:00
5f69f14652 Merge pull request 'Networking update' (#10) from networking-update into main
Reviewed-on: #10
2024-04-19 17:42:11 +02:00
ddac5ceab3 Networking update 2024-04-19 17:40:38 +02:00
lkoester
4f94e9cade fixed action tests 2024-04-19 17:11:38 +02:00
0da70ab5ac Merge pull request 'camera_merge_main' (#7) from camera_merge_main into main
Reviewed-on: #7
2024-04-19 11:45:24 +02:00
lkoester
c6ef1394c9 removed old low level camera implementations 2024-04-19 11:40:22 +02:00
d7acd93ee9 Merge pull request 'Use releases dependencies' (#8) from use-released-dependencies into main
Reviewed-on: #8
2024-04-18 14:08:17 +02:00
8ecd6f2847
bump version again 2024-04-18 14:08:03 +02:00
f18ae0e165
this works 2024-04-17 16:40:13 +02:00
cba2767272
use released dependencies 2024-04-17 16:29:59 +02:00
lkoester
22584c3f9c changed default camera parameters to constants 2024-04-17 13:03:31 +02:00
lkoester
9dbdd1ebbc Merge branch 'camera_merge_main' of https://egit.irs.uni-stuttgart.de/rust/ops-sat-rs into camera_merge_main 2024-04-17 10:39:56 +02:00
lkoester
f960f24415 removed pus tm handler generic for camera handler 2024-04-17 10:38:40 +02:00
5b70bbf173 remove log file 2024-04-16 17:13:46 +02:00
d9720c9ff2 remove leftover files, fmt and clippy 2024-04-16 17:13:14 +02:00
lkoester
2d3a4cd90c merged main into branch 2024-04-16 16:03:06 +02:00
lkoester
649e903c0a first spring of camera impl done, testing up next 2024-04-16 15:44:40 +02:00
7da9e2364b create logs in subfolder 2024-04-16 15:08:34 +02:00
45930a104b Merge branch 'main' of https://egit.irs.uni-stuttgart.de/rust/ops-sat-rs 2024-04-16 15:03:18 +02:00
e0c583cca8 better logging handling 2024-04-16 15:03:10 +02:00
2c3a3930fc Merge pull request 'MIO TCP client' (#6) from mio-tcp-client into main
Reviewed-on: #6
Reviewed-by: lkoester <st167799@stud.uni-stuttgart.de>
2024-04-16 14:50:36 +02:00
8ce305491b extend mio client to allow reconnection 2024-04-16 13:08:10 +02:00
b5e048a13b
start TM handling for TCP client 2024-04-16 10:05:27 +02:00
b359ff9d33
updated TCP code 2024-04-16 09:59:31 +02:00
lkoester
efe686becf initial camera handling things 2024-04-16 08:30:55 +02:00
192e701785
MIO tcp client 2024-04-15 16:42:48 +02:00
8313a0b26c
another small update 2024-04-15 14:13:10 +02:00
df72676c0d
cleaned up code and bumped sat-rs 2024-04-15 12:16:01 +02:00
6dddfd5a70
start with mio tcp client 2024-04-13 15:16:53 +02:00
d9629dee38 Merge pull request 'Add action service and controller component' (#5) from add-action-service-controller-obj into main
Reviewed-on: #5
2024-04-13 11:19:13 +02:00
99842e2a13 Merge branch 'main' into add-action-service-controller-obj 2024-04-13 11:00:59 +02:00
9da9cf5b1f Merge pull request 'Start adding stop logic' (#3) from stop-logic into main
Reviewed-on: #3
2024-04-13 10:56:45 +02:00
1d92084e65
more improvements 2024-04-10 17:13:29 +02:00
d0835f9393
some more fixes and improvements 2024-04-10 17:03:56 +02:00
5cc561cbad
important bugfix for PUS stack 2024-04-10 15:50:02 +02:00
5c0b1a3256
some minor fixes for python client 2024-04-10 15:44:39 +02:00
ec9a042f09
fixes for pyclient APID 2024-04-10 15:39:53 +02:00
5d87cab9cc
implemented action req handling 2024-04-10 15:37:24 +02:00
443995fe5e
smaller fixes 2024-04-10 15:05:24 +02:00
47eba99da1 Merge branch 'stop-logic' into add-action-service-controller-obj 2024-04-10 15:03:21 +02:00
7d7cd99d6a Merge remote-tracking branch 'origin/main' into stop-logic 2024-04-10 15:02:59 +02:00
6b89f00d90
Merge branch 'stop-logic' into add-action-service-controller-obj 2024-04-10 15:02:45 +02:00
35be75afa3 Merge pull request 'can_pus_handler' (#4) from can_pus_handler into main
Reviewed-on: #4
Reviewed-by: Robin Müller <muellerr@irs.uni-stuttgart.de>
2024-04-10 15:01:46 +02:00
d86be82447 Merge branch 'main' into can_pus_handler 2024-04-10 15:01:27 +02:00
59a06b5c50
add action service and controller obj 2024-04-10 14:59:34 +02:00
ebd3514dec
delete cached json file 2024-04-10 14:31:25 +02:00
62d4572f31 Merge remote-tracking branch 'origin/main' into stop-logic 2024-04-10 14:26:14 +02:00
31b68dd041
update default tmtc config handling 2024-04-10 14:25:52 +02:00
88d1956dbf
cargo fmt + clippy 2024-04-10 12:51:15 +02:00
02b4a51457
Merge remote-tracking branch 'origin/main' into can_pus_handler 2024-04-10 12:50:31 +02:00
dc66dcd469
it works 2024-04-10 12:47:26 +02:00
458759a1df Merge branch 'main' into stop-logic 2024-04-09 18:11:15 +02:00
lkoester
ef580e5634 added empty can file 2024-04-09 17:51:42 +02:00
710fc94384 start adding stop logic 2024-04-09 17:07:39 +02:00
2480ee6e06 README 2024-04-09 15:59:22 +02:00
526739fd9c update cargo.lock file 2024-04-09 15:40:23 +02:00
lkoester
aac2ad206c typo in readme 2024-04-09 11:34:02 +02:00
lkoester
406687d6d8 added tmtc and interface directories 2024-04-09 11:32:40 +02:00
57 changed files with 7453 additions and 1023 deletions

5
.gitignore vendored

@ -4,4 +4,7 @@
/.vscode
# Ignore this, may include user specific paths.
/.cargo/config.toml
output.log
# Ignore logs folder generated by application.
/logs
/exp278.toml

40
CHANGELOG.md Normal file

@ -0,0 +1,40 @@
Change Log
=======
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
# [v0.2.0] 2024-05-02
- Use released `sat-rs` version v0.2.0
## Added
- Taking an image now generates a metadata file.
- Implemented a command to move all camera image related files to the `toGroundLP` folder.
## Fixed
- Various important bugfixes for stop handling and home path handling
# [v0.1.1] 2024-04-26
Various smaller improvements and tweaks.
## Fixed
- Logger file now has unique time-stamped name.
## Added
- Printout of SW version at startup.
- Setup of to ground directories.
- Camera handler commands: Image copying and shell command execution.
- host feature for testing on the development computer.
# [v0.1.0] 2024-04-24
Initial release with PUS stack, TM sink, TC source, TMTC TCP/IP infrastructure
and camera handler.

718
Cargo.lock generated

File diff suppressed because it is too large


Cargo.toml
@ -1,27 +1,48 @@
 [package]
 name = "ops-sat-rs"
-version = "0.0.1"
+version = "0.2.0"
 edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
 fern = "0.6"
+toml = "0.8"
 chrono = "0.4"
 log = "0.4"
-lazy_static = "1"
+delegate = "0.12"
+humantime = "2"
 strum = { version = "0.26", features = ["derive"] }
 thiserror = "1"
 derive-new = "0.6"
 num_enum = "0.7"
+serde = "1"
+serde_json = "1"
+mio = "0.8"
+homedir = "0.2"
+socket2 = "0.5"
+once_cell = "1.19"
 
 [dependencies.satrs]
-version = "0.2.0-rc.0"
-git = "https://egit.irs.uni-stuttgart.de/rust/sat-rs.git"
-branch = "main"
+version = "0.2.0"
+# git = "https://egit.irs.uni-stuttgart.de/rust/sat-rs.git"
+# branch = "main"
 features = ["test_util"]
 
 [dependencies.satrs-mib]
-version = "0.1.1"
-git = "https://egit.irs.uni-stuttgart.de/rust/sat-rs.git"
-branch = "main"
+version = ">=0.1.2, <0.2"
+
+[dev-dependencies]
+env_logger = "0.11"
+tempfile = "3"
+
+[features]
+host = []
+
+# I don't think we need insane performance. If anything, a small binary is easier to upload
+# to the satellite.
+[profile.release]
+strip = true
+opt-level = "z" # Optimize for size.
+lto = true
+codegen-units = 1

201
LICENSE-APACHE Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

3
NOTICE Normal file

@ -0,0 +1,3 @@
This project was developed as part of an OPS-SAT experiment submission.
This software contains code developed at the University of Stuttgart's Institute of Space Systems.

113
README.md

@ -2,6 +2,8 @@ ESA OPS-SAT Rust experiment
 ========
 
 This is the primary repository for the ESA OPS-SAT experiment.
+
+The primary repository to generate packages for ESOC can be found [here](https://egit.irs.uni-stuttgart.de/rust/ops-sat-experiment).
+You can also find some more general documentation about OPS-SAT there.
 
 ## Pre-Requisites
@ -9,25 +11,114 @@ This is the primary repository for the ESA OPS-SAT experiment.
 [podman](https://podman.io/) installed
 - [`cross`](https://github.com/cross-rs/cross) package installed
 
-## Build
+## Build for Target Hardware
 
 You might need to set the [`CROSS_CONTAINER_ENGINE`](https://github.com/cross-rs/cross/wiki/FAQ#explicitly-choose-the-container-engine)
 and [`CROSS_ROOTLESS_CONTAINER_ENGINE`](https://github.com/cross-rs/cross/blob/main/docs/environment_variables.md#configuring-cross-with-environment-variables)
 variables manually before calling cross.
 
+### Debug Build
+
 ```sh
 cross build
 ```
 
-## Documentation
-
-The [wiki](https://opssat1.esoc.esa.int/projects/experimenter-information/wiki)
-appears to be a useful source for documentation.
-
-- [OBSW documents](https://opssat1.esoc.esa.int/projects/experimenter-information/dmsf?folder_id=7)
-- [Software Integration Process](https://opssat1.esoc.esa.int/dmsf/files/34/view)
-- [Cross-compiling SEPP](https://opssat1.esoc.esa.int/projects/experimenter-information/wiki/Cross-compiling_SEPP_application)
-- [TMTC infrastructure](https://opssat1.esoc.esa.int/projects/experimenter-information/wiki/Live_TM_TC_data)
-- [Submitting an Experiment](https://opssat1.esoc.esa.int/projects/experimenter-information/wiki/Building_and_submitting_your_application_to_ESOC)
-- [Building with Yocto and Docker](https://opssat1.esoc.esa.int/projects/experimenter-information/wiki/Building_an_application_locally_using_Yocto_Toolchain_in_a_Docker)
-- [SPP over CAN](https://opssat1.esoc.esa.int/projects/experimenter-information/wiki/SPP_over_CAN_communication)
+### Release Build
+
+```sh
+cross build --release
+```
+
+## Build for Host
+
+The software was designed to be runnable and testable on a host computer.
+You can use the regular cargo workflow for this.
+
+### Running
```sh
cargo run --features host
```
### Testing
```sh
cargo test
```
## Commanding Infrastructure
Commanding of the `ops-sat-rs` application is possible by different means.
<img src="docs/networking-structure.png" alt="Networking and Commanding Structure" width="500"/>
### Using the `pyclient` and `pyserver` applications
You can find both commanding applications inside the `pytmtc` folder.
It is recommended to set up a virtual environment first, for example by running the following
code inside the `pytmtc` folder:
```sh
python3 -m venv venv
source venv/bin/activate
```
After that, you can install all requirements for both the client and server application
interactively using
```sh
pip install -e .
```
If you want to command the satellite using the OPS-SAT infrastructure, start the `pyserver.py`
as a background application first, for example by simply running `pyserver.py` inside a
new terminal window.
After that, you can run `pyclient.py -p /test/ping -l` to send a ping telecommand and then
go into listener mode using the following `tmtc_conf.json` file:
```json
{
"com_if": "tcp",
"tcpip_tcp_ip_addr": "127.0.0.1",
"tcpip_tcp_port": 4097
}
```
You can command the TCP server in the OPS-SAT software directly by running the commands with
the following configuration:
```json
{
"com_if": "tcp",
"tcpip_tcp_ip_addr": "127.0.0.1",
"tcpip_tcp_port": 7031
}
```
You can run `pyclient.py -T` or `pyclient.py -h` for more information on the client application.
## Knowledge Base
### Home Path Handling
The OPS-SAT software filesystem handling will determine a home path at the start of the software.
This home path is used for various mechanisms inside the OPS-SAT infrastructure.
Currently, there are 3 possible configurations:
1. If the software is built with the `host` feature, the HOME path will be the current path the
software is run at.
2. If the `host` feature is not set and the `/home/exp278` folder exists, that folder will be
the home directory.
3. Otherwise, the default OS home directory will be the home directory.
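
The same decision order, written out as a short sketch. This is only an illustrative Python model of the three cases listed above; the actual selection logic lives in the Rust application, and the function name below is made up for this example.

```python
from pathlib import Path


def resolve_home_path(host_feature: bool) -> Path:
    """Illustrative model of the home path selection described above."""
    if host_feature:
        # 1. Built with the `host` feature: use the current working directory.
        return Path.cwd()
    exp_home = Path("/home/exp278")
    if exp_home.is_dir():
        # 2. The dedicated experiment home folder exists on the satellite.
        return exp_home
    # 3. Fall back to the default OS home directory.
    return Path.home()
```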
### Application Shutdown Handling
The application can be stopped by creating a `stop-experiment` file either in the home path
specified in the previous section, or inside the temporary folder. There is also an action command
available to stop the application.
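
For a quick test on a host build, such a stop request could look like the following minimal sketch; the `stop-experiment` file name is taken from the paragraph above, while the use of the system default temporary folder is an assumption.

```python
import tempfile
from pathlib import Path

# Create the stop file in the temporary folder; the application checks for this
# file and shuts down when it is present (file name taken from the text above).
Path(tempfile.gettempdir(), "stop-experiment").touch()
```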
### Camera Handling
TODO


@ -0,0 +1,193 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:java="http://www.yworks.com/xml/yfiles-common/1.0/java" xmlns:sys="http://www.yworks.com/xml/yfiles-common/markup/primitives/2.0" xmlns:x="http://www.yworks.com/xml/yfiles-common/markup/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:y="http://www.yworks.com/xml/graphml" xmlns:yed="http://www.yworks.com/xml/yed/3" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd">
<!--Created by yEd 3.23.2-->
<key attr.name="Description" attr.type="string" for="graph" id="d0"/>
<key for="port" id="d1" yfiles.type="portgraphics"/>
<key for="port" id="d2" yfiles.type="portgeometry"/>
<key for="port" id="d3" yfiles.type="portuserdata"/>
<key attr.name="url" attr.type="string" for="node" id="d4"/>
<key attr.name="description" attr.type="string" for="node" id="d5"/>
<key for="node" id="d6" yfiles.type="nodegraphics"/>
<key for="graphml" id="d7" yfiles.type="resources"/>
<key attr.name="url" attr.type="string" for="edge" id="d8"/>
<key attr.name="description" attr.type="string" for="edge" id="d9"/>
<key for="edge" id="d10" yfiles.type="edgegraphics"/>
<graph edgedefault="directed" id="G">
<data key="d0"/>
<node id="n0">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="100.0" width="304.0" x="551.5" y="196.5"/>
<y:Fill color="#CCFFFF" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="133.7265625" x="20.866945876288696" xml:space="preserve" y="9.230939716312065">ops-sat-rs on satellite<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="-0.5" labelRatioY="-0.5" nodeRatioX="-0.4313587306701031" nodeRatioY="-0.4076906028368794" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n1">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="37.0" width="72.0" x="572.5" y="233.0"/>
<y:Fill color="#CCFFFF" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="31.9375" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="52.849609375" x="9.5751953125" xml:space="preserve" y="2.53125">TCP SPP
Client<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n2">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="182.0" width="197.5" x="551.5" y="328.5"/>
<y:Fill color="#FFFF99" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="31.9375" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="56.705078125" x="12.1474609375" xml:space="preserve" y="13.53125">pyserver
daemon<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="-0.5" labelRatioY="-0.5" nodeRatioX="-0.4384938686708861" nodeRatioY="-0.4256524725274725" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n3">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="53.0" width="77.5" x="640.75" y="346.0"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="45.90625" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="62.5703125" x="7.46484375" xml:space="preserve" y="3.546875">OPS-SAT
Server
Port 4096<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n4">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="53.0" width="77.5" x="640.75" y="440.5"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="45.90625" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="62.5703125" x="7.46484375" xml:space="preserve" y="3.546875">TMTC
Server
Port 4097<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n5">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="30.0" width="99.0" x="756.5" y="536.5"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="51.677734375" x="23.6611328125" xml:space="preserve" y="6.015625">pyclient<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n6">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="53.0" width="72.0" x="661.0" y="225.0"/>
<y:Fill color="#CCFFFF" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="45.90625" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="62.5703125" x="4.71484375" xml:space="preserve" y="3.546875">TCP
Server
Port 7301<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n7">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="53.0" width="72.0" x="756.0" y="225.0"/>
<y:Fill color="#CCFFFF" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="45.90625" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="62.5703125" x="4.71484375" xml:space="preserve" y="3.546875">UDP
Server
Port 7301<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<edge id="e0" source="n4" target="n3">
<data key="d9"/>
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="0.0" sy="0.0" tx="0.0" ty="0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="standard" target="standard"/>
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="31.9375" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="43.421875" x="8.2890625" xml:space="preserve" y="-37.21875">TMTC
Queue<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="0.0" distance="30.0" distanceToCenter="true" position="right" ratio="0.5" segment="0"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e1" source="n1" target="n3">
<data key="d9"/>
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="4.333333333333334" sy="0.0" tx="-19.5" ty="0.0">
<y:Point x="612.8333333333334" y="312.5"/>
<y:Point x="660.0" y="312.5"/>
</y:Path>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="standard" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e2" source="n5" target="n4">
<data key="d9"/>
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="-29.5" sy="-8.875" tx="6.75" ty="10.0">
<y:Point x="776.5" y="516.5"/>
<y:Point x="686.25" y="516.5"/>
</y:Path>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="standard" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e3" source="n5" target="n7">
<data key="d9"/>
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="-14.0" sy="0.0" tx="0.0" ty="0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="standard" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e4" source="n5" target="n6">
<data key="d9"/>
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="-14.0" sy="0.0" tx="15.75" ty="0.0">
<y:Point x="792.0" y="311.5"/>
<y:Point x="712.75" y="311.5"/>
</y:Path>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
</graph>
<data key="d7">
<y:Resources/>
</data>
</graphml>

Binary file not shown.



@ -0,0 +1,47 @@
ops-sat-rs/pytmtc on  update-pytmtc [$] is 📦 v0.1.0 via 🐍 v3.10.12 (venv) took 17s
./pyclient.py -l --pp
-- tmtccmd v8.0.1 CLI Mode --
No command path (-p) argument specified, prompting from user
Additional commands for prompt:
:p[b][f][<depth>] Tree Print | :r Retry | :h Help Text | :c Cancel
Auto complete is available using Tab after typing the slash character.
If a command history was passed, use arrow up to access it.
You can also print a subtree by typing the path and appending :p[b][p][<depth>].
The b option for printouts enables brief printouts without descriptions.
The p option for printouts overrides hide flags to display all hidden nodes.
> test/ping
Using command path: /test/ping
-- One Queue Mode --
INFO 2024-05-19 08:27:32.173 Loading TC queue
INFO 2024-05-19 08:27:32.173 Sending PUS ping telecommand
INFO 2024-05-19 08:27:32.173 Sending PUS TC[17, 1] with Request ID 0x1d16c0ab, APID 0x516, SSC 171
INFO 2024-05-19 08:27:34.579 Acceptance success of TC | Request ID 0x1d16c0ab | acc (✓) sta (-) ste (-, 0) fin (-)
INFO 2024-05-19 08:27:34.581 Start success of TC | Request ID 0x1d16c0ab | acc (✓) sta (✓) ste (-, 0) fin (-)
INFO 2024-05-19 08:27:34.583 Received Ping Reply TM[17,2]
INFO 2024-05-19 08:27:34.586 Completion success of TC | Request ID 0x1d16c0ab | acc (✓) sta (✓) ste (-, 0) fin (✓) ✨
INFO 2024-05-19 08:27:36.174 Queue handling finished for command /test/ping
INFO 2024-05-19 08:27:40.180 Received Action Data Reply TM[8,130]
INFO 2024-05-19 08:27:40.180 Data Reply Content:
ctrl port : /dev/cam_tty
data port : /dev/cam_sd
exposure time : 2
number of images : 1
milliseconds between images : 1000
bw img sensor : 0
default conf : 1
video duration : 0
gain (RGB) : 8 8 8
-------------------------
BST IMS100 Telemetry:
version :
status : 00
temp[°C]: 0
Set config, Starting picture taking
exit success
INFO 2024-05-19 08:27:40.983 Received Verification TM[1, 7] with Request ID 0x1d16c0aa
WARNING 2024-05-19 08:27:40.984 [opssat_tmtc.pus_tm:61] No matching telecommand found for Request ID: [Packet ID:

136
pytmtc/.gitignore vendored

@ -1,3 +1,6 @@
/tm.db
/tc.db
/tmtc_conf.json
__pycache__
/venv

@ -7,3 +10,136 @@ __pycache__
/seqcnt.txt
/.tmtc-history.txt
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# PyCharm
.idea


@ -0,0 +1,176 @@
from typing import Any, Optional
import select
import time
import socket
import logging
from threading import Thread, Event, Lock
from collections import deque

from tmtccmd.com import ComInterface
from tmtccmd.tmtc import TelemetryListT

_LOGGER = logging.getLogger(__name__)


class TcpServer(ComInterface):
    def __init__(self, port: int):
        self.port = port
        self._max_num_packets_in_tc_queue = 500
        self._max_num_packets_in_tm_queue = 500
        self._default_timeout_secs = 0.5
        self._server_addr = ("localhost", self.port)
        self._tc_packet_queue = deque()
        self._tm_packet_queue = deque()
        self._tc_lock = Lock()
        self._tm_lock = Lock()
        self._kill_signal = Event()
        self._server_socket: Optional[socket.socket] = None
        self._server_thread = Thread(target=self._server_task, daemon=True)
        self._connected = False
        # self._conn_start = None
        # self._writing_done = False
        # self._reading_done = False

    @property
    def connected(self) -> bool:
        return self._connected

    @property
    def id(self) -> str:
        return "tcp_server"

    def initialize(self, args: Any = 0) -> Any:
        """Perform initializations step which can not be done in constructor or which require
        returnvalues.
        """
        pass

    def open(self, args: Any = 0):
        """Opens the communication interface to allow communication.

        :return:
        """
        if self.connected:
            return
        self._connected = True
        self._server_thread.start()

    def _server_task(self):
        self._server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # We need to check the kill signal periodically to allow closing the server.
        self._server_socket.settimeout(self._default_timeout_secs)
        self._server_socket.bind(self._server_addr)
        self._server_socket.listen()
        while True and not self._kill_signal.is_set():
            try:
                (conn_socket, conn_addr) = self._server_socket.accept()
                self._handle_connection(conn_socket, conn_addr)
                # conn_socket.close()
                """
                if (
                    self._reading_done and self._writing_done
                ) or time.time() - self.conn_start > 0.5:
                    print("reading and writing done")
                    break
                """
            except TimeoutError:
                continue

    def _handle_connection(self, conn_socket: socket.socket, conn_addr: Any):
        _LOGGER.info(f"TCP client {conn_addr} connected")
        queue_len = 0
        while True:
            with self._tc_lock:
                queue_len = len(self._tc_packet_queue)
            outputs = []
            if queue_len > 0:
                outputs.append(conn_socket)
            (readable, writable, _) = select.select(
                [conn_socket],
                outputs,
                [],
                0.2,
            )
            if writable and writable[0]:
                print("writeable")
                while queue_len > 0:
                    next_packet = bytes()
                    with self._tc_lock:
                        next_packet = self._tc_packet_queue.popleft()
                    if len(next_packet) > 0:
                        conn_socket.sendall(next_packet)
                    queue_len -= 1
            if readable and readable[0]:
                print("readable")
                while True:
                    bytes_recvd = conn_socket.recv(4096)
                    if len(bytes_recvd) > 0:
                        print(f"Received bytes from TCP client: {bytes_recvd.decode()}")
                        with self._tm_lock:
                            self._tm_packet_queue.append(bytes_recvd)
                    elif len(bytes_recvd) == 0:
                        break
                    else:
                        print("error receiving data from TCP client")

    def is_open(self) -> bool:
        """Can be used to check whether the communication interface is open. This is useful if
        opening a COM interface takes a longer time and is non-blocking
        """
        return self.connected

    def close(self, args: Any = 0):
        """Closes the ComIF and releases any held resources (for example a Communication Port).

        :return:
        """
        self._kill_signal.set()
        self._server_thread.join()
        self._connected = False

    def send(self, data: bytes):
        """Send raw data.

        :raises SendError: Sending failed for some reason.
        """
        with self._tc_lock:
            if len(self._tc_packet_queue) >= self._max_num_packets_in_tc_queue:
                # Remove oldest packet
                self._tc_packet_queue.popleft()
            self._tc_packet_queue.append(data)

    def receive(self, parameters: Any = 0) -> TelemetryListT:
        """Returns a list of received packets. The child class can use a separate thread to poll for
        the packets or use some other mechanism and container like a deque to store packets
        to be returned here.

        :param parameters:
        :raises ReceptionDecodeError: If the underlying COM interface uses encoding and
            decoding and the decoding fails, this exception will be returned.
        :return:
        """
        with self._tm_lock:
            packet_list = []
            while self._tm_packet_queue:
                packet_list.append(self._tm_packet_queue.popleft())
            return packet_list

    def data_available(self, timeout: float, parameters: Any = 0) -> int:
        """Check whether TM packets are available.

        :param timeout: Can be used to block on available data if supported by the specific
            communication interface.
        :param parameters: Can be an arbitrary parameter.
        :raises ReceptionDecodeError: If the underlying COM interface uses encoding and
            decoding when determining the number of available packets, this exception can be
            thrown on decoding errors.
        :return: 0 if no data is available, number of packets otherwise.
        """
        with self._tm_lock:
            return len(self._tm_packet_queue)
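
A minimal usage sketch for the `TcpServer` class above. The import path is an assumption (the module's file name is not visible in this diff), and the queued byte string is a dummy placeholder rather than a real PUS telecommand.

```python
import time

# Assumed module path; the actual file name is not shown in this diff.
from opssat_tmtc.tcp_server import TcpServer

server = TcpServer(port=4097)
server.open()

# Queue a dummy frame; it is sent once a TCP client connects and becomes writable.
server.send(b"\x17\x01\x00")

time.sleep(2.0)

# Drain whatever the connected client sent back as telemetry.
for packet in server.receive():
    print(f"received {len(packet)} bytes of TM")

server.close()
```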


@ -0,0 +1,96 @@
import enum
from typing import List

from spacepackets.ecss import PusTc
from tmtccmd.config import CmdTreeNode
from pydantic import BaseModel
from tmtccmd.tmtc import DefaultPusQueueHelper

from opssat_tmtc.common import EXPERIMENT_APID, UniqueId, make_action_cmd_header


class ActionId(enum.IntEnum):
    DEFAULT_SINGLE = 1
    BALANCED_SINGLE = 2
    DEFAULT_SINGLE_FLATSAT = 3
    BALANCED_SNGLE_FLATSAT = 4
    CUSTOM_PARAMS = 5


class CameraParameters(BaseModel):
    R: int
    G: int
    B: int
    N: int
    P: bool
    E: int
    W: int


def create_camera_node() -> CmdTreeNode:
    cam_node = CmdTreeNode("cam", "OPS-SAT IMS1000 batch handler commands")
    cam_node.add_child(
        CmdTreeNode("default_single", "Default Single Image Camera Parameters")
    )
    cam_node.add_child(
        CmdTreeNode("balanced_single", "Balanced Single Image Camera Parameters")
    )
    cam_node.add_child(
        CmdTreeNode(
            "default_single_flatsat",
            "Default Single Image Camera Parameters for use on FlatSat",
        )
    )
    cam_node.add_child(
        CmdTreeNode(
            "balanced_single_flatsat",
            "Balanced Single Image Camera Parameters for use on FlatSat",
        )
    )
    cam_node.add_child(
        CmdTreeNode("custom_params", "Custom Camera Parameters as specified from file")
    )
    return cam_node


def create_cam_cmd(q: DefaultPusQueueHelper, cmd_path: List[str]):
    assert len(cmd_path) >= 1
    q.add_log_cmd(
        "Sending PUS take image action request for command " + cmd_path[0] + " params."
    )
    data = bytearray()
    if cmd_path[0] == "default_single":
        data.extend(
            make_action_cmd_header(UniqueId.CameraHandler, ActionId.DEFAULT_SINGLE)
        )
    elif cmd_path[0] == "balanced_single":
        data.extend(
            make_action_cmd_header(UniqueId.CameraHandler, ActionId.BALANCED_SINGLE)
        )
    elif cmd_path[0] == "default_single_flatsat":
        data.extend(
            make_action_cmd_header(
                UniqueId.CameraHandler, ActionId.DEFAULT_SINGLE_FLATSAT
            )
        )
    elif cmd_path[0] == "balanced_single_flatsat":
        data.extend(
            make_action_cmd_header(
                UniqueId.CameraHandler, ActionId.BALANCED_SNGLE_FLATSAT
            )
        )
    elif cmd_path[0] == "custom":
        data.extend(
            make_action_cmd_header(UniqueId.CameraHandler, ActionId.CUSTOM_PARAMS)
        )
        # TODO: Implement asking params from user.
        # params = CameraParameters(8, 8, 8, 1, True, 200, 1000)
        # data.extend(params.model_dump_json().encode())
        raise NotImplementedError()
    else:
        raise ValueError("unknown camera action {}", cmd_path[0])
    return q.add_pus_tc(
        PusTc(service=8, subservice=128, apid=EXPERIMENT_APID, app_data=data)
    )
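
The `custom` branch above is not implemented yet; the commented-out lines hint at how the pydantic model would be serialized into the command's application data. A small sketch of that serialization, with the field values taken from the commented-out example (whether the on-board software expects exactly this JSON layout is not shown here):

```python
from opssat_tmtc.camera import CameraParameters

# Field values mirror the commented-out example in create_cam_cmd().
params = CameraParameters(R=8, G=8, B=8, N=1, P=True, E=200, W=1000)

# pydantic v2 serialization, as hinted at by the commented-out code above.
print(params.model_dump_json())  # {"R":8,"G":8,"B":8,"N":1,"P":true,"E":200,"W":1000}
```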


@ -4,12 +4,25 @@ import dataclasses
 import enum
 import struct
 
+TM_DB_PATH = "tm.db"
+TC_DB_PATH = "tc.db"
+EXPERIMENT_ID = 278
+EXPERIMENT_APID = 1024 + EXPERIMENT_ID
+
 
-class Apid(enum.IntEnum):
-    SCHED = 1
-    GENERIC_PUS = 2
-    ACS = 3
-    CFDP = 4
+class UniqueId(enum.IntEnum):
+    Controller = 0
+    PusEventManagement = 1
+    PusRouting = 2
+    PusTest = 3
+    PusAction = 4
+    PusMode = 5
+    PusHk = 6
+    UdpServer = 7
+    TcpServer = 8
+    TcpSppClient = 9
+    PusScheduler = 10
+    CameraHandler = 11
 
 
 class EventSeverity(enum.IntEnum):
@ -45,7 +58,11 @@ class AcsHkIds(enum.IntEnum):
     MGM_SET = 1
 
 
-def make_addressable_id(target_id: int, unique_id: int) -> bytes:
-    byte_string = bytearray(struct.pack("!I", target_id))
-    byte_string.extend(struct.pack("!I", unique_id))
-    return byte_string
+def make_unique_id(unique_id: int) -> bytes:
+    return struct.pack("!I", unique_id)
+
+
+def make_action_cmd_header(unique_id: int, action_id: int) -> bytes:
+    byte_string = bytearray(struct.pack("!I", unique_id))
+    byte_string.extend(struct.pack("!I", action_id))
+    return byte_string
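
As a quick illustration of the new helper: an action command header is just two big-endian unsigned 32-bit integers, the target's unique ID followed by the action ID. The values below use `UniqueId.CameraHandler` (11) from this file and `ActionId.DEFAULT_SINGLE` (1) from `camera.py` above.

```python
import struct

# Equivalent to make_action_cmd_header(UniqueId.CameraHandler, ActionId.DEFAULT_SINGLE)
header = struct.pack("!I", 11) + struct.pack("!I", 1)
assert header == bytes.fromhex("0000000b00000001")
assert len(header) == 8
```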


@ -0,0 +1,53 @@
import enum
from typing import List
from spacepackets.ecss import PusTc
from tmtccmd.config import CmdTreeNode
from tmtccmd.tmtc import DefaultPusQueueHelper
from opssat_tmtc.common import EXPERIMENT_APID, UniqueId, make_action_cmd_header
class ActionId(enum.IntEnum):
STOP_EXPERIMENT = 1
DOWNLINK_LOG_FILE = 2
DOWNLINK_IMAGES_BY_MOVING = 3
EXECUTE_SHELL_CMD_BLOCKING = 4
class OpCode:
DOWNLINK_LOGS = "downlink_logs"
DOWNLINK_IMAGES_BY_MOVING = "move_image_files"
def create_controller_node():
controller_node = CmdTreeNode("controller", "Main OBSW Controller")
controller_node.add_child(
CmdTreeNode(OpCode.DOWNLINK_LOGS, "Downlink Logs via toGround folder")
)
controller_node.add_child(
CmdTreeNode(
OpCode.DOWNLINK_IMAGES_BY_MOVING,
"Downlink all image files via the toGroundLP folder",
)
)
return controller_node
def create_ctrl_cmd(q: DefaultPusQueueHelper, cmd_path: List[str]):
assert len(cmd_path) >= 1
data = bytearray()
if cmd_path[0] == OpCode.DOWNLINK_LOGS:
data.extend(
make_action_cmd_header(UniqueId.Controller, ActionId.DOWNLINK_LOG_FILE)
)
elif cmd_path[0] == OpCode.DOWNLINK_IMAGES_BY_MOVING:
data.extend(
make_action_cmd_header(
UniqueId.Controller, ActionId.DOWNLINK_IMAGES_BY_MOVING
)
)
else:
raise ValueError("unknown controller action {}", cmd_path[0])
return q.add_pus_tc(
PusTc(service=8, subservice=128, apid=EXPERIMENT_APID, app_data=data)
)
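
ActionId.EXECUTE_SHELL_CMD_BLOCKING is declared above but not yet wired into the command tree. A hedged sketch of how such a request could be packed, mirroring the ShellCmd JSON layout used by the Rust controller (the helper name is hypothetical, not repository code):

import json

def create_shell_cmd_tc(q: DefaultPusQueueHelper, cmd: str, args: List[str]):
    # Action header addressing the controller, followed by the JSON-serialized shell command.
    data = bytearray(
        make_action_cmd_header(UniqueId.Controller, ActionId.EXECUTE_SHELL_CMD_BLOCKING)
    )
    data.extend(json.dumps({"cmd": cmd, "args": args}).encode())
    return q.add_pus_tc(PusTc(service=8, subservice=128, apid=EXPERIMENT_APID, app_data=data))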

View File

@ -10,7 +10,9 @@ from tmtccmd.tmtc import DefaultPusQueueHelper
from tmtccmd.pus.s11_tc_sched import create_time_tagged_cmd
from tmtccmd.pus.s200_fsfw_mode import Subservice as ModeSubservice
from common import AcsId, Apid
from opssat_tmtc.camera import create_cam_cmd, create_camera_node
from opssat_tmtc.controller import create_controller_node, create_ctrl_cmd
_LOGGER = logging.getLogger(__name__)
@ -31,7 +33,6 @@ def create_set_mode_cmd(
def create_cmd_definition_tree() -> CmdTreeNode:
root_node = CmdTreeNode.root_node()
hk_node = CmdTreeNode("hk", "Housekeeping Node", hide_children_for_print=True)
@ -66,14 +67,8 @@ def create_cmd_definition_tree() -> CmdTreeNode:
)
root_node.add_child(scheduler_node)
acs_node = CmdTreeNode("acs", "ACS Subsystem Node")
mgm_node = CmdTreeNode("mgms", "MGM devices node")
mgm_node.add_child(mode_node)
mgm_node.add_child(hk_node)
acs_node.add_child(mgm_node)
root_node.add_child(acs_node)
root_node.add_child(create_camera_node())
root_node.add_child(create_controller_node())
return root_node
@ -87,14 +82,10 @@ def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str):
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "ping":
q.add_log_cmd("Sending PUS ping telecommand")
return q.add_pus_tc(
PusTelecommand(apid=Apid.GENERIC_PUS, service=17, subservice=1)
)
return q.add_pus_tc(PusTelecommand(service=17, subservice=1))
elif cmd_path_list[1] == "trigger_event":
q.add_log_cmd("Triggering test event")
return q.add_pus_tc(
PusTelecommand(apid=Apid.GENERIC_PUS, service=17, subservice=128)
)
return q.add_pus_tc(PusTelecommand(service=17, subservice=128))
if cmd_path_list[0] == "scheduler":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "schedule_ping_10_secs_ahead":
@ -106,27 +97,14 @@ def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str):
create_time_tagged_cmd(
time_stamp,
PusTelecommand(service=17, subservice=1),
apid=Apid.SCHED,
)
)
if cmd_path_list[0] == "acs": if cmd_path_list[0] == "acs":
assert len(cmd_path_list) >= 2 assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "mgms": if cmd_path_list[0] == "cam":
assert len(cmd_path_list) >= 3 create_cam_cmd(q, cmd_path_list[1:])
if cmd_path_list[2] == "hk": if cmd_path_list[0] == "controller":
if cmd_path_list[3] == "one_shot_hk": create_ctrl_cmd(q, cmd_path_list[1:])
q.add_log_cmd("Sending HK one shot request")
# TODO: Fix
# q.add_pus_tc(
# create_request_one_hk_command(
# make_addressable_id(Apid.ACS, AcsId.MGM_SET)
# )
# )
if cmd_path_list[2] == "mode":
if cmd_path_list[3] == "set_mode":
handle_set_mode_cmd(
q, "MGM 0", cmd_path_list[4], Apid.ACS, AcsId.MGM_0
)
def handle_set_mode_cmd(

View File

@ -0,0 +1,147 @@
from typing import Any
import uuid
import sqlite3
import logging
from spacepackets.ccsds import CdsShortTimestamp
from spacepackets.ecss import PusTm
from spacepackets.ecss.pus_17_test import Service17Tm
from spacepackets.ecss.pus_1_verification import Service1Tm, UnpackParams
from tmtccmd.logging.pus import RawTmtcTimedLogWrapper
from tmtccmd.pus import VerificationWrapper
from tmtccmd.tmtc import GenericApidHandlerBase
from opssat_tmtc.common import TM_DB_PATH, EventU32
_LOGGER = logging.getLogger(__name__)
class PusHandler(GenericApidHandlerBase):
def __init__(
self,
file_logger: logging.Logger,
verif_wrapper: VerificationWrapper,
raw_logger: RawTmtcTimedLogWrapper,
):
super().__init__(None)
self.file_logger = file_logger
self.raw_logger = raw_logger
self.verif_wrapper = verif_wrapper
def handle_tm(self, apid: int, packet: bytes, _user_args: Any):
packet_uuid = uuid.uuid4()
try:
pus_tm = PusTm.unpack(
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
except ValueError as e:
_LOGGER.warning("Could not generate PUS TM object from raw data")
_LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
raise e
timestamp = CdsShortTimestamp.unpack(pus_tm.timestamp)
db_con = sqlite3.connect(TM_DB_PATH)
self._store_packet_in_db(
db_con=db_con,
packet=packet,
tm_packet=pus_tm,
timestamp=timestamp,
packet_uuid=packet_uuid,
)
service = pus_tm.service
if service == 1:
tm_packet = Service1Tm.unpack(
data=packet, params=UnpackParams(CdsShortTimestamp.TIMESTAMP_SIZE, 1, 2)
)
res = self.verif_wrapper.add_tm(tm_packet)
if res is None:
_LOGGER.info(
f"Received Verification TM[{tm_packet.service}, {tm_packet.subservice}] "
f"with Request ID {tm_packet.tc_req_id.as_u32():#08x}"
)
_LOGGER.warning(
f"No matching telecommand found for {tm_packet.tc_req_id}"
)
else:
self.verif_wrapper.log_to_console(tm_packet, res)
self.verif_wrapper.log_to_file(tm_packet, res)
elif service == 3:
_LOGGER.info("No handling for HK packets implemented")
_LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
pus_tm = PusTm.unpack(
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
if pus_tm.subservice == 25:
if len(pus_tm.source_data) < 8:
raise ValueError("No addressable ID in HK packet")
json_str = pus_tm.source_data[8:]
_LOGGER.info(json_str)
elif service == 5:
tm_packet = PusTm.unpack(
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
src_data = tm_packet.source_data
event_u32 = EventU32.unpack(src_data)
_LOGGER.info(f"Received event packet. Event: {event_u32}")
if event_u32.group_id == 0 and event_u32.unique_id == 0:
_LOGGER.info("Received test event")
elif service == 8:
if pus_tm.subservice == 130:
_LOGGER.info("Received Action Data Reply TM[8,130]")
reply = pus_tm.source_data
reply = reply[6:]
_LOGGER.info(f"Data Reply Content: {reply.decode()}")
elif service == 17:
tm_packet = Service17Tm.unpack(
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
if tm_packet.subservice == 2:
self.file_logger.info("Received Ping Reply TM[17,2]")
_LOGGER.info("Received Ping Reply TM[17,2]")
else:
self.file_logger.info(
f"Received Test Packet with unknown subservice {tm_packet.subservice}"
)
_LOGGER.info(
f"Received Test Packet with unknown subservice {tm_packet.subservice}"
)
else:
_LOGGER.info(
f"The service {service} is not implemented in Telemetry Factory"
)
tm_packet = PusTm.unpack(
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
self.raw_logger.log_tm(pus_tm)
def _store_packet_in_db(
self,
db_con: sqlite3.Connection,
packet: bytes,
timestamp: CdsShortTimestamp,
tm_packet: PusTm,
packet_uuid: uuid.UUID,
):
cursor = db_con.cursor()
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS pus_tm(
packet_uuid TEXT PRIMARY KEY,
generation_time TEXT,
service NUM,
subservice NUM,
data_len NUM,
raw_data BLOB
)"""
)
cursor.execute(
"INSERT INTO pus_tm VALUES(?, ?, ?, ?, ?, ?)",
(
str(packet_uuid),
timestamp.as_datetime(),
tm_packet.service,
tm_packet.subservice,
len(packet),
packet,
),
)
db_con.commit()
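
Every received packet is archived in the tm.db SQLite file via _store_packet_in_db. A minimal sketch for reading the archive back offline, assuming only the schema created above (not repository code):

import sqlite3

con = sqlite3.connect("tm.db")  # matches TM_DB_PATH
# List all verification packets (service 1) with their generation time.
for packet_uuid, generation_time, subservice in con.execute(
    "SELECT packet_uuid, generation_time, subservice FROM pus_tm WHERE service = 1"
):
    print(f"{generation_time} TM[1,{subservice}] {packet_uuid}")
con.close()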

View File

@ -1,7 +1,7 @@
from tmtccmd.config import OpCodeEntry, TmtcDefinitionWrapper, CoreServiceList
from tmtccmd.config.globals import get_default_tmtc_defs
from common import HkOpCodes
from opssat_tmtc.common import HkOpCodes
def tc_definitions() -> TmtcDefinitionWrapper:
@ -35,4 +35,11 @@ def tc_definitions() -> TmtcDefinitionWrapper:
info="PUS Service 11 TC Scheduling", info="PUS Service 11 TC Scheduling",
op_code_entry=srv_11, op_code_entry=srv_11,
) )
srv_8 = OpCodeEntry()
srv_8.add("pic", "Action Request Image")
defs.add_service(
name=CoreServiceList.SERVICE_8,
info="PUS Service 8 Action",
op_code_entry=srv_8,
)
return defs return defs

View File

@ -1,23 +1,22 @@
#!/usr/bin/env python3
"""Example client for the sat-rs example application"""
from __future__ import annotations
import logging
import sys
import time
from typing import Any, Optional
from typing import Optional
from prompt_toolkit.history import History
from prompt_toolkit.history import FileHistory
from spacepackets.ccsds import PacketId, PacketType
import tmtccmd
from spacepackets.ecss import PusTelemetry, PusVerificator
from spacepackets.ecss import PusVerificator
from spacepackets.ecss.pus_17_test import Service17Tm
from spacepackets.ecss.pus_1_verification import UnpackParams, Service1Tm
from spacepackets.ccsds.time import CdsShortTimestamp
from tmtccmd import TcHandlerBase, ProcedureParamsWrapper
from tmtccmd.core.base import BackendRequest
from tmtccmd.pus import VerificationWrapper
from tmtccmd.tmtc import CcsdsTmHandler, GenericApidHandlerBase
from tmtccmd.tmtc import CcsdsTmHandler
from tmtccmd.com import ComInterface
from tmtccmd.config import (
CmdTreeNode,
@ -45,16 +44,16 @@ from tmtccmd.tmtc import (
from spacepackets.seqcount import FileSeqCountProvider, PusFileSeqCountProvider
from tmtccmd.util.obj_id import ObjectIdDictT
import pus_tc
from common import Apid, EventU32
from opssat_tmtc.pus_tc import create_cmd_definition_tree, pack_pus_telecommands
from opssat_tmtc.common import EXPERIMENT_APID
from opssat_tmtc.pus_tm import PusHandler
_LOGGER = logging.getLogger()
class SatRsConfigHook(HookBase):
def __init__(self, json_cfg_path: str):
super().__init__(json_cfg_path=json_cfg_path)
super().__init__(json_cfg_path)
def get_communication_interface(self, com_if_key: str) -> Optional[ComInterface]:
from tmtccmd.config.com import (
@ -64,8 +63,7 @@ class SatRsConfigHook(HookBase):
assert self.cfg_path is not None
packet_id_list = []
for apid in Apid:
packet_id_list.append(PacketId(PacketType.TM, True, apid))
packet_id_list.append(PacketId(PacketType.TM, True, EXPERIMENT_APID))
cfg = create_com_interface_cfg_default(
com_if_key=com_if_key,
json_cfg_path=self.cfg_path,
@ -76,7 +74,7 @@ class SatRsConfigHook(HookBase):
def get_command_definitions(self) -> CmdTreeNode:
"""This function should return the root node of the command definition tree."""
return pus_tc.create_cmd_definition_tree()
return create_cmd_definition_tree()
def get_cmd_history(self) -> Optional[History]:
"""Optionally return a history class for the past command paths which will be used
@ -89,84 +87,6 @@ class SatRsConfigHook(HookBase):
return get_core_object_ids()
class PusHandler(GenericApidHandlerBase):
def __init__(
self,
file_logger: logging.Logger,
verif_wrapper: VerificationWrapper,
raw_logger: RawTmtcTimedLogWrapper,
):
super().__init__(None)
self.file_logger = file_logger
self.raw_logger = raw_logger
self.verif_wrapper = verif_wrapper
def handle_tm(self, apid: int, packet: bytes, _user_args: Any):
try:
pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
except ValueError as e:
_LOGGER.warning("Could not generate PUS TM object from raw data")
_LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
raise e
service = pus_tm.service
if service == 1:
tm_packet = Service1Tm.unpack(
data=packet, params=UnpackParams(CdsShortTimestamp.empty(), 1, 2)
)
res = self.verif_wrapper.add_tm(tm_packet)
if res is None:
_LOGGER.info(
f"Received Verification TM[{tm_packet.service}, {tm_packet.subservice}] "
f"with Request ID {tm_packet.tc_req_id.as_u32():#08x}"
)
_LOGGER.warning(
f"No matching telecommand found for {tm_packet.tc_req_id}"
)
else:
self.verif_wrapper.log_to_console(tm_packet, res)
self.verif_wrapper.log_to_file(tm_packet, res)
elif service == 3:
_LOGGER.info("No handling for HK packets implemented")
_LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
if pus_tm.subservice == 25:
if len(pus_tm.source_data) < 8:
raise ValueError("No addressable ID in HK packet")
json_str = pus_tm.source_data[8:]
_LOGGER.info(json_str)
elif service == 5:
tm_packet = PusTelemetry.unpack(
packet, time_reader=CdsShortTimestamp.empty()
)
src_data = tm_packet.source_data
event_u32 = EventU32.unpack(src_data)
_LOGGER.info(f"Received event packet. Event: {event_u32}")
if event_u32.group_id == 0 and event_u32.unique_id == 0:
_LOGGER.info("Received test event")
elif service == 17:
tm_packet = Service17Tm.unpack(
packet, time_reader=CdsShortTimestamp.empty()
)
if tm_packet.subservice == 2:
self.file_logger.info("Received Ping Reply TM[17,2]")
_LOGGER.info("Received Ping Reply TM[17,2]")
else:
self.file_logger.info(
f"Received Test Packet with unknown subservice {tm_packet.subservice}"
)
_LOGGER.info(
f"Received Test Packet with unknown subservice {tm_packet.subservice}"
)
else:
_LOGGER.info(
f"The service {service} is not implemented in Telemetry Factory"
)
tm_packet = PusTelemetry.unpack(
packet, time_reader=CdsShortTimestamp.empty()
)
self.raw_logger.log_tm(pus_tm)
class TcHandler(TcHandlerBase):
def __init__(
self,
@ -181,7 +101,7 @@ class TcHandler(TcHandlerBase):
tc_sched_timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE,
seq_cnt_provider=seq_count_provider,
pus_verificator=self.verif_wrapper.pus_verificator,
default_pus_apid=None,
default_pus_apid=EXPERIMENT_APID,
)
def send_cb(self, send_params: SendCbParams):
@ -197,17 +117,17 @@ class TcHandler(TcHandlerBase):
_LOGGER.info(log_entry.log_str)
def queue_finished_cb(self, info: ProcedureWrapper):
if info.proc_type == TcProcedureType.DEFAULT:
if info.proc_type == TcProcedureType.TREE_COMMANDING:
def_proc = info.to_def_procedure()
def_proc = info.to_tree_commanding_procedure()
_LOGGER.info(f"Queue handling finished for command {def_proc.cmd_path}")
def feed_cb(self, info: ProcedureWrapper, wrapper: FeedWrapper):
q = self.queue_helper
q.queue_wrapper = wrapper.queue_wrapper
if info.proc_type == TcProcedureType.DEFAULT:
if info.proc_type == TcProcedureType.TREE_COMMANDING:
def_proc = info.to_def_procedure()
def_proc = info.to_tree_commanding_procedure()
assert def_proc.cmd_path is not None
pus_tc.pack_pus_telecommands(q, def_proc.cmd_path)
pack_pus_telecommands(q, def_proc.cmd_path)
def main():
@ -234,13 +154,13 @@ def main():
raw_logger = RawTmtcTimedLogWrapper(when=TimedLogWhen.PER_HOUR, interval=1)
verificator = PusVerificator()
verification_wrapper = VerificationWrapper(verificator, _LOGGER, file_logger)
# Create primary TM handler and add it to the CCSDS Packet Handler
# Create primary TM handlers and add it to the CCSDS Packet Handler
tm_handler = PusHandler(file_logger, verification_wrapper, raw_logger)
ccsds_handler = CcsdsTmHandler(generic_handler=tm_handler)
# TODO: We could add the CFDP handler for the CFDP APID at a later stage.
# TODO: We could add the CFDP handlers for the CFDP APID at a later stage.
# ccsds_handler.add_apid_handler(tm_handler)
# Create TC handler
# Create TC handlers
seq_count_provider = PusFileSeqCountProvider()
tc_handler = TcHandler(seq_count_provider, verification_wrapper)
tmtccmd.setup(setup_args=setup_args)
@ -256,6 +176,7 @@ def main():
while True:
state = tmtc_backend.periodic_op(None)
if state.request == BackendRequest.TERMINATION_NO_ERROR:
tmtc_backend.close_com_if()
sys.exit(0)
elif state.request == BackendRequest.DELAY_IDLE:
_LOGGER.info("TMTC Client in IDLE mode")
@ -270,6 +191,7 @@ def main():
elif state.request == BackendRequest.CALL_NEXT:
pass
except KeyboardInterrupt:
tmtc_backend.close_com_if()
sys.exit(0)

28
pytmtc/pyproject.toml Normal file
View File

@ -0,0 +1,28 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "opssat-tmtc"
description = "Python TMTC client for OPS-SAT"
readme = "README.md"
version = "0.1.0"
requires-python = ">=3.8"
authors = [
{name = "Robin Mueller", email = "robin.mueller.m@gmail.com"},
{name = "Linus Köster", email = "st167799@stud.uni-stuttgart.de"}
]
dependencies = [
"tmtccmd~=8.0",
"pydantic==2.7.1"
]
[tool.setuptools.packages]
find = {}
[tool.ruff]
extend-exclude = ["archive"]
[tool.ruff.lint]
ignore = ["E501"]
[tool.ruff.lint.extend-per-file-ignores]
"__init__.py" = ["F401"]

197
pytmtc/pyserver.py Executable file
View File

@ -0,0 +1,197 @@
#!/usr/bin/env python3
import socket
import json
import abc
import time
import select
import logging
from typing import Any
from threading import Event, Thread
from collections import deque
from multiprocessing import Queue
from spacepackets.ccsds.spacepacket import parse_space_packets, PacketId
from spacepackets.ecss.tc import PacketType
EXP_ID = 278
EXP_APID = 1024 + EXP_ID
EXP_PACKET_ID_TM = PacketId(PacketType.TM, True, EXP_APID)
EXP_PACKET_ID_TC = PacketId(PacketType.TC, True, EXP_APID)
OPSSAT_DEFAULT_SERVER_PORT = 4096
TMTC_SERVER_PORT = 4097
LOG_LEVEL = logging.INFO
TC_QUEUE = Queue()
TM_QUEUE = Queue()
KILL_SIGNAL = Event()
_LOGGER = logging.getLogger(__name__)
def main():
logging.basicConfig(
format="[%(asctime)s] [%(levelname)-5s] %(message)s",
level=LOG_LEVEL,
datefmt="%Y-%m-%d %H:%M:%S",
)
print("Starting OPS-SAT ground TMTC server")
KILL_SIGNAL.clear()
ops_sat_server_port = OPSSAT_DEFAULT_SERVER_PORT
with open("tmtc_conf.json") as cfg_file:
# Load JSON data
data = json.loads(cfg_file.read())
# Access the value of the tcpip_tcp_server_port key
maybe_ops_sat_server_port = data.get("tcpip_tcp_server_port")
if maybe_ops_sat_server_port is not None:
ops_sat_server_port = maybe_ops_sat_server_port
_LOGGER.info(f"creating OPS-SAT server on port {ops_sat_server_port}")
ops_sat_thread = OpsSatServer(ops_sat_server_port)
ops_sat_thread.start()
tmtc_thread = TmtcServer()
tmtc_thread.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
_LOGGER.info("Shutting down server gracefully")
KILL_SIGNAL.set()
ops_sat_thread.join()
tmtc_thread.join()
class BaseServer(Thread):
def __init__(self, log_prefix: str, port: int):
self.log_prefix = log_prefix
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_addr = ("0.0.0.0", port)
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_socket.setblocking(False)
self.server_socket.settimeout(0.4)
self.server_socket.bind(server_addr)
super().__init__()
def run(self) -> None:
self.run_sync_version()
def run_sync_version(self) -> None:
self.server_socket.listen()
while not KILL_SIGNAL.is_set():
try:
(conn_socket, conn_addr) = self.server_socket.accept()
self.handle_connection(conn_socket, conn_addr)
except TimeoutError:
continue
def handle_connection(self, conn_socket: socket.socket, conn_addr: Any):
conn_socket.setblocking(False)
print(f"{self.log_prefix} TCP client {conn_addr} connected")
analysis_deque = deque()
while not KILL_SIGNAL.is_set():
conn_socket.settimeout(0.2)
try:
bytes_recvd = conn_socket.recv(4096)
if len(bytes_recvd) > 0:
_LOGGER.debug(f"{self.log_prefix} RX RAW: {bytes_recvd}")
analysis_deque.append(bytes_recvd)
elif len(bytes_recvd) == 0:
self.handle_read_bytestream(analysis_deque)
break
else:
print("error receiving data from TCP client")
except BlockingIOError:
self.handle_timeout(conn_socket, analysis_deque)
time.sleep(0.2)
except TimeoutError:
self.handle_timeout(conn_socket, analysis_deque)
def handle_timeout(self, conn_socket: socket.socket, analysis_deque: deque):
if len(analysis_deque) > 0:
self.handle_read_bytestream(analysis_deque)
self.send_data_to_client(conn_socket)
def run_select_version(self) -> None:
while True:
self.server_socket.listen()
(conn_socket, conn_addr) = self.server_socket.accept()
print(f"{self.log_prefix} TCP client {conn_addr} connected")
analysis_deque = deque()
while True:
outputs = []
if self.send_data_available():
outputs.append(conn_socket)
(readable, writable, _) = select.select([conn_socket], outputs, [], 0.2)
if readable and readable[0]:
bytes_recvd = conn_socket.recv(4096)
if len(bytes_recvd) > 0:
_LOGGER.debug("received data from TCP client: {}", bytes_recvd)
analysis_deque.append(bytes_recvd)
elif len(bytes_recvd) == 0:
self.handle_read_bytestream(analysis_deque)
break
else:
print("error receiving data from TCP client")
if writable and writable[0]:
self.send_data_to_client(conn_socket)
if not writable and not readable:
if len(analysis_deque) > 0:
self.handle_read_bytestream(analysis_deque)
self.send_data_to_client(conn_socket)
@abc.abstractmethod
def handle_read_bytestream(self, analysis_deque: deque):
pass
@abc.abstractmethod
def send_data_to_client(self, conn_socket: socket.socket):
pass
@abc.abstractmethod
def send_data_available(self) -> bool:
pass
class OpsSatServer(BaseServer):
def __init__(self, port: int):
self.port = port
super().__init__("[OPS-SAT]", port)
def handle_read_bytestream(self, analysis_deque: deque):
parsed_packets = parse_space_packets(analysis_deque, [EXP_PACKET_ID_TM])
for packet in parsed_packets:
_LOGGER.info(f"{self.log_prefix} RX TM: [{packet.hex(sep=',')}]")
TM_QUEUE.put(packet)
def send_data_to_client(self, conn_socket: socket.socket):
while not TC_QUEUE.empty():
next_packet = TC_QUEUE.get()
_LOGGER.info(f"{self.log_prefix} TX TC [{next_packet.hex(sep=',')}]")
conn_socket.sendall(next_packet)
def send_data_available(self) -> bool:
return not TC_QUEUE.empty()
class TmtcServer(BaseServer):
def __init__(self):
super().__init__("[TMTC]", TMTC_SERVER_PORT)
def handle_read_bytestream(self, analysis_deque: deque):
parsed_packets = parse_space_packets(analysis_deque, [EXP_PACKET_ID_TC])
for packet in parsed_packets:
_LOGGER.info(f"{self.log_prefix} RX TM: [{packet.hex(sep=',')}]")
TC_QUEUE.put(packet)
def send_data_to_client(self, conn_socket: socket.socket):
while not TM_QUEUE.empty():
next_packet = TM_QUEUE.get()
_LOGGER.info(f"{self.log_prefix} TX TM [{next_packet.hex(sep=',')}]")
conn_socket.sendall(next_packet)
def send_data_available(self) -> bool:
return not TM_QUEUE.empty()
if __name__ == "__main__":
main()
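
The server reads the OPS-SAT TCP server port from tmtc_conf.json if that file is present. A minimal example of the file, using the compiled-in default as the value; only the tcpip_tcp_server_port key is evaluated here, everything else would be ignored:

{
    "tcpip_tcp_server_port": 4096
}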

View File

@ -1,2 +1,2 @@
tmtccmd == 8.0.0rc1
.
# -e git+https://github.com/robamu-org/tmtccmd@97e5e51101a08b21472b3ddecc2063359f7e307a#egg=tmtccmd

0
pytmtc/tests/__init__.py Normal file
View File

27
pytmtc/tests/test_cam.py Normal file
View File

@ -0,0 +1,27 @@
from unittest import TestCase
from opssat_tmtc.camera_params import CameraParameters
TEST_CAM_PARAMS = CameraParameters(R=8, G=8, B=8, N=1, P=True, E=200, W=1000)
EXPECTED_JSON = '{"R":8,"G":8,"B":8,"N":1,"P":true,"E":200,"W":1000}'
class TestCamInterface(TestCase):
def test_serialization_to_dict(self):
model = TEST_CAM_PARAMS.model_dump()
self.assertEqual(model["R"], 8)
self.assertEqual(model["G"], 8)
self.assertEqual(model["B"], 8)
self.assertEqual(model["N"], 1)
self.assertEqual(model["P"], True)
self.assertEqual(model["E"], 200)
self.assertEqual(model["W"], 1000)
def test_serialization_to_json(self):
json = TEST_CAM_PARAMS.model_dump_json()
self.assertEqual(json, EXPECTED_JSON)
print(json)
def test_deserialization(self):
model_deserialized = CameraParameters.model_validate_json(EXPECTED_JSON)
self.assertEqual(TEST_CAM_PARAMS, model_deserialized)

View File

@ -4,22 +4,22 @@ on a remote machine, e.g. a Raspberry Pi"""
import argparse
import os
import sys
import platform
import time
import platform
from typing import Final
# TODO: Should we make this configurable?
BUILDER = "cross"
USE_SSHPASS = False
# This script can easily be adapted to other remote machines, Linux boards and
# remote configurations by tweaking / hardcoding these parameters, which are generally constant
# for a given board
DEFAULT_USER_NAME: Final = "root"
DEFAULT_ADDRESS: Final = "192.254.108.30"
DEFAULT_ADDRESS: Final = "small_flatsat"
DEFAULT_TOOLCHAIN: Final = "armv7-unknown-linux-gnueabihf"
DEFAULT_APP_NAME: Final = "ops-sat-rs"
DEFAULT_TARGET_FOLDER: Final = "/tmp"
DEFAULT_TARGET_FOLDER: Final = "/home/exp278/"
DEFAULT_DEBUG_PORT: Final = "1234"
DEFAULT_GDB_APP = "gdb-multiarch"
@ -140,9 +140,10 @@ def bld_deploy_run(args):
sshpass_args = f"-f {args.sshfile}" sshpass_args = f"-f {args.sshfile}"
elif args.sshenv: elif args.sshenv:
sshpass_args = "-e" sshpass_args = "-e"
ssh_target_ident = f"{args.user}@{args.address}" # ssh_target_ident = f"{args.user}@{args.address}"
ssh_target_ident = "small_flatsat"
sshpass_cmd = "" sshpass_cmd = ""
if platform.system() != "Windows": if USE_SSHPASS and platform.system() != "Windows":
sshpass_cmd = f"sshpass {sshpass_args}" sshpass_cmd = f"sshpass {sshpass_args}"
dest_path = f"{args.dest}/{args.app}" dest_path = f"{args.dest}/{args.app}"
if not args.source: if not args.source:

View File

@ -1,53 +0,0 @@
use ops_sat_rs::config::components::Apid;
use ops_sat_rs::config::APID_VALIDATOR;
use satrs::pus::ReceivesEcssPusTc;
use satrs::spacepackets::{CcsdsPacket, SpHeader};
use satrs::tmtc::{CcsdsPacketHandler, ReceivesCcsdsTc};
use satrs::ValidatorU16Id;
#[derive(Clone)]
pub struct CcsdsReceiver<
TcSource: ReceivesCcsdsTc<Error = E> + ReceivesEcssPusTc<Error = E> + Clone,
E,
> {
pub tc_source: TcSource,
}
impl<
TcSource: ReceivesCcsdsTc<Error = E> + ReceivesEcssPusTc<Error = E> + Clone + 'static,
E: 'static,
> ValidatorU16Id for CcsdsReceiver<TcSource, E>
{
fn validate(&self, apid: u16) -> bool {
APID_VALIDATOR.contains(&apid)
}
}
impl<
TcSource: ReceivesCcsdsTc<Error = E> + ReceivesEcssPusTc<Error = E> + Clone + 'static,
E: 'static,
> CcsdsPacketHandler for CcsdsReceiver<TcSource, E>
{
type Error = E;
fn handle_packet_with_valid_apid(
&mut self,
sp_header: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error> {
if sp_header.apid() == Apid::Cfdp as u16 {
} else {
return self.tc_source.pass_ccsds(sp_header, tc_raw);
}
Ok(())
}
fn handle_packet_with_unknown_apid(
&mut self,
sp_header: &SpHeader,
_tc_raw: &[u8],
) -> Result<(), Self::Error> {
log::warn!("unknown APID 0x{:x?} detected", sp_header.apid());
Ok(())
}
}

View File

@ -1,30 +1,38 @@
use lazy_static::lazy_static;
use num_enum::{IntoPrimitive, TryFromPrimitive};
use satrs::spacepackets::{PacketId, PacketType};
use once_cell::sync::OnceCell;
use satrs::events::{EventU32TypedSev, SeverityInfo};
use satrs::res_code::ResultU16;
use satrs::spacepackets::PacketId;
use satrs_mib::res_code::ResultU16Info;
use satrs_mib::resultcode;
use std::{collections::HashSet, net::Ipv4Addr};
use std::net::Ipv4Addr;
use strum::IntoEnumIterator;
use std::path::{Path, PathBuf};
pub const STOP_FILE_NAME: &str = "stop-experiment";
pub const CONFIG_FILE_NAME: &str = "exp278.toml";
pub const HOME_FOLDER_EXPERIMENT: &str = "/home/exp278"; // also where IMS-100 images are placed
pub const TO_GROUND_FOLDER_NAME: &str = "toGround";
pub const TO_GROUND_LP_FOLDER_NAME: &str = "toGroundLP";
pub const LOG_FOLDER: &str = "logs";
pub const OBSW_SERVER_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED;
pub const SERVER_PORT: u16 = 7301;
pub const TCP_SPP_SERVER_PORT: u16 = 4096;
pub const EXPERIMENT_ID: u32 = 278;
pub const EXPERIMENT_APID: u16 = 1024 + EXPERIMENT_ID as u16;
pub const EXPERIMENT_PACKET_ID: PacketId = PacketId::new_for_tc(true, EXPERIMENT_APID);
pub const VALID_PACKET_ID_LIST: &[PacketId] = &[PacketId::new_for_tc(true, EXPERIMENT_APID)];
lazy_static! {
pub static ref PACKET_ID_VALIDATOR: HashSet<PacketId> = {
let mut set = HashSet::new();
for id in components::Apid::iter() {
set.insert(PacketId::new(PacketType::Tc, true, id as u16));
}
set
};
pub static ref APID_VALIDATOR: HashSet<u16> = {
let mut set = HashSet::new();
for id in components::Apid::iter() {
set.insert(id as u16);
}
set
};
}
// TODO: Would be nice if this can be commanded as well..
/// Can be enabled to print all SPP packets received from the SPP server on port 4096.
pub const SPP_CLIENT_WIRETAPPING_RX: bool = false;
pub const SPP_CLIENT_WIRETAPPING_TX: bool = false;
pub const VERSION: Option<&str> = option_env!("CARGO_PKG_VERSION");
pub static TO_GROUND_FOLDER_DIR: OnceCell<PathBuf> = OnceCell::new();
pub static TO_GROUND_LP_FOLDER_DIR: OnceCell<PathBuf> = OnceCell::new();
pub static HOME_PATH: OnceCell<PathBuf> = OnceCell::new();
#[derive(Copy, Clone, PartialEq, Eq, Debug, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
@ -35,14 +43,156 @@ pub enum CustomPusServiceId {
#[derive(Debug)]
pub enum GroupId {
Tmtc = 0,
Hk = 1,
Mode = 2,
Generic = 0,
Tmtc = 1,
Hk = 2,
Mode = 3,
Action = 4,
Controller = 5,
Camera = 6,
}
pub const TEST_EVENT: EventU32TypedSev<SeverityInfo> =
EventU32TypedSev::<SeverityInfo>::new(GroupId::Tmtc as u16, 0);
pub fn set_up_home_path() -> PathBuf {
let mut home_path = PathBuf::new();
if cfg!(feature = "host") {
home_path = std::env::current_dir()
.expect("getting current dir failed")
.to_path_buf();
} else {
let home_path_default = homedir::get_my_home()
.expect("Getting home dir from OS failed.")
.expect("No home dir found.");
if Path::new(HOME_FOLDER_EXPERIMENT).exists() {
home_path.push(HOME_FOLDER_EXPERIMENT);
} else {
home_path = home_path_default;
}
}
HOME_PATH
.set(home_path.clone())
.expect("attempting to set once cell twice");
home_path
}
pub fn set_up_low_prio_ground_dir(home_path: PathBuf) {
let mut to_ground_lp_dir = home_path.to_path_buf();
to_ground_lp_dir.push(TO_GROUND_LP_FOLDER_NAME);
if !Path::new(&to_ground_lp_dir).exists() {
log::info!(
"creating low priority to ground directory at {:?}",
to_ground_lp_dir
);
if std::fs::create_dir_all(&to_ground_lp_dir).is_err() {
log::error!(
"Failed to create low priority to ground directory '{:?}'",
to_ground_lp_dir
);
}
}
TO_GROUND_LP_FOLDER_DIR
.set(to_ground_lp_dir)
.expect("attemting to set once cell twice");
}
pub fn set_up_ground_dir(home_path: PathBuf) {
let mut to_ground_dir = home_path.to_path_buf();
to_ground_dir.push(TO_GROUND_FOLDER_NAME);
if !Path::new(&to_ground_dir).exists() {
log::info!("creating to ground directory at {:?}", to_ground_dir);
if std::fs::create_dir_all(&to_ground_dir).is_err() {
log::error!(
"Failed to create low priority to ground directory '{:?}'",
to_ground_dir
);
}
}
TO_GROUND_FOLDER_DIR
.set(to_ground_dir)
.expect("attemting to set once cell twice");
}
pub mod cfg_file {
use std::{
fs::File,
io::Read,
path::{Path, PathBuf},
};
use super::{CONFIG_FILE_NAME, TCP_SPP_SERVER_PORT};
pub const SPP_CLIENT_PORT_CFG_KEY: &str = "tcp_spp_server_port";
#[derive(Debug)]
pub struct AppCfg {
pub tcp_spp_server_port: u16,
}
impl Default for AppCfg {
fn default() -> Self {
Self {
tcp_spp_server_port: TCP_SPP_SERVER_PORT,
}
}
}
pub fn create_app_config(base_path: PathBuf) -> AppCfg {
let mut cfg_path = base_path;
cfg_path.push(CONFIG_FILE_NAME);
let cfg_path_home = cfg_path.as_path();
let relevant_path = if Path::new(CONFIG_FILE_NAME).exists() {
Some(PathBuf::from(Path::new(CONFIG_FILE_NAME)))
} else if cfg_path_home.exists() {
Some(PathBuf::from(cfg_path_home))
} else {
None
};
let mut app_cfg = AppCfg::default();
if relevant_path.is_none() {
log::warn!("No config file found, using default values");
return app_cfg;
}
let relevant_path = relevant_path.unwrap();
match File::open(relevant_path.as_path()) {
Ok(mut file) => {
let mut toml_str = String::new();
match file.read_to_string(&mut toml_str) {
Ok(_size) => match toml_str.parse::<toml::Table>() {
Ok(table) => {
handle_config_file_table(table, &mut app_cfg);
}
Err(e) => log::error!("error parsing TOML config file: {e}"),
},
Err(e) => log::error!("error reading TOML config file: {e}"),
}
}
Err(e) => log::error!("error opening TOML config file: {e}"),
}
app_cfg
}
#[allow(clippy::collapsible_match)]
pub fn handle_config_file_table(table: toml::Table, app_cfg: &mut AppCfg) {
if let Some(value) = table.get(SPP_CLIENT_PORT_CFG_KEY) {
if let toml::Value::Integer(port) = value {
if *port < 0 {
log::warn!("invalid port value, is negative");
} else {
app_cfg.tcp_spp_server_port = *port as u16
}
}
}
}
}
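// NOTE: a minimal sketch of what the optional `exp278.toml` file parsed by `create_app_config`
// could contain. Only the `tcp_spp_server_port` key is evaluated; the value shown below is
// simply the compiled-in default:
//
// tcp_spp_server_port = 4096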
#[resultcode]
pub const GENERIC_FAILED: ResultU16 = ResultU16::new(GroupId::Generic as u8, 1);
pub mod tmtc_err {
use super::*;
use satrs::res_code::ResultU16;
#[resultcode]
pub const INVALID_PUS_SERVICE: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 0);
@ -73,55 +223,193 @@ pub mod tmtc_err {
UNKNOWN_TARGET_ID_EXT,
ROUTING_ERROR_EXT,
NOT_ENOUGH_APP_DATA_EXT,
REQUEST_TIMEOUT_EXT,
];
}
pub mod action_err {
use super::*;
#[resultcode]
pub const INVALID_ACTION_ID: ResultU16 = ResultU16::new(GroupId::Action as u8, 0);
pub const ACTION_RESULTS: &[ResultU16Info] = &[INVALID_ACTION_ID_EXT];
}
pub mod hk_err {
use super::*;
#[resultcode]
pub const TARGET_ID_MISSING: ResultU16 = ResultU16::new(GroupId::Hk as u8, 0);
#[resultcode]
pub const UNIQUE_ID_MISSING: ResultU16 = ResultU16::new(GroupId::Hk as u8, 1);
#[resultcode]
pub const UNKNOWN_TARGET_ID: ResultU16 = ResultU16::new(GroupId::Hk as u8, 2);
#[resultcode]
pub const COLLECTION_INTERVAL_MISSING: ResultU16 = ResultU16::new(GroupId::Hk as u8, 3);
pub const HK_ERR_RESULTS: &[ResultU16Info] = &[
TARGET_ID_MISSING_EXT,
UNIQUE_ID_MISSING_EXT,
UNKNOWN_TARGET_ID_EXT,
COLLECTION_INTERVAL_MISSING_EXT,
];
}
pub mod mode_err {
use super::*;
#[resultcode]
pub const WRONG_MODE: ResultU16 = ResultU16::new(GroupId::Mode as u8, 0);
}
pub mod ctrl_err {
use super::*;
#[resultcode]
pub const INVALID_CMD_FORMAT: ResultU16 = ResultU16::new(GroupId::Controller as u8, 0);
#[resultcode]
pub const SHELL_CMD_IO_ERROR: ResultU16 = ResultU16::new(GroupId::Controller as u8, 1);
#[resultcode]
pub const SHELL_CMD_EXECUTION_FAILURE: ResultU16 = ResultU16::new(GroupId::Controller as u8, 2);
#[resultcode]
pub const SHELL_CMD_INVALID_FORMAT: ResultU16 = ResultU16::new(GroupId::Controller as u8, 3);
// TODO: Probably could be in a dedicated module for these return values.
#[resultcode]
pub const FILESYSTEM_COPY_ERROR: ResultU16 = ResultU16::new(GroupId::Controller as u8, 4);
#[resultcode]
pub const IMAGE_NOT_FOUND_FOR_COPY: ResultU16 = ResultU16::new(GroupId::Controller as u8, 5);
#[resultcode]
pub const INVALID_LOGFILE_PATH: ResultU16 = ResultU16::new(GroupId::Controller as u8, 6);
#[resultcode]
pub const IO_ERROR: ResultU16 = ResultU16::new(GroupId::Controller as u8, 7);
pub const CTRL_ERR_RESULTS: &[ResultU16Info] = &[
INVALID_CMD_FORMAT_EXT,
SHELL_CMD_IO_ERROR_EXT,
SHELL_CMD_EXECUTION_FAILURE_EXT,
SHELL_CMD_INVALID_FORMAT_EXT,
FILESYSTEM_COPY_ERROR_EXT,
IMAGE_NOT_FOUND_FOR_COPY_EXT,
INVALID_LOGFILE_PATH_EXT,
IO_ERROR_EXT,
];
}
pub mod cam_error {
use super::*;
use thiserror::Error;
#[derive(Debug, Error)]
pub enum CameraError {
#[error("Error taking image: {0}")]
TakeImageError(String),
#[error("error listing image files: {0}")]
ListFileError(String),
#[error("IO error: {0}")]
IoError(#[from] std::io::Error),
}
#[resultcode]
pub const TAKE_IMAGE_ERROR: ResultU16 = ResultU16::new(GroupId::Camera as u8, 0);
#[resultcode]
pub const NO_DATA: ResultU16 = ResultU16::new(GroupId::Camera as u8, 1);
#[resultcode]
pub const ACTION_REQ_VARIANT_NOT_IMPL: ResultU16 = ResultU16::new(GroupId::Camera as u8, 2);
#[resultcode]
pub const DESERIALIZE_ERROR: ResultU16 = ResultU16::new(GroupId::Camera as u8, 3);
// TODO: Probably could be in a dedicated module for these return values.
#[resultcode]
pub const LIST_FILE_ERROR: ResultU16 = ResultU16::new(GroupId::Camera as u8, 4);
#[resultcode]
pub const IO_ERROR: ResultU16 = ResultU16::new(GroupId::Camera as u8, 5);
pub const CAM_ERR_RESULTS: &[ResultU16Info] = &[
TAKE_IMAGE_ERROR_EXT,
NO_DATA_EXT,
ACTION_REQ_VARIANT_NOT_IMPL_EXT,
DESERIALIZE_ERROR_EXT,
LIST_FILE_ERROR_EXT,
IO_ERROR_EXT,
];
}
pub mod pool {
use satrs::pool::{StaticMemoryPool, StaticPoolConfig};
pub fn create_sched_tc_pool() -> StaticMemoryPool {
StaticMemoryPool::new(StaticPoolConfig::new(
vec![
(100, 32),
(50, 64),
(50, 128),
(50, 256),
(50, 1024),
(100, 2048),
],
true,
))
}
}
pub mod components {
use satrs::request::UniqueApidTargetId;
use strum::EnumIter;
use super::EXPERIMENT_APID;
#[derive(Copy, Clone, PartialEq, Eq, EnumIter)]
pub enum Apid {
Sched = 1,
GenericPus = 2,
Cfdp = 4,
}
// Component IDs for components with the PUS APID.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum PusId {
PusEventManagement = 0,
PusRouting = 1,
PusTest = 2,
PusAction = 3,
PusMode = 4,
PusHk = 5,
}
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum AcsId {
Mgm0 = 0,
}
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum UniqueId {
Controller = 0,
PusEventManagement = 1,
PusRouting = 2,
PusTest = 3,
PusAction = 4,
PusMode = 5,
PusHk = 6,
UdpServer = 7,
TcpServer = 8,
TcpSppClient = 9,
PusScheduler = 10,
CameraHandler = 11,
}
pub const CONTROLLER_ID: UniqueApidTargetId =
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::Controller as u32);
pub const PUS_ACTION_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusAction as u32);
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::PusAction as u32);
pub const PUS_EVENT_MANAGEMENT: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, 0);
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::PusEventManagement as u32);
pub const PUS_ROUTING_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusRouting as u32);
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::PusRouting as u32);
pub const PUS_TEST_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusTest as u32);
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::PusTest as u32);
pub const PUS_MODE_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusMode as u32);
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::PusMode as u32);
pub const PUS_SCHEDULER_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::PusScheduler as u32);
pub const PUS_HK_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusHk as u32);
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::PusHk as u32);
pub const PUS_SCHED_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Sched as u16, 0);
pub const UDP_SERVER: UniqueApidTargetId =
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::UdpServer as u32);
pub const TCP_SERVER: UniqueApidTargetId =
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::TcpServer as u32);
pub const TCP_SPP_CLIENT: UniqueApidTargetId =
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::TcpSppClient as u32);
pub const CAMERA_HANDLER: UniqueApidTargetId =
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::CameraHandler as u32);
}
pub mod tasks {
use std::time::Duration;
pub const FREQ_MS_UDP_TMTC: u64 = 200;
pub const FREQ_MS_EVENT_HANDLING: u64 = 400;
pub const FREQ_MS_AOCS: u64 = 500;
pub const FREQ_MS_PUS_STACK: u64 = 200;
pub const FREQ_MS_CTRL: u64 = 400;
pub const FREQ_MS_CAMERA_HANDLING: u64 = 400;
pub const STOP_CHECK_FREQUENCY_MS: u64 = 400;
pub const STOP_CHECK_FREQUENCY: Duration = Duration::from_millis(STOP_CHECK_FREQUENCY_MS);
}

529
src/controller.rs Normal file
View File

@ -0,0 +1,529 @@
use crate::logger::LOGFILE_PATH;
use num_enum::TryFromPrimitive;
use ops_sat_rs::config::{action_err::INVALID_ACTION_ID, HOME_FOLDER_EXPERIMENT};
use ops_sat_rs::config::{
HOME_PATH, STOP_FILE_NAME, TO_GROUND_FOLDER_DIR, TO_GROUND_LP_FOLDER_DIR,
};
use satrs::action::ActionRequestVariant;
use satrs::{
action::ActionRequest,
params::Params,
pus::action::{ActionReplyPus, ActionReplyVariant},
request::{GenericMessage, MessageMetadata},
res_code::ResultU16,
};
use serde::{Deserialize, Serialize};
use std::env::temp_dir;
use std::io;
use std::{
path::{Path, PathBuf},
process::Command,
sync::{atomic::AtomicBool, mpsc, Arc},
};
use ops_sat_rs::config::ctrl_err::{
FILESYSTEM_COPY_ERROR, INVALID_LOGFILE_PATH, IO_ERROR, SHELL_CMD_EXECUTION_FAILURE,
SHELL_CMD_INVALID_FORMAT, SHELL_CMD_IO_ERROR,
};
use crate::requests::CompositeRequest;
#[derive(Serialize, Deserialize, Debug)]
pub struct ShellCmd<'a> {
cmd: &'a str,
args: Vec<&'a str>,
}
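// NOTE: the blocking shell command action expects its application data as JSON matching this
// struct, for example (illustrative values only): {"cmd": "ls", "args": ["-l", "/home/exp278"]}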
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u32)]
pub enum ActionId {
StopExperiment = 1,
DownlinkLogfile = 2,
/// Standard command to download the images made by the camera. It moves all image related
/// files inside the home folder into the toGroundLP (low priority to ground download) folder.
DownlinkImagesByMoving = 3,
ExecuteShellCommandBlocking = 4,
}
#[derive(Debug)]
pub struct ControllerPathCollection {
pub home_path: PathBuf,
pub stop_file_home_path: PathBuf,
pub stop_file_tmp_path: PathBuf,
pub to_ground_dir: PathBuf,
pub to_ground_low_prio_dir: PathBuf,
}
impl ControllerPathCollection {
pub fn new(base_path: &Path) -> Self {
let home_path = base_path.to_path_buf();
let mut home_path_stop_file = home_path.clone();
home_path_stop_file.push(STOP_FILE_NAME);
let mut tmp_path_stop_file = temp_dir();
tmp_path_stop_file.push(STOP_FILE_NAME);
Self {
home_path: home_path.clone(),
stop_file_home_path: home_path_stop_file,
stop_file_tmp_path: tmp_path_stop_file,
to_ground_dir: TO_GROUND_FOLDER_DIR
.get()
.expect("to ground directory not set")
.clone(),
to_ground_low_prio_dir: TO_GROUND_LP_FOLDER_DIR
.get()
.expect("to ground low prio directory not set")
.clone(),
}
}
}
pub struct ExperimentController {
pub composite_request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
pub action_reply_tx: mpsc::Sender<GenericMessage<ActionReplyPus>>,
pub stop_signal: Arc<AtomicBool>,
pub paths: ControllerPathCollection,
}
impl ExperimentController {
pub fn new(
composite_request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
action_reply_tx: mpsc::Sender<GenericMessage<ActionReplyPus>>,
stop_signal: Arc<AtomicBool>,
paths: ControllerPathCollection,
) -> Self {
Self {
composite_request_rx,
action_reply_tx,
stop_signal,
paths,
}
}
}
impl ExperimentController {
pub fn perform_operation(&mut self) {
match self.composite_request_rx.try_recv() {
Ok(msg) => match msg.message {
CompositeRequest::Hk(_) => {
log::warn!("hk request handling unimplemented")
}
CompositeRequest::Action(action_req) => {
self.handle_action_request(msg.requestor_info, action_req);
}
},
Err(e) => {
if e != mpsc::TryRecvError::Empty {
log::error!("composite request rx error: {:?}", e);
}
}
}
self.check_stop_file();
}
pub fn handle_action_request(&mut self, requestor: MessageMetadata, action_req: ActionRequest) {
let send_completion_failure = |error_code: ResultU16, params: Option<Params>| {
let result = self.action_reply_tx.send(GenericMessage::new_action_reply(
requestor,
action_req.action_id,
ActionReplyVariant::CompletionFailed { error_code, params },
));
if result.is_err() {
log::error!("sending action reply failed");
}
};
let action_id = ActionId::try_from(action_req.action_id);
if action_id.is_err() {
send_completion_failure(INVALID_ACTION_ID, None);
return;
}
match action_id.unwrap() {
ActionId::StopExperiment => {
self.stop_signal
.store(true, std::sync::atomic::Ordering::Relaxed);
self.send_completion_success(&requestor, &action_req);
}
ActionId::ExecuteShellCommandBlocking => {
self.handle_shell_command_execution(&requestor, &action_req);
}
ActionId::DownlinkLogfile => self.handle_downlink_logfile(&requestor, &action_req),
ActionId::DownlinkImagesByMoving => {
let result = self.handle_downlink_cam_image_by_moving(&requestor, &action_req);
if let Err(e) = result {
send_completion_failure(IO_ERROR, Some(e.to_string().into()));
}
}
}
}
pub fn handle_downlink_cam_image_by_moving(
&self,
requestor: &MessageMetadata,
action_req: &ActionRequest,
) -> io::Result<()> {
log::info!("moving images into low priority downlink folder");
let num_moved_files = move_images_inside_home_dir_to_low_prio_ground_dir(
HOME_PATH.get().unwrap(),
&self.paths.to_ground_low_prio_dir,
)?;
log::info!("moved {} image files", num_moved_files);
// TODO: Trigger event containing the number of moved files?
self.send_completion_success(requestor, action_req);
Ok(())
}
pub fn handle_downlink_logfile(&self, requestor: &MessageMetadata, action_req: &ActionRequest) {
log::info!("copying logfile into {:?}", self.paths.to_ground_dir);
if let Some(logfile_path) = LOGFILE_PATH.get() {
self.handle_file_copy(
requestor,
action_req,
logfile_path,
&self.paths.to_ground_dir,
)
} else {
log::error!("downlink path emtpy");
self.send_completion_failure(requestor, action_req, INVALID_LOGFILE_PATH, None);
}
}
pub fn handle_file_copy(
&self,
requestor: &MessageMetadata,
action_req: &ActionRequest,
source_path: &Path,
target_path: &Path,
) {
if let Err(e) = std::fs::copy(source_path, target_path) {
log::warn!("copying logfile into downlink path failed: {}", e);
self.send_completion_failure(
requestor,
action_req,
FILESYSTEM_COPY_ERROR,
Some(e.to_string().into()),
);
return;
}
self.send_completion_success(requestor, action_req)
}
pub fn send_completion_success(&self, requestor: &MessageMetadata, action_req: &ActionRequest) {
let result = self.action_reply_tx.send(GenericMessage::new_action_reply(
*requestor,
action_req.action_id,
ActionReplyVariant::Completed,
));
if result.is_err() {
log::error!("sending action reply failed");
}
}
pub fn send_completion_failure(
&self,
requestor: &MessageMetadata,
action_req: &ActionRequest,
error_code: ResultU16,
params: Option<Params>,
) {
let result = self.action_reply_tx.send(GenericMessage::new_action_reply(
*requestor,
action_req.action_id,
ActionReplyVariant::CompletionFailed { error_code, params },
));
if result.is_err() {
log::error!("sending action reply failed");
}
}
pub fn handle_shell_command_execution(
&self,
requestor: &MessageMetadata,
action_req: &ActionRequest,
) {
if let ActionRequestVariant::VecData(data) = &action_req.variant {
let shell_cmd_result: serde_json::Result<ShellCmd> = serde_json::from_slice(data);
match shell_cmd_result {
Ok(shell_cmd) => {
log::info!("executing shell cmd {:?}", shell_cmd);
match Command::new(shell_cmd.cmd).args(shell_cmd.args).status() {
Ok(status) => {
if status.success() {
self.send_completion_success(requestor, action_req);
} else {
log::warn!("execution of command failed: {}", status);
self.send_completion_failure(
requestor,
action_req,
SHELL_CMD_EXECUTION_FAILURE,
Some(status.to_string().into()),
);
}
}
Err(e) => {
log::warn!("execution of command failed with IO error: {}", e);
self.send_completion_failure(
requestor,
action_req,
SHELL_CMD_IO_ERROR,
Some(e.to_string().into()),
);
}
}
}
Err(e) => {
log::warn!("failed to deserialize shell command: {}", e);
let result = self.action_reply_tx.send(GenericMessage::new_action_reply(
*requestor,
action_req.action_id,
ActionReplyVariant::Completed,
));
if result.is_err() {
log::error!("Sending action reply failed");
}
}
}
} else {
log::warn!("no shell command was supplied for shell command action command");
self.send_completion_failure(requestor, action_req, SHELL_CMD_INVALID_FORMAT, None);
}
}
pub fn check_stop_file(&self) {
let check_at_path = |path: &Path| {
if path.exists() {
log::warn!(
"Detected stop file name at {:?}. Initiating experiment shutdown",
path
);
// By default, clear the stop file.
let result = std::fs::remove_file(path);
if result.is_err() {
log::error!(
"failed to remove stop file at {:?}: {}",
path,
result.unwrap_err()
);
}
self.stop_signal
.store(true, std::sync::atomic::Ordering::Relaxed);
}
};
check_at_path(self.paths.stop_file_tmp_path.as_path());
check_at_path(self.paths.stop_file_home_path.as_path());
}
}
pub fn move_images_inside_home_dir_to_low_prio_ground_dir(
home_dir: &Path,
low_prio_target_dir: &Path,
) -> io::Result<u32> {
let mut moved_files = 0;
for dir_entry_result in std::fs::read_dir(home_dir)? {
if let Ok(dir_entry) = &dir_entry_result {
if let Ok(file_type) = dir_entry.file_type() {
if file_type.is_file() {
let path_name = dir_entry.file_name();
let path_name_str = path_name.to_string_lossy();
if path_name_str.contains("img_msec_") {
let mut target_path = PathBuf::new();
target_path.push(low_prio_target_dir);
target_path.push(&path_name);
log::info!("moving file {}", &path_name_str);
std::fs::rename(dir_entry.path(), target_path)?;
moved_files += 1;
}
}
}
}
}
Ok(moved_files)
}
// TODO no idea if this works in any way shape or form
#[allow(dead_code)]
pub fn get_latest_image(index: usize) -> Result<PathBuf, std::io::Error> {
// Get the most recently modified file
let mut png_files = std::fs::read_dir(HOME_FOLDER_EXPERIMENT)?
.flatten()
.filter(|f| match f.metadata() {
Ok(metadata) => metadata.is_file(),
Err(_) => false,
})
.filter(|f| match f.file_name().into_string() {
Ok(name) => name.ends_with(".png"),
Err(_) => false,
})
.collect::<Vec<std::fs::DirEntry>>();
png_files.sort_by_key(|x| match x.metadata() {
Ok(metadata) => {
if let Ok(time) = metadata.modified() {
time
} else {
std::time::SystemTime::UNIX_EPOCH
}
}
Err(_) => std::time::SystemTime::UNIX_EPOCH,
});
png_files.reverse();
if let Some(png) = png_files.into_iter().nth(index) {
return Ok(png.path());
}
Err(std::io::Error::other("No latest image found"))
}
#[cfg(test)]
mod tests {
use std::sync::{mpsc, Arc};
use tempfile::NamedTempFile;
use super::*;
fn init() {
env_logger::builder().is_test(true).init();
}
pub struct ControllerTestbench {
pub composite_req_tx: mpsc::Sender<GenericMessage<CompositeRequest>>,
pub action_reply_rx: mpsc::Receiver<GenericMessage<ActionReplyPus>>,
pub stop_signal: Arc<AtomicBool>,
pub ctrl: ExperimentController,
}
impl ControllerTestbench {
pub fn new() -> Self {
init();
let (composite_req_tx, composite_req_rx) = mpsc::channel();
let (action_reply_tx, action_reply_rx) = mpsc::channel();
let stop_signal = Arc::new(AtomicBool::new(false));
let test_tmp_dir = tempfile::tempdir().expect("creating tmpdir failed");
let base_dir = PathBuf::from(test_tmp_dir.path());
let mut stop_file_tmp_path = base_dir.clone();
stop_file_tmp_path.push(STOP_FILE_NAME);
let mut stop_file_home_path = base_dir.clone();
stop_file_home_path.push("home");
stop_file_home_path.push(STOP_FILE_NAME);
let mut to_ground_dir = base_dir.clone();
to_ground_dir.push("toGround");
let mut to_ground_low_prio_dir = base_dir.clone();
to_ground_low_prio_dir.push("toGroundLP");
let test_paths = ControllerPathCollection {
home_path: test_tmp_dir.path().to_path_buf(),
stop_file_home_path,
stop_file_tmp_path,
to_ground_dir,
to_ground_low_prio_dir,
};
ControllerTestbench {
composite_req_tx,
action_reply_rx,
stop_signal: stop_signal.clone(),
ctrl: ExperimentController::new(
composite_req_rx,
action_reply_tx,
stop_signal,
test_paths,
),
}
}
}
#[test]
fn test_shell_cmd_execution() {
let mut testbench = ControllerTestbench::new();
let named_temp_file = NamedTempFile::new().expect("creating temp file failed");
let args = vec![named_temp_file
.path()
.to_str()
.expect("converting path to str failed")];
let cmd = ShellCmd { cmd: "rm", args };
let cmd_serialized = serde_json::to_string(&cmd).expect("serialization failed");
let action_req = satrs::action::ActionRequest {
action_id: ActionId::ExecuteShellCommandBlocking as u32,
variant: satrs::action::ActionRequestVariant::VecData(cmd_serialized.into_bytes()),
};
testbench
.composite_req_tx
.send(GenericMessage::new(
MessageMetadata::new(1, 2),
CompositeRequest::Action(action_req),
))
.expect("sending action request failed");
testbench.ctrl.perform_operation();
assert!(!named_temp_file.path().exists());
let action_reply = testbench
.action_reply_rx
.try_recv()
.expect("receiving action reply failed");
assert_eq!(
action_reply.message.action_id,
ActionId::ExecuteShellCommandBlocking as u32
);
match action_reply.message.variant {
ActionReplyVariant::Completed => (),
_ => {
panic!(
"unexecpted action reply variant {:?}",
action_reply.message.variant
)
}
}
}
}
// Need to think about the value of this again. This is not easy to do in Rust..
/*
pub trait ActionHelperHook {
fn is_valid_action_id(&self, action_id: satrs::action::ActionId) -> bool;
fn send_reply(&self, action_reply: GenericActionReplyPus) -> Result<(), GenericSendError>;
}
pub struct ActionHelper<Hook: ActionHelperHook> {
pub requestor: MessageMetadata,
pub action_id: satrs::action::ActionId,
pub user_hook: Hook,
}
impl<Hook: ActionHelperHook> ActionHelper<Hook> {
fn new(
&mut self,
requestor: MessageMetadata,
action_id: satrs::action::ActionId,
) -> Result<Option<Self>, GenericSendError> {
if !self.user_hook.is_valid_action_id(action_id) {
self.report_completion_failed(INVALID_ACTION_ID, None)?;
return Ok(None);
}
Ok(Some(Self {
requestor,
action_id
}))
}
fn report_completion_success(&self) -> Result<(), GenericSendError> {
self.user_hook.send_reply(GenericMessage::new_action_reply(
self.requestor,
self.action_id,
ActionReplyVariant::Completed,
))?;
Ok(())
}
fn report_completion_failed(
&self,
error_code: ResultU16,
params: Option<Params>,
) -> Result<(), GenericSendError> {
self.user_hook.send_reply(GenericMessage::new_action_reply(
self.requestor,
self.action_id,
ActionReplyVariant::CompletionFailed { error_code, params },
))?;
Ok(())
}
}
*/

286
src/events.rs Normal file
View File

@ -0,0 +1,286 @@
use std::sync::mpsc::{self};
use crate::pus::create_verification_reporter;
use ops_sat_rs::config::components::PUS_EVENT_MANAGEMENT;
use satrs::event_man::{EventMessageU32, EventRoutingError};
use satrs::pus::event::EventTmHookProvider;
use satrs::pus::verification::VerificationReporter;
use satrs::request::UniqueApidTargetId;
use satrs::tmtc::PacketAsVec;
use satrs::{
event_man::{EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded},
pus::{
event_man::{
DefaultPusEventU32TmCreator, EventReporter, EventRequest, EventRequestWithToken,
},
verification::{TcStateStarted, VerificationReportingProvider, VerificationToken},
},
spacepackets::time::cds::CdsTime,
};
use ops_sat_rs::update_time;
// This helper sets the APID of the event sender for the PUS telemetry.
#[derive(Default)]
pub struct EventApidSetter {
pub next_apid: u16,
}
impl EventTmHookProvider for EventApidSetter {
fn modify_tm(&self, tm: &mut satrs::spacepackets::ecss::tm::PusTmCreator) {
tm.set_apid(self.next_apid);
}
}
/// The PUS event handler subscribes for all events and converts them into ECSS PUS 5 event
/// packets. It also handles the verification completion of PUS event service requests.
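///
/// A minimal usage sketch (illustrative only, the channel names are assumptions): the handler
/// is normally owned by the [`EventHandler`] defined below and driven from its periodic
/// operation.
///
/// ```ignore
/// // Assumed setup: TM sender, event receiver and event request receiver channels.
/// let mut event_handler = EventHandler::new(tm_sender, event_rx, event_request_rx);
/// loop {
///     // Handles PUS 5 enable/disable requests, routes events and generates event TM.
///     event_handler.periodic_operation();
/// }
/// ```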
pub struct PusEventHandler {
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
pus_event_tm_creator: DefaultPusEventU32TmCreator<EventApidSetter>,
pus_event_man_rx: mpsc::Receiver<EventMessageU32>,
tm_sender: mpsc::Sender<PacketAsVec>,
time_provider: CdsTime,
timestamp: [u8; 7],
small_params_buf: [u8; 64],
verif_handler: VerificationReporter,
}
impl PusEventHandler {
pub fn new(
tm_sender: mpsc::Sender<PacketAsVec>,
verif_handler: VerificationReporter,
event_manager: &mut EventManagerWithBoundedMpsc,
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
) -> Self {
let event_queue_cap = 30;
let (pus_event_man_tx, pus_event_man_rx) = mpsc::sync_channel(event_queue_cap);
// All events sent to the manager are routed to the PUS event manager, which generates PUS event
// telemetry for each event.
let event_reporter = EventReporter::new_with_hook(
PUS_EVENT_MANAGEMENT.raw(),
0,
0,
128,
EventApidSetter::default(),
)
.unwrap();
let pus_event_dispatcher =
DefaultPusEventU32TmCreator::new_with_default_backend(event_reporter);
let pus_event_man_send_provider = EventU32SenderMpscBounded::new(
PUS_EVENT_MANAGEMENT.raw(),
pus_event_man_tx,
event_queue_cap,
);
event_manager.subscribe_all(pus_event_man_send_provider.target_id());
event_manager.add_sender(pus_event_man_send_provider);
Self {
event_request_rx,
pus_event_tm_creator: pus_event_dispatcher,
pus_event_man_rx,
time_provider: CdsTime::new_with_u16_days(0, 0),
timestamp: [0; 7],
small_params_buf: [0; 64],
verif_handler,
tm_sender,
}
}
pub fn handle_event_requests(&mut self) {
let report_completion = |event_req: EventRequestWithToken, timestamp: &[u8]| {
let started_token: VerificationToken<TcStateStarted> = event_req
.token
.try_into()
.expect("expected start verification token");
self.verif_handler
.completion_success(&self.tm_sender, started_token, timestamp)
.expect("Sending completion success failed");
};
loop {
// handle event requests
match self.event_request_rx.try_recv() {
Ok(event_req) => match event_req.request {
EventRequest::Enable(event) => {
self.pus_event_tm_creator
.enable_tm_for_event(&event)
.expect("Enabling TM failed");
update_time(&mut self.time_provider, &mut self.timestamp);
report_completion(event_req, &self.timestamp);
}
EventRequest::Disable(event) => {
self.pus_event_tm_creator
.disable_tm_for_event(&event)
.expect("Disabling TM failed");
update_time(&mut self.time_provider, &mut self.timestamp);
report_completion(event_req, &self.timestamp);
}
},
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => {
log::warn!("all event request senders have disconnected");
break;
}
},
}
}
}
pub fn generate_pus_event_tm(&mut self) {
loop {
// Perform the generation of PUS event packets
match self.pus_event_man_rx.try_recv() {
Ok(event_msg) => {
// We use the TM modification hook to set the sender APID for each event.
self.pus_event_tm_creator.reporter.tm_hook.next_apid =
UniqueApidTargetId::from(event_msg.sender_id()).apid;
update_time(&mut self.time_provider, &mut self.timestamp);
self.pus_event_tm_creator
.generate_pus_event_tm_generic_with_generic_params(
&self.tm_sender,
&self.timestamp,
event_msg.event(),
&mut self.small_params_buf,
event_msg.params(),
)
.expect("Sending TM as event failed");
}
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => {
log::warn!("All event senders have disconnected");
break;
}
},
}
}
}
}
pub struct EventHandler {
pub pus_event_handler: PusEventHandler,
event_manager: EventManagerWithBoundedMpsc,
}
impl EventHandler {
pub fn new(
tm_sender: mpsc::Sender<PacketAsVec>,
event_rx: mpsc::Receiver<EventMessageU32>,
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
) -> Self {
let mut event_manager = EventManagerWithBoundedMpsc::new(event_rx);
let pus_event_handler = PusEventHandler::new(
tm_sender,
create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid, 16),
&mut event_manager,
event_request_rx,
);
Self {
pus_event_handler,
event_manager,
}
}
#[allow(dead_code)]
pub fn event_manager(&mut self) -> &mut EventManagerWithBoundedMpsc {
&mut self.event_manager
}
pub fn periodic_operation(&mut self) {
self.pus_event_handler.handle_event_requests();
self.try_event_routing();
self.pus_event_handler.generate_pus_event_tm();
}
pub fn try_event_routing(&mut self) {
let error_handler = |event_msg: &EventMessageU32, error: EventRoutingError| {
self.routing_error_handler(event_msg, error)
};
// Perform the event routing.
self.event_manager.try_event_handling(error_handler);
}
pub fn routing_error_handler(&self, event_msg: &EventMessageU32, error: EventRoutingError) {
log::warn!("event routing error for event {event_msg:?}: {error:?}");
}
}
#[cfg(test)]
mod tests {
use satrs::{
events::EventU32,
pus::verification::VerificationReporterCfg,
spacepackets::{
ecss::{tm::PusTmReader, PusPacket},
CcsdsPacket,
},
tmtc::PacketAsVec,
};
use super::*;
const TEST_CREATOR_ID: UniqueApidTargetId = UniqueApidTargetId::new(1, 2);
const TEST_EVENT: EventU32 = EventU32::new(satrs::events::Severity::Info, 1, 1);
pub struct EventManagementTestbench {
pub event_tx: mpsc::SyncSender<EventMessageU32>,
pub event_manager: EventManagerWithBoundedMpsc,
pub tm_receiver: mpsc::Receiver<PacketAsVec>,
pub pus_event_handler: PusEventHandler,
}
impl EventManagementTestbench {
pub fn new() -> Self {
let (event_tx, event_rx) = mpsc::sync_channel(10);
let (_event_req_tx, event_req_rx) = mpsc::sync_channel(10);
let (tm_sender, tm_receiver) = mpsc::channel();
let verif_reporter_cfg = VerificationReporterCfg::new(0x05, 2, 2, 128).unwrap();
let verif_reporter =
VerificationReporter::new(PUS_EVENT_MANAGEMENT.id(), &verif_reporter_cfg);
let mut event_manager = EventManagerWithBoundedMpsc::new(event_rx);
let pus_event_handler =
PusEventHandler::new(tm_sender, verif_reporter, &mut event_manager, event_req_rx);
Self {
event_tx,
tm_receiver,
event_manager,
pus_event_handler,
}
}
}
#[test]
fn test_basic_event_generation() {
let mut testbench = EventManagementTestbench::new();
testbench
.event_tx
.send(EventMessageU32::new(
TEST_CREATOR_ID.id(),
EventU32::new(satrs::events::Severity::Info, 1, 1),
))
.expect("failed to send event");
testbench.pus_event_handler.handle_event_requests();
testbench.event_manager.try_event_handling(|_, _| {});
testbench.pus_event_handler.generate_pus_event_tm();
let tm_packet = testbench
.tm_receiver
.try_recv()
.expect("failed to receive TM packet");
assert_eq!(tm_packet.sender_id, PUS_EVENT_MANAGEMENT.id());
let tm_reader = PusTmReader::new(&tm_packet.packet, 7)
.expect("failed to create TM reader")
.0;
assert_eq!(tm_reader.apid(), TEST_CREATOR_ID.apid);
assert_eq!(tm_reader.user_data().len(), 4);
let event_read_back = EventU32::from_be_bytes(tm_reader.user_data().try_into().unwrap());
assert_eq!(event_read_back, TEST_EVENT);
}
#[test]
fn test_basic_event_disabled() {
// TODO: Add test.
}
}

617
src/handlers/camera.rs Normal file
View File

@ -0,0 +1,617 @@
/// Device handler implementation for the IMS-100 Imager used on the OPS-SAT mission.
///
/// from the [OPSSAT Experimenter Wiki](https://opssat1.esoc.esa.int/projects/experimenter-information/wiki/Camera_Introduction):
/// OPS-SAT has a BST IMS-100 Imager onboard for image acquisition. These RGGB images are 2048x1944px in size.
///
/// There are two ways of taking pictures, with the NMF or by using the camera API directly.
///
/// As the NMF method is already explained in the NMF documentation we will focus on triggering the camera API.
///
/// The camera is located on the -Z face of OPS-SAT
///
/// Mapping between camera and satellite frames:
/// cam body
/// +x -z
/// +y -x
/// +z +y
///
/// If you look onto Flatsat as in your picture coordinate system for camera it is
///
/// Z Z pointing inside Flatsat
/// x---> X
/// |
/// |
/// v Y
///
/// see also https://opssat1.esoc.esa.int/dmsf/files/6/view
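///
/// As a rough illustration of what this handler assembles (a sketch based on
/// [`build_take_image_command`] below, not an authoritative command reference), the default
/// single-image parameters translate into an invocation along these lines:
///
/// ```ignore
/// // Builds, but does not execute, the imager command for the default parameters.
/// let cmd = build_take_image_command(&DEFAULT_SINGLE_CAM_PARAMS);
/// // Roughly equivalent to:
/// //   ims100_testapp -R 8 -G 8 -B 8 -c /dev/cam_tty -m /dev/cam_sd -v 0 -n 1 -p -e 2 -w 1000
/// ```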
use crate::pus::action::send_data_reply;
use crate::requests::CompositeRequest;
use derive_new::new;
use log::info;
use num_enum::TryFromPrimitive;
use ops_sat_rs::config::cam_error::{self, CameraError};
use ops_sat_rs::config::GENERIC_FAILED;
use ops_sat_rs::TimeStampHelper;
use satrs::action::{ActionRequest, ActionRequestVariant};
use satrs::hk::HkRequest;
use satrs::params::Params;
use satrs::pus::action::{ActionReplyPus, ActionReplyVariant};
use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId};
use satrs::res_code::ResultU16;
use satrs::tmtc::PacketAsVec;
use serde::{Deserialize, Serialize};
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::process::{Command, Output};
use std::sync::mpsc;
use std::time::{SystemTime, UNIX_EPOCH};
const IMS_TESTAPP: &str = "ims100_testapp";
const DEFAULT_SINGLE_CAM_PARAMS: CameraPictureParameters = CameraPictureParameters {
R: 8,
G: 8,
B: 8,
N: 1,
P: true,
E: 2,
W: 1000,
};
const BALANCED_SINGLE_CAM_PARAMS: CameraPictureParameters = CameraPictureParameters {
R: 13,
G: 7,
B: 8,
N: 1,
P: true,
E: 2,
W: 1000,
};
const DEFAULT_SINGLE_FLATSAT_CAM_PARAMS: CameraPictureParameters = CameraPictureParameters {
R: 8,
G: 8,
B: 8,
N: 1,
P: true,
E: 200,
W: 1000,
};
const BALANCED_SINGLE_FLATSAT_CAM_PARAMS: CameraPictureParameters = CameraPictureParameters {
R: 13,
G: 7,
B: 8,
N: 1,
P: true,
E: 200,
W: 1000,
};
// TODO copy as action
// TODO ls -l via cfdp
// TODO howto downlink
#[derive(Debug, TryFromPrimitive)]
#[repr(u32)]
pub enum ActionId {
DefaultSingle = 1,
BalancedSingle = 2,
DefaultSingleFlatSat = 3,
BalancedSingleFlatSat = 4,
CustomParameters = 5,
}
// TODO what happens if limits are exceeded
#[allow(non_snake_case)]
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, new)]
pub struct CameraPictureParameters {
pub R: u8,
pub G: u8,
pub B: u8,
pub N: u8, // number of images, max: 26
pub P: bool, // .png flag, true converts raw extracted image from camera to a png
pub E: u32, // exposure time in ms, max: 1580, default: 2, FlatSat: 200
pub W: u32, // wait time between pictures in ms, max: 40000
}
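// For the `CustomParameters` action, these parameters are expected as JSON in the action
// request payload. A payload equivalent to `DEFAULT_SINGLE_FLATSAT_CAM_PARAMS` would look
// roughly like this (sketch, field order does not matter for serde):
//
//   {"R":8,"G":8,"B":8,"N":1,"P":true,"E":200,"W":1000}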
pub trait TakeImageExecutor {
fn take_image(&self, param: &CameraPictureParameters) -> io::Result<(Command, Output)>;
}
#[derive(Default)]
pub struct Ims100ImageExecutor {}
pub fn build_take_image_command(param: &CameraPictureParameters) -> Command {
let mut cmd = Command::new(IMS_TESTAPP);
cmd.arg("-R")
.arg(param.R.to_string())
.arg("-G")
.arg(param.G.to_string())
.arg("-B")
.arg(param.B.to_string())
.arg("-c")
.arg("/dev/cam_tty")
.arg("-m")
.arg("/dev/cam_sd")
.arg("-v")
.arg("0")
.arg("-n")
.arg(param.N.to_string());
if param.P {
cmd.arg("-p");
}
cmd.arg("-e")
.arg(param.E.to_string())
.arg("-w")
.arg(param.W.to_string());
cmd
}
impl TakeImageExecutor for Ims100ImageExecutor {
fn take_image(&self, param: &CameraPictureParameters) -> io::Result<(Command, Output)> {
let mut cmd = build_take_image_command(param);
info!("taking image with command: {cmd:?}");
let output = cmd.output()?;
Ok((cmd, output))
}
}
pub struct Ims100BatchHandler<ImgExecutor: TakeImageExecutor = Ims100ImageExecutor> {
id: UniqueApidTargetId,
pub image_executor: ImgExecutor,
pub home_path: PathBuf,
composite_request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
tm_tx: mpsc::Sender<PacketAsVec>,
action_reply_tx: mpsc::Sender<GenericMessage<ActionReplyPus>>,
stamp_helper: TimeStampHelper,
}
impl<ImgExecutor: TakeImageExecutor> Ims100BatchHandler<ImgExecutor> {
pub fn new(
id: UniqueApidTargetId,
image_executor: ImgExecutor,
home_path: &Path,
composite_request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
tm_tx: mpsc::Sender<PacketAsVec>,
action_reply_tx: mpsc::Sender<GenericMessage<ActionReplyPus>>,
stamp_helper: TimeStampHelper,
) -> Self {
Self {
id,
image_executor,
home_path: home_path.to_path_buf(),
composite_request_rx,
tm_tx,
action_reply_tx,
stamp_helper,
}
}
pub fn periodic_operation(&mut self) {
self.stamp_helper.update_from_now();
// Handle requests.
self.handle_composite_requests();
}
pub fn handle_composite_requests(&mut self) {
loop {
match self.composite_request_rx.try_recv() {
Ok(ref msg) => match &msg.message {
CompositeRequest::Hk(hk_request) => {
self.handle_hk_request(&msg.requestor_info, hk_request);
}
CompositeRequest::Action(action_request) => {
self.handle_action_request(&msg.requestor_info, action_request);
}
},
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => {
log::warn!("composite request receiver disconnected");
break;
}
},
}
}
}
pub fn handle_hk_request(
&mut self,
_requestor_info: &MessageMetadata,
_hk_request: &HkRequest,
) {
// TODO add hk to opssat
}
pub fn handle_action_request(
&mut self,
requestor_info: &MessageMetadata,
action_request: &ActionRequest,
) {
let param = match ActionId::try_from(action_request.action_id).expect("Invalid action id") {
ActionId::DefaultSingle => DEFAULT_SINGLE_CAM_PARAMS,
ActionId::BalancedSingle => BALANCED_SINGLE_CAM_PARAMS,
ActionId::DefaultSingleFlatSat => DEFAULT_SINGLE_FLATSAT_CAM_PARAMS,
ActionId::BalancedSingleFlatSat => BALANCED_SINGLE_FLATSAT_CAM_PARAMS,
ActionId::CustomParameters => match &action_request.variant {
ActionRequestVariant::NoData => {
self.send_completion_failure(
requestor_info,
action_request,
cam_error::NO_DATA,
None,
);
return;
}
ActionRequestVariant::VecData(data) => {
let param: serde_json::Result<CameraPictureParameters> =
serde_json::from_slice(data.as_slice());
match param {
Ok(param) => param,
Err(e) => {
self.send_completion_failure(
requestor_info,
action_request,
cam_error::DESERIALIZE_ERROR,
Some(e.to_string().into()),
);
return;
}
}
}
_ => {
self.send_completion_failure(
requestor_info,
action_request,
cam_error::ACTION_REQ_VARIANT_NOT_IMPL,
None,
);
return;
}
},
};
match self.take_picture(&param) {
Ok((cmd, ref output)) => {
self.send_completion_success(requestor_info, action_request);
if let Err(e) =
send_data_reply(self.id, &output.stdout, &self.stamp_helper, &self.tm_tx)
{
log::error!("sending data reply unexpectedly failed: {e}");
}
if let Err(e) = self.create_metadata_file(cmd, &param) {
// TODO: Generate event?
log::error!("issue creating metadata file: {e}");
}
}
Err(e) => match e {
CameraError::TakeImageError(ref err_str) => {
self.send_completion_failure(
requestor_info,
action_request,
cam_error::TAKE_IMAGE_ERROR,
Some(err_str.to_string().into()),
);
}
CameraError::IoError(ref e) => {
self.send_completion_failure(
requestor_info,
action_request,
cam_error::IO_ERROR,
Some(e.to_string().into()),
);
}
_ => {
log::warn!("unexpected error: {:?}", e);
self.send_completion_failure(
requestor_info,
action_request,
GENERIC_FAILED,
None,
);
}
},
}
}
pub fn create_metadata_file(
&mut self,
cmd: Command,
param: &CameraPictureParameters,
) -> io::Result<()> {
let now = SystemTime::now();
let unix_timestamp = now.duration_since(UNIX_EPOCH);
if unix_timestamp.is_err() {
log::error!("failed to get unix timestamp, time went backwards?");
return Ok(());
}
let unix_timestamp = unix_timestamp.unwrap().as_millis();
let mut metadata_path = self.home_path.clone();
metadata_path.push(format!("img_msec_{}.txt", unix_timestamp));
let mut file = std::fs::File::create(metadata_path)?;
writeln!(file, "time: {}", humantime::format_rfc3339_seconds(now))?;
writeln!(file, "cmd params: {:?}", param)?;
writeln!(file, "cmd: {:?}", cmd)?;
Ok(())
}
pub fn send_completion_success(&self, requestor: &MessageMetadata, action_req: &ActionRequest) {
let result = self.action_reply_tx.send(GenericMessage::new_action_reply(
*requestor,
action_req.action_id,
ActionReplyVariant::Completed,
));
if result.is_err() {
log::error!("sending action reply failed");
}
}
pub fn send_completion_failure(
&self,
requestor: &MessageMetadata,
action_req: &ActionRequest,
error_code: ResultU16,
params: Option<Params>,
) {
let result = self.action_reply_tx.send(GenericMessage::new_action_reply(
*requestor,
action_req.action_id,
ActionReplyVariant::CompletionFailed { error_code, params },
));
if result.is_err() {
log::error!("sending action reply failed");
}
}
pub fn take_picture(
&mut self,
param: &CameraPictureParameters,
) -> Result<(Command, Output), CameraError> {
let (cmd, output) = self.image_executor.take_image(param)?;
info!("imager cmd status: {}", &output.status);
info!("imager output: {}", String::from_utf8_lossy(&output.stdout));
let mut error_string = String::new();
if !output.stderr.is_empty() {
error_string = String::from_utf8_lossy(&output.stderr).to_string();
log::warn!("imager error: {}", error_string);
}
if !output.status.success() {
return Err(CameraError::TakeImageError(error_string.to_string()));
}
Ok((cmd, output))
}
#[allow(dead_code)]
pub fn list_current_images(&self) -> Result<Vec<String>, CameraError> {
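// NOTE: std::process::Command does not go through a shell, so the "*.png" argument below is
// passed to ls literally and is not glob-expanded; this would need e.g. `sh -c` or manual
// directory iteration to list the PNG files as probably intended.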
let output = Command::new("ls").arg("-l").arg("*.png").output()?;
if output.status.success() {
let output_str = String::from_utf8(output.stdout).unwrap();
let files: Vec<String> = output_str.lines().map(|s| s.to_string()).collect();
Ok(files)
} else {
Err(CameraError::ListFileError(
String::from_utf8_lossy(&output.stderr).to_string(),
))
}
}
}
impl Ims100BatchHandler {
pub fn new_with_default_img_executor(
id: UniqueApidTargetId,
home_path: &Path,
composite_request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
tm_tx: mpsc::Sender<PacketAsVec>,
action_reply_tx: mpsc::Sender<GenericMessage<ActionReplyPus>>,
stamp_helper: TimeStampHelper,
) -> Self {
Self::new(
id,
Ims100ImageExecutor::default(),
home_path,
composite_request_rx,
tm_tx,
action_reply_tx,
stamp_helper,
)
}
}
#[cfg(test)]
mod tests {
use crate::handlers::camera::{
ActionId, CameraPictureParameters, Ims100BatchHandler, DEFAULT_SINGLE_FLATSAT_CAM_PARAMS,
};
use crate::requests::CompositeRequest;
use ops_sat_rs::config::components::CAMERA_HANDLER;
use ops_sat_rs::TimeStampHelper;
use satrs::action::{ActionRequest, ActionRequestVariant};
use satrs::pus::action::{ActionReplyPus, ActionReplyVariant};
use satrs::request::{GenericMessage, MessageMetadata};
use satrs::tmtc::PacketAsVec;
use satrs::ComponentId;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::os::unix::process::ExitStatusExt;
use std::sync::mpsc;
use tempfile::{tempdir, TempDir};
use super::{build_take_image_command, TakeImageExecutor};
const REQUESTOR_ID: ComponentId = 1;
#[derive(Default)]
struct Ims100TestImageExecutor {
pub called_with_params: RefCell<VecDeque<CameraPictureParameters>>,
}
impl TakeImageExecutor for Ims100TestImageExecutor {
fn take_image(
&self,
param: &CameraPictureParameters,
) -> std::io::Result<(std::process::Command, std::process::Output)> {
let mut param_deque = self.called_with_params.borrow_mut();
param_deque.push_back(param.clone());
// We fake the test output, since there is no way to execute the actual command here.
let output = std::process::Output {
status: std::process::ExitStatus::from_raw(0),
stdout: Vec::new(),
stderr: Vec::new(),
};
// We could generate the files as they are generated by the real batch handler.. But
// I think it's okay to verify that the function is called with the correct parameters
// and the metadata file is created for now.
Ok((build_take_image_command(param), output))
}
}
#[allow(dead_code)]
struct Ims100Testbench {
pub handler: Ims100BatchHandler<Ims100TestImageExecutor>,
pub tmp_home_dir: TempDir,
pub composite_req_tx: mpsc::Sender<GenericMessage<CompositeRequest>>,
pub tm_receiver: mpsc::Receiver<PacketAsVec>,
pub action_reply_rx: mpsc::Receiver<GenericMessage<ActionReplyPus>>,
}
impl Default for Ims100Testbench {
fn default() -> Self {
let tmp_home_dir = tempdir().expect("error creating temp directory");
let (composite_request_tx, composite_request_rx) = mpsc::channel();
let (tm_tx, tm_rx) = mpsc::channel();
let (action_reply_tx, action_reply_rx) = mpsc::channel();
let time_helper = TimeStampHelper::default();
let cam_handler = Ims100BatchHandler::new(
CAMERA_HANDLER,
Ims100TestImageExecutor::default(),
tmp_home_dir.path(),
composite_request_rx,
tm_tx,
action_reply_tx,
time_helper,
);
// Even though we set the temporary home directory into HOME_PATH, we still need to
// cache the TempDir, so it is not dropped.
Ims100Testbench {
handler: cam_handler,
tmp_home_dir,
composite_req_tx: composite_request_tx,
tm_receiver: tm_rx,
action_reply_rx,
}
}
}
#[test]
fn command_line_execution() {
let mut testbench = Ims100Testbench::default();
testbench
.handler
.take_picture(&DEFAULT_SINGLE_FLATSAT_CAM_PARAMS)
.unwrap();
}
#[test]
fn serialize_and_deserialize_command() {
let data = serde_json::to_string(&DEFAULT_SINGLE_FLATSAT_CAM_PARAMS).unwrap();
println!("{}", data);
let param: CameraPictureParameters = serde_json::from_str(&data).unwrap();
println!("{:?}", param);
}
#[test]
fn test_take_image_action_req() {
let request_id = 5;
let mut testbench = Ims100Testbench::default();
let data = serde_json::to_string(&DEFAULT_SINGLE_FLATSAT_CAM_PARAMS).unwrap();
let req = ActionRequest::new(
ActionId::CustomParameters as u32,
ActionRequestVariant::VecData(data.as_bytes().to_vec()),
);
testbench
.handler
.handle_action_request(&MessageMetadata::new(request_id, REQUESTOR_ID), &req);
let action_reply = testbench
.action_reply_rx
.try_recv()
.expect("expected action reply");
assert!(matches!(
action_reply.message.variant,
ActionReplyVariant::Completed
));
assert_eq!(action_reply.request_id(), request_id);
assert_eq!(action_reply.sender_id(), REQUESTOR_ID);
let mut image_executor = testbench
.handler
.image_executor
.called_with_params
.borrow_mut();
let called_params = image_executor.pop_front().expect("expected called params");
assert_eq!(called_params, DEFAULT_SINGLE_FLATSAT_CAM_PARAMS);
let mut detected_metadata_file = false;
for dir_entry_result in std::fs::read_dir(&testbench.handler.home_path)
.unwrap_or_else(|_| panic!("can not read {:?}", testbench.handler.home_path.as_path()))
{
if let Ok(dir_entry) = &dir_entry_result {
if let Ok(file_type) = dir_entry.file_type() {
if file_type.is_file() {
let path_name = dir_entry.file_name();
let path_name_str = path_name.to_string_lossy();
if path_name_str.contains("img_msec_") {
let file = File::open(dir_entry.path()).expect("file not found");
let buf_reader = BufReader::new(file);
for (idx, line) in buf_reader.lines().enumerate() {
let line = line.expect("line is not proper string");
if idx == 0 {
assert!(line.contains("time:"));
// Tricky to check, would have to mock this.. I think it's okay
// for now.
}
if idx == 1 {
assert!(line.contains("cmd params:"));
assert!(line.contains(&format!(
"{:?}",
&DEFAULT_SINGLE_FLATSAT_CAM_PARAMS
)));
}
if idx == 2 {
assert!(line.contains("cmd:"));
let cmd = build_take_image_command(
&DEFAULT_SINGLE_FLATSAT_CAM_PARAMS,
);
let cmd_str = format!("{:?}", cmd);
assert!(line.contains(&cmd_str));
}
}
detected_metadata_file = true;
}
}
}
}
}
assert!(detected_metadata_file, "no metadata file was generated");
}
#[test]
fn test_action_req_channel() {
let mut testbench = Ims100Testbench::default();
let data = serde_json::to_string(&DEFAULT_SINGLE_FLATSAT_CAM_PARAMS).unwrap();
let req = ActionRequest::new(
ActionId::CustomParameters as u32,
ActionRequestVariant::VecData(data.as_bytes().to_vec()),
);
let req = CompositeRequest::Action(req);
testbench
.composite_req_tx
.send(GenericMessage::new(MessageMetadata::new(1, 1), req))
.unwrap();
testbench.handler.periodic_operation();
}
}

1
src/handlers/mod.rs Normal file
View File

@ -0,0 +1 @@
pub mod camera;

1
src/interface/can.rs Normal file
View File

@ -0,0 +1 @@
//! This is a preliminary implementation of the necessary infrastructure to enable communication over OPS-SAT's internal CAN Bus.

View File

@ -1,2 +1,36 @@
-pub mod tcp;
-pub mod udp;
+use derive_new::new;
+use ops_sat_rs::config::SPP_CLIENT_WIRETAPPING_RX;
use satrs::{
encoding::ccsds::{SpValidity, SpacePacketValidator},
spacepackets::PacketId,
};
pub mod can;
pub mod tcp_server;
pub mod tcp_spp_client;
pub mod udp_server;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TcpComponent {
Server,
Client,
}
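/// Space packet validator which accepts packets whose packet ID is contained in the configured
/// list and skips everything else, with optional wiretapping debug output for the TCP client.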
#[derive(new, Clone)]
pub struct SimpleSpValidator {
component: TcpComponent,
valid_ids: Vec<PacketId>,
}
impl SpacePacketValidator for SimpleSpValidator {
fn validate(&self, sp_header: &satrs::spacepackets::SpHeader, raw_buf: &[u8]) -> SpValidity {
if SPP_CLIENT_WIRETAPPING_RX && self.component == TcpComponent::Client {
log::debug!("sp header: {:?}", sp_header);
log::debug!("raw data: {:x?}", raw_buf);
}
if self.valid_ids.contains(&sp_header.packet_id) {
return SpValidity::Valid;
}
SpValidity::Skip
}
}

View File

@ -1,17 +1,18 @@
use std::{ use std::{
collections::{HashSet, VecDeque}, collections::VecDeque,
sync::{Arc, Mutex}, sync::{atomic::AtomicBool, mpsc, Arc, Mutex},
}; };
use log::{info, warn}; use log::{info, warn};
use ops_sat_rs::config::tasks::STOP_CHECK_FREQUENCY;
use satrs::{ use satrs::{
hal::std::tcp_server::{ServerConfig, TcpSpacepacketsServer}, hal::std::tcp_server::{HandledConnectionHandler, ServerConfig, TcpSpacepacketsServer},
pus::ReceivesEcssPusTc, queue::GenericSendError,
spacepackets::PacketId, spacepackets::PacketId,
tmtc::{CcsdsDistributor, CcsdsError, ReceivesCcsdsTc, TmPacketSourceCore}, tmtc::{PacketAsVec, PacketSource},
}; };
use crate::ccsds::CcsdsReceiver; use super::{SimpleSpValidator, TcpComponent};
#[derive(Default, Clone)] #[derive(Default, Clone)]
pub struct SyncTcpTmSource { pub struct SyncTcpTmSource {
@ -41,7 +42,7 @@ impl SyncTcpTmSource {
} }
} }
impl TmPacketSourceCore for SyncTcpTmSource { impl PacketSource for SyncTcpTmSource {
type Error = (); type Error = ();
fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> { fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
@ -69,59 +70,54 @@ impl TmPacketSourceCore for SyncTcpTmSource {
} }
} }
pub type TcpServerType<TcSource, MpscErrorType> = TcpSpacepacketsServer< #[derive(Default)]
(), pub struct ConnectionFinishedHandler {}
CcsdsError<MpscErrorType>,
SyncTcpTmSource,
CcsdsDistributor<CcsdsReceiver<TcSource, MpscErrorType>, MpscErrorType>,
HashSet<PacketId>,
>;
pub struct TcpTask< impl HandledConnectionHandler for ConnectionFinishedHandler {
TcSource: ReceivesCcsdsTc<Error = MpscErrorType> fn handled_connection(&mut self, info: satrs::hal::std::tcp_server::HandledConnectionInfo) {
+ ReceivesEcssPusTc<Error = MpscErrorType> info!(
+ Clone "Served {} TMs and {} TCs for client {:?}",
+ Send info.num_sent_tms, info.num_received_tcs, info.addr
+ 'static, );
MpscErrorType: 'static, }
> {
server: TcpServerType<TcSource, MpscErrorType>,
} }
impl< pub type TcpServer = TcpSpacepacketsServer<
TcSource: ReceivesCcsdsTc<Error = MpscErrorType> SyncTcpTmSource,
+ ReceivesEcssPusTc<Error = MpscErrorType> mpsc::Sender<PacketAsVec>,
+ Clone SimpleSpValidator,
+ Send ConnectionFinishedHandler,
+ 'static, (),
MpscErrorType: 'static + core::fmt::Debug, GenericSendError,
> TcpTask<TcSource, MpscErrorType> >;
{
pub struct TcpTask(pub TcpServer);
impl TcpTask {
pub fn new( pub fn new(
cfg: ServerConfig, cfg: ServerConfig,
tm_source: SyncTcpTmSource, tm_source: SyncTcpTmSource,
tc_receiver: CcsdsDistributor<CcsdsReceiver<TcSource, MpscErrorType>, MpscErrorType>, tc_sender: mpsc::Sender<PacketAsVec>,
packet_id_lookup: HashSet<PacketId>, valid_ids: Vec<PacketId>,
stop_signal: Arc<AtomicBool>,
) -> Result<Self, std::io::Error> { ) -> Result<Self, std::io::Error> {
Ok(Self { Ok(Self(TcpSpacepacketsServer::new(
server: TcpSpacepacketsServer::new(cfg, tm_source, tc_receiver, packet_id_lookup)?, cfg,
}) tm_source,
tc_sender,
SimpleSpValidator::new(TcpComponent::Server, valid_ids),
ConnectionFinishedHandler::default(),
Some(stop_signal),
)?))
} }
pub fn periodic_operation(&mut self) { pub fn periodic_operation(&mut self) {
loop { let result = self.0.handle_all_connections(Some(STOP_CHECK_FREQUENCY));
let result = self.server.handle_next_connection();
match result { match result {
Ok(conn_result) => { Ok(_conn_result) => (),
info!(
"Served {} TMs and {} TCs for client {:?}",
conn_result.num_sent_tms, conn_result.num_received_tcs, conn_result.addr
);
}
Err(e) => { Err(e) => {
warn!("TCP server error: {e:?}"); warn!("TCP server error: {e:?}");
} }
} }
} }
} }
}

View File

@ -0,0 +1,685 @@
use std::io::{self, Read};
use std::net::TcpStream as StdTcpStream;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::mpsc;
use std::time::Duration;
use mio::net::TcpStream as MioTcpStream;
use mio::{Events, Interest, Poll, Token};
use ops_sat_rs::config::tasks::STOP_CHECK_FREQUENCY;
use ops_sat_rs::config::{SPP_CLIENT_WIRETAPPING_RX, SPP_CLIENT_WIRETAPPING_TX};
use satrs::encoding::ccsds::parse_buffer_for_ccsds_space_packets;
use satrs::queue::GenericSendError;
use satrs::spacepackets::PacketId;
use satrs::tmtc::PacketAsVec;
use satrs::ComponentId;
use thiserror::Error;
use super::{SimpleSpValidator, TcpComponent};
#[derive(Debug, Error)]
pub enum ClientError {
#[error("send error: {0}")]
Send(#[from] GenericSendError),
#[error("io error: {0}")]
Io(#[from] io::Error),
}
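/// Outcome of one client operation call: `Ok` means the connection is up and pending data was
/// exchanged, `AttemptedReconnection` means there was no connection and a reconnect attempt
/// was made, and `ConnectionLost` means the connection was dropped or timed out during the call.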
#[derive(Debug, PartialEq, Eq)]
pub enum ClientResult {
Ok,
AttemptedReconnection,
ConnectionLost,
}
#[allow(dead_code)]
pub struct TcpSppClientCommon {
id: ComponentId,
read_buf: [u8; 4096],
tm_tcp_client_rx: mpsc::Receiver<PacketAsVec>,
server_addr: SocketAddr,
tc_source_tx: mpsc::Sender<PacketAsVec>,
validator: SimpleSpValidator,
}
#[allow(dead_code)]
impl TcpSppClientCommon {
pub fn handle_read_bytstream(&mut self, read_bytes: usize) -> Result<(), ClientError> {
if SPP_CLIENT_WIRETAPPING_RX {
log::debug!(
"SPP TCP RX {} bytes: {:x?}",
read_bytes,
&self.read_buf[..read_bytes]
);
}
// This parser is able to deal with broken tail packets, but we ignore those for now..
parse_buffer_for_ccsds_space_packets(
&self.read_buf[..read_bytes],
&self.validator,
self.id,
&self.tc_source_tx,
)?;
Ok(())
}
pub fn write_to_server(&mut self, client: &mut impl io::Write) -> io::Result<()> {
loop {
match self.tm_tcp_client_rx.try_recv() {
Ok(tm) => {
if SPP_CLIENT_WIRETAPPING_TX {
log::debug!(
"SPP TCP TX {}: {:x?}",
tm.packet.len(),
tm.packet.as_slice()
);
}
client.write_all(&tm.packet)?;
}
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => {
log::error!("TM sender to TCP client has disconnected");
break;
}
},
}
}
Ok(())
}
}
pub struct TcpSppClientStd {
common: TcpSppClientCommon,
read_and_idle_delay: Duration,
reconnect_flag: bool,
// Optional to allow periodic reconnection attempts on the TCP server.
stream: Option<StdTcpStream>,
}
impl TcpSppClientStd {
pub fn new(
id: ComponentId,
tc_source_tx: mpsc::Sender<PacketAsVec>,
tm_tcp_client_rx: mpsc::Receiver<PacketAsVec>,
valid_ids: &'static [PacketId],
read_timeout: Duration,
port: u16,
) -> io::Result<Self> {
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port);
let mut client = Self {
common: TcpSppClientCommon {
id,
read_buf: [0; 4096],
tm_tcp_client_rx,
server_addr,
tc_source_tx,
validator: SimpleSpValidator::new(TcpComponent::Client, valid_ids.to_vec()),
},
reconnect_flag: false,
read_and_idle_delay: read_timeout,
stream: None,
};
client.attempt_connect(true)?;
Ok(client)
}
pub fn attempt_connect(&mut self, log_error: bool) -> io::Result<bool> {
Ok(match StdTcpStream::connect(self.common.server_addr) {
Ok(stream) => {
stream.set_read_timeout(Some(self.read_and_idle_delay))?;
self.stream = Some(stream);
true
}
Err(e) => {
if log_error {
log::warn!("error connecting to server: {}", e);
}
false
}
})
}
#[allow(dead_code)]
pub fn connected(&self) -> bool {
self.stream.is_some()
}
pub fn operation(&mut self) -> Result<ClientResult, ClientError> {
let result = self.operation_inner();
if let Ok(client_result) = &result {
if *client_result != ClientResult::Ok {
std::thread::sleep(self.read_and_idle_delay);
}
}
result
}
fn operation_inner(&mut self) -> Result<ClientResult, ClientError> {
if let Some(client) = &mut self.stream {
// Write TM first before blocking on the read call.
self.common.write_to_server(client)?;
match client.read(&mut self.common.read_buf) {
// Not sure whether this can happen or whether this is actually an error condition..
Ok(0) => {
// To avoid spam.
if !self.reconnect_flag {
log::info!("server closed connection");
}
self.stream = None;
return Ok(ClientResult::ConnectionLost);
}
Ok(read_bytes) => {
self.reconnect_flag = false;
self.common.handle_read_bytstream(read_bytes)?;
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock || e.kind() == io::ErrorKind::TimedOut
{
self.common.write_to_server(client)?;
return Ok(ClientResult::ConnectionLost);
}
log::warn!("server error: {e:?}");
if e.kind() == io::ErrorKind::ConnectionReset {
self.stream = None;
return Ok(ClientResult::ConnectionLost);
}
return Err(e.into());
}
}
} else {
if self.attempt_connect(false)? {
// To avoid spam.
if !self.reconnect_flag {
log::info!("reconnected to server succesfully");
}
self.reconnect_flag = true;
return self.operation();
}
return Ok(ClientResult::AttemptedReconnection);
}
Ok(ClientResult::Ok)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum ConnectionStatus {
Unknown,
Connected,
LostConnection,
TryingReconnect,
}
/// Currently not used, not behaving as expected..
#[allow(dead_code)]
pub struct TcpSppClientMio {
common: TcpSppClientCommon,
poll: Poll,
events: Events,
// Optional to allow periodic reconnection attempts on the TCP server.
client: Option<MioTcpStream>,
connection: ConnectionStatus,
}
#[allow(dead_code)]
impl TcpSppClientMio {
pub fn new(
id: ComponentId,
tc_source_tx: mpsc::Sender<PacketAsVec>,
tm_tcp_client_rx: mpsc::Receiver<PacketAsVec>,
valid_ids: &'static [PacketId],
port: u16,
) -> io::Result<Self> {
let poll = Poll::new()?;
let events = Events::with_capacity(128);
let mut client = Self {
common: TcpSppClientCommon {
id,
read_buf: [0; 4096],
tm_tcp_client_rx,
server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port),
tc_source_tx,
validator: SimpleSpValidator::new(TcpComponent::Client, valid_ids.to_vec()),
},
poll,
events,
client: None,
connection: ConnectionStatus::Unknown,
};
client.connect()?;
Ok(client)
}
pub fn connect(&mut self) -> io::Result<()> {
let mut client = MioTcpStream::connect(self.common.server_addr)?;
self.poll.registry().register(
&mut client,
Token(0),
Interest::READABLE | Interest::WRITABLE,
)?;
self.client = Some(client);
self.connection = ConnectionStatus::TryingReconnect;
Ok(())
}
pub fn operation(&mut self) -> Result<(), ClientError> {
match self.connection {
ConnectionStatus::TryingReconnect | ConnectionStatus::Unknown => {
self.check_conn_status()?
}
ConnectionStatus::Connected => {
self.check_conn_status()?;
self.poll
.poll(&mut self.events, Some(STOP_CHECK_FREQUENCY))?;
let events: Vec<mio::event::Event> = self.events.iter().cloned().collect();
for event in events {
if event.token() == Token(0) {
if event.is_readable() {
self.read_from_server()?;
}
// For some reason, we only get this once..
if event.is_writable() {
self.common.write_to_server(self.client.as_mut().unwrap())?;
}
}
}
return Ok(());
}
ConnectionStatus::LostConnection => self.connect()?,
};
std::thread::sleep(STOP_CHECK_FREQUENCY);
Ok(())
}
pub fn read_from_server(&mut self) -> Result<(), ClientError> {
match self
.client
.as_mut()
.unwrap()
.read(&mut self.common.read_buf)
{
Ok(0) => (),
Ok(read_bytes) => self.common.handle_read_bytstream(read_bytes)?,
Err(e) => return Err(e.into()),
}
Ok(())
}
pub fn check_conn_status(&mut self) -> io::Result<()> {
match self.client.as_mut().unwrap().peer_addr() {
Ok(_) => {
if self.connection == ConnectionStatus::Unknown
|| self.connection == ConnectionStatus::TryingReconnect
{
self.connection = ConnectionStatus::Connected;
}
Ok(())
}
Err(e) => {
if e.kind() == io::ErrorKind::NotConnected {
log::warn!("lost connection, or do not have one");
self.connection = ConnectionStatus::LostConnection;
return Ok(());
}
Err(e)
}
}
}
}
#[cfg(test)]
mod tests {
use ops_sat_rs::config::EXPERIMENT_APID;
use satrs::spacepackets::{PacketSequenceCtrl, PacketType, SequenceFlags, SpHeader};
use std::{
io::Write,
net::{TcpListener, TcpStream},
sync::{atomic::AtomicBool, Arc},
thread,
time::Duration,
};
use super::*;
const VALID_IDS: &[PacketId] = &[PacketId::new_for_tc(true, EXPERIMENT_APID)];
const TEST_TC: SpHeader = SpHeader::new(
PacketId::new(PacketType::Tc, true, EXPERIMENT_APID),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 0),
1,
);
const TEST_TM: SpHeader = SpHeader::new(
PacketId::new(PacketType::Tm, true, EXPERIMENT_APID),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 0),
1,
);
fn init() {
let _ = env_logger::builder().is_test(true).try_init();
}
struct TcpServerTestbench {
tcp_server: TcpListener,
}
impl TcpServerTestbench {
fn new(port: u16) -> Self {
let tcp_server =
TcpListener::bind(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port)).unwrap();
tcp_server
.set_nonblocking(true)
.expect("setting TCP server non-blocking failed");
Self { tcp_server }
}
fn local_addr(&self) -> SocketAddr {
self.tcp_server.local_addr().unwrap()
}
fn check_for_connections(&mut self, limit: u32) -> Result<TcpStream, ()> {
for _ in 0..limit {
match self.tcp_server.accept() {
Ok((stream, _)) => {
return Ok(stream);
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
thread::sleep(Duration::from_millis(10));
continue;
}
panic!("TCP server accept error: {:?}", e);
}
}
}
Err(())
}
fn try_reading_one_packet(
&mut self,
stream: &mut TcpStream,
limit: u32,
read_buf: &mut [u8],
) -> usize {
let mut read_data = 0;
for _ in 0..limit {
match stream.read(read_buf) {
Ok(0) => {}
Ok(len) => {
// assert_eq!(&tm_buf, &read_buf[0..len]);
// read_bufd_expected_data = true;
read_data = len;
break;
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
continue;
}
panic!("TCP server read error: {:?}", e);
}
}
if read_data > 0 {
break;
}
}
read_data
}
}
// This test simply verifies that the client properly connects to a server.
#[test]
fn basic_client_test() {
let (tc_source_tx, _tc_source_rx) = mpsc::channel();
let (_tm_tcp_client_tx, tm_tcp_client_rx) = mpsc::channel();
let mut tcp_server = TcpServerTestbench::new(0);
let local_addr = tcp_server.local_addr();
let jh0 = thread::spawn(move || {
tcp_server
.check_for_connections(3)
.expect("no client connection detected");
});
let mut spp_client = TcpSppClientStd::new(
1,
tc_source_tx,
tm_tcp_client_rx,
VALID_IDS,
Duration::from_millis(30),
local_addr.port(),
)
.expect("creating TCP SPP client failed");
spp_client.operation().unwrap();
jh0.join().unwrap();
}
// This test verifies that TM is sent to the server properly.
#[test]
fn basic_client_tm_test() {
let (tc_source_tx, _tc_source_rx) = mpsc::channel();
let (tm_tcp_client_tx, tm_tcp_client_rx) = mpsc::channel();
let mut tcp_server = TcpServerTestbench::new(0);
let local_addr = tcp_server.local_addr();
let mut buf: [u8; 7] = [0; 7];
TEST_TM
.write_to_be_bytes(&mut buf)
.expect("writing TM failed");
let jh0 = thread::spawn(move || {
let mut read_buf: [u8; 64] = [0; 64];
let mut stream = tcp_server
.check_for_connections(3)
.expect("no client connection detected");
stream
.set_read_timeout(Some(Duration::from_millis(10)))
.expect("setting read timeout failed");
let read_bytes = tcp_server.try_reading_one_packet(&mut stream, 5, &mut read_buf);
if read_bytes == 0 {
panic!("did not receive expected data");
} else {
assert_eq!(&buf, &read_buf[0..read_bytes]);
}
});
tm_tcp_client_tx
.send(PacketAsVec::new(0, buf.to_vec()))
.unwrap();
let mut spp_client = TcpSppClientStd::new(
1,
tc_source_tx,
tm_tcp_client_rx,
VALID_IDS,
Duration::from_millis(30),
local_addr.port(),
)
.expect("creating TCP SPP client failed");
spp_client.operation().unwrap();
jh0.join().unwrap();
}
// Test that the client can read telecommands from the server.
#[test]
fn basic_client_tc_test() {
let (tc_source_tx, tc_source_rx) = mpsc::channel();
let (_tm_tcp_client_tx, tm_tcp_client_rx) = mpsc::channel();
let mut tcp_server = TcpServerTestbench::new(0);
let local_addr = tcp_server.local_addr();
let mut buf: [u8; 8] = [0; 8];
TEST_TC
.write_to_be_bytes(&mut buf)
.expect("writing TM failed");
let jh0 = thread::spawn(move || {
let mut stream = tcp_server
.check_for_connections(3)
.expect("no client connection detected");
stream
.set_read_timeout(Some(Duration::from_millis(10)))
.expect("setting read timeout failed");
stream.write_all(&buf).expect("writing TC failed");
});
let mut spp_client = TcpSppClientStd::new(
1,
tc_source_tx,
tm_tcp_client_rx,
VALID_IDS,
Duration::from_millis(30),
local_addr.port(),
)
.expect("creating TCP SPP client failed");
assert!(spp_client.connected());
let mut received_packet = false;
(0..3).for_each(|_| {
spp_client.operation().unwrap();
if let Ok(packet) = tc_source_rx.try_recv() {
assert_eq!(packet.packet, buf.to_vec());
received_packet = true;
}
});
if !received_packet {
panic!("did not receive expected data");
}
jh0.join().unwrap();
}
// Test that the client can both read telecommands from the server and send back
// telemetry to the server.
#[test]
fn basic_client_tmtc_test() {
let (tc_source_tx, tc_source_rx) = mpsc::channel();
let (tm_tcp_client_tx, tm_tcp_client_rx) = mpsc::channel();
let mut tcp_server = TcpServerTestbench::new(0);
let local_addr = tcp_server.local_addr();
let mut tc_buf: [u8; 8] = [0; 8];
let mut tm_buf: [u8; 8] = [0; 8];
TEST_TC
.write_to_be_bytes(&mut tc_buf)
.expect("writing TM failed");
TEST_TM
.write_to_be_bytes(&mut tm_buf)
.expect("writing TM failed");
let jh0 = thread::spawn(move || {
let mut read_buf: [u8; 64] = [0; 64];
let mut stream = tcp_server
.check_for_connections(3)
.expect("no client connection detected");
stream
.set_read_timeout(Some(Duration::from_millis(10)))
.expect("setting read timeout failed");
stream.write_all(&tc_buf).expect("writing TC failed");
let read_bytes = tcp_server.try_reading_one_packet(&mut stream, 5, &mut read_buf);
if read_bytes == 0 {
panic!("did not receive expected data");
} else {
assert_eq!(&tm_buf, &read_buf[0..read_bytes]);
}
});
tm_tcp_client_tx
.send(PacketAsVec::new(0, tm_buf.to_vec()))
.unwrap();
let mut spp_client = TcpSppClientStd::new(
1,
tc_source_tx,
tm_tcp_client_rx,
VALID_IDS,
Duration::from_millis(30),
local_addr.port(),
)
.expect("creating TCP SPP client failed");
assert!(spp_client.connected());
let mut received_packet = false;
(0..3).for_each(|_| {
spp_client.operation().unwrap();
if let Ok(packet) = tc_source_rx.try_recv() {
assert_eq!(packet.packet, tc_buf.to_vec());
received_packet = true;
}
});
if !received_packet {
panic!("did not receive expected data");
}
jh0.join().unwrap();
}
#[test]
fn test_broken_connection() {
init();
let (tc_source_tx, _tc_source_rx) = mpsc::channel();
let (tm_tcp_client_tx, tm_tcp_client_rx) = mpsc::channel();
let mut tcp_server = TcpServerTestbench::new(0);
let local_port = tcp_server.local_addr().port();
let drop_signal = Arc::new(AtomicBool::new(false));
let drop_signal_0 = drop_signal.clone();
let mut tc_buf: [u8; 8] = [0; 8];
let mut tm_buf: [u8; 8] = [0; 8];
TEST_TC
.write_to_be_bytes(&mut tc_buf)
.expect("writing TM failed");
TEST_TM
.write_to_be_bytes(&mut tm_buf)
.expect("writing TM failed");
let mut jh0 = thread::spawn(move || {
tcp_server
.check_for_connections(3)
.expect("no client connection detected");
drop_signal_0.store(true, std::sync::atomic::Ordering::Relaxed);
});
let mut spp_client = TcpSppClientStd::new(
1,
tc_source_tx,
tm_tcp_client_rx,
VALID_IDS,
Duration::from_millis(30),
local_port,
)
.expect("creating TCP SPP client failed");
while !drop_signal.load(std::sync::atomic::Ordering::Relaxed) {
std::thread::sleep(Duration::from_millis(100));
}
tm_tcp_client_tx
.send(PacketAsVec::new(0, tm_buf.to_vec()))
.unwrap();
match spp_client.operation() {
Ok(ClientResult::ConnectionLost) => (),
Ok(ClientResult::Ok) => {
panic!("expected operation error");
}
Err(ClientError::Io(e)) => {
println!("io error: {:?}", e);
if e.kind() != io::ErrorKind::ConnectionReset
&& e.kind() != io::ErrorKind::ConnectionAborted
{
panic!("expected some disconnet error");
}
}
_ => {
panic!("unexpected error")
}
};
assert!(!spp_client.connected());
jh0.join().unwrap();
// spp_client.operation();
tcp_server = TcpServerTestbench::new(local_port);
tm_tcp_client_tx
.send(PacketAsVec::new(0, tm_buf.to_vec()))
.unwrap();
jh0 = thread::spawn(move || {
let mut stream = tcp_server
.check_for_connections(3)
.expect("no client connection detected");
let mut read_buf: [u8; 64] = [0; 64];
let read_bytes = tcp_server.try_reading_one_packet(&mut stream, 5, &mut read_buf);
if read_bytes == 0 {
panic!("did not receive expected data");
} else {
assert_eq!(&tm_buf, &read_buf[0..read_bytes]);
}
});
let result = spp_client.operation();
println!("{:?}", result);
assert!(!spp_client.connected());
assert!(result.is_ok());
jh0.join().unwrap();
}
}

View File

@ -2,18 +2,17 @@ use std::net::{SocketAddr, UdpSocket};
use std::sync::mpsc; use std::sync::mpsc;
use log::{info, warn}; use log::{info, warn};
use satrs::pus::PusTmAsVec; use satrs::hal::std::udp_server::{ReceiveResult, UdpTcServer};
use satrs::{ use satrs::pus::HandlingStatus;
hal::std::udp_server::{ReceiveResult, UdpTcServer}, use satrs::queue::GenericSendError;
tmtc::CcsdsError, use satrs::tmtc::PacketAsVec;
};
pub trait UdpTmHandler { pub trait UdpTmHandler {
fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, recv_addr: &SocketAddr); fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, recv_addr: &SocketAddr);
} }
pub struct DynamicUdpTmHandler { pub struct DynamicUdpTmHandler {
pub tm_rx: mpsc::Receiver<PusTmAsVec>, pub tm_rx: mpsc::Receiver<PacketAsVec>,
} }
impl UdpTmHandler for DynamicUdpTmHandler { impl UdpTmHandler for DynamicUdpTmHandler {
@ -34,42 +33,39 @@ impl UdpTmHandler for DynamicUdpTmHandler {
} }
} }
pub struct UdpTmtcServer<TmHandler: UdpTmHandler, SendError> { pub struct UdpTmtcServer<TmHandler: UdpTmHandler> {
pub udp_tc_server: UdpTcServer<CcsdsError<SendError>>, pub udp_tc_server: UdpTcServer<mpsc::Sender<PacketAsVec>, GenericSendError>,
pub tm_handler: TmHandler, pub tm_handler: TmHandler,
} }
impl<TmHandler: UdpTmHandler, SendError: core::fmt::Debug + 'static> impl<TmHandler: UdpTmHandler> UdpTmtcServer<TmHandler> {
UdpTmtcServer<TmHandler, SendError>
{
pub fn periodic_operation(&mut self) { pub fn periodic_operation(&mut self) {
while self.poll_tc_server() {} loop {
if self.poll_tc_server() == HandlingStatus::Empty {
break;
}
}
if let Some(recv_addr) = self.udp_tc_server.last_sender() { if let Some(recv_addr) = self.udp_tc_server.last_sender() {
self.tm_handler self.tm_handler
.send_tm_to_udp_client(&self.udp_tc_server.socket, &recv_addr); .send_tm_to_udp_client(&self.udp_tc_server.socket, &recv_addr);
} }
} }
fn poll_tc_server(&mut self) -> bool { fn poll_tc_server(&mut self) -> HandlingStatus {
match self.udp_tc_server.try_recv_tc() { match self.udp_tc_server.try_recv_tc() {
Ok(_) => true, Ok(_) => HandlingStatus::HandledOne,
Err(e) => match e { Err(e) => {
ReceiveResult::ReceiverError(e) => match e { match e {
CcsdsError::ByteConversionError(e) => { ReceiveResult::NothingReceived => (),
warn!("packet error: {e:?}"); ReceiveResult::Io(io_error) => {
true warn!("Error receiving TC from UDP server: {io_error}");
} }
CcsdsError::CustomError(e) => { ReceiveResult::Send(send_error) => {
warn!("mpsc custom error {e:?}"); warn!("error sending TM to UDP client: {send_error}");
true
} }
},
ReceiveResult::IoError(e) => {
warn!("IO error {e}");
false
} }
ReceiveResult::NothingReceived => false, HandlingStatus::Empty
}, }
} }
} }
} }
@ -79,29 +75,35 @@ mod tests {
use std::{ use std::{
collections::VecDeque, collections::VecDeque,
net::IpAddr, net::IpAddr,
sync::{Arc, Mutex}, sync::{mpsc::TryRecvError, Arc, Mutex},
}; };
use ops_sat_rs::config::{EXPERIMENT_APID, OBSW_SERVER_ADDR};
use satrs::{ use satrs::{
spacepackets::{ spacepackets::{
ecss::{tc::PusTcCreator, WritablePusPacket}, ecss::{tc::PusTcCreator, WritablePusPacket},
SpHeader, SpHeader,
}, },
tmtc::ReceivesTcCore, tmtc::PacketSenderRaw,
ComponentId,
}; };
use ops_sat_rs::config::{components, OBSW_SERVER_ADDR};
use super::*; use super::*;
const UDP_SERVER_ID: ComponentId = 0x05;
#[derive(Default, Debug, Clone)] #[derive(Default, Debug, Clone)]
pub struct TestReceiver { pub struct TestReceiver {
tc_vec: Arc<Mutex<VecDeque<Vec<u8>>>>, tc_vec: Arc<Mutex<VecDeque<PacketAsVec>>>,
} }
impl ReceivesTcCore for TestReceiver { impl PacketSenderRaw for TestReceiver {
type Error = CcsdsError<()>; type Error = ();
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> { fn send_packet(&self, sender_id: ComponentId, packet: &[u8]) -> Result<(), Self::Error> {
self.tc_vec.lock().unwrap().push_back(tc_raw.to_vec()); self.tc_vec
.lock()
.unwrap()
.push_back(PacketAsVec::new(sender_id, packet.to_vec()));
Ok(()) Ok(())
} }
} }
@ -120,26 +122,23 @@ mod tests {
#[test] #[test]
fn test_basic() { fn test_basic() {
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0); let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0);
let test_receiver = TestReceiver::default(); let (tx, rx) = mpsc::channel();
let tc_queue = test_receiver.tc_vec.clone(); let udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, sock_addr, 2048, tx).unwrap();
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(test_receiver)).unwrap();
let tm_handler = TestTmHandler::default(); let tm_handler = TestTmHandler::default();
let tm_handler_calls = tm_handler.addrs_to_send_to.clone(); let _tm_handler_calls = tm_handler.addrs_to_send_to.clone();
let mut udp_dyn_server = UdpTmtcServer { let mut udp_dyn_server = UdpTmtcServer {
udp_tc_server, udp_tc_server,
tm_handler, tm_handler,
}; };
udp_dyn_server.periodic_operation(); udp_dyn_server.periodic_operation();
assert!(tc_queue.lock().unwrap().is_empty()); matches!(rx.try_recv(), Err(TryRecvError::Empty));
assert!(tm_handler_calls.lock().unwrap().is_empty());
} }
#[test] #[test]
fn test_transactions() { fn test_transactions() {
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0); let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0);
let test_receiver = TestReceiver::default(); let (tx, rx) = mpsc::channel();
let tc_queue = test_receiver.tc_vec.clone(); let udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, sock_addr, 2048, tx).unwrap();
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(test_receiver)).unwrap();
let server_addr = udp_tc_server.socket.local_addr().unwrap(); let server_addr = udp_tc_server.socket.local_addr().unwrap();
let tm_handler = TestTmHandler::default(); let tm_handler = TestTmHandler::default();
let tm_handler_calls = tm_handler.addrs_to_send_to.clone(); let tm_handler_calls = tm_handler.addrs_to_send_to.clone();
@ -147,7 +146,7 @@ mod tests {
udp_tc_server, udp_tc_server,
tm_handler, tm_handler,
}; };
let sph = SpHeader::new_for_unseg_tc(components::Apid::GenericPus as u16, 0, 0); let sph = SpHeader::new_for_unseg_tc(EXPERIMENT_APID, 0, 0);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true) let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true)
.to_vec() .to_vec()
.unwrap(); .unwrap();
@ -157,10 +156,9 @@ mod tests {
client.send(&ping_tc).unwrap(); client.send(&ping_tc).unwrap();
udp_dyn_server.periodic_operation(); udp_dyn_server.periodic_operation();
{ {
let mut tc_queue = tc_queue.lock().unwrap(); let packet_with_sender = rx.try_recv().unwrap();
assert!(!tc_queue.is_empty()); assert_eq!(packet_with_sender.packet, ping_tc);
let received_tc = tc_queue.pop_front().unwrap(); matches!(rx.try_recv(), Err(TryRecvError::Empty));
assert_eq!(received_tc, ping_tc);
} }
{ {
@ -171,7 +169,7 @@ mod tests {
assert_eq!(received_addr, client_addr); assert_eq!(received_addr, client_addr);
} }
udp_dyn_server.periodic_operation(); udp_dyn_server.periodic_operation();
assert!(tc_queue.lock().unwrap().is_empty()); matches!(rx.try_recv(), Err(TryRecvError::Empty));
// Still tries to send to the same client. // Still tries to send to the same client.
{ {
let mut tm_handler_calls = tm_handler_calls.lock().unwrap(); let mut tm_handler_calls = tm_handler_calls.lock().unwrap();

View File

@ -3,6 +3,7 @@ use satrs::spacepackets::time::TimeWriter;
pub mod config;
#[derive(Debug)]
pub struct TimeStampHelper {
stamper: CdsTime,
time_stamp: [u8; 7],
@ -31,3 +32,12 @@ impl Default for TimeStampHelper {
}
}
}
pub fn update_time(time_provider: &mut CdsTime, timestamp: &mut [u8]) {
time_provider
.update_from_now()
.expect("Could not get current time");
time_provider
.write_to_bytes(timestamp)
.expect("Writing timestamp failed");
}

View File

@ -1,17 +1,38 @@
use once_cell::sync::OnceCell;
use ops_sat_rs::config::LOG_FOLDER;
use std::path::{Path, PathBuf};
pub static LOGFILE_PATH: OnceCell<PathBuf> = OnceCell::new();
pub fn setup_logger() -> Result<(), fern::InitError> {
if !Path::new(LOG_FOLDER).exists() && std::fs::create_dir_all(LOG_FOLDER).is_err() {
eprintln!("Failed to create log folder '{}'", LOG_FOLDER);
}
let mut path_buf = PathBuf::from(LOG_FOLDER);
path_buf.push(
format!(
"output_{}.log",
humantime::format_rfc3339_seconds(std::time::SystemTime::now())
)
.replace(':', "_"),
);
println!("Creating logfile {:?}", path_buf);
LOGFILE_PATH
.set(path_buf.clone())
.expect("Error setting global logfile path");
fern::Dispatch::new()
-.format(|out, message, record| {
+.format(move |out, message, record| {
out.finish(format_args!(
-"{}[{}][{}] {}",
+"[{}][{}][{}] {}",
-chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
+humantime::format_rfc3339_millis(std::time::SystemTime::now()),
-std::thread::current().name().expect("unnamed_thread"),
+std::thread::current().name().unwrap_or("unnamed_thread"),
record.level(),
message
))
})
.level(log::LevelFilter::Debug)
.chain(std::io::stdout())
-.chain(fern::log_file("output.log")?)
+.chain(fern::log_file(path_buf.as_os_str())?)
.apply()?;
Ok(())
}

View File

@@ -1,195 +1,387 @@
use std::{
+    env::temp_dir,
    net::{IpAddr, SocketAddr},
-    sync::mpsc,
+    sync::{atomic::AtomicBool, mpsc, Arc},
    thread,
    time::Duration,
};

use log::info;
-use ops_sat_rs::config::tasks::FREQ_MS_PUS_STACK;
use ops_sat_rs::config::{
-    tasks::FREQ_MS_UDP_TMTC, OBSW_SERVER_ADDR, PACKET_ID_VALIDATOR, SERVER_PORT,
+    cfg_file::create_app_config,
+    components::{CONTROLLER_ID, TCP_SERVER, TCP_SPP_CLIENT, UDP_SERVER},
+    pool::create_sched_tc_pool,
+    set_up_ground_dir, set_up_home_path, set_up_low_prio_ground_dir,
+    tasks::{FREQ_MS_CAMERA_HANDLING, FREQ_MS_CTRL, FREQ_MS_PUS_STACK, STOP_CHECK_FREQUENCY},
+    HOME_PATH, STOP_FILE_NAME, VALID_PACKET_ID_LIST, VERSION,
};
+use ops_sat_rs::config::{components::CAMERA_HANDLER, tasks::FREQ_MS_EVENT_HANDLING};
+use ops_sat_rs::config::{tasks::FREQ_MS_UDP_TMTC, OBSW_SERVER_ADDR, SERVER_PORT};
+use ops_sat_rs::TimeStampHelper;
use satrs::{
    hal::std::{tcp_server::ServerConfig, udp_server::UdpTcServer},
-    tmtc::CcsdsDistributor,
+    pus::event_man::EventRequestWithToken,
};

-use crate::pus::stack::PusStack;
-use crate::pus::test::create_test_service_dynamic;
-use crate::pus::{PusReceiver, PusTcMpscRouter};
-use crate::tm_funnel::TmFunnelDynamic;
-use crate::tmtc::TcSourceTaskDynamic;
-use crate::{
-    ccsds::CcsdsReceiver,
-    interface::tcp::{SyncTcpTmSource, TcpTask},
-    interface::udp::{DynamicUdpTmHandler, UdpTmtcServer},
-    logger::setup_logger,
-    tmtc::PusTcSourceProviderDynamic,
-};
+use crate::{controller::ControllerPathCollection, tmtc::tm_sink::TmFunnelDynamic};
+use crate::{controller::ExperimentController, pus::test::create_test_service};
+use crate::{
+    events::EventHandler,
+    pus::{
+        hk::create_hk_service, mode::create_mode_service, scheduler::create_scheduler_service,
+        PusTcDistributor, PusTcMpscRouter,
+    },
+};
+use crate::{handlers::camera::Ims100BatchHandler, pus::event::create_event_service};
+use crate::{
+    interface::tcp_server::{SyncTcpTmSource, TcpTask},
+    interface::udp_server::{DynamicUdpTmHandler, UdpTmtcServer},
+    logger::setup_logger,
+};
+use crate::{interface::tcp_spp_client::TcpSppClientStd, tmtc::tc_source::TcSourceTaskDynamic};
+use crate::{
+    pus::{action::create_action_service, stack::PusStack},
+    requests::GenericRequestRouter,
+};

-mod ccsds;
+mod controller;
+mod events;
+mod handlers;
mod interface;
mod logger;
mod pus;
mod requests;
-mod tm_funnel;
mod tmtc;

+#[allow(dead_code)]
fn main() {
+    let version_str = VERSION.unwrap_or("?");
+    println!("OPS-SAT Rust Experiment OBSW v{}", version_str);
    setup_logger().expect("setting up logging with fern failed");
-    println!("OPS-SAT Rust experiment OBSW");
+    let home_path = set_up_home_path();
+    set_up_low_prio_ground_dir(home_path.clone());
+    set_up_ground_dir(home_path.clone());
+    let app_cfg = create_app_config(home_path.clone());
+    info!("App Configuration: {:?}", app_cfg);
+
+    let stop_signal = Arc::new(AtomicBool::new(false));
    let (tc_source_tx, tc_source_rx) = mpsc::channel();
    let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel();
-    let (tm_server_tx, tm_server_rx) = mpsc::channel();
-    let tc_source = PusTcSourceProviderDynamic(tc_source_tx);
+    let (tm_tcp_server_tx, tm_tcp_server_rx) = mpsc::channel();
+    let (tm_tcp_client_tx, tm_tcp_client_rx) = mpsc::channel();
    let (pus_test_tx, pus_test_rx) = mpsc::channel();
-    // let (pus_event_tx, pus_event_rx) = mpsc::channel();
-    // let (pus_sched_tx, pus_sched_rx) = mpsc::channel();
-    // let (pus_hk_tx, pus_hk_rx) = mpsc::channel();
-    // let (pus_action_tx, pus_action_rx) = mpsc::channel();
-    // let (pus_mode_tx, pus_mode_rx) = mpsc::channel();
-    // let (_pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel();
-    // let (pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::channel();
-    // let (pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::channel();
+    let (pus_event_tx, pus_event_rx) = mpsc::channel();
+    let (pus_sched_tx, pus_sched_rx) = mpsc::channel();
+    let (pus_hk_tx, pus_hk_rx) = mpsc::channel();
+    let (pus_action_tx, pus_action_rx) = mpsc::channel();
+    let (pus_mode_tx, pus_mode_rx) = mpsc::channel();
+
+    // Create event handling components
+    // These sender handles are used to send event requests, for example to enable or disable
+    // certain events.
+    let (event_tx, event_rx) = mpsc::sync_channel(100);
+    let (event_request_tx, event_request_rx) = mpsc::channel::<EventRequestWithToken>();
+    // The event task is the core handler to perform the event routing and TM handling as specified
+    // in the sat-rs documentation.
+    let mut event_handler = EventHandler::new(tm_funnel_tx.clone(), event_rx, event_request_rx);
+
+    let (pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel();
+    let (_pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::channel();
+    let (_pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::channel();
+    let (controller_composite_tx, controller_composite_rx) = mpsc::channel();
+    // let (controller_action_reply_tx, controller_action_reply_rx) = mpsc::channel();
+    let (camera_composite_tx, camera_composite_rx) = mpsc::channel();
+
+    // Some requests are targetable. This map is used to retrieve sender handles based on a target ID.
+    let mut request_map = GenericRequestRouter::default();
+    request_map
+        .composite_router_map
+        .insert(CONTROLLER_ID.id(), controller_composite_tx);
+    request_map
+        .composite_router_map
+        .insert(CAMERA_HANDLER.id(), camera_composite_tx);

    let pus_router = PusTcMpscRouter {
        test_tc_sender: pus_test_tx,
-        // event_tc_sender: pus_event_tx,
-        // sched_tc_sender: pus_sched_tx,
-        // hk_tc_sender: pus_hk_tx,
-        // action_tc_sender: pus_action_tx,
-        // mode_tc_sender: pus_mode_tx,
+        event_tc_sender: pus_event_tx,
+        sched_tc_sender: pus_sched_tx,
+        hk_tc_sender: pus_hk_tx,
+        action_tc_sender: pus_action_tx,
+        mode_tc_sender: pus_mode_tx,
    };

-    let pus_test_service = create_test_service_dynamic(
-        tm_funnel_tx.clone(),
-        // event_handler.clone_event_sender(),
-        pus_test_rx,
-    );
-    // let pus_scheduler_service = create_scheduler_service_dynamic(
-    //     tm_funnel_tx.clone(),
-    //     tc_source.0.clone(),
-    //     pus_sched_rx,
-    //     create_sched_tc_pool(),
-    // );
-    //
-    // let pus_event_service =
-    //     create_event_service_dynamic(tm_funnel_tx.clone(), pus_event_rx, event_request_tx);
-    // let pus_action_service = create_action_service_dynamic(
-    //     tm_funnel_tx.clone(),
-    //     pus_action_rx,
-    //     request_map.clone(),
-    //     pus_action_reply_rx,
-    // );
-    // let pus_hk_service = create_hk_service_dynamic(
-    //     tm_funnel_tx.clone(),
-    //     pus_hk_rx,
-    //     request_map.clone(),
-    //     pus_hk_reply_rx,
-    // );
-    // let pus_mode_service = create_mode_service_dynamic(
-    //     tm_funnel_tx.clone(),
-    //     pus_mode_rx,
-    //     request_map,
-    //     pus_mode_reply_rx,
-    // );
+    let pus_test_service = create_test_service(tm_funnel_tx.clone(), event_tx.clone(), pus_test_rx);
+    let pus_scheduler_service = create_scheduler_service(
+        tm_funnel_tx.clone(),
+        tc_source_tx.clone(),
+        pus_sched_rx,
+        create_sched_tc_pool(),
+    );
+    let pus_event_service =
+        create_event_service(tm_funnel_tx.clone(), pus_event_rx, event_request_tx);
+    let pus_action_service = create_action_service(
+        tm_funnel_tx.clone(),
+        pus_action_rx,
+        request_map.clone(),
+        pus_action_reply_rx,
+    );
+    let pus_hk_service = create_hk_service(
+        tm_funnel_tx.clone(),
+        pus_hk_rx,
+        request_map.clone(),
+        pus_hk_reply_rx,
+    );
+    let pus_mode_service = create_mode_service(
+        tm_funnel_tx.clone(),
+        pus_mode_rx,
+        request_map,
+        pus_mode_reply_rx,
+    );

    let mut pus_stack = PusStack::new(
        pus_test_service,
-        // pus_hk_service,
-        // pus_event_service,
-        // pus_action_service,
-        // pus_scheduler_service,
-        // pus_mode_service,
+        pus_hk_service,
+        pus_event_service,
+        pus_action_service,
+        pus_scheduler_service,
+        pus_mode_service,
    );

-    let ccsds_receiver = CcsdsReceiver { tc_source };
    let mut tmtc_task = TcSourceTaskDynamic::new(
        tc_source_rx,
-        PusReceiver::new(tm_funnel_tx.clone(), pus_router),
+        PusTcDistributor::new(tm_funnel_tx.clone(), pus_router),
    );

    let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
-    let udp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver.clone());
-    let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(udp_ccsds_distributor))
-        .expect("creating UDP TMTC server failed");
-    let mut udp_tmtc_server = UdpTmtcServer {
-        udp_tc_server,
-        tm_handler: DynamicUdpTmHandler {
-            tm_rx: tm_server_rx,
-        },
-    };
+    let udp_tc_server_result =
+        UdpTcServer::new(UDP_SERVER.id(), sock_addr, 2048, tc_source_tx.clone());
+    if udp_tc_server_result.is_err() {
+        log::error!("UDP server creation failed");
+    }
+    let mut opt_udp_tmtc_server = None;
+    if let Ok(udp_tc_server) = udp_tc_server_result {
+        opt_udp_tmtc_server = Some(UdpTmtcServer {
+            udp_tc_server,
+            tm_handler: DynamicUdpTmHandler {
+                tm_rx: tm_tcp_server_rx,
+            },
+        });
+    }

-    let tcp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver);
-    let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
+    let tcp_server_cfg = ServerConfig::new(
+        TCP_SERVER.id(),
+        sock_addr,
+        Duration::from_millis(400),
+        4096,
+        8192,
+    );
    let sync_tm_tcp_source = SyncTcpTmSource::new(200);
    let mut tcp_server = TcpTask::new(
        tcp_server_cfg,
        sync_tm_tcp_source.clone(),
-        tcp_ccsds_distributor,
-        PACKET_ID_VALIDATOR.clone(),
+        tc_source_tx.clone(),
+        VALID_PACKET_ID_LIST.to_vec(),
+        stop_signal.clone(),
    )
    .expect("tcp server creation failed");

-    let mut tm_funnel = TmFunnelDynamic::new(sync_tm_tcp_source, tm_funnel_rx, tm_server_tx);
+    let mut tm_sink = TmFunnelDynamic::new(
+        sync_tm_tcp_source,
+        tm_funnel_rx,
+        tm_tcp_server_tx,
+        tm_tcp_client_tx,
+        stop_signal.clone(),
+    );
+
+    let mut home_path_stop_file = home_path.clone();
+    home_path_stop_file.push(STOP_FILE_NAME);
+    let mut tmp_path_stop_file = temp_dir();
+    tmp_path_stop_file.push(STOP_FILE_NAME);
+
+    let mut controller = ExperimentController::new(
+        controller_composite_rx,
+        pus_action_reply_tx.clone(),
+        stop_signal.clone(),
+        ControllerPathCollection::new(&home_path),
+    );
+
+    let mut tcp_spp_client = TcpSppClientStd::new(
+        TCP_SPP_CLIENT.id(),
+        tc_source_tx,
+        tm_tcp_client_rx,
+        VALID_PACKET_ID_LIST,
+        STOP_CHECK_FREQUENCY,
+        app_cfg.tcp_spp_server_port,
+    )
+    .expect("creating TCP SPP client failed");
+
+    let timestamp_helper = TimeStampHelper::default();
+    // TODO: If the host feature is active, we should use an image executor
+    // which only displays the execution parameters and does not try
+    // to call the batch application which does not exist.
+    let mut camera_handler: Ims100BatchHandler = Ims100BatchHandler::new_with_default_img_executor(
+        CAMERA_HANDLER,
+        HOME_PATH.get().unwrap(),
+        camera_composite_rx,
+        tm_funnel_tx.clone(),
+        pus_action_reply_tx.clone(),
+        timestamp_helper,
+    );

+    // Main Task Thread Definitions
+    // Main Experiment Control Task
+    info!("Starting CTRL task");
+    let ctrl_stop_signal = stop_signal.clone();
+    let jh_ctrl_thread = thread::Builder::new()
+        .name("ops-sat ctrl".to_string())
+        .spawn(move || loop {
+            controller.perform_operation();
+            if ctrl_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
+                break;
+            }
+            thread::sleep(Duration::from_millis(FREQ_MS_CTRL));
+        })
+        .unwrap();

+    // TMTC and UDP Task
    info!("Starting TMTC and UDP task");
+    let tmtc_stop_signal = stop_signal.clone();
    let jh_udp_tmtc = thread::Builder::new()
-        .name("TMTC and UDP".to_string())
+        .name("ops-sat tmtc-udp".to_string())
        .spawn(move || {
            info!("Running UDP server on port {SERVER_PORT}");
            loop {
-                udp_tmtc_server.periodic_operation();
+                if let Some(ref mut udp_tmtc_server) = opt_udp_tmtc_server {
+                    udp_tmtc_server.periodic_operation();
+                }
                tmtc_task.periodic_operation();
+                if tmtc_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
+                    break;
+                }
                thread::sleep(Duration::from_millis(FREQ_MS_UDP_TMTC));
            }
        })
        .unwrap();

-    info!("Starting TCP task");
-    let jh_tcp = thread::Builder::new()
-        .name("TCP".to_string())
+    // TCP Server Task
+    let tcp_server_stop_signal = stop_signal.clone();
+    info!("Starting TCP server task");
+    let jh_tcp_server = thread::Builder::new()
+        .name("ops-sat tcp-server".to_string())
        .spawn(move || {
            info!("Running TCP server on port {SERVER_PORT}");
            loop {
                tcp_server.periodic_operation();
+                if tcp_server_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
+                    break;
+                }
            }
        })
        .unwrap();

-    info!("Starting TM funnel task");
-    let jh_tm_funnel = thread::Builder::new()
-        .name("TM Funnel".to_string())
-        .spawn(move || loop {
-            tm_funnel.operation();
-        })
-        .unwrap();
+    // TCP SPP Client Task
+    // We could also move this to the existing TCP server thread, but we would have to adapt
+    // the server code so it no longer blocks, and pause manually if both the client and the
+    // server are idle with nothing to do.
+    let tcp_client_stop_signal = stop_signal.clone();
+    info!("Starting TCP SPP client task");
+    let jh_tcp_client = thread::Builder::new()
+        .name("ops-sat tcp-client".to_string())
+        .spawn(move || {
+            info!("Running TCP SPP client");
+            loop {
+                match tcp_spp_client.operation() {
+                    Ok(_result) => (),
+                    Err(e) => {
+                        log::error!("TCP SPP client error: {}", e);
+                    }
+                }
+                if tcp_client_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
+                    break;
+                }
+            }
+        })
+        .unwrap();

-    info!("Starting PUS handler thread");
+    // TM Funnel Task
+    info!("Starting TM funnel task");
+    let tm_sink_stop_signal = stop_signal.clone();
+    let jh_tm_funnel = thread::Builder::new()
+        .name("ops-sat tm-sink".to_string())
+        .spawn(move || loop {
+            tm_sink.operation();
+            if tm_sink_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
+                break;
+            }
+        })
+        .unwrap();

+    info!("Starting event handling task");
+    let event_stop_signal = stop_signal.clone();
+    let jh_event_handling = thread::Builder::new()
+        .name("sat-rs events".to_string())
+        .spawn(move || loop {
+            event_handler.periodic_operation();
+            if event_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
+                break;
+            }
+            thread::sleep(Duration::from_millis(FREQ_MS_EVENT_HANDLING));
+        })
+        .unwrap();

+    // PUS Handler Task
+    info!("Starting PUS handlers task");
+    let pus_stop_signal = stop_signal.clone();
    let jh_pus_handler = thread::Builder::new()
-        .name("PUS".to_string())
+        .name("ops-sat pus".to_string())
        .spawn(move || loop {
            pus_stack.periodic_operation();
+            if pus_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
+                break;
+            }
            thread::sleep(Duration::from_millis(FREQ_MS_PUS_STACK));
        })
        .unwrap();

+    // Camera Handler Task
+    info!("Starting camera handler task");
+    let camera_stop_signal = stop_signal.clone();
+    let jh_camera_handler = thread::Builder::new()
+        .name("ops-sat camera".to_string())
+        .spawn(move || loop {
+            camera_handler.periodic_operation();
+            if camera_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
+                break;
+            }
+            thread::sleep(Duration::from_millis(FREQ_MS_CAMERA_HANDLING));
+        })
+        .unwrap();

+    // Join Threads
+    jh_ctrl_thread
+        .join()
+        .expect("Joining Controller thread failed");
    jh_udp_tmtc
        .join()
        .expect("Joining UDP TMTC server thread failed");
-    jh_tcp
+    jh_tcp_server
        .join()
        .expect("Joining TCP TMTC server thread failed");
+    jh_tcp_client
+        .join()
+        .expect("Joining TCP TMTC client thread failed");
    jh_tm_funnel
        .join()
        .expect("Joining TM Funnel thread failed");
    jh_pus_handler
        .join()
-        .expect("Joining PUS handler thread failed");
+        .expect("Joining PUS handlers thread failed");
+    jh_event_handling
+        .join()
+        .expect("Joining event handling thread failed");
+    jh_camera_handler
+        .join()
+        .expect("Joining camera handler thread failed");
}
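Every worker thread in the new main follows the same cooperative shutdown pattern: check a shared Arc<AtomicBool> once per cycle and break when it is set. A stripped-down, self-contained sketch of that pattern (illustrative only, not code from this diff):

    use std::sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    };
    use std::{thread, time::Duration};

    fn stop_signal_sketch() {
        let stop_signal = Arc::new(AtomicBool::new(false));
        let worker_stop = stop_signal.clone();
        let worker = thread::Builder::new()
            .name("example worker".to_string())
            .spawn(move || loop {
                // Periodic work would go here.
                if worker_stop.load(Ordering::Relaxed) {
                    break;
                }
                thread::sleep(Duration::from_millis(100));
            })
            .unwrap();
        // Some other component (e.g. the controller) eventually requests shutdown.
        stop_signal.store(true, Ordering::Relaxed);
        worker.join().expect("joining worker failed");
    }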

src/pus/action.rs (new file, 737 lines)

@@ -0,0 +1,737 @@
use log::warn;
use ops_sat_rs::config::components::PUS_ACTION_SERVICE;
use ops_sat_rs::config::tmtc_err;
use ops_sat_rs::TimeStampHelper;
use satrs::action::{ActionRequest, ActionRequestVariant};
use satrs::pus::action::{
ActionReplyPus, ActionReplyVariant, ActivePusActionRequestStd, DefaultActiveActionRequestMap,
};
use satrs::pus::verification::{
handle_completion_failure_with_generic_params, handle_step_failure_with_generic_params,
FailParamHelper, FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
ActiveRequestProvider, EcssTcAndToken, EcssTcInVecConverter, EcssTmSender, EcssTmtcError,
GenericConversionError, HandlingStatus, PusPacketHandlingError, PusReplyHandler,
PusServiceHelper, PusTcToRequestConverter, PusTmVariant,
};
use satrs::request::{GenericMessage, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use satrs::spacepackets::ecss::{EcssEnumU16, PusPacket, PusServiceId};
use satrs::spacepackets::SpHeader;
use satrs::tmtc::PacketAsVec;
use std::sync::mpsc;
use std::time::Duration;
use crate::requests::GenericRequestRouter;
use super::{
create_verification_reporter, generic_pus_request_timeout_handler, PusTargetedRequestService,
TargetedPusService,
};
pub const DATA_REPLY: u8 = 130;
pub struct ActionReplyHandler {
fail_data_buf: [u8; 128],
}
impl Default for ActionReplyHandler {
fn default() -> Self {
Self {
fail_data_buf: [0; 128],
}
}
}
impl PusReplyHandler<ActivePusActionRequestStd, ActionReplyPus> for ActionReplyHandler {
type Error = EcssTmtcError;
fn handle_unrequested_reply(
&mut self,
reply: &GenericMessage<ActionReplyPus>,
_tm_sender: &impl EcssTmSender,
) -> Result<(), Self::Error> {
warn!("received unexpected reply for service 8: {reply:?}");
Ok(())
}
fn handle_reply(
&mut self,
reply: &GenericMessage<ActionReplyPus>,
active_request: &ActivePusActionRequestStd,
tm_sender: &(impl EcssTmSender + ?Sized),
verification_handler: &impl VerificationReportingProvider,
timestamp: &[u8],
) -> Result<bool, Self::Error> {
let verif_token: VerificationToken<TcStateStarted> = active_request
.token()
.try_into()
.expect("invalid token state");
let remove_entry = match &reply.message.variant {
ActionReplyVariant::CompletionFailed { error_code, params } => {
let error_propagated = handle_completion_failure_with_generic_params(
tm_sender,
verif_token,
verification_handler,
FailParamHelper {
error_code,
params: params.as_ref(),
timestamp,
small_data_buf: &mut self.fail_data_buf,
},
)?;
if !error_propagated {
log::warn!(
"error params for completion failure were not propated: {:?}",
params.as_ref()
);
}
true
}
ActionReplyVariant::StepFailed {
error_code,
step,
params,
} => {
let error_propagated = handle_step_failure_with_generic_params(
tm_sender,
verif_token,
verification_handler,
FailParamHelper {
error_code,
params: params.as_ref(),
timestamp,
small_data_buf: &mut self.fail_data_buf,
},
&EcssEnumU16::new(*step),
)?;
if !error_propagated {
log::warn!(
"error params for completion failure were not propated: {:?}",
params.as_ref()
);
}
true
}
ActionReplyVariant::Completed => {
verification_handler.completion_success(tm_sender, verif_token, timestamp)?;
true
}
ActionReplyVariant::StepSuccess { step } => {
verification_handler.step_success(
tm_sender,
&verif_token,
timestamp,
EcssEnumU16::new(*step),
)?;
false
}
_ => false,
};
Ok(remove_entry)
}
fn handle_request_timeout(
&mut self,
active_request: &ActivePusActionRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(), Self::Error> {
generic_pus_request_timeout_handler(
tm_sender,
active_request,
verification_handler,
time_stamp,
"action",
)
}
}
#[derive(Default)]
pub struct ActionRequestConverter {}
impl PusTcToRequestConverter<ActivePusActionRequestStd, ActionRequest> for ActionRequestConverter {
type Error = GenericConversionError;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
tm_sender: &(impl EcssTmSender + ?Sized),
verif_reporter: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(ActivePusActionRequestStd, ActionRequest), Self::Error> {
let subservice = tc.subservice();
let user_data = tc.user_data();
if user_data.len() < 8 {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA),
)
.expect("Sending start failure failed");
return Err(GenericConversionError::NotEnoughAppData {
expected: 8,
found: user_data.len(),
});
}
let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).unwrap();
let action_id = u32::from_be_bytes(user_data[4..8].try_into().unwrap());
if subservice == 128 {
let req_variant = if user_data.len() == 8 {
ActionRequestVariant::NoData
} else {
ActionRequestVariant::VecData(user_data[8..].to_vec())
};
Ok((
ActivePusActionRequestStd::new(
action_id,
target_id_and_apid.into(),
token.into(),
Duration::from_secs(30),
),
ActionRequest::new(action_id, req_variant),
))
} else {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE),
)
.expect("Sending start failure failed");
Err(GenericConversionError::InvalidSubservice(subservice))
}
}
}
pub fn create_action_service(
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
action_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ActionReplyPus>>,
) -> ActionServiceWrapper {
let action_request_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_ACTION_SERVICE.id(),
pus_action_rx,
tm_funnel_tx,
create_verification_reporter(PUS_ACTION_SERVICE.id(), PUS_ACTION_SERVICE.apid, 2048),
EcssTcInVecConverter::default(),
),
ActionRequestConverter::default(),
DefaultActiveActionRequestMap::default(),
ActionReplyHandler::default(),
action_router,
reply_receiver,
);
ActionServiceWrapper {
service: action_request_handler,
}
}
pub struct ActionServiceWrapper {
pub(crate) service: PusTargetedRequestService<
VerificationReporter,
ActionRequestConverter,
ActionReplyHandler,
DefaultActiveActionRequestMap,
ActivePusActionRequestStd,
ActionRequest,
ActionReplyPus,
>,
}
impl TargetedPusService for ActionServiceWrapper {
const SERVICE_ID: u8 = PusServiceId::Action as u8;
const SERVICE_STR: &'static str = "action";
delegate::delegate! {
to self.service {
fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, PusPacketHandlingError>;
fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError>;
fn check_for_request_timeouts(&mut self);
}
}
}
pub fn send_data_reply<TmSender: EcssTmSender>(
apid_target: UniqueApidTargetId,
reply_data: &Vec<u8>,
stamp_helper: &TimeStampHelper,
tm_sender: &TmSender,
) -> Result<(), EcssTmtcError> {
let sp_header = SpHeader::new_from_apid(apid_target.apid);
let sec_header = PusTmSecondaryHeader::new(8, DATA_REPLY, 0, 0, stamp_helper.stamp());
let mut data = Vec::new();
data.extend(apid_target.apid.to_be_bytes());
data.extend(apid_target.unique_id.to_be_bytes());
data.extend(reply_data);
log::trace!(
"PUS action reply: {}",
String::from_utf8(data.clone()[6..].to_vec()).expect("Error decoding data reply.")
);
let data_reply_tm = PusTmCreator::new(sp_header, sec_header, &data, true);
tm_sender.send_tm(apid_target.id(), PusTmVariant::Direct(data_reply_tm))
}
#[cfg(test)]
mod tests {
use satrs::pus::test_util::{
TEST_APID, TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
};
use satrs::pus::verification;
use satrs::pus::verification::test_util::TestVerificationReporter;
use satrs::request::MessageMetadata;
use satrs::ComponentId;
use satrs::{
res_code::ResultU16,
spacepackets::{
ecss::{
tc::{PusTcCreator, PusTcSecondaryHeader},
tm::PusTmReader,
WritablePusPacket,
},
SpHeader,
},
};
use crate::{
pus::tests::{PusConverterTestbench, ReplyHandlerTestbench, TargetedPusRequestTestbench},
requests::CompositeRequest,
};
use super::*;
impl
TargetedPusRequestTestbench<
ActionRequestConverter,
ActionReplyHandler,
DefaultActiveActionRequestMap,
ActivePusActionRequestStd,
ActionRequest,
ActionReplyPus,
>
{
pub fn new_for_action(owner_id: ComponentId, target_id: ComponentId) -> Self {
let _ = env_logger::builder().is_test(true).try_init();
let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel();
let (pus_action_tx, pus_action_rx) = mpsc::channel();
let (action_reply_tx, action_reply_rx) = mpsc::channel();
let (action_req_tx, action_req_rx) = mpsc::channel();
let verif_reporter = TestVerificationReporter::new(owner_id);
let mut generic_req_router = GenericRequestRouter::default();
generic_req_router
.composite_router_map
.insert(target_id, action_req_tx);
Self {
service: PusTargetedRequestService::new(
PusServiceHelper::new(
owner_id,
pus_action_rx,
tm_funnel_tx.clone(),
verif_reporter,
EcssTcInVecConverter::default(),
),
ActionRequestConverter::default(),
DefaultActiveActionRequestMap::default(),
ActionReplyHandler::default(),
generic_req_router,
action_reply_rx,
),
request_id: None,
pus_packet_tx: pus_action_tx,
tm_funnel_rx,
reply_tx: action_reply_tx,
request_rx: action_req_rx,
}
}
pub fn verify_packet_started(&self) {
self.service
.service_helper
.common
.verif_reporter
.check_next_is_started_success(
self.service.service_helper.id(),
self.request_id.expect("request ID not set").into(),
);
}
pub fn verify_packet_completed(&self) {
self.service
.service_helper
.common
.verif_reporter
.check_next_is_completion_success(
self.service.service_helper.id(),
self.request_id.expect("request ID not set").into(),
);
}
pub fn verify_tm_empty(&self) {
let packet = self.tm_funnel_rx.try_recv();
if let Err(mpsc::TryRecvError::Empty) = packet {
} else {
let tm = packet.unwrap();
let unexpected_tm = PusTmReader::new(&tm.packet, 7).unwrap().0;
panic!("unexpected TM packet {unexpected_tm:?}");
}
}
pub fn verify_next_tc_is_handled_properly(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_handle_next_tc(time_stamp);
if let Err(e) = result {
panic!("unexpected error {:?}", e);
}
let result = result.unwrap();
match result {
HandlingStatus::HandledOne => (),
_ => panic!("unexpected result {result:?}"),
}
}
pub fn verify_all_tcs_handled(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_handle_next_tc(time_stamp);
if let Err(e) = result {
panic!("unexpected error {:?}", e);
}
let result = result.unwrap();
match result {
HandlingStatus::Empty => (),
_ => panic!("unexpected result {result:?}"),
}
}
pub fn verify_next_reply_is_handled_properly(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_handle_next_reply(time_stamp);
assert!(result.is_ok());
assert_eq!(result.unwrap(), HandlingStatus::HandledOne);
}
pub fn verify_all_replies_handled(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_handle_next_reply(time_stamp);
assert!(result.is_ok());
assert_eq!(result.unwrap(), HandlingStatus::Empty);
}
pub fn add_tc(&mut self, tc: &PusTcCreator) {
self.request_id = Some(verification::RequestId::new(tc).into());
let token = self.service.service_helper.verif_reporter_mut().add_tc(tc);
let accepted_token = self
.service
.service_helper
.verif_reporter()
.acceptance_success(self.service.service_helper.tm_sender(), token, &[0; 7])
.expect("TC acceptance failed");
self.service
.service_helper
.verif_reporter()
.check_next_was_added(accepted_token.request_id());
let id = self.service.service_helper.id();
self.service
.service_helper
.verif_reporter()
.check_next_is_acceptance_success(id, accepted_token.request_id());
self.pus_packet_tx
.send(EcssTcAndToken::new(
PacketAsVec::new(self.service.service_helper.id(), tc.to_vec().unwrap()),
accepted_token,
))
.unwrap();
}
}
#[test]
fn basic_request() {
let mut testbench = TargetedPusRequestTestbench::new_for_action(
TEST_COMPONENT_ID_0.id(),
TEST_COMPONENT_ID_1.id(),
);
// Create a basic action request and verify forwarding.
let sp_header = SpHeader::new_from_apid(TEST_APID);
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_1.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(sp_header, sec_header, &app_data, true);
testbench.add_tc(&pus8_packet);
let time_stamp: [u8; 7] = [0; 7];
testbench.verify_next_tc_is_handled_properly(&time_stamp);
testbench.verify_all_tcs_handled(&time_stamp);
testbench.verify_packet_started();
let possible_req = testbench.request_rx.try_recv();
assert!(possible_req.is_ok());
let req = possible_req.unwrap();
if let CompositeRequest::Action(action_req) = req.message {
assert_eq!(action_req.action_id, action_id);
assert_eq!(action_req.variant, ActionRequestVariant::NoData);
let action_reply = ActionReplyPus::new(action_id, ActionReplyVariant::Completed);
testbench
.reply_tx
.send(GenericMessage::new(req.requestor_info, action_reply))
.unwrap();
} else {
panic!("unexpected request type");
}
testbench.verify_next_reply_is_handled_properly(&time_stamp);
testbench.verify_all_replies_handled(&time_stamp);
testbench.verify_packet_completed();
testbench.verify_tm_empty();
}
#[test]
fn basic_request_routing_error() {
let mut testbench = TargetedPusRequestTestbench::new_for_action(
TEST_COMPONENT_ID_0.id(),
TEST_COMPONENT_ID_1.id(),
);
// Create a basic action request and verify forwarding.
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
// Invalid ID, routing should fail.
app_data[0..4].copy_from_slice(&0_u32.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
);
testbench.add_tc(&pus8_packet);
let time_stamp: [u8; 7] = [0; 7];
let result = testbench.service.poll_and_handle_next_tc(&time_stamp);
assert!(result.is_err());
// Verify the correct result and completion failure.
}
#[test]
fn converter_action_req_no_data() {
let mut testbench = PusConverterTestbench::new(
TEST_COMPONENT_ID_0.raw(),
ActionRequestConverter::default(),
);
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
// Valid target ID, the conversion should succeed.
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
);
let token = testbench.add_tc(&pus8_packet);
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
assert!(result.is_ok());
let (active_req, request) = result.unwrap();
if let ActionRequestVariant::NoData = request.variant {
assert_eq!(request.action_id, action_id);
assert_eq!(active_req.action_id, action_id);
assert_eq!(
active_req.target_id(),
UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID_0).raw()
);
assert_eq!(
active_req.token().request_id(),
testbench.request_id().unwrap()
);
} else {
panic!("unexpected action request variant");
}
}
#[test]
fn converter_action_req_with_data() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ActionRequestConverter::default());
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 16] = [0; 16];
// Valid target ID, the conversion should succeed.
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
for i in 0..8 {
app_data[i + 8] = i as u8;
}
let pus8_packet = PusTcCreator::new(
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
);
let token = testbench.add_tc(&pus8_packet);
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
assert!(result.is_ok());
let (active_req, request) = result.unwrap();
if let ActionRequestVariant::VecData(vec) = request.variant {
assert_eq!(request.action_id, action_id);
assert_eq!(active_req.action_id, action_id);
assert_eq!(vec, app_data[8..].to_vec());
} else {
panic!("unexpected action request variant");
}
}
#[test]
fn reply_handling_completion_success() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let reply = ActionReplyPus::new(action_id, ActionReplyVariant::Completed);
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
testbench.verif_reporter.assert_full_completion_success(
TEST_COMPONENT_ID_0.id(),
req_id,
None,
);
}
#[test]
fn reply_handling_completion_failure() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let error_code = ResultU16::new(2, 3);
let reply = ActionReplyPus::new(
action_id,
ActionReplyVariant::CompletionFailed {
error_code,
params: None,
},
);
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_0.into(),
req_id,
None,
error_code.raw() as u64,
);
}
#[test]
fn reply_handling_step_success() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let reply = ActionReplyPus::new(action_id, ActionReplyVariant::StepSuccess { step: 1 });
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
// Entry should not be removed, completion not done yet.
assert!(!result.unwrap());
testbench.verif_reporter.check_next_was_added(req_id);
testbench
.verif_reporter
.check_next_is_acceptance_success(TEST_COMPONENT_ID_0.raw(), req_id);
testbench
.verif_reporter
.check_next_is_started_success(TEST_COMPONENT_ID_0.raw(), req_id);
testbench
.verif_reporter
.check_next_is_step_success(TEST_COMPONENT_ID_0.raw(), req_id, 1);
}
#[test]
fn reply_handling_step_failure() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let error_code = ResultU16::new(2, 3);
let reply = ActionReplyPus::new(
action_id,
ActionReplyVariant::StepFailed {
error_code,
step: 1,
params: None,
},
);
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
testbench.verif_reporter.check_next_was_added(req_id);
testbench
.verif_reporter
.check_next_is_acceptance_success(TEST_COMPONENT_ID_0.id(), req_id);
testbench
.verif_reporter
.check_next_is_started_success(TEST_COMPONENT_ID_0.id(), req_id);
testbench.verif_reporter.check_next_is_step_failure(
TEST_COMPONENT_ID_0.id(),
req_id,
error_code.raw().into(),
);
}
#[test]
fn reply_handling_unrequested_reply() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_reply = ActionReplyPus::new(5_u32, ActionReplyVariant::Completed);
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply);
// Right now this function does not do a lot. We simply check that it does not panic or do
// weird stuff.
let result = testbench.handle_unrequested_reply(&unrequested_reply);
assert!(result.is_ok());
}
#[test]
fn reply_handling_reply_timeout() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let result = testbench.handle_request_timeout(
&ActivePusActionRequestStd::new_from_common_req(action_id, active_request),
&[],
);
assert!(result.is_ok());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_0.raw(),
req_id,
None,
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
);
}
}
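For reference, the converter above accepts service 8, subservice 128 telecommands whose application data starts with the 4-byte target unique ID followed by the 4-byte action ID. A hypothetical ground-side sketch using only spacepackets types already imported in these tests (APID and ID values are made up):

    use satrs::spacepackets::{
        ecss::{
            tc::{PusTcCreator, PusTcSecondaryHeader},
            WritablePusPacket,
        },
        SpHeader,
    };

    fn build_action_tc_sketch() -> Vec<u8> {
        let mut app_data = [0u8; 8];
        app_data[0..4].copy_from_slice(&25_u32.to_be_bytes()); // target unique ID (example value)
        app_data[4..8].copy_from_slice(&1_u32.to_be_bytes()); // action ID (example value)
        let sp_header = SpHeader::new_from_apid(0x2a);
        let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
        let action_tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
        action_tc.to_vec().unwrap()
    }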

src/pus/event.rs (new file, 93 lines)

@@ -0,0 +1,93 @@
use std::sync::mpsc;
use super::{DirectPusService, HandlingStatus};
use crate::pus::create_verification_reporter;
use ops_sat_rs::config::components::PUS_EVENT_MANAGEMENT;
use satrs::pus::event_man::EventRequestWithToken;
use satrs::pus::event_srv::PusEventServiceHandler;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInVecConverter, MpscTcReceiver,
PartialPusHandlingError, PusServiceHelper,
};
use satrs::spacepackets::ecss::PusServiceId;
use satrs::tmtc::PacketAsVec;
pub fn create_event_service(
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
event_request_tx: mpsc::Sender<EventRequestWithToken>,
) -> EventServiceWrapper {
let pus_5_handler = PusEventServiceHandler::new(
PusServiceHelper::new(
PUS_EVENT_MANAGEMENT.id(),
pus_event_rx,
tm_funnel_tx,
create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid, 16),
EcssTcInVecConverter::default(),
),
event_request_tx,
);
EventServiceWrapper {
handler: pus_5_handler,
}
}
pub struct EventServiceWrapper {
pub handler: PusEventServiceHandler<
MpscTcReceiver,
mpsc::Sender<PacketAsVec>,
EcssTcInVecConverter,
VerificationReporter,
>,
}
impl DirectPusService for EventServiceWrapper {
const SERVICE_ID: u8 = PusServiceId::Event as u8;
const SERVICE_STR: &'static str = "events";
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
let error_handler = |partial_error: &PartialPusHandlingError| {
log::warn!(
"PUS {}({}) partial error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
partial_error
);
};
let result = self
.handler
.poll_and_handle_next_tc(error_handler, time_stamp);
if let Err(e) = result {
log::warn!(
"PUS {}({}) error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
e
);
// To avoid permanent loops.
return HandlingStatus::Empty;
}
match result.unwrap() {
DirectPusPacketHandlerResult::Handled(handling_status) => return handling_status,
DirectPusPacketHandlerResult::CustomSubservice(subservice, _) => {
log::warn!(
"PUS {}({}) subservice {} not implemented",
Self::SERVICE_ID,
Self::SERVICE_STR,
subservice
);
}
DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
log::warn!(
"PUS {}({}) subservice {} not implemented",
Self::SERVICE_ID,
Self::SERVICE_STR,
subservice
);
}
}
HandlingStatus::HandledOne
}
}
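How the PUS stack drives a direct service like this one is not shown in this file; the following sketch assumes it lives in the same module (so EventServiceWrapper, DirectPusService and HandlingStatus are in scope as above) and that the caller simply drains the service until it reports Empty:

    fn drain_event_service(service: &mut EventServiceWrapper, time_stamp: &[u8]) {
        // Keep polling until no more event TCs are queued for this cycle.
        while service.poll_and_handle_next_tc(time_stamp) != HandlingStatus::Empty {}
    }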

src/pus/hk.rs (new file, 513 lines)

@@ -0,0 +1,513 @@
use derive_new::new;
use ops_sat_rs::config::components::PUS_HK_SERVICE;
use ops_sat_rs::config::{hk_err, tmtc_err};
use satrs::hk::{CollectionIntervalFactor, HkRequest, HkRequestVariant, UniqueId};
use satrs::pus::verification::{
FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap, EcssTcAndToken,
EcssTcInVecConverter, EcssTmSender, EcssTmtcError, GenericConversionError,
PusPacketHandlingError, PusReplyHandler, PusServiceHelper, PusTcToRequestConverter,
};
use satrs::request::{GenericMessage, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{hk, PusPacket};
use satrs::tmtc::PacketAsVec;
use std::sync::mpsc;
use std::time::Duration;
use crate::pus::{create_verification_reporter, generic_pus_request_timeout_handler};
use crate::requests::GenericRequestRouter;
use super::{HandlingStatus, PusTargetedRequestService, TargetedPusService};
#[derive(Clone, PartialEq, Debug, new)]
pub struct HkReply {
pub unique_id: UniqueId,
pub variant: HkReplyVariant,
}
#[derive(Clone, PartialEq, Debug)]
#[allow(dead_code)]
pub enum HkReplyVariant {
Ack,
}
#[derive(Default)]
pub struct HkReplyHandler {}
impl PusReplyHandler<ActivePusRequestStd, HkReply> for HkReplyHandler {
type Error = EcssTmtcError;
fn handle_unrequested_reply(
&mut self,
reply: &GenericMessage<HkReply>,
_tm_sender: &impl EcssTmSender,
) -> Result<(), Self::Error> {
log::warn!("received unexpected reply for service 3: {reply:?}");
Ok(())
}
fn handle_reply(
&mut self,
reply: &GenericMessage<HkReply>,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<bool, Self::Error> {
let started_token: VerificationToken<TcStateStarted> = active_request
.token()
.try_into()
.expect("invalid token state");
match reply.message.variant {
HkReplyVariant::Ack => {
verification_handler
.completion_success(tm_sender, started_token, time_stamp)
.expect("sending completion success verification failed");
}
};
Ok(true)
}
fn handle_request_timeout(
&mut self,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(), Self::Error> {
generic_pus_request_timeout_handler(
tm_sender,
active_request,
verification_handler,
time_stamp,
"HK",
)?;
Ok(())
}
}
pub struct HkRequestConverter {
timeout: Duration,
}
impl Default for HkRequestConverter {
fn default() -> Self {
Self {
timeout: Duration::from_secs(60),
}
}
}
impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConverter {
type Error = GenericConversionError;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
tm_sender: &(impl EcssTmSender + ?Sized),
verif_reporter: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(ActivePusRequestStd, HkRequest), Self::Error> {
let user_data = tc.user_data();
if user_data.is_empty() {
let user_data_len = user_data.len() as u32;
let user_data_len_raw = user_data_len.to_be_bytes();
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new(
time_stamp,
&tmtc_err::NOT_ENOUGH_APP_DATA,
&user_data_len_raw,
),
)
.expect("Sending start failure TM failed");
return Err(GenericConversionError::NotEnoughAppData {
expected: 4,
found: 0,
});
}
if user_data.len() < 8 {
let err = if user_data.len() < 4 {
&hk_err::TARGET_ID_MISSING
} else {
&hk_err::UNIQUE_ID_MISSING
};
let user_data_len = user_data.len() as u32;
let user_data_len_raw = user_data_len.to_be_bytes();
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new(time_stamp, err, &user_data_len_raw),
)
.expect("Sending start failure TM failed");
return Err(GenericConversionError::NotEnoughAppData {
expected: 8,
found: 4,
});
}
let subservice = tc.subservice();
let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).expect("invalid tc format");
let unique_id = u32::from_be_bytes(tc.user_data()[4..8].try_into().unwrap());
let standard_subservice = hk::Subservice::try_from(subservice);
if standard_subservice.is_err() {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE, &[subservice]),
)
.expect("Sending start failure TM failed");
return Err(GenericConversionError::InvalidSubservice(subservice));
}
let request = match standard_subservice.unwrap() {
hk::Subservice::TcEnableHkGeneration | hk::Subservice::TcEnableDiagGeneration => {
HkRequest::new(unique_id, HkRequestVariant::EnablePeriodic)
}
hk::Subservice::TcDisableHkGeneration | hk::Subservice::TcDisableDiagGeneration => {
HkRequest::new(unique_id, HkRequestVariant::DisablePeriodic)
}
hk::Subservice::TcReportHkReportStructures => todo!(),
hk::Subservice::TmHkPacket => todo!(),
hk::Subservice::TcGenerateOneShotHk | hk::Subservice::TcGenerateOneShotDiag => {
HkRequest::new(unique_id, HkRequestVariant::OneShot)
}
hk::Subservice::TcModifyDiagCollectionInterval
| hk::Subservice::TcModifyHkCollectionInterval => {
if user_data.len() < 12 {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(
time_stamp,
&tmtc_err::NOT_ENOUGH_APP_DATA,
),
)
.expect("Sending start failure TM failed");
return Err(GenericConversionError::NotEnoughAppData {
expected: 12,
found: user_data.len(),
});
}
HkRequest::new(
unique_id,
HkRequestVariant::ModifyCollectionInterval(
CollectionIntervalFactor::from_be_bytes(
user_data[8..12].try_into().unwrap(),
),
),
)
}
_ => {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new(
time_stamp,
&tmtc_err::PUS_SUBSERVICE_NOT_IMPLEMENTED,
&[subservice],
),
)
.expect("Sending start failure TM failed");
return Err(GenericConversionError::InvalidSubservice(subservice));
}
};
Ok((
ActivePusRequestStd::new(target_id_and_apid.into(), token, self.timeout),
request,
))
}
}
pub fn create_hk_service(
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
request_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<HkReply>>,
) -> HkServiceWrapper {
let pus_3_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_HK_SERVICE.id(),
pus_hk_rx,
tm_funnel_tx,
create_verification_reporter(PUS_HK_SERVICE.id(), PUS_HK_SERVICE.apid, 16),
EcssTcInVecConverter::default(),
),
HkRequestConverter::default(),
DefaultActiveRequestMap::default(),
HkReplyHandler::default(),
request_router,
reply_receiver,
);
HkServiceWrapper {
service: pus_3_handler,
}
}
pub struct HkServiceWrapper {
pub(crate) service: PusTargetedRequestService<
VerificationReporter,
HkRequestConverter,
HkReplyHandler,
DefaultActiveRequestMap<ActivePusRequestStd>,
ActivePusRequestStd,
HkRequest,
HkReply,
>,
}
impl TargetedPusService for HkServiceWrapper {
const SERVICE_ID: u8 = 3;
const SERVICE_STR: &'static str = "housekeeping";
delegate::delegate! {
to self.service {
fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, PusPacketHandlingError>;
fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError>;
fn check_for_request_timeouts(&mut self);
}
}
}
#[cfg(test)]
mod tests {
use ops_sat_rs::config::tmtc_err;
use satrs::pus::test_util::{
TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
};
use satrs::request::MessageMetadata;
use satrs::{
hk::HkRequestVariant,
pus::test_util::TEST_APID,
request::GenericMessage,
spacepackets::{
ecss::{hk::Subservice, tc::PusTcCreator},
SpHeader,
},
};
use crate::pus::{
hk::HkReplyVariant,
tests::{PusConverterTestbench, ReplyHandlerTestbench},
};
use super::{HkReply, HkReplyHandler, HkRequestConverter};
#[test]
fn hk_converter_one_shot_req() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let hk_req = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcGenerateOneShotHk as u8,
&app_data,
true,
);
let accepted_token = hk_bench.add_tc(&hk_req);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::OneShot = req.variant {
} else {
panic!("unexpected HK request")
}
}
#[test]
fn hk_converter_enable_periodic_generation() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let mut generic_check = |tc: &PusTcCreator| {
let accepted_token = hk_bench.add_tc(tc);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::EnablePeriodic = req.variant {
} else {
panic!("unexpected HK request")
}
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcEnableHkGeneration as u8,
&app_data,
true,
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcEnableDiagGeneration as u8,
&app_data,
true,
);
generic_check(&tc1);
}
#[test]
fn hk_conversion_disable_periodic_generation() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let mut generic_check = |tc: &PusTcCreator| {
let accepted_token = hk_bench.add_tc(tc);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::DisablePeriodic = req.variant {
} else {
panic!("unexpected HK request")
}
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcDisableHkGeneration as u8,
&app_data,
true,
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcDisableDiagGeneration as u8,
&app_data,
true,
);
generic_check(&tc1);
}
#[test]
fn hk_conversion_modify_interval() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 12] = [0; 12];
let collection_interval_factor = 5_u32;
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
app_data[8..12].copy_from_slice(&collection_interval_factor.to_be_bytes());
let mut generic_check = |tc: &PusTcCreator| {
let accepted_token = hk_bench.add_tc(tc);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::ModifyCollectionInterval(interval_factor) = req.variant {
assert_eq!(interval_factor, collection_interval_factor);
} else {
panic!("unexpected HK request")
}
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcModifyHkCollectionInterval as u8,
&app_data,
true,
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcModifyDiagCollectionInterval as u8,
&app_data,
true,
);
generic_check(&tc1);
}
#[test]
fn hk_reply_handler() {
let mut reply_testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), HkReplyHandler::default());
let sender_id = 2_u64;
let apid_target_id = 3_u32;
let unique_id = 5_u32;
let (req_id, active_req) = reply_testbench.add_tc(TEST_APID, apid_target_id, &[]);
let reply = GenericMessage::new(
MessageMetadata::new(req_id.into(), sender_id),
HkReply::new(unique_id, HkReplyVariant::Ack),
);
let result = reply_testbench.handle_reply(&reply, &active_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
reply_testbench
.verif_reporter
.assert_full_completion_success(TEST_COMPONENT_ID_0.raw(), req_id, None);
}
#[test]
fn reply_handling_unrequested_reply() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default());
let action_reply = HkReply::new(5_u32, HkReplyVariant::Ack);
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply);
// Right now this function does not do a lot. We simply check that it does not panic or do
// weird stuff.
let result = testbench.handle_unrequested_reply(&unrequested_reply);
assert!(result.is_ok());
}
#[test]
fn reply_handling_reply_timeout() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default());
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_1, &[]);
let result = testbench.handle_request_timeout(&active_request, &[]);
assert!(result.is_ok());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_1.raw(),
req_id,
None,
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
);
}
}
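For reference, the HkRequestConverter above expects 12 bytes of application data for the modify-collection-interval subservices: target unique ID, HK set unique ID, then the collection interval factor, all big endian. A hypothetical sketch of that layout (values are examples):

    fn modify_interval_app_data() -> [u8; 12] {
        let mut app_data = [0u8; 12];
        app_data[0..4].copy_from_slice(&25_u32.to_be_bytes()); // target unique ID
        app_data[4..8].copy_from_slice(&5_u32.to_be_bytes()); // HK set unique ID
        app_data[8..12].copy_from_slice(&4_u32.to_be_bytes()); // collection interval factor
        app_data
    }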

View File

@ -1,8 +1,12 @@
pub mod action;
pub mod event;
pub mod hk;
pub mod mode;
pub mod scheduler;
pub mod stack; pub mod stack;
pub mod test; pub mod test;
use crate::requests::GenericRequestRouter; use crate::requests::GenericRequestRouter;
use crate::tmtc::MpscStoreAndSendError;
use log::warn; use log::warn;
use ops_sat_rs::config::components::PUS_ROUTING_SERVICE; use ops_sat_rs::config::components::PUS_ROUTING_SERVICE;
use ops_sat_rs::config::{tmtc_err, CustomPusServiceId}; use ops_sat_rs::config::{tmtc_err, CustomPusServiceId};
@ -13,35 +17,25 @@ use satrs::pus::verification::{
}; };
use satrs::pus::{ use satrs::pus::{
ActiveRequestMapProvider, ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter, ActiveRequestMapProvider, ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter,
EcssTcReceiverCore, EcssTmSenderCore, EcssTmtcError, GenericConversionError, EcssTcInVecConverter, EcssTmSender, EcssTmtcError, GenericConversionError, GenericRoutingError,
GenericRoutingError, PusPacketHandlerResult, PusPacketHandlingError, PusReplyHandler, HandlingStatus, MpscTcReceiver, MpscTmAsVecSender, PusPacketHandlingError, PusReplyHandler,
PusRequestRouter, PusServiceHelper, PusTcToRequestConverter, TcInMemory, PusRequestRouter, PusServiceHelper, PusTcToRequestConverter, TcInMemory,
}; };
use satrs::queue::GenericReceiveError; use satrs::queue::{GenericReceiveError, GenericSendError};
use satrs::request::{Apid, GenericMessage, MessageMetadata}; use satrs::request::{Apid, GenericMessage, MessageMetadata};
use satrs::spacepackets::ecss::tc::PusTcReader; use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusServiceId; use satrs::spacepackets::ecss::{PusPacket, PusServiceId};
use satrs::tmtc::PacketAsVec;
use satrs::ComponentId; use satrs::ComponentId;
use std::fmt::Debug; use std::fmt::Debug;
use std::sync::mpsc::{self, Sender}; use std::sync::mpsc::{self, Sender};
// pub mod action; pub fn create_verification_reporter(
// pub mod event; owner_id: ComponentId,
// pub mod hk; apid: Apid,
// pub mod mode; max_fail_data_len: usize,
// pub mod scheduler; ) -> VerificationReporter {
// pub mod stack; let verif_cfg = VerificationReporterCfg::new(apid, 1, 2, max_fail_data_len).unwrap();
// pub mod test;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[allow(dead_code)]
pub enum HandlingStatus {
Empty,
HandledOne,
}
pub fn create_verification_reporter(owner_id: ComponentId, apid: Apid) -> VerificationReporter {
let verif_cfg = VerificationReporterCfg::new(apid, 1, 2, 8).unwrap();
// Every software component which needs to generate verification telemetry, gets a cloned // Every software component which needs to generate verification telemetry, gets a cloned
// verification reporter. // verification reporter.
VerificationReporter::new(owner_id, &verif_cfg) VerificationReporter::new(owner_id, &verif_cfg)
@ -50,14 +44,14 @@ pub fn create_verification_reporter(owner_id: ComponentId, apid: Apid) -> Verifi
/// Simple router structure which forwards PUS telecommands to dedicated handlers.
pub struct PusTcMpscRouter {
pub test_tc_sender: Sender<EcssTcAndToken>,
pub event_tc_sender: Sender<EcssTcAndToken>,
pub sched_tc_sender: Sender<EcssTcAndToken>,
pub hk_tc_sender: Sender<EcssTcAndToken>,
pub action_tc_sender: Sender<EcssTcAndToken>,
pub mode_tc_sender: Sender<EcssTcAndToken>,
}
pub struct PusTcDistributor<TmSender: EcssTmSender> {
pub id: ComponentId,
pub tm_sender: TmSender,
pub verif_reporter: VerificationReporter,
@ -65,7 +59,7 @@ pub struct PusReceiver<TmSender: EcssTmSenderCore> {
stamp_helper: TimeStampHelper,
}
impl<TmSender: EcssTmSender> PusTcDistributor<TmSender> {
pub fn new(tm_sender: TmSender, pus_router: PusTcMpscRouter) -> Self {
Self {
id: PUS_ROUTING_SERVICE.raw(),
@ -73,6 +67,7 @@ impl<TmSender: EcssTmSenderCore> PusReceiver<TmSender> {
verif_reporter: create_verification_reporter(
PUS_ROUTING_SERVICE.id(),
PUS_ROUTING_SERVICE.apid,
16,
),
pus_router,
stamp_helper: TimeStampHelper::default(),
@ -81,39 +76,48 @@ impl<TmSender: EcssTmSenderCore> PusReceiver<TmSender> {
pub fn handle_tc_packet(
&mut self,
sender_id: ComponentId,
tc: Vec<u8>,
) -> Result<HandlingStatus, GenericSendError> {
let pus_tc_result = PusTcReader::new(&tc);
if pus_tc_result.is_err() {
log::warn!(
"error creating PUS TC received from {}: {}",
sender_id,
pus_tc_result.unwrap_err()
);
log::warn!("raw data: {:x?}", tc);
return Ok(HandlingStatus::HandledOne);
}
let pus_tc = pus_tc_result.unwrap().0;
let init_token = self.verif_reporter.add_tc(&pus_tc);
self.stamp_helper.update_from_now();
let accepted_token = self
.verif_reporter
.acceptance_success(&self.tm_sender, init_token, self.stamp_helper.stamp())
.expect("Acceptance success failure");
let service = PusServiceId::try_from(pus_tc.service());
let tc_in_memory = TcInMemory::Vec(PacketAsVec::new(sender_id, tc));
match service {
Ok(standard_service) => match standard_service {
PusServiceId::Test => self.pus_router.test_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?,
PusServiceId::Action => self.pus_router.action_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?,
PusServiceId::Event => self.pus_router.event_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?,
PusServiceId::Scheduling => {
self.pus_router.sched_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?
}
_ => {
let result = self.verif_reporter.start_failure(
&self.tm_sender,
@ -133,10 +137,10 @@ impl<TmSender: EcssTmSenderCore> PusReceiver<TmSender> {
if let Ok(custom_service) = CustomPusServiceId::try_from(e.number) {
match custom_service {
CustomPusServiceId::Mode => {
self.pus_router.mode_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?
}
CustomPusServiceId::Health => {}
}
@ -155,18 +159,66 @@ impl<TmSender: EcssTmSenderCore> PusReceiver<TmSender> {
}
}
}
Ok(HandlingStatus::HandledOne)
}
}
pub trait TargetedPusService {
const SERVICE_ID: u8;
const SERVICE_STR: &'static str;
fn poll_and_handle_next_tc_default_handler(&mut self, time_stamp: &[u8]) -> HandlingStatus {
let result = self.poll_and_handle_next_tc(time_stamp);
if let Err(e) = result {
log::error!(
"PUS service {}({}) packet handling error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
e
);
// To avoid permanent loops on error cases.
return HandlingStatus::Empty;
}
result.unwrap()
}
fn poll_and_handle_next_reply_default_handler(&mut self, time_stamp: &[u8]) -> HandlingStatus {
// This only fails if all senders disconnected. Treat it like an empty queue.
self.poll_and_handle_next_reply(time_stamp)
.unwrap_or_else(|e| {
warn!(
"PUS service {}({}): handling reply failed with error {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
e
);
HandlingStatus::Empty
})
}
fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, PusPacketHandlingError>;
fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError>;
fn check_for_request_timeouts(&mut self);
}
/// Generic trait for services which handle packets directly. Kept minimal right now because
/// of the difficulty of allowing flexible user code for these services.
pub trait DirectPusService {
const SERVICE_ID: u8;
const SERVICE_STR: &'static str;
fn poll_and_handle_next_tc(&mut self, timestamp: &[u8]) -> HandlingStatus;
}
/// This is a generic handler class for all PUS services where a PUS telecommand is converted
/// to a targeted request.
///
/// The generic steps for this process are the following
@ -176,21 +228,18 @@ pub trait TargetedPusService {
/// 3. Convert the PUS TC to a typed request using the [PusTcToRequestConverter].
/// 4. Route the requests using the [GenericRequestRouter].
/// 5. Add the request to the active request map using the [ActiveRequestMapProvider] abstraction.
/// 6. Check for replies which complete the forwarded request. The handler takes care of
/// the verification process.
/// 7. Check for timeouts of active requests. Generally, the timeout on the service level should
/// be the highest expected timeout for the given target.
///
/// The handler exposes the following API:
///
/// 1. [Self::handle_one_tc] which tries to poll and handle one TC packet, covering steps 1-5.
/// 2. [Self::check_one_reply] which tries to poll and handle one reply, covering step 6.
/// 3. [Self::check_for_request_timeouts] which checks for request timeouts, covering step 7.
#[allow(dead_code)]
pub struct PusTargetedRequestService<
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
@ -199,8 +248,12 @@ pub struct PusTargetedRequestService<
RequestType,
ReplyType,
> {
pub service_helper: PusServiceHelper<
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
VerificationReporter,
>,
pub request_router: GenericRequestRouter,
pub request_converter: RequestConverter,
pub active_request_map: ActiveRequestMap,
@ -209,11 +262,7 @@ pub struct PusTargetedRequestService<
phantom: std::marker::PhantomData<(RequestType, ActiveRequestInfo, ReplyType)>,
}
impl<
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
@ -223,9 +272,6 @@ impl<
ReplyType,
>
PusTargetedRequestService<
VerificationReporter,
RequestConverter,
ReplyHandler,
@ -239,9 +285,9 @@ where
{
pub fn new(
service_helper: PusServiceHelper<
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
VerificationReporter,
>,
request_converter: RequestConverter,
@ -264,10 +310,10 @@ where
pub fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, PusPacketHandlingError> {
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(HandlingStatus::Empty);
}
let ecss_tc_and_token = possible_packet.unwrap();
self.service_helper
@ -323,7 +369,7 @@ where
return Err(e.into());
}
}
Ok(HandlingStatus::HandledOne)
}
fn handle_conversion_to_request_error(
@ -376,7 +422,7 @@ where
}
}
pub fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError> {
@ -406,20 +452,17 @@ where
return Ok(());
}
let active_request = active_req_opt.unwrap();
let result = self.reply_handler.handle_reply(
reply,
active_request,
&self.service_helper.common.tm_sender,
&self.service_helper.common.verif_reporter,
time_stamp,
);
if result.is_err() || (result.is_ok() && *result.as_ref().unwrap()) {
self.active_request_map.remove(reply.request_id());
}
result.map(|_| ())
}
pub fn check_for_request_timeouts(&mut self) {
@ -442,7 +485,7 @@ where
/// and also log the error.
#[allow(dead_code)]
pub fn generic_pus_request_timeout_handler(
sender: &(impl EcssTmSender + ?Sized),
active_request: &(impl ActiveRequestProvider + Debug),
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
@ -466,12 +509,13 @@ pub(crate) mod tests {
use std::time::Duration;
use satrs::pus::test_util::TEST_COMPONENT_ID_0;
use satrs::pus::{MpscTmAsVecSender, PusTmVariant};
use satrs::request::RequestId;
use satrs::tmtc::PacketAsVec;
use satrs::{
pus::{
verification::test_util::TestVerificationReporter, ActivePusRequestStd,
ActiveRequestMapProvider,
},
request::UniqueApidTargetId,
spacepackets::{
@ -496,7 +540,7 @@ pub(crate) mod tests {
pub id: ComponentId,
pub verif_reporter: TestVerificationReporter,
pub reply_handler: ReplyHandler,
pub tm_receiver: mpsc::Receiver<PacketAsVec>,
pub default_timeout: Duration,
tm_sender: MpscTmAsVecSender,
phantom: std::marker::PhantomData<(ActiveRequestInfo, Reply)>,
@ -596,7 +640,7 @@ pub(crate) mod tests {
/// Dummy sender component which does nothing on the [Self::send_tm] call.
///
/// Useful for unit tests.
impl EcssTmSender for DummySender {
fn send_tm(&self, _source_id: ComponentId, _tm: PusTmVariant) -> Result<(), EcssTmtcError> {
Ok(())
}
@ -691,9 +735,6 @@ pub(crate) mod tests {
ReplyType,
> {
pub service: PusTargetedRequestService<
TestVerificationReporter,
RequestConverter,
ReplyHandler,
@ -703,7 +744,7 @@ pub(crate) mod tests {
ReplyType,
>,
pub request_id: Option<RequestId>,
pub tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
pub pus_packet_tx: mpsc::Sender<EcssTcAndToken>,
pub reply_tx: mpsc::Sender<GenericMessage<ReplyType>>,
pub request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
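The TargetedPusService trait above combines fallible poll methods with logging default handlers. A minimal usage sketch for driving a single implementor (for example the ModeServiceWrapper in src/pus/mode.rs below) through one scheduling slot, assuming only the trait methods shown above:

// Illustrative sketch, not repository code: poll one targeted service until its queues are empty.
fn poll_targeted_service<S: TargetedPusService>(service: &mut S, time_stamp: &[u8]) {
    loop {
        let tc_status = service.poll_and_handle_next_tc_default_handler(time_stamp);
        let reply_status = service.poll_and_handle_next_reply_default_handler(time_stamp);
        // Stop once both the TC queue and the reply queue are empty.
        if tc_status == HandlingStatus::Empty && reply_status == HandlingStatus::Empty {
            break;
        }
    }
    // Timeout checking is only done once per slot.
    service.check_for_request_timeouts();
}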

src/pus/mode.rs Normal file
@ -0,0 +1,384 @@
use derive_new::new;
use satrs::tmtc::PacketAsVec;
use std::sync::mpsc;
use std::time::Duration;
use crate::requests::GenericRequestRouter;
use ops_sat_rs::config::components::PUS_MODE_SERVICE;
use ops_sat_rs::config::{mode_err, tmtc_err, CustomPusServiceId};
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
DefaultActiveRequestMap, EcssTcAndToken, EcssTcInVecConverter, PusPacketHandlingError,
PusServiceHelper,
};
use satrs::request::GenericMessage;
use satrs::{
mode::{ModeAndSubmode, ModeReply, ModeRequest},
pus::{
mode::Subservice,
verification::{
self, FailParams, TcStateAccepted, TcStateStarted, VerificationReportingProvider,
VerificationToken,
},
ActivePusRequestStd, ActiveRequestProvider, EcssTmSender, EcssTmtcError,
GenericConversionError, PusReplyHandler, PusTcToRequestConverter, PusTmVariant,
},
request::UniqueApidTargetId,
spacepackets::{
ecss::{
tc::PusTcReader,
tm::{PusTmCreator, PusTmSecondaryHeader},
PusPacket,
},
SpHeader,
},
ComponentId,
};
use super::{
create_verification_reporter, generic_pus_request_timeout_handler, HandlingStatus,
PusTargetedRequestService, TargetedPusService,
};
#[derive(new)]
pub struct ModeReplyHandler {
owner_id: ComponentId,
}
impl PusReplyHandler<ActivePusRequestStd, ModeReply> for ModeReplyHandler {
type Error = EcssTmtcError;
fn handle_unrequested_reply(
&mut self,
reply: &GenericMessage<ModeReply>,
_tm_sender: &impl EcssTmSender,
) -> Result<(), Self::Error> {
log::warn!("received unexpected reply for mode service 5: {reply:?}");
Ok(())
}
fn handle_reply(
&mut self,
reply: &GenericMessage<ModeReply>,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<bool, Self::Error> {
let started_token: VerificationToken<TcStateStarted> = active_request
.token()
.try_into()
.expect("invalid token state");
match reply.message {
ModeReply::ModeReply(mode_reply) => {
let mut source_data: [u8; 12] = [0; 12];
mode_reply
.write_to_be_bytes(&mut source_data)
.expect("writing mode reply failed");
let req_id = verification::RequestId::from(reply.request_id());
let sp_header = SpHeader::new_for_unseg_tm(req_id.packet_id().apid(), 0, 0);
let sec_header =
PusTmSecondaryHeader::new(200, Subservice::TmModeReply as u8, 0, 0, time_stamp);
let pus_tm = PusTmCreator::new(sp_header, sec_header, &source_data, true);
tm_sender.send_tm(self.owner_id, PusTmVariant::Direct(pus_tm))?;
verification_handler.completion_success(tm_sender, started_token, time_stamp)?;
}
ModeReply::CantReachMode(error_code) => {
verification_handler.completion_failure(
tm_sender,
started_token,
FailParams::new(time_stamp, &error_code, &[]),
)?;
}
ModeReply::WrongMode { expected, reached } => {
let mut error_info: [u8; 24] = [0; 24];
let mut written_len = expected
.write_to_be_bytes(&mut error_info[0..ModeAndSubmode::RAW_LEN])
.expect("writing expected mode failed");
written_len += reached
.write_to_be_bytes(&mut error_info[ModeAndSubmode::RAW_LEN..])
.expect("writing reached mode failed");
verification_handler.completion_failure(
tm_sender,
started_token,
FailParams::new(
time_stamp,
&mode_err::WRONG_MODE,
&error_info[..written_len],
),
)?;
}
};
Ok(true)
}
fn handle_request_timeout(
&mut self,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(), Self::Error> {
generic_pus_request_timeout_handler(
tm_sender,
active_request,
verification_handler,
time_stamp,
"HK",
)?;
Ok(())
}
}
#[derive(Default)]
pub struct ModeRequestConverter {}
impl PusTcToRequestConverter<ActivePusRequestStd, ModeRequest> for ModeRequestConverter {
type Error = GenericConversionError;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
tm_sender: &(impl EcssTmSender + ?Sized),
verif_reporter: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(ActivePusRequestStd, ModeRequest), Self::Error> {
let subservice = tc.subservice();
let user_data = tc.user_data();
let not_enough_app_data = |expected: usize| {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA),
)
.expect("Sending start failure failed");
Err(GenericConversionError::NotEnoughAppData {
expected,
found: user_data.len(),
})
};
if user_data.len() < core::mem::size_of::<u32>() {
return not_enough_app_data(4);
}
let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).unwrap();
let active_request =
ActivePusRequestStd::new(target_id_and_apid.into(), token, Duration::from_secs(30));
let subservice_typed = Subservice::try_from(subservice);
let invalid_subservice = || {
// Invalid subservice
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE),
)
.expect("Sending start failure failed");
Err(GenericConversionError::InvalidSubservice(subservice))
};
if subservice_typed.is_err() {
return invalid_subservice();
}
let subservice_typed = subservice_typed.unwrap();
match subservice_typed {
Subservice::TcSetMode => {
if user_data.len() < core::mem::size_of::<u32>() + ModeAndSubmode::RAW_LEN {
return not_enough_app_data(4 + ModeAndSubmode::RAW_LEN);
}
let mode_and_submode = ModeAndSubmode::from_be_bytes(&tc.user_data()[4..])
.expect("mode and submode extraction failed");
Ok((active_request, ModeRequest::SetMode(mode_and_submode)))
}
Subservice::TcReadMode => Ok((active_request, ModeRequest::ReadMode)),
Subservice::TcAnnounceMode => Ok((active_request, ModeRequest::AnnounceMode)),
Subservice::TcAnnounceModeRecursive => {
Ok((active_request, ModeRequest::AnnounceModeRecursive))
}
_ => invalid_subservice(),
}
}
}
pub fn create_mode_service(
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
mode_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ModeReply>>,
) -> ModeServiceWrapper {
let mode_request_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_MODE_SERVICE.id(),
pus_action_rx,
tm_funnel_tx,
create_verification_reporter(PUS_MODE_SERVICE.id(), PUS_MODE_SERVICE.apid, 16),
EcssTcInVecConverter::default(),
),
ModeRequestConverter::default(),
DefaultActiveRequestMap::default(),
ModeReplyHandler::new(PUS_MODE_SERVICE.id()),
mode_router,
reply_receiver,
);
ModeServiceWrapper {
service: mode_request_handler,
}
}
pub struct ModeServiceWrapper {
pub(crate) service: PusTargetedRequestService<
VerificationReporter,
ModeRequestConverter,
ModeReplyHandler,
DefaultActiveRequestMap<ActivePusRequestStd>,
ActivePusRequestStd,
ModeRequest,
ModeReply,
>,
}
impl TargetedPusService for ModeServiceWrapper {
const SERVICE_ID: u8 = CustomPusServiceId::Mode as u8;
const SERVICE_STR: &'static str = "mode";
delegate::delegate! {
to self.service {
fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, PusPacketHandlingError>;
fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError>;
fn check_for_request_timeouts(&mut self);
}
}
}
#[cfg(test)]
mod tests {
use ops_sat_rs::config::tmtc_err;
use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0, TEST_UNIQUE_ID_0};
use satrs::request::MessageMetadata;
use satrs::{
mode::{ModeAndSubmode, ModeReply, ModeRequest},
pus::mode::Subservice,
request::GenericMessage,
spacepackets::{
ecss::tc::{PusTcCreator, PusTcSecondaryHeader},
SpHeader,
},
};
use crate::pus::{
mode::ModeReplyHandler,
tests::{PusConverterTestbench, ReplyHandlerTestbench},
};
use super::ModeRequestConverter;
#[test]
fn mode_converter_read_mode_request() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcReadMode as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::ReadMode);
}
#[test]
fn mode_converter_set_mode_request() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcSetMode as u8);
let mut app_data: [u8; 4 + ModeAndSubmode::RAW_LEN] = [0; 4 + ModeAndSubmode::RAW_LEN];
let mode_and_submode = ModeAndSubmode::new(2, 1);
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
mode_and_submode
.write_to_be_bytes(&mut app_data[4..])
.unwrap();
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::SetMode(mode_and_submode));
}
#[test]
fn mode_converter_announce_mode() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceMode as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::AnnounceMode);
}
#[test]
fn mode_converter_announce_mode_recursively() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header =
PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceModeRecursive as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::AnnounceModeRecursive);
}
#[test]
fn reply_handling_unrequested_reply() {
let mut testbench = ReplyHandlerTestbench::new(
TEST_COMPONENT_ID_0.id(),
ModeReplyHandler::new(TEST_COMPONENT_ID_0.id()),
);
let mode_reply = ModeReply::ModeReply(ModeAndSubmode::new(5, 1));
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), mode_reply);
// Right now this function does not do a lot. We simply check that it does not panic or do
// weird stuff.
let result = testbench.handle_unrequested_reply(&unrequested_reply);
assert!(result.is_ok());
}
#[test]
fn reply_handling_reply_timeout() {
let mut testbench = ReplyHandlerTestbench::new(
TEST_COMPONENT_ID_0.id(),
ModeReplyHandler::new(TEST_COMPONENT_ID_0.id()),
);
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let result = testbench.handle_request_timeout(&active_request, &[]);
assert!(result.is_ok());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_0.raw(),
req_id,
None,
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
);
}
}

src/pus/scheduler.rs Normal file
@ -0,0 +1,146 @@
use std::sync::mpsc;
use std::time::Duration;
use crate::pus::create_verification_reporter;
use log::info;
use ops_sat_rs::config::components::PUS_SCHEDULER_SERVICE;
use satrs::pool::StaticMemoryPool;
use satrs::pus::scheduler::{PusScheduler, TcInfo};
use satrs::pus::scheduler_srv::PusSchedServiceHandler;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInVecConverter, HandlingStatus,
MpscTcReceiver, PartialPusHandlingError, PusServiceHelper,
};
use satrs::spacepackets::ecss::PusServiceId;
use satrs::tmtc::PacketAsVec;
use super::DirectPusService;
pub struct SchedulingService {
pub pus_11_handler: PusSchedServiceHandler<
MpscTcReceiver,
mpsc::Sender<PacketAsVec>,
EcssTcInVecConverter,
VerificationReporter,
PusScheduler,
>,
pub sched_tc_pool: StaticMemoryPool,
pub releaser_buf: [u8; 4096],
pub tc_releaser: mpsc::Sender<PacketAsVec>,
}
impl DirectPusService for SchedulingService {
const SERVICE_ID: u8 = PusServiceId::Scheduling as u8;
const SERVICE_STR: &'static str = "scheduling";
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
let error_handler = |partial_error: &PartialPusHandlingError| {
log::warn!(
"PUS {}({}) partial error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
partial_error
);
};
let result = self.pus_11_handler.poll_and_handle_next_tc(
error_handler,
time_stamp,
&mut self.sched_tc_pool,
);
if let Err(e) = result {
log::warn!(
"PUS {}({}) error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
e
);
// To avoid permanent loops.
return HandlingStatus::Empty;
}
match result.unwrap() {
DirectPusPacketHandlerResult::Handled(handling_status) => return handling_status,
DirectPusPacketHandlerResult::CustomSubservice(subservice, _) => {
log::warn!(
"PUS {}({}) subservice {} not implemented",
Self::SERVICE_ID,
Self::SERVICE_STR,
subservice
);
}
DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
log::warn!(
"PUS {}({}) subservice {} not implemented",
Self::SERVICE_ID,
Self::SERVICE_STR,
subservice
);
}
}
HandlingStatus::HandledOne
}
}
impl SchedulingService {
pub fn release_tcs(&mut self) {
let id = self.pus_11_handler.service_helper.id();
let releaser = |enabled: bool, _info: &TcInfo, tc: &[u8]| -> bool {
if enabled {
// Send released TC to centralized TC source.
self.tc_releaser
.send(PacketAsVec::new(id, tc.to_vec()))
.expect("sending TC to TC source failed");
}
true
};
self.pus_11_handler
.scheduler_mut()
.update_time_from_now()
.unwrap();
let released_tcs = self
.pus_11_handler
.scheduler_mut()
.release_telecommands_with_buffer(
releaser,
&mut self.sched_tc_pool,
&mut self.releaser_buf,
)
.expect("releasing TCs failed");
if released_tcs > 0 {
info!("{released_tcs} TC(s) released from scheduler");
}
}
}
pub fn create_scheduler_service(
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
tc_source_sender: mpsc::Sender<PacketAsVec>,
pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
sched_tc_pool: StaticMemoryPool,
) -> SchedulingService {
let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
.expect("Creating PUS Scheduler failed");
let pus_11_handler = PusSchedServiceHandler::new(
PusServiceHelper::new(
PUS_SCHEDULER_SERVICE.id(),
pus_sched_rx,
tm_funnel_tx,
create_verification_reporter(
PUS_SCHEDULER_SERVICE.id(),
PUS_SCHEDULER_SERVICE.apid,
16,
),
EcssTcInVecConverter::default(),
),
scheduler,
);
SchedulingService {
pus_11_handler,
sched_tc_pool,
releaser_buf: [0; 4096],
tc_releaser: tc_source_sender,
}
}
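create_scheduler_service wires the PUS 11 handler to the TM funnel and to the central TC source that receives released telecommands. A hedged wiring sketch, assuming the imports of src/pus/scheduler.rs above and a pre-built sched_tc_pool; the periodic calls mirror the PUS stack further below:

// Illustrative sketch, not repository code.
fn wire_and_poll_scheduler(
    tm_funnel_tx: mpsc::Sender<PacketAsVec>,
    tc_source_tx: mpsc::Sender<PacketAsVec>,
    pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
    sched_tc_pool: StaticMemoryPool,
    time_stamp: &[u8],
) {
    let mut scheduler_srv =
        create_scheduler_service(tm_funnel_tx, tc_source_tx, pus_sched_rx, sched_tc_pool);
    // Release due telecommands to the TC source first, then handle newly received scheduling TCs.
    scheduler_srv.release_tcs();
    scheduler_srv.poll_and_handle_next_tc(time_stamp);
}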


@ -1,75 +1,88 @@
use crate::pus::test::TestCustomServiceWrapper;
use crate::pus::HandlingStatus;
use derive_new::new;
use satrs::spacepackets::time::{cds, TimeWriter};
use super::{
action::ActionServiceWrapper, event::EventServiceWrapper, hk::HkServiceWrapper,
mode::ModeServiceWrapper, scheduler::SchedulingService, DirectPusService, TargetedPusService,
};
#[derive(new)]
pub struct PusStack {
test_srv: TestCustomServiceWrapper,
hk_srv_wrapper: HkServiceWrapper,
event_srv: EventServiceWrapper,
action_srv_wrapper: ActionServiceWrapper,
schedule_srv: SchedulingService,
mode_srv: ModeServiceWrapper,
}
impl PusStack {
pub fn periodic_operation(&mut self) {
// Release all telecommands which reached their release time before calling the service
// handlers.
self.schedule_srv.release_tcs();
let timestamp = cds::CdsTime::now_with_u16_days()
.expect("time stamp generation error")
.to_vec()
.unwrap();
let mut loop_count = 0_u32;
// Hot loop which will run continuously until all request and reply handling is done.
loop {
let mut nothing_to_do = true;
Self::direct_service_checker(&mut self.test_srv, &timestamp, &mut nothing_to_do);
Self::direct_service_checker(&mut self.schedule_srv, &timestamp, &mut nothing_to_do);
Self::direct_service_checker(&mut self.event_srv, &timestamp, &mut nothing_to_do);
Self::targeted_service_checker(
&mut self.action_srv_wrapper,
&timestamp,
&mut nothing_to_do,
);
Self::targeted_service_checker(
&mut self.hk_srv_wrapper,
&timestamp,
&mut nothing_to_do,
);
Self::targeted_service_checker(&mut self.mode_srv, &timestamp, &mut nothing_to_do);
if nothing_to_do {
// Timeout checking is only done once.
self.action_srv_wrapper.check_for_request_timeouts();
self.hk_srv_wrapper.check_for_request_timeouts();
self.mode_srv.check_for_request_timeouts();
break;
}
// Safety mechanism to avoid infinite loops.
loop_count += 1;
if loop_count >= 500 {
log::warn!("reached PUS stack loop count 500, breaking");
break;
}
}
}
pub fn direct_service_checker<S: DirectPusService>(
service: &mut S,
timestamp: &[u8],
nothing_to_do: &mut bool,
) {
let handling_status = service.poll_and_handle_next_tc(timestamp);
if handling_status == HandlingStatus::HandledOne {
*nothing_to_do = false;
}
}
pub fn targeted_service_checker<S: TargetedPusService>(
service: &mut S,
timestamp: &[u8],
nothing_to_do: &mut bool,
) {
let request_handling = service.poll_and_handle_next_tc_default_handler(timestamp);
let reply_handling = service.poll_and_handle_next_reply_default_handler(timestamp);
if request_handling == HandlingStatus::HandledOne
|| reply_handling == HandlingStatus::HandledOne
{
*nothing_to_do = false;
}
}
}


@ -1,104 +1,128 @@
use crate::pus::create_verification_reporter;
use log::info;
use ops_sat_rs::config::components::PUS_TEST_SERVICE;
use ops_sat_rs::config::{tmtc_err, TEST_EVENT};
use satrs::event_man::{EventMessage, EventMessageU32};
use satrs::pus::test::PusService17TestHandler;
use satrs::pus::verification::{FailParams, VerificationReporter, VerificationReportingProvider};
use satrs::pus::{
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInVecConverter, HandlingStatus,
MpscTcReceiver, MpscTmAsVecSender, PartialPusHandlingError, PusServiceHelper,
};
use satrs::queue::GenericSendError;
use satrs::spacepackets::ecss::PusServiceId;
use satrs::tmtc::PacketAsVec;
use std::sync::mpsc;
use super::DirectPusService;
pub fn create_test_service(
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
event_tx: mpsc::SyncSender<EventMessageU32>,
pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
) -> TestCustomServiceWrapper {
let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
PUS_TEST_SERVICE.id(),
pus_test_rx,
tm_funnel_tx,
create_verification_reporter(PUS_TEST_SERVICE.id(), PUS_TEST_SERVICE.apid, 16),
EcssTcInVecConverter::default(),
));
TestCustomServiceWrapper {
handler: pus17_handler,
event_tx,
}
}
pub struct TestCustomServiceWrapper {
pub handler: PusService17TestHandler<
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
VerificationReporter,
>,
pub event_tx: mpsc::SyncSender<EventMessageU32>,
}
impl DirectPusService for TestCustomServiceWrapper {
const SERVICE_ID: u8 = PusServiceId::Test as u8;
const SERVICE_STR: &'static str = "test";
fn poll_and_handle_next_tc(&mut self, timestamp: &[u8]) -> HandlingStatus {
let error_handler = |partial_error: &PartialPusHandlingError| {
log::warn!(
"PUS {}({}) partial error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
partial_error
);
};
let res = self
.handler
.poll_and_handle_next_tc(error_handler, timestamp);
if let Err(e) = res {
log::warn!(
"PUS {}({}) error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
e
);
// To avoid permanent loops.
return HandlingStatus::Empty;
}
match res.unwrap() {
DirectPusPacketHandlerResult::Handled(handling_status) => {
if handling_status == HandlingStatus::HandledOne {
info!("Received PUS ping command TC[17,1]");
info!("Sent ping reply PUS TM[17,2]");
}
return handling_status;
}
DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
log::warn!(
"PUS {}({}) subservice {} not implemented",
Self::SERVICE_ID,
Self::SERVICE_STR,
subservice
);
}
DirectPusPacketHandlerResult::CustomSubservice(subservice, token) => {
if subservice == 128 {
info!("generating test event");
if let Err(e) = self
.event_tx
.send(EventMessage::new(PUS_TEST_SERVICE.id(), TEST_EVENT.into()))
.map_err(|_| GenericSendError::RxDisconnected)
{
// This really should not happen, but avoid panicking.
log::warn!("failed to send test event: {:?}", e);
}
match self.handler.service_helper.verif_reporter().start_success(
self.handler.service_helper.tm_sender(),
token,
timestamp,
) {
Ok(started_token) => {
if let Err(e) = self
.handler
.service_helper
.verif_reporter()
.completion_success(
self.handler.service_helper.tm_sender(),
started_token,
timestamp,
)
{
error_handler(&PartialPusHandlingError::Verification(e));
}
}
Err(e) => {
error_handler(&PartialPusHandlingError::Verification(e));
}
}
} else {
let fail_data = [subservice];
self.handler
.service_helper
.verif_reporter()
@ -106,7 +130,7 @@ impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
self.handler.service_helper.tm_sender(),
token,
FailParams::new(
timestamp,
&tmtc_err::INVALID_PUS_SUBSERVICE,
&fail_data,
),
@ -114,10 +138,7 @@ impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
.expect("Sending start failure verification failed");
}
}
}
HandlingStatus::HandledOne
}
}
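The test service answers a ping TC[17,1] with TM[17,2] and maps custom subservice 128 to a test event. A hedged sketch of building such a ping packet, mirroring the PusTcCreator usage from the mode service tests above; the APID value is a placeholder:

// Illustrative sketch, not repository code.
use satrs::spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
use satrs::spacepackets::SpHeader;

fn build_ping_tc(apid: u16) -> PusTcCreator<'static> {
    let sp_header = SpHeader::new_for_unseg_tc(apid, 0, 0);
    let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
    // The ping command carries no application data; subservice 128 would trigger the test event instead.
    PusTcCreator::new(sp_header, sec_header, &[], true)
}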


@ -10,7 +10,7 @@ use satrs::mode::ModeRequest;
use satrs::pus::verification::{
FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{ActiveRequestProvider, EcssTmSender, GenericRoutingError, PusRequestRouter};
use satrs::queue::GenericSendError;
use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
@ -49,7 +49,7 @@ impl GenericRequestRouter {
active_request: &impl ActiveRequestProvider,
tc: &PusTcReader,
error: GenericRoutingError,
tm_sender: &(impl EcssTmSender + ?Sized),
verif_reporter: &impl VerificationReportingProvider,
time_stamp: &[u8],
) {


@ -1,109 +0,0 @@
use std::{
collections::HashMap,
sync::mpsc::{self},
};
use log::info;
use satrs::pus::PusTmAsVec;
use satrs::{
seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore},
spacepackets::{
ecss::{tm::PusTmZeroCopyWriter, PusPacket},
time::cds::MIN_CDS_FIELD_LEN,
CcsdsPacket,
},
};
use crate::interface::tcp::SyncTcpTmSource;
#[derive(Default)]
pub struct CcsdsSeqCounterMap {
apid_seq_counter_map: HashMap<u16, CcsdsSimpleSeqCountProvider>,
}
impl CcsdsSeqCounterMap {
pub fn get_and_increment(&mut self, apid: u16) -> u16 {
self.apid_seq_counter_map
.entry(apid)
.or_default()
.get_and_increment()
}
}
pub struct TmFunnelCommon {
seq_counter_map: CcsdsSeqCounterMap,
msg_counter_map: HashMap<u8, u16>,
sync_tm_tcp_source: SyncTcpTmSource,
}
impl TmFunnelCommon {
pub fn new(sync_tm_tcp_source: SyncTcpTmSource) -> Self {
Self {
seq_counter_map: Default::default(),
msg_counter_map: Default::default(),
sync_tm_tcp_source,
}
}
// Applies common packet processing operations for PUS TM packets. This includes setting
// a sequence counter
fn apply_packet_processing(&mut self, mut zero_copy_writer: PusTmZeroCopyWriter) {
// zero_copy_writer.set_apid(PUS_APID);
zero_copy_writer.set_seq_count(
self.seq_counter_map
.get_and_increment(zero_copy_writer.apid()),
);
let entry = self
.msg_counter_map
.entry(zero_copy_writer.service())
.or_insert(0);
zero_copy_writer.set_msg_count(*entry);
if *entry == u16::MAX {
*entry = 0;
} else {
*entry += 1;
}
Self::packet_printout(&zero_copy_writer);
// This operation has to come last!
zero_copy_writer.finish();
}
fn packet_printout(tm: &PusTmZeroCopyWriter) {
info!("Sending PUS TM[{},{}]", tm.service(), tm.subservice());
}
}
pub struct TmFunnelDynamic {
common: TmFunnelCommon,
tm_funnel_rx: mpsc::Receiver<PusTmAsVec>,
tm_server_tx: mpsc::Sender<PusTmAsVec>,
}
impl TmFunnelDynamic {
pub fn new(
sync_tm_tcp_source: SyncTcpTmSource,
tm_funnel_rx: mpsc::Receiver<PusTmAsVec>,
tm_server_tx: mpsc::Sender<PusTmAsVec>,
) -> Self {
Self {
common: TmFunnelCommon::new(sync_tm_tcp_source),
tm_funnel_rx,
tm_server_tx,
}
}
pub fn operation(&mut self) {
if let Ok(mut tm) = self.tm_funnel_rx.recv() {
// Read the TM, set sequence counter and message counter, and finally update
// the CRC.
let zero_copy_writer = PusTmZeroCopyWriter::new(&mut tm.packet, MIN_CDS_FIELD_LEN)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
self.common.sync_tm_tcp_source.add_tm(&tm.packet);
self.tm_server_tx
.send(tm)
.expect("Sending TM to server failed");
}
}
}


@ -1,94 +0,0 @@
use crate::pus::PusReceiver;
use satrs::pool::{StoreAddr, StoreError};
use satrs::pus::{EcssTcAndToken, MpscTmAsVecSender};
use satrs::spacepackets::ecss::PusPacket;
use satrs::{
pus::ReceivesEcssPusTc,
spacepackets::{ecss::tc::PusTcReader, SpHeader},
tmtc::ReceivesCcsdsTc,
};
use std::sync::mpsc::{self, SendError, Sender, TryRecvError};
use thiserror::Error;
#[derive(Debug, Clone, PartialEq, Eq, Error)]
pub enum MpscStoreAndSendError {
#[error("Store error: {0}")]
Store(#[from] StoreError),
#[error("TC send error: {0}")]
TcSend(#[from] SendError<EcssTcAndToken>),
#[error("TMTC send error: {0}")]
TmTcSend(#[from] SendError<StoreAddr>),
}
// Newtype, can not implement necessary traits on MPSC sender directly because of orphan rules.
#[derive(Clone)]
pub struct PusTcSourceProviderDynamic(pub Sender<Vec<u8>>);
impl ReceivesEcssPusTc for PusTcSourceProviderDynamic {
type Error = SendError<Vec<u8>>;
fn pass_pus_tc(&mut self, _: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error> {
self.0.send(pus_tc.raw_data().to_vec())?;
Ok(())
}
}
impl ReceivesCcsdsTc for PusTcSourceProviderDynamic {
type Error = mpsc::SendError<Vec<u8>>;
fn pass_ccsds(&mut self, _: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> {
self.0.send(tc_raw.to_vec())?;
Ok(())
}
}
// TC source components where the heap is the backing memory of the received telecommands.
pub struct TcSourceTaskDynamic {
pub tc_receiver: mpsc::Receiver<Vec<u8>>,
pus_receiver: PusReceiver<MpscTmAsVecSender>,
}
impl TcSourceTaskDynamic {
pub fn new(
tc_receiver: mpsc::Receiver<Vec<u8>>,
pus_receiver: PusReceiver<MpscTmAsVecSender>,
) -> Self {
Self {
tc_receiver,
pus_receiver,
}
}
pub fn periodic_operation(&mut self) {
self.poll_tc();
}
pub fn poll_tc(&mut self) -> bool {
match self.tc_receiver.try_recv() {
Ok(tc) => match PusTcReader::new(&tc) {
Ok((pus_tc, _)) => {
self.pus_receiver
.handle_tc_packet(
satrs::pus::TcInMemory::Vec(tc.clone()),
pus_tc.service(),
&pus_tc,
)
.ok();
true
}
Err(e) => {
log::warn!("error creating PUS TC from raw data: {e}");
log::warn!("raw data: {:x?}", tc);
true
}
},
Err(e) => match e {
TryRecvError::Empty => false,
TryRecvError::Disconnected => {
log::warn!("tmtc thread: sender disconnected");
false
}
},
}
}
}

src/tmtc/mod.rs Normal file
@ -0,0 +1,2 @@
pub mod tc_source;
pub mod tm_sink;

src/tmtc/tc_source.rs Normal file
@ -0,0 +1,50 @@
use std::sync::mpsc::{self, TryRecvError};
use satrs::{
pus::{HandlingStatus, MpscTmAsVecSender},
tmtc::PacketAsVec,
};
use crate::pus::PusTcDistributor;
// TC source components where the heap is the backing memory of the received telecommands.
pub struct TcSourceTaskDynamic {
pub tc_receiver: mpsc::Receiver<PacketAsVec>,
pus_distrib: PusTcDistributor<MpscTmAsVecSender>,
}
impl TcSourceTaskDynamic {
pub fn new(
tc_receiver: mpsc::Receiver<PacketAsVec>,
pus_receiver: PusTcDistributor<MpscTmAsVecSender>,
) -> Self {
Self {
tc_receiver,
pus_distrib: pus_receiver,
}
}
pub fn periodic_operation(&mut self) {
self.poll_tc();
}
pub fn poll_tc(&mut self) -> HandlingStatus {
// Right now, we only expect PUS packets. If any other protocols like CFDP are added at
// a later stage, we probably need to check for the APID before routing the packet.
match self.tc_receiver.try_recv() {
Ok(packet_with_sender) => {
self.pus_distrib
.handle_tc_packet(packet_with_sender.sender_id, packet_with_sender.packet)
.ok();
HandlingStatus::HandledOne
}
Err(e) => match e {
TryRecvError::Empty => HandlingStatus::Empty,
TryRecvError::Disconnected => {
log::warn!("tmtc thread: sender disconnected");
HandlingStatus::Empty
}
},
}
}
}
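The comment in poll_tc notes that APID checking would be needed once protocols other than PUS (for example CFDP) are routed here. A hedged sketch of such a dispatch step, with a hypothetical CFDP_APID constant and handler:

// Illustrative sketch, not repository code.
const CFDP_APID: u16 = 0x2FF; // hypothetical APID value
fn route_by_apid(pus_distrib: &mut PusTcDistributor<MpscTmAsVecSender>, packet: PacketAsVec) {
    if packet.packet.len() < 2 {
        return;
    }
    // CCSDS APID: lower 11 bits of the first two primary header bytes.
    let apid = u16::from_be_bytes([packet.packet[0], packet.packet[1]]) & 0x7FF;
    if apid == CFDP_APID {
        // Hand the packet to a (hypothetical) CFDP handler here instead.
    } else {
        pus_distrib
            .handle_tc_packet(packet.sender_id, packet.packet)
            .ok();
    }
}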

src/tmtc/tm_sink.rs Normal file
@ -0,0 +1,139 @@
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::{collections::HashMap, sync::mpsc};
use log::info;
use ops_sat_rs::config::tasks::STOP_CHECK_FREQUENCY;
use satrs::tmtc::PacketAsVec;
use satrs::{
seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore},
spacepackets::{
ecss::{tm::PusTmZeroCopyWriter, PusPacket},
time::cds::MIN_CDS_FIELD_LEN,
CcsdsPacket,
},
};
use crate::interface::tcp_server::SyncTcpTmSource;
#[derive(Default)]
pub struct CcsdsSeqCounterMap {
apid_seq_counter_map: HashMap<u16, CcsdsSimpleSeqCountProvider>,
}
impl CcsdsSeqCounterMap {
pub fn get_and_increment(&mut self, apid: u16) -> u16 {
self.apid_seq_counter_map
.entry(apid)
.or_default()
.get_and_increment()
}
}
pub struct TmFunnelCommon {
seq_counter_map: CcsdsSeqCounterMap,
msg_counter_map: HashMap<u8, u16>,
sync_tm_tcp_source: SyncTcpTmSource,
}
impl TmFunnelCommon {
pub fn new(sync_tm_tcp_source: SyncTcpTmSource) -> Self {
Self {
seq_counter_map: Default::default(),
msg_counter_map: Default::default(),
sync_tm_tcp_source,
}
}
// Applies common packet processing operations for PUS TM packets. This includes setting
// a sequence counter
fn apply_packet_processing(&mut self, mut zero_copy_writer: PusTmZeroCopyWriter) {
// zero_copy_writer.set_apid(PUS_APID);
zero_copy_writer.set_seq_count(
self.seq_counter_map
.get_and_increment(zero_copy_writer.apid()),
);
let entry = self
.msg_counter_map
.entry(zero_copy_writer.service())
.or_insert(0);
zero_copy_writer.set_msg_count(*entry);
if *entry == u16::MAX {
*entry = 0;
} else {
*entry += 1;
}
Self::packet_printout(&zero_copy_writer);
// This operation has to come last!
zero_copy_writer.finish();
}
fn packet_printout(tm: &PusTmZeroCopyWriter) {
info!("Sending PUS TM[{},{}]", tm.service(), tm.subservice());
}
}
pub struct TmFunnelDynamic {
common: TmFunnelCommon,
tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
tm_udp_server_tx: mpsc::Sender<PacketAsVec>,
tm_tcp_client_tx: mpsc::Sender<PacketAsVec>,
stop_signal: Arc<AtomicBool>,
}
impl TmFunnelDynamic {
pub fn new(
sync_tm_tcp_source: SyncTcpTmSource,
tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
tm_udp_server_tx: mpsc::Sender<PacketAsVec>,
tm_tcp_client_tx: mpsc::Sender<PacketAsVec>,
stop_signal: Arc<AtomicBool>,
) -> Self {
Self {
common: TmFunnelCommon::new(sync_tm_tcp_source),
tm_funnel_rx,
tm_udp_server_tx,
tm_tcp_client_tx,
stop_signal,
}
}
pub fn operation(&mut self) {
loop {
match self.tm_funnel_rx.recv_timeout(STOP_CHECK_FREQUENCY) {
Ok(mut tm) => {
// Read the TM, set sequence counter and message counter, and finally update
// the CRC.
let zero_copy_writer =
PusTmZeroCopyWriter::new(&mut tm.packet, MIN_CDS_FIELD_LEN)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
self.common.sync_tm_tcp_source.add_tm(&tm.packet);
let result = self.tm_udp_server_tx.send(tm.clone());
if result.is_err() {
log::error!("TM UDP server has disconnected");
}
let result = self.tm_tcp_client_tx.send(tm);
if result.is_err() {
log::error!("TM TCP client has disconnected");
}
if self.stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
break;
}
}
Err(e) => match e {
mpsc::RecvTimeoutError::Timeout => {
if self.stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
break;
}
}
mpsc::RecvTimeoutError::Disconnected => {
log::warn!("All TM funnel senders have disconnected");
break;
}
},
}
}
}
}

templates/exp278.toml Normal file
@ -0,0 +1,5 @@
# This configuration file should either be inside the (experiment) home folder or in the current
# folder the application is run from.
# On the small flatsat, change this to 9999.
tcp_spp_server_port = 4096
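The template only sets the TCP SPP server port. A hedged sketch of how such a file could be parsed with the serde and toml crates; the AppConfig struct and the lookup order are assumptions, not code from this repository:

// Illustrative sketch, not repository code.
use serde::Deserialize;

#[derive(Deserialize)]
struct AppConfig {
    tcp_spp_server_port: u16,
}

fn parse_config(raw: &str) -> AppConfig {
    // The application would look for the file in the experiment home folder first,
    // then in the current working directory, as described above.
    toml::from_str(raw).expect("parsing config file failed")
}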

Binary file not shown.