95 Commits

Author SHA1 Message Date
60e4af435a print version str at program start 2024-04-24 20:55:14 +02:00
17c9b8694d Merge pull request 'prep first full release' (#19) from prepare-init-version into main
Reviewed-on: #19
2024-04-24 20:46:37 +02:00
7468fe9845 prep first full release 2024-04-24 20:43:23 +02:00
404d1c1c1e Merge pull request 'clean up python code a bit' (#18) from some-python-client-cleanup into main
Reviewed-on: #18
2024-04-24 20:41:02 +02:00
597f4ca977 some more cleaning up 2024-04-24 20:40:51 +02:00
879a50f79e pytmtc is a package now 2024-04-24 20:35:59 +02:00
380b36f1de clean up python code a bit 2024-04-24 20:15:32 +02:00
03c1dc8d64 Merge pull request 'add event management, small fix for CAM handler loop' (#17) from events-small-bugfix into main
Reviewed-on: #17
2024-04-24 20:04:01 +02:00
dcaf67d6d0 add event management, small fix for CAM handler loop 2024-04-24 20:02:42 +02:00
707843ec9f Merge pull request 'camera_tests' (#13) from camera_tests into main
Reviewed-on: #13
2024-04-24 17:43:41 +02:00
9b047ac0d5 Merge remote-tracking branch 'origin/main' into camera_tests 2024-04-24 17:39:23 +02:00
638abf4259 Merge pull request 'Add HK and mode service' (#16) from add-hk-and-mode-service into main
Reviewed-on: #16
Reviewed-by: lkoester <st167799@stud.uni-stuttgart.de>
2024-04-24 17:34:03 +02:00
b6d3bb7712 Merge branch 'main' into add-hk-and-mode-service 2024-04-24 17:10:56 +02:00
4af249612d Merge pull request 'add scheduler service' (#15) from add-scheduler-service into main
Reviewed-on: #15
Reviewed-by: lkoester <st167799@stud.uni-stuttgart.de>
2024-04-24 17:10:44 +02:00
8c6f100c06 Merge branch 'main' into add-scheduler-service 2024-04-24 17:10:33 +02:00
ccf8048ce0 Merge pull request 'updated pyclient' (#14) from update-pyclient into main
Reviewed-on: #14
Reviewed-by: lkoester <st167799@stud.uni-stuttgart.de>
2024-04-24 17:10:27 +02:00
6f3e14af3b fmt and clippy 2024-04-24 16:45:38 +02:00
83322ae415 added debug output in action reply handler and tm handling in tmtcpy, ready to merge into main 2024-04-24 16:43:59 +02:00
09bef401c0 added data reply to camera handler, now only missing tmtcpy counterpart 2024-04-24 16:26:54 +02:00
e3ad841d04 added HK and mode service 2024-04-23 16:49:06 +02:00
511214f903 add scheduler service 2024-04-23 16:14:16 +02:00
f6a6b005af updated pyclient 2024-04-23 15:00:00 +02:00
88d4384beb this should be good for now 2024-04-22 16:56:49 +02:00
1e867a51f5 merge main 2024-04-22 15:55:47 +02:00
028de494e4 got basic action stuff running, now to make error handling better 2024-04-22 15:47:25 +02:00
ffeb7951a8 Merge pull request 'Client tests' (#12) from add-tcp-client-unittests into main
Reviewed-on: #12
2024-04-22 15:41:56 +02:00
e82139ac91 README 2024-04-22 15:41:28 +02:00
51473e7060 that should suffice 2024-04-22 15:38:53 +02:00
9c74246eb3 added test stub 2024-04-22 10:39:44 +02:00
96d5802c4f added tmtc test 2024-04-22 10:38:18 +02:00
627dd64eca Merge branch 'main' into add-tcp-client-unittests 2024-04-20 11:23:08 +02:00
cd428577fa Merge pull request 'Commanding docs' (#11) from update-commanding-docs into main
Reviewed-on: #11
2024-04-20 11:22:28 +02:00
ee29961f62 add first client unittests 2024-04-20 11:21:02 +02:00
1501e5a421 fnish README update 2024-04-20 00:31:53 +02:00
0f391c2087 some minor cleaning up plus initial image file handling 2024-04-19 22:05:57 +02:00
dd831fb1b6 larger img 2024-04-19 18:10:17 +02:00
0573d81e57 docs 2024-04-19 18:09:52 +02:00
fae2e90a65 add some docs 2024-04-19 18:04:27 +02:00
5f69f14652 Merge pull request 'Networking update' (#10) from networking-update into main
Reviewed-on: #10
2024-04-19 17:42:11 +02:00
ddac5ceab3 Networking update 2024-04-19 17:40:38 +02:00
4f94e9cade fixed action tests 2024-04-19 17:11:38 +02:00
0da70ab5ac Merge pull request 'camera_merge_main' (#7) from camera_merge_main into main
Reviewed-on: #7
2024-04-19 11:45:24 +02:00
c6ef1394c9 removed old low level camera implementations 2024-04-19 11:40:22 +02:00
d7acd93ee9 Merge pull request 'Use releases dependencies' (#8) from use-released-dependencies into main
Reviewed-on: #8
2024-04-18 14:08:17 +02:00
8ecd6f2847 bump version again 2024-04-18 14:08:03 +02:00
f18ae0e165 this works 2024-04-17 16:40:13 +02:00
cba2767272 use released dependencies 2024-04-17 16:29:59 +02:00
22584c3f9c changed default camera parameters to constants 2024-04-17 13:03:31 +02:00
9dbdd1ebbc Merge branch 'camera_merge_main' of https://egit.irs.uni-stuttgart.de/rust/ops-sat-rs into camera_merge_main 2024-04-17 10:39:56 +02:00
f960f24415 removed pus tm handler generic for camera handler 2024-04-17 10:38:40 +02:00
5b70bbf173 remove log file 2024-04-16 17:13:46 +02:00
d9720c9ff2 remove leftover files, fmt and clippy 2024-04-16 17:13:14 +02:00
2d3a4cd90c merged main into branch 2024-04-16 16:03:06 +02:00
649e903c0a first spring of camera impl done, testing up next 2024-04-16 15:44:40 +02:00
7da9e2364b create logs in subfolder 2024-04-16 15:08:34 +02:00
45930a104b Merge branch 'main' of https://egit.irs.uni-stuttgart.de/rust/ops-sat-rs 2024-04-16 15:03:18 +02:00
e0c583cca8 better logging handling 2024-04-16 15:03:10 +02:00
2c3a3930fc Merge pull request 'MIO TCP client' (#6) from mio-tcp-client into main
Reviewed-on: #6
Reviewed-by: lkoester <st167799@stud.uni-stuttgart.de>
2024-04-16 14:50:36 +02:00
8ce305491b extend mio client to allow reconnection 2024-04-16 13:08:10 +02:00
b5e048a13b start TM handling for TCP client 2024-04-16 10:05:27 +02:00
b359ff9d33 updated TCP code 2024-04-16 09:59:31 +02:00
efe686becf initial camera handling things 2024-04-16 08:30:55 +02:00
192e701785 MIO tcp client 2024-04-15 16:42:48 +02:00
8313a0b26c another small update 2024-04-15 14:13:10 +02:00
df72676c0d cleaned up code and bumped sat-rs 2024-04-15 12:16:01 +02:00
6dddfd5a70 start with mio tcp client 2024-04-13 15:16:53 +02:00
d9629dee38 Merge pull request 'Add action service and controller component' (#5) from add-action-service-controller-obj into main
Reviewed-on: #5
2024-04-13 11:19:13 +02:00
99842e2a13 Merge branch 'main' into add-action-service-controller-obj 2024-04-13 11:00:59 +02:00
9da9cf5b1f Merge pull request 'Start adding stop logic' (#3) from stop-logic into main
Reviewed-on: #3
2024-04-13 10:56:45 +02:00
1d92084e65 more improvements 2024-04-10 17:13:29 +02:00
d0835f9393 some more fixes and improvements 2024-04-10 17:03:56 +02:00
5cc561cbad important bugfix for PUS stack 2024-04-10 15:50:02 +02:00
5c0b1a3256 some minor fixes for python client 2024-04-10 15:44:39 +02:00
ec9a042f09 fixes for pyclient APID 2024-04-10 15:39:53 +02:00
5d87cab9cc implemented action req handling 2024-04-10 15:37:24 +02:00
443995fe5e smaller fixes 2024-04-10 15:05:24 +02:00
47eba99da1 Merge branch 'stop-logic' into add-action-service-controller-obj 2024-04-10 15:03:21 +02:00
7d7cd99d6a Merge remote-tracking branch 'origin/main' into stop-logic 2024-04-10 15:02:59 +02:00
6b89f00d90 Merge branch 'stop-logic' into add-action-service-controller-obj 2024-04-10 15:02:45 +02:00
35be75afa3 Merge pull request 'can_pus_handler' (#4) from can_pus_handler into main
Reviewed-on: #4
Reviewed-by: Robin Müller <muellerr@irs.uni-stuttgart.de>
2024-04-10 15:01:46 +02:00
d86be82447 Merge branch 'main' into can_pus_handler 2024-04-10 15:01:27 +02:00
59a06b5c50 add action service and controller obj 2024-04-10 14:59:34 +02:00
ebd3514dec delete cached json file 2024-04-10 14:31:25 +02:00
62d4572f31 Merge remote-tracking branch 'origin/main' into stop-logic 2024-04-10 14:26:14 +02:00
31b68dd041 update default tmtc config handling 2024-04-10 14:25:52 +02:00
88d1956dbf cargo fmt + clippy 2024-04-10 12:51:15 +02:00
02b4a51457 Merge remote-tracking branch 'origin/main' into can_pus_handler 2024-04-10 12:50:31 +02:00
dc66dcd469 it works 2024-04-10 12:47:26 +02:00
458759a1df Merge branch 'main' into stop-logic 2024-04-09 18:11:15 +02:00
ef580e5634 added empty can file 2024-04-09 17:51:42 +02:00
710fc94384 start adding stop logic 2024-04-09 17:07:39 +02:00
2480ee6e06 README 2024-04-09 15:59:22 +02:00
526739fd9c update cargo.lock file 2024-04-09 15:40:23 +02:00
aac2ad206c typo in readme 2024-04-09 11:34:02 +02:00
406687d6d8 added tmtc and interface directories 2024-04-09 11:32:40 +02:00
52 changed files with 5922 additions and 808 deletions

5
.gitignore vendored

@ -4,4 +4,7 @@
/.vscode
# Ignore this, may include user specific paths.
/.cargo/config.toml
output.log
# Ignore logs folder generated by the application.
/logs
/exp278.toml

12
CHANGELOG.md Normal file

@ -0,0 +1,12 @@
Change Log
=======
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
# [v0.1.0] 2024-04-24
Initial release with PUS stack, TM sink, TC source, TMTC TCP/IP infrastructure
and camera handler.

572
Cargo.lock generated

@ -15,10 +15,19 @@ dependencies = [
]
[[package]]
name = "allocator-api2"
version = "0.2.16"
name = "aho-corasick"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
[[package]]
name = "allocator-api2"
version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
[[package]]
name = "android-tzdata"
@ -35,6 +44,54 @@ dependencies = [
"libc",
]
[[package]]
name = "anstream"
version = "0.6.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb"
dependencies = [
"anstyle",
"anstyle-parse",
"anstyle-query",
"anstyle-wincon",
"colorchoice",
"utf8parse",
]
[[package]]
name = "anstyle"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc"
[[package]]
name = "anstyle-parse"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648"
dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "anstyle-wincon"
version = "3.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7"
dependencies = [
"anstyle",
"windows-sys 0.52.0",
]
[[package]]
name = "array-init"
version = "0.0.4"
@ -81,9 +138,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "cc"
version = "1.0.92"
version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2678b2e3449475e95b0aa6f9b506a28e61b3dc8996592b983695e8ebb58a8b41"
checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b"
[[package]]
name = "cfg-if"
@ -93,9 +150,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chrono"
version = "0.4.37"
version = "0.4.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e"
checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401"
dependencies = [
"android-tzdata",
"iana-time-zone",
@ -103,13 +160,20 @@ dependencies = [
"num-traits",
"serde",
"wasm-bindgen",
"windows-targets 0.52.4",
"windows-targets 0.52.5",
]
[[package]]
name = "cobs"
version = "0.2.3"
source = "git+https://github.com/robamu/cobs.rs.git?branch=all_features#c70a7f30fd00a7cbdb7666dec12b437977385d40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15"
[[package]]
name = "colorchoice"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "core-foundation-sys"
@ -187,7 +251,7 @@ checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.58",
"syn 2.0.60",
]
[[package]]
@ -202,6 +266,29 @@ version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"
[[package]]
name = "env_filter"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea"
dependencies = [
"log",
"regex",
]
[[package]]
name = "env_logger"
version = "0.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9"
dependencies = [
"anstream",
"anstyle",
"env_filter",
"humantime",
"log",
]
[[package]]
name = "equivalent"
version = "1.0.1"
@ -217,6 +304,95 @@ dependencies = [
"log",
]
[[package]]
name = "futures"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0"
dependencies = [
"futures-channel",
"futures-core",
"futures-executor",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78"
dependencies = [
"futures-core",
"futures-sink",
]
[[package]]
name = "futures-core"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
[[package]]
name = "futures-executor"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d"
dependencies = [
"futures-core",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-io"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"
[[package]]
name = "futures-macro"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.60",
]
[[package]]
name = "futures-sink"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5"
[[package]]
name = "futures-task"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
[[package]]
name = "futures-util"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-macro",
"futures-sink",
"futures-task",
"memchr",
"pin-project-lite",
"pin-utils",
"slab",
]
[[package]]
name = "hashbrown"
version = "0.14.3"
@ -239,6 +415,26 @@ version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
[[package]]
name = "homedir"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22074da8bba2ef26fc1737ae6c777b5baab5524c2dc403b5c6a76166766ccda5"
dependencies = [
"cfg-if",
"nix",
"serde",
"widestring",
"windows-sys 0.48.0",
"wmi",
]
[[package]]
name = "humantime"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]]
name = "iana-time-zone"
version = "0.1.60"
@ -317,6 +513,40 @@ version = "2.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d"
[[package]]
name = "memoffset"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4"
dependencies = [
"autocfg",
]
[[package]]
name = "mio"
version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
dependencies = [
"libc",
"log",
"wasi",
"windows-sys 0.48.0",
]
[[package]]
name = "nix"
version = "0.26.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b"
dependencies = [
"bitflags",
"cfg-if",
"libc",
"memoffset",
"pin-utils",
]
[[package]]
name = "nodrop"
version = "0.1.14"
@ -360,7 +590,7 @@ dependencies = [
"proc-macro-crate",
"proc-macro2",
"quote",
"syn 2.0.58",
"syn 2.0.60",
]
[[package]]
@ -375,14 +605,22 @@ version = "0.1.0"
dependencies = [
"chrono",
"derive-new",
"env_logger",
"fern",
"homedir",
"humantime",
"lazy_static",
"log",
"mio",
"num_enum",
"satrs",
"satrs-mib",
"serde",
"serde_json",
"socket2",
"strum",
"thiserror",
"toml",
]
[[package]]
@ -404,29 +642,41 @@ version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c"
[[package]]
name = "pin-project-lite"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "proc-macro-crate"
version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284"
dependencies = [
"toml_edit",
"toml_edit 0.21.1",
]
[[package]]
name = "proc-macro2"
version = "1.0.79"
version = "1.0.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e"
checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.35"
version = "1.0.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
dependencies = [
"proc-macro2",
]
@ -440,6 +690,35 @@ dependencies = [
"bitflags",
]
[[package]]
name = "regex"
version = "1.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56"
[[package]]
name = "rustversion"
version = "1.0.15"
@ -454,8 +733,9 @@ checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1"
[[package]]
name = "satrs"
version = "0.2.0-rc.0"
source = "git+https://egit.irs.uni-stuttgart.de/rust/sat-rs.git?branch=main#0fec99402803b69fbba8ac7537197e0c7f1c836f"
version = "0.2.0-rc.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2adc1d9369e3f7e21dabb3181e36c914d1a3f68f4900207a2baa129c2fd5baba"
dependencies = [
"bus",
"cobs",
@ -465,6 +745,7 @@ dependencies = [
"downcast-rs",
"dyn-clone",
"hashbrown",
"mio",
"num-traits",
"num_enum",
"paste",
@ -478,8 +759,9 @@ dependencies = [
[[package]]
name = "satrs-mib"
version = "0.1.1"
source = "git+https://egit.irs.uni-stuttgart.de/rust/sat-rs.git?branch=main#0fec99402803b69fbba8ac7537197e0c7f1c836f"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "170db5f618bb76aa1d0078def1929ff5f62a20db602955fd8e1f9920424d33a8"
dependencies = [
"csv",
"satrs-mib-codegen",
@ -490,18 +772,20 @@ dependencies = [
[[package]]
name = "satrs-mib-codegen"
version = "0.1.1"
source = "git+https://egit.irs.uni-stuttgart.de/rust/sat-rs.git?branch=main#0fec99402803b69fbba8ac7537197e0c7f1c836f"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "474b583ffa521fbc362ae71da11c9fe29a6b60af47744e067550b6eef4f60d43"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.58",
"syn 2.0.60",
]
[[package]]
name = "satrs-shared"
version = "0.1.3"
source = "git+https://egit.irs.uni-stuttgart.de/rust/sat-rs.git?branch=main#0fec99402803b69fbba8ac7537197e0c7f1c836f"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6042477018c2d43fffccaaa5099bc299a58485139b4d31c5b276889311e474f1"
dependencies = [
"serde",
"spacepackets",
@ -509,9 +793,9 @@ dependencies = [
[[package]]
name = "serde"
version = "1.0.197"
version = "1.0.198"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2"
checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc"
dependencies = [
"serde_derive",
]
@ -529,13 +813,42 @@ dependencies = [
[[package]]
name = "serde_derive"
version = "1.0.197"
version = "1.0.198"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.58",
"syn 2.0.60",
]
[[package]]
name = "serde_json"
version = "1.0.116"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "serde_spanned"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1"
dependencies = [
"serde",
]
[[package]]
name = "slab"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
dependencies = [
"autocfg",
]
[[package]]
@ -560,14 +873,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871"
dependencies = [
"libc",
"windows-sys",
"windows-sys 0.52.0",
]
[[package]]
name = "spacepackets"
version = "0.11.0-rc.2"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2cfd5f9a4c7f10714d21f9bc61f2d176cb7ae092cdd687e7ade2d4e6f7d7125"
checksum = "fa9f4d7df5fa3bc25ecfc95f1f612fc3d16c566df538d3d3c82db0e523096216"
dependencies = [
"chrono",
"crc",
@ -598,7 +911,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
"syn 2.0.58",
"syn 2.0.60",
]
[[package]]
@ -614,9 +927,9 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.58"
version = "2.0.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687"
checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3"
dependencies = [
"proc-macro2",
"quote",
@ -625,22 +938,34 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.58"
version = "1.0.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297"
checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.58"
version = "1.0.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.58",
"syn 2.0.60",
]
[[package]]
name = "toml"
version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3"
dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
"toml_edit 0.22.12",
]
[[package]]
@ -648,6 +973,9 @@ name = "toml_datetime"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1"
dependencies = [
"serde",
]
[[package]]
name = "toml_edit"
@ -657,7 +985,20 @@ checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1"
dependencies = [
"indexmap",
"toml_datetime",
"winnow",
"winnow 0.5.40",
]
[[package]]
name = "toml_edit"
version = "0.22.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef"
dependencies = [
"indexmap",
"serde",
"serde_spanned",
"toml_datetime",
"winnow 0.6.6",
]
[[package]]
@ -666,12 +1007,24 @@ version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "utf8parse"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
version = "0.2.92"
@ -693,7 +1046,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
"syn 2.0.58",
"syn 2.0.60",
"wasm-bindgen-shared",
]
@ -715,7 +1068,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.58",
"syn 2.0.60",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@ -726,13 +1079,62 @@ version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96"
[[package]]
name = "widestring"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311"
[[package]]
name = "windows"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be"
dependencies = [
"windows-core",
"windows-implement",
"windows-interface",
"windows-targets 0.52.5",
]
[[package]]
name = "windows-core"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
dependencies = [
"windows-targets 0.52.4",
"windows-targets 0.52.5",
]
[[package]]
name = "windows-implement"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12168c33176773b86799be25e2a2ba07c7aab9968b37541f1094dbd7a60c8946"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.60",
]
[[package]]
name = "windows-interface"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d8dc32e0095a7eeccebd0e3f09e9509365ecb3fc6ac4d6f5f14a3f6392942d1"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.60",
]
[[package]]
name = "windows-sys"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
dependencies = [
"windows-targets 0.48.5",
]
[[package]]
@ -741,7 +1143,7 @@ version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets 0.52.4",
"windows-targets 0.52.5",
]
[[package]]
@ -761,17 +1163,18 @@ dependencies = [
[[package]]
name = "windows-targets"
version = "0.52.4"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b"
checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb"
dependencies = [
"windows_aarch64_gnullvm 0.52.4",
"windows_aarch64_msvc 0.52.4",
"windows_i686_gnu 0.52.4",
"windows_i686_msvc 0.52.4",
"windows_x86_64_gnu 0.52.4",
"windows_x86_64_gnullvm 0.52.4",
"windows_x86_64_msvc 0.52.4",
"windows_aarch64_gnullvm 0.52.5",
"windows_aarch64_msvc 0.52.5",
"windows_i686_gnu 0.52.5",
"windows_i686_gnullvm",
"windows_i686_msvc 0.52.5",
"windows_x86_64_gnu 0.52.5",
"windows_x86_64_gnullvm 0.52.5",
"windows_x86_64_msvc 0.52.5",
]
[[package]]
@ -782,9 +1185,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.4"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9"
checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263"
[[package]]
name = "windows_aarch64_msvc"
@ -794,9 +1197,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.4"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675"
checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6"
[[package]]
name = "windows_i686_gnu"
@ -806,9 +1209,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
[[package]]
name = "windows_i686_gnu"
version = "0.52.4"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3"
checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9"
[[package]]
name = "windows_i686_msvc"
@ -818,9 +1227,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
[[package]]
name = "windows_i686_msvc"
version = "0.52.4"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02"
checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf"
[[package]]
name = "windows_x86_64_gnu"
@ -830,9 +1239,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.4"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03"
checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9"
[[package]]
name = "windows_x86_64_gnullvm"
@ -842,9 +1251,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.4"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177"
checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596"
[[package]]
name = "windows_x86_64_msvc"
@ -854,9 +1263,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.4"
version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8"
checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0"
[[package]]
name = "winnow"
@ -867,6 +1276,29 @@ dependencies = [
"memchr",
]
[[package]]
name = "winnow"
version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0c976aaaa0e1f90dbb21e9587cdaf1d9679a1cde8875c0d6bd83ab96a208352"
dependencies = [
"memchr",
]
[[package]]
name = "wmi"
version = "0.13.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc2f0a4062ca522aad4705a2948fd4061b3857537990202a8ddd5af21607f79a"
dependencies = [
"chrono",
"futures",
"log",
"serde",
"thiserror",
"windows",
]
[[package]]
name = "zerocopy"
version = "0.7.32"
@ -885,5 +1317,5 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.58",
"syn 2.0.60",
]


@ -1,27 +1,43 @@
[package]
name = "ops-sat-rs"
version = "0.0.1"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
fern = "0.6"
toml = "0.8"
chrono = "0.4"
log = "0.4"
lazy_static = "1"
humantime = "2"
strum = { version = "0.26", features = ["derive"] }
thiserror = "1"
derive-new = "0.6"
num_enum = "0.7"
serde = "1"
serde_json = "1"
mio = "0.8"
homedir = "0.2"
socket2 = "0.5"
[dependencies.satrs]
version = "0.2.0-rc.0"
git = "https://egit.irs.uni-stuttgart.de/rust/sat-rs.git"
branch = "main"
version = "0.2.0-rc.5"
# git = "https://egit.irs.uni-stuttgart.de/rust/sat-rs.git"
# branch = "main"
features = ["test_util"]
[dependencies.satrs-mib]
version = "0.1.1"
git = "https://egit.irs.uni-stuttgart.de/rust/sat-rs.git"
branch = "main"
version = ">=0.1.2, <0.2"
[dev-dependencies]
env_logger = "0.11"
# I don't think we need insane performance. If anything, a small binary is easier to upload
# to the satellite.
[profile.release]
strip = true
opt-level = "z" # Optimize for size.
lto = true
codegen-units = 1


@ -2,6 +2,8 @@ ESA OPS-SAT Rust experiment
========
This is the primary repository for the ESA OPS-SAT experiment.
The repository used to generate the experiment packages for ESOC can be found [here](https://egit.irs.uni-stuttgart.de/rust/ops-sat-experiment).
You can also find some more general documentation about OPS-SAT there.
## Pre-Requisites
@ -9,25 +11,89 @@ This is the primary repository for the ESA OPS-SAT experiment.
[podman](https://podman.io/) installed
- [`cross`](https://github.com/cross-rs/cross) package installed
## Build
## Build for Target Hardware
You might need to set the [`CROSS_CONTAINER_ENGINE`](https://github.com/cross-rs/cross/wiki/FAQ#explicitly-choose-the-container-engine)
and [`CROSS_ROOTLESS_CONTAINER_ENGINE`](https://github.com/cross-rs/cross/blob/main/docs/environment_variables.md#configuring-cross-with-environment-variables)
environment variables manually before calling `cross`.
### Debug Build
```sh
cross build
```
## Documentation
### Release Build
The [wiki](https://opssat1.esoc.esa.int/projects/experimenter-information/wiki)
appears to be a useful source for documentation.
```sh
cross build --release
```
- [OBSW documents](https://opssat1.esoc.esa.int/projects/experimenter-information/dmsf?folder_id=7)
- [Software Integration Process](https://opssat1.esoc.esa.int/dmsf/files/34/view)
- [Cross-compiling SEPP](https://opssat1.esoc.esa.int/projects/experimenter-information/wiki/Cross-compiling_SEPP_application)
- [TMTC infrastructure](https://opssat1.esoc.esa.int/projects/experimenter-information/wiki/Live_TM_TC_data)
- [Submitting an Experiment](https://opssat1.esoc.esa.int/projects/experimenter-information/wiki/Building_and_submitting_your_application_to_ESOC)
- [Building with Yocto and Docker](https://opssat1.esoc.esa.int/projects/experimenter-information/wiki/Building_an_application_locally_using_Yocto_Toolchain_in_a_Docker)
- [SPP over CAN](https://opssat1.esoc.esa.int/projects/experimenter-information/wiki/SPP_over_CAN_communication)
## Build for Host
The software was designed to be runnable and testable on a host computer.
You can use the regular cargo workflow for this.
### Running
```sh
cargo run
```
### Testing
```sh
cargo test
```
## Commanding Infrastructure
Commanding the `ops-sat-rs` application is possible by several means.
<img src="docs/networking-structure.png" alt="Networking and Commanding Structure" width="500"/>
### Using the `pyclient` and `pyserver` applications
You can find both commanding applications inside the `pytmtc` folder.
It is recommended to set up a virtual environment first, for example by running the following
commands inside the `pytmtc` folder:
```sh
python3 -m venv venv
source venv/bin/activate
```
After that, you can install all requirements for both the client and server applications
in editable mode using
```sh
pip install -e .
```
If you want to command the satellite using the OPS-SAT infrastructure, start `pyserver.py`
as a background application first, for example by simply running it inside a
new terminal window.
After that, you can run `pyclient.py -p /test/ping -l` to send a ping telecommand and then
go into listener mode using the following `tmtc_conf.json` file:
```json
{
"com_if": "tcp",
"tcpip_tcp_ip_addr": "127.0.0.1",
"tcpip_tcp_port": 4097
}
```
You can command the TCP server in the OPS-SAT software directly by running the commands with
the following configuration:
```json
{
"com_if": "tcp",
"tcpip_tcp_ip_addr": "127.0.0.1",
"tcpip_tcp_port": 7031
}
```
You can run `pyclient.py -T` or `pyclient.py -h` for more information on the client application.


@ -0,0 +1,193 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:java="http://www.yworks.com/xml/yfiles-common/1.0/java" xmlns:sys="http://www.yworks.com/xml/yfiles-common/markup/primitives/2.0" xmlns:x="http://www.yworks.com/xml/yfiles-common/markup/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:y="http://www.yworks.com/xml/graphml" xmlns:yed="http://www.yworks.com/xml/yed/3" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd">
<!--Created by yEd 3.23.2-->
<key attr.name="Description" attr.type="string" for="graph" id="d0"/>
<key for="port" id="d1" yfiles.type="portgraphics"/>
<key for="port" id="d2" yfiles.type="portgeometry"/>
<key for="port" id="d3" yfiles.type="portuserdata"/>
<key attr.name="url" attr.type="string" for="node" id="d4"/>
<key attr.name="description" attr.type="string" for="node" id="d5"/>
<key for="node" id="d6" yfiles.type="nodegraphics"/>
<key for="graphml" id="d7" yfiles.type="resources"/>
<key attr.name="url" attr.type="string" for="edge" id="d8"/>
<key attr.name="description" attr.type="string" for="edge" id="d9"/>
<key for="edge" id="d10" yfiles.type="edgegraphics"/>
<graph edgedefault="directed" id="G">
<data key="d0"/>
<node id="n0">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="100.0" width="304.0" x="551.5" y="196.5"/>
<y:Fill color="#CCFFFF" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="133.7265625" x="20.866945876288696" xml:space="preserve" y="9.230939716312065">ops-sat-rs on satellite<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="-0.5" labelRatioY="-0.5" nodeRatioX="-0.4313587306701031" nodeRatioY="-0.4076906028368794" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n1">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="37.0" width="72.0" x="572.5" y="233.0"/>
<y:Fill color="#CCFFFF" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="31.9375" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="52.849609375" x="9.5751953125" xml:space="preserve" y="2.53125">TCP SPP
Client<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n2">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="182.0" width="197.5" x="551.5" y="328.5"/>
<y:Fill color="#FFFF99" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="31.9375" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="56.705078125" x="12.1474609375" xml:space="preserve" y="13.53125">pyserver
daemon<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="-0.5" labelRatioY="-0.5" nodeRatioX="-0.4384938686708861" nodeRatioY="-0.4256524725274725" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n3">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="53.0" width="77.5" x="640.75" y="346.0"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="45.90625" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="62.5703125" x="7.46484375" xml:space="preserve" y="3.546875">OPS-SAT
Server
Port 4096<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n4">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="53.0" width="77.5" x="640.75" y="440.5"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="45.90625" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="62.5703125" x="7.46484375" xml:space="preserve" y="3.546875">TMTC
Server
Port 4097<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n5">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="30.0" width="99.0" x="756.5" y="536.5"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="51.677734375" x="23.6611328125" xml:space="preserve" y="6.015625">pyclient<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n6">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="53.0" width="72.0" x="661.0" y="225.0"/>
<y:Fill color="#CCFFFF" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="45.90625" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="62.5703125" x="4.71484375" xml:space="preserve" y="3.546875">TCP
Server
Port 7301<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n7">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="53.0" width="72.0" x="756.0" y="225.0"/>
<y:Fill color="#CCFFFF" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="45.90625" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="62.5703125" x="4.71484375" xml:space="preserve" y="3.546875">UDP
Server
Port 7301<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<edge id="e0" source="n4" target="n3">
<data key="d9"/>
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="0.0" sy="0.0" tx="0.0" ty="0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="standard" target="standard"/>
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="31.9375" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="43.421875" x="8.2890625" xml:space="preserve" y="-37.21875">TMTC
Queue<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="0.0" distance="30.0" distanceToCenter="true" position="right" ratio="0.5" segment="0"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e1" source="n1" target="n3">
<data key="d9"/>
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="4.333333333333334" sy="0.0" tx="-19.5" ty="0.0">
<y:Point x="612.8333333333334" y="312.5"/>
<y:Point x="660.0" y="312.5"/>
</y:Path>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="standard" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e2" source="n5" target="n4">
<data key="d9"/>
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="-29.5" sy="-8.875" tx="6.75" ty="10.0">
<y:Point x="776.5" y="516.5"/>
<y:Point x="686.25" y="516.5"/>
</y:Path>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="standard" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e3" source="n5" target="n7">
<data key="d9"/>
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="-14.0" sy="0.0" tx="0.0" ty="0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="standard" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e4" source="n5" target="n6">
<data key="d9"/>
<data key="d10">
<y:PolyLineEdge>
<y:Path sx="-14.0" sy="0.0" tx="15.75" ty="0.0">
<y:Point x="792.0" y="311.5"/>
<y:Point x="712.75" y="311.5"/>
</y:Path>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
</graph>
<data key="d7">
<y:Resources/>
</data>
</graphml>

Binary image file added (not shown), size 66 KiB.

134
pytmtc/.gitignore vendored

@ -1,3 +1,4 @@
/tmtc_conf.json
__pycache__
/venv
@ -7,3 +8,136 @@ __pycache__
/seqcnt.txt
/.tmtc-history.txt
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# PyCharm
.idea


@ -0,0 +1,176 @@
from typing import Any, Optional
import select
import time
import socket
import logging
from threading import Thread, Event, Lock
from collections import deque
from tmtccmd.com import ComInterface
from tmtccmd.tmtc import TelemetryListT
_LOGGER = logging.getLogger(__name__)
class TcpServer(ComInterface):
def __init__(self, port: int):
self.port = port
self._max_num_packets_in_tc_queue = 500
self._max_num_packets_in_tm_queue = 500
self._default_timeout_secs = 0.5
self._server_addr = ("localhost", self.port)
self._tc_packet_queue = deque()
self._tm_packet_queue = deque()
self._tc_lock = Lock()
self._tm_lock = Lock()
self._kill_signal = Event()
self._server_socket: Optional[socket.socket] = None
self._server_thread = Thread(target=self._server_task, daemon=True)
self._connected = False
# self._conn_start = None
# self._writing_done = False
# self._reading_done = False
@property
def connected(self) -> bool:
return self._connected
@property
def id(self) -> str:
return "tcp_server"
def initialize(self, args: Any = 0) -> Any:
"""Perform initializations step which can not be done in constructor or which require
returnvalues.
"""
pass
def open(self, args: Any = 0):
"""Opens the communication interface to allow communication.
:return:
"""
if self.connected:
return
self._connected = True
self._server_thread.start()
def _server_task(self):
self._server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# We need to check the kill signal periodically to allow closing the server.
self._server_socket.settimeout(self._default_timeout_secs)
self._server_socket.bind(self._server_addr)
self._server_socket.listen()
while not self._kill_signal.is_set():
try:
(conn_socket, conn_addr) = self._server_socket.accept()
self._handle_connection(conn_socket, conn_addr)
# conn_socket.close()
"""
if (
self._reading_done and self._writing_done
) or time.time() - self.conn_start > 0.5:
print("reading and writing done")
break
"""
except TimeoutError:
continue
def _handle_connection(self, conn_socket: socket.socket, conn_addr: Any):
_LOGGER.info(f"TCP client {conn_addr} connected")
queue_len = 0
while True:
with self._tc_lock:
queue_len = len(self._tc_packet_queue)
outputs = []
if queue_len > 0:
outputs.append(conn_socket)
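# Wait up to 200 ms for the client socket to become readable (incoming data)
# and, if packets are queued for sending, writable.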
(readable, writable, _) = select.select(
[conn_socket],
outputs,
[],
0.2,
)
if writable and writable[0]:
print("writeable")
while queue_len > 0:
next_packet = bytes()
with self._tc_lock:
next_packet = self._tc_packet_queue.popleft()
if len(next_packet) > 0:
conn_socket.sendall(next_packet)
queue_len -= 1
if readable and readable[0]:
print("readable")
while True:
bytes_recvd = conn_socket.recv(4096)
if len(bytes_recvd) > 0:
print(f"Received bytes from TCP client: {bytes_recvd.decode()}")
with self._tm_lock:
self._tm_packet_queue.append(bytes_recvd)
elif len(bytes_recvd) == 0:
break
else:
print("error receiving data from TCP client")
def is_open(self) -> bool:
"""Can be used to check whether the communication interface is open. This is useful if
opening a COM interface takes a long time and is non-blocking.
"""
return self.connected
def close(self, args: Any = 0):
"""Closes the ComIF and releases any held resources (for example a Communication Port).
:return:
"""
self._kill_signal.set()
self._server_thread.join()
self._connected = False
def send(self, data: bytes):
"""Send raw data.
:raises SendError: Sending failed for some reason.
"""
with self._tc_lock:
if len(self._tc_packet_queue) >= self._max_num_packets_in_tc_queue:
# Remove oldest packet
self._tc_packet_queue.popleft()
self._tc_packet_queue.append(data)
def receive(self, parameters: Any = 0) -> TelemetryListT:
"""Returns a list of received packets. The child class can use a separate thread to poll for
the packets or use some other mechanism and container like a deque to store packets
to be returned here.
:param parameters:
:raises ReceptionDecodeError: If the underlying COM interface uses encoding and
decoding and the decoding fails, this exception will be returned.
:return:
"""
with self._tm_lock:
packet_list = []
while self._tm_packet_queue:
packet_list.append(self._tm_packet_queue.popleft())
return packet_list
def data_available(self, timeout: float, parameters: Any = 0) -> int:
"""Check whether TM packets are available.
:param timeout: Can be used to block on available data if supported by the specific
communication interface.
:param parameters: Can be an arbitrary parameter.
:raises ReceptionDecodeError: If the underlying COM interface uses encoding and
decoding when determining the number of available packets, this exception can be
thrown on decoding errors.
:return: 0 if no data is available, number of packets otherwise.
"""
with self._tm_lock:
return len(self._tm_packet_queue)
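A minimal usage sketch for the COM interface above (hypothetical caller code, not part of this changeset): it assumes an already constructed instance named com_if, whose class name and constructor arguments lie outside this excerpt, and relies only on the open/send/data_available/receive/close contract documented in the docstrings.
import time

def exchange_one_round(com_if, tc: bytes, poll_secs: float = 2.0) -> list:
    # Queue one TC for the next connected client, then poll for telemetry for a while.
    com_if.open()
    com_if.send(tc)
    telemetry = []
    deadline = time.time() + poll_secs
    while time.time() < deadline:
        if com_if.data_available(timeout=0.0) > 0:
            telemetry.extend(com_if.receive())
        time.sleep(0.1)
    com_if.close()
    return telemetry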

View File

@ -0,0 +1,17 @@
import struct
from serde import Model, fields
from opssat_tmtc.common import EXPERIMENT_APID, UniqueId, make_unique_id
class CameraParameters(Model):
R: fields.Int()
G: fields.Int()
B: fields.Int()
N: fields.Int()
P: fields.Bool()
E: fields.Int()
W: fields.Int()
def serialize_for_uplink(self) -> bytearray:
return self.to_json().encode("utf-8")
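For reference, a short sketch of the uplink payload this produces; the exact key order and whitespace depend on the serde library, so the commented output is only indicative.
params = CameraParameters(8, 8, 8, 1, True, 200, 1000)
print(params.serialize_for_uplink())
# Indicative output: b'{"R": 8, "G": 8, "B": 8, "N": 1, "P": true, "E": 200, "W": 1000}'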

View File

@ -4,15 +4,27 @@ import dataclasses
import enum
import struct
EXPERIMENT_ID = 278
EXPERIMENT_APID = 1024 + EXPERIMENT_ID
class Apid(enum.IntEnum):
SCHED = 1
GENERIC_PUS = 2
ACS = 3
CFDP = 4
class UniqueId(enum.IntEnum):
Controller = 0
PusEventManagement = 1
PusRouting = 2
PusTest = 3
PusAction = 4
PusMode = 5
PusHk = 6
UdpServer = 7
TcpServer = 8
TcpSppClient = 9
PusScheduler = 10
CameraHandler = 11
class EventSeverity(enum.IntEnum):
INFO = 0
LOW = 1
MEDIUM = 2
@ -45,7 +57,11 @@ class AcsHkIds(enum.IntEnum):
MGM_SET = 1
def make_addressable_id(target_id: int, unique_id: int) -> bytes:
byte_string = bytearray(struct.pack("!I", target_id))
byte_string.extend(struct.pack("!I", unique_id))
return byte_string
def make_unique_id(unique_id: int) -> bytes:
return struct.pack("!I", unique_id)
def make_action_cmd_header(unique_id: int, action_id: int) -> bytes:
byte_string = bytearray(struct.pack("!I", unique_id))
byte_string.extend(struct.pack("!I", action_id))
return byte_string
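For illustration, the 8-byte header produced by make_action_cmd_header for a default_single camera action request (CameraHandler unique ID 11, action ID 1) consists of two big-endian u32 values:
header = make_action_cmd_header(UniqueId.CameraHandler, 1)
assert header == bytes.fromhex("0000000b00000001")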

View File

@ -10,7 +10,13 @@ from tmtccmd.tmtc import DefaultPusQueueHelper
from tmtccmd.pus.s11_tc_sched import create_time_tagged_cmd
from tmtccmd.pus.s200_fsfw_mode import Subservice as ModeSubservice
from common import AcsId, Apid
from opssat_tmtc.camera_params import CameraParameters
from opssat_tmtc.common import (
EXPERIMENT_APID,
UniqueId,
make_action_cmd_header,
)
_LOGGER = logging.getLogger(__name__)
@ -66,13 +72,31 @@ def create_cmd_definition_tree() -> CmdTreeNode:
)
root_node.add_child(scheduler_node)
acs_node = CmdTreeNode("acs", "ACS Subsystem Node")
mgm_node = CmdTreeNode("mgms", "MGM devices node")
mgm_node.add_child(mode_node)
mgm_node.add_child(hk_node)
acs_node.add_child(mgm_node)
root_node.add_child(acs_node)
action_node = CmdTreeNode("action", "Action Node")
cam_node = CmdTreeNode("take_image", "Take Image with IMS Imager")
cam_node.add_child(
CmdTreeNode("default_single", "Default Single Image Camera Parameters")
)
cam_node.add_child(
CmdTreeNode("balanced_single", "Balanced Single Image Camera Parameters")
)
cam_node.add_child(
CmdTreeNode(
"default_single_flatsat",
"Default Single Image Camera Parameters for use on FlatSat",
)
)
cam_node.add_child(
CmdTreeNode(
"balanced_single_flatsat",
"Balanced Single Image Camera Parameters for use on FlatSat",
)
)
cam_node.add_child(
CmdTreeNode("custom_params", "Custom Camera Parameters as specified from file")
)
action_node.add_child(cam_node)
root_node.add_child(action_node)
return root_node
@ -87,14 +111,10 @@ def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str):
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "ping":
q.add_log_cmd("Sending PUS ping telecommand")
return q.add_pus_tc(
PusTelecommand(apid=Apid.GENERIC_PUS, service=17, subservice=1)
)
return q.add_pus_tc(PusTelecommand(service=17, subservice=1))
elif cmd_path_list[1] == "trigger_event":
q.add_log_cmd("Triggering test event")
return q.add_pus_tc(
PusTelecommand(apid=Apid.GENERIC_PUS, service=17, subservice=128)
)
return q.add_pus_tc(PusTelecommand(service=17, subservice=128))
if cmd_path_list[0] == "scheduler":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "schedule_ping_10_secs_ahead":
@ -106,26 +126,36 @@ def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str):
create_time_tagged_cmd(
time_stamp,
PusTelecommand(service=17, subservice=1),
apid=Apid.SCHED,
)
)
if cmd_path_list[0] == "acs":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "mgms":
if cmd_path_list[0] == "action":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "take_image":
assert len(cmd_path_list) >= 3
if cmd_path_list[2] == "hk":
if cmd_path_list[3] == "one_shot_hk":
q.add_log_cmd("Sending HK one shot request")
# TODO: Fix
# q.add_pus_tc(
# create_request_one_hk_command(
# make_addressable_id(Apid.ACS, AcsId.MGM_SET)
# )
# )
if cmd_path_list[2] == "mode":
if cmd_path_list[3] == "set_mode":
handle_set_mode_cmd(
q, "MGM 0", cmd_path_list[4], Apid.ACS, AcsId.MGM_0
q.add_log_cmd(
"Sending PUS take image action request with "
+ cmd_path_list[2]
+ " params."
)
data = bytearray()
if cmd_path_list[2] == "default_single":
data.extend(make_action_cmd_header(UniqueId.CameraHandler, 1))
if cmd_path_list[2] == "balanced_single":
data.extend(make_action_cmd_header(UniqueId.CameraHandler, 2))
if cmd_path_list[2] == "default_single_flatsat":
data.extend(make_action_cmd_header(UniqueId.CameraHandler, 3))
if cmd_path_list[2] == "balanced_single_flatsat":
data.extend(make_action_cmd_header(UniqueId.CameraHandler, 4))
if cmd_path_list[2] == "custom":
data.extend(make_action_cmd_header(UniqueId.CameraHandler, 5))
params = CameraParameters(8, 8, 8, 1, True, 200, 1000)
data.extend(params.serialize_for_uplink())
return q.add_pus_tc(
PusTelecommand(
service=8, subservice=128, apid=EXPERIMENT_APID, app_data=data
)
)

View File

View File

@ -1,7 +1,7 @@
from tmtccmd.config import OpCodeEntry, TmtcDefinitionWrapper, CoreServiceList
from tmtccmd.config.globals import get_default_tmtc_defs
from common import HkOpCodes
from opssat_tmtc.common import HkOpCodes
def tc_definitions() -> TmtcDefinitionWrapper:
@ -35,4 +35,11 @@ def tc_definitions() -> TmtcDefinitionWrapper:
info="PUS Service 11 TC Scheduling",
op_code_entry=srv_11,
)
srv_8 = OpCodeEntry()
srv_8.add("pic", "Action Request Image")
defs.add_service(
name=CoreServiceList.SERVICE_8,
info="PUS Service 8 Action",
op_code_entry=srv_8,
)
return defs

View File

@ -1,5 +1,6 @@
#!/usr/bin/env python3
"""Example client for the sat-rs example application"""
from __future__ import annotations
import logging
import sys
import time
@ -45,9 +46,8 @@ from tmtccmd.tmtc import (
from spacepackets.seqcount import FileSeqCountProvider, PusFileSeqCountProvider
from tmtccmd.util.obj_id import ObjectIdDictT
import pus_tc
from common import Apid, EventU32
from opssat_tmtc.pus_tc import create_cmd_definition_tree, pack_pus_telecommands
from opssat_tmtc.common import EXPERIMENT_APID, EventU32
_LOGGER = logging.getLogger()
@ -64,8 +64,7 @@ class SatRsConfigHook(HookBase):
assert self.cfg_path is not None
packet_id_list = []
for apid in Apid:
packet_id_list.append(PacketId(PacketType.TM, True, apid))
packet_id_list.append(PacketId(PacketType.TM, True, EXPERIMENT_APID))
cfg = create_com_interface_cfg_default(
com_if_key=com_if_key,
json_cfg_path=self.cfg_path,
@ -76,7 +75,7 @@ class SatRsConfigHook(HookBase):
def get_command_definitions(self) -> CmdTreeNode:
"""This function should return the root node of the command definition tree."""
return pus_tc.create_cmd_definition_tree()
return create_cmd_definition_tree()
def get_cmd_history(self) -> Optional[History]:
"""Optionlly return a history class for the past command paths which will be used
@ -103,7 +102,9 @@ class PusHandler(GenericApidHandlerBase):
def handle_tm(self, apid: int, packet: bytes, _user_args: Any):
try:
pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
pus_tm = PusTelemetry.unpack(
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
except ValueError as e:
_LOGGER.warning("Could not generate PUS TM object from raw data")
_LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
@ -111,7 +112,7 @@ class PusHandler(GenericApidHandlerBase):
service = pus_tm.service
if service == 1:
tm_packet = Service1Tm.unpack(
data=packet, params=UnpackParams(CdsShortTimestamp.empty(), 1, 2)
data=packet, params=UnpackParams(CdsShortTimestamp.TIMESTAMP_SIZE, 1, 2)
)
res = self.verif_wrapper.add_tm(tm_packet)
if res is None:
@ -128,7 +129,9 @@ class PusHandler(GenericApidHandlerBase):
elif service == 3:
_LOGGER.info("No handling for HK packets implemented")
_LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
pus_tm = PusTelemetry.unpack(
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
if pus_tm.subservice == 25:
if len(pus_tm.source_data) < 8:
raise ValueError("No addressable ID in HK packet")
@ -136,16 +139,22 @@ class PusHandler(GenericApidHandlerBase):
_LOGGER.info(json_str)
elif service == 5:
tm_packet = PusTelemetry.unpack(
packet, time_reader=CdsShortTimestamp.empty()
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
src_data = tm_packet.source_data
event_u32 = EventU32.unpack(src_data)
_LOGGER.info(f"Received event packet. Event: {event_u32}")
if event_u32.group_id == 0 and event_u32.unique_id == 0:
_LOGGER.info("Received test event")
elif service == 8:
if pus_tm.subservice == 130:
_LOGGER.info("Received Action Data Reply TM[8,130]")
reply = pus_tm.source_data
reply = reply[6:]
_LOGGER.info(f"Data Reply Content: {reply.decode()}")
elif service == 17:
tm_packet = Service17Tm.unpack(
packet, time_reader=CdsShortTimestamp.empty()
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
if tm_packet.subservice == 2:
self.file_logger.info("Received Ping Reply TM[17,2]")
@ -162,7 +171,7 @@ class PusHandler(GenericApidHandlerBase):
f"The service {service} is not implemented in Telemetry Factory"
)
tm_packet = PusTelemetry.unpack(
packet, time_reader=CdsShortTimestamp.empty()
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
self.raw_logger.log_tm(pus_tm)
@ -181,7 +190,7 @@ class TcHandler(TcHandlerBase):
tc_sched_timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE,
seq_cnt_provider=seq_count_provider,
pus_verificator=self.verif_wrapper.pus_verificator,
default_pus_apid=None,
default_pus_apid=EXPERIMENT_APID,
)
def send_cb(self, send_params: SendCbParams):
@ -197,17 +206,17 @@ class TcHandler(TcHandlerBase):
_LOGGER.info(log_entry.log_str)
def queue_finished_cb(self, info: ProcedureWrapper):
if info.proc_type == TcProcedureType.DEFAULT:
def_proc = info.to_def_procedure()
if info.proc_type == TcProcedureType.TREE_COMMANDING:
def_proc = info.to_tree_commanding_procedure()
_LOGGER.info(f"Queue handling finished for command {def_proc.cmd_path}")
def feed_cb(self, info: ProcedureWrapper, wrapper: FeedWrapper):
q = self.queue_helper
q.queue_wrapper = wrapper.queue_wrapper
if info.proc_type == TcProcedureType.DEFAULT:
def_proc = info.to_def_procedure()
if info.proc_type == TcProcedureType.TREE_COMMANDING:
def_proc = info.to_tree_commanding_procedure()
assert def_proc.cmd_path is not None
pus_tc.pack_pus_telecommands(q, def_proc.cmd_path)
pack_pus_telecommands(q, def_proc.cmd_path)
def main():
@ -234,13 +243,13 @@ def main():
raw_logger = RawTmtcTimedLogWrapper(when=TimedLogWhen.PER_HOUR, interval=1)
verificator = PusVerificator()
verification_wrapper = VerificationWrapper(verificator, _LOGGER, file_logger)
# Create primary TM handler and add it to the CCSDS Packet Handler
# Create primary TM handlers and add it to the CCSDS Packet Handler
tm_handler = PusHandler(file_logger, verification_wrapper, raw_logger)
ccsds_handler = CcsdsTmHandler(generic_handler=tm_handler)
# TODO: We could add the CFDP handler for the CFDP APID at a later stage.
# TODO: We could add the CFDP handlers for the CFDP APID at a later stage.
# ccsds_handler.add_apid_handler(tm_handler)
# Create TC handler
# Create TC handlers
seq_count_provider = PusFileSeqCountProvider()
tc_handler = TcHandler(seq_count_provider, verification_wrapper)
tmtccmd.setup(setup_args=setup_args)
@ -256,6 +265,7 @@ def main():
while True:
state = tmtc_backend.periodic_op(None)
if state.request == BackendRequest.TERMINATION_NO_ERROR:
tmtc_backend.close_com_if()
sys.exit(0)
elif state.request == BackendRequest.DELAY_IDLE:
_LOGGER.info("TMTC Client in IDLE mode")
@ -270,6 +280,7 @@ def main():
elif state.request == BackendRequest.CALL_NEXT:
pass
except KeyboardInterrupt:
tmtc_backend.close_com_if()
sys.exit(0)

26
pytmtc/pyproject.toml Normal file
View File

@ -0,0 +1,26 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "opssat-tmtc"
description = "Python TMTC client for OPS-SAT"
readme = "README.md"
version = "0.1.0"
requires-python = ">=3.8"
authors = [
{name = "Robin Mueller", email = "robin.mueller.m@gmail.com"},
{name = "Linus Köster", email = "st167799@stud.uni-stuttgart.de"}
]
dependencies = [
"tmtccmd==8.0.0rc.2",
"serde==0.9.0"
]
[tool.setuptools.packages]
find = {}
[tool.ruff.lint]
ignore = ["E501"]
[tool.ruff.lint.extend-per-file-ignores]
"__init__.py" = ["F401"]

185
pytmtc/pyserver.py Executable file
View File

@ -0,0 +1,185 @@
#!/usr/bin/env python3
import socket
import abc
import time
import select
import logging
from typing import Any
from threading import Event, Thread
from collections import deque
from multiprocessing import Queue
from spacepackets.ccsds.spacepacket import parse_space_packets, PacketId
from spacepackets.ecss.tc import PacketType
EXP_ID = 278
EXP_APID = 1024 + EXP_ID
EXP_PACKET_ID_TM = PacketId(PacketType.TM, True, EXP_APID)
EXP_PACKET_ID_TC = PacketId(PacketType.TC, True, EXP_APID)
OPSSAT_SERVER_PORT = 4096
TMTC_SERVER_PORT = 4097
LOG_LEVEL = logging.INFO
TC_QUEUE = Queue()
TM_QUEUE = Queue()
KILL_SIGNAL = Event()
_LOGGER = logging.getLogger(__name__)
def main():
logging.basicConfig(
format="[%(asctime)s] [%(levelname)-5s] %(message)s",
level=LOG_LEVEL,
datefmt="%Y-%m-%d %H:%M:%S",
)
print("Starting OPS-SAT ground TMTC server")
KILL_SIGNAL.clear()
ops_sat_thread = OpsSatServer()
ops_sat_thread.start()
tmtc_thread = TmtcServer()
tmtc_thread.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
_LOGGER.info("Shutting down server gracefully")
KILL_SIGNAL.set()
ops_sat_thread.join()
tmtc_thread.join()
class BaseServer(Thread):
def __init__(self, log_prefix: str, port: int):
self.log_prefix = log_prefix
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_addr = ("0.0.0.0", port)
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_socket.setblocking(False)
self.server_socket.settimeout(0.4)
self.server_socket.bind(server_addr)
super().__init__()
def run(self) -> None:
self.run_sync_version()
def run_sync_version(self) -> None:
self.server_socket.listen()
while not KILL_SIGNAL.is_set():
try:
(conn_socket, conn_addr) = self.server_socket.accept()
self.handle_connection(conn_socket, conn_addr)
except TimeoutError:
continue
def handle_connection(self, conn_socket: socket.socket, conn_addr: Any):
conn_socket.setblocking(False)
print(f"{self.log_prefix} TCP client {conn_addr} connected")
analysis_deque = deque()
while not KILL_SIGNAL.is_set():
conn_socket.settimeout(0.2)
try:
bytes_recvd = conn_socket.recv(4096)
if len(bytes_recvd) > 0:
_LOGGER.debug(f"{self.log_prefix} RX RAW: {bytes_recvd}")
analysis_deque.append(bytes_recvd)
elif len(bytes_recvd) == 0:
self.handle_read_bytestream(analysis_deque)
break
else:
print("error receiving data from TCP client")
except BlockingIOError:
self.handle_timeout(conn_socket, analysis_deque)
time.sleep(0.2)
except TimeoutError:
self.handle_timeout(conn_socket, analysis_deque)
def handle_timeout(self, conn_socket: socket.socket, analysis_deque: deque):
if len(analysis_deque) > 0:
self.handle_read_bytestream(analysis_deque)
self.send_data_to_client(conn_socket)
def run_select_version(self) -> None:
while True:
self.server_socket.listen()
(conn_socket, conn_addr) = self.server_socket.accept()
print(f"{self.log_prefix} TCP client {conn_addr} connected")
analysis_deque = deque()
while True:
outputs = []
if self.send_data_available():
outputs.append(conn_socket)
(readable, writable, _) = select.select([conn_socket], outputs, [], 0.2)
if readable and readable[0]:
bytes_recvd = conn_socket.recv(4096)
if len(bytes_recvd) > 0:
_LOGGER.debug("received data from TCP client: {}", bytes_recvd)
analysis_deque.append(bytes_recvd)
elif len(bytes_recvd) == 0:
self.handle_read_bytestream(analysis_deque)
break
else:
print("error receiving data from TCP client")
if writable and writable[0]:
self.send_data_to_client(conn_socket)
if not writable and not readable:
if len(analysis_deque) > 0:
self.handle_read_bytestream(analysis_deque)
self.send_data_to_client(conn_socket)
@abc.abstractmethod
def handle_read_bytestream(self, analysis_deque: deque):
pass
@abc.abstractmethod
def send_data_to_client(self, conn_socket: socket.socket):
pass
@abc.abstractmethod
def send_data_available(self) -> bool:
pass
class OpsSatServer(BaseServer):
def __init__(self):
super().__init__("[OPS-SAT]", OPSSAT_SERVER_PORT)
def handle_read_bytestream(self, analysis_deque: deque):
parsed_packets = parse_space_packets(analysis_deque, [EXP_PACKET_ID_TM])
for packet in parsed_packets:
_LOGGER.info(f"{self.log_prefix} RX TM: [{packet.hex(sep=',')}]")
TM_QUEUE.put(packet)
def send_data_to_client(self, conn_socket: socket.socket):
while not TC_QUEUE.empty():
next_packet = TC_QUEUE.get()
_LOGGER.info(f"{self.log_prefix} TX TC [{next_packet.hex(sep=',')}]")
conn_socket.sendall(next_packet)
def send_data_available(self) -> bool:
return not TC_QUEUE.empty()
class TmtcServer(BaseServer):
def __init__(self):
super().__init__("[TMTC]", TMTC_SERVER_PORT)
def handle_read_bytestream(self, analysis_deque: deque):
parsed_packets = parse_space_packets(analysis_deque, [EXP_PACKET_ID_TC])
for packet in parsed_packets:
_LOGGER.info(f"{self.log_prefix} RX TM: [{packet.hex(sep=',')}]")
TC_QUEUE.put(packet)
def send_data_to_client(self, conn_socket: socket.socket):
while not TM_QUEUE.empty():
next_packet = TM_QUEUE.get()
_LOGGER.info(f"{self.log_prefix} TX TM [{next_packet.hex(sep=',')}]")
conn_socket.sendall(next_packet)
def send_data_available(self) -> bool:
return not TM_QUEUE.empty()
if __name__ == "__main__":
main()
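Taken together, pyserver.py is a simple relay: OpsSatServer accepts the flight software on port 4096, forwards everything queued in TC_QUEUE to it and parses received TM into TM_QUEUE, while TmtcServer does the mirror image for the ground TMTC client on port 4097. A throwaway sketch (not part of the repository) of a client peeking at relayed telemetry on the TMTC port:
import socket

with socket.create_connection(("127.0.0.1", TMTC_SERVER_PORT), timeout=2.0) as sock:
    sock.settimeout(0.5)
    try:
        data = sock.recv(4096)
        if data:
            print(f"relayed TM: [{data.hex(sep=',')}]")
    except socket.timeout:
        print("no telemetry relayed yet")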

View File

@ -1,2 +1,2 @@
tmtccmd == 8.0.0rc1
.
# -e git+https://github.com/robamu-org/tmtccmd@97e5e51101a08b21472b3ddecc2063359f7e307a#egg=tmtccmd

View File

@ -0,0 +1,20 @@
import struct
from opssat_tmtc.camera_params import CameraParameters
from opssat_tmtc.common import make_unique_id, EXPERIMENT_APID
def test_serde_serialization():
# Example serialization
data = bytearray(make_unique_id(EXPERIMENT_APID))
params = CameraParameters(8, 8, 8, 1, True, 200, 1000)
serialized = params.to_json().encode("utf-8")
byte_string = bytearray(struct.pack("!{}s".format(len(serialized)), serialized))
print(byte_string)
print(params.serialize_for_uplink())
data.extend(params.serialize_for_uplink())
print(data)
# Example deserialization
data = '{"R": 100, "G": 150, "B": 200, "N": 3, "P": true, "E": 10, "W": 20}'
deserialized_params = CameraParameters.from_json(data)
print(deserialized_params)

View File

@ -4,7 +4,6 @@ on a remote machine, e.g. a Raspberry Pi"""
import argparse
import os
import sys
import platform
import time
from typing import Final
@ -16,10 +15,10 @@ BUILDER = "cross"
# remote configurations by tweaking / hardcoding these parameter, which generally are constant
# for a given board
DEFAULT_USER_NAME: Final = "root"
DEFAULT_ADDRESS: Final = "192.254.108.30"
DEFAULT_ADDRESS: Final = "small_flatsat"
DEFAULT_TOOLCHAIN: Final = "armv7-unknown-linux-gnueabihf"
DEFAULT_APP_NAME: Final = "ops-sat-rs"
DEFAULT_TARGET_FOLDER: Final = "/tmp"
DEFAULT_TARGET_FOLDER: Final = "/home/exp278/"
DEFAULT_DEBUG_PORT: Final = "1234"
DEFAULT_GDB_APP = "gdb-multiarch"
@ -140,10 +139,11 @@ def bld_deploy_run(args):
sshpass_args = f"-f {args.sshfile}"
elif args.sshenv:
sshpass_args = "-e"
ssh_target_ident = f"{args.user}@{args.address}"
# ssh_target_ident = f"{args.user}@{args.address}"
ssh_target_ident = "small_flatsat"
sshpass_cmd = ""
if platform.system() != "Windows":
sshpass_cmd = f"sshpass {sshpass_args}"
# if platform.system() != "Windows":
# sshpass_cmd = f"sshpass {sshpass_args}"
dest_path = f"{args.dest}/{args.app}"
if not args.source:
source_path = f"{os.getcwd()}/target/{args.tc}/{build_folder}/{args.app}"

View File

@ -1,53 +0,0 @@
use ops_sat_rs::config::components::Apid;
use ops_sat_rs::config::APID_VALIDATOR;
use satrs::pus::ReceivesEcssPusTc;
use satrs::spacepackets::{CcsdsPacket, SpHeader};
use satrs::tmtc::{CcsdsPacketHandler, ReceivesCcsdsTc};
use satrs::ValidatorU16Id;
#[derive(Clone)]
pub struct CcsdsReceiver<
TcSource: ReceivesCcsdsTc<Error = E> + ReceivesEcssPusTc<Error = E> + Clone,
E,
> {
pub tc_source: TcSource,
}
impl<
TcSource: ReceivesCcsdsTc<Error = E> + ReceivesEcssPusTc<Error = E> + Clone + 'static,
E: 'static,
> ValidatorU16Id for CcsdsReceiver<TcSource, E>
{
fn validate(&self, apid: u16) -> bool {
APID_VALIDATOR.contains(&apid)
}
}
impl<
TcSource: ReceivesCcsdsTc<Error = E> + ReceivesEcssPusTc<Error = E> + Clone + 'static,
E: 'static,
> CcsdsPacketHandler for CcsdsReceiver<TcSource, E>
{
type Error = E;
fn handle_packet_with_valid_apid(
&mut self,
sp_header: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error> {
if sp_header.apid() == Apid::Cfdp as u16 {
} else {
return self.tc_source.pass_ccsds(sp_header, tc_raw);
}
Ok(())
}
fn handle_packet_with_unknown_apid(
&mut self,
sp_header: &SpHeader,
_tc_raw: &[u8],
) -> Result<(), Self::Error> {
log::warn!("unknown APID 0x{:x?} detected", sp_header.apid());
Ok(())
}
}

View File

@ -1,30 +1,29 @@
use lazy_static::lazy_static;
use num_enum::{IntoPrimitive, TryFromPrimitive};
use satrs::spacepackets::{PacketId, PacketType};
use satrs::events::{EventU32TypedSev, SeverityInfo};
use satrs::spacepackets::PacketId;
use satrs_mib::res_code::ResultU16Info;
use satrs_mib::resultcode;
use std::{collections::HashSet, net::Ipv4Addr};
use strum::IntoEnumIterator;
use std::net::Ipv4Addr;
use std::path::{Path, PathBuf};
pub const STOP_FILE_NAME: &str = "stop-experiment";
pub const CONFIG_FILE_NAME: &str = "exp278.toml";
pub const HOME_FOLDER_EXPERIMENT: &str = "/home/exp278";
pub const LOG_FOLDER: &str = "logs";
pub const OBSW_SERVER_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED;
pub const SERVER_PORT: u16 = 7301;
pub const TCP_SPP_SERVER_PORT: u16 = 4096;
pub const EXPERIMENT_ID: u32 = 278;
pub const EXPERIMENT_APID: u16 = 1024 + EXPERIMENT_ID as u16;
pub const EXPERIMENT_PACKET_ID: PacketId = PacketId::new_for_tc(true, EXPERIMENT_APID);
pub const VALID_PACKET_ID_LIST: &[PacketId] = &[PacketId::new_for_tc(true, EXPERIMENT_APID)];
lazy_static! {
pub static ref PACKET_ID_VALIDATOR: HashSet<PacketId> = {
let mut set = HashSet::new();
for id in components::Apid::iter() {
set.insert(PacketId::new(PacketType::Tc, true, id as u16));
}
set
};
pub static ref APID_VALIDATOR: HashSet<u16> = {
let mut set = HashSet::new();
for id in components::Apid::iter() {
set.insert(id as u16);
}
set
};
}
// TODO: Would be nice if this can be commanded as well..
/// Can be enabled to print all SPP packets received from the SPP server on port 4096.
pub const SPP_CLIENT_WIRETAPPING_RX: bool = false;
pub const SPP_CLIENT_WIRETAPPING_TX: bool = false;
#[derive(Copy, Clone, PartialEq, Eq, Debug, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
@ -38,6 +37,104 @@ pub enum GroupId {
Tmtc = 0,
Hk = 1,
Mode = 2,
Action = 3,
}
pub const TEST_EVENT: EventU32TypedSev<SeverityInfo> =
EventU32TypedSev::<SeverityInfo>::new(GroupId::Tmtc as u16, 0);
pub const VERSION: Option<&str> = option_env!("CARGO_PKG_VERSION");
lazy_static! {
pub static ref HOME_PATH: PathBuf = {
let mut home_path = PathBuf::new();
let home_path_default = homedir::get_my_home()
.expect("Getting home dir from OS failed.")
.expect("No home dir found.");
home_path.push(if Path::new(HOME_FOLDER_EXPERIMENT).exists() {
HOME_FOLDER_EXPERIMENT
} else {
home_path_default
.to_str()
.expect("Error converting to string.")
});
home_path
};
}
pub mod cfg_file {
use std::{
fs::File,
io::Read,
path::{Path, PathBuf},
};
use super::{CONFIG_FILE_NAME, HOME_PATH, TCP_SPP_SERVER_PORT};
pub const SPP_CLIENT_PORT_CFG_KEY: &str = "tcp_spp_server_port";
#[derive(Debug)]
pub struct AppCfg {
pub tcp_spp_server_port: u16,
}
impl Default for AppCfg {
fn default() -> Self {
Self {
tcp_spp_server_port: TCP_SPP_SERVER_PORT,
}
}
}
pub fn create_app_config() -> AppCfg {
let mut cfg_path = HOME_PATH.clone();
cfg_path.push(CONFIG_FILE_NAME);
let cfg_path_home = cfg_path.as_path();
let relevant_path = if Path::new(CONFIG_FILE_NAME).exists() {
Some(PathBuf::from(Path::new(CONFIG_FILE_NAME)))
} else if cfg_path_home.exists() {
Some(PathBuf::from(cfg_path_home))
} else {
None
};
let mut app_cfg = AppCfg::default();
if relevant_path.is_none() {
log::warn!("No config file found, using default values");
return app_cfg;
}
let relevant_path = relevant_path.unwrap();
match File::open(relevant_path.as_path()) {
Ok(mut file) => {
let mut toml_str = String::new();
match file.read_to_string(&mut toml_str) {
Ok(_size) => match toml_str.parse::<toml::Table>() {
Ok(table) => {
handle_config_file_table(table, &mut app_cfg);
}
Err(e) => log::error!("error parsing TOML config file: {e}"),
},
Err(e) => log::error!("error reading TOML config file: {e}"),
}
}
Err(e) => log::error!("error opening TOML config file: {e}"),
}
app_cfg
}
#[allow(clippy::collapsible_match)]
pub fn handle_config_file_table(table: toml::Table, app_cfg: &mut AppCfg) {
if let Some(value) = table.get(SPP_CLIENT_PORT_CFG_KEY) {
if let toml::Value::Integer(port) = value {
if *port < 0 {
log::warn!("invalid port value, is negative");
} else {
app_cfg.tcp_spp_server_port = *port as u16
}
}
}
}
}
pub mod tmtc_err {
@ -76,52 +173,121 @@ pub mod tmtc_err {
];
}
pub mod action_err {
use super::*;
use satrs::res_code::ResultU16;
#[resultcode]
pub const INVALID_ACTION_ID: ResultU16 = ResultU16::new(GroupId::Action as u8, 0);
pub const ACTION_RESULTS: &[ResultU16Info] = &[INVALID_ACTION_ID_EXT];
}
pub mod hk_err {
use super::*;
use satrs::res_code::ResultU16;
#[resultcode]
pub const TARGET_ID_MISSING: ResultU16 = ResultU16::new(GroupId::Hk as u8, 0);
#[resultcode]
pub const UNIQUE_ID_MISSING: ResultU16 = ResultU16::new(GroupId::Hk as u8, 1);
#[resultcode]
pub const UNKNOWN_TARGET_ID: ResultU16 = ResultU16::new(GroupId::Hk as u8, 2);
#[resultcode]
pub const COLLECTION_INTERVAL_MISSING: ResultU16 = ResultU16::new(GroupId::Hk as u8, 3);
pub const HK_ERR_RESULTS: &[ResultU16Info] = &[
TARGET_ID_MISSING_EXT,
UNIQUE_ID_MISSING_EXT,
UNKNOWN_TARGET_ID_EXT,
COLLECTION_INTERVAL_MISSING_EXT,
];
}
pub mod mode_err {
use super::*;
use satrs::res_code::ResultU16;
#[resultcode]
pub const WRONG_MODE: ResultU16 = ResultU16::new(GroupId::Mode as u8, 0);
}
pub mod pool {
use satrs::pool::{StaticMemoryPool, StaticPoolConfig};
pub fn create_sched_tc_pool() -> StaticMemoryPool {
StaticMemoryPool::new(StaticPoolConfig::new(
vec![
(100, 32),
(50, 64),
(50, 128),
(50, 256),
(50, 1024),
(100, 2048),
],
true,
))
}
}
pub mod components {
use satrs::request::UniqueApidTargetId;
use strum::EnumIter;
#[derive(Copy, Clone, PartialEq, Eq, EnumIter)]
pub enum Apid {
Sched = 1,
GenericPus = 2,
Cfdp = 4,
}
use super::EXPERIMENT_APID;
// Component IDs for components with the PUS APID.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum PusId {
PusEventManagement = 0,
PusRouting = 1,
PusTest = 2,
PusAction = 3,
PusMode = 4,
PusHk = 5,
}
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum AcsId {
Mgm0 = 0,
pub enum UniqueId {
Controller = 0,
PusEventManagement = 1,
PusRouting = 2,
PusTest = 3,
PusAction = 4,
PusMode = 5,
PusHk = 6,
UdpServer = 7,
TcpServer = 8,
TcpSppClient = 9,
PusScheduler = 10,
CameraHandler = 11,
}
pub const CONTROLLER_ID: UniqueApidTargetId =
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::Controller as u32);
pub const PUS_ACTION_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusAction as u32);
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::PusAction as u32);
pub const PUS_EVENT_MANAGEMENT: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, 0);
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::PusEventManagement as u32);
pub const PUS_ROUTING_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusRouting as u32);
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::PusRouting as u32);
pub const PUS_TEST_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusTest as u32);
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::PusTest as u32);
pub const PUS_MODE_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusMode as u32);
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::PusMode as u32);
pub const PUS_SCHEDULER_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::PusScheduler as u32);
pub const PUS_HK_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusHk as u32);
pub const PUS_SCHED_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Sched as u16, 0);
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::PusHk as u32);
pub const UDP_SERVER: UniqueApidTargetId =
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::UdpServer as u32);
pub const TCP_SERVER: UniqueApidTargetId =
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::TcpServer as u32);
pub const TCP_SPP_CLIENT: UniqueApidTargetId =
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::TcpSppClient as u32);
pub const CAMERA_HANDLER: UniqueApidTargetId =
UniqueApidTargetId::new(EXPERIMENT_APID, UniqueId::CameraHandler as u32);
}
pub mod tasks {
use std::time::Duration;
pub const FREQ_MS_UDP_TMTC: u64 = 200;
pub const FREQ_MS_EVENT_HANDLING: u64 = 400;
pub const FREQ_MS_AOCS: u64 = 500;
pub const FREQ_MS_PUS_STACK: u64 = 200;
pub const FREQ_MS_CTRL: u64 = 400;
pub const FREQ_MS_CAMERA_HANDLING: u64 = 400;
pub const STOP_CHECK_FREQUENCY_MS: u64 = 400;
pub const STOP_CHECK_FREQUENCY: Duration = Duration::from_millis(STOP_CHECK_FREQUENCY_MS);
}

128
src/controller.rs Normal file
View File

@ -0,0 +1,128 @@
use num_enum::TryFromPrimitive;
use satrs::{
action::ActionRequest,
pus::action::{ActionReplyPus, ActionReplyVariant},
request::{GenericMessage, MessageMetadata},
};
use std::{
env::temp_dir,
path::{Path, PathBuf},
sync::{atomic::AtomicBool, mpsc, Arc},
};
use ops_sat_rs::config::{action_err::INVALID_ACTION_ID, HOME_PATH, STOP_FILE_NAME};
use crate::requests::CompositeRequest;
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u32)]
pub enum ActionId {
StopExperiment = 1,
}
pub struct ExperimentController {
pub composite_request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
pub action_reply_tx: mpsc::Sender<GenericMessage<ActionReplyPus>>,
pub stop_signal: Arc<AtomicBool>,
home_path_stop_file: PathBuf,
tmp_path_stop_file: PathBuf,
}
impl ExperimentController {
pub fn new(
composite_request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
action_reply_tx: mpsc::Sender<GenericMessage<ActionReplyPus>>,
stop_signal: Arc<AtomicBool>,
) -> Self {
let mut home_path_stop_file = PathBuf::new();
home_path_stop_file.push(HOME_PATH.as_path());
home_path_stop_file.push(STOP_FILE_NAME);
let mut tmp_path_stop_file = temp_dir();
tmp_path_stop_file.push(STOP_FILE_NAME);
Self {
composite_request_rx,
action_reply_tx,
stop_signal,
home_path_stop_file,
tmp_path_stop_file,
}
}
}
impl ExperimentController {
pub fn perform_operation(&mut self) {
match self.composite_request_rx.try_recv() {
Ok(msg) => match msg.message {
CompositeRequest::Hk(_) => {
log::warn!("hk request handling unimplemented")
}
CompositeRequest::Action(action_req) => {
self.handle_action_request(msg.requestor_info, action_req);
}
},
Err(e) => {
if e != mpsc::TryRecvError::Empty {
log::error!("composite request rx error: {:?}", e);
}
}
}
self.check_stop_file();
}
pub fn handle_action_request(&mut self, requestor: MessageMetadata, action_req: ActionRequest) {
let action_id = ActionId::try_from(action_req.action_id);
if action_id.is_err() {
let result = self.action_reply_tx.send(GenericMessage::new_action_reply(
requestor,
action_req.action_id,
ActionReplyVariant::CompletionFailed {
error_code: INVALID_ACTION_ID,
params: None,
},
));
if result.is_err() {
log::error!("sending action reply failed");
}
return;
}
let action_id = action_id.unwrap();
match action_id {
ActionId::StopExperiment => {
self.stop_signal
.store(true, std::sync::atomic::Ordering::Relaxed);
let result = self.action_reply_tx.send(GenericMessage::new_action_reply(
requestor,
action_req.action_id,
ActionReplyVariant::Completed,
));
if result.is_err() {
log::error!("sending action reply failed");
}
}
}
}
pub fn check_stop_file(&self) {
let check_at_path = |path: &Path| {
if path.exists() {
log::warn!(
"Detected stop file name at {:?}. Initiating experiment shutdown",
path
);
// By default, clear the stop file.
let result = std::fs::remove_file(path);
if result.is_err() {
log::error!(
"failed to remove stop file at {:?}: {}",
path,
result.unwrap_err()
);
}
self.stop_signal
.store(true, std::sync::atomic::Ordering::Relaxed);
}
};
check_at_path(self.tmp_path_stop_file.as_path());
check_at_path(self.home_path_stop_file.as_path());
}
}

287
src/events.rs Normal file
View File

@ -0,0 +1,287 @@
use std::sync::mpsc::{self};
use crate::pus::create_verification_reporter;
use ops_sat_rs::config::components::PUS_EVENT_MANAGEMENT;
use satrs::event_man::{EventMessageU32, EventRoutingError};
use satrs::params::WritableToBeBytes;
use satrs::pus::event::EventTmHookProvider;
use satrs::pus::verification::VerificationReporter;
use satrs::request::UniqueApidTargetId;
use satrs::tmtc::PacketAsVec;
use satrs::{
event_man::{EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded},
pus::{
event_man::{
DefaultPusEventU32TmCreator, EventReporter, EventRequest, EventRequestWithToken,
},
verification::{TcStateStarted, VerificationReportingProvider, VerificationToken},
},
spacepackets::time::cds::CdsTime,
};
use ops_sat_rs::update_time;
// This helper sets the APID of the event sender for the PUS telemetry.
#[derive(Default)]
pub struct EventApidSetter {
pub next_apid: u16,
}
impl EventTmHookProvider for EventApidSetter {
fn modify_tm(&self, tm: &mut satrs::spacepackets::ecss::tm::PusTmCreator) {
tm.set_apid(self.next_apid);
}
}
/// The PUS event handler subscribes for all events and converts them into ECSS PUS 5 event
/// packets. It also handles the verification completion of PUS event service requests.
pub struct PusEventHandler {
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
pus_event_tm_creator: DefaultPusEventU32TmCreator<EventApidSetter>,
pus_event_man_rx: mpsc::Receiver<EventMessageU32>,
tm_sender: mpsc::Sender<PacketAsVec>,
time_provider: CdsTime,
timestamp: [u8; 7],
verif_handler: VerificationReporter,
}
impl PusEventHandler {
pub fn new(
tm_sender: mpsc::Sender<PacketAsVec>,
verif_handler: VerificationReporter,
event_manager: &mut EventManagerWithBoundedMpsc,
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
) -> Self {
let event_queue_cap = 30;
let (pus_event_man_tx, pus_event_man_rx) = mpsc::sync_channel(event_queue_cap);
// All events sent to the manager are routed to the PUS event manager, which generates PUS event
// telemetry for each event.
let event_reporter = EventReporter::new_with_hook(
PUS_EVENT_MANAGEMENT.raw(),
0,
0,
128,
EventApidSetter::default(),
)
.unwrap();
let pus_event_dispatcher =
DefaultPusEventU32TmCreator::new_with_default_backend(event_reporter);
let pus_event_man_send_provider = EventU32SenderMpscBounded::new(
PUS_EVENT_MANAGEMENT.raw(),
pus_event_man_tx,
event_queue_cap,
);
event_manager.subscribe_all(pus_event_man_send_provider.target_id());
event_manager.add_sender(pus_event_man_send_provider);
Self {
event_request_rx,
pus_event_tm_creator: pus_event_dispatcher,
pus_event_man_rx,
time_provider: CdsTime::new_with_u16_days(0, 0),
timestamp: [0; 7],
verif_handler,
tm_sender,
}
}
pub fn handle_event_requests(&mut self) {
let report_completion = |event_req: EventRequestWithToken, timestamp: &[u8]| {
let started_token: VerificationToken<TcStateStarted> = event_req
.token
.try_into()
.expect("expected start verification token");
self.verif_handler
.completion_success(&self.tm_sender, started_token, timestamp)
.expect("Sending completion success failed");
};
loop {
// handle event requests
match self.event_request_rx.try_recv() {
Ok(event_req) => match event_req.request {
EventRequest::Enable(event) => {
self.pus_event_tm_creator
.enable_tm_for_event(&event)
.expect("Enabling TM failed");
update_time(&mut self.time_provider, &mut self.timestamp);
report_completion(event_req, &self.timestamp);
}
EventRequest::Disable(event) => {
self.pus_event_tm_creator
.disable_tm_for_event(&event)
.expect("Disabling TM failed");
update_time(&mut self.time_provider, &mut self.timestamp);
report_completion(event_req, &self.timestamp);
}
},
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => {
log::warn!("all event request senders have disconnected");
break;
}
},
}
}
}
pub fn generate_pus_event_tm(&mut self) {
loop {
// Perform the generation of PUS event packets
match self.pus_event_man_rx.try_recv() {
Ok(event_msg) => {
update_time(&mut self.time_provider, &mut self.timestamp);
let param_vec = event_msg.params().map_or(Vec::new(), |param| {
param.to_vec().expect("failed to convert params to vec")
});
// We use the TM modification hook to set the sender APID for each event.
self.pus_event_tm_creator.reporter.tm_hook.next_apid =
UniqueApidTargetId::from(event_msg.sender_id()).apid;
self.pus_event_tm_creator
.generate_pus_event_tm_generic(
&self.tm_sender,
&self.timestamp,
event_msg.event(),
Some(&param_vec),
)
.expect("Sending TM as event failed");
}
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => {
log::warn!("All event senders have disconnected");
break;
}
},
}
}
}
}
pub struct EventHandler {
pub pus_event_handler: PusEventHandler,
event_manager: EventManagerWithBoundedMpsc,
}
impl EventHandler {
pub fn new(
tm_sender: mpsc::Sender<PacketAsVec>,
event_rx: mpsc::Receiver<EventMessageU32>,
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
) -> Self {
let mut event_manager = EventManagerWithBoundedMpsc::new(event_rx);
let pus_event_handler = PusEventHandler::new(
tm_sender,
create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid),
&mut event_manager,
event_request_rx,
);
Self {
pus_event_handler,
event_manager,
}
}
#[allow(dead_code)]
pub fn event_manager(&mut self) -> &mut EventManagerWithBoundedMpsc {
&mut self.event_manager
}
pub fn periodic_operation(&mut self) {
self.pus_event_handler.handle_event_requests();
self.try_event_routing();
self.pus_event_handler.generate_pus_event_tm();
}
pub fn try_event_routing(&mut self) {
let error_handler = |event_msg: &EventMessageU32, error: EventRoutingError| {
self.routing_error_handler(event_msg, error)
};
// Perform the event routing.
self.event_manager.try_event_handling(error_handler);
}
pub fn routing_error_handler(&self, event_msg: &EventMessageU32, error: EventRoutingError) {
log::warn!("event routing error for event {event_msg:?}: {error:?}");
}
}
#[cfg(test)]
mod tests {
use satrs::{
events::EventU32,
pus::verification::VerificationReporterCfg,
spacepackets::{
ecss::{tm::PusTmReader, PusPacket},
CcsdsPacket,
},
tmtc::PacketAsVec,
};
use super::*;
const TEST_CREATOR_ID: UniqueApidTargetId = UniqueApidTargetId::new(1, 2);
const TEST_EVENT: EventU32 = EventU32::new(satrs::events::Severity::Info, 1, 1);
pub struct EventManagementTestbench {
pub event_tx: mpsc::SyncSender<EventMessageU32>,
pub event_manager: EventManagerWithBoundedMpsc,
pub tm_receiver: mpsc::Receiver<PacketAsVec>,
pub pus_event_handler: PusEventHandler,
}
impl EventManagementTestbench {
pub fn new() -> Self {
let (event_tx, event_rx) = mpsc::sync_channel(10);
let (_event_req_tx, event_req_rx) = mpsc::sync_channel(10);
let (tm_sender, tm_receiver) = mpsc::channel();
let verif_reporter_cfg = VerificationReporterCfg::new(0x05, 2, 2, 128).unwrap();
let verif_reporter =
VerificationReporter::new(PUS_EVENT_MANAGEMENT.id(), &verif_reporter_cfg);
let mut event_manager = EventManagerWithBoundedMpsc::new(event_rx);
let pus_event_handler =
PusEventHandler::new(tm_sender, verif_reporter, &mut event_manager, event_req_rx);
Self {
event_tx,
tm_receiver,
event_manager,
pus_event_handler,
}
}
}
#[test]
fn test_basic_event_generation() {
let mut testbench = EventManagementTestbench::new();
testbench
.event_tx
.send(EventMessageU32::new(
TEST_CREATOR_ID.id(),
EventU32::new(satrs::events::Severity::Info, 1, 1),
))
.expect("failed to send event");
testbench.pus_event_handler.handle_event_requests();
testbench.event_manager.try_event_handling(|_, _| {});
testbench.pus_event_handler.generate_pus_event_tm();
let tm_packet = testbench
.tm_receiver
.try_recv()
.expect("failed to receive TM packet");
assert_eq!(tm_packet.sender_id, PUS_EVENT_MANAGEMENT.id());
let tm_reader = PusTmReader::new(&tm_packet.packet, 7)
.expect("failed to create TM reader")
.0;
assert_eq!(tm_reader.apid(), TEST_CREATOR_ID.apid);
assert_eq!(tm_reader.user_data().len(), 4);
let event_read_back = EventU32::from_be_bytes(tm_reader.user_data().try_into().unwrap());
assert_eq!(event_read_back, TEST_EVENT);
}
#[test]
fn test_basic_event_disabled() {
// TODO: Add test.
}
}

471
src/handlers/camera.rs Normal file
View File

@ -0,0 +1,471 @@
use crate::pus::action::send_data_reply;
/// Device handler implementation for the IMS-100 Imager used on the OPS-SAT mission.
///
/// From the [OPS-SAT Experimenter Wiki](https://opssat1.esoc.esa.int/projects/experimenter-information/wiki/Camera_Introduction):
/// OPS-SAT has a BST IMS-100 Imager onboard for image acquisition. These RGGB images are 2048x1944px in size.
///
/// There are two ways of taking pictures, with the NMF or by using the camera API directly.
///
/// As the NMF method is already explained in the NMF documentation we will focus on triggering the camera API.
///
/// The camera is located on the -Z face of OPS-SAT
///
/// Mapping between camera and satellite frames:
/// cam body
/// +x -z
/// +y -x
/// +z +y
///
/// Looking onto the FlatSat, the picture coordinate system of the camera is:
///
/// Z Z pointing inside Flatsat
/// x---> X
/// |
/// |
/// v Y
///
/// see also https://opssat1.esoc.esa.int/dmsf/files/6/view
use crate::requests::CompositeRequest;
use derive_new::new;
use log::{debug, info};
use ops_sat_rs::TimeStampHelper;
use satrs::action::{ActionRequest, ActionRequestVariant};
use satrs::hk::HkRequest;
use satrs::pus::action::{ActionReplyPus, ActionReplyVariant};
use satrs::pus::EcssTmtcError;
use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId};
use satrs::tmtc::PacketAsVec;
use serde::{Deserialize, Serialize};
use std::fmt;
use std::fmt::Formatter;
use std::process::{Command, Output};
use std::sync::mpsc;
// const IMS_TESTAPP: &str = "scripts/ims100_testapp";
const IMS_TESTAPP: &str = "ims100_testapp";
const DEFAULT_SINGLE_CAM_PARAMS: CameraPictureParameters = CameraPictureParameters {
R: 8,
G: 8,
B: 8,
N: 1,
P: true,
E: 2,
W: 1000,
};
const BALANCED_SINGLE_CAM_PARAMS: CameraPictureParameters = CameraPictureParameters {
R: 13,
G: 7,
B: 8,
N: 1,
P: true,
E: 2,
W: 1000,
};
const DEFAULT_SINGLE_FLATSAT_CAM_PARAMS: CameraPictureParameters = CameraPictureParameters {
R: 8,
G: 8,
B: 8,
N: 1,
P: true,
E: 200,
W: 1000,
};
const BALANCED_SINGLE_FLATSAT_CAM_PARAMS: CameraPictureParameters = CameraPictureParameters {
R: 13,
G: 7,
B: 8,
N: 1,
P: true,
E: 200,
W: 1000,
};
// TODO copy as action
// TODO ls -l via cfdp
// TODO howto downlink
#[derive(Debug)]
pub enum CameraActionId {
DefaultSingle = 1,
BalancedSingle = 2,
DefaultSingleFlatSat = 3,
BalancedSingleFlatSat = 4,
CustomParameters = 5,
}
impl TryFrom<u32> for CameraActionId {
type Error = ();
fn try_from(value: u32) -> Result<Self, Self::Error> {
match value {
value if value == CameraActionId::DefaultSingle as u32 => {
Ok(CameraActionId::DefaultSingle)
}
value if value == CameraActionId::BalancedSingle as u32 => {
Ok(CameraActionId::BalancedSingle)
}
value if value == CameraActionId::DefaultSingleFlatSat as u32 => {
Ok(CameraActionId::DefaultSingleFlatSat)
}
value if value == CameraActionId::BalancedSingleFlatSat as u32 => {
Ok(CameraActionId::BalancedSingleFlatSat)
}
value if value == CameraActionId::CustomParameters as u32 => {
Ok(CameraActionId::CustomParameters)
}
_ => Err(()),
}
}
}
// TODO: what happens if limits are exceeded?
#[allow(non_snake_case)]
#[derive(Debug, Serialize, Deserialize, new)]
pub struct CameraPictureParameters {
pub R: u8,
pub G: u8,
pub B: u8,
pub N: u8, // number of images, max: 26
pub P: bool, // .png flag, true converts raw extracted image from camera to a png
pub E: u32, // exposure time in ms, max: 1580, default: 2, FlatSat: 200
pub W: u32, // wait time between pictures in ms, max: 40000
}
#[derive(Debug)]
#[allow(dead_code)]
pub enum CameraError {
TakeImageError,
NoDataSent,
VariantNotImplemented,
DeserializeError,
ListFileError,
IoError(std::io::Error),
EcssTmtcError(EcssTmtcError),
}
impl From<std::io::Error> for CameraError {
fn from(value: std::io::Error) -> Self {
Self::IoError(value)
}
}
impl From<EcssTmtcError> for CameraError {
fn from(value: EcssTmtcError) -> Self {
Self::EcssTmtcError(value)
}
}
impl fmt::Display for CameraError {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
CameraError::TakeImageError => {
write!(f, "Error taking image.")
}
CameraError::NoDataSent => {
write!(f, "No data sent.")
}
CameraError::VariantNotImplemented => {
write!(f, "Request variant not implemented.")
}
CameraError::DeserializeError => {
write!(f, "Unable to deserialize parameters.")
}
CameraError::ListFileError => {
write!(f, "Error listing image files.")
}
CameraError::IoError(io_error) => {
write!(f, "{}", io_error)
}
CameraError::EcssTmtcError(ecss_tmtc_error) => {
write!(f, "{}", ecss_tmtc_error)
}
}
}
}
#[allow(dead_code)]
#[derive(Debug)]
pub struct IMS100BatchHandler {
id: UniqueApidTargetId,
// mode_interface: MpscModeLeafInterface,
composite_request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
// hk_reply_sender: mpsc::Sender<GenericMessage<HkReply>>,
tm_tx: mpsc::Sender<PacketAsVec>,
action_reply_tx: mpsc::Sender<GenericMessage<ActionReplyPus>>,
stamp_helper: TimeStampHelper,
}
#[allow(non_snake_case)]
#[allow(dead_code)]
impl IMS100BatchHandler {
pub fn new(
id: UniqueApidTargetId,
composite_request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
tm_tx: mpsc::Sender<PacketAsVec>,
action_reply_tx: mpsc::Sender<GenericMessage<ActionReplyPus>>,
stamp_helper: TimeStampHelper,
) -> Self {
Self {
id,
composite_request_rx,
tm_tx,
action_reply_tx,
stamp_helper,
}
}
pub fn periodic_operation(&mut self) {
self.stamp_helper.update_from_now();
// Handle requests.
self.handle_composite_requests();
// self.handle_mode_requests();
}
pub fn handle_composite_requests(&mut self) {
loop {
match self.composite_request_rx.try_recv() {
Ok(ref msg) => match &msg.message {
CompositeRequest::Hk(hk_request) => {
self.handle_hk_request(&msg.requestor_info, hk_request);
}
CompositeRequest::Action(action_request) => {
if let Err(e) =
self.handle_action_request(&msg.requestor_info, action_request)
{
log::warn!("camera action request IO error: {e}");
}
}
},
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => {
log::warn!("composite request receiver disconnected");
break;
}
},
}
}
}
pub fn handle_hk_request(
&mut self,
_requestor_info: &MessageMetadata,
_hk_request: &HkRequest,
) {
// TODO add hk to opssat
}
pub fn handle_action_request(
&mut self,
requestor_info: &MessageMetadata,
action_request: &ActionRequest,
) -> Result<(), CameraError> {
let param =
match CameraActionId::try_from(action_request.action_id).expect("Invalid action id") {
CameraActionId::DefaultSingle => DEFAULT_SINGLE_CAM_PARAMS,
CameraActionId::BalancedSingle => BALANCED_SINGLE_CAM_PARAMS,
CameraActionId::DefaultSingleFlatSat => DEFAULT_SINGLE_FLATSAT_CAM_PARAMS,
CameraActionId::BalancedSingleFlatSat => BALANCED_SINGLE_FLATSAT_CAM_PARAMS,
CameraActionId::CustomParameters => match &action_request.variant {
ActionRequestVariant::NoData => return Err(CameraError::NoDataSent),
ActionRequestVariant::StoreData(_) => {
// let param = serde_json::from_slice()
// TODO implement non dynamic version
return Err(CameraError::VariantNotImplemented);
}
ActionRequestVariant::VecData(data) => {
let param: serde_json::Result<CameraPictureParameters> =
serde_json::from_slice(data.as_slice());
match param {
Ok(param) => param,
Err(_) => {
return Err(CameraError::DeserializeError);
}
}
}
_ => return Err(CameraError::VariantNotImplemented),
},
};
let output = self.take_picture(param)?;
debug!("Sending action reply!");
send_data_reply(self.id, output.stdout, &self.stamp_helper, &self.tm_tx)?;
self.action_reply_tx
.send(GenericMessage::new(
*requestor_info,
ActionReplyPus::new(action_request.action_id, ActionReplyVariant::Completed),
))
.unwrap();
Ok(())
}
pub fn take_picture(&mut self, param: CameraPictureParameters) -> Result<Output, CameraError> {
info!("Taking image!");
let mut cmd = Command::new(IMS_TESTAPP);
cmd.arg("-R")
.arg(&param.R.to_string())
.arg("-G")
.arg(&param.G.to_string())
.arg("-B")
.arg(&param.B.to_string())
.arg("-c")
.arg("/dev/cam_tty")
.arg("-m")
.arg("/dev/cam_sd")
.arg("-v")
.arg("0")
.arg("-n")
.arg(&param.N.to_string());
if param.P {
cmd.arg("-p");
}
cmd.arg("-e")
.arg(&param.E.to_string())
.arg("-w")
.arg(&param.W.to_string());
let output = cmd.output()?;
debug!("Imager Output: {}", String::from_utf8_lossy(&output.stdout));
Ok(output)
}
pub fn list_current_images(&self) -> Result<Vec<String>, CameraError> {
let output = Command::new("ls").arg("-l").arg("*.png").output()?;
if output.status.success() {
let output_str = String::from_utf8(output.stdout).unwrap();
let files: Vec<String> = output_str.lines().map(|s| s.to_string()).collect();
Ok(files)
} else {
Err(CameraError::ListFileError)
}
}
#[allow(clippy::too_many_arguments)]
pub fn take_picture_from_str(
&mut self,
R: &str,
G: &str,
B: &str,
N: &str,
P: &str,
E: &str,
W: &str,
) -> Result<(), CameraError> {
let mut cmd = Command::new("ims100_testapp");
cmd.arg("-R")
.arg(R)
.arg("-G")
.arg(G)
.arg("-B")
.arg(B)
.arg("-c")
.arg("/dev/cam_tty")
.arg("-m")
.arg("/dev/cam_sd")
.arg("-v")
.arg("0")
.arg("-n")
.arg(N)
.arg(P)
.arg("-e")
.arg(E)
.arg("-w")
.arg(W);
let output = cmd.output()?;
debug!("{}", String::from_utf8_lossy(&output.stdout));
Ok(())
}
}
#[cfg(test)]
mod tests {
use crate::handlers::camera::{
CameraActionId, CameraPictureParameters, IMS100BatchHandler,
DEFAULT_SINGLE_FLATSAT_CAM_PARAMS,
};
use crate::requests::CompositeRequest;
use ops_sat_rs::config::components::CAMERA_HANDLER;
use ops_sat_rs::TimeStampHelper;
use satrs::action::{ActionRequest, ActionRequestVariant};
use satrs::pus::action::ActionReplyPus;
use satrs::request::{GenericMessage, MessageMetadata};
use satrs::tmtc::PacketAsVec;
use std::sync::mpsc;
use std::sync::mpsc::{Receiver, Sender};
fn create_handler() -> (
IMS100BatchHandler,
Sender<GenericMessage<CompositeRequest>>,
Receiver<PacketAsVec>,
Receiver<GenericMessage<ActionReplyPus>>,
) {
let (composite_request_tx, composite_request_rx) = mpsc::channel();
let (tm_tx, tm_rx) = mpsc::channel();
let (action_reply_tx, action_reply_rx) = mpsc::channel();
let time_helper = TimeStampHelper::default();
let cam_handler: IMS100BatchHandler = IMS100BatchHandler::new(
CAMERA_HANDLER,
composite_request_rx,
tm_tx,
action_reply_tx,
time_helper,
);
(cam_handler, composite_request_tx, tm_rx, action_reply_rx)
}
#[test]
fn command_line_execution() {
let (mut cam_handler, req_tx, tm_rx, action_reply_rx) = create_handler();
cam_handler
.take_picture(DEFAULT_SINGLE_FLATSAT_CAM_PARAMS)
.unwrap();
}
#[test]
fn serialize_and_deserialize_command() {
let data = serde_json::to_string(&DEFAULT_SINGLE_FLATSAT_CAM_PARAMS).unwrap();
println!("{}", data);
let param: CameraPictureParameters = serde_json::from_str(&data).unwrap();
println!("{:?}", param);
}
#[test]
fn test_action_req() {
let (mut cam_handler, req_tx, tm_rx, action_reply_rx) = create_handler();
let data = serde_json::to_string(&DEFAULT_SINGLE_FLATSAT_CAM_PARAMS).unwrap();
let req = ActionRequest::new(
CameraActionId::CustomParameters as u32,
ActionRequestVariant::VecData(data.as_bytes().to_vec()),
);
cam_handler
.handle_action_request(&MessageMetadata::new(1, 1), &req)
.unwrap();
}
#[test]
fn test_action_req_channel() {
let (mut cam_handler, req_tx, tm_rx, action_reply_rx) = create_handler();
let data = serde_json::to_string(&DEFAULT_SINGLE_FLATSAT_CAM_PARAMS).unwrap();
let req = ActionRequest::new(
CameraActionId::CustomParameters as u32,
ActionRequestVariant::VecData(data.as_bytes().to_vec()),
);
let req = CompositeRequest::Action(req);
req_tx
.send(GenericMessage::new(MessageMetadata::new(1, 1), req))
.unwrap();
cam_handler.periodic_operation();
}
}

1
src/handlers/mod.rs Normal file
View File

@ -0,0 +1 @@
pub mod camera;

1
src/interface/can.rs Normal file
View File

@ -0,0 +1 @@
//! This is a preliminary implementation of the necessary infrastructure to enable communication over OPS-SAT's internal CAN Bus.

View File

@ -1,2 +1,36 @@
pub mod tcp;
pub mod udp;
use derive_new::new;
use ops_sat_rs::config::SPP_CLIENT_WIRETAPPING_RX;
use satrs::{
encoding::ccsds::{SpValidity, SpacePacketValidator},
spacepackets::PacketId,
};
pub mod can;
pub mod tcp_server;
pub mod tcp_spp_client;
pub mod udp_server;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TcpComponent {
Server,
Client,
}
#[derive(new, Clone)]
pub struct SimpleSpValidator {
component: TcpComponent,
valid_ids: Vec<PacketId>,
}
impl SpacePacketValidator for SimpleSpValidator {
fn validate(&self, sp_header: &satrs::spacepackets::SpHeader, raw_buf: &[u8]) -> SpValidity {
if SPP_CLIENT_WIRETAPPING_RX && self.component == TcpComponent::Client {
log::debug!("sp header: {:?}", sp_header);
log::debug!("raw data: {:x?}", raw_buf);
}
if self.valid_ids.contains(&sp_header.packet_id) {
return SpValidity::Valid;
}
SpValidity::Skip
}
}

View File

@ -1,17 +1,18 @@
use std::{
collections::{HashSet, VecDeque},
sync::{Arc, Mutex},
collections::VecDeque,
sync::{atomic::AtomicBool, mpsc, Arc, Mutex},
};
use log::{info, warn};
use ops_sat_rs::config::tasks::STOP_CHECK_FREQUENCY;
use satrs::{
hal::std::tcp_server::{ServerConfig, TcpSpacepacketsServer},
pus::ReceivesEcssPusTc,
hal::std::tcp_server::{HandledConnectionHandler, ServerConfig, TcpSpacepacketsServer},
queue::GenericSendError,
spacepackets::PacketId,
tmtc::{CcsdsDistributor, CcsdsError, ReceivesCcsdsTc, TmPacketSourceCore},
tmtc::{PacketAsVec, PacketSource},
};
use crate::ccsds::CcsdsReceiver;
use super::{SimpleSpValidator, TcpComponent};
#[derive(Default, Clone)]
pub struct SyncTcpTmSource {
@ -41,7 +42,7 @@ impl SyncTcpTmSource {
}
}
impl TmPacketSourceCore for SyncTcpTmSource {
impl PacketSource for SyncTcpTmSource {
type Error = ();
fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
@ -69,59 +70,54 @@ impl TmPacketSourceCore for SyncTcpTmSource {
}
}
pub type TcpServerType<TcSource, MpscErrorType> = TcpSpacepacketsServer<
(),
CcsdsError<MpscErrorType>,
SyncTcpTmSource,
CcsdsDistributor<CcsdsReceiver<TcSource, MpscErrorType>, MpscErrorType>,
HashSet<PacketId>,
>;
#[derive(Default)]
pub struct ConnectionFinishedHandler {}
pub struct TcpTask<
TcSource: ReceivesCcsdsTc<Error = MpscErrorType>
+ ReceivesEcssPusTc<Error = MpscErrorType>
+ Clone
+ Send
+ 'static,
MpscErrorType: 'static,
> {
server: TcpServerType<TcSource, MpscErrorType>,
impl HandledConnectionHandler for ConnectionFinishedHandler {
fn handled_connection(&mut self, info: satrs::hal::std::tcp_server::HandledConnectionInfo) {
info!(
"Served {} TMs and {} TCs for client {:?}",
info.num_sent_tms, info.num_received_tcs, info.addr
);
}
}
impl<
TcSource: ReceivesCcsdsTc<Error = MpscErrorType>
+ ReceivesEcssPusTc<Error = MpscErrorType>
+ Clone
+ Send
+ 'static,
MpscErrorType: 'static + core::fmt::Debug,
> TcpTask<TcSource, MpscErrorType>
{
pub type TcpServer = TcpSpacepacketsServer<
SyncTcpTmSource,
mpsc::Sender<PacketAsVec>,
SimpleSpValidator,
ConnectionFinishedHandler,
(),
GenericSendError,
>;
pub struct TcpTask(pub TcpServer);
impl TcpTask {
pub fn new(
cfg: ServerConfig,
tm_source: SyncTcpTmSource,
tc_receiver: CcsdsDistributor<CcsdsReceiver<TcSource, MpscErrorType>, MpscErrorType>,
packet_id_lookup: HashSet<PacketId>,
tc_sender: mpsc::Sender<PacketAsVec>,
valid_ids: Vec<PacketId>,
stop_signal: Arc<AtomicBool>,
) -> Result<Self, std::io::Error> {
Ok(Self {
server: TcpSpacepacketsServer::new(cfg, tm_source, tc_receiver, packet_id_lookup)?,
})
Ok(Self(TcpSpacepacketsServer::new(
cfg,
tm_source,
tc_sender,
SimpleSpValidator::new(TcpComponent::Server, valid_ids),
ConnectionFinishedHandler::default(),
Some(stop_signal),
)?))
}
pub fn periodic_operation(&mut self) {
loop {
let result = self.server.handle_next_connection();
let result = self.0.handle_all_connections(Some(STOP_CHECK_FREQUENCY));
match result {
Ok(conn_result) => {
info!(
"Served {} TMs and {} TCs for client {:?}",
conn_result.num_sent_tms, conn_result.num_received_tcs, conn_result.addr
);
}
Ok(_conn_result) => (),
Err(e) => {
warn!("TCP server error: {e:?}");
}
}
}
}
}

View File

@ -0,0 +1,663 @@
use std::io::{self, Read};
use std::net::TcpStream as StdTcpStream;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::mpsc;
use std::time::Duration;
use mio::net::TcpStream as MioTcpStream;
use mio::{Events, Interest, Poll, Token};
use ops_sat_rs::config::tasks::STOP_CHECK_FREQUENCY;
use ops_sat_rs::config::{SPP_CLIENT_WIRETAPPING_RX, SPP_CLIENT_WIRETAPPING_TX};
use satrs::encoding::ccsds::parse_buffer_for_ccsds_space_packets;
use satrs::queue::GenericSendError;
use satrs::spacepackets::PacketId;
use satrs::tmtc::PacketAsVec;
use satrs::ComponentId;
use thiserror::Error;
use super::{SimpleSpValidator, TcpComponent};
#[derive(Debug, Error)]
pub enum ClientError {
#[error("send error: {0}")]
Send(#[from] GenericSendError),
#[error("io error: {0}")]
Io(#[from] io::Error),
}
#[derive(Debug)]
pub enum ClientResult {
Ok,
ConnectionLost,
}
#[allow(dead_code)]
pub struct TcpSppClientCommon {
id: ComponentId,
read_buf: [u8; 4096],
tm_tcp_client_rx: mpsc::Receiver<PacketAsVec>,
server_addr: SocketAddr,
tc_source_tx: mpsc::Sender<PacketAsVec>,
validator: SimpleSpValidator,
}
#[allow(dead_code)]
impl TcpSppClientCommon {
pub fn handle_read_bytestream(&mut self, read_bytes: usize) -> Result<(), ClientError> {
if SPP_CLIENT_WIRETAPPING_RX {
log::debug!(
"SPP TCP RX {} bytes: {:x?}",
read_bytes,
&self.read_buf[..read_bytes]
);
}
// The parser is able to deal with broken tail packets, but we ignore those for now.
parse_buffer_for_ccsds_space_packets(
&self.read_buf[..read_bytes],
&self.validator,
self.id,
&self.tc_source_tx,
)?;
Ok(())
}
pub fn write_to_server(&mut self, client: &mut impl io::Write) -> io::Result<()> {
loop {
match self.tm_tcp_client_rx.try_recv() {
Ok(tm) => {
if SPP_CLIENT_WIRETAPPING_TX {
log::debug!(
"SPP TCP TX {}: {:x?}",
tm.packet.len(),
tm.packet.as_slice()
);
}
client.write_all(&tm.packet)?;
}
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => {
println!("god fuckikng damn it");
log::error!("TM sender to TCP client has disconnected");
break;
}
},
}
}
Ok(())
}
}
pub struct TcpSppClientStd {
common: TcpSppClientCommon,
read_and_idle_delay: Duration,
// Optional to allow periodic reconnection attempts on the TCP server.
stream: Option<StdTcpStream>,
}
impl TcpSppClientStd {
pub fn new(
id: ComponentId,
tc_source_tx: mpsc::Sender<PacketAsVec>,
tm_tcp_client_rx: mpsc::Receiver<PacketAsVec>,
valid_ids: &'static [PacketId],
read_timeout: Duration,
port: u16,
) -> io::Result<Self> {
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port);
let mut client = Self {
common: TcpSppClientCommon {
id,
read_buf: [0; 4096],
tm_tcp_client_rx,
server_addr,
tc_source_tx,
validator: SimpleSpValidator::new(TcpComponent::Client, valid_ids.to_vec()),
},
read_and_idle_delay: read_timeout,
stream: None,
};
client.attempt_connect(true)?;
Ok(client)
}
pub fn attempt_connect(&mut self, log_error: bool) -> io::Result<bool> {
Ok(match StdTcpStream::connect(self.common.server_addr) {
Ok(stream) => {
stream.set_read_timeout(Some(self.read_and_idle_delay))?;
self.stream = Some(stream);
true
}
Err(e) => {
if log_error {
log::warn!("error connecting to server: {}", e);
}
false
}
})
}
#[allow(dead_code)]
pub fn connected(&self) -> bool {
self.stream.is_some()
}
pub fn operation(&mut self) -> Result<ClientResult, ClientError> {
if let Some(client) = &mut self.stream {
// Write TM first before blocking on the read call.
self.common.write_to_server(client)?;
match client.read(&mut self.common.read_buf) {
// Not sure whether this can happen or whether it is actually an error condition.
Ok(0) => {
log::info!("server closed connection");
self.stream = None;
return Ok(ClientResult::ConnectionLost);
}
Ok(read_bytes) => self.common.handle_read_bytestream(read_bytes)?,
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock || e.kind() == io::ErrorKind::TimedOut
{
self.common.write_to_server(client)?;
return Ok(ClientResult::ConnectionLost);
}
log::warn!("server error: {e:?}");
if e.kind() == io::ErrorKind::ConnectionReset {
self.stream = None;
return Ok(ClientResult::ConnectionLost);
}
return Err(e.into());
}
}
} else {
if self.attempt_connect(false)? {
log::info!("reconnected to server succesfully");
return self.operation();
}
std::thread::sleep(self.read_and_idle_delay);
}
Ok(ClientResult::Ok)
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum ConnectionStatus {
Unknown,
Connected,
LostConnection,
TryingReconnect,
}
/// Currently not used; it does not behave as expected.
#[allow(dead_code)]
pub struct TcpSppClientMio {
common: TcpSppClientCommon,
poll: Poll,
events: Events,
// Optional to allow periodic reconnection attempts on the TCP server.
client: Option<MioTcpStream>,
connection: ConnectionStatus,
}
#[allow(dead_code)]
impl TcpSppClientMio {
pub fn new(
id: ComponentId,
tc_source_tx: mpsc::Sender<PacketAsVec>,
tm_tcp_client_rx: mpsc::Receiver<PacketAsVec>,
valid_ids: &'static [PacketId],
port: u16,
) -> io::Result<Self> {
let poll = Poll::new()?;
let events = Events::with_capacity(128);
let mut client = Self {
common: TcpSppClientCommon {
id,
read_buf: [0; 4096],
tm_tcp_client_rx,
server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port),
tc_source_tx,
validator: SimpleSpValidator::new(TcpComponent::Client, valid_ids.to_vec()),
},
poll,
events,
client: None,
connection: ConnectionStatus::Unknown,
};
client.connect()?;
Ok(client)
}
pub fn connect(&mut self) -> io::Result<()> {
let mut client = MioTcpStream::connect(self.common.server_addr)?;
self.poll.registry().register(
&mut client,
Token(0),
Interest::READABLE | Interest::WRITABLE,
)?;
self.client = Some(client);
self.connection = ConnectionStatus::TryingReconnect;
Ok(())
}
pub fn operation(&mut self) -> Result<(), ClientError> {
match self.connection {
ConnectionStatus::TryingReconnect | ConnectionStatus::Unknown => {
self.check_conn_status()?
}
ConnectionStatus::Connected => {
self.check_conn_status()?;
self.poll
.poll(&mut self.events, Some(STOP_CHECK_FREQUENCY))?;
let events: Vec<mio::event::Event> = self.events.iter().cloned().collect();
for event in events {
if event.token() == Token(0) {
if event.is_readable() {
self.read_from_server()?;
}
// For some reason, we only get this event once.
if event.is_writable() {
self.common.write_to_server(self.client.as_mut().unwrap())?;
}
}
}
return Ok(());
}
ConnectionStatus::LostConnection => self.connect()?,
};
std::thread::sleep(STOP_CHECK_FREQUENCY);
Ok(())
}
pub fn read_from_server(&mut self) -> Result<(), ClientError> {
match self
.client
.as_mut()
.unwrap()
.read(&mut self.common.read_buf)
{
Ok(0) => (),
Ok(read_bytes) => self.common.handle_read_bytestream(read_bytes)?,
Err(e) => return Err(e.into()),
}
Ok(())
}
pub fn check_conn_status(&mut self) -> io::Result<()> {
match self.client.as_mut().unwrap().peer_addr() {
Ok(_) => {
if self.connection == ConnectionStatus::Unknown
|| self.connection == ConnectionStatus::TryingReconnect
{
self.connection = ConnectionStatus::Connected;
}
Ok(())
}
Err(e) => {
if e.kind() == io::ErrorKind::NotConnected {
log::warn!("lost connection, or do not have one");
self.connection = ConnectionStatus::LostConnection;
return Ok(());
}
Err(e)
}
}
}
}
#[cfg(test)]
mod tests {
use ops_sat_rs::config::EXPERIMENT_APID;
use satrs::spacepackets::{PacketSequenceCtrl, PacketType, SequenceFlags, SpHeader};
use std::{
io::Write,
net::{TcpListener, TcpStream},
sync::{atomic::AtomicBool, Arc},
thread,
time::Duration,
};
use super::*;
const VALID_IDS: &[PacketId] = &[PacketId::new_for_tc(true, EXPERIMENT_APID)];
const TEST_TC: SpHeader = SpHeader::new(
PacketId::new(PacketType::Tc, true, EXPERIMENT_APID),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 0),
1,
);
const TEST_TM: SpHeader = SpHeader::new(
PacketId::new(PacketType::Tm, true, EXPERIMENT_APID),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 0),
1,
);
fn init() {
let _ = env_logger::builder().is_test(true).try_init();
}
struct TcpServerTestbench {
tcp_server: TcpListener,
}
impl TcpServerTestbench {
fn new(port: u16) -> Self {
let tcp_server =
TcpListener::bind(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port)).unwrap();
tcp_server
.set_nonblocking(true)
.expect("setting TCP server non-blocking failed");
Self { tcp_server }
}
fn local_addr(&self) -> SocketAddr {
self.tcp_server.local_addr().unwrap()
}
fn check_for_connections(&mut self, limit: u32) -> Result<TcpStream, ()> {
for _ in 0..limit {
match self.tcp_server.accept() {
Ok((stream, _)) => {
return Ok(stream);
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
thread::sleep(Duration::from_millis(10));
continue;
}
panic!("TCP server accept error: {:?}", e);
}
}
}
Err(())
}
fn try_reading_one_packet(
&mut self,
stream: &mut TcpStream,
limit: u32,
read_buf: &mut [u8],
) -> usize {
let mut read_data = 0;
for _ in 0..limit {
match stream.read(read_buf) {
Ok(0) => {}
Ok(len) => {
read_data = len;
break;
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
continue;
}
panic!("TCP server read error: {:?}", e);
}
}
if read_data > 0 {
break;
}
}
read_data
}
}
// This test simply verifies that the client properly connects to a server.
#[test]
fn basic_client_test() {
let (tc_source_tx, _tc_source_rx) = mpsc::channel();
let (_tm_tcp_client_tx, tm_tcp_client_rx) = mpsc::channel();
let mut tcp_server = TcpServerTestbench::new(0);
let local_addr = tcp_server.local_addr();
let jh0 = thread::spawn(move || {
tcp_server
.check_for_connections(3)
.expect("no client connection detected");
});
let mut spp_client = TcpSppClientStd::new(
1,
tc_source_tx,
tm_tcp_client_rx,
VALID_IDS,
Duration::from_millis(30),
local_addr.port(),
)
.expect("creating TCP SPP client failed");
spp_client.operation().unwrap();
jh0.join().unwrap();
}
// This test verifies that TM is sent to the server properly.
#[test]
fn basic_client_tm_test() {
let (tc_source_tx, _tc_source_rx) = mpsc::channel();
let (tm_tcp_client_tx, tm_tcp_client_rx) = mpsc::channel();
let mut tcp_server = TcpServerTestbench::new(0);
let local_addr = tcp_server.local_addr();
let mut buf: [u8; 7] = [0; 7];
TEST_TM
.write_to_be_bytes(&mut buf)
.expect("writing TM failed");
let jh0 = thread::spawn(move || {
let mut read_buf: [u8; 64] = [0; 64];
let mut stream = tcp_server
.check_for_connections(3)
.expect("no client connection detected");
stream
.set_read_timeout(Some(Duration::from_millis(10)))
.expect("setting read timeout failed");
let read_bytes = tcp_server.try_reading_one_packet(&mut stream, 5, &mut read_buf);
if read_bytes == 0 {
panic!("did not receive expected data");
} else {
assert_eq!(&buf, &read_buf[0..read_bytes]);
}
});
tm_tcp_client_tx
.send(PacketAsVec::new(0, buf.to_vec()))
.unwrap();
let mut spp_client = TcpSppClientStd::new(
1,
tc_source_tx,
tm_tcp_client_rx,
VALID_IDS,
Duration::from_millis(30),
local_addr.port(),
)
.expect("creating TCP SPP client failed");
spp_client.operation().unwrap();
jh0.join().unwrap();
}
// Test that the client can read telecommands from the server.
#[test]
fn basic_client_tc_test() {
let (tc_source_tx, tc_source_rx) = mpsc::channel();
let (_tm_tcp_client_tx, tm_tcp_client_rx) = mpsc::channel();
let mut tcp_server = TcpServerTestbench::new(0);
let local_addr = tcp_server.local_addr();
let mut buf: [u8; 8] = [0; 8];
TEST_TC
.write_to_be_bytes(&mut buf)
.expect("writing TM failed");
let jh0 = thread::spawn(move || {
let mut stream = tcp_server
.check_for_connections(3)
.expect("no client connection detected");
stream
.set_read_timeout(Some(Duration::from_millis(10)))
.expect("setting read timeout failed");
stream.write_all(&buf).expect("writing TC failed");
});
let mut spp_client = TcpSppClientStd::new(
1,
tc_source_tx,
tm_tcp_client_rx,
VALID_IDS,
Duration::from_millis(30),
local_addr.port(),
)
.expect("creating TCP SPP client failed");
assert!(spp_client.connected());
let mut received_packet = false;
(0..3).for_each(|_| {
spp_client.operation().unwrap();
if let Ok(packet) = tc_source_rx.try_recv() {
assert_eq!(packet.packet, buf.to_vec());
received_packet = true;
}
});
if !received_packet {
panic!("did not receive expected data");
}
jh0.join().unwrap();
}
// Test that the client can both read telecommands from the server and send back
// telemetry to the server.
#[test]
fn basic_client_tmtc_test() {
let (tc_source_tx, tc_source_rx) = mpsc::channel();
let (tm_tcp_client_tx, tm_tcp_client_rx) = mpsc::channel();
let mut tcp_server = TcpServerTestbench::new(0);
let local_addr = tcp_server.local_addr();
let mut tc_buf: [u8; 8] = [0; 8];
let mut tm_buf: [u8; 8] = [0; 8];
TEST_TC
.write_to_be_bytes(&mut tc_buf)
.expect("writing TM failed");
TEST_TM
.write_to_be_bytes(&mut tm_buf)
.expect("writing TM failed");
let jh0 = thread::spawn(move || {
let mut read_buf: [u8; 64] = [0; 64];
let mut stream = tcp_server
.check_for_connections(3)
.expect("no client connection detected");
stream
.set_read_timeout(Some(Duration::from_millis(10)))
.expect("setting read timeout failed");
stream.write_all(&tc_buf).expect("writing TC failed");
let read_bytes = tcp_server.try_reading_one_packet(&mut stream, 5, &mut read_buf);
if read_bytes == 0 {
panic!("did not receive expected data");
} else {
assert_eq!(&tm_buf, &read_buf[0..read_bytes]);
}
});
tm_tcp_client_tx
.send(PacketAsVec::new(0, tm_buf.to_vec()))
.unwrap();
let mut spp_client = TcpSppClientStd::new(
1,
tc_source_tx,
tm_tcp_client_rx,
VALID_IDS,
Duration::from_millis(30),
local_addr.port(),
)
.expect("creating TCP SPP client failed");
assert!(spp_client.connected());
let mut received_packet = false;
(0..3).for_each(|_| {
spp_client.operation().unwrap();
if let Ok(packet) = tc_source_rx.try_recv() {
assert_eq!(packet.packet, tc_buf.to_vec());
received_packet = true;
}
});
if !received_packet {
panic!("did not receive expected data");
}
jh0.join().unwrap();
}
#[test]
fn test_broken_connection() {
init();
let (tc_source_tx, _tc_source_rx) = mpsc::channel();
let (tm_tcp_client_tx, tm_tcp_client_rx) = mpsc::channel();
let mut tcp_server = TcpServerTestbench::new(0);
let local_port = tcp_server.local_addr().port();
let drop_signal = Arc::new(AtomicBool::new(false));
let drop_signal_0 = drop_signal.clone();
let mut tc_buf: [u8; 8] = [0; 8];
let mut tm_buf: [u8; 8] = [0; 8];
TEST_TC
.write_to_be_bytes(&mut tc_buf)
.expect("writing TM failed");
TEST_TM
.write_to_be_bytes(&mut tm_buf)
.expect("writing TM failed");
let mut jh0 = thread::spawn(move || {
tcp_server
.check_for_connections(3)
.expect("no client connection detected");
drop_signal_0.store(true, std::sync::atomic::Ordering::Relaxed);
});
let mut spp_client = TcpSppClientStd::new(
1,
tc_source_tx,
tm_tcp_client_rx,
VALID_IDS,
Duration::from_millis(30),
local_port,
)
.expect("creating TCP SPP client failed");
while !drop_signal.load(std::sync::atomic::Ordering::Relaxed) {
std::thread::sleep(Duration::from_millis(100));
}
tm_tcp_client_tx
.send(PacketAsVec::new(0, tm_buf.to_vec()))
.unwrap();
match spp_client.operation() {
Ok(ClientResult::ConnectionLost) => (),
Ok(ClientResult::Ok) => {
panic!("expected operation error");
}
Err(ClientError::Io(e)) => {
println!("io error: {:?}", e);
if e.kind() != io::ErrorKind::ConnectionReset
&& e.kind() != io::ErrorKind::ConnectionAborted
{
panic!("expected some disconnet error");
}
}
_ => {
panic!("unexpected error")
}
};
assert!(!spp_client.connected());
jh0.join().unwrap();
tcp_server = TcpServerTestbench::new(local_port);
tm_tcp_client_tx
.send(PacketAsVec::new(0, tm_buf.to_vec()))
.unwrap();
jh0 = thread::spawn(move || {
let mut stream = tcp_server
.check_for_connections(3)
.expect("no client connection detected");
let mut read_buf: [u8; 64] = [0; 64];
let read_bytes = tcp_server.try_reading_one_packet(&mut stream, 5, &mut read_buf);
if read_bytes == 0 {
panic!("did not receive expected data");
} else {
assert_eq!(&tm_buf, &read_buf[0..read_bytes]);
}
});
let result = spp_client.operation();
println!("{:?}", result);
assert!(!spp_client.connected());
assert!(result.is_ok());
jh0.join().unwrap();
}
}

View File

@ -2,18 +2,18 @@ use std::net::{SocketAddr, UdpSocket};
use std::sync::mpsc;
use log::{info, warn};
use satrs::pus::PusTmAsVec;
use satrs::{
hal::std::udp_server::{ReceiveResult, UdpTcServer},
tmtc::CcsdsError,
};
use satrs::hal::std::udp_server::{ReceiveResult, UdpTcServer};
use satrs::queue::GenericSendError;
use satrs::tmtc::PacketAsVec;
use crate::pus::HandlingStatus;
pub trait UdpTmHandler {
fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, recv_addr: &SocketAddr);
}
pub struct DynamicUdpTmHandler {
pub tm_rx: mpsc::Receiver<PusTmAsVec>,
pub tm_rx: mpsc::Receiver<PacketAsVec>,
}
impl UdpTmHandler for DynamicUdpTmHandler {
@ -34,42 +34,39 @@ impl UdpTmHandler for DynamicUdpTmHandler {
}
}
pub struct UdpTmtcServer<TmHandler: UdpTmHandler, SendError> {
pub udp_tc_server: UdpTcServer<CcsdsError<SendError>>,
pub struct UdpTmtcServer<TmHandler: UdpTmHandler> {
pub udp_tc_server: UdpTcServer<mpsc::Sender<PacketAsVec>, GenericSendError>,
pub tm_handler: TmHandler,
}
impl<TmHandler: UdpTmHandler, SendError: core::fmt::Debug + 'static>
UdpTmtcServer<TmHandler, SendError>
{
impl<TmHandler: UdpTmHandler> UdpTmtcServer<TmHandler> {
pub fn periodic_operation(&mut self) {
while self.poll_tc_server() {}
loop {
if self.poll_tc_server() == HandlingStatus::Empty {
break;
}
}
if let Some(recv_addr) = self.udp_tc_server.last_sender() {
self.tm_handler
.send_tm_to_udp_client(&self.udp_tc_server.socket, &recv_addr);
}
}
fn poll_tc_server(&mut self) -> bool {
fn poll_tc_server(&mut self) -> HandlingStatus {
match self.udp_tc_server.try_recv_tc() {
Ok(_) => true,
Err(e) => match e {
ReceiveResult::ReceiverError(e) => match e {
CcsdsError::ByteConversionError(e) => {
warn!("packet error: {e:?}");
true
Ok(_) => HandlingStatus::HandledOne,
Err(e) => {
match e {
ReceiveResult::NothingReceived => (),
ReceiveResult::Io(io_error) => {
warn!("Error receiving TC from UDP server: {io_error}");
}
CcsdsError::CustomError(e) => {
warn!("mpsc custom error {e:?}");
true
ReceiveResult::Send(send_error) => {
warn!("error sending TM to UDP client: {send_error}");
}
},
ReceiveResult::IoError(e) => {
warn!("IO error {e}");
false
}
ReceiveResult::NothingReceived => false,
},
HandlingStatus::Empty
}
}
}
}
@ -79,29 +76,35 @@ mod tests {
use std::{
collections::VecDeque,
net::IpAddr,
sync::{Arc, Mutex},
sync::{mpsc::TryRecvError, Arc, Mutex},
};
use ops_sat_rs::config::{EXPERIMENT_APID, OBSW_SERVER_ADDR};
use satrs::{
spacepackets::{
ecss::{tc::PusTcCreator, WritablePusPacket},
SpHeader,
},
tmtc::ReceivesTcCore,
tmtc::PacketSenderRaw,
ComponentId,
};
use ops_sat_rs::config::{components, OBSW_SERVER_ADDR};
use super::*;
const UDP_SERVER_ID: ComponentId = 0x05;
#[derive(Default, Debug, Clone)]
pub struct TestReceiver {
tc_vec: Arc<Mutex<VecDeque<Vec<u8>>>>,
tc_vec: Arc<Mutex<VecDeque<PacketAsVec>>>,
}
impl ReceivesTcCore for TestReceiver {
type Error = CcsdsError<()>;
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
self.tc_vec.lock().unwrap().push_back(tc_raw.to_vec());
impl PacketSenderRaw for TestReceiver {
type Error = ();
fn send_packet(&self, sender_id: ComponentId, packet: &[u8]) -> Result<(), Self::Error> {
self.tc_vec
.lock()
.unwrap()
.push_back(PacketAsVec::new(sender_id, packet.to_vec()));
Ok(())
}
}
@ -120,9 +123,8 @@ mod tests {
#[test]
fn test_basic() {
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0);
let test_receiver = TestReceiver::default();
let tc_queue = test_receiver.tc_vec.clone();
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(test_receiver)).unwrap();
let (tx, rx) = mpsc::channel();
let udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, sock_addr, 2048, tx).unwrap();
let tm_handler = TestTmHandler::default();
let tm_handler_calls = tm_handler.addrs_to_send_to.clone();
let mut udp_dyn_server = UdpTmtcServer {
@ -130,16 +132,14 @@ mod tests {
tm_handler,
};
udp_dyn_server.periodic_operation();
assert!(tc_queue.lock().unwrap().is_empty());
assert!(tm_handler_calls.lock().unwrap().is_empty());
assert!(matches!(rx.try_recv(), Err(TryRecvError::Empty)));
}
#[test]
fn test_transactions() {
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0);
let test_receiver = TestReceiver::default();
let tc_queue = test_receiver.tc_vec.clone();
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(test_receiver)).unwrap();
let (tx, rx) = mpsc::channel();
let udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, sock_addr, 2048, tx).unwrap();
let server_addr = udp_tc_server.socket.local_addr().unwrap();
let tm_handler = TestTmHandler::default();
let tm_handler_calls = tm_handler.addrs_to_send_to.clone();
@ -147,7 +147,7 @@ mod tests {
udp_tc_server,
tm_handler,
};
let sph = SpHeader::new_for_unseg_tc(components::Apid::GenericPus as u16, 0, 0);
let sph = SpHeader::new_for_unseg_tc(EXPERIMENT_APID, 0, 0);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true)
.to_vec()
.unwrap();
@ -157,10 +157,9 @@ mod tests {
client.send(&ping_tc).unwrap();
udp_dyn_server.periodic_operation();
{
let mut tc_queue = tc_queue.lock().unwrap();
assert!(!tc_queue.is_empty());
let received_tc = tc_queue.pop_front().unwrap();
assert_eq!(received_tc, ping_tc);
let packet_with_sender = rx.try_recv().unwrap();
assert_eq!(packet_with_sender.packet, ping_tc);
assert!(matches!(rx.try_recv(), Err(TryRecvError::Empty)));
}
{
@ -171,7 +170,7 @@ mod tests {
assert_eq!(received_addr, client_addr);
}
udp_dyn_server.periodic_operation();
assert!(tc_queue.lock().unwrap().is_empty());
assert!(matches!(rx.try_recv(), Err(TryRecvError::Empty)));
// Still tries to send to the same client.
{
let mut tm_handler_calls = tm_handler_calls.lock().unwrap();

View File

@ -3,6 +3,7 @@ use satrs::spacepackets::time::TimeWriter;
pub mod config;
#[derive(Debug)]
pub struct TimeStampHelper {
stamper: CdsTime,
time_stamp: [u8; 7],
@ -31,3 +32,12 @@ impl Default for TimeStampHelper {
}
}
}
pub fn update_time(time_provider: &mut CdsTime, timestamp: &mut [u8]) {
time_provider
.update_from_now()
.expect("Could not get current time");
time_provider
.write_to_bytes(timestamp)
.expect("Writing timestamp failed");
}
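A short sketch of how update_time could be used (not part of the commit). The CdsTime import path and the new_with_u16_days constructor are assumptions; the 7-byte buffer matches the CDS short timestamp length used by TimeStampHelper above.

use satrs::spacepackets::time::cds::CdsTime;

fn timestamp_demo() {
    // Assumed constructor: CCSDS days and milliseconds of day, both initially zero.
    let mut time_provider = CdsTime::new_with_u16_days(0, 0);
    let mut stamp_buf = [0u8; 7];
    update_time(&mut time_provider, &mut stamp_buf);
    // stamp_buf now contains the current time as a CDS short timestamp.
}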

View File

@ -1,17 +1,26 @@
use std::path::{Path, PathBuf};
use ops_sat_rs::config::LOG_FOLDER;
pub fn setup_logger() -> Result<(), fern::InitError> {
if !Path::new(LOG_FOLDER).exists() && std::fs::create_dir_all(LOG_FOLDER).is_err() {
eprintln!("Failed to create log folder '{}'", LOG_FOLDER);
}
let mut path_buf = PathBuf::from(LOG_FOLDER);
path_buf.push("output.log");
fern::Dispatch::new()
.format(|out, message, record| {
.format(move |out, message, record| {
out.finish(format_args!(
"{}[{}][{}] {}",
chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
std::thread::current().name().expect("unnamed_thread"),
"[{}][{}][{}] {}",
humantime::format_rfc3339_millis(std::time::SystemTime::now()),
std::thread::current().name().unwrap_or("unnamed_thread"),
record.level(),
message
))
})
.level(log::LevelFilter::Debug)
.chain(std::io::stdout())
.chain(fern::log_file("output.log")?)
.chain(fern::log_file(path_buf.as_os_str())?)
.apply()?;
Ok(())
}

View File

@ -1,195 +1,358 @@
use std::{
net::{IpAddr, SocketAddr},
sync::mpsc,
sync::{atomic::AtomicBool, mpsc, Arc},
thread,
time::Duration,
};
use log::info;
use ops_sat_rs::config::tasks::FREQ_MS_PUS_STACK;
use ops_sat_rs::config::{
tasks::FREQ_MS_UDP_TMTC, OBSW_SERVER_ADDR, PACKET_ID_VALIDATOR, SERVER_PORT,
cfg_file::create_app_config,
components::{CONTROLLER_ID, TCP_SERVER, TCP_SPP_CLIENT, UDP_SERVER},
pool::create_sched_tc_pool,
tasks::{FREQ_MS_CAMERA_HANDLING, FREQ_MS_CTRL, FREQ_MS_PUS_STACK, STOP_CHECK_FREQUENCY},
VALID_PACKET_ID_LIST, VERSION,
};
use ops_sat_rs::config::{components::CAMERA_HANDLER, tasks::FREQ_MS_EVENT_HANDLING};
use ops_sat_rs::config::{tasks::FREQ_MS_UDP_TMTC, OBSW_SERVER_ADDR, SERVER_PORT};
use ops_sat_rs::TimeStampHelper;
use satrs::{
hal::std::{tcp_server::ServerConfig, udp_server::UdpTcServer},
tmtc::CcsdsDistributor,
pus::event_man::EventRequestWithToken,
};
use crate::pus::stack::PusStack;
use crate::pus::test::create_test_service_dynamic;
use crate::pus::{PusReceiver, PusTcMpscRouter};
use crate::tm_funnel::TmFunnelDynamic;
use crate::tmtc::TcSourceTaskDynamic;
use crate::tmtc::tm_sink::TmFunnelDynamic;
use crate::{controller::ExperimentController, pus::test::create_test_service};
use crate::{
ccsds::CcsdsReceiver,
interface::tcp::{SyncTcpTmSource, TcpTask},
interface::udp::{DynamicUdpTmHandler, UdpTmtcServer},
events::EventHandler,
pus::{
hk::create_hk_service, mode::create_mode_service, scheduler::create_scheduler_service,
PusTcDistributor, PusTcMpscRouter,
},
};
use crate::{handlers::camera::IMS100BatchHandler, pus::event::create_event_service};
use crate::{
interface::tcp_server::{SyncTcpTmSource, TcpTask},
interface::udp_server::{DynamicUdpTmHandler, UdpTmtcServer},
logger::setup_logger,
tmtc::PusTcSourceProviderDynamic,
};
use crate::{interface::tcp_spp_client::TcpSppClientStd, tmtc::tc_source::TcSourceTaskDynamic};
use crate::{
pus::{action::create_action_service, stack::PusStack},
requests::GenericRequestRouter,
};
mod ccsds;
mod controller;
mod events;
mod handlers;
mod interface;
mod logger;
mod pus;
mod requests;
mod tm_funnel;
mod tmtc;
#[allow(dead_code)]
fn main() {
setup_logger().expect("setting up logging with fern failed");
println!("OPS-SAT Rust experiment OBSW");
let version_str = VERSION.unwrap_or("?");
println!("OPS-SAT Rust Experiment OBSW v{}", version_str);
let app_cfg = create_app_config();
println!("App Configuration: {:?}", app_cfg);
let stop_signal = Arc::new(AtomicBool::new(false));
let (tc_source_tx, tc_source_rx) = mpsc::channel();
let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel();
let (tm_server_tx, tm_server_rx) = mpsc::channel();
let tc_source = PusTcSourceProviderDynamic(tc_source_tx);
let (tm_tcp_server_tx, tm_tcp_server_rx) = mpsc::channel();
let (tm_tcp_client_tx, tm_tcp_client_rx) = mpsc::channel();
let (pus_test_tx, pus_test_rx) = mpsc::channel();
// let (pus_event_tx, pus_event_rx) = mpsc::channel();
// let (pus_sched_tx, pus_sched_rx) = mpsc::channel();
// let (pus_hk_tx, pus_hk_rx) = mpsc::channel();
// let (pus_action_tx, pus_action_rx) = mpsc::channel();
// let (pus_mode_tx, pus_mode_rx) = mpsc::channel();
let (pus_event_tx, pus_event_rx) = mpsc::channel();
let (pus_sched_tx, pus_sched_rx) = mpsc::channel();
let (pus_hk_tx, pus_hk_rx) = mpsc::channel();
let (pus_action_tx, pus_action_rx) = mpsc::channel();
let (pus_mode_tx, pus_mode_rx) = mpsc::channel();
// let (_pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel();
// let (pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::channel();
// let (pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::channel();
// Create event handling components
// These sender handles are used to send event requests, for example to enable or disable
// certain events.
let (event_tx, event_rx) = mpsc::sync_channel(100);
let (event_request_tx, event_request_rx) = mpsc::channel::<EventRequestWithToken>();
// The event task is the core handler to perform the event routing and TM handling as specified
// in the sat-rs documentation.
let mut event_handler = EventHandler::new(tm_funnel_tx.clone(), event_rx, event_request_rx);
let (pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel();
let (_pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::channel();
let (_pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::channel();
let (controller_composite_tx, controller_composite_rx) = mpsc::channel();
// let (controller_action_reply_tx, controller_action_reply_rx) = mpsc::channel();
let (camera_composite_tx, camera_composite_rx) = mpsc::channel();
// Some requests are targetable. This map is used to retrieve sender handles based on a target ID.
let mut request_map = GenericRequestRouter::default();
request_map
.composite_router_map
.insert(CONTROLLER_ID.id(), controller_composite_tx);
request_map
.composite_router_map
.insert(CAMERA_HANDLER.id(), camera_composite_tx);
let pus_router = PusTcMpscRouter {
test_tc_sender: pus_test_tx,
// event_tc_sender: pus_event_tx,
// sched_tc_sender: pus_sched_tx,
// hk_tc_sender: pus_hk_tx,
// action_tc_sender: pus_action_tx,
// mode_tc_sender: pus_mode_tx,
event_tc_sender: pus_event_tx,
sched_tc_sender: pus_sched_tx,
hk_tc_sender: pus_hk_tx,
action_tc_sender: pus_action_tx,
mode_tc_sender: pus_mode_tx,
};
let pus_test_service = create_test_service_dynamic(
let pus_test_service = create_test_service(tm_funnel_tx.clone(), event_tx.clone(), pus_test_rx);
let pus_scheduler_service = create_scheduler_service(
tm_funnel_tx.clone(),
// event_handler.clone_event_sender(),
pus_test_rx,
tc_source_tx.clone(),
pus_sched_rx,
create_sched_tc_pool(),
);
let pus_event_service =
create_event_service(tm_funnel_tx.clone(), pus_event_rx, event_request_tx);
let pus_action_service = create_action_service(
tm_funnel_tx.clone(),
pus_action_rx,
request_map.clone(),
pus_action_reply_rx,
);
let pus_hk_service = create_hk_service(
tm_funnel_tx.clone(),
pus_hk_rx,
request_map.clone(),
pus_hk_reply_rx,
);
let pus_mode_service = create_mode_service(
tm_funnel_tx.clone(),
pus_mode_rx,
request_map,
pus_mode_reply_rx,
);
// let pus_scheduler_service = create_scheduler_service_dynamic(
// tm_funnel_tx.clone(),
// tc_source.0.clone(),
// pus_sched_rx,
// create_sched_tc_pool(),
// );
//
// let pus_event_service =
// create_event_service_dynamic(tm_funnel_tx.clone(), pus_event_rx, event_request_tx);
// let pus_action_service = create_action_service_dynamic(
// tm_funnel_tx.clone(),
// pus_action_rx,
// request_map.clone(),
// pus_action_reply_rx,
// );
// let pus_hk_service = create_hk_service_dynamic(
// tm_funnel_tx.clone(),
// pus_hk_rx,
// request_map.clone(),
// pus_hk_reply_rx,
// );
// let pus_mode_service = create_mode_service_dynamic(
// tm_funnel_tx.clone(),
// pus_mode_rx,
// request_map,
// pus_mode_reply_rx,
// );
let mut pus_stack = PusStack::new(
pus_test_service,
// pus_hk_service,
// pus_event_service,
// pus_action_service,
// pus_scheduler_service,
// pus_mode_service,
pus_hk_service,
pus_event_service,
pus_action_service,
pus_scheduler_service,
pus_mode_service,
);
let ccsds_receiver = CcsdsReceiver { tc_source };
let mut tmtc_task = TcSourceTaskDynamic::new(
tc_source_rx,
PusReceiver::new(tm_funnel_tx.clone(), pus_router),
PusTcDistributor::new(tm_funnel_tx.clone(), pus_router),
);
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
let udp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver.clone());
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(udp_ccsds_distributor))
let udp_tc_server = UdpTcServer::new(UDP_SERVER.id(), sock_addr, 2048, tc_source_tx.clone())
.expect("creating UDP TMTC server failed");
let mut udp_tmtc_server = UdpTmtcServer {
udp_tc_server,
tm_handler: DynamicUdpTmHandler {
tm_rx: tm_server_rx,
tm_rx: tm_tcp_server_rx,
},
};
let tcp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver);
let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
let tcp_server_cfg = ServerConfig::new(
TCP_SERVER.id(),
sock_addr,
Duration::from_millis(400),
4096,
8192,
);
let sync_tm_tcp_source = SyncTcpTmSource::new(200);
let mut tcp_server = TcpTask::new(
tcp_server_cfg,
sync_tm_tcp_source.clone(),
tcp_ccsds_distributor,
PACKET_ID_VALIDATOR.clone(),
tc_source_tx.clone(),
VALID_PACKET_ID_LIST.to_vec(),
stop_signal.clone(),
)
.expect("tcp server creation failed");
let mut tm_funnel = TmFunnelDynamic::new(sync_tm_tcp_source, tm_funnel_rx, tm_server_tx);
let mut tm_sink = TmFunnelDynamic::new(
sync_tm_tcp_source,
tm_funnel_rx,
tm_tcp_server_tx,
tm_tcp_client_tx,
stop_signal.clone(),
);
let mut controller = ExperimentController::new(
controller_composite_rx,
pus_action_reply_tx.clone(),
stop_signal.clone(),
);
let mut tcp_spp_client = TcpSppClientStd::new(
TCP_SPP_CLIENT.id(),
tc_source_tx,
tm_tcp_client_rx,
VALID_PACKET_ID_LIST,
STOP_CHECK_FREQUENCY,
app_cfg.tcp_spp_server_port,
)
.expect("creating TCP SPP client failed");
let timestamp_helper = TimeStampHelper::default();
let mut camera_handler: IMS100BatchHandler = IMS100BatchHandler::new(
CAMERA_HANDLER,
camera_composite_rx,
tm_funnel_tx.clone(),
pus_action_reply_tx.clone(),
timestamp_helper,
);
// Main Task Thread Definitions
// Main Experiment Control Task
info!("Starting CTRL task");
let ctrl_stop_signal = stop_signal.clone();
let jh_ctrl_thread = thread::Builder::new()
.name("ops-sat ctrl".to_string())
.spawn(move || loop {
controller.perform_operation();
if ctrl_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
break;
}
thread::sleep(Duration::from_millis(FREQ_MS_CTRL));
})
.unwrap();
// TMTC and UDP Task
info!("Starting TMTC and UDP task");
let tmtc_stop_signal = stop_signal.clone();
let jh_udp_tmtc = thread::Builder::new()
.name("TMTC and UDP".to_string())
.name("ops-sat tmtc-udp".to_string())
.spawn(move || {
info!("Running UDP server on port {SERVER_PORT}");
loop {
udp_tmtc_server.periodic_operation();
tmtc_task.periodic_operation();
if tmtc_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
break;
}
thread::sleep(Duration::from_millis(FREQ_MS_UDP_TMTC));
}
})
.unwrap();
info!("Starting TCP task");
let jh_tcp = thread::Builder::new()
.name("TCP".to_string())
// TCP Server Task
let tcp_server_stop_signal = stop_signal.clone();
info!("Starting TCP server task");
let jh_tcp_server = thread::Builder::new()
.name("ops-sat tcp-server".to_string())
.spawn(move || {
info!("Running TCP server on port {SERVER_PORT}");
loop {
tcp_server.periodic_operation();
if tcp_server_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
break;
}
}
})
.unwrap();
info!("Starting TM funnel task");
let jh_tm_funnel = thread::Builder::new()
.name("TM Funnel".to_string())
.spawn(move || loop {
tm_funnel.operation();
// TCP SPP Client Task
// We could also move this into the existing TCP server thread, but we would have to adapt
// the server code so that it no longer blocks, and pause manually when both the client
// and the server are idle and have nothing to do.
let tcp_client_stop_signal = stop_signal.clone();
info!("Starting TCP SPP client task");
let jh_tcp_client = thread::Builder::new()
.name("ops-sat tcp-client".to_string())
.spawn(move || {
info!("Running TCP SPP client");
loop {
let result = tcp_spp_client.operation();
if let Err(e) = result {
log::error!("TCP SPP client error: {}", e);
}
if tcp_client_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
break;
}
}
})
.unwrap();
info!("Starting PUS handler thread");
// TM Funnel Task
info!("Starting TM funnel task");
let tm_sink_stop_signal = stop_signal.clone();
let jh_tm_funnel = thread::Builder::new()
.name("ops-sat tm-sink".to_string())
.spawn(move || loop {
tm_sink.operation();
if tm_sink_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
break;
}
})
.unwrap();
info!("Starting event handling task");
let jh_event_handling = thread::Builder::new()
.name("sat-rs events".to_string())
.spawn(move || loop {
event_handler.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_EVENT_HANDLING));
})
.unwrap();
// PUS Handler Task
info!("Starting PUS handlers task");
let pus_stop_signal = stop_signal.clone();
let jh_pus_handler = thread::Builder::new()
.name("PUS".to_string())
.name("ops-sat pus".to_string())
.spawn(move || loop {
pus_stack.periodic_operation();
if pus_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
break;
}
thread::sleep(Duration::from_millis(FREQ_MS_PUS_STACK));
})
.unwrap();
// Camera Handler Task
info!("Starting camera handler task");
let camera_stop_signal = stop_signal.clone();
let jh_camera_handler = thread::Builder::new()
.name("ops-sat camera".to_string())
.spawn(move || loop {
camera_handler.periodic_operation();
if camera_stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
break;
}
thread::sleep(Duration::from_millis(FREQ_MS_CAMERA_HANDLING));
})
.unwrap();
// Join Threads
jh_ctrl_thread
.join()
.expect("Joining Controller thread failed");
jh_udp_tmtc
.join()
.expect("Joining UDP TMTC server thread failed");
jh_tcp
jh_tcp_server
.join()
.expect("Joining TCP TMTC server thread failed");
jh_tcp_client
.join()
.expect("Joining TCP TMTC client thread failed");
jh_tm_funnel
.join()
.expect("Joining TM Funnel thread failed");
jh_pus_handler
.join()
.expect("Joining PUS handler thread failed");
.expect("Joining PUS handlers thread failed");
jh_event_handling
.join()
.expect("Joining PUS handlers thread failed");
jh_camera_handler
.join()
.expect("Joining camera handler thread failed");
}
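Every task spawned above relies on the same cooperative shutdown pattern: each worker polls the shared Arc<AtomicBool> at the end of its loop iteration and exits once the flag is set, after which main can join it. A minimal, self-contained illustration of that pattern (not taken from the commit):

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::{thread, time::Duration};

fn main() {
    let stop_signal = Arc::new(AtomicBool::new(false));
    let worker_stop = stop_signal.clone();
    let worker = thread::Builder::new()
        .name("example worker".to_string())
        .spawn(move || loop {
            // Periodic work would go here.
            if worker_stop.load(Ordering::Relaxed) {
                break;
            }
            thread::sleep(Duration::from_millis(100));
        })
        .unwrap();
    // Request shutdown and wait for the worker to exit.
    stop_signal.store(true, Ordering::Relaxed);
    worker.join().unwrap();
}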

748
src/pus/action.rs Normal file
View File

@ -0,0 +1,748 @@
use log::{debug, error, warn};
use ops_sat_rs::config::components::PUS_ACTION_SERVICE;
use ops_sat_rs::config::tmtc_err;
use ops_sat_rs::TimeStampHelper;
use satrs::action::{ActionRequest, ActionRequestVariant};
use satrs::params::WritableToBeBytes;
use satrs::pus::action::{
ActionReplyPus, ActionReplyVariant, ActivePusActionRequestStd, DefaultActiveActionRequestMap,
};
use satrs::pus::verification::{
FailParams, FailParamsWithStep, TcStateAccepted, TcStateStarted, VerificationReporter,
VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
ActiveRequestProvider, EcssTcAndToken, EcssTcInVecConverter, EcssTmSender, EcssTmtcError,
GenericConversionError, PusPacketHandlerResult, PusReplyHandler, PusServiceHelper,
PusTcToRequestConverter, PusTmVariant,
};
use satrs::request::{GenericMessage, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use satrs::spacepackets::ecss::{EcssEnumU16, PusPacket};
use satrs::spacepackets::SpHeader;
use satrs::tmtc::PacketAsVec;
use std::sync::mpsc;
use std::time::Duration;
use crate::requests::GenericRequestRouter;
use super::{
create_verification_reporter, generic_pus_request_timeout_handler, HandlingStatus,
PusTargetedRequestService, TargetedPusService,
};
pub const DATA_REPLY: u8 = 130;
pub struct ActionReplyHandler {
fail_data_buf: [u8; 128],
}
impl Default for ActionReplyHandler {
fn default() -> Self {
Self {
fail_data_buf: [0; 128],
}
}
}
impl PusReplyHandler<ActivePusActionRequestStd, ActionReplyPus> for ActionReplyHandler {
type Error = EcssTmtcError;
fn handle_unrequested_reply(
&mut self,
reply: &GenericMessage<ActionReplyPus>,
_tm_sender: &impl EcssTmSender,
) -> Result<(), Self::Error> {
warn!("received unexpected reply for service 8: {reply:?}");
Ok(())
}
fn handle_reply(
&mut self,
reply: &GenericMessage<ActionReplyPus>,
active_request: &ActivePusActionRequestStd,
tm_sender: &(impl EcssTmSender + ?Sized),
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<bool, Self::Error> {
let verif_token: VerificationToken<TcStateStarted> = active_request
.token()
.try_into()
.expect("invalid token state");
let remove_entry = match &reply.message.variant {
ActionReplyVariant::CompletionFailed { error_code, params } => {
let mut fail_data_len = 0;
if let Some(params) = params {
fail_data_len = params.write_to_be_bytes(&mut self.fail_data_buf)?;
}
verification_handler.completion_failure(
tm_sender,
verif_token,
FailParams::new(time_stamp, error_code, &self.fail_data_buf[..fail_data_len]),
)?;
true
}
ActionReplyVariant::StepFailed {
error_code,
step,
params,
} => {
let mut fail_data_len = 0;
if let Some(params) = params {
fail_data_len = params.write_to_be_bytes(&mut self.fail_data_buf)?;
}
verification_handler.step_failure(
tm_sender,
verif_token,
FailParamsWithStep::new(
time_stamp,
&EcssEnumU16::new(*step),
error_code,
&self.fail_data_buf[..fail_data_len],
),
)?;
true
}
ActionReplyVariant::Completed => {
verification_handler.completion_success(tm_sender, verif_token, time_stamp)?;
true
}
ActionReplyVariant::StepSuccess { step } => {
verification_handler.step_success(
tm_sender,
&verif_token,
time_stamp,
EcssEnumU16::new(*step),
)?;
false
}
_ => false,
};
Ok(remove_entry)
}
fn handle_request_timeout(
&mut self,
active_request: &ActivePusActionRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(), Self::Error> {
generic_pus_request_timeout_handler(
tm_sender,
active_request,
verification_handler,
time_stamp,
"action",
)
}
}
#[derive(Default)]
pub struct ActionRequestConverter {}
impl PusTcToRequestConverter<ActivePusActionRequestStd, ActionRequest> for ActionRequestConverter {
type Error = GenericConversionError;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
tm_sender: &(impl EcssTmSender + ?Sized),
verif_reporter: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(ActivePusActionRequestStd, ActionRequest), Self::Error> {
let subservice = tc.subservice();
let user_data = tc.user_data();
if user_data.len() < 8 {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA),
)
.expect("Sending start failure failed");
return Err(GenericConversionError::NotEnoughAppData {
expected: 8,
found: user_data.len(),
});
}
let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).unwrap();
let action_id = u32::from_be_bytes(user_data[4..8].try_into().unwrap());
if subservice == 128 {
let req_variant = if user_data.len() == 8 {
ActionRequestVariant::NoData
} else {
ActionRequestVariant::VecData(user_data[8..].to_vec())
};
Ok((
ActivePusActionRequestStd::new(
action_id,
target_id_and_apid.into(),
token.into(),
Duration::from_secs(30),
),
ActionRequest::new(action_id, req_variant),
))
} else {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE),
)
.expect("Sending start failure failed");
Err(GenericConversionError::InvalidSubservice(subservice))
}
}
}
pub fn create_action_service(
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
action_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ActionReplyPus>>,
) -> ActionServiceWrapper {
let action_request_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_ACTION_SERVICE.id(),
pus_action_rx,
tm_funnel_tx,
create_verification_reporter(PUS_ACTION_SERVICE.id(), PUS_ACTION_SERVICE.apid),
EcssTcInVecConverter::default(),
),
ActionRequestConverter::default(),
DefaultActiveActionRequestMap::default(),
ActionReplyHandler::default(),
action_router,
reply_receiver,
);
ActionServiceWrapper {
service: action_request_handler,
}
}
pub struct ActionServiceWrapper {
pub(crate) service: PusTargetedRequestService<
VerificationReporter,
ActionRequestConverter,
ActionReplyHandler,
DefaultActiveActionRequestMap,
ActivePusActionRequestStd,
ActionRequest,
ActionReplyPus,
>,
}
impl TargetedPusService for ActionServiceWrapper {
/// Polls the next TC and handles it, returning [HandlingStatus::Empty] once there is nothing left to handle.
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
match self.service.poll_and_handle_next_tc(time_stamp) {
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS 8 partial packet handling success: {e:?}")
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS 8 invalid subservice {invalid}");
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS 8 subservice {subservice} not implemented");
}
PusPacketHandlerResult::Empty => return HandlingStatus::Empty,
},
Err(error) => {
error!("PUS packet handling error: {error:?}");
return HandlingStatus::Empty;
}
}
HandlingStatus::HandledOne
}
fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus {
// This only fails if all senders disconnected. Treat it like an empty queue.
self.service
.poll_and_check_next_reply(time_stamp)
.unwrap_or_else(|e| {
warn!("PUS 8: Handling reply failed with error {e:?}");
HandlingStatus::Empty
})
}
fn check_for_request_timeouts(&mut self) {
self.service.check_for_request_timeouts();
}
}
pub fn send_data_reply<TmSender: EcssTmSender>(
apid_target: UniqueApidTargetId,
reply_data: Vec<u8>,
stamp_helper: &TimeStampHelper,
tm_sender: &TmSender,
) -> Result<(), EcssTmtcError> {
let sp_header = SpHeader::new_from_apid(apid_target.apid);
let sec_header = PusTmSecondaryHeader::new(8, DATA_REPLY, 0, 0, stamp_helper.stamp());
let mut data = Vec::new();
data.extend(apid_target.apid.to_be_bytes());
data.extend(apid_target.unique_id.to_be_bytes());
data.extend(reply_data);
// Log the reply payload; replace invalid UTF-8 instead of panicking.
debug!("{}", String::from_utf8_lossy(&data[6..]));
let data_reply_tm = PusTmCreator::new(sp_header, sec_header, &data, true);
tm_sender.send_tm(apid_target.id(), PusTmVariant::Direct(data_reply_tm))
}
#[cfg(test)]
mod tests {
use satrs::pus::test_util::{
TEST_APID, TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
};
use satrs::pus::verification::test_util::TestVerificationReporter;
use satrs::pus::{verification, TcInMemory};
use satrs::request::MessageMetadata;
use satrs::ComponentId;
use satrs::{
res_code::ResultU16,
spacepackets::{
ecss::{
tc::{PusTcCreator, PusTcSecondaryHeader},
tm::PusTmReader,
WritablePusPacket,
},
SpHeader,
},
};
use crate::{
pus::tests::{PusConverterTestbench, ReplyHandlerTestbench, TargetedPusRequestTestbench},
requests::CompositeRequest,
};
use super::*;
impl
TargetedPusRequestTestbench<
ActionRequestConverter,
ActionReplyHandler,
DefaultActiveActionRequestMap,
ActivePusActionRequestStd,
ActionRequest,
ActionReplyPus,
>
{
pub fn new_for_action(owner_id: ComponentId, target_id: ComponentId) -> Self {
let _ = env_logger::builder().is_test(true).try_init();
let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel();
let (pus_action_tx, pus_action_rx) = mpsc::channel();
let (action_reply_tx, action_reply_rx) = mpsc::channel();
let (action_req_tx, action_req_rx) = mpsc::channel();
let verif_reporter = TestVerificationReporter::new(owner_id);
let mut generic_req_router = GenericRequestRouter::default();
generic_req_router
.composite_router_map
.insert(target_id, action_req_tx);
Self {
service: PusTargetedRequestService::new(
PusServiceHelper::new(
owner_id,
pus_action_rx,
tm_funnel_tx.clone(),
verif_reporter,
EcssTcInVecConverter::default(),
),
ActionRequestConverter::default(),
DefaultActiveActionRequestMap::default(),
ActionReplyHandler::default(),
generic_req_router,
action_reply_rx,
),
request_id: None,
pus_packet_tx: pus_action_tx,
tm_funnel_rx,
reply_tx: action_reply_tx,
request_rx: action_req_rx,
}
}
pub fn verify_packet_started(&self) {
self.service
.service_helper
.common
.verif_reporter
.check_next_is_started_success(
self.service.service_helper.id(),
self.request_id.expect("request ID not set").into(),
);
}
pub fn verify_packet_completed(&self) {
self.service
.service_helper
.common
.verif_reporter
.check_next_is_completion_success(
self.service.service_helper.id(),
self.request_id.expect("request ID not set").into(),
);
}
pub fn verify_tm_empty(&self) {
let packet = self.tm_funnel_rx.try_recv();
if let Err(mpsc::TryRecvError::Empty) = packet {
} else {
let tm = packet.unwrap();
let unexpected_tm = PusTmReader::new(&tm.packet, 7).unwrap().0;
panic!("unexpected TM packet {unexpected_tm:?}");
}
}
pub fn verify_next_tc_is_handled_properly(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_handle_next_tc(time_stamp);
if let Err(e) = result {
panic!("unexpected error {:?}", e);
}
let result = result.unwrap();
match result {
PusPacketHandlerResult::RequestHandled => (),
_ => panic!("unexpected result {result:?}"),
}
}
pub fn verify_all_tcs_handled(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_handle_next_tc(time_stamp);
if let Err(e) = result {
panic!("unexpected error {:?}", e);
}
let result = result.unwrap();
match result {
PusPacketHandlerResult::Empty => (),
_ => panic!("unexpected result {result:?}"),
}
}
pub fn verify_next_reply_is_handled_properly(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_check_next_reply(time_stamp);
assert!(result.is_ok());
assert_eq!(result.unwrap(), HandlingStatus::HandledOne);
}
pub fn verify_all_replies_handled(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_check_next_reply(time_stamp);
assert!(result.is_ok());
assert_eq!(result.unwrap(), HandlingStatus::Empty);
}
pub fn add_tc(&mut self, tc: &PusTcCreator) {
self.request_id = Some(verification::RequestId::new(tc).into());
let token = self.service.service_helper.verif_reporter_mut().add_tc(tc);
let accepted_token = self
.service
.service_helper
.verif_reporter()
.acceptance_success(self.service.service_helper.tm_sender(), token, &[0; 7])
.expect("TC acceptance failed");
self.service
.service_helper
.verif_reporter()
.check_next_was_added(accepted_token.request_id());
let id = self.service.service_helper.id();
self.service
.service_helper
.verif_reporter()
.check_next_is_acceptance_success(id, accepted_token.request_id());
self.pus_packet_tx
.send(EcssTcAndToken::new(
TcInMemory::Vec(PacketAsVec::new(
self.service.service_helper.id(),
tc.to_vec().unwrap(),
)),
accepted_token,
))
.unwrap();
}
}
#[test]
fn basic_request() {
let mut testbench = TargetedPusRequestTestbench::new_for_action(
TEST_COMPONENT_ID_0.id(),
TEST_COMPONENT_ID_1.id(),
);
// Create a basic action request and verify forwarding.
let sp_header = SpHeader::new_from_apid(TEST_APID);
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_1.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(sp_header, sec_header, &app_data, true);
testbench.add_tc(&pus8_packet);
let time_stamp: [u8; 7] = [0; 7];
testbench.verify_next_tc_is_handled_properly(&time_stamp);
testbench.verify_all_tcs_handled(&time_stamp);
testbench.verify_packet_started();
let possible_req = testbench.request_rx.try_recv();
assert!(possible_req.is_ok());
let req = possible_req.unwrap();
if let CompositeRequest::Action(action_req) = req.message {
assert_eq!(action_req.action_id, action_id);
assert_eq!(action_req.variant, ActionRequestVariant::NoData);
let action_reply = ActionReplyPus::new(action_id, ActionReplyVariant::Completed);
testbench
.reply_tx
.send(GenericMessage::new(req.requestor_info, action_reply))
.unwrap();
} else {
panic!("unexpected request type");
}
testbench.verify_next_reply_is_handled_properly(&time_stamp);
testbench.verify_all_replies_handled(&time_stamp);
testbench.verify_packet_completed();
testbench.verify_tm_empty();
}
#[test]
fn basic_request_routing_error() {
let mut testbench = TargetedPusRequestTestbench::new_for_action(
TEST_COMPONENT_ID_0.id(),
TEST_COMPONENT_ID_1.id(),
);
// Create a basic action request and verify forwarding.
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
// Invalid ID, routing should fail.
app_data[0..4].copy_from_slice(&0_u32.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
);
testbench.add_tc(&pus8_packet);
let time_stamp: [u8; 7] = [0; 7];
let result = testbench.service.poll_and_handle_next_tc(&time_stamp);
assert!(result.is_err());
// Verify the correct result and completion failure.
}
#[test]
fn converter_action_req_no_data() {
let mut testbench = PusConverterTestbench::new(
TEST_COMPONENT_ID_0.raw(),
ActionRequestConverter::default(),
);
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
// Valid target ID, the conversion should succeed.
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
);
let token = testbench.add_tc(&pus8_packet);
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
assert!(result.is_ok());
let (active_req, request) = result.unwrap();
if let ActionRequestVariant::NoData = request.variant {
assert_eq!(request.action_id, action_id);
assert_eq!(active_req.action_id, action_id);
assert_eq!(
active_req.target_id(),
UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID_0).raw()
);
assert_eq!(
active_req.token().request_id(),
testbench.request_id().unwrap()
);
} else {
panic!("unexpected action request variant");
}
}
#[test]
fn converter_action_req_with_data() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ActionRequestConverter::default());
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 16] = [0; 16];
// Valid target ID, the conversion should succeed.
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
for i in 0..8 {
app_data[i + 8] = i as u8;
}
let pus8_packet = PusTcCreator::new(
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
);
let token = testbench.add_tc(&pus8_packet);
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
assert!(result.is_ok());
let (active_req, request) = result.unwrap();
if let ActionRequestVariant::VecData(vec) = request.variant {
assert_eq!(request.action_id, action_id);
assert_eq!(active_req.action_id, action_id);
assert_eq!(vec, app_data[8..].to_vec());
} else {
panic!("unexpected action request variant");
}
}
#[test]
fn reply_handling_completion_success() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let reply = ActionReplyPus::new(action_id, ActionReplyVariant::Completed);
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
testbench.verif_reporter.assert_full_completion_success(
TEST_COMPONENT_ID_0.id(),
req_id,
None,
);
}
#[test]
fn reply_handling_completion_failure() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let error_code = ResultU16::new(2, 3);
let reply = ActionReplyPus::new(
action_id,
ActionReplyVariant::CompletionFailed {
error_code,
params: None,
},
);
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_0.into(),
req_id,
None,
error_code.raw() as u64,
);
}
#[test]
fn reply_handling_step_success() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let reply = ActionReplyPus::new(action_id, ActionReplyVariant::StepSuccess { step: 1 });
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
// Entry should not be removed, completion not done yet.
assert!(!result.unwrap());
testbench.verif_reporter.check_next_was_added(req_id);
testbench
.verif_reporter
.check_next_is_acceptance_success(TEST_COMPONENT_ID_0.raw(), req_id);
testbench
.verif_reporter
.check_next_is_started_success(TEST_COMPONENT_ID_0.raw(), req_id);
testbench
.verif_reporter
.check_next_is_step_success(TEST_COMPONENT_ID_0.raw(), req_id, 1);
}
#[test]
fn reply_handling_step_failure() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let error_code = ResultU16::new(2, 3);
let reply = ActionReplyPus::new(
action_id,
ActionReplyVariant::StepFailed {
error_code,
step: 1,
params: None,
},
);
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
testbench.verif_reporter.check_next_was_added(req_id);
testbench
.verif_reporter
.check_next_is_acceptance_success(TEST_COMPONENT_ID_0.id(), req_id);
testbench
.verif_reporter
.check_next_is_started_success(TEST_COMPONENT_ID_0.id(), req_id);
testbench.verif_reporter.check_next_is_step_failure(
TEST_COMPONENT_ID_0.id(),
req_id,
error_code.raw().into(),
);
}
#[test]
fn reply_handling_unrequested_reply() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_reply = ActionReplyPus::new(5_u32, ActionReplyVariant::Completed);
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply);
// Right now this function does not do a lot. We simply check that it does not panic or do
// weird stuff.
let result = testbench.handle_unrequested_reply(&unrequested_reply);
assert!(result.is_ok());
}
#[test]
fn reply_handling_reply_timeout() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let result = testbench.handle_request_timeout(
&ActivePusActionRequestStd::new_from_common_req(action_id, active_request),
&[],
);
assert!(result.is_ok());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_0.raw(),
req_id,
None,
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
);
}
}

66
src/pus/event.rs Normal file

@ -0,0 +1,66 @@
use std::sync::mpsc;
use super::HandlingStatus;
use crate::pus::create_verification_reporter;
use log::{error, warn};
use ops_sat_rs::config::components::PUS_EVENT_MANAGEMENT;
use satrs::pus::event_man::EventRequestWithToken;
use satrs::pus::event_srv::PusEventServiceHandler;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
EcssTcAndToken, EcssTcInVecConverter, MpscTcReceiver, PusPacketHandlerResult, PusServiceHelper,
};
use satrs::tmtc::PacketAsVec;
pub fn create_event_service(
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
event_request_tx: mpsc::Sender<EventRequestWithToken>,
) -> EventServiceWrapper {
let pus_5_handler = PusEventServiceHandler::new(
PusServiceHelper::new(
PUS_EVENT_MANAGEMENT.id(),
pus_event_rx,
tm_funnel_tx,
create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid),
EcssTcInVecConverter::default(),
),
event_request_tx,
);
EventServiceWrapper {
handler: pus_5_handler,
}
}
pub struct EventServiceWrapper {
pub handler: PusEventServiceHandler<
MpscTcReceiver,
mpsc::Sender<PacketAsVec>,
EcssTcInVecConverter,
VerificationReporter,
>,
}
impl EventServiceWrapper {
pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
match self.handler.poll_and_handle_next_tc(time_stamp) {
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS 5 partial packet handling success: {e:?}")
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS 5 invalid subservice {invalid}");
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS 5 subservice {subservice} not implemented");
}
PusPacketHandlerResult::Empty => return HandlingStatus::Empty,
},
Err(error) => {
error!("PUS packet handling error: {error:?}")
}
}
HandlingStatus::HandledOne
}
}

532
src/pus/hk.rs Normal file

@ -0,0 +1,532 @@
use derive_new::new;
use log::{error, warn};
use ops_sat_rs::config::components::PUS_HK_SERVICE;
use ops_sat_rs::config::{hk_err, tmtc_err};
use satrs::hk::{CollectionIntervalFactor, HkRequest, HkRequestVariant, UniqueId};
use satrs::pus::verification::{
FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap, EcssTcAndToken,
EcssTcInVecConverter, EcssTmSender, EcssTmtcError, GenericConversionError,
PusPacketHandlerResult, PusReplyHandler, PusServiceHelper, PusTcToRequestConverter,
};
use satrs::request::{GenericMessage, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{hk, PusPacket};
use satrs::tmtc::PacketAsVec;
use std::sync::mpsc;
use std::time::Duration;
use crate::pus::{create_verification_reporter, generic_pus_request_timeout_handler};
use crate::requests::GenericRequestRouter;
use super::{HandlingStatus, PusTargetedRequestService};
#[derive(Clone, PartialEq, Debug, new)]
pub struct HkReply {
pub unique_id: UniqueId,
pub variant: HkReplyVariant,
}
#[derive(Clone, PartialEq, Debug)]
#[allow(dead_code)]
pub enum HkReplyVariant {
Ack,
}
#[derive(Default)]
pub struct HkReplyHandler {}
impl PusReplyHandler<ActivePusRequestStd, HkReply> for HkReplyHandler {
type Error = EcssTmtcError;
fn handle_unrequested_reply(
&mut self,
reply: &GenericMessage<HkReply>,
_tm_sender: &impl EcssTmSender,
) -> Result<(), Self::Error> {
log::warn!("received unexpected reply for service 3: {reply:?}");
Ok(())
}
fn handle_reply(
&mut self,
reply: &GenericMessage<HkReply>,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<bool, Self::Error> {
let started_token: VerificationToken<TcStateStarted> = active_request
.token()
.try_into()
.expect("invalid token state");
match reply.message.variant {
HkReplyVariant::Ack => {
verification_handler
.completion_success(tm_sender, started_token, time_stamp)
.expect("sending completion success verification failed");
}
};
Ok(true)
}
fn handle_request_timeout(
&mut self,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(), Self::Error> {
generic_pus_request_timeout_handler(
tm_sender,
active_request,
verification_handler,
time_stamp,
"HK",
)?;
Ok(())
}
}
pub struct HkRequestConverter {
timeout: Duration,
}
impl Default for HkRequestConverter {
fn default() -> Self {
Self {
timeout: Duration::from_secs(60),
}
}
}
impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConverter {
type Error = GenericConversionError;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
tm_sender: &(impl EcssTmSender + ?Sized),
verif_reporter: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(ActivePusRequestStd, HkRequest), Self::Error> {
let user_data = tc.user_data();
if user_data.is_empty() {
let user_data_len = user_data.len() as u32;
let user_data_len_raw = user_data_len.to_be_bytes();
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new(
time_stamp,
&tmtc_err::NOT_ENOUGH_APP_DATA,
&user_data_len_raw,
),
)
.expect("Sending start failure TM failed");
return Err(GenericConversionError::NotEnoughAppData {
expected: 4,
found: 0,
});
}
if user_data.len() < 8 {
let err = if user_data.len() < 4 {
&hk_err::TARGET_ID_MISSING
} else {
&hk_err::UNIQUE_ID_MISSING
};
let user_data_len = user_data.len() as u32;
let user_data_len_raw = user_data_len.to_be_bytes();
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new(time_stamp, err, &user_data_len_raw),
)
.expect("Sending start failure TM failed");
return Err(GenericConversionError::NotEnoughAppData {
expected: 8,
found: 4,
});
}
let subservice = tc.subservice();
let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).expect("invalid tc format");
let unique_id = u32::from_be_bytes(tc.user_data()[4..8].try_into().unwrap());
let standard_subservice = hk::Subservice::try_from(subservice);
if standard_subservice.is_err() {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE, &[subservice]),
)
.expect("Sending start failure TM failed");
return Err(GenericConversionError::InvalidSubservice(subservice));
}
let request = match standard_subservice.unwrap() {
hk::Subservice::TcEnableHkGeneration | hk::Subservice::TcEnableDiagGeneration => {
HkRequest::new(unique_id, HkRequestVariant::EnablePeriodic)
}
hk::Subservice::TcDisableHkGeneration | hk::Subservice::TcDisableDiagGeneration => {
HkRequest::new(unique_id, HkRequestVariant::DisablePeriodic)
}
hk::Subservice::TcReportHkReportStructures => todo!(),
hk::Subservice::TmHkPacket => todo!(),
hk::Subservice::TcGenerateOneShotHk | hk::Subservice::TcGenerateOneShotDiag => {
HkRequest::new(unique_id, HkRequestVariant::OneShot)
}
hk::Subservice::TcModifyDiagCollectionInterval
| hk::Subservice::TcModifyHkCollectionInterval => {
if user_data.len() < 12 {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(
time_stamp,
&tmtc_err::NOT_ENOUGH_APP_DATA,
),
)
.expect("Sending start failure TM failed");
return Err(GenericConversionError::NotEnoughAppData {
expected: 12,
found: user_data.len(),
});
}
HkRequest::new(
unique_id,
HkRequestVariant::ModifyCollectionInterval(
CollectionIntervalFactor::from_be_bytes(
user_data[8..12].try_into().unwrap(),
),
),
)
}
_ => {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new(
time_stamp,
&tmtc_err::PUS_SUBSERVICE_NOT_IMPLEMENTED,
&[subservice],
),
)
.expect("Sending start failure TM failed");
return Err(GenericConversionError::InvalidSubservice(subservice));
}
};
Ok((
ActivePusRequestStd::new(target_id_and_apid.into(), token, self.timeout),
request,
))
}
}
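// Application data layout expected by the converter above, as derived from the slicing in
// `convert` and the unit tests below:
//   bytes 0..4  - target ID (combined with the APID into a UniqueApidTargetId)
//   bytes 4..8  - unique HK set ID
//   bytes 8..12 - collection interval factor, required only for the collection interval
//                 modification subservices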
pub fn create_hk_service(
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
request_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<HkReply>>,
) -> HkServiceWrapper {
let pus_3_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_HK_SERVICE.id(),
pus_hk_rx,
tm_funnel_tx,
create_verification_reporter(PUS_HK_SERVICE.id(), PUS_HK_SERVICE.apid),
EcssTcInVecConverter::default(),
),
HkRequestConverter::default(),
DefaultActiveRequestMap::default(),
HkReplyHandler::default(),
request_router,
reply_receiver,
);
HkServiceWrapper {
service: pus_3_handler,
}
}
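// Illustrative wiring sketch, not part of this commit: all channel and variable names below
// are hypothetical. The `GenericRequestRouter` is taken as a parameter because its
// construction is defined elsewhere in the crate.
#[allow(dead_code)]
fn wire_hk_service_sketch(request_router: GenericRequestRouter) -> HkServiceWrapper {
    let (tm_funnel_tx, _tm_funnel_rx) = mpsc::channel::<PacketAsVec>();
    let (_pus_hk_tc_tx, pus_hk_tc_rx) = mpsc::channel::<EcssTcAndToken>();
    let (_hk_reply_tx, hk_reply_rx) = mpsc::channel::<GenericMessage<HkReply>>();
    create_hk_service(tm_funnel_tx, pus_hk_tc_rx, request_router, hk_reply_rx)
}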
pub struct HkServiceWrapper {
pub(crate) service: PusTargetedRequestService<
VerificationReporter,
HkRequestConverter,
HkReplyHandler,
DefaultActiveRequestMap<ActivePusRequestStd>,
ActivePusRequestStd,
HkRequest,
HkReply,
>,
}
impl HkServiceWrapper {
pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
match self.service.poll_and_handle_next_tc(time_stamp) {
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS 3 partial packet handling success: {e:?}")
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS 3 invalid subservice {invalid}");
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS 3 subservice {subservice} not implemented");
}
PusPacketHandlerResult::Empty => return HandlingStatus::Empty,
},
Err(error) => {
error!("PUS packet handling error: {error:?}");
// To avoid permanent loops on error cases.
return HandlingStatus::Empty;
}
}
HandlingStatus::HandledOne
}
pub fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus {
// This only fails if all senders disconnected. Treat it like an empty queue.
self.service
.poll_and_check_next_reply(time_stamp)
.unwrap_or_else(|e| {
warn!("PUS 3: Handling reply failed with error {e:?}");
HandlingStatus::Empty
})
}
pub fn check_for_request_timeouts(&mut self) {
self.service.check_for_request_timeouts();
}
}
#[cfg(test)]
mod tests {
use ops_sat_rs::config::tmtc_err;
use satrs::pus::test_util::{
TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
};
use satrs::request::MessageMetadata;
use satrs::{
hk::HkRequestVariant,
pus::test_util::TEST_APID,
request::GenericMessage,
spacepackets::{
ecss::{hk::Subservice, tc::PusTcCreator},
SpHeader,
},
};
use crate::pus::{
hk::HkReplyVariant,
tests::{PusConverterTestbench, ReplyHandlerTestbench},
};
use super::{HkReply, HkReplyHandler, HkRequestConverter};
#[test]
fn hk_converter_one_shot_req() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let hk_req = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcGenerateOneShotHk as u8,
&app_data,
true,
);
let accepted_token = hk_bench.add_tc(&hk_req);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::OneShot = req.variant {
} else {
panic!("unexpected HK request")
}
}
#[test]
fn hk_converter_enable_periodic_generation() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let mut generic_check = |tc: &PusTcCreator| {
let accepted_token = hk_bench.add_tc(tc);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::EnablePeriodic = req.variant {
} else {
panic!("unexpected HK request")
}
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcEnableHkGeneration as u8,
&app_data,
true,
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcEnableDiagGeneration as u8,
&app_data,
true,
);
generic_check(&tc1);
}
#[test]
fn hk_conversion_disable_periodic_generation() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let mut generic_check = |tc: &PusTcCreator| {
let accepted_token = hk_bench.add_tc(tc);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::DisablePeriodic = req.variant {
} else {
panic!("unexpected HK request")
}
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcDisableHkGeneration as u8,
&app_data,
true,
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcDisableDiagGeneration as u8,
&app_data,
true,
);
generic_check(&tc1);
}
#[test]
fn hk_conversion_modify_interval() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 12] = [0; 12];
let collection_interval_factor = 5_u32;
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
app_data[8..12].copy_from_slice(&collection_interval_factor.to_be_bytes());
let mut generic_check = |tc: &PusTcCreator| {
let accepted_token = hk_bench.add_tc(tc);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::ModifyCollectionInterval(interval_factor) = req.variant {
assert_eq!(interval_factor, collection_interval_factor);
} else {
panic!("unexpected HK request")
}
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcModifyHkCollectionInterval as u8,
&app_data,
true,
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcModifyDiagCollectionInterval as u8,
&app_data,
true,
);
generic_check(&tc1);
}
#[test]
fn hk_reply_handler() {
let mut reply_testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), HkReplyHandler::default());
let sender_id = 2_u64;
let apid_target_id = 3_u32;
let unique_id = 5_u32;
let (req_id, active_req) = reply_testbench.add_tc(TEST_APID, apid_target_id, &[]);
let reply = GenericMessage::new(
MessageMetadata::new(req_id.into(), sender_id),
HkReply::new(unique_id, HkReplyVariant::Ack),
);
let result = reply_testbench.handle_reply(&reply, &active_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
reply_testbench
.verif_reporter
.assert_full_completion_success(TEST_COMPONENT_ID_0.raw(), req_id, None);
}
#[test]
fn reply_handling_unrequested_reply() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default());
let action_reply = HkReply::new(5_u32, HkReplyVariant::Ack);
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply);
// Right now this function does not do a lot. We simply check that it does not panic or do
// weird stuff.
let result = testbench.handle_unrequested_reply(&unrequested_reply);
assert!(result.is_ok());
}
#[test]
fn reply_handling_reply_timeout() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default());
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_1, &[]);
let result = testbench.handle_request_timeout(&active_request, &[]);
assert!(result.is_ok());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_1.raw(),
req_id,
None,
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
);
}
}

View File

@ -1,8 +1,12 @@
pub mod action;
pub mod event;
pub mod hk;
pub mod mode;
pub mod scheduler;
pub mod stack;
pub mod test;
use crate::requests::GenericRequestRouter;
use crate::tmtc::MpscStoreAndSendError;
use log::warn;
use ops_sat_rs::config::components::PUS_ROUTING_SERVICE;
use ops_sat_rs::config::{tmtc_err, CustomPusServiceId};
@ -13,26 +17,19 @@ use satrs::pus::verification::{
};
use satrs::pus::{
ActiveRequestMapProvider, ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter,
EcssTcReceiverCore, EcssTmSenderCore, EcssTmtcError, GenericConversionError,
GenericRoutingError, PusPacketHandlerResult, PusPacketHandlingError, PusReplyHandler,
PusRequestRouter, PusServiceHelper, PusTcToRequestConverter, TcInMemory,
EcssTcInVecConverter, EcssTmSender, EcssTmtcError, GenericConversionError, GenericRoutingError,
MpscTcReceiver, MpscTmAsVecSender, PusPacketHandlerResult, PusPacketHandlingError,
PusReplyHandler, PusRequestRouter, PusServiceHelper, PusTcToRequestConverter, TcInMemory,
};
use satrs::queue::GenericReceiveError;
use satrs::queue::{GenericReceiveError, GenericSendError};
use satrs::request::{Apid, GenericMessage, MessageMetadata};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusServiceId;
use satrs::spacepackets::ecss::{PusPacket, PusServiceId};
use satrs::tmtc::PacketAsVec;
use satrs::ComponentId;
use std::fmt::Debug;
use std::sync::mpsc::{self, Sender};
// pub mod action;
// pub mod event;
// pub mod hk;
// pub mod mode;
// pub mod scheduler;
// pub mod stack;
// pub mod test;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[allow(dead_code)]
pub enum HandlingStatus {
@ -50,14 +47,14 @@ pub fn create_verification_reporter(owner_id: ComponentId, apid: Apid) -> Verifi
/// Simple router structure which forwards PUS telecommands to dedicated handlers.
pub struct PusTcMpscRouter {
pub test_tc_sender: Sender<EcssTcAndToken>,
// pub event_tc_sender: Sender<EcssTcAndToken>,
// pub sched_tc_sender: Sender<EcssTcAndToken>,
// pub hk_tc_sender: Sender<EcssTcAndToken>,
// pub action_tc_sender: Sender<EcssTcAndToken>,
// pub mode_tc_sender: Sender<EcssTcAndToken>,
pub event_tc_sender: Sender<EcssTcAndToken>,
pub sched_tc_sender: Sender<EcssTcAndToken>,
pub hk_tc_sender: Sender<EcssTcAndToken>,
pub action_tc_sender: Sender<EcssTcAndToken>,
pub mode_tc_sender: Sender<EcssTcAndToken>,
}
pub struct PusReceiver<TmSender: EcssTmSenderCore> {
pub struct PusTcDistributor<TmSender: EcssTmSender> {
pub id: ComponentId,
pub tm_sender: TmSender,
pub verif_reporter: VerificationReporter,
@ -65,7 +62,7 @@ pub struct PusReceiver<TmSender: EcssTmSenderCore> {
stamp_helper: TimeStampHelper,
}
impl<TmSender: EcssTmSenderCore> PusReceiver<TmSender> {
impl<TmSender: EcssTmSender> PusTcDistributor<TmSender> {
pub fn new(tm_sender: TmSender, pus_router: PusTcMpscRouter) -> Self {
Self {
id: PUS_ROUTING_SERVICE.raw(),
@ -81,39 +78,48 @@ impl<TmSender: EcssTmSenderCore> PusReceiver<TmSender> {
pub fn handle_tc_packet(
&mut self,
tc_in_memory: TcInMemory,
service: u8,
pus_tc: &PusTcReader,
) -> Result<PusPacketHandlerResult, MpscStoreAndSendError> {
let init_token = self.verif_reporter.add_tc(pus_tc);
sender_id: ComponentId,
tc: Vec<u8>,
) -> Result<PusPacketHandlerResult, GenericSendError> {
let pus_tc_result = PusTcReader::new(&tc);
if pus_tc_result.is_err() {
log::warn!(
"error creating PUS TC received from {}: {}",
sender_id,
pus_tc_result.unwrap_err()
);
log::warn!("raw data: {:x?}", tc);
return Ok(PusPacketHandlerResult::RequestHandled);
}
let pus_tc = pus_tc_result.unwrap().0;
let init_token = self.verif_reporter.add_tc(&pus_tc);
self.stamp_helper.update_from_now();
let accepted_token = self
.verif_reporter
.acceptance_success(&self.tm_sender, init_token, self.stamp_helper.stamp())
.expect("Acceptance success failure");
let service = PusServiceId::try_from(service);
let service = PusServiceId::try_from(pus_tc.service());
let tc_in_memory = TcInMemory::Vec(PacketAsVec::new(sender_id, tc));
match service {
Ok(standard_service) => match standard_service {
PusServiceId::Test => self.pus_router.test_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?,
// PusServiceId::Housekeeping => {
// self.pus_router.hk_tc_sender.send(EcssTcAndToken {
// tc_in_memory,
// token: Some(accepted_token.into()),
// })?
// }
// PusServiceId::Event => self.pus_router.event_tc_sender.send(EcssTcAndToken {
// tc_in_memory,
// token: Some(accepted_token.into()),
// })?,
// PusServiceId::Scheduling => {
// self.pus_router.sched_tc_sender.send(EcssTcAndToken {
// tc_in_memory,
// token: Some(accepted_token.into()),
// })?
// }
PusServiceId::Action => self.pus_router.action_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?,
PusServiceId::Event => self.pus_router.event_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?,
PusServiceId::Scheduling => {
self.pus_router.sched_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?
}
_ => {
let result = self.verif_reporter.start_failure(
&self.tm_sender,
@ -133,10 +139,10 @@ impl<TmSender: EcssTmSenderCore> PusReceiver<TmSender> {
if let Ok(custom_service) = CustomPusServiceId::try_from(e.number) {
match custom_service {
CustomPusServiceId::Mode => {
// self.pus_router.mode_tc_sender.send(EcssTcAndToken {
// tc_in_memory,
// token: Some(accepted_token.into()),
// })?
self.pus_router.mode_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?
}
CustomPusServiceId::Health => {}
}
@ -161,12 +167,12 @@ impl<TmSender: EcssTmSenderCore> PusReceiver<TmSender> {
pub trait TargetedPusService {
/// Polls one TC packet and returns the resulting [HandlingStatus].
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool;
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus;
fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus;
fn check_for_request_timeouts(&mut self);
}
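// Minimal driving sketch, not part of this commit: `srv` and `time_stamp` are hypothetical,
// and the loop mirrors the `PusStack::periodic_operation` changes further down in this diff.
#[allow(dead_code)]
fn drive_targeted_service_sketch(srv: &mut impl TargetedPusService, time_stamp: &[u8]) {
    loop {
        let tc_status = srv.poll_and_handle_next_tc(time_stamp);
        let reply_status = srv.poll_and_handle_next_reply(time_stamp);
        // Keep polling until both the TC queue and the reply queue are empty.
        if tc_status == HandlingStatus::Empty && reply_status == HandlingStatus::Empty {
            break;
        }
    }
    // Timeout checking only needs to run once per cycle.
    srv.check_for_request_timeouts();
}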
/// This is a generic handler class for all PUS services where a PUS telecommand is converted
/// to a targeted request.
///
/// The generic steps for this process are the following
@ -176,21 +182,18 @@ pub trait TargetedPusService {
/// 3. Convert the PUS TC to a typed request using the [PusTcToRequestConverter].
/// 4. Route the requests using the [GenericRequestRouter].
/// 5. Add the request to the active request map using the [ActiveRequestMapProvider] abstraction.
/// 6. Check for replies which complete the forwarded request. The handler takes care of
/// the verification process.
/// 7. Check for timeouts of active requests. Generally, the timeout on the service level should
/// be highest expected timeout for the given target.
///
/// The handler exposes the following API:
///
/// 1. [Self::handle_one_tc] which tries to poll and handle one TC packet, covering steps 1-5.
/// 2. [Self::check_one_reply] which tries to poll and handle one reply, covering step 6.
/// 3. [Self::check_for_request_timeouts] which checks for request timeouts, covering step 7.
#[allow(dead_code)]
pub struct PusTargetedRequestService<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
@ -199,8 +202,12 @@ pub struct PusTargetedRequestService<
RequestType,
ReplyType,
> {
pub service_helper:
PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
pub service_helper: PusServiceHelper<
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
VerificationReporter,
>,
pub request_router: GenericRequestRouter,
pub request_converter: RequestConverter,
pub active_request_map: ActiveRequestMap,
@ -209,11 +216,7 @@ pub struct PusTargetedRequestService<
phantom: std::marker::PhantomData<(RequestType, ActiveRequestInfo, ReplyType)>,
}
#[allow(dead_code)]
impl<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
@ -223,9 +226,6 @@ impl<
ReplyType,
>
PusTargetedRequestService<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
RequestConverter,
ReplyHandler,
@ -239,9 +239,9 @@ where
{
pub fn new(
service_helper: PusServiceHelper<
TcReceiver,
TmSender,
TcInMemConverter,
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
VerificationReporter,
>,
request_converter: RequestConverter,
@ -442,7 +442,7 @@ where
/// and also log the error.
#[allow(dead_code)]
pub fn generic_pus_request_timeout_handler(
sender: &(impl EcssTmSenderCore + ?Sized),
sender: &(impl EcssTmSender + ?Sized),
active_request: &(impl ActiveRequestProvider + Debug),
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
@ -466,12 +466,13 @@ pub(crate) mod tests {
use std::time::Duration;
use satrs::pus::test_util::TEST_COMPONENT_ID_0;
use satrs::pus::{MpscTmAsVecSender, PusTmAsVec, PusTmVariant};
use satrs::pus::{MpscTmAsVecSender, PusTmVariant};
use satrs::request::RequestId;
use satrs::tmtc::PacketAsVec;
use satrs::{
pus::{
verification::test_util::TestVerificationReporter, ActivePusRequestStd,
ActiveRequestMapProvider, EcssTcInVecConverter, MpscTcReceiver,
ActiveRequestMapProvider,
},
request::UniqueApidTargetId,
spacepackets::{
@ -496,7 +497,7 @@ pub(crate) mod tests {
pub id: ComponentId,
pub verif_reporter: TestVerificationReporter,
pub reply_handler: ReplyHandler,
pub tm_receiver: mpsc::Receiver<PusTmAsVec>,
pub tm_receiver: mpsc::Receiver<PacketAsVec>,
pub default_timeout: Duration,
tm_sender: MpscTmAsVecSender,
phantom: std::marker::PhantomData<(ActiveRequestInfo, Reply)>,
@ -596,7 +597,7 @@ pub(crate) mod tests {
/// Dummy sender component which does nothing on the [Self::send_tm] call.
///
/// Useful for unit tests.
impl EcssTmSenderCore for DummySender {
impl EcssTmSender for DummySender {
fn send_tm(&self, _source_id: ComponentId, _tm: PusTmVariant) -> Result<(), EcssTmtcError> {
Ok(())
}
@ -691,9 +692,6 @@ pub(crate) mod tests {
ReplyType,
> {
pub service: PusTargetedRequestService<
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
TestVerificationReporter,
RequestConverter,
ReplyHandler,
@ -703,7 +701,7 @@ pub(crate) mod tests {
ReplyType,
>,
pub request_id: Option<RequestId>,
pub tm_funnel_rx: mpsc::Receiver<PusTmAsVec>,
pub tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
pub pus_packet_tx: mpsc::Sender<EcssTcAndToken>,
pub reply_tx: mpsc::Sender<GenericMessage<ReplyType>>,
pub request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,

402
src/pus/mode.rs Normal file

@ -0,0 +1,402 @@
use derive_new::new;
use log::{error, warn};
use satrs::tmtc::PacketAsVec;
use std::sync::mpsc;
use std::time::Duration;
use crate::requests::GenericRequestRouter;
use ops_sat_rs::config::components::PUS_MODE_SERVICE;
use ops_sat_rs::config::{mode_err, tmtc_err};
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
DefaultActiveRequestMap, EcssTcAndToken, EcssTcInVecConverter, PusPacketHandlerResult,
PusServiceHelper,
};
use satrs::request::GenericMessage;
use satrs::{
mode::{ModeAndSubmode, ModeReply, ModeRequest},
pus::{
mode::Subservice,
verification::{
self, FailParams, TcStateAccepted, TcStateStarted, VerificationReportingProvider,
VerificationToken,
},
ActivePusRequestStd, ActiveRequestProvider, EcssTmSender, EcssTmtcError,
GenericConversionError, PusReplyHandler, PusTcToRequestConverter, PusTmVariant,
},
request::UniqueApidTargetId,
spacepackets::{
ecss::{
tc::PusTcReader,
tm::{PusTmCreator, PusTmSecondaryHeader},
PusPacket,
},
SpHeader,
},
ComponentId,
};
use super::{
create_verification_reporter, generic_pus_request_timeout_handler, HandlingStatus,
PusTargetedRequestService, TargetedPusService,
};
#[derive(new)]
pub struct ModeReplyHandler {
owner_id: ComponentId,
}
impl PusReplyHandler<ActivePusRequestStd, ModeReply> for ModeReplyHandler {
type Error = EcssTmtcError;
fn handle_unrequested_reply(
&mut self,
reply: &GenericMessage<ModeReply>,
_tm_sender: &impl EcssTmSender,
) -> Result<(), Self::Error> {
log::warn!("received unexpected reply for mode service 5: {reply:?}");
Ok(())
}
fn handle_reply(
&mut self,
reply: &GenericMessage<ModeReply>,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<bool, Self::Error> {
let started_token: VerificationToken<TcStateStarted> = active_request
.token()
.try_into()
.expect("invalid token state");
match reply.message {
ModeReply::ModeReply(mode_reply) => {
let mut source_data: [u8; 12] = [0; 12];
mode_reply
.write_to_be_bytes(&mut source_data)
.expect("writing mode reply failed");
let req_id = verification::RequestId::from(reply.request_id());
let sp_header = SpHeader::new_for_unseg_tm(req_id.packet_id().apid(), 0, 0);
let sec_header =
PusTmSecondaryHeader::new(200, Subservice::TmModeReply as u8, 0, 0, time_stamp);
let pus_tm = PusTmCreator::new(sp_header, sec_header, &source_data, true);
tm_sender.send_tm(self.owner_id, PusTmVariant::Direct(pus_tm))?;
verification_handler.completion_success(tm_sender, started_token, time_stamp)?;
}
ModeReply::CantReachMode(error_code) => {
verification_handler.completion_failure(
tm_sender,
started_token,
FailParams::new(time_stamp, &error_code, &[]),
)?;
}
ModeReply::WrongMode { expected, reached } => {
let mut error_info: [u8; 24] = [0; 24];
let mut written_len = expected
.write_to_be_bytes(&mut error_info[0..ModeAndSubmode::RAW_LEN])
.expect("writing expected mode failed");
written_len += reached
.write_to_be_bytes(&mut error_info[ModeAndSubmode::RAW_LEN..])
.expect("writing reached mode failed");
verification_handler.completion_failure(
tm_sender,
started_token,
FailParams::new(
time_stamp,
&mode_err::WRONG_MODE,
&error_info[..written_len],
),
)?;
}
};
Ok(true)
}
fn handle_request_timeout(
&mut self,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(), Self::Error> {
generic_pus_request_timeout_handler(
tm_sender,
active_request,
verification_handler,
time_stamp,
"HK",
)?;
Ok(())
}
}
#[derive(Default)]
pub struct ModeRequestConverter {}
impl PusTcToRequestConverter<ActivePusRequestStd, ModeRequest> for ModeRequestConverter {
type Error = GenericConversionError;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
tm_sender: &(impl EcssTmSender + ?Sized),
verif_reporter: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(ActivePusRequestStd, ModeRequest), Self::Error> {
let subservice = tc.subservice();
let user_data = tc.user_data();
let not_enough_app_data = |expected: usize| {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA),
)
.expect("Sending start failure failed");
Err(GenericConversionError::NotEnoughAppData {
expected,
found: user_data.len(),
})
};
if user_data.len() < core::mem::size_of::<u32>() {
return not_enough_app_data(4);
}
let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).unwrap();
let active_request =
ActivePusRequestStd::new(target_id_and_apid.into(), token, Duration::from_secs(30));
let subservice_typed = Subservice::try_from(subservice);
let invalid_subservice = || {
// Invalid subservice
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE),
)
.expect("Sending start failure failed");
Err(GenericConversionError::InvalidSubservice(subservice))
};
if subservice_typed.is_err() {
return invalid_subservice();
}
let subservice_typed = subservice_typed.unwrap();
match subservice_typed {
Subservice::TcSetMode => {
if user_data.len() < core::mem::size_of::<u32>() + ModeAndSubmode::RAW_LEN {
return not_enough_app_data(4 + ModeAndSubmode::RAW_LEN);
}
let mode_and_submode = ModeAndSubmode::from_be_bytes(&tc.user_data()[4..])
.expect("mode and submode extraction failed");
Ok((active_request, ModeRequest::SetMode(mode_and_submode)))
}
Subservice::TcReadMode => Ok((active_request, ModeRequest::ReadMode)),
Subservice::TcAnnounceMode => Ok((active_request, ModeRequest::AnnounceMode)),
Subservice::TcAnnounceModeRecursive => {
Ok((active_request, ModeRequest::AnnounceModeRecursive))
}
_ => invalid_subservice(),
}
}
}
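// Application data layout expected by the converter above, as derived from the slicing in
// `convert` and the unit tests below: bytes 0..4 carry the target ID, and for the TcSetMode
// subservice bytes 4.. carry a `ModeAndSubmode` written with `write_to_be_bytes`.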
pub fn create_mode_service(
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
mode_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ModeReply>>,
) -> ModeServiceWrapper {
let mode_request_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_MODE_SERVICE.id(),
pus_action_rx,
tm_funnel_tx,
create_verification_reporter(PUS_MODE_SERVICE.id(), PUS_MODE_SERVICE.apid),
EcssTcInVecConverter::default(),
),
ModeRequestConverter::default(),
DefaultActiveRequestMap::default(),
ModeReplyHandler::new(PUS_MODE_SERVICE.id()),
mode_router,
reply_receiver,
);
ModeServiceWrapper {
service: mode_request_handler,
}
}
pub struct ModeServiceWrapper {
pub(crate) service: PusTargetedRequestService<
VerificationReporter,
ModeRequestConverter,
ModeReplyHandler,
DefaultActiveRequestMap<ActivePusRequestStd>,
ActivePusRequestStd,
ModeRequest,
ModeReply,
>,
}
impl TargetedPusService for ModeServiceWrapper {
/// Polls one TC packet and returns the resulting [HandlingStatus].
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
match self.service.poll_and_handle_next_tc(time_stamp) {
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS mode service: partial packet handling success: {e:?}")
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS mode service: invalid subservice {invalid}");
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS mode service: {subservice} not implemented");
}
PusPacketHandlerResult::Empty => return HandlingStatus::Empty,
},
Err(error) => {
error!("PUS mode service: packet handling error: {error:?}");
// To avoid permanent loops on error cases.
return HandlingStatus::Empty;
}
}
HandlingStatus::HandledOne
}
fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus {
self.service
.poll_and_check_next_reply(time_stamp)
.unwrap_or_else(|e| {
warn!("PUS action service: Handling reply failed with error {e:?}");
HandlingStatus::HandledOne
})
}
fn check_for_request_timeouts(&mut self) {
self.service.check_for_request_timeouts();
}
}
#[cfg(test)]
mod tests {
use ops_sat_rs::config::tmtc_err;
use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0, TEST_UNIQUE_ID_0};
use satrs::request::MessageMetadata;
use satrs::{
mode::{ModeAndSubmode, ModeReply, ModeRequest},
pus::mode::Subservice,
request::GenericMessage,
spacepackets::{
ecss::tc::{PusTcCreator, PusTcSecondaryHeader},
SpHeader,
},
};
use crate::pus::{
mode::ModeReplyHandler,
tests::{PusConverterTestbench, ReplyHandlerTestbench},
};
use super::ModeRequestConverter;
#[test]
fn mode_converter_read_mode_request() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcReadMode as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::ReadMode);
}
#[test]
fn mode_converter_set_mode_request() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcSetMode as u8);
let mut app_data: [u8; 4 + ModeAndSubmode::RAW_LEN] = [0; 4 + ModeAndSubmode::RAW_LEN];
let mode_and_submode = ModeAndSubmode::new(2, 1);
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
mode_and_submode
.write_to_be_bytes(&mut app_data[4..])
.unwrap();
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::SetMode(mode_and_submode));
}
#[test]
fn mode_converter_announce_mode() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceMode as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::AnnounceMode);
}
#[test]
fn mode_converter_announce_mode_recursively() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header =
PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceModeRecursive as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::AnnounceModeRecursive);
}
#[test]
fn reply_handling_unrequested_reply() {
let mut testbench = ReplyHandlerTestbench::new(
TEST_COMPONENT_ID_0.id(),
ModeReplyHandler::new(TEST_COMPONENT_ID_0.id()),
);
let mode_reply = ModeReply::ModeReply(ModeAndSubmode::new(5, 1));
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), mode_reply);
// Right now this function does not do a lot. We simply check that it does not panic or do
// weird stuff.
let result = testbench.handle_unrequested_reply(&unrequested_reply);
assert!(result.is_ok());
}
#[test]
fn reply_handling_reply_timeout() {
let mut testbench = ReplyHandlerTestbench::new(
TEST_COMPONENT_ID_0.id(),
ModeReplyHandler::new(TEST_COMPONENT_ID_0.id()),
);
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let result = testbench.handle_request_timeout(&active_request, &[]);
assert!(result.is_ok());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_0.raw(),
req_id,
None,
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
);
}
}

153
src/pus/scheduler.rs Normal file

@ -0,0 +1,153 @@
use std::sync::mpsc;
use std::time::Duration;
use crate::pus::create_verification_reporter;
use log::{error, info, warn};
use ops_sat_rs::config::components::PUS_SCHEDULER_SERVICE;
use satrs::pool::{PoolProvider, StaticMemoryPool};
use satrs::pus::scheduler::{PusScheduler, TcInfo};
use satrs::pus::scheduler_srv::PusSchedServiceHandler;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
EcssTcAndToken, EcssTcInVecConverter, MpscTcReceiver, PusPacketHandlerResult, PusServiceHelper,
};
use satrs::tmtc::{PacketAsVec, PacketInPool, PacketSenderWithSharedPool};
use satrs::ComponentId;
use super::HandlingStatus;
pub trait TcReleaser {
fn release(&mut self, sender_id: ComponentId, enabled: bool, info: &TcInfo, tc: &[u8]) -> bool;
}
impl TcReleaser for PacketSenderWithSharedPool {
fn release(
&mut self,
sender_id: ComponentId,
enabled: bool,
_info: &TcInfo,
tc: &[u8],
) -> bool {
if enabled {
let shared_pool = self.shared_pool.get_mut();
// Transfer TC from scheduler TC pool to shared TC pool.
let released_tc_addr = shared_pool
.0
.write()
.expect("locking pool failed")
.add(tc)
.expect("adding TC to shared pool failed");
self.sender
.send(PacketInPool::new(sender_id, released_tc_addr))
.expect("sending TC to TC source failed");
}
true
}
}
impl TcReleaser for mpsc::Sender<PacketAsVec> {
fn release(
&mut self,
sender_id: ComponentId,
enabled: bool,
_info: &TcInfo,
tc: &[u8],
) -> bool {
if enabled {
// Send released TC to centralized TC source.
self.send(PacketAsVec::new(sender_id, tc.to_vec()))
.expect("sending TC to TC source failed");
}
true
}
}
pub struct SchedulingService {
pub pus_11_handler: PusSchedServiceHandler<
MpscTcReceiver,
mpsc::Sender<PacketAsVec>,
EcssTcInVecConverter,
VerificationReporter,
PusScheduler,
>,
pub sched_tc_pool: StaticMemoryPool,
pub releaser_buf: [u8; 4096],
pub tc_releaser: Box<dyn TcReleaser + Send>,
}
impl SchedulingService {
pub fn release_tcs(&mut self) {
let id = self.pus_11_handler.service_helper.id();
let releaser = |enabled: bool, info: &TcInfo, tc: &[u8]| -> bool {
self.tc_releaser.release(id, enabled, info, tc)
};
self.pus_11_handler
.scheduler_mut()
.update_time_from_now()
.unwrap();
let released_tcs = self
.pus_11_handler
.scheduler_mut()
.release_telecommands_with_buffer(
releaser,
&mut self.sched_tc_pool,
&mut self.releaser_buf,
)
.expect("releasing TCs failed");
if released_tcs > 0 {
info!("{released_tcs} TC(s) released from scheduler");
}
}
pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
match self
.pus_11_handler
.poll_and_handle_next_tc(time_stamp, &mut self.sched_tc_pool)
{
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS11 partial packet handling success: {e:?}")
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS11 invalid subservice {invalid}");
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS11: Subservice {subservice} not implemented");
}
PusPacketHandlerResult::Empty => return HandlingStatus::Empty,
},
Err(error) => {
error!("PUS packet handling error: {error:?}")
}
}
HandlingStatus::HandledOne
}
}
pub fn create_scheduler_service(
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
tc_source_sender: mpsc::Sender<PacketAsVec>,
pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
sched_tc_pool: StaticMemoryPool,
) -> SchedulingService {
let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
.expect("Creating PUS Scheduler failed");
let pus_11_handler = PusSchedServiceHandler::new(
PusServiceHelper::new(
PUS_SCHEDULER_SERVICE.id(),
pus_sched_rx,
tm_funnel_tx,
create_verification_reporter(PUS_SCHEDULER_SERVICE.id(), PUS_SCHEDULER_SERVICE.apid),
EcssTcInVecConverter::default(),
),
scheduler,
);
SchedulingService {
pus_11_handler,
sched_tc_pool,
releaser_buf: [0; 4096],
tc_releaser: Box::new(tc_source_sender),
}
}
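// Illustrative wiring sketch, not part of this commit: all names are hypothetical. The
// heap-based variant uses a plain mpsc sender towards the TC source as the `TcReleaser`
// backend, matching the trait impl above.
#[allow(dead_code)]
fn wire_scheduler_service_sketch(sched_tc_pool: StaticMemoryPool) -> SchedulingService {
    let (tm_funnel_tx, _tm_funnel_rx) = mpsc::channel::<PacketAsVec>();
    let (tc_source_tx, _tc_source_rx) = mpsc::channel::<PacketAsVec>();
    let (_pus_sched_tc_tx, pus_sched_tc_rx) = mpsc::channel::<EcssTcAndToken>();
    create_scheduler_service(tm_funnel_tx, tc_source_tx, pus_sched_tc_rx, sched_tc_pool)
}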

View File

@ -1,35 +1,28 @@
// use crate::pus::mode::ModeServiceWrapper;
use crate::pus::test::TestCustomServiceWrapper;
use crate::pus::HandlingStatus;
use derive_new::new;
use satrs::{
pus::{EcssTcInMemConverter, EcssTmSenderCore},
spacepackets::time::{cds, TimeWriter},
use satrs::spacepackets::time::{cds, TimeWriter};
use super::{
action::ActionServiceWrapper, event::EventServiceWrapper, hk::HkServiceWrapper,
mode::ModeServiceWrapper, scheduler::SchedulingService, TargetedPusService,
};
// use super::{
// action::ActionServiceWrapper, event::EventServiceWrapper, hk::HkServiceWrapper,
// scheduler::SchedulingServiceWrapper, test::TestCustomServiceWrapper, HandlingStatus,
// TargetedPusService,
// };
#[derive(new)]
pub struct PusStack<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter> {
test_srv: TestCustomServiceWrapper<TmSender, TcInMemConverter>,
// hk_srv_wrapper: HkServiceWrapper<TmSender, TcInMemConverter>,
// event_srv: EventServiceWrapper<TmSender, TcInMemConverter>,
// action_srv_wrapper: ActionServiceWrapper<TmSender, TcInMemConverter>,
// schedule_srv: SchedulingServiceWrapper<TmSender, TcInMemConverter>,
// mode_srv: ModeServiceWrapper<TmSender, TcInMemConverter>,
pub struct PusStack {
test_srv: TestCustomServiceWrapper,
hk_srv_wrapper: HkServiceWrapper,
event_srv: EventServiceWrapper,
action_srv_wrapper: ActionServiceWrapper,
schedule_srv: SchedulingService,
mode_srv: ModeServiceWrapper,
}
impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
PusStack<TmSender, TcInMemConverter>
{
impl PusStack {
pub fn periodic_operation(&mut self) {
// Release all telecommands which reached their release time before calling the service
// handlers.
// self.schedule_srv.release_tcs();
self.schedule_srv.release_tcs();
let time_stamp = cds::CdsTime::now_with_u16_days()
.expect("time stamp generation error")
.to_vec()
@ -37,37 +30,50 @@ impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
loop {
let mut nothing_to_do = true;
let mut is_srv_finished =
|tc_handling_done: bool, reply_handling_done: Option<HandlingStatus>| {
if !tc_handling_done
|| (reply_handling_done.is_some()
&& reply_handling_done.unwrap() == HandlingStatus::Empty)
|_srv_id: u8,
tc_handling_status: HandlingStatus,
reply_handling_status: Option<HandlingStatus>| {
if tc_handling_status == HandlingStatus::HandledOne
|| (reply_handling_status.is_some()
&& reply_handling_status.unwrap() == HandlingStatus::HandledOne)
{
nothing_to_do = false;
}
};
is_srv_finished(self.test_srv.poll_and_handle_next_packet(&time_stamp), None);
// is_srv_finished(self.schedule_srv.poll_and_handle_next_tc(&time_stamp), None);
// is_srv_finished(self.event_srv.poll_and_handle_next_tc(&time_stamp), None);
// is_srv_finished(
// self.action_srv_wrapper.poll_and_handle_next_tc(&time_stamp),
// Some(
// self.action_srv_wrapper
// .poll_and_handle_next_reply(&time_stamp),
// ),
// );
// is_srv_finished(
// self.hk_srv_wrapper.poll_and_handle_next_tc(&time_stamp),
// Some(self.hk_srv_wrapper.poll_and_handle_next_reply(&time_stamp)),
// );
// is_srv_finished(
// self.mode_srv.poll_and_handle_next_tc(&time_stamp),
// Some(self.mode_srv.poll_and_handle_next_reply(&time_stamp)),
// );
is_srv_finished(
17,
self.test_srv.poll_and_handle_next_packet(&time_stamp),
None,
);
is_srv_finished(
11,
self.schedule_srv.poll_and_handle_next_tc(&time_stamp),
None,
);
is_srv_finished(5, self.event_srv.poll_and_handle_next_tc(&time_stamp), None);
is_srv_finished(
8,
self.action_srv_wrapper.poll_and_handle_next_tc(&time_stamp),
Some(
self.action_srv_wrapper
.poll_and_handle_next_reply(&time_stamp),
),
);
is_srv_finished(
3,
self.hk_srv_wrapper.poll_and_handle_next_tc(&time_stamp),
Some(self.hk_srv_wrapper.poll_and_handle_next_reply(&time_stamp)),
);
is_srv_finished(
200,
self.mode_srv.poll_and_handle_next_tc(&time_stamp),
Some(self.mode_srv.poll_and_handle_next_reply(&time_stamp)),
);
if nothing_to_do {
// Timeout checking is only done once.
// self.action_srv_wrapper.check_for_request_timeouts();
// self.hk_srv_wrapper.check_for_request_timeouts();
// self.mode_srv.check_for_request_timeouts();
self.action_srv_wrapper.check_for_request_timeouts();
self.hk_srv_wrapper.check_for_request_timeouts();
self.mode_srv.check_for_request_timeouts();
break;
}
}

View File

@ -1,25 +1,28 @@
use crate::pus::create_verification_reporter;
use log::{info, warn};
use ops_sat_rs::config::components::PUS_TEST_SERVICE;
use ops_sat_rs::config::tmtc_err;
// use satrs::event_man::{EventMessage, EventMessageU32};
use ops_sat_rs::config::{tmtc_err, TEST_EVENT};
use satrs::event_man::{EventMessage, EventMessageU32};
use satrs::pus::test::PusService17TestHandler;
use satrs::pus::verification::{FailParams, VerificationReporter, VerificationReportingProvider};
use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter, EcssTmSenderCore, MpscTcReceiver,
MpscTmAsVecSender, PusPacketHandlerResult, PusServiceHelper, PusTmAsVec,
EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter, MpscTcReceiver, MpscTmAsVecSender,
PusPacketHandlerResult, PusServiceHelper,
};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusPacket;
use satrs::spacepackets::time::cds::CdsTime;
use satrs::spacepackets::time::TimeWriter;
use satrs::tmtc::PacketAsVec;
use std::sync::mpsc;
pub fn create_test_service_dynamic(
tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
// event_sender: mpsc::Sender<EventMessageU32>,
use super::HandlingStatus;
pub fn create_test_service(
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
event_tx: mpsc::SyncSender<EventMessageU32>,
pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
) -> TestCustomServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
) -> TestCustomServiceWrapper {
let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
PUS_TEST_SERVICE.id(),
pus_test_rx,
@ -29,27 +32,26 @@ pub fn create_test_service_dynamic(
));
TestCustomServiceWrapper {
handler: pus17_handler,
// test_srv_event_sender: event_sender,
event_tx,
}
}
pub struct TestCustomServiceWrapper<
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
> {
pub handler:
PusService17TestHandler<MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
// pub test_srv_event_sender: mpsc::Sender<EventMessageU32>,
pub struct TestCustomServiceWrapper {
pub handler: PusService17TestHandler<
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
VerificationReporter,
>,
pub event_tx: mpsc::SyncSender<EventMessageU32>,
}
impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
TestCustomServiceWrapper<TmSender, TcInMemConverter>
{
pub fn poll_and_handle_next_packet(&mut self, time_stamp: &[u8]) -> bool {
impl TestCustomServiceWrapper {
pub fn poll_and_handle_next_packet(&mut self, time_stamp: &[u8]) -> HandlingStatus {
let res = self.handler.poll_and_handle_next_tc(time_stamp);
if res.is_err() {
warn!("PUS17 handler failed with error {:?}", res.unwrap_err());
return true;
warn!("PUS17 handlers failed with error {:?}", res.unwrap_err());
return HandlingStatus::Empty;
}
match res.unwrap() {
PusPacketHandlerResult::RequestHandled => {
@ -79,9 +81,9 @@ impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
time_stamper.write_to_bytes(&mut stamp_buf).unwrap();
if subservice == 128 {
info!("Generating test event");
// self.test_srv_event_sender
// .send(EventMessage::new(PUS_TEST_SERVICE.id(), TEST_EVENT.into()))
// .expect("Sending test event failed");
self.event_tx
.send(EventMessage::new(PUS_TEST_SERVICE.id(), TEST_EVENT.into()))
.expect("Sending test event failed");
let start_token = self
.handler
.service_helper
@ -114,10 +116,8 @@ impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
.expect("Sending start failure verification failed");
}
}
PusPacketHandlerResult::Empty => {
return true;
PusPacketHandlerResult::Empty => return HandlingStatus::Empty,
}
}
false
HandlingStatus::HandledOne
}
}

View File

@ -10,7 +10,7 @@ use satrs::mode::ModeRequest;
use satrs::pus::verification::{
FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{ActiveRequestProvider, EcssTmSenderCore, GenericRoutingError, PusRequestRouter};
use satrs::pus::{ActiveRequestProvider, EcssTmSender, GenericRoutingError, PusRequestRouter};
use satrs::queue::GenericSendError;
use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
@ -49,7 +49,7 @@ impl GenericRequestRouter {
active_request: &impl ActiveRequestProvider,
tc: &PusTcReader,
error: GenericRoutingError,
tm_sender: &(impl EcssTmSenderCore + ?Sized),
tm_sender: &(impl EcssTmSender + ?Sized),
verif_reporter: &impl VerificationReportingProvider,
time_stamp: &[u8],
) {

View File

@ -1,109 +0,0 @@
use std::{
collections::HashMap,
sync::mpsc::{self},
};
use log::info;
use satrs::pus::PusTmAsVec;
use satrs::{
seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore},
spacepackets::{
ecss::{tm::PusTmZeroCopyWriter, PusPacket},
time::cds::MIN_CDS_FIELD_LEN,
CcsdsPacket,
},
};
use crate::interface::tcp::SyncTcpTmSource;
#[derive(Default)]
pub struct CcsdsSeqCounterMap {
apid_seq_counter_map: HashMap<u16, CcsdsSimpleSeqCountProvider>,
}
impl CcsdsSeqCounterMap {
pub fn get_and_increment(&mut self, apid: u16) -> u16 {
self.apid_seq_counter_map
.entry(apid)
.or_default()
.get_and_increment()
}
}
pub struct TmFunnelCommon {
seq_counter_map: CcsdsSeqCounterMap,
msg_counter_map: HashMap<u8, u16>,
sync_tm_tcp_source: SyncTcpTmSource,
}
impl TmFunnelCommon {
pub fn new(sync_tm_tcp_source: SyncTcpTmSource) -> Self {
Self {
seq_counter_map: Default::default(),
msg_counter_map: Default::default(),
sync_tm_tcp_source,
}
}
// Applies common packet processing operations for PUS TM packets. This includes setting
// a sequence counter
fn apply_packet_processing(&mut self, mut zero_copy_writer: PusTmZeroCopyWriter) {
// zero_copy_writer.set_apid(PUS_APID);
zero_copy_writer.set_seq_count(
self.seq_counter_map
.get_and_increment(zero_copy_writer.apid()),
);
let entry = self
.msg_counter_map
.entry(zero_copy_writer.service())
.or_insert(0);
zero_copy_writer.set_msg_count(*entry);
if *entry == u16::MAX {
*entry = 0;
} else {
*entry += 1;
}
Self::packet_printout(&zero_copy_writer);
// This operation has to come last!
zero_copy_writer.finish();
}
fn packet_printout(tm: &PusTmZeroCopyWriter) {
info!("Sending PUS TM[{},{}]", tm.service(), tm.subservice());
}
}
pub struct TmFunnelDynamic {
common: TmFunnelCommon,
tm_funnel_rx: mpsc::Receiver<PusTmAsVec>,
tm_server_tx: mpsc::Sender<PusTmAsVec>,
}
impl TmFunnelDynamic {
pub fn new(
sync_tm_tcp_source: SyncTcpTmSource,
tm_funnel_rx: mpsc::Receiver<PusTmAsVec>,
tm_server_tx: mpsc::Sender<PusTmAsVec>,
) -> Self {
Self {
common: TmFunnelCommon::new(sync_tm_tcp_source),
tm_funnel_rx,
tm_server_tx,
}
}
pub fn operation(&mut self) {
if let Ok(mut tm) = self.tm_funnel_rx.recv() {
// Read the TM, set sequence counter and message counter, and finally update
// the CRC.
let zero_copy_writer = PusTmZeroCopyWriter::new(&mut tm.packet, MIN_CDS_FIELD_LEN)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
self.common.sync_tm_tcp_source.add_tm(&tm.packet);
self.tm_server_tx
.send(tm)
.expect("Sending TM to server failed");
}
}
}


@@ -1,94 +0,0 @@
use crate::pus::PusReceiver;
use satrs::pool::{StoreAddr, StoreError};
use satrs::pus::{EcssTcAndToken, MpscTmAsVecSender};
use satrs::spacepackets::ecss::PusPacket;
use satrs::{
pus::ReceivesEcssPusTc,
spacepackets::{ecss::tc::PusTcReader, SpHeader},
tmtc::ReceivesCcsdsTc,
};
use std::sync::mpsc::{self, SendError, Sender, TryRecvError};
use thiserror::Error;
#[derive(Debug, Clone, PartialEq, Eq, Error)]
pub enum MpscStoreAndSendError {
#[error("Store error: {0}")]
Store(#[from] StoreError),
#[error("TC send error: {0}")]
TcSend(#[from] SendError<EcssTcAndToken>),
#[error("TMTC send error: {0}")]
TmTcSend(#[from] SendError<StoreAddr>),
}
// Newtype, can not implement necessary traits on MPSC sender directly because of orphan rules.
#[derive(Clone)]
pub struct PusTcSourceProviderDynamic(pub Sender<Vec<u8>>);
impl ReceivesEcssPusTc for PusTcSourceProviderDynamic {
type Error = SendError<Vec<u8>>;
fn pass_pus_tc(&mut self, _: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error> {
self.0.send(pus_tc.raw_data().to_vec())?;
Ok(())
}
}
impl ReceivesCcsdsTc for PusTcSourceProviderDynamic {
type Error = mpsc::SendError<Vec<u8>>;
fn pass_ccsds(&mut self, _: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> {
self.0.send(tc_raw.to_vec())?;
Ok(())
}
}
// TC source components where the heap is the backing memory of the received telecommands.
pub struct TcSourceTaskDynamic {
pub tc_receiver: mpsc::Receiver<Vec<u8>>,
pus_receiver: PusReceiver<MpscTmAsVecSender>,
}
impl TcSourceTaskDynamic {
pub fn new(
tc_receiver: mpsc::Receiver<Vec<u8>>,
pus_receiver: PusReceiver<MpscTmAsVecSender>,
) -> Self {
Self {
tc_receiver,
pus_receiver,
}
}
pub fn periodic_operation(&mut self) {
self.poll_tc();
}
pub fn poll_tc(&mut self) -> bool {
match self.tc_receiver.try_recv() {
Ok(tc) => match PusTcReader::new(&tc) {
Ok((pus_tc, _)) => {
self.pus_receiver
.handle_tc_packet(
satrs::pus::TcInMemory::Vec(tc.clone()),
pus_tc.service(),
&pus_tc,
)
.ok();
true
}
Err(e) => {
log::warn!("error creating PUS TC from raw data: {e}");
log::warn!("raw data: {:x?}", tc);
true
}
},
Err(e) => match e {
TryRecvError::Empty => false,
TryRecvError::Disconnected => {
log::warn!("tmtc thread: sender disconnected");
false
}
},
}
}
}
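
The deleted TC source above wrapped the mpsc sender in a newtype because of Rust's orphan rule: a crate may only implement a trait for a type if it owns at least one of the two. Here both the traits (ReceivesEcssPusTc and ReceivesCcsdsTc from satrs) and the type (std's Sender) are foreign, so a locally owned wrapper was required. A generic illustration of the pattern, with a hypothetical local trait standing in for the satrs traits:

use std::sync::mpsc::Sender;

// Local stand-in trait for illustration only; in the deleted code the traits came from
// satrs, which is exactly why `impl ... for Sender<Vec<u8>>` was rejected there.
trait PassRawTc {
    fn pass_tc(&mut self, raw: &[u8]);
}

// Locally owned newtype: the crate owns this type, so any trait may be implemented on it.
struct TcSenderNewtype(pub Sender<Vec<u8>>);

impl PassRawTc for TcSenderNewtype {
    fn pass_tc(&mut self, raw: &[u8]) {
        // A closed channel is simply ignored in this sketch.
        self.0.send(raw.to_vec()).ok();
    }
}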

src/tmtc/mod.rs Normal file (2 lines added)

@@ -0,0 +1,2 @@
pub mod tc_source;
pub mod tm_sink;

src/tmtc/tc_source.rs Normal file (47 lines added)

@@ -0,0 +1,47 @@
use std::sync::mpsc::{self, TryRecvError};
use satrs::{pus::MpscTmAsVecSender, tmtc::PacketAsVec};
use crate::pus::{HandlingStatus, PusTcDistributor};
// TC source components where the heap is the backing memory of the received telecommands.
pub struct TcSourceTaskDynamic {
pub tc_receiver: mpsc::Receiver<PacketAsVec>,
pus_distrib: PusTcDistributor<MpscTmAsVecSender>,
}
impl TcSourceTaskDynamic {
pub fn new(
tc_receiver: mpsc::Receiver<PacketAsVec>,
pus_receiver: PusTcDistributor<MpscTmAsVecSender>,
) -> Self {
Self {
tc_receiver,
pus_distrib: pus_receiver,
}
}
pub fn periodic_operation(&mut self) {
self.poll_tc();
}
pub fn poll_tc(&mut self) -> HandlingStatus {
// Right now, we only expect PUS packets. If any other protocols like CFDP are added at
// a later stage, we probably need to check for the APID before routing the packet.
match self.tc_receiver.try_recv() {
Ok(packet_with_sender) => {
self.pus_distrib
.handle_tc_packet(packet_with_sender.sender_id, packet_with_sender.packet)
.ok();
HandlingStatus::HandledOne
}
Err(e) => match e {
TryRecvError::Empty => HandlingStatus::Empty,
TryRecvError::Disconnected => {
log::warn!("tmtc thread: sender disconnected");
HandlingStatus::Empty
}
},
}
}
}
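
The routing comment inside poll_tc notes that packets are currently handed to the PUS distributor unconditionally, and that an APID check would be needed once another protocol such as CFDP is added. A hypothetical sketch of that pre-routing step, with an APID value and handler stubs that are not taken from this repository (the APID occupies the low 11 bits of the first 16-bit word of the CCSDS primary header):

// Assumed value for illustration only.
const CFDP_APID: u16 = 0x2FF;

fn route_by_apid(packet: &[u8]) {
    if packet.len() < 6 {
        return; // Too short to contain a CCSDS primary header.
    }
    let apid = u16::from_be_bytes([packet[0], packet[1]]) & 0x7FF;
    if apid == CFDP_APID {
        // Forward to a future CFDP handler.
    } else {
        // Default path: hand the packet to the PUS TC distributor, as poll_tc does today.
    }
}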

src/tmtc/tm_sink.rs Normal file (139 lines added)

@@ -0,0 +1,139 @@
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::{collections::HashMap, sync::mpsc};
use log::info;
use ops_sat_rs::config::tasks::STOP_CHECK_FREQUENCY;
use satrs::tmtc::PacketAsVec;
use satrs::{
seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore},
spacepackets::{
ecss::{tm::PusTmZeroCopyWriter, PusPacket},
time::cds::MIN_CDS_FIELD_LEN,
CcsdsPacket,
},
};
use crate::interface::tcp_server::SyncTcpTmSource;
#[derive(Default)]
pub struct CcsdsSeqCounterMap {
apid_seq_counter_map: HashMap<u16, CcsdsSimpleSeqCountProvider>,
}
impl CcsdsSeqCounterMap {
pub fn get_and_increment(&mut self, apid: u16) -> u16 {
self.apid_seq_counter_map
.entry(apid)
.or_default()
.get_and_increment()
}
}
pub struct TmFunnelCommon {
seq_counter_map: CcsdsSeqCounterMap,
msg_counter_map: HashMap<u8, u16>,
sync_tm_tcp_source: SyncTcpTmSource,
}
impl TmFunnelCommon {
pub fn new(sync_tm_tcp_source: SyncTcpTmSource) -> Self {
Self {
seq_counter_map: Default::default(),
msg_counter_map: Default::default(),
sync_tm_tcp_source,
}
}
// Applies common packet processing operations for PUS TM packets. This includes setting
// a sequence counter
fn apply_packet_processing(&mut self, mut zero_copy_writer: PusTmZeroCopyWriter) {
// zero_copy_writer.set_apid(PUS_APID);
zero_copy_writer.set_seq_count(
self.seq_counter_map
.get_and_increment(zero_copy_writer.apid()),
);
let entry = self
.msg_counter_map
.entry(zero_copy_writer.service())
.or_insert(0);
zero_copy_writer.set_msg_count(*entry);
if *entry == u16::MAX {
*entry = 0;
} else {
*entry += 1;
}
Self::packet_printout(&zero_copy_writer);
// This operation has to come last!
zero_copy_writer.finish();
}
fn packet_printout(tm: &PusTmZeroCopyWriter) {
info!("Sending PUS TM[{},{}]", tm.service(), tm.subservice());
}
}
pub struct TmFunnelDynamic {
common: TmFunnelCommon,
tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
tm_udp_server_tx: mpsc::Sender<PacketAsVec>,
tm_tcp_client_tx: mpsc::Sender<PacketAsVec>,
stop_signal: Arc<AtomicBool>,
}
impl TmFunnelDynamic {
pub fn new(
sync_tm_tcp_source: SyncTcpTmSource,
tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
tm_udp_server_tx: mpsc::Sender<PacketAsVec>,
tm_tcp_client_tx: mpsc::Sender<PacketAsVec>,
stop_signal: Arc<AtomicBool>,
) -> Self {
Self {
common: TmFunnelCommon::new(sync_tm_tcp_source),
tm_funnel_rx,
tm_udp_server_tx,
tm_tcp_client_tx,
stop_signal,
}
}
pub fn operation(&mut self) {
loop {
match self.tm_funnel_rx.recv_timeout(STOP_CHECK_FREQUENCY) {
Ok(mut tm) => {
// Read the TM, set sequence counter and message counter, and finally update
// the CRC.
let zero_copy_writer =
PusTmZeroCopyWriter::new(&mut tm.packet, MIN_CDS_FIELD_LEN)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
self.common.sync_tm_tcp_source.add_tm(&tm.packet);
let result = self.tm_udp_server_tx.send(tm.clone());
if result.is_err() {
log::error!("TM UDP server has disconnected");
}
let result = self.tm_tcp_client_tx.send(tm);
if result.is_err() {
log::error!("TM TCP client has disconnected");
}
if self.stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
break;
}
}
Err(e) => match e {
mpsc::RecvTimeoutError::Timeout => {
if self.stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
break;
}
}
mpsc::RecvTimeoutError::Disconnected => {
log::warn!("All TM funnel senders have disconnected");
break;
}
},
}
}
}
}
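
A minimal wiring sketch for the new TM sink, assuming the module layout introduced in this changeset (crate::tmtc::tm_sink, crate::interface::tcp_server); the actual setup in main.rs may differ. The dropped receiver halves would normally be handed to the UDP server and TCP client tasks:

use std::sync::{atomic::AtomicBool, mpsc, Arc};
use std::thread;

use satrs::tmtc::PacketAsVec;

use crate::interface::tcp_server::SyncTcpTmSource;
use crate::tmtc::tm_sink::TmFunnelDynamic;

fn spawn_tm_funnel(
    sync_tm_tcp_source: SyncTcpTmSource,
    stop_signal: Arc<AtomicBool>,
) -> mpsc::Sender<PacketAsVec> {
    let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel();
    let (tm_udp_tx, _tm_udp_rx) = mpsc::channel();
    let (tm_tcp_tx, _tm_tcp_rx) = mpsc::channel();
    let mut funnel = TmFunnelDynamic::new(
        sync_tm_tcp_source,
        tm_funnel_rx,
        tm_udp_tx,
        tm_tcp_tx,
        stop_signal,
    );
    // operation() loops internally until the stop signal is set or all TM senders disconnect.
    thread::spawn(move || funnel.operation());
    // This sender is handed to every TM producing component.
    tm_funnel_tx
}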

templates/exp278.toml Normal file (5 lines added)

@@ -0,0 +1,5 @@
# This configuration file should either be inside the (experiment) home folder or in the current
# folder the application is run from.
# On the small flatsat, change this to 9999.
tcp_spp_server_port = 4096
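
A hypothetical loader sketch for this template, using serde and the toml crate; the project already has its own configuration handling (ops_sat_rs::config), so the struct and function names below are illustrative only:

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct ExpConfig {
    // Port of the TCP SPP server; 4096 for local testing, 9999 on the small flatsat.
    tcp_spp_server_port: u16,
}

fn parse_exp_config(raw: &str) -> Result<ExpConfig, toml::de::Error> {
    toml::from_str(raw)
}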

Binary file not shown.