- Add new shared subcrate `satrs-shared` to split off some shared components which are not expected to change very often.
- Rename `satrs-core` to `satrs`. sat-rs is expected to remain the primary crate, so the "core" suffix is superfluous; "core" also implies a stability which will not be the case for some time.
satrs/.gitignore (new file, vendored, 5 lines)
@@ -0,0 +1,5 @@
/target

# CLion
/.idea/*
!/.idea/runConfigurations
satrs/CHANGELOG.md (new file, 9 lines)
@@ -0,0 +1,9 @@
Change Log
=======

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).

# [unreleased]
satrs/Cargo.toml (new file, 126 lines)
@@ -0,0 +1,126 @@
[package]
name = "satrs"
version = "0.1.0"
edition = "2021"
rust-version = "1.61"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
description = "A framework to build software for remote systems"
homepage = "https://absatsw.uni-stuttgart.de/projects/sat-rs"
repository = "https://egit.irs.uni-stuttgart.de/rust/sat-rs"
license = "Apache-2.0"
keywords = ["no-std", "space", "aerospace"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-support", "embedded"]

[dependencies]
delegate = ">0.7, <=0.10"
paste = "1"

[dependencies.smallvec]
version = "1"

[dependencies.num_enum]
version = ">0.5, <=0.7"
default-features = false

[dependencies.crc]
version = "3"

[dependencies.dyn-clone]
version = "1"
optional = true

[dependencies.hashbrown]
version = "0.14"
optional = true

[dependencies.heapless]
version = "0.7"
optional = true

[dependencies.num-traits]
version = "0.2"
default-features = false

[dependencies.downcast-rs]
version = "1.2"
default-features = false
optional = true

[dependencies.bus]
version = "2.2"
optional = true

[dependencies.crossbeam-channel]
version = "0.5"
default-features = false
optional = true

[dependencies.thiserror]
version = "1"
optional = true

[dependencies.serde]
version = "1"
default-features = false
optional = true

[dependencies.socket2]
version = "0.5.4"
features = ["all"]
optional = true

[dependencies.satrs-shared]
# version = "0.1.0"
path = "../satrs-shared"

[dependencies.spacepackets]
version = "0.9"
default-features = false

[dependencies.cobs]
git = "https://github.com/robamu/cobs.rs.git"
version = "0.2.3"
branch = "all_features"
default-features = false

[dev-dependencies]
serde = "1"
zerocopy = "0.7"
once_cell = "1.13"
serde_json = "1"
rand = "0.8"
tempfile = "3"

[dev-dependencies.postcard]
version = "1"

[features]
default = ["std"]
std = [
    "downcast-rs/std",
    "alloc",
    "bus",
    "postcard/use-std",
    "crossbeam-channel/std",
    "serde/std",
    "spacepackets/std",
    "num_enum/std",
    "thiserror",
    "socket2"
]
alloc = [
    "serde/alloc",
    "spacepackets/alloc",
    "hashbrown",
    "dyn-clone",
    "downcast-rs"
]
serde = ["dep:serde", "spacepackets/serde"]
crossbeam = ["crossbeam-channel"]
heapless = ["dep:heapless"]
doc-images = []

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "doc_cfg", "--generate-link-to-definition"]
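As context for the feature list above: the optional dependencies are consumed in the sources through conditional compilation. The following is only an illustrative sketch (not code from this commit; the type and function names are made up) of how the `std`, `alloc` and `serde` features declared here are typically gated in downstream or internal code:

```rust
// Illustrative sketch: consuming the Cargo features declared above with cfg attributes.
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

// serde derives are only compiled when the optional "serde" feature is enabled.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct ExampleConfig {
    pub id: u32,
}

// std-only helpers are gated on the "std" feature, which in turn pulls in "alloc".
#[cfg(feature = "std")]
pub fn print_config(cfg: &ExampleConfig) {
    println!("{:?}", cfg);
}
```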
satrs/LICENSE-APACHE (new file, 201 lines)
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
satrs/NOTICE (new file, 1 line)
@@ -0,0 +1 @@
This software contains code developed at the University of Stuttgart's Institute of Space Systems.
satrs/README.md (new file, 8 lines)
@@ -0,0 +1,8 @@
[](https://crates.io/crates/satrs-core)
[](https://docs.rs/satrs-core)

satrs-core
======

This crate contains the core components of the sat-rs framework.
You can find more information on the [homepage](https://egit.irs.uni-stuttgart.de/rust/sat-rs).
satrs/release-checklist.md (new file, 25 lines)
@@ -0,0 +1,25 @@
Checklist for new releases
=======

# Pre-Release

1. Make sure any new modules are documented sufficiently and check the docs with
   `cargo +nightly doc --all-features --config 'rustdocflags=["--cfg", "doc_cfg"]' --open`.
2. Bump version specifier in `Cargo.toml`.
3. Update `CHANGELOG.md`: Convert `unreleased` section into version section with date and add new
   `unreleased` section.
4. Run `cargo test --all-features`.
5. Run `cargo fmt` and `cargo clippy`. Check `cargo msrv` against MSRV in `Cargo.toml`.
6. Wait for CI/CD results for EGit and Github. These also check cross-compilation for bare-metal
   targets.

# Release

1. `cargo publish`

# Post-Release

1. Create a new annotated tag and push it with `git tag -a satrs-core-<version>` and
   `git push -u origin satrs-core-<version>`
2. Create a new release on `EGit` based on the tag.
satrs/src/cfdp/dest.rs (new file, 1600 lines)
File diff suppressed because it is too large.
satrs/src/cfdp/filestore.rs (new file, 770 lines)
@@ -0,0 +1,770 @@
use alloc::string::{String, ToString};
use core::fmt::Display;
use crc::{Crc, CRC_32_CKSUM};
use spacepackets::cfdp::ChecksumType;
use spacepackets::ByteConversionError;
#[cfg(feature = "std")]
use std::error::Error;
use std::path::Path;
#[cfg(feature = "std")]
pub use stdmod::*;

pub const CRC_32: Crc<u32> = Crc::<u32>::new(&CRC_32_CKSUM);

#[derive(Debug, Clone)]
pub enum FilestoreError {
    FileDoesNotExist,
    FileAlreadyExists,
    DirDoesNotExist,
    Permission,
    IsNotFile,
    IsNotDirectory,
    ByteConversion(ByteConversionError),
    Io {
        raw_errno: Option<i32>,
        string: String,
    },
    ChecksumTypeNotImplemented(ChecksumType),
}

impl From<ByteConversionError> for FilestoreError {
    fn from(value: ByteConversionError) -> Self {
        Self::ByteConversion(value)
    }
}

impl Display for FilestoreError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            FilestoreError::FileDoesNotExist => {
                write!(f, "file does not exist")
            }
            FilestoreError::FileAlreadyExists => {
                write!(f, "file already exists")
            }
            FilestoreError::DirDoesNotExist => {
                write!(f, "directory does not exist")
            }
            FilestoreError::Permission => {
                write!(f, "permission error")
            }
            FilestoreError::IsNotFile => {
                write!(f, "is not a file")
            }
            FilestoreError::IsNotDirectory => {
                write!(f, "is not a directory")
            }
            FilestoreError::ByteConversion(e) => {
                write!(f, "filestore error: {e}")
            }
            FilestoreError::Io { raw_errno, string } => {
                write!(
                    f,
                    "filestore generic IO error with raw errno {:?}: {}",
                    raw_errno, string
                )
            }
            FilestoreError::ChecksumTypeNotImplemented(checksum_type) => {
                write!(f, "checksum {:?} not implemented", checksum_type)
            }
        }
    }
}

impl Error for FilestoreError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match self {
            FilestoreError::ByteConversion(e) => Some(e),
            _ => None,
        }
    }
}

#[cfg(feature = "std")]
impl From<std::io::Error> for FilestoreError {
    fn from(value: std::io::Error) -> Self {
        Self::Io {
            raw_errno: value.raw_os_error(),
            string: value.to_string(),
        }
    }
}

pub trait VirtualFilestore {
    fn create_file(&self, file_path: &str) -> Result<(), FilestoreError>;

    fn remove_file(&self, file_path: &str) -> Result<(), FilestoreError>;

    /// Truncating a file means deleting all its data so the resulting file is empty.
    /// This can be more efficient than removing and re-creating a file.
    fn truncate_file(&self, file_path: &str) -> Result<(), FilestoreError>;

    fn remove_dir(&self, dir_path: &str, all: bool) -> Result<(), FilestoreError>;
    fn create_dir(&self, dir_path: &str) -> Result<(), FilestoreError>;

    fn read_data(
        &self,
        file_path: &str,
        offset: u64,
        read_len: u64,
        buf: &mut [u8],
    ) -> Result<(), FilestoreError>;

    fn write_data(&self, file: &str, offset: u64, buf: &[u8]) -> Result<(), FilestoreError>;

    fn filename_from_full_path(path: &str) -> Option<&str>
    where
        Self: Sized,
    {
        // Convert the path string to a Path
        let path = Path::new(path);

        // Extract the file name using the file_name() method
        path.file_name().and_then(|name| name.to_str())
    }

    fn is_file(&self, path: &str) -> bool;

    fn is_dir(&self, path: &str) -> bool {
        !self.is_file(path)
    }

    fn exists(&self, path: &str) -> bool;

    /// This special function is the CFDP specific abstraction to verify the checksum of a file.
    /// This allows to keep OS specific details like reading the whole file in the most efficient
    /// manner inside the file system abstraction.
    ///
    /// The passed verification buffer argument will be used by the specific implementation as
    /// a buffer to read the file into. It is recommended to use common buffer sizes like
    /// 4096 or 8192 bytes.
    fn checksum_verify(
        &self,
        file_path: &str,
        checksum_type: ChecksumType,
        expected_checksum: u32,
        verification_buf: &mut [u8],
    ) -> Result<bool, FilestoreError>;
}

#[cfg(feature = "std")]
pub mod stdmod {
    use super::*;
    use std::{
        fs::{self, File, OpenOptions},
        io::{BufReader, Read, Seek, SeekFrom, Write},
        path::Path,
    };

    #[derive(Default)]
    pub struct NativeFilestore {}

    impl VirtualFilestore for NativeFilestore {
        fn create_file(&self, file_path: &str) -> Result<(), FilestoreError> {
            if self.exists(file_path) {
                return Err(FilestoreError::FileAlreadyExists);
            }
            File::create(file_path)?;
            Ok(())
        }

        fn remove_file(&self, file_path: &str) -> Result<(), FilestoreError> {
            if !self.exists(file_path) {
                return Err(FilestoreError::FileDoesNotExist);
            }
            if !self.is_file(file_path) {
                return Err(FilestoreError::IsNotFile);
            }
            fs::remove_file(file_path)?;
            Ok(())
        }

        fn truncate_file(&self, file_path: &str) -> Result<(), FilestoreError> {
            if !self.exists(file_path) {
                return Err(FilestoreError::FileDoesNotExist);
            }
            if !self.is_file(file_path) {
                return Err(FilestoreError::IsNotFile);
            }
            OpenOptions::new()
                .write(true)
                .truncate(true)
                .open(file_path)?;
            Ok(())
        }

        fn create_dir(&self, dir_path: &str) -> Result<(), FilestoreError> {
            fs::create_dir(dir_path).map_err(|e| FilestoreError::Io {
                raw_errno: e.raw_os_error(),
                string: e.to_string(),
            })?;
            Ok(())
        }

        fn remove_dir(&self, dir_path: &str, all: bool) -> Result<(), FilestoreError> {
            if !self.exists(dir_path) {
                return Err(FilestoreError::DirDoesNotExist);
            }
            if !self.is_dir(dir_path) {
                return Err(FilestoreError::IsNotDirectory);
            }
            if !all {
                fs::remove_dir(dir_path)?;
                return Ok(());
            }
            fs::remove_dir_all(dir_path)?;
            Ok(())
        }

        fn read_data(
            &self,
            file_name: &str,
            offset: u64,
            read_len: u64,
            buf: &mut [u8],
        ) -> Result<(), FilestoreError> {
            if buf.len() < read_len as usize {
                return Err(ByteConversionError::ToSliceTooSmall {
                    found: buf.len(),
                    expected: read_len as usize,
                }
                .into());
            }
            if !self.exists(file_name) {
                return Err(FilestoreError::FileDoesNotExist);
            }
            if !self.is_file(file_name) {
                return Err(FilestoreError::IsNotFile);
            }
            let mut file = File::open(file_name)?;
            file.seek(SeekFrom::Start(offset))?;
            file.read_exact(&mut buf[0..read_len as usize])?;
            Ok(())
        }

        fn write_data(&self, file: &str, offset: u64, buf: &[u8]) -> Result<(), FilestoreError> {
            if !self.exists(file) {
                return Err(FilestoreError::FileDoesNotExist);
            }
            if !self.is_file(file) {
                return Err(FilestoreError::IsNotFile);
            }
            let mut file = OpenOptions::new().write(true).open(file)?;
            file.seek(SeekFrom::Start(offset))?;
            file.write_all(buf)?;
            Ok(())
        }

        fn is_file(&self, path: &str) -> bool {
            let path = Path::new(path);
            path.is_file()
        }

        fn exists(&self, path: &str) -> bool {
            let path = Path::new(path);
            if !path.exists() {
                return false;
            }
            true
        }

        fn checksum_verify(
            &self,
            file_path: &str,
            checksum_type: ChecksumType,
            expected_checksum: u32,
            verification_buf: &mut [u8],
        ) -> Result<bool, FilestoreError> {
            match checksum_type {
                ChecksumType::Modular => {
                    if self.calc_modular_checksum(file_path)? == expected_checksum {
                        return Ok(true);
                    }
                    Ok(false)
                }
                ChecksumType::Crc32 => {
                    let mut digest = CRC_32.digest();
                    let file_to_check = File::open(file_path)?;
                    let mut buf_reader = BufReader::new(file_to_check);
                    loop {
                        let bytes_read = buf_reader.read(verification_buf)?;
                        if bytes_read == 0 {
                            break;
                        }
                        digest.update(&verification_buf[0..bytes_read]);
                    }
                    if digest.finalize() == expected_checksum {
                        return Ok(true);
                    }
                    Ok(false)
                }
                ChecksumType::NullChecksum => Ok(true),
                _ => Err(FilestoreError::ChecksumTypeNotImplemented(checksum_type)),
            }
        }
    }

    impl NativeFilestore {
        pub fn calc_modular_checksum(&self, file_path: &str) -> Result<u32, FilestoreError> {
            let mut checksum: u32 = 0;
            let file = File::open(file_path)?;
            let mut buf_reader = BufReader::new(file);
            let mut buffer = [0; 4];

            loop {
                let bytes_read = buf_reader.read(&mut buffer)?;
                if bytes_read == 0 {
                    break;
                }
                // Perform padding directly in the buffer
                (bytes_read..4).for_each(|i| {
                    buffer[i] = 0;
                });

                checksum = checksum.wrapping_add(u32::from_be_bytes(buffer));
            }
            Ok(checksum)
        }
    }
}

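Before the test module, a short usage sketch of the API above. This is not part of the diff; it assumes the module is exported as `satrs::cfdp::filestore` (matching the file path), that the crate is built with the `std` feature, and that `/tmp` is writable on the host:

```rust
// Usage sketch: exercising the VirtualFilestore abstraction through NativeFilestore.
use satrs::cfdp::filestore::{FilestoreError, NativeFilestore, VirtualFilestore, CRC_32};
use spacepackets::cfdp::ChecksumType;

fn main() -> Result<(), FilestoreError> {
    let fs = NativeFilestore::default();
    let path = "/tmp/satrs-filestore-demo.bin";
    if fs.exists(path) {
        fs.remove_file(path)?;
    }
    fs.create_file(path)?;
    fs.write_data(path, 0, &[1, 2, 3, 4])?;

    // Read the data back through the abstraction.
    let mut buf = [0u8; 4];
    fs.read_data(path, 0, 4, &mut buf)?;
    assert_eq!(buf, [1, 2, 3, 4]);

    // Verify the file against a CRC32 checksum computed over the same bytes.
    let expected = CRC_32.checksum(&[1, 2, 3, 4]);
    let mut verif_buf = [0u8; 4096];
    assert!(fs.checksum_verify(path, ChecksumType::Crc32, expected, &mut verif_buf)?);

    fs.remove_file(path)?;
    Ok(())
}
```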
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{fs, path::Path, println};
|
||||
|
||||
use super::*;
|
||||
use alloc::format;
|
||||
use tempfile::tempdir;
|
||||
|
||||
const EXAMPLE_DATA_CFDP: [u8; 15] = [
|
||||
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
|
||||
];
|
||||
|
||||
const NATIVE_FS: NativeFilestore = NativeFilestore {};
|
||||
|
||||
#[test]
|
||||
fn test_basic_native_filestore_create() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
let result =
|
||||
NATIVE_FS.create_file(file_path.to_str().expect("getting str for file failed"));
|
||||
assert!(result.is_ok());
|
||||
let path = Path::new(&file_path);
|
||||
assert!(path.exists());
|
||||
assert!(NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
assert!(NATIVE_FS.is_file(file_path.to_str().unwrap()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_native_fs_file_exists() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
assert!(!NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
assert!(NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
assert!(NATIVE_FS.is_file(file_path.to_str().unwrap()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_native_fs_dir_exists() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let dir_path = tmpdir.path().join("testdir");
|
||||
assert!(!NATIVE_FS.exists(dir_path.to_str().unwrap()));
|
||||
NATIVE_FS
|
||||
.create_dir(dir_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
assert!(NATIVE_FS.exists(dir_path.to_str().unwrap()));
|
||||
assert!(NATIVE_FS.is_dir(dir_path.as_path().to_str().unwrap()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_native_fs_remove_file() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.expect("creating file failed");
|
||||
assert!(NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
NATIVE_FS
|
||||
.remove_file(file_path.to_str().unwrap())
|
||||
.expect("removing file failed");
|
||||
assert!(!NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_native_fs_write() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
assert!(!NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
assert!(NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
assert!(NATIVE_FS.is_file(file_path.to_str().unwrap()));
|
||||
println!("{}", file_path.to_str().unwrap());
|
||||
let write_data = "hello world\n";
|
||||
NATIVE_FS
|
||||
.write_data(file_path.to_str().unwrap(), 0, write_data.as_bytes())
|
||||
.expect("writing to file failed");
|
||||
let read_back = fs::read_to_string(file_path).expect("reading back data failed");
|
||||
assert_eq!(read_back, write_data);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_native_fs_read() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
assert!(!NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
assert!(NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
assert!(NATIVE_FS.is_file(file_path.to_str().unwrap()));
|
||||
println!("{}", file_path.to_str().unwrap());
|
||||
let write_data = "hello world\n";
|
||||
NATIVE_FS
|
||||
.write_data(file_path.to_str().unwrap(), 0, write_data.as_bytes())
|
||||
.expect("writing to file failed");
|
||||
let read_back = fs::read_to_string(file_path).expect("reading back data failed");
|
||||
assert_eq!(read_back, write_data);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_truncate_file() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.expect("creating file failed");
|
||||
fs::write(file_path.clone(), [1, 2, 3, 4]).unwrap();
|
||||
assert_eq!(fs::read(file_path.clone()).unwrap(), [1, 2, 3, 4]);
|
||||
NATIVE_FS
|
||||
.truncate_file(file_path.to_str().unwrap())
|
||||
.unwrap();
|
||||
assert_eq!(fs::read(file_path.clone()).unwrap(), []);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_dir() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let dir_path = tmpdir.path().join("testdir");
|
||||
assert!(!NATIVE_FS.exists(dir_path.to_str().unwrap()));
|
||||
NATIVE_FS
|
||||
.create_dir(dir_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
assert!(NATIVE_FS.exists(dir_path.to_str().unwrap()));
|
||||
NATIVE_FS
|
||||
.remove_dir(dir_path.to_str().unwrap(), false)
|
||||
.unwrap();
|
||||
assert!(!NATIVE_FS.exists(dir_path.to_str().unwrap()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_file() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.expect("creating file failed");
|
||||
fs::write(file_path.clone(), [1, 2, 3, 4]).unwrap();
|
||||
let read_buf: &mut [u8] = &mut [0; 4];
|
||||
NATIVE_FS
|
||||
.read_data(file_path.to_str().unwrap(), 0, 4, read_buf)
|
||||
.unwrap();
|
||||
assert_eq!([1, 2, 3, 4], read_buf);
|
||||
NATIVE_FS
|
||||
.write_data(file_path.to_str().unwrap(), 4, &[5, 6, 7, 8])
|
||||
.expect("writing to file failed");
|
||||
NATIVE_FS
|
||||
.read_data(file_path.to_str().unwrap(), 2, 4, read_buf)
|
||||
.unwrap();
|
||||
assert_eq!([3, 4, 5, 6], read_buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_which_does_not_exist() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
let result = NATIVE_FS.read_data(file_path.to_str().unwrap(), 0, 4, &mut [0; 4]);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::FileDoesNotExist = error {
|
||||
assert_eq!(error.to_string(), "file does not exist");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_file_already_exists() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
let result =
|
||||
NATIVE_FS.create_file(file_path.to_str().expect("getting str for file failed"));
|
||||
assert!(result.is_ok());
|
||||
let result =
|
||||
NATIVE_FS.create_file(file_path.to_str().expect("getting str for file failed"));
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::FileAlreadyExists = error {
|
||||
assert_eq!(error.to_string(), "file already exists");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_file_with_dir_api() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
let result = NATIVE_FS.remove_dir(file_path.to_str().unwrap(), true);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::IsNotDirectory = error {
|
||||
assert_eq!(error.to_string(), "is not a directory");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_dir_remove_all() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let dir_path = tmpdir.path().join("test");
|
||||
NATIVE_FS
|
||||
.create_dir(dir_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
let file_path = dir_path.as_path().join("test.txt");
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
let result = NATIVE_FS.remove_dir(dir_path.to_str().unwrap(), true);
|
||||
assert!(result.is_ok());
|
||||
assert!(!NATIVE_FS.exists(dir_path.to_str().unwrap()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_dir_with_file_api() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test");
|
||||
NATIVE_FS
|
||||
.create_dir(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
let result = NATIVE_FS.remove_file(file_path.to_str().unwrap());
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::IsNotFile = error {
|
||||
assert_eq!(error.to_string(), "is not a file");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_dir_which_does_not_exist() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test");
|
||||
let result = NATIVE_FS.remove_dir(file_path.to_str().unwrap(), true);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::DirDoesNotExist = error {
|
||||
assert_eq!(error.to_string(), "directory does not exist");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_file_which_does_not_exist() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
let result = NATIVE_FS.remove_file(file_path.to_str().unwrap());
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::FileDoesNotExist = error {
|
||||
assert_eq!(error.to_string(), "file does not exist");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_truncate_file_which_does_not_exist() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
let result = NATIVE_FS.truncate_file(file_path.to_str().unwrap());
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::FileDoesNotExist = error {
|
||||
assert_eq!(error.to_string(), "file does not exist");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_truncate_file_on_directory() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test");
|
||||
NATIVE_FS.create_dir(file_path.to_str().unwrap()).unwrap();
|
||||
let result = NATIVE_FS.truncate_file(file_path.to_str().unwrap());
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::IsNotFile = error {
|
||||
assert_eq!(error.to_string(), "is not a file");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_byte_conversion_error_when_reading() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
let result = NATIVE_FS.read_data(file_path.to_str().unwrap(), 0, 2, &mut []);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::ByteConversion(byte_conv_error) = error {
|
||||
if let ByteConversionError::ToSliceTooSmall { found, expected } = byte_conv_error {
|
||||
assert_eq!(found, 0);
|
||||
assert_eq!(expected, 2);
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
assert_eq!(
|
||||
error.to_string(),
|
||||
format!("filestore error: {}", byte_conv_error)
|
||||
);
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_file_on_dir() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let dir_path = tmpdir.path().join("test");
|
||||
NATIVE_FS
|
||||
.create_dir(dir_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
let result = NATIVE_FS.read_data(dir_path.to_str().unwrap(), 0, 4, &mut [0; 4]);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::IsNotFile = error {
|
||||
assert_eq!(error.to_string(), "is not a file");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_file_non_existing() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
let result = NATIVE_FS.write_data(file_path.to_str().unwrap(), 0, &[]);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::FileDoesNotExist = error {
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_file_on_dir() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test");
|
||||
NATIVE_FS.create_dir(file_path.to_str().unwrap()).unwrap();
|
||||
let result = NATIVE_FS.write_data(file_path.to_str().unwrap(), 0, &[]);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::IsNotFile = error {
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filename_extraction() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
NativeFilestore::filename_from_full_path(file_path.to_str().unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_modular_checksum() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("mod-crc.bin");
|
||||
fs::write(file_path.as_path(), EXAMPLE_DATA_CFDP).expect("writing test file failed");
|
||||
// Kind of re-writing the modular checksum impl here which we are trying to test, but the
|
||||
// numbers/correctness were verified manually using calculators, so this is okay.
|
||||
let mut checksum: u32 = 0;
|
||||
let mut buffer: [u8; 4] = [0; 4];
|
||||
for i in 0..3 {
|
||||
buffer = EXAMPLE_DATA_CFDP[i * 4..(i + 1) * 4].try_into().unwrap();
|
||||
checksum = checksum.wrapping_add(u32::from_be_bytes(buffer));
|
||||
}
|
||||
buffer[0..3].copy_from_slice(&EXAMPLE_DATA_CFDP[12..15]);
|
||||
buffer[3] = 0;
|
||||
checksum = checksum.wrapping_add(u32::from_be_bytes(buffer));
|
||||
let mut verif_buf: [u8; 32] = [0; 32];
|
||||
let result = NATIVE_FS.checksum_verify(
|
||||
file_path.to_str().unwrap(),
|
||||
ChecksumType::Modular,
|
||||
checksum,
|
||||
&mut verif_buf,
|
||||
);
|
||||
assert!(result.is_ok());
|
||||
}
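The test above re-derives the CFDP modular checksum by hand. As a compact, standalone sketch of the same arithmetic (zero-pad the final chunk to 4 bytes, interpret each chunk as a big-endian u32, accumulate with wrapping addition; the helper name is illustrative):

```rust
// Sketch of the modular checksum arithmetic used by calc_modular_checksum().
fn modular_checksum(data: &[u8]) -> u32 {
    let mut checksum: u32 = 0;
    for chunk in data.chunks(4) {
        let mut word = [0u8; 4];
        word[..chunk.len()].copy_from_slice(chunk);
        checksum = checksum.wrapping_add(u32::from_be_bytes(word));
    }
    checksum
}

fn main() {
    // Same 15-byte pattern as EXAMPLE_DATA_CFDP in the test above.
    let data: [u8; 15] = [
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
    ];
    // 0x00010203 + 0x04050607 + 0x08090A0B + 0x0C0D0E00 (last chunk zero-padded)
    assert_eq!(modular_checksum(&data), 0x181C_2015);
}
```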
|
||||
|
||||
#[test]
|
||||
fn test_null_checksum_impl() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("mod-crc.bin");
|
||||
// The file to check does not even need to exist, and the verification buffer can be
|
||||
// empty: the null checksum always yields the same result.
|
||||
let result = NATIVE_FS.checksum_verify(
|
||||
file_path.to_str().unwrap(),
|
||||
ChecksumType::NullChecksum,
|
||||
0,
|
||||
&mut [],
|
||||
);
|
||||
assert!(result.is_ok());
|
||||
assert!(result.unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_checksum_not_implemented() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("mod-crc.bin");
|
||||
// The file to check does not even need to exist, and the verification buffer can be
|
||||
// empty: the unsupported checksum type is rejected before the file is accessed.
|
||||
let result = NATIVE_FS.checksum_verify(
|
||||
file_path.to_str().unwrap(),
|
||||
ChecksumType::Crc32Proximity1,
|
||||
0,
|
||||
&mut [],
|
||||
);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::ChecksumTypeNotImplemented(cksum_type) = error {
|
||||
assert_eq!(
|
||||
error.to_string(),
|
||||
format!("checksum {:?} not implemented", cksum_type)
|
||||
);
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
}
|
satrs/src/cfdp/mod.rs (new file, 671 lines)
@@ -0,0 +1,671 @@
|
||||
//! This module contains the implementation of the CFDP high level classes as specified in the
|
||||
//! CCSDS 727.0-B-5.
|
||||
use core::{cell::RefCell, fmt::Debug, hash::Hash};
|
||||
|
||||
use crc::{Crc, CRC_32_CKSUM};
|
||||
use hashbrown::HashMap;
|
||||
use spacepackets::{
|
||||
cfdp::{
|
||||
pdu::{FileDirectiveType, PduError, PduHeader},
|
||||
ChecksumType, ConditionCode, FaultHandlerCode, PduType, TransmissionMode,
|
||||
},
|
||||
util::UnsignedByteField,
|
||||
};
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::boxed::Box;
|
||||
#[cfg(feature = "serde")]
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub mod dest;
|
||||
#[cfg(feature = "alloc")]
|
||||
pub mod filestore;
|
||||
#[cfg(feature = "std")]
|
||||
pub mod source;
|
||||
pub mod user;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum EntityType {
|
||||
Sending,
|
||||
Receiving,
|
||||
}
|
||||
|
||||
pub enum TimerContext {
|
||||
CheckLimit {
|
||||
local_id: UnsignedByteField,
|
||||
remote_id: UnsignedByteField,
|
||||
entity_type: EntityType,
|
||||
},
|
||||
NakActivity {
|
||||
expiry_time_seconds: f32,
|
||||
},
|
||||
PositiveAck {
|
||||
expiry_time_seconds: f32,
|
||||
},
|
||||
}
|
||||
|
||||
/// Generic abstraction for a check timer which is used by 3 mechanisms of the CFDP protocol.
|
||||
///
|
||||
/// ## 1. Check limit handling
|
||||
///
|
||||
/// The first mechanism is the check limit handling for unacknowledged transfers as specified
|
||||
/// in 4.6.3.2 and 4.6.3.3 of the CFDP standard.
|
||||
/// For this mechanism, the timer has different functionality depending on whether
|
||||
/// the using entity is the sending entity or the receiving entity for the unacknowledged
|
||||
/// transmission mode.
|
||||
///
|
||||
/// For the sending entity, this timer determines the expiry period for declaring a check limit
|
||||
/// fault after sending an EOF PDU with requested closure. This allows a timeout of the transfer.
|
||||
/// Also see 4.6.3.2 of the CFDP standard.
|
||||
///
|
||||
/// For the receiving entity, this timer determines the expiry period for incrementing a check
|
||||
/// counter after an EOF PDU is received for an incomplete file transfer. This allows out-of-order
|
||||
/// reception of file data PDUs and EOF PDUs. Also see 4.6.3.3 of the CFDP standard.
|
||||
///
|
||||
/// ## 2. NAK activity limit
|
||||
///
|
||||
/// The timer will be used to perform the NAK activity check as specified in 4.6.4.7 of the CFDP
|
||||
/// standard. The expiration period will be provided by the NAK timer expiration limit of the
|
||||
/// remote entity configuration.
|
||||
///
|
||||
/// ## 3. Positive ACK procedures
|
||||
///
|
||||
/// The timer will be used to perform the Positive Acknowledgement Procedures as specified in
|
||||
/// 4.7.1 of the CFDP standard. The expiration period will be provided by the Positive ACK timer
|
||||
/// interval of the remote entity configuration.
|
||||
pub trait CheckTimer: Debug {
|
||||
fn has_expired(&self) -> bool;
|
||||
fn reset(&mut self);
|
||||
}
|
||||
|
||||
/// A generic trait which allows CFDP entities to create check timers which are required to
|
||||
/// implement special procedures in unacknowledged transmission mode, as specified in 4.6.3.2
|
||||
/// and 4.6.3.3. The [CheckTimer] documentation provides more information about the purpose of the
|
||||
/// check timer in the context of CFDP.
|
||||
///
|
||||
/// This trait also allows the creation of different check timers depending on context and purpose
|
||||
/// of the timer, the runtime environment (e.g. standard clock timer vs. timer using a RTC) or
|
||||
/// other factors.
|
||||
#[cfg(feature = "alloc")]
|
||||
pub trait CheckTimerCreator {
|
||||
fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box<dyn CheckTimer>;
|
||||
}
|
||||
|
||||
/// Simple implementation of the [CheckTimerCreator] trait assuming a standard runtime.
|
||||
/// It also assumes that one-second accuracy of the check timer period is sufficient.
|
||||
#[cfg(feature = "std")]
|
||||
#[derive(Debug)]
|
||||
pub struct StdCheckTimer {
|
||||
expiry_time_seconds: u64,
|
||||
start_time: std::time::Instant,
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl StdCheckTimer {
|
||||
pub fn new(expiry_time_seconds: u64) -> Self {
|
||||
Self {
|
||||
expiry_time_seconds,
|
||||
start_time: std::time::Instant::now(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl CheckTimer for StdCheckTimer {
|
||||
fn has_expired(&self) -> bool {
|
||||
let elapsed_time = self.start_time.elapsed();
|
||||
if elapsed_time.as_secs() > self.expiry_time_seconds {
|
||||
return true;
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
fn reset(&mut self) {
|
||||
self.start_time = std::time::Instant::now();
|
||||
}
|
||||
}
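A hedged sketch of how the [CheckTimerCreator] abstraction above might be implemented on a std host: the creator hands out [StdCheckTimer] instances whose expiry period depends on the timer context. The struct and field names are illustrative and not part of this commit.

```rust
// Sketch: a std-based CheckTimerCreator. Assumes alloc/std are enabled.
use alloc::boxed::Box;

#[derive(Debug)]
pub struct SimpleCheckTimerCreator {
    pub check_limit_seconds: u64,
}

impl CheckTimerCreator for SimpleCheckTimerCreator {
    fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box<dyn CheckTimer> {
        let expiry_seconds = match timer_context {
            // The check limit period comes from this creator's own configuration.
            TimerContext::CheckLimit { .. } => self.check_limit_seconds,
            // NAK activity and positive ACK timers carry their period in the context
            // (truncated to whole seconds here, since StdCheckTimer takes u64 seconds).
            TimerContext::NakActivity { expiry_time_seconds }
            | TimerContext::PositiveAck { expiry_time_seconds } => expiry_time_seconds as u64,
        };
        Box::new(StdCheckTimer::new(expiry_seconds))
    }
}
```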
|
||||
|
||||
/// This structure models the remote entity configuration information as specified in chapter 8.3
|
||||
/// of the CFDP standard.
|
||||
|
||||
/// Some of the fields which were not considered necessary for the Rust implementation
|
||||
/// were omitted. Some other fields which are not contained inside the standard but are considered
|
||||
/// necessary for the Rust implementation are included.
|
||||
///
|
||||
/// ## Notes on Positive Acknowledgment Procedures
|
||||
///
|
||||
/// The `positive_ack_timer_interval_seconds` and `positive_ack_timer_expiration_limit` will
|
||||
/// be used for positive acknowledgement procedures as specified in CFDP chapter 4.7. The sending
|
||||
/// entity will start the timer for any PDUs where an acknowledgment is required (e.g. EOF PDU).
|
||||
/// If the expected ACK response has not been received within that interval, a counter will be
|
||||
/// incremented and the timer will be reset. Once the counter exceeds the
|
||||
/// `positive_ack_timer_expiration_limit`, a Positive ACK Limit Reached fault will be declared.
|
||||
///
|
||||
/// ## Notes on Deferred Lost Segment Procedures
|
||||
///
|
||||
/// This procedure will be active if an EOF (No Error) PDU is received in acknowledged mode. After
|
||||
/// issuing the NAK sequence which has the whole file scope, a timer will be started. The timer is
|
||||
/// reset when missing segments or missing metadata is received. The timer will be deactivated if
|
||||
/// all missing data is received. If the timer expires, a new NAK sequence will be issued and a
|
||||
/// counter will be incremented, which can lead to a NAK Limit Reached fault being declared.
|
||||
///
|
||||
/// ## Fields
|
||||
///
|
||||
/// * `entity_id` - The ID of the remote entity.
|
||||
/// * `max_packet_len` - This determines the maximum length of all PDUs generated for that remote entity in addition
|
||||
/// to the `max_file_segment_len` attribute which also determines the size of file data PDUs.
|
||||
/// * `max_file_segment_len` The maximum file segment length which determines the maximum size
|
||||
/// of file data PDUs in addition to the `max_packet_len` attribute. If this field is set
|
||||
/// to None, the maximum file segment length will be derived from the maximum packet length.
|
||||
/// If this has some value which is smaller than the segment value derived from
|
||||
/// `max_packet_len`, this value will be picked.
|
||||
/// * `closure_requested_by_default` - If the closure requested field is not supplied as part of
|
||||
/// the Put Request, it will be determined from this field in the remote configuration.
|
||||
/// * `crc_on_transmission_by_default` - If the CRC option is not supplied as part of the Put
|
||||
/// Request, it will be determined from this field in the remote configuration.
|
||||
/// * `default_transmission_mode` - If the transmission mode is not supplied as part of the
|
||||
/// Put Request, it will be determined from this field in the remote configuration.
|
||||
/// * `disposition_on_cancellation` - Determines whether an incomplete received file is discarded on
|
||||
/// transaction cancellation. Defaults to False.
|
||||
/// * `default_crc_type` - Default checksum type used to calculate for all file transmissions to
|
||||
/// this remote entity.
|
||||
/// * `check_limit` - This timer determines the expiry period for incrementing a check counter
|
||||
/// after an EOF PDU is received for an incomplete file transfer. This allows out-of-order
|
||||
/// reception of file data PDUs and EOF PDUs. Also see 4.6.3.3 of the CFDP standard. Defaults to
|
||||
/// 2, so the check limit timer may expire twice.
|
||||
/// * `positive_ack_timer_interval_seconds`- See the notes on the Positive Acknowledgment
|
||||
/// Procedures inside the class documentation. Expected as floating point seconds. Defaults to
|
||||
/// 10 seconds.
|
||||
/// * `positive_ack_timer_expiration_limit` - See the notes on the Positive Acknowledgment
|
||||
/// Procedures inside the class documentation. Defaults to 2, so the timer may expire twice.
|
||||
/// * `immediate_nak_mode` - Specifies whether a NAK sequence should be issued immediately when a
|
||||
/// file data gap or lost metadata is detected in the acknowledged mode. Defaults to True.
|
||||
/// * `nak_timer_interval_seconds` - See the notes on the Deferred Lost Segment Procedure inside
|
||||
/// the class documentation. Expected as floating point seconds. Defaults to 10 seconds.
|
||||
/// * `nak_timer_expiration_limit` - See the notes on the Deferred Lost Segment Procedure inside
|
||||
/// the class documentation. Defaults to 2, so the timer may expire two times.
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct RemoteEntityConfig {
|
||||
pub entity_id: UnsignedByteField,
|
||||
pub max_packet_len: usize,
|
||||
pub max_file_segment_len: usize,
|
||||
pub closure_requested_by_default: bool,
|
||||
pub crc_on_transmission_by_default: bool,
|
||||
pub default_transmission_mode: TransmissionMode,
|
||||
pub default_crc_type: ChecksumType,
|
||||
pub positive_ack_timer_interval_seconds: f32,
|
||||
pub positive_ack_timer_expiration_limit: u32,
|
||||
pub check_limit: u32,
|
||||
pub disposition_on_cancellation: bool,
|
||||
pub immediate_nak_mode: bool,
|
||||
pub nak_timer_interval_seconds: f32,
|
||||
pub nak_timer_expiration_limit: u32,
|
||||
}
|
||||
|
||||
impl RemoteEntityConfig {
|
||||
pub fn new_with_default_values(
|
||||
entity_id: UnsignedByteField,
|
||||
max_file_segment_len: usize,
|
||||
max_packet_len: usize,
|
||||
closure_requested_by_default: bool,
|
||||
crc_on_transmission_by_default: bool,
|
||||
default_transmission_mode: TransmissionMode,
|
||||
default_crc_type: ChecksumType,
|
||||
) -> Self {
|
||||
Self {
|
||||
entity_id,
|
||||
max_file_segment_len,
|
||||
max_packet_len,
|
||||
closure_requested_by_default,
|
||||
crc_on_transmission_by_default,
|
||||
default_transmission_mode,
|
||||
default_crc_type,
|
||||
check_limit: 2,
|
||||
positive_ack_timer_interval_seconds: 10.0,
|
||||
positive_ack_timer_expiration_limit: 2,
|
||||
disposition_on_cancellation: false,
|
||||
immediate_nak_mode: true,
|
||||
nak_timer_interval_seconds: 10.0,
|
||||
nak_timer_expiration_limit: 2,
|
||||
}
|
||||
}
|
||||
}
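A short sketch of constructing a remote entity configuration with the defaults documented above. It assumes spacepackets' `UnsignedByteFieldU16` helper (which converts into `UnsignedByteField`); the entity ID and size limits are illustrative values, not taken from this commit.

```rust
// Sketch: a remote entity config using the documented default values.
use spacepackets::cfdp::{ChecksumType, TransmissionMode};
use spacepackets::util::UnsignedByteFieldU16;

fn example_remote_cfg() -> RemoteEntityConfig {
    RemoteEntityConfig::new_with_default_values(
        UnsignedByteFieldU16::new(5).into(), // entity_id
        1024,                                // max_file_segment_len
        1200,                                // max_packet_len
        true,                                // closure_requested_by_default
        false,                               // crc_on_transmission_by_default
        TransmissionMode::Unacknowledged,
        ChecksumType::Crc32,
    )
}
```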
|
||||
|
||||
pub trait RemoteEntityConfigProvider {
|
||||
/// Retrieve the remote entity configuration for the given remote ID.
|
||||
fn get_remote_config(&self, remote_id: u64) -> Option<&RemoteEntityConfig>;
|
||||
fn get_remote_config_mut(&mut self, remote_id: u64) -> Option<&mut RemoteEntityConfig>;
|
||||
/// Add a new remote configuration. Return [true] if the configuration was
|
||||
/// inserted successfully, and [false] if a configuration already exists.
|
||||
fn add_config(&mut self, cfg: &RemoteEntityConfig) -> bool;
|
||||
/// Remove a configuration. Returns [true] if the configuration was removed successfully,
|
||||
/// and [false] if no configuration exists for the given remote ID.
|
||||
fn remove_config(&mut self, remote_id: u64) -> bool;
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
#[derive(Default)]
|
||||
pub struct StdRemoteEntityConfigProvider {
|
||||
remote_cfg_table: HashMap<u64, RemoteEntityConfig>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl RemoteEntityConfigProvider for StdRemoteEntityConfigProvider {
|
||||
fn get_remote_config(&self, remote_id: u64) -> Option<&RemoteEntityConfig> {
|
||||
self.remote_cfg_table.get(&remote_id)
|
||||
}
|
||||
fn get_remote_config_mut(&mut self, remote_id: u64) -> Option<&mut RemoteEntityConfig> {
|
||||
self.remote_cfg_table.get_mut(&remote_id)
|
||||
}
|
||||
fn add_config(&mut self, cfg: &RemoteEntityConfig) -> bool {
// `insert` returns the previous entry for the key, so a [None] result means the
// configuration was newly added, as documented in the trait.
self.remote_cfg_table
.insert(cfg.entity_id.value(), *cfg)
.is_none()
}
|
||||
fn remove_config(&mut self, remote_id: u64) -> bool {
|
||||
self.remote_cfg_table.remove(&remote_id).is_some()
|
||||
}
|
||||
}
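
// Editor's note: a short usage sketch, not part of the original commit. The
// `UnsignedByteFieldU8` helper and the checksum/transmission mode variants are assumptions
// about the spacepackets API.
#[cfg(all(test, feature = "std"))]
mod std_remote_cfg_provider_example {
    use super::{RemoteEntityConfig, RemoteEntityConfigProvider, StdRemoteEntityConfigProvider};
    use spacepackets::cfdp::{ChecksumType, TransmissionMode};
    use spacepackets::util::UnsignedByteFieldU8;

    #[test]
    fn insert_and_look_up_remote_config() {
        let cfg = RemoteEntityConfig::new_with_default_values(
            UnsignedByteFieldU8::new(5).into(),
            1024,
            2048,
            true,
            false,
            TransmissionMode::Unacknowledged,
            ChecksumType::Crc32,
        );
        let mut provider = StdRemoteEntityConfigProvider::default();
        provider.add_config(&cfg);
        // Look-ups are keyed by the raw entity ID value.
        assert!(provider.get_remote_config(5).is_some());
        assert!(provider.remove_config(5));
        assert!(provider.get_remote_config(5).is_none());
    }
}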
|
||||
|
||||
/// This trait introduces some callbacks which will be called when a particular CFDP fault
|
||||
/// handler is called.
|
||||
///
|
||||
/// It is passed into the CFDP handlers as part of the [DefaultFaultHandler] and the local entity
|
||||
/// configuration and provides a way to specify custom user error handlers. This makes it
/// possible to implement some CFDP features like fault handler logging, which would not be possible
|
||||
/// generically otherwise.
|
||||
///
|
||||
/// For each error reported by the [DefaultFaultHandler], the appropriate fault handler callback
|
||||
/// will be called depending on the [FaultHandlerCode].
|
||||
pub trait UserFaultHandler {
|
||||
fn notice_of_suspension_cb(
|
||||
&mut self,
|
||||
transaction_id: TransactionId,
|
||||
cond: ConditionCode,
|
||||
progress: u64,
|
||||
);
|
||||
|
||||
fn notice_of_cancellation_cb(
|
||||
&mut self,
|
||||
transaction_id: TransactionId,
|
||||
cond: ConditionCode,
|
||||
progress: u64,
|
||||
);
|
||||
|
||||
fn abandoned_cb(&mut self, transaction_id: TransactionId, cond: ConditionCode, progress: u64);
|
||||
|
||||
fn ignore_cb(&mut self, transaction_id: TransactionId, cond: ConditionCode, progress: u64);
|
||||
}
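
// Editor's note: a minimal sketch, not part of the original commit, of a [UserFaultHandler]
// implementation which only counts how often each callback fired. The `UnsignedByteFieldU8`
// helper used to build the transaction ID is an assumption about the spacepackets API.
#[cfg(test)]
mod user_fault_handler_example {
    use super::{TransactionId, UserFaultHandler};
    use spacepackets::cfdp::ConditionCode;
    use spacepackets::util::UnsignedByteFieldU8;

    #[derive(Default)]
    struct CountingFaultHandler {
        cancellations: u32,
        suspensions: u32,
        abandoned: u32,
        ignored: u32,
    }

    impl UserFaultHandler for CountingFaultHandler {
        fn notice_of_suspension_cb(&mut self, _id: TransactionId, _cond: ConditionCode, _progress: u64) {
            self.suspensions += 1;
        }
        fn notice_of_cancellation_cb(&mut self, _id: TransactionId, _cond: ConditionCode, _progress: u64) {
            self.cancellations += 1;
        }
        fn abandoned_cb(&mut self, _id: TransactionId, _cond: ConditionCode, _progress: u64) {
            self.abandoned += 1;
        }
        fn ignore_cb(&mut self, _id: TransactionId, _cond: ConditionCode, _progress: u64) {
            self.ignored += 1;
        }
    }

    #[test]
    fn callbacks_are_counted() {
        let mut handler = CountingFaultHandler::default();
        let id = TransactionId::new(
            UnsignedByteFieldU8::new(1).into(),
            UnsignedByteFieldU8::new(2).into(),
        );
        handler.notice_of_cancellation_cb(id, ConditionCode::FileChecksumFailure, 42);
        assert_eq!(handler.cancellations, 1);
        assert_eq!(handler.ignored, 0);
    }
}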
|
||||
|
||||
/// This structure is used to implement the fault handling as specified in chapter 4.8 of the CFDP
|
||||
/// standard.
|
||||
///
|
||||
/// It does so by mapping each applicable [spacepackets::cfdp::ConditionCode] to a fault handler
|
||||
/// which is denoted by the four [spacepackets::cfdp::FaultHandlerCode]s. This code is used
|
||||
/// to select the error handling inside the CFDP handler itself in addition to dispatching to a
|
||||
/// user-provided callback function provided by the [UserFaultHandler].
|
||||
///
|
||||
/// Some notes on the provided default settings:
|
||||
///
|
||||
/// - Checksum failures will be ignored by default. This is because for unacknowledged transfers,
|
||||
/// cancelling the transfer immediately would interfere with the check limit mechanism specified
|
||||
/// in chapter 4.6.3.3.
|
||||
/// - Unsupported checksum types will also be ignored by default. Even if the checksum type is
|
||||
/// not supported the file transfer might still have worked properly.
|
||||
///
|
||||
/// For all other faults, the default fault handling operation will be to cancel the transaction.
|
||||
/// These defaults can be overridden by using the [Self::set_fault_handler] method.
|
||||
/// Please note that in any case, fault handler overrides can be specified by the sending CFDP
|
||||
/// entity.
|
||||
pub struct DefaultFaultHandler {
|
||||
handler_array: [FaultHandlerCode; 10],
|
||||
// Could also change the user fault handler trait to have non-mutable methods, but that
// limits flexibility on the user side.
|
||||
user_fault_handler: RefCell<Box<dyn UserFaultHandler + Send>>,
|
||||
}
|
||||
|
||||
impl DefaultFaultHandler {
|
||||
fn condition_code_to_array_index(condition_code: ConditionCode) -> Option<usize> {
Some(match condition_code {
|
||||
ConditionCode::PositiveAckLimitReached => 0,
|
||||
ConditionCode::KeepAliveLimitReached => 1,
|
||||
ConditionCode::InvalidTransmissionMode => 2,
|
||||
ConditionCode::FilestoreRejection => 3,
|
||||
ConditionCode::FileChecksumFailure => 4,
|
||||
ConditionCode::FileSizeError => 5,
|
||||
ConditionCode::NakLimitReached => 6,
|
||||
ConditionCode::InactivityDetected => 7,
|
||||
ConditionCode::CheckLimitReached => 8,
|
||||
ConditionCode::UnsupportedChecksumType => 9,
|
||||
_ => return None,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn set_fault_handler(
|
||||
&mut self,
|
||||
condition_code: ConditionCode,
|
||||
fault_handler: FaultHandlerCode,
|
||||
) {
|
||||
let array_idx = Self::condition_code_to_array_index(condition_code);
|
||||
if array_idx.is_none() {
|
||||
return;
|
||||
}
|
||||
self.handler_array[array_idx.unwrap()] = fault_handler;
|
||||
}
|
||||
|
||||
pub fn new(user_fault_handler: Box<dyn UserFaultHandler + Send>) -> Self {
|
||||
let mut init_array = [FaultHandlerCode::NoticeOfCancellation; 10];
|
||||
init_array
|
||||
[Self::condition_code_to_array_index(ConditionCode::FileChecksumFailure).unwrap()] =
|
||||
FaultHandlerCode::IgnoreError;
|
||||
init_array[Self::condition_code_to_array_index(ConditionCode::UnsupportedChecksumType)
|
||||
.unwrap()] = FaultHandlerCode::IgnoreError;
|
||||
Self {
|
||||
handler_array: init_array,
|
||||
user_fault_handler: RefCell::new(user_fault_handler),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_fault_handler(&self, condition_code: ConditionCode) -> FaultHandlerCode {
|
||||
let array_idx = Self::condition_code_to_array_index(condition_code);
|
||||
if array_idx.is_none() {
|
||||
return FaultHandlerCode::IgnoreError;
|
||||
}
|
||||
self.handler_array[array_idx.unwrap()]
|
||||
}
|
||||
|
||||
pub fn report_fault(
|
||||
&self,
|
||||
transaction_id: TransactionId,
|
||||
condition: ConditionCode,
|
||||
progress: u64,
|
||||
) -> FaultHandlerCode {
|
||||
let array_idx = Self::condition_code_to_array_index(condition);
|
||||
if array_idx.is_none() {
|
||||
return FaultHandlerCode::IgnoreError;
|
||||
}
|
||||
let fh_code = self.handler_array[array_idx.unwrap()];
|
||||
let mut handler_mut = self.user_fault_handler.borrow_mut();
|
||||
match fh_code {
|
||||
FaultHandlerCode::NoticeOfCancellation => {
|
||||
handler_mut.notice_of_cancellation_cb(transaction_id, condition, progress);
|
||||
}
|
||||
FaultHandlerCode::NoticeOfSuspension => {
|
||||
handler_mut.notice_of_suspension_cb(transaction_id, condition, progress);
|
||||
}
|
||||
FaultHandlerCode::IgnoreError => {
|
||||
handler_mut.ignore_cb(transaction_id, condition, progress);
|
||||
}
|
||||
FaultHandlerCode::AbandonTransaction => {
|
||||
handler_mut.abandoned_cb(transaction_id, condition, progress);
|
||||
}
|
||||
}
|
||||
fh_code
|
||||
}
|
||||
}
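
// Editor's note: a sketch, not part of the original commit, showing how the default fault
// handler is wired up with a user hook and how an override is applied. The silent handler,
// the entity/sequence IDs and the `alloc` based [Box] import are illustrative assumptions.
#[cfg(all(test, feature = "alloc"))]
mod default_fault_handler_example {
    use super::*;
    use alloc::boxed::Box;
    use spacepackets::cfdp::{ConditionCode, FaultHandlerCode};
    use spacepackets::util::UnsignedByteFieldU8;

    struct SilentFaultHandler;

    impl UserFaultHandler for SilentFaultHandler {
        fn notice_of_suspension_cb(&mut self, _: TransactionId, _: ConditionCode, _: u64) {}
        fn notice_of_cancellation_cb(&mut self, _: TransactionId, _: ConditionCode, _: u64) {}
        fn abandoned_cb(&mut self, _: TransactionId, _: ConditionCode, _: u64) {}
        fn ignore_cb(&mut self, _: TransactionId, _: ConditionCode, _: u64) {}
    }

    #[test]
    fn override_and_report() {
        let mut fault_handler = DefaultFaultHandler::new(Box::new(SilentFaultHandler));
        // File size errors should be ignored instead of cancelling the transaction.
        fault_handler.set_fault_handler(ConditionCode::FileSizeError, FaultHandlerCode::IgnoreError);
        let id = TransactionId::new(
            UnsignedByteFieldU8::new(1).into(),
            UnsignedByteFieldU8::new(2).into(),
        );
        let code = fault_handler.report_fault(id, ConditionCode::FileSizeError, 0);
        assert_eq!(code, FaultHandlerCode::IgnoreError);
    }
}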
|
||||
|
||||
pub struct IndicationConfig {
|
||||
pub eof_sent: bool,
|
||||
pub eof_recv: bool,
|
||||
pub file_segment_recv: bool,
|
||||
pub transaction_finished: bool,
|
||||
pub suspended: bool,
|
||||
pub resumed: bool,
|
||||
}
|
||||
|
||||
impl Default for IndicationConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
eof_sent: true,
|
||||
eof_recv: true,
|
||||
file_segment_recv: true,
|
||||
transaction_finished: true,
|
||||
suspended: true,
|
||||
resumed: true,
|
||||
}
|
||||
}
|
||||
}
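
// Editor's note: a small sketch, not part of the original commit. All indications are enabled
// by default; struct update syntax can be used to switch individual ones off.
#[cfg(test)]
mod indication_cfg_example {
    use super::IndicationConfig;

    #[test]
    fn disable_file_segment_indication() {
        let cfg = IndicationConfig {
            file_segment_recv: false,
            ..Default::default()
        };
        assert!(cfg.eof_sent);
        assert!(!cfg.file_segment_recv);
    }
}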
|
||||
|
||||
pub struct LocalEntityConfig {
|
||||
pub id: UnsignedByteField,
|
||||
pub indication_cfg: IndicationConfig,
|
||||
pub default_fault_handler: DefaultFaultHandler,
|
||||
}
|
||||
|
||||
/// The CFDP transaction ID of a CFDP transaction consists of the source entity ID and the sequence
|
||||
/// number of that transfer which is also determined by the CFDP source entity.
|
||||
#[derive(Debug, Eq, Copy, Clone)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub struct TransactionId {
|
||||
source_id: UnsignedByteField,
|
||||
seq_num: UnsignedByteField,
|
||||
}
|
||||
|
||||
impl TransactionId {
|
||||
pub fn new(source_id: UnsignedByteField, seq_num: UnsignedByteField) -> Self {
|
||||
Self { source_id, seq_num }
|
||||
}
|
||||
|
||||
pub fn source_id(&self) -> &UnsignedByteField {
|
||||
&self.source_id
|
||||
}
|
||||
|
||||
pub fn seq_num(&self) -> &UnsignedByteField {
|
||||
&self.seq_num
|
||||
}
|
||||
}
|
||||
|
||||
impl Hash for TransactionId {
|
||||
fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
|
||||
self.source_id.value().hash(state);
|
||||
self.seq_num.value().hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for TransactionId {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.source_id.value() == other.source_id.value()
|
||||
&& self.seq_num.value() == other.seq_num.value()
|
||||
}
|
||||
}
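
// Editor's note: a brief sketch, not part of the original commit. Because [TransactionId]
// implements [Hash], [PartialEq] and [Eq], it can be used directly as a map key, for example
// to track the progress of multiple parallel transactions. The `UnsignedByteFieldU8` helper
// is an assumption about the spacepackets API.
#[cfg(all(test, feature = "std"))]
mod transaction_id_example {
    use super::TransactionId;
    use spacepackets::util::UnsignedByteFieldU8;
    use std::collections::HashMap;

    #[test]
    fn transaction_id_as_map_key() {
        let id = TransactionId::new(
            UnsignedByteFieldU8::new(1).into(),
            UnsignedByteFieldU8::new(42).into(),
        );
        let mut progress_map: HashMap<TransactionId, u64> = HashMap::new();
        progress_map.insert(id, 0);
        *progress_map.get_mut(&id).unwrap() += 512;
        assert_eq!(progress_map[&id], 512);
    }
}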
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub enum TransactionStep {
|
||||
Idle = 0,
|
||||
TransactionStart = 1,
|
||||
ReceivingFileDataPdus = 2,
|
||||
ReceivingFileDataPdusWithCheckLimitHandling = 3,
|
||||
SendingAckPdu = 4,
|
||||
TransferCompletion = 5,
|
||||
SendingFinishedPdu = 6,
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub enum State {
|
||||
Idle = 0,
|
||||
Busy = 1,
|
||||
Suspended = 2,
|
||||
}
|
||||
|
||||
pub const CRC_32: Crc<u32> = Crc::<u32>::new(&CRC_32_CKSUM);
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub enum PacketTarget {
|
||||
SourceEntity,
|
||||
DestEntity,
|
||||
}
|
||||
|
||||
/// This is a helper struct which contains base information about a particular PDU packet.
|
||||
/// This is also necessary information for CFDP packet routing. For example, some packet types
|
||||
/// like file data PDUs can only be used by CFDP source entities.
|
||||
pub struct PacketInfo<'raw_packet> {
|
||||
pdu_type: PduType,
|
||||
pdu_directive: Option<FileDirectiveType>,
|
||||
target: PacketTarget,
|
||||
raw_packet: &'raw_packet [u8],
|
||||
}
|
||||
|
||||
impl<'raw> PacketInfo<'raw> {
|
||||
pub fn new(raw_packet: &'raw [u8]) -> Result<Self, PduError> {
|
||||
let (pdu_header, header_len) = PduHeader::from_bytes(raw_packet)?;
|
||||
if pdu_header.pdu_type() == PduType::FileData {
|
||||
return Ok(Self {
|
||||
pdu_type: pdu_header.pdu_type(),
|
||||
pdu_directive: None,
|
||||
target: PacketTarget::DestEntity,
|
||||
raw_packet,
|
||||
});
|
||||
}
|
||||
if pdu_header.pdu_datafield_len() < 1 {
|
||||
return Err(PduError::FormatError);
|
||||
}
|
||||
// Route depending on PDU type and directive type if applicable. Retrieve directive type
|
||||
// from the raw stream for better performance (with sanity and directive code check).
|
||||
// The routing is based on section 4.5 of the CFDP standard which specifies the PDU forwarding
|
||||
// procedure.
|
||||
let directive = FileDirectiveType::try_from(raw_packet[header_len]).map_err(|_| {
|
||||
PduError::InvalidDirectiveType {
|
||||
found: raw_packet[header_len],
|
||||
expected: None,
|
||||
}
|
||||
})?;
|
||||
let packet_target = match directive {
|
||||
// Section c) of 4.5.3: These PDUs should always be targeted towards the file sender a.k.a.
|
||||
// the source handler
|
||||
FileDirectiveType::NakPdu
|
||||
| FileDirectiveType::FinishedPdu
|
||||
| FileDirectiveType::KeepAlivePdu => PacketTarget::SourceEntity,
|
||||
// Section b) of 4.5.3: These PDUs should always be targeted towards the file receiver a.k.a.
|
||||
// the destination handler
|
||||
FileDirectiveType::MetadataPdu
|
||||
| FileDirectiveType::EofPdu
|
||||
| FileDirectiveType::PromptPdu => PacketTarget::DestEntity,
|
||||
// Section a): The recipient depends on the type of PDU that is being acknowledged. We can
// simply extract the acknowledged PDU type from the raw stream. If it is an EOF PDU, the packet
// is passed to the source handler; for a Finished PDU, it is passed to the destination handler.
|
||||
FileDirectiveType::AckPdu => {
|
||||
let acked_directive = FileDirectiveType::try_from(raw_packet[header_len + 1])
|
||||
.map_err(|_| PduError::InvalidDirectiveType {
|
||||
found: raw_packet[header_len],
|
||||
expected: None,
|
||||
})?;
|
||||
if acked_directive == FileDirectiveType::EofPdu {
|
||||
PacketTarget::SourceEntity
|
||||
} else if acked_directive == FileDirectiveType::FinishedPdu {
|
||||
PacketTarget::DestEntity
|
||||
} else {
|
||||
// TODO: Maybe a better error? This might be confusing..
|
||||
return Err(PduError::InvalidDirectiveType {
|
||||
found: raw_packet[header_len + 1],
|
||||
expected: None,
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
Ok(Self {
|
||||
pdu_type: pdu_header.pdu_type(),
|
||||
pdu_directive: Some(directive),
|
||||
target: packet_target,
|
||||
raw_packet,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn pdu_type(&self) -> PduType {
|
||||
self.pdu_type
|
||||
}
|
||||
|
||||
pub fn pdu_directive(&self) -> Option<FileDirectiveType> {
|
||||
self.pdu_directive
|
||||
}
|
||||
|
||||
pub fn target(&self) -> PacketTarget {
|
||||
self.target
|
||||
}
|
||||
|
||||
pub fn raw_packet(&self) -> &[u8] {
|
||||
self.raw_packet
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use spacepackets::cfdp::{
|
||||
lv::Lv,
|
||||
pdu::{
|
||||
eof::EofPdu,
|
||||
file_data::FileDataPdu,
|
||||
metadata::{MetadataGenericParams, MetadataPduCreator},
|
||||
CommonPduConfig, FileDirectiveType, PduHeader, WritablePduPacket,
|
||||
},
|
||||
PduType,
|
||||
};
|
||||
|
||||
use crate::cfdp::PacketTarget;
|
||||
|
||||
use super::PacketInfo;
|
||||
|
||||
fn generic_pdu_header() -> PduHeader {
|
||||
let pdu_conf = CommonPduConfig::default();
|
||||
PduHeader::new_no_file_data(pdu_conf, 0)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_metadata_pdu_info() {
|
||||
let mut buf: [u8; 128] = [0; 128];
|
||||
let pdu_header = generic_pdu_header();
|
||||
let metadata_params = MetadataGenericParams::default();
|
||||
let src_file_name = "hello.txt";
|
||||
let dest_file_name = "hello-dest.txt";
|
||||
let src_lv = Lv::new_from_str(src_file_name).unwrap();
|
||||
let dest_lv = Lv::new_from_str(dest_file_name).unwrap();
|
||||
let metadata_pdu =
|
||||
MetadataPduCreator::new_no_opts(pdu_header, metadata_params, src_lv, dest_lv);
|
||||
metadata_pdu
|
||||
.write_to_bytes(&mut buf)
|
||||
.expect("writing metadata PDU failed");
|
||||
|
||||
let packet_info = PacketInfo::new(&buf).expect("creating packet info failed");
|
||||
assert_eq!(packet_info.pdu_type(), PduType::FileDirective);
|
||||
assert!(packet_info.pdu_directive().is_some());
|
||||
assert_eq!(
|
||||
packet_info.pdu_directive().unwrap(),
|
||||
FileDirectiveType::MetadataPdu
|
||||
);
|
||||
assert_eq!(packet_info.target(), PacketTarget::DestEntity);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filedata_pdu_info() {
|
||||
let mut buf: [u8; 128] = [0; 128];
|
||||
let pdu_header = generic_pdu_header();
|
||||
let file_data_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 0, &[]);
|
||||
file_data_pdu
|
||||
.write_to_bytes(&mut buf)
|
||||
.expect("writing file data PDU failed");
|
||||
let packet_info = PacketInfo::new(&buf).expect("creating packet info failed");
|
||||
assert_eq!(packet_info.pdu_type(), PduType::FileData);
|
||||
assert!(packet_info.pdu_directive().is_none());
|
||||
assert_eq!(packet_info.target(), PacketTarget::DestEntity);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_eof_pdu_info() {
|
||||
let mut buf: [u8; 128] = [0; 128];
|
||||
let pdu_header = generic_pdu_header();
|
||||
let eof_pdu = EofPdu::new_no_error(pdu_header, 0, 0);
|
||||
eof_pdu
|
||||
.write_to_bytes(&mut buf)
|
||||
.expect("writing file data PDU failed");
|
||||
let packet_info = PacketInfo::new(&buf).expect("creating packet info failed");
|
||||
assert_eq!(packet_info.pdu_type(), PduType::FileDirective);
|
||||
assert!(packet_info.pdu_directive().is_some());
|
||||
assert_eq!(
|
||||
packet_info.pdu_directive().unwrap(),
|
||||
FileDirectiveType::EofPdu
|
||||
);
|
||||
}
|
||||
}
|
15
satrs/src/cfdp/source.rs
Normal file
@ -0,0 +1,15 @@
|
||||
#![allow(dead_code)]
|
||||
use spacepackets::util::UnsignedByteField;
|
||||
|
||||
pub struct SourceHandler {
|
||||
id: UnsignedByteField,
|
||||
}
|
||||
|
||||
impl SourceHandler {
|
||||
pub fn new(id: impl Into<UnsignedByteField>) -> Self {
|
||||
Self { id: id.into() }
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {}
|
96
satrs/src/cfdp/user.rs
Normal file
@ -0,0 +1,96 @@
|
||||
use spacepackets::{
|
||||
cfdp::{
|
||||
pdu::{
|
||||
file_data::SegmentMetadata,
|
||||
finished::{DeliveryCode, FileStatus},
|
||||
},
|
||||
tlv::{msg_to_user::MsgToUserTlv, WritableTlv},
|
||||
ConditionCode,
|
||||
},
|
||||
util::UnsignedByteField,
|
||||
};
|
||||
|
||||
use super::TransactionId;
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct TransactionFinishedParams {
|
||||
pub id: TransactionId,
|
||||
pub condition_code: ConditionCode,
|
||||
pub delivery_code: DeliveryCode,
|
||||
pub file_status: FileStatus,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct MetadataReceivedParams<'src_file, 'dest_file, 'msgs_to_user> {
|
||||
pub id: TransactionId,
|
||||
pub source_id: UnsignedByteField,
|
||||
pub file_size: u64,
|
||||
pub src_file_name: &'src_file str,
|
||||
pub dest_file_name: &'dest_file str,
|
||||
pub msgs_to_user: &'msgs_to_user [MsgToUserTlv<'msgs_to_user>],
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
#[derive(Debug)]
|
||||
pub struct OwnedMetadataRecvdParams {
|
||||
pub id: TransactionId,
|
||||
pub source_id: UnsignedByteField,
|
||||
pub file_size: u64,
|
||||
pub src_file_name: alloc::string::String,
|
||||
pub dest_file_name: alloc::string::String,
|
||||
pub msgs_to_user: alloc::vec::Vec<alloc::vec::Vec<u8>>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl From<MetadataReceivedParams<'_, '_, '_>> for OwnedMetadataRecvdParams {
|
||||
fn from(value: MetadataReceivedParams) -> Self {
|
||||
Self::from(&value)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl From<&MetadataReceivedParams<'_, '_, '_>> for OwnedMetadataRecvdParams {
|
||||
fn from(value: &MetadataReceivedParams) -> Self {
|
||||
Self {
|
||||
id: value.id,
|
||||
source_id: value.source_id,
|
||||
file_size: value.file_size,
|
||||
src_file_name: value.src_file_name.into(),
|
||||
dest_file_name: value.dest_file_name.into(),
|
||||
msgs_to_user: value.msgs_to_user.iter().map(|tlv| tlv.to_vec()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct FileSegmentRecvdParams<'seg_meta> {
|
||||
pub id: TransactionId,
|
||||
pub offset: u64,
|
||||
pub length: usize,
|
||||
pub segment_metadata: Option<&'seg_meta SegmentMetadata<'seg_meta>>,
|
||||
}
|
||||
|
||||
pub trait CfdpUser {
|
||||
fn transaction_indication(&mut self, id: &TransactionId);
|
||||
fn eof_sent_indication(&mut self, id: &TransactionId);
|
||||
fn transaction_finished_indication(&mut self, finished_params: &TransactionFinishedParams);
|
||||
fn metadata_recvd_indication(&mut self, md_recvd_params: &MetadataReceivedParams);
|
||||
fn file_segment_recvd_indication(&mut self, segment_recvd_params: &FileSegmentRecvdParams);
|
||||
// TODO: The standard does not strictly specify how the report information looks..
|
||||
fn report_indication(&mut self, id: &TransactionId);
|
||||
fn suspended_indication(&mut self, id: &TransactionId, condition_code: ConditionCode);
|
||||
fn resumed_indication(&mut self, id: &TransactionId, progress: u64);
|
||||
fn fault_indication(
|
||||
&mut self,
|
||||
id: &TransactionId,
|
||||
condition_code: ConditionCode,
|
||||
progress: u64,
|
||||
);
|
||||
fn abandoned_indication(
|
||||
&mut self,
|
||||
id: &TransactionId,
|
||||
condition_code: ConditionCode,
|
||||
progress: u64,
|
||||
);
|
||||
fn eof_recvd_indication(&mut self, id: &TransactionId);
|
||||
}
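
// Editor's note: a minimal sketch, not part of the original commit, of a [CfdpUser]
// implementation which only records EOF and finished indications and ignores everything else.
// A flight implementation would typically forward these indications as telemetry. The `alloc`
// based [Vec] import and the `UnsignedByteFieldU8` helper are assumptions.
#[cfg(all(test, feature = "alloc"))]
mod example_user {
    use super::*;
    use alloc::vec::Vec;

    #[derive(Default)]
    struct RecordingCfdpUser {
        finished: Vec<TransactionId>,
        eofs_received: Vec<TransactionId>,
    }

    impl CfdpUser for RecordingCfdpUser {
        fn transaction_indication(&mut self, _id: &TransactionId) {}
        fn eof_sent_indication(&mut self, _id: &TransactionId) {}
        fn transaction_finished_indication(&mut self, params: &TransactionFinishedParams) {
            self.finished.push(params.id);
        }
        fn metadata_recvd_indication(&mut self, _params: &MetadataReceivedParams) {}
        fn file_segment_recvd_indication(&mut self, _params: &FileSegmentRecvdParams) {}
        fn report_indication(&mut self, _id: &TransactionId) {}
        fn suspended_indication(&mut self, _id: &TransactionId, _cond: ConditionCode) {}
        fn resumed_indication(&mut self, _id: &TransactionId, _progress: u64) {}
        fn fault_indication(&mut self, _id: &TransactionId, _cond: ConditionCode, _progress: u64) {}
        fn abandoned_indication(&mut self, _id: &TransactionId, _cond: ConditionCode, _progress: u64) {}
        fn eof_recvd_indication(&mut self, id: &TransactionId) {
            self.eofs_received.push(*id);
        }
    }

    #[test]
    fn indications_are_recorded() {
        use spacepackets::util::UnsignedByteFieldU8;
        let mut user = RecordingCfdpUser::default();
        let id = TransactionId::new(
            UnsignedByteFieldU8::new(1).into(),
            UnsignedByteFieldU8::new(3).into(),
        );
        user.eof_recvd_indication(&id);
        assert_eq!(user.eofs_received.len(), 1);
    }
}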
|
0
satrs/src/device.rs
Normal file
281
satrs/src/encoding/ccsds.rs
Normal file
@ -0,0 +1,281 @@
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::vec::Vec;
|
||||
#[cfg(feature = "alloc")]
|
||||
use hashbrown::HashSet;
|
||||
use spacepackets::PacketId;
|
||||
|
||||
use crate::tmtc::ReceivesTcCore;
|
||||
|
||||
pub trait PacketIdLookup {
|
||||
fn validate(&self, packet_id: u16) -> bool;
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl PacketIdLookup for Vec<u16> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&packet_id)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl PacketIdLookup for HashSet<u16> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&packet_id)
|
||||
}
|
||||
}
|
||||
|
||||
impl PacketIdLookup for [u16] {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.binary_search(&packet_id).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
impl PacketIdLookup for &[u16] {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.binary_search(&packet_id).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl PacketIdLookup for Vec<PacketId> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&PacketId::from(packet_id))
|
||||
}
|
||||
}
|
||||
#[cfg(feature = "alloc")]
|
||||
impl PacketIdLookup for HashSet<PacketId> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&PacketId::from(packet_id))
|
||||
}
|
||||
}
|
||||
|
||||
impl PacketIdLookup for [PacketId] {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.binary_search(&PacketId::from(packet_id)).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
impl PacketIdLookup for &[PacketId] {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.binary_search(&PacketId::from(packet_id)).is_ok()
|
||||
}
|
||||
}
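
// Editor's note: a short sketch, not part of the original commit. The slice based lookups
// use a binary search, so the packet ID slice must be sorted in ascending order for the
// validation to work reliably.
#[cfg(test)]
mod packet_id_lookup_example {
    use super::PacketIdLookup;

    #[test]
    fn sorted_slice_lookup() {
        // Raw packet IDs, sorted ascending as required by the binary search.
        let valid_ids: [u16; 3] = [0x1802, 0x1810, 0x1875];
        assert!(valid_ids.as_slice().validate(0x1810));
        assert!(!valid_ids.as_slice().validate(0x1811));
    }
}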
|
||||
|
||||
/// This function parses a given buffer for tightly packed CCSDS space packets. It uses the
|
||||
/// [PacketId] field of the CCSDS packets to detect the start of a CCSDS space packet and then
|
||||
/// uses the length field of the packet to extract CCSDS packets.
|
||||
///
|
||||
/// This function is also able to deal with broken tail packets at the end as long as the parser
/// can read the full 7 bytes which constitute a space packet header plus one byte minimal size.
|
||||
/// If broken tail packets are detected, they are moved to the front of the buffer, and the write
|
||||
/// index for future write operations will be written to the `next_write_idx` argument.
|
||||
///
|
||||
/// The parser will write all packets which were decoded successfully to the given `tc_receiver`
|
||||
/// and return the number of packets found. If the [ReceivesTcCore::pass_tc] calls fails, the
|
||||
/// error will be returned.
|
||||
pub fn parse_buffer_for_ccsds_space_packets<E>(
|
||||
buf: &mut [u8],
|
||||
packet_id_lookup: &(impl PacketIdLookup + ?Sized),
|
||||
tc_receiver: &mut (impl ReceivesTcCore<Error = E> + ?Sized),
|
||||
next_write_idx: &mut usize,
|
||||
) -> Result<u32, E> {
|
||||
*next_write_idx = 0;
|
||||
let mut packets_found = 0;
|
||||
let mut current_idx = 0;
|
||||
let buf_len = buf.len();
|
||||
loop {
|
||||
if current_idx + 7 >= buf.len() {
|
||||
break;
|
||||
}
|
||||
let packet_id = u16::from_be_bytes(buf[current_idx..current_idx + 2].try_into().unwrap());
|
||||
if packet_id_lookup.validate(packet_id) {
|
||||
let length_field =
|
||||
u16::from_be_bytes(buf[current_idx + 4..current_idx + 6].try_into().unwrap());
|
||||
let packet_size = length_field + 7;
|
||||
if (current_idx + packet_size as usize) <= buf_len {
|
||||
tc_receiver.pass_tc(&buf[current_idx..current_idx + packet_size as usize])?;
|
||||
packets_found += 1;
|
||||
} else {
|
||||
// Move packet to start of buffer if applicable.
|
||||
if current_idx > 0 {
|
||||
buf.copy_within(current_idx.., 0);
|
||||
*next_write_idx = buf.len() - current_idx;
|
||||
}
|
||||
}
|
||||
current_idx += packet_size as usize;
|
||||
continue;
|
||||
}
|
||||
current_idx += 1;
|
||||
}
|
||||
Ok(packets_found)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use spacepackets::{
|
||||
ecss::{tc::PusTcCreator, WritablePusPacket},
|
||||
PacketId, SpHeader,
|
||||
};
|
||||
|
||||
use crate::encoding::tests::TcCacher;
|
||||
|
||||
use super::parse_buffer_for_ccsds_space_packets;
|
||||
|
||||
const TEST_APID_0: u16 = 0x02;
|
||||
const TEST_APID_1: u16 = 0x10;
|
||||
const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0);
|
||||
const TEST_PACKET_ID_1: PacketId = PacketId::const_tc(true, TEST_APID_1);
|
||||
|
||||
#[test]
|
||||
fn test_basic() {
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
.expect("writing packet failed");
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&mut buffer,
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert!(parse_result.is_ok());
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 1);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 1);
|
||||
assert_eq!(
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[..packet_len]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multi_packet() {
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len_ping = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
.expect("writing packet failed");
|
||||
let packet_len_action = action_tc
|
||||
.write_to_bytes(&mut buffer[packet_len_ping..])
|
||||
.expect("writing packet failed");
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&mut buffer,
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert!(parse_result.is_ok());
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 2);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 2);
|
||||
assert_eq!(
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[..packet_len_ping]
|
||||
);
|
||||
assert_eq!(
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[packet_len_ping..packet_len_ping + packet_len_action]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multi_apid() {
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
|
||||
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len_ping = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
.expect("writing packet failed");
|
||||
let packet_len_action = action_tc
|
||||
.write_to_bytes(&mut buffer[packet_len_ping..])
|
||||
.expect("writing packet failed");
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0, TEST_PACKET_ID_1];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&mut buffer,
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert!(parse_result.is_ok());
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 2);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 2);
|
||||
assert_eq!(
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[..packet_len_ping]
|
||||
);
|
||||
assert_eq!(
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[packet_len_ping..packet_len_ping + packet_len_action]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_split_packet_multi() {
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
|
||||
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len_ping = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
.expect("writing packet failed");
|
||||
let packet_len_action = action_tc
|
||||
.write_to_bytes(&mut buffer[packet_len_ping..])
|
||||
.expect("writing packet failed");
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0, TEST_PACKET_ID_1];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&mut buffer[..packet_len_ping + packet_len_action - 4],
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert!(parse_result.is_ok());
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 1);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 1);
|
||||
// The broken packet was moved to the start of the buffer, so the next write index is the
// length of the incomplete tail packet, which is missing its last 4 bytes.
|
||||
assert_eq!(next_write_idx, packet_len_action - 4);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_one_split_packet() {
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len_ping = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
.expect("writing packet failed");
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0, TEST_PACKET_ID_1];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&mut buffer[..packet_len_ping - 4],
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert_eq!(next_write_idx, 0);
|
||||
assert!(parse_result.is_ok());
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 0);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 0);
|
||||
}
|
||||
}
|
263
satrs/src/encoding/cobs.rs
Normal file
@ -0,0 +1,263 @@
|
||||
use crate::tmtc::ReceivesTcCore;
|
||||
use cobs::{decode_in_place, encode, max_encoding_length};
|
||||
|
||||
/// This function encodes the given packet with COBS and also wraps the encoded packet with
|
||||
/// the sentinel value 0. It can be used repeatedly on the same encoding buffer by passing
/// and incrementing a mutable reference to the current packet index, which is also used
/// to retrieve the total encoded size.
|
||||
///
|
||||
/// This function will return [false] if the given encoding buffer is not large enough to hold
/// the encoded packet and the two sentinel bytes and [true] if the encoding was successful.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// ```
|
||||
/// use cobs::decode_in_place_report;
|
||||
/// use satrs::encoding::{encode_packet_with_cobs};
|
||||
///
|
||||
/// const SIMPLE_PACKET: [u8; 5] = [1, 2, 3, 4, 5];
|
||||
/// const INVERTED_PACKET: [u8; 5] = [5, 4, 3, 2, 1];
|
||||
///
|
||||
/// let mut encoding_buf: [u8; 32] = [0; 32];
|
||||
/// let mut current_idx = 0;
|
||||
/// assert!(encode_packet_with_cobs(&SIMPLE_PACKET, &mut encoding_buf, &mut current_idx));
|
||||
/// assert!(encode_packet_with_cobs(&INVERTED_PACKET, &mut encoding_buf, &mut current_idx));
|
||||
/// assert_eq!(encoding_buf[0], 0);
|
||||
/// let dec_report = decode_in_place_report(&mut encoding_buf[1..]).expect("decoding failed");
|
||||
/// assert_eq!(encoding_buf[1 + dec_report.src_used], 0);
|
||||
/// assert_eq!(dec_report.dst_used, 5);
|
||||
/// assert_eq!(current_idx, 16);
|
||||
/// ```
|
||||
pub fn encode_packet_with_cobs(
|
||||
packet: &[u8],
|
||||
encoded_buf: &mut [u8],
|
||||
current_idx: &mut usize,
|
||||
) -> bool {
|
||||
let max_encoding_len = max_encoding_length(packet.len());
|
||||
if *current_idx + max_encoding_len + 2 > encoded_buf.len() {
|
||||
return false;
|
||||
}
|
||||
encoded_buf[*current_idx] = 0;
|
||||
*current_idx += 1;
|
||||
*current_idx += encode(packet, &mut encoded_buf[*current_idx..]);
|
||||
encoded_buf[*current_idx] = 0;
|
||||
*current_idx += 1;
|
||||
true
|
||||
}
|
||||
|
||||
/// This function parses a given buffer for COBS encoded packets. The packet structure is
|
||||
/// expected to be like this, assuming a sentinel value of 0 as the packet delimiter:
|
||||
///
|
||||
/// 0 | ... Encoded Packet Data ... | 0 | 0 | ... Encoded Packet Data ... | 0
|
||||
///
|
||||
/// This function is also able to deal with broken tail packets at the end. If broken tail
|
||||
/// packets are detected, they are moved to the front of the buffer, and the write index for
|
||||
/// future write operations will be written to the `next_write_idx` argument.
|
||||
///
|
||||
/// The parser will write all packets which were decoded successfully to the given `tc_receiver`.
|
||||
pub fn parse_buffer_for_cobs_encoded_packets<E>(
|
||||
buf: &mut [u8],
|
||||
tc_receiver: &mut dyn ReceivesTcCore<Error = E>,
|
||||
next_write_idx: &mut usize,
|
||||
) -> Result<u32, E> {
|
||||
let mut start_index_packet = 0;
|
||||
let mut start_found = false;
|
||||
let mut last_byte = false;
|
||||
let mut packets_found = 0;
|
||||
for i in 0..buf.len() {
|
||||
if i == buf.len() - 1 {
|
||||
last_byte = true;
|
||||
}
|
||||
if buf[i] == 0 {
|
||||
if !start_found && !last_byte && buf[i + 1] == 0 {
|
||||
// Special case: Consecutive sentinel values or all zeroes.
|
||||
// Skip.
|
||||
continue;
|
||||
}
|
||||
if start_found {
|
||||
let decode_result = decode_in_place(&mut buf[start_index_packet..i]);
|
||||
if let Ok(packet_len) = decode_result {
|
||||
packets_found += 1;
|
||||
tc_receiver
|
||||
.pass_tc(&buf[start_index_packet..start_index_packet + packet_len])?;
|
||||
}
|
||||
start_found = false;
|
||||
} else {
|
||||
start_index_packet = i + 1;
|
||||
start_found = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Move split frame at the end to the front of the buffer.
|
||||
if start_index_packet > 0 && start_found && packets_found > 0 {
|
||||
buf.copy_within(start_index_packet - 1.., 0);
|
||||
*next_write_idx = buf.len() - start_index_packet + 1;
|
||||
}
|
||||
Ok(packets_found)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use cobs::encode;
|
||||
|
||||
use crate::encoding::tests::{encode_simple_packet, TcCacher, INVERTED_PACKET, SIMPLE_PACKET};
|
||||
|
||||
use super::parse_buffer_for_cobs_encoded_packets;
|
||||
|
||||
#[test]
|
||||
fn test_parsing_simple_packet() {
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
let mut current_idx = 0;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
let mut next_read_idx = 0;
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
&mut encoded_buf[0..current_idx],
|
||||
&mut test_sender,
|
||||
&mut next_read_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 1);
|
||||
assert_eq!(test_sender.tc_queue.len(), 1);
|
||||
let packet = &test_sender.tc_queue[0];
|
||||
assert_eq!(packet, &SIMPLE_PACKET);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parsing_consecutive_packets() {
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
let mut current_idx = 0;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
|
||||
// Second packet
|
||||
encoded_buf[current_idx] = 0;
|
||||
current_idx += 1;
|
||||
current_idx += encode(&INVERTED_PACKET, &mut encoded_buf[current_idx..]);
|
||||
encoded_buf[current_idx] = 0;
|
||||
current_idx += 1;
|
||||
let mut next_read_idx = 0;
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
&mut encoded_buf[0..current_idx],
|
||||
&mut test_sender,
|
||||
&mut next_read_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 2);
|
||||
assert_eq!(test_sender.tc_queue.len(), 2);
|
||||
let packet0 = &test_sender.tc_queue[0];
|
||||
assert_eq!(packet0, &SIMPLE_PACKET);
|
||||
let packet1 = &test_sender.tc_queue[1];
|
||||
assert_eq!(packet1, &INVERTED_PACKET);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_split_tail_packet_only() {
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
let mut current_idx = 0;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
let mut next_read_idx = 0;
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
// Cut off the sentinel byte at the end.
|
||||
&mut encoded_buf[0..current_idx - 1],
|
||||
&mut test_sender,
|
||||
&mut next_read_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 0);
|
||||
assert_eq!(test_sender.tc_queue.len(), 0);
|
||||
assert_eq!(next_read_idx, 0);
|
||||
}
|
||||
|
||||
fn generic_test_split_packet(cut_off: usize) {
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
assert!(cut_off < INVERTED_PACKET.len() + 1);
|
||||
let mut current_idx = 0;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
// Second packet
|
||||
encoded_buf[current_idx] = 0;
|
||||
let packet_start = current_idx;
|
||||
current_idx += 1;
|
||||
let encoded_len = encode(&INVERTED_PACKET, &mut encoded_buf[current_idx..]);
|
||||
assert_eq!(encoded_len, 6);
|
||||
current_idx += encoded_len;
|
||||
// We cut off the sentinel byte, so we expect the write index to be the length of the
// packet minus the sentinel byte plus the first sentinel byte.
|
||||
let next_expected_write_idx = 1 + encoded_len - cut_off + 1;
|
||||
encoded_buf[current_idx] = 0;
|
||||
current_idx += 1;
|
||||
let mut next_write_idx = 0;
|
||||
let expected_at_start = encoded_buf[packet_start..current_idx - cut_off].to_vec();
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
// Cut off the sentinel byte at the end.
|
||||
&mut encoded_buf[0..current_idx - cut_off],
|
||||
&mut test_sender,
|
||||
&mut next_write_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 1);
|
||||
assert_eq!(test_sender.tc_queue.len(), 1);
|
||||
assert_eq!(&test_sender.tc_queue[0], &SIMPLE_PACKET);
|
||||
assert_eq!(next_write_idx, next_expected_write_idx);
|
||||
assert_eq!(encoded_buf[..next_expected_write_idx], expected_at_start);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_one_packet_and_split_tail_packet_0() {
|
||||
generic_test_split_packet(1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_one_packet_and_split_tail_packet_1() {
|
||||
generic_test_split_packet(2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_one_packet_and_split_tail_packet_2() {
|
||||
generic_test_split_packet(3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zero_at_end() {
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
let mut next_write_idx = 0;
|
||||
let mut current_idx = 0;
|
||||
encoded_buf[current_idx] = 5;
|
||||
current_idx += 1;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
encoded_buf[current_idx] = 0;
|
||||
current_idx += 1;
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
// Cut off the sentinel byte at the end.
|
||||
&mut encoded_buf[0..current_idx],
|
||||
&mut test_sender,
|
||||
&mut next_write_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 1);
|
||||
assert_eq!(test_sender.tc_queue.len(), 1);
|
||||
assert_eq!(&test_sender.tc_queue[0], &SIMPLE_PACKET);
|
||||
assert_eq!(next_write_idx, 1);
|
||||
assert_eq!(encoded_buf[0], 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_all_zeroes() {
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut all_zeroes: [u8; 5] = [0; 5];
|
||||
let mut next_write_idx = 0;
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
// Cut off the sentinel byte at the end.
|
||||
&mut all_zeroes,
|
||||
&mut test_sender,
|
||||
&mut next_write_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 0);
|
||||
assert!(test_sender.tc_queue.is_empty());
|
||||
assert_eq!(next_write_idx, 0);
|
||||
}
|
||||
}
|
40
satrs/src/encoding/mod.rs
Normal file
@ -0,0 +1,40 @@
|
||||
pub mod ccsds;
|
||||
pub mod cobs;
|
||||
|
||||
pub use crate::encoding::ccsds::parse_buffer_for_ccsds_space_packets;
|
||||
pub use crate::encoding::cobs::{encode_packet_with_cobs, parse_buffer_for_cobs_encoded_packets};
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use alloc::{collections::VecDeque, vec::Vec};
|
||||
|
||||
use crate::tmtc::ReceivesTcCore;
|
||||
|
||||
use super::cobs::encode_packet_with_cobs;
|
||||
|
||||
pub(crate) const SIMPLE_PACKET: [u8; 5] = [1, 2, 3, 4, 5];
|
||||
pub(crate) const INVERTED_PACKET: [u8; 5] = [5, 4, 3, 2, 1];
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct TcCacher {
|
||||
pub(crate) tc_queue: VecDeque<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl ReceivesTcCore for TcCacher {
|
||||
type Error = ();
|
||||
|
||||
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
self.tc_queue.push_back(tc_raw.to_vec());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn encode_simple_packet(encoded_buf: &mut [u8], current_idx: &mut usize) {
|
||||
encode_packet_with_cobs(&SIMPLE_PACKET, encoded_buf, current_idx);
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn encode_inverted_packet(encoded_buf: &mut [u8], current_idx: &mut usize) {
|
||||
encode_packet_with_cobs(&INVERTED_PACKET, encoded_buf, current_idx);
|
||||
}
|
||||
}
|
710
satrs/src/event_man.rs
Normal file
@ -0,0 +1,710 @@
|
||||
//! Event management and forwarding
|
||||
//!
|
||||
//! This module provides components to perform event routing. The most important component for this
|
||||
//! task is the [EventManager]. It receives all events and then routes them to event subscribers
|
||||
//! where appropriate. One common use case for satellite systems is to offer a light-weight
|
||||
//! publish-subscribe and IPC mechanism for software and hardware events which are also
|
||||
//! packaged as telemetry (TM) or can trigger a system response.
|
||||
//!
|
||||
//! It is recommended to read the
|
||||
//! [sat-rs book chapter](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/book/events.html)
|
||||
//! about events first.
|
||||
//!
|
||||
//! The event manager has a listener table abstracted by the [ListenerTable], which maps
|
||||
//! listener groups identified by [ListenerKey]s to a [sender ID][ChannelId].
|
||||
//! It also contains a sender table abstracted by the [SenderTable] which maps these sender IDs
|
||||
//! to a concrete [SendEventProvider]s. A simple approach would be to use one send event provider
|
||||
//! for each OBSW thread and then subscribe for all interesting events for a particular thread
|
||||
//! using the send event provider ID.
|
||||
//!
|
||||
//! This can be done with the [EventManager] like this:
|
||||
//!
|
||||
//! 1. Provide a concrete [EventReceiver] implementation. This abstraction allows using different
//! message queue backends. A straightforward implementation where dynamic memory allocation is
|
||||
//! not a big concern could use [std::sync::mpsc::channel] to do this and is provided in
|
||||
//! form of the [MpscEventReceiver].
|
||||
//! 2. To set up event creators, create channel pairs using some message queue implementation.
|
||||
//! Each event creator gets a (cloned) sender component which allows it to send events to the
|
||||
//! manager.
|
||||
//! 3. The event manager receives the receiver component as part of a [EventReceiver]
|
||||
//! implementation so all events are routed to the manager.
|
||||
//! 4. Create the [send event providers][SendEventProvider]s which allow routing events to
|
||||
//! subscribers. You can now use their [sender IDs][SendEventProvider::id] to subscribe for
|
||||
//! event groups, for example by using the [EventManager::subscribe_single] method.
|
||||
//! 5. Add the send provider as well using the [EventManager::add_sender] call so the event
//! manager can route listener groups to the send provider.
|
||||
//!
|
||||
//! Some components like a PUS Event Service or PUS Event Action Service might require all
|
||||
//! events to package them as telemetry or start actions where applicable.
|
||||
//! Other components might only be interested in certain events. For example, a thermal system
|
||||
//! handler might only be interested in temperature events generated by a thermal sensor component.
|
||||
//!
|
||||
//! # Examples
|
||||
//!
|
||||
//! You can check [integration test](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-core/tests/pus_events.rs)
|
||||
//! for a concrete example using multi-threading where events are routed to
|
||||
//! different threads.
|
||||
use crate::events::{EventU16, EventU32, GenericEvent, LargestEventRaw, LargestGroupIdRaw};
|
||||
use crate::params::{Params, ParamsHeapless};
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::boxed::Box;
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::vec;
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::vec::Vec;
|
||||
use core::slice::Iter;
|
||||
#[cfg(feature = "alloc")]
|
||||
use hashbrown::HashMap;
|
||||
|
||||
use crate::ChannelId;
|
||||
#[cfg(feature = "std")]
|
||||
pub use stdmod::*;
|
||||
|
||||
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
|
||||
pub enum ListenerKey {
|
||||
Single(LargestEventRaw),
|
||||
Group(LargestGroupIdRaw),
|
||||
All,
|
||||
}
|
||||
|
||||
pub type EventWithHeaplessAuxData<Event> = (Event, Option<ParamsHeapless>);
|
||||
pub type EventU32WithHeaplessAuxData = EventWithHeaplessAuxData<EventU32>;
|
||||
pub type EventU16WithHeaplessAuxData = EventWithHeaplessAuxData<EventU16>;
|
||||
|
||||
pub type EventWithAuxData<Event> = (Event, Option<Params>);
|
||||
pub type EventU32WithAuxData = EventWithAuxData<EventU32>;
|
||||
pub type EventU16WithAuxData = EventWithAuxData<EventU16>;
|
||||
|
||||
pub trait SendEventProvider<Provider: GenericEvent, AuxDataProvider = Params> {
|
||||
type Error;
|
||||
|
||||
fn id(&self) -> ChannelId;
|
||||
fn send_no_data(&self, event: Provider) -> Result<(), Self::Error> {
|
||||
self.send(event, None)
|
||||
}
|
||||
fn send(&self, event: Provider, aux_data: Option<AuxDataProvider>) -> Result<(), Self::Error>;
|
||||
}
|
||||
|
||||
/// Generic abstraction for an event receiver.
|
||||
pub trait EventReceiver<Event: GenericEvent, AuxDataProvider = Params> {
|
||||
/// This function has to be provided by any event receiver. A receive call may or may not return
|
||||
/// an event.
|
||||
///
|
||||
/// To allow returning arbitrary additional auxiliary data, the returned tuple also contains
/// an optional auxiliary data field. Implementations should take care to avoid panics, for
/// example due to size mismatches or out-of-bounds writes when assembling this data.
|
||||
fn receive(&self) -> Option<(Event, Option<AuxDataProvider>)>;
|
||||
}
|
||||
|
||||
pub trait ListenerTable {
|
||||
fn get_listeners(&self) -> Vec<ListenerKey>;
|
||||
fn contains_listener(&self, key: &ListenerKey) -> bool;
|
||||
fn get_listener_ids(&self, key: &ListenerKey) -> Option<Iter<ChannelId>>;
|
||||
fn add_listener(&mut self, key: ListenerKey, sender_id: ChannelId) -> bool;
|
||||
fn remove_duplicates(&mut self, key: &ListenerKey);
|
||||
}
|
||||
|
||||
pub trait SenderTable<SendProviderError, Event: GenericEvent = EventU32, AuxDataProvider = Params> {
|
||||
fn contains_send_event_provider(&self, id: &ChannelId) -> bool;
|
||||
fn get_send_event_provider(
|
||||
&self,
|
||||
id: &ChannelId,
|
||||
) -> Option<&dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>>;
|
||||
fn add_send_event_provider(
|
||||
&mut self,
|
||||
send_provider: Box<
|
||||
dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>,
|
||||
>,
|
||||
) -> bool;
|
||||
}
|
||||
|
||||
/// Generic event manager implementation.
|
||||
///
|
||||
/// # Generics
|
||||
///
|
||||
/// * `SendProviderError`: [SendEventProvider] error type
|
||||
/// * `Event`: Concrete event provider, currently either [EventU32] or [EventU16]
|
||||
/// * `AuxDataProvider`: Concrete auxiliary data provider, currently either [Params] or
|
||||
/// [ParamsHeapless]
|
||||
pub struct EventManager<SendProviderError, Event: GenericEvent = EventU32, AuxDataProvider = Params>
|
||||
{
|
||||
listener_table: Box<dyn ListenerTable>,
|
||||
sender_table: Box<dyn SenderTable<SendProviderError, Event, AuxDataProvider>>,
|
||||
event_receiver: Box<dyn EventReceiver<Event, AuxDataProvider>>,
|
||||
}
|
||||
|
||||
/// Safety: It is safe to implement [Send] because all fields in the [EventManager] are [Send]
|
||||
/// as well.
|
||||
#[cfg(feature = "std")]
|
||||
unsafe impl<E, Event: GenericEvent + Send, AuxDataProvider: Send> Send
|
||||
for EventManager<E, Event, AuxDataProvider>
|
||||
{
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub type EventManagerWithMpscQueue<Event, AuxDataProvider> = EventManager<
|
||||
std::sync::mpsc::SendError<(Event, Option<AuxDataProvider>)>,
|
||||
Event,
|
||||
AuxDataProvider,
|
||||
>;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum EventRoutingResult<Event: GenericEvent, AuxDataProvider> {
|
||||
/// No event was received
|
||||
Empty,
|
||||
/// An event was received and routed.
|
||||
/// The first tuple entry will contain the number of recipients.
|
||||
Handled(u32, Event, Option<AuxDataProvider>),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum EventRoutingError<E> {
|
||||
SendError(E),
|
||||
NoSendersForKey(ListenerKey),
|
||||
NoSenderForId(ChannelId),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct EventRoutingErrorsWithResult<Event: GenericEvent, AuxDataProvider, E> {
|
||||
pub result: EventRoutingResult<Event, AuxDataProvider>,
|
||||
pub errors: [Option<EventRoutingError<E>>; 3],
|
||||
}
|
||||
|
||||
impl<E, Event: GenericEvent + Copy> EventManager<E, Event> {
|
||||
pub fn remove_duplicates(&mut self, key: &ListenerKey) {
|
||||
self.listener_table.remove_duplicates(key)
|
||||
}
|
||||
|
||||
/// Subscribe for a unique event.
|
||||
pub fn subscribe_single(&mut self, event: &Event, sender_id: ChannelId) {
|
||||
self.update_listeners(ListenerKey::Single(event.raw_as_largest_type()), sender_id);
|
||||
}
|
||||
|
||||
/// Subscribe for an event group.
|
||||
pub fn subscribe_group(&mut self, group_id: LargestGroupIdRaw, sender_id: ChannelId) {
|
||||
self.update_listeners(ListenerKey::Group(group_id), sender_id);
|
||||
}
|
||||
|
||||
/// Subscribe for all events received by the manager.
|
||||
///
|
||||
/// For example, this can be useful for a handler component which sends every event as
|
||||
/// a telemetry packet.
|
||||
pub fn subscribe_all(&mut self, sender_id: ChannelId) {
|
||||
self.update_listeners(ListenerKey::All, sender_id);
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: 'static, Event: GenericEvent + Copy + 'static, AuxDataProvider: Clone + 'static>
|
||||
EventManager<E, Event, AuxDataProvider>
|
||||
{
|
||||
/// Create an event manager where the sender table will be the [DefaultSenderTableProvider]
|
||||
/// and the listener table will be the [DefaultListenerTableProvider].
|
||||
pub fn new(event_receiver: Box<dyn EventReceiver<Event, AuxDataProvider>>) -> Self {
|
||||
let listener_table: Box<DefaultListenerTableProvider> = Box::default();
|
||||
let sender_table: Box<DefaultSenderTableProvider<E, Event, AuxDataProvider>> =
|
||||
Box::default();
|
||||
Self::new_custom_tables(listener_table, sender_table, event_receiver)
|
||||
}
|
||||
}
|
||||
|
||||
impl<E, Event: GenericEvent + Copy, AuxDataProvider: Clone>
|
||||
EventManager<E, Event, AuxDataProvider>
|
||||
{
|
||||
pub fn new_custom_tables(
|
||||
listener_table: Box<dyn ListenerTable>,
|
||||
sender_table: Box<dyn SenderTable<E, Event, AuxDataProvider>>,
|
||||
event_receiver: Box<dyn EventReceiver<Event, AuxDataProvider>>,
|
||||
) -> Self {
|
||||
EventManager {
|
||||
listener_table,
|
||||
sender_table,
|
||||
event_receiver,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_sender(
|
||||
&mut self,
|
||||
send_provider: impl SendEventProvider<Event, AuxDataProvider, Error = E> + 'static,
|
||||
) {
|
||||
if !self
|
||||
.sender_table
|
||||
.contains_send_event_provider(&send_provider.id())
|
||||
{
|
||||
self.sender_table
|
||||
.add_send_event_provider(Box::new(send_provider));
|
||||
}
|
||||
}
|
||||
|
||||
fn update_listeners(&mut self, key: ListenerKey, sender_id: ChannelId) {
|
||||
self.listener_table.add_listener(key, sender_id);
|
||||
}
|
||||
|
||||
/// This function will use the cached event receiver and try to receive one event.
|
||||
/// If an event was received, it will try to route that event to all subscribed event listeners.
|
||||
/// If this works without any issues, the [EventRoutingResult] will contain context information
|
||||
/// about the routed event.
|
||||
///
|
||||
/// This function will track up to 3 errors returned as part of the
|
||||
/// [EventRoutingErrorsWithResult] error struct.
|
||||
pub fn try_event_handling(
|
||||
&self,
|
||||
) -> Result<
|
||||
EventRoutingResult<Event, AuxDataProvider>,
|
||||
EventRoutingErrorsWithResult<Event, AuxDataProvider, E>,
|
||||
> {
|
||||
let mut err_idx = 0;
|
||||
let mut err_slice = [None, None, None];
|
||||
let mut num_recipients = 0;
|
||||
let mut add_error = |error: EventRoutingError<E>| {
|
||||
if err_idx < 3 {
|
||||
err_slice[err_idx] = Some(error);
|
||||
err_idx += 1;
|
||||
}
|
||||
};
|
||||
let mut send_handler =
|
||||
|key: &ListenerKey, event: Event, aux_data: &Option<AuxDataProvider>| {
|
||||
if self.listener_table.contains_listener(key) {
|
||||
if let Some(ids) = self.listener_table.get_listener_ids(key) {
|
||||
for id in ids {
|
||||
if let Some(sender) = self.sender_table.get_send_event_provider(id) {
|
||||
if let Err(e) = sender.send(event, aux_data.clone()) {
|
||||
add_error(EventRoutingError::SendError(e));
|
||||
} else {
|
||||
num_recipients += 1;
|
||||
}
|
||||
} else {
|
||||
add_error(EventRoutingError::NoSenderForId(*id));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
add_error(EventRoutingError::NoSendersForKey(*key));
|
||||
}
|
||||
}
|
||||
};
|
||||
if let Some((event, aux_data)) = self.event_receiver.receive() {
|
||||
let single_key = ListenerKey::Single(event.raw_as_largest_type());
|
||||
send_handler(&single_key, event, &aux_data);
|
||||
let group_key = ListenerKey::Group(event.group_id_as_largest_type());
|
||||
send_handler(&group_key, event, &aux_data);
|
||||
send_handler(&ListenerKey::All, event, &aux_data);
|
||||
if err_idx > 0 {
|
||||
return Err(EventRoutingErrorsWithResult {
|
||||
result: EventRoutingResult::Handled(num_recipients, event, aux_data),
|
||||
errors: err_slice,
|
||||
});
|
||||
}
|
||||
return Ok(EventRoutingResult::Handled(num_recipients, event, aux_data));
|
||||
}
|
||||
Ok(EventRoutingResult::Empty)
|
||||
}
|
||||
}
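
// Editor's note: a sketch, not part of the original commit, tying the manager, one event
// creator and one subscriber together with mpsc queues, following the numbered steps in the
// module documentation. The event constant relies on an assumed [EventU32::const_new]
// constructor and [Severity] enum; adapt these to the actual events API if they differ.
#[cfg(all(test, feature = "std"))]
mod event_manager_example {
    use super::*;
    use crate::events::{EventU32, Severity};
    use crate::params::Params;
    use std::boxed::Box;

    #[test]
    fn route_single_event() {
        const SOME_EVENT: EventU32 = EventU32::const_new(Severity::INFO, 0, 5);
        // Steps 1 to 3: the manager owns the receiving end of the event creator channel.
        let (event_tx, event_rx) = std::sync::mpsc::channel();
        let mut event_man: EventManagerWithMpscQueue<EventU32, Params> =
            EventManager::new(Box::new(MpscEventU32Receiver::new(event_rx)));
        // Steps 4 and 5: create a subscriber channel, subscribe it for the event and add it.
        let (sub_tx, sub_rx) = std::sync::mpsc::channel();
        let send_provider = MpscEventU32SendProvider::new(1, sub_tx);
        event_man.subscribe_single(&SOME_EVENT, send_provider.id());
        event_man.add_sender(send_provider);
        // An event creator publishes the event and the manager routes it to the subscriber.
        event_tx.send((SOME_EVENT, None)).unwrap();
        let result = event_man.try_event_handling().expect("event routing failed");
        if let EventRoutingResult::Handled(num_recipients, event, _) = result {
            assert_eq!(num_recipients, 1);
            assert_eq!(event, SOME_EVENT);
        } else {
            panic!("expected the event to be handled");
        }
        assert_eq!(sub_rx.try_recv().unwrap().0, SOME_EVENT);
    }
}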
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct DefaultListenerTableProvider {
|
||||
listeners: HashMap<ListenerKey, Vec<ChannelId>>,
|
||||
}
|
||||
|
||||
pub struct DefaultSenderTableProvider<
|
||||
SendProviderError,
|
||||
Event: GenericEvent = EventU32,
|
||||
AuxDataProvider = Params,
|
||||
> {
|
||||
senders: HashMap<
|
||||
ChannelId,
|
||||
Box<dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>>,
|
||||
>,
|
||||
}
|
||||
|
||||
impl<SendProviderError, Event: GenericEvent, AuxDataProvider> Default
|
||||
for DefaultSenderTableProvider<SendProviderError, Event, AuxDataProvider>
|
||||
{
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
senders: HashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ListenerTable for DefaultListenerTableProvider {
|
||||
fn get_listeners(&self) -> Vec<ListenerKey> {
|
||||
let mut key_list = Vec::new();
|
||||
for key in self.listeners.keys() {
|
||||
key_list.push(*key);
|
||||
}
|
||||
key_list
|
||||
}
|
||||
|
||||
fn contains_listener(&self, key: &ListenerKey) -> bool {
|
||||
self.listeners.contains_key(key)
|
||||
}
|
||||
|
||||
fn get_listener_ids(&self, key: &ListenerKey) -> Option<Iter<ChannelId>> {
|
||||
self.listeners.get(key).map(|vec| vec.iter())
|
||||
}
|
||||
|
||||
fn add_listener(&mut self, key: ListenerKey, sender_id: ChannelId) -> bool {
|
||||
if let Some(existing_list) = self.listeners.get_mut(&key) {
|
||||
existing_list.push(sender_id);
|
||||
} else {
|
||||
let new_list = vec![sender_id];
|
||||
self.listeners.insert(key, new_list);
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
fn remove_duplicates(&mut self, key: &ListenerKey) {
|
||||
if let Some(list) = self.listeners.get_mut(key) {
|
||||
list.sort_unstable();
|
||||
list.dedup();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<SendProviderError, Event: GenericEvent, AuxDataProvider>
|
||||
SenderTable<SendProviderError, Event, AuxDataProvider>
|
||||
for DefaultSenderTableProvider<SendProviderError, Event, AuxDataProvider>
|
||||
{
|
||||
fn contains_send_event_provider(&self, id: &ChannelId) -> bool {
|
||||
self.senders.contains_key(id)
|
||||
}
|
||||
|
||||
fn get_send_event_provider(
|
||||
&self,
|
||||
id: &ChannelId,
|
||||
) -> Option<&dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>> {
|
||||
self.senders
|
||||
.get(id)
|
||||
.filter(|sender| sender.id() == *id)
|
||||
.map(|v| v.as_ref())
|
||||
}
|
||||
|
||||
fn add_send_event_provider(
|
||||
&mut self,
|
||||
send_provider: Box<
|
||||
dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>,
|
||||
>,
|
||||
) -> bool {
|
||||
let id = send_provider.id();
|
||||
if self.senders.contains_key(&id) {
|
||||
return false;
|
||||
}
|
||||
self.senders.insert(id, send_provider).is_none()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub mod stdmod {
|
||||
use super::*;
|
||||
use crate::event_man::{EventReceiver, EventWithAuxData};
|
||||
use crate::events::{EventU16, EventU32, GenericEvent};
|
||||
use crate::params::Params;
|
||||
use std::sync::mpsc::{Receiver, SendError, Sender};
|
||||
|
||||
pub struct MpscEventReceiver<Event: GenericEvent + Send = EventU32> {
|
||||
mpsc_receiver: Receiver<(Event, Option<Params>)>,
|
||||
}
|
||||
|
||||
impl<Event: GenericEvent + Send> MpscEventReceiver<Event> {
|
||||
pub fn new(receiver: Receiver<(Event, Option<Params>)>) -> Self {
|
||||
Self {
|
||||
mpsc_receiver: receiver,
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<Event: GenericEvent + Send> EventReceiver<Event> for MpscEventReceiver<Event> {
|
||||
fn receive(&self) -> Option<EventWithAuxData<Event>> {
|
||||
if let Ok(event_and_data) = self.mpsc_receiver.try_recv() {
|
||||
return Some(event_and_data);
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub type MpscEventU32Receiver = MpscEventReceiver<EventU32>;
|
||||
pub type MpscEventU16Receiver = MpscEventReceiver<EventU16>;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct MpscEventSendProvider<Event: GenericEvent + Send> {
|
||||
id: u32,
|
||||
sender: Sender<(Event, Option<Params>)>,
|
||||
}
|
||||
|
||||
impl<Event: GenericEvent + Send> MpscEventSendProvider<Event> {
|
||||
pub fn new(id: u32, sender: Sender<(Event, Option<Params>)>) -> Self {
|
||||
Self { id, sender }
|
||||
}
|
||||
}
|
||||
|
||||
impl<Event: GenericEvent + Send> SendEventProvider<Event> for MpscEventSendProvider<Event> {
|
||||
type Error = SendError<(Event, Option<Params>)>;
|
||||
|
||||
fn id(&self) -> u32 {
|
||||
self.id
|
||||
}
|
||||
fn send(&self, event: Event, aux_data: Option<Params>) -> Result<(), Self::Error> {
|
||||
self.sender.send((event, aux_data))
|
||||
}
|
||||
}
|
||||
|
||||
pub type MpscEventU32SendProvider = MpscEventSendProvider<EventU32>;
|
||||
pub type MpscEventU16SendProvider = MpscEventSendProvider<EventU16>;
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::event_man::EventManager;
|
||||
use crate::events::{EventU32, GenericEvent, Severity};
|
||||
use crate::params::ParamsRaw;
|
||||
use alloc::boxed::Box;
|
||||
use std::format;
|
||||
use std::sync::mpsc::{channel, Receiver, SendError, Sender};
|
||||
|
||||
#[derive(Clone)]
|
||||
struct MpscEventSenderQueue {
|
||||
id: u32,
|
||||
mpsc_sender: Sender<EventU32WithAuxData>,
|
||||
}
|
||||
|
||||
impl MpscEventSenderQueue {
|
||||
fn new(id: u32, mpsc_sender: Sender<EventU32WithAuxData>) -> Self {
|
||||
Self { id, mpsc_sender }
|
||||
}
|
||||
}
|
||||
|
||||
impl SendEventProvider<EventU32> for MpscEventSenderQueue {
|
||||
type Error = SendError<EventU32WithAuxData>;
|
||||
|
||||
fn id(&self) -> u32 {
|
||||
self.id
|
||||
}
|
||||
fn send(&self, event: EventU32, aux_data: Option<Params>) -> Result<(), Self::Error> {
|
||||
self.mpsc_sender.send((event, aux_data))
|
||||
}
|
||||
}
|
||||
|
||||
fn check_next_event(
|
||||
expected: EventU32,
|
||||
receiver: &Receiver<EventU32WithAuxData>,
|
||||
) -> Option<Params> {
|
||||
if let Ok(event) = receiver.try_recv() {
|
||||
assert_eq!(event.0, expected);
|
||||
return event.1;
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn check_handled_event(
|
||||
res: EventRoutingResult<EventU32, Params>,
|
||||
expected: EventU32,
|
||||
expected_num_sent: u32,
|
||||
) {
|
||||
assert!(matches!(res, EventRoutingResult::Handled { .. }));
|
||||
if let EventRoutingResult::Handled(num_recipients, event, _aux_data) = res {
|
||||
assert_eq!(event, expected);
|
||||
assert_eq!(num_recipients, expected_num_sent);
|
||||
}
|
||||
}
|
||||
|
||||
fn generic_event_man() -> (
|
||||
Sender<EventU32WithAuxData>,
|
||||
EventManager<SendError<EventU32WithAuxData>>,
|
||||
) {
|
||||
let (event_sender, manager_queue) = channel();
|
||||
let event_man_receiver = MpscEventReceiver::new(manager_queue);
|
||||
(
|
||||
event_sender,
|
||||
EventManager::new(Box::new(event_man_receiver)),
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic() {
|
||||
let (event_sender, mut event_man) = generic_event_man();
|
||||
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap();
|
||||
let event_grp_1_0 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
|
||||
let (single_event_sender, single_event_receiver) = channel();
|
||||
let single_event_listener = MpscEventSenderQueue::new(0, single_event_sender);
|
||||
event_man.subscribe_single(&event_grp_0, single_event_listener.id());
|
||||
event_man.add_sender(single_event_listener);
|
||||
let (group_event_sender_0, group_event_receiver_0) = channel();
|
||||
let group_event_listener = MpscEventSenderQueue {
|
||||
id: 1,
|
||||
mpsc_sender: group_event_sender_0,
|
||||
};
|
||||
event_man.subscribe_group(event_grp_1_0.group_id(), group_event_listener.id());
|
||||
event_man.add_sender(group_event_listener);
|
||||
|
||||
// Test event with one listener
|
||||
event_sender
|
||||
.send((event_grp_0, None))
|
||||
.expect("Sending single error failed");
|
||||
let res = event_man.try_event_handling();
|
||||
assert!(res.is_ok());
|
||||
check_handled_event(res.unwrap(), event_grp_0, 1);
|
||||
check_next_event(event_grp_0, &single_event_receiver);
|
||||
|
||||
// Test event which is sent to all group listeners
|
||||
event_sender
|
||||
.send((event_grp_1_0, None))
|
||||
.expect("Sending group error failed");
|
||||
let res = event_man.try_event_handling();
|
||||
assert!(res.is_ok());
|
||||
check_handled_event(res.unwrap(), event_grp_1_0, 1);
|
||||
check_next_event(event_grp_1_0, &group_event_receiver_0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_with_basic_aux_data() {
|
||||
let (event_sender, mut event_man) = generic_event_man();
|
||||
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap();
|
||||
let (single_event_sender, single_event_receiver) = channel();
|
||||
let single_event_listener = MpscEventSenderQueue::new(0, single_event_sender);
|
||||
event_man.subscribe_single(&event_grp_0, single_event_listener.id());
|
||||
event_man.add_sender(single_event_listener);
|
||||
event_sender
|
||||
.send((event_grp_0, Some(Params::Heapless((2_u32, 3_u32).into()))))
|
||||
.expect("Sending group error failed");
|
||||
let res = event_man.try_event_handling();
|
||||
assert!(res.is_ok());
|
||||
check_handled_event(res.unwrap(), event_grp_0, 1);
|
||||
let aux = check_next_event(event_grp_0, &single_event_receiver);
|
||||
assert!(aux.is_some());
|
||||
let aux = aux.unwrap();
|
||||
if let Params::Heapless(ParamsHeapless::Raw(ParamsRaw::U32Pair(pair))) = aux {
|
||||
assert_eq!(pair.0, 2);
|
||||
assert_eq!(pair.1, 3);
|
||||
} else {
|
||||
panic!("{}", format!("Unexpected auxiliary value type {:?}", aux));
|
||||
}
|
||||
}
|
||||
|
||||
/// Test listening for multiple groups
|
||||
#[test]
|
||||
fn test_multi_group() {
|
||||
let (event_sender, mut event_man) = generic_event_man();
|
||||
let res = event_man.try_event_handling();
|
||||
assert!(res.is_ok());
|
||||
let hres = res.unwrap();
|
||||
assert!(matches!(hres, EventRoutingResult::Empty));
|
||||
|
||||
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap();
|
||||
let event_grp_1_0 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
|
||||
let (event_grp_0_sender, event_grp_0_receiver) = channel();
|
||||
let event_grp_0_and_1_listener = MpscEventSenderQueue {
|
||||
id: 0,
|
||||
mpsc_sender: event_grp_0_sender,
|
||||
};
|
||||
event_man.subscribe_group(event_grp_0.group_id(), event_grp_0_and_1_listener.id());
|
||||
event_man.subscribe_group(event_grp_1_0.group_id(), event_grp_0_and_1_listener.id());
|
||||
event_man.add_sender(event_grp_0_and_1_listener);
|
||||
|
||||
event_sender
|
||||
.send((event_grp_0, None))
|
||||
.expect("Sending Event Group 0 failed");
|
||||
event_sender
|
||||
.send((event_grp_1_0, None))
|
||||
.expect("Sendign Event Group 1 failed");
|
||||
let res = event_man.try_event_handling();
|
||||
assert!(res.is_ok());
|
||||
check_handled_event(res.unwrap(), event_grp_0, 1);
|
||||
let res = event_man.try_event_handling();
|
||||
assert!(res.is_ok());
|
||||
check_handled_event(res.unwrap(), event_grp_1_0, 1);
|
||||
|
||||
check_next_event(event_grp_0, &event_grp_0_receiver);
|
||||
check_next_event(event_grp_1_0, &event_grp_0_receiver);
|
||||
}
|
||||
|
||||
/// Test listening to the same event from multiple listeners. Also test listening
|
||||
/// to both group and single events from one listener
|
||||
#[test]
|
||||
fn test_listening_to_same_event_and_multi_type() {
|
||||
let (event_sender, mut event_man) = generic_event_man();
|
||||
let event_0 = EventU32::new(Severity::INFO, 0, 5).unwrap();
|
||||
let event_1 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
|
||||
let (event_0_tx_0, event_0_rx_0) = channel();
|
||||
let (event_0_tx_1, event_0_rx_1) = channel();
|
||||
let event_listener_0 = MpscEventSenderQueue {
|
||||
id: 0,
|
||||
mpsc_sender: event_0_tx_0,
|
||||
};
|
||||
let event_listener_1 = MpscEventSenderQueue {
|
||||
id: 1,
|
||||
mpsc_sender: event_0_tx_1,
|
||||
};
|
||||
let event_listener_0_sender_id = event_listener_0.id();
|
||||
event_man.subscribe_single(&event_0, event_listener_0_sender_id);
|
||||
event_man.add_sender(event_listener_0);
|
||||
let event_listener_1_sender_id = event_listener_1.id();
|
||||
event_man.subscribe_single(&event_0, event_listener_1_sender_id);
|
||||
event_man.add_sender(event_listener_1);
|
||||
event_sender
|
||||
.send((event_0, None))
|
||||
.expect("Triggering Event 0 failed");
|
||||
let res = event_man.try_event_handling();
|
||||
assert!(res.is_ok());
|
||||
check_handled_event(res.unwrap(), event_0, 2);
|
||||
check_next_event(event_0, &event_0_rx_0);
|
||||
check_next_event(event_0, &event_0_rx_1);
|
||||
event_man.subscribe_group(event_1.group_id(), event_listener_0_sender_id);
|
||||
event_sender
|
||||
.send((event_0, None))
|
||||
.expect("Triggering Event 0 failed");
|
||||
event_sender
|
||||
.send((event_1, None))
|
||||
.expect("Triggering Event 1 failed");
|
||||
|
||||
// 3 Events messages will be sent now
|
||||
let res = event_man.try_event_handling();
|
||||
assert!(res.is_ok());
|
||||
check_handled_event(res.unwrap(), event_0, 2);
|
||||
let res = event_man.try_event_handling();
|
||||
assert!(res.is_ok());
|
||||
check_handled_event(res.unwrap(), event_1, 1);
|
||||
// Both the single event and the group event should arrive now
|
||||
check_next_event(event_0, &event_0_rx_0);
|
||||
check_next_event(event_1, &event_0_rx_0);
|
||||
|
||||
// Do double insertion and then remove duplicates
|
||||
event_man.subscribe_group(event_1.group_id(), event_listener_0_sender_id);
|
||||
event_man.remove_duplicates(&ListenerKey::Group(event_1.group_id()));
|
||||
event_sender
|
||||
.send((event_1, None))
|
||||
.expect("Triggering Event 1 failed");
|
||||
let res = event_man.try_event_handling();
|
||||
assert!(res.is_ok());
|
||||
check_handled_event(res.unwrap(), event_1, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_all_events_listener() {
|
||||
let (event_sender, manager_queue) = channel();
|
||||
let event_man_receiver = MpscEventReceiver::new(manager_queue);
|
||||
let mut event_man: EventManager<SendError<EventU32WithAuxData>> =
|
||||
EventManager::new(Box::new(event_man_receiver));
|
||||
let event_0 = EventU32::new(Severity::INFO, 0, 5).unwrap();
|
||||
let event_1 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
|
||||
let (event_0_tx_0, all_events_rx) = channel();
|
||||
let all_events_listener = MpscEventSenderQueue {
|
||||
id: 0,
|
||||
mpsc_sender: event_0_tx_0,
|
||||
};
|
||||
event_man.subscribe_all(all_events_listener.id());
|
||||
event_man.add_sender(all_events_listener);
|
||||
event_sender
|
||||
.send((event_0, None))
|
||||
.expect("Triggering event 0 failed");
|
||||
event_sender
|
||||
.send((event_1, None))
|
||||
.expect("Triggering event 1 failed");
|
||||
let res = event_man.try_event_handling();
|
||||
assert!(res.is_ok());
|
||||
check_handled_event(res.unwrap(), event_0, 1);
|
||||
let res = event_man.try_event_handling();
|
||||
assert!(res.is_ok());
|
||||
check_handled_event(res.unwrap(), event_1, 1);
|
||||
check_next_event(event_0, &all_events_rx);
|
||||
check_next_event(event_1, &all_events_rx);
|
||||
}
|
||||
}
|
826
satrs/src/events.rs
Normal file
@ -0,0 +1,826 @@
|
||||
//! Event support module
|
||||
//!
|
||||
//! This module includes the basic event structs [EventU32] and [EventU16] and versions with the
|
||||
//! ECSS severity levels as a type parameter. These structs are simple abstractions on top of the
|
||||
//! [u32] and [u16] types where the raw value is the unique identifier for a particular event.
|
||||
//! The abstraction also allows grouping related events using a group ID, and the severity
|
||||
//! of an event is encoded inside the raw value itself with four possible [Severity] levels:
|
||||
//!
|
||||
//! - INFO
|
||||
//! - LOW
|
||||
//! - MEDIUM
|
||||
//! - HIGH
|
||||
//!
|
||||
//! All event structs implement the [EcssEnumeration] trait and can be created as constants.
|
||||
//! This makes it easy to create a static list of constant events which can then be used to generate
|
||||
//! event telemetry using the PUS event manager modules.
|
||||
//!
|
||||
//! # Examples
|
||||
//!
|
||||
//! ```
|
||||
//! use satrs::events::{EventU16, EventU32, EventU32TypedSev, Severity, SeverityHigh, SeverityInfo};
|
||||
//!
|
||||
//! const MSG_RECVD: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::const_new(1, 0);
|
||||
//! const MSG_FAILED: EventU32 = EventU32::const_new(Severity::LOW, 1, 1);
|
||||
//!
|
||||
//! const TEMPERATURE_HIGH: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::const_new(2, 0);
|
||||
//!
|
||||
//! let small_event = EventU16::new(Severity::INFO, 3, 0);
|
||||
//! ```
|
||||
use core::fmt::Debug;
|
||||
use core::hash::Hash;
|
||||
use core::marker::PhantomData;
|
||||
use delegate::delegate;
|
||||
use spacepackets::ecss::EcssEnumeration;
|
||||
use spacepackets::util::{ToBeBytes, UnsignedEnum};
|
||||
use spacepackets::ByteConversionError;
|
||||
|
||||
/// Using a type definition makes it easier to change this to u64 in the future
|
||||
pub type LargestEventRaw = u32;
|
||||
/// Using a type definition makes it easier to change this to u32 in the future
|
||||
pub type LargestGroupIdRaw = u16;
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
|
||||
pub enum Severity {
|
||||
INFO = 0,
|
||||
LOW = 1,
|
||||
MEDIUM = 2,
|
||||
HIGH = 3,
|
||||
}
|
||||
|
||||
pub trait HasSeverity: Debug + PartialEq + Eq + Copy + Clone {
|
||||
const SEVERITY: Severity;
|
||||
}
|
||||
|
||||
/// Type level support struct
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
pub struct SeverityInfo {}
|
||||
impl HasSeverity for SeverityInfo {
|
||||
const SEVERITY: Severity = Severity::INFO;
|
||||
}
|
||||
|
||||
/// Type level support struct
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
pub struct SeverityLow {}
|
||||
impl HasSeverity for SeverityLow {
|
||||
const SEVERITY: Severity = Severity::LOW;
|
||||
}
|
||||
|
||||
/// Type level support struct
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
pub struct SeverityMedium {}
|
||||
impl HasSeverity for SeverityMedium {
|
||||
const SEVERITY: Severity = Severity::MEDIUM;
|
||||
}
|
||||
|
||||
/// Type level support struct
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
pub struct SeverityHigh {}
|
||||
impl HasSeverity for SeverityHigh {
|
||||
const SEVERITY: Severity = Severity::HIGH;
|
||||
}
|
||||
|
||||
pub trait GenericEvent: EcssEnumeration {
|
||||
type Raw;
|
||||
type GroupId;
|
||||
type UniqueId;
|
||||
|
||||
fn raw(&self) -> Self::Raw;
|
||||
fn severity(&self) -> Severity;
|
||||
fn group_id(&self) -> Self::GroupId;
|
||||
fn unique_id(&self) -> Self::UniqueId;
|
||||
|
||||
fn raw_as_largest_type(&self) -> LargestEventRaw;
|
||||
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw;
|
||||
}
|
||||
|
||||
impl TryFrom<u8> for Severity {
|
||||
type Error = ();
|
||||
|
||||
fn try_from(value: u8) -> Result<Self, Self::Error> {
|
||||
match value {
|
||||
x if x == Severity::INFO as u8 => Ok(Severity::INFO),
|
||||
x if x == Severity::LOW as u8 => Ok(Severity::LOW),
|
||||
x if x == Severity::MEDIUM as u8 => Ok(Severity::MEDIUM),
|
||||
x if x == Severity::HIGH as u8 => Ok(Severity::HIGH),
|
||||
_ => Err(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
struct EventBase<RAW, GID, UID> {
|
||||
severity: Severity,
|
||||
group_id: GID,
|
||||
unique_id: UID,
|
||||
phantom: PhantomData<RAW>,
|
||||
}
|
||||
|
||||
impl<RAW: ToBeBytes, GID, UID> EventBase<RAW, GID, UID> {
|
||||
fn write_to_bytes(
|
||||
&self,
|
||||
raw: RAW,
|
||||
buf: &mut [u8],
|
||||
width: usize,
|
||||
) -> Result<usize, ByteConversionError> {
|
||||
if buf.len() < width {
|
||||
return Err(ByteConversionError::ToSliceTooSmall {
|
||||
found: buf.len(),
|
||||
expected: width,
|
||||
});
|
||||
}
|
||||
buf[0..width].copy_from_slice(raw.to_be_bytes().as_ref());
|
||||
Ok(raw.written_len())
|
||||
}
|
||||
}
|
||||
|
||||
impl EventBase<u32, u16, u16> {
|
||||
#[inline]
|
||||
fn raw(&self) -> u32 {
|
||||
((self.severity as u32) << 30) | ((self.group_id as u32) << 16) | self.unique_id as u32
|
||||
}
|
||||
}
|
||||
|
||||
impl EventBase<u16, u8, u8> {
|
||||
#[inline]
|
||||
fn raw(&self) -> u16 {
|
||||
((self.severity as u16) << 14) | ((self.group_id as u16) << 8) | self.unique_id as u16
|
||||
}
|
||||
}
|
||||
|
||||
impl<RAW, GID, UID> EventBase<RAW, GID, UID> {
|
||||
#[inline]
|
||||
pub fn severity(&self) -> Severity {
|
||||
self.severity
|
||||
}
|
||||
}
|
||||
|
||||
impl<RAW, GID> EventBase<RAW, GID, u16> {
|
||||
#[inline]
|
||||
pub fn unique_id(&self) -> u16 {
|
||||
self.unique_id
|
||||
}
|
||||
}
|
||||
|
||||
impl<RAW, GID> EventBase<RAW, GID, u8> {
|
||||
#[inline]
|
||||
pub fn unique_id(&self) -> u8 {
|
||||
self.unique_id
|
||||
}
|
||||
}
|
||||
|
||||
impl<RAW, UID> EventBase<RAW, u16, UID> {
|
||||
#[inline]
|
||||
pub fn group_id(&self) -> u16 {
|
||||
self.group_id
|
||||
}
|
||||
}
|
||||
|
||||
impl<RAW, UID> EventBase<RAW, u8, UID> {
|
||||
#[inline]
|
||||
pub fn group_id(&self) -> u8 {
|
||||
self.group_id
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! event_provider_impl {
|
||||
() => {
|
||||
#[inline]
|
||||
fn raw(&self) -> Self::Raw {
|
||||
self.base.raw()
|
||||
}
|
||||
|
||||
/// Retrieve the severity of an event.
|
||||
#[inline]
|
||||
fn severity(&self) -> Severity {
|
||||
self.base.severity()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn group_id(&self) -> Self::GroupId {
|
||||
self.base.group_id()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn unique_id(&self) -> Self::UniqueId {
|
||||
self.base.unique_id()
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! impl_event_provider {
|
||||
($BaseIdent: ident, $TypedIdent: ident, $raw: ty, $gid: ty, $uid: ty) => {
|
||||
impl GenericEvent for $BaseIdent {
|
||||
type Raw = $raw;
|
||||
type GroupId = $gid;
|
||||
type UniqueId = $uid;
|
||||
|
||||
event_provider_impl!();
|
||||
|
||||
fn raw_as_largest_type(&self) -> LargestEventRaw {
|
||||
self.raw().into()
|
||||
}
|
||||
|
||||
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw {
|
||||
self.group_id().into()
|
||||
}
|
||||
}
|
||||
|
||||
impl<SEVERITY: HasSeverity> GenericEvent for $TypedIdent<SEVERITY> {
|
||||
type Raw = $raw;
|
||||
type GroupId = $gid;
|
||||
type UniqueId = $uid;
|
||||
|
||||
delegate!(to self.event {
|
||||
fn raw(&self) -> Self::Raw;
|
||||
fn severity(&self) -> Severity;
|
||||
fn group_id(&self) -> Self::GroupId;
|
||||
fn unique_id(&self) -> Self::UniqueId;
|
||||
fn raw_as_largest_type(&self) -> LargestEventRaw;
|
||||
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! try_from_impls {
|
||||
($SevIdent: ident, $severity: path, $raw: ty, $TypedSevIdent: ident) => {
|
||||
impl TryFrom<$raw> for $TypedSevIdent<$SevIdent> {
|
||||
type Error = Severity;
|
||||
|
||||
fn try_from(raw: $raw) -> Result<Self, Self::Error> {
|
||||
Self::try_from_generic($severity, raw)
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! const_from_fn {
|
||||
($from_fn_name: ident, $TypedIdent: ident, $SevIdent: ident) => {
|
||||
pub const fn $from_fn_name(event: $TypedIdent<$SevIdent>) -> Self {
|
||||
Self {
|
||||
base: event.event.base,
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct EventU32 {
|
||||
base: EventBase<u32, u16, u16>,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct EventU32TypedSev<SEVERITY> {
|
||||
event: EventU32,
|
||||
phantom: PhantomData<SEVERITY>,
|
||||
}
|
||||
|
||||
impl<SEVERITY: HasSeverity> From<EventU32TypedSev<SEVERITY>> for EventU32 {
|
||||
fn from(e: EventU32TypedSev<SEVERITY>) -> Self {
|
||||
Self { base: e.event.base }
|
||||
}
|
||||
}
|
||||
|
||||
impl<Severity: HasSeverity> AsRef<EventU32> for EventU32TypedSev<Severity> {
|
||||
fn as_ref(&self) -> &EventU32 {
|
||||
&self.event
|
||||
}
|
||||
}
|
||||
|
||||
impl<Severity: HasSeverity> AsMut<EventU32> for EventU32TypedSev<Severity> {
|
||||
fn as_mut(&mut self) -> &mut EventU32 {
|
||||
&mut self.event
|
||||
}
|
||||
}
|
||||
|
||||
impl_event_provider!(EventU32, EventU32TypedSev, u32, u16, u16);
|
||||
|
||||
impl EventU32 {
|
||||
/// Generate an event. The raw representation of an event has 32 bits.
|
||||
/// If the passed group ID is invalid (too large), [None] will be returned
|
||||
///
|
||||
/// # Parameter
|
||||
///
|
||||
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
|
||||
/// be stored inside the uppermost 2 bits of the raw event ID
|
||||
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
|
||||
/// next 14 bits after the severity. Therefore, the group ID is limited to a maximum value of 16383 (0x3FFF).
|
||||
/// * `unique_id`: Each event has a unique 16 bit ID occupying the last 16 bits of the
|
||||
/// raw event ID
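///
/// # Example
///
/// A small worked example of the resulting raw value, following the bit layout described
/// above:
///
/// ```ignore
/// let event = EventU32::const_new(Severity::LOW, 2, 1);
/// // Severity LOW (1) in the top 2 bits, group ID 2 in the next 14 bits, unique ID 1 at
/// // the bottom.
/// assert_eq!(event.raw(), (1 << 30) | (2 << 16) | 1);
/// ```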
|
||||
pub fn new(
|
||||
severity: Severity,
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Option<Self> {
|
||||
if group_id > (2u16.pow(14) - 1) {
|
||||
return None;
|
||||
}
|
||||
Some(Self {
|
||||
base: EventBase {
|
||||
severity,
|
||||
group_id,
|
||||
unique_id,
|
||||
phantom: PhantomData,
|
||||
},
|
||||
})
|
||||
}
|
||||
pub const fn const_new(
|
||||
severity: Severity,
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Self {
|
||||
if group_id > (2u16.pow(14) - 1) {
|
||||
panic!("Group ID too large");
|
||||
}
|
||||
Self {
|
||||
base: EventBase {
|
||||
severity,
|
||||
group_id,
|
||||
unique_id,
|
||||
phantom: PhantomData,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
const_from_fn!(const_from_info, EventU32TypedSev, SeverityInfo);
|
||||
const_from_fn!(const_from_low, EventU32TypedSev, SeverityLow);
|
||||
const_from_fn!(const_from_medium, EventU32TypedSev, SeverityMedium);
|
||||
const_from_fn!(const_from_high, EventU32TypedSev, SeverityHigh);
|
||||
}
|
||||
|
||||
impl<SEVERITY: HasSeverity> EventU32TypedSev<SEVERITY> {
|
||||
/// This is similar to [EventU32::new] but the severity is a type generic, which allows
|
||||
/// having distinct types for events with different severities.
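///
/// # Example
///
/// A short sketch of the typed constructor; see also the module level documentation example:
///
/// ```ignore
/// let info_event = EventU32TypedSev::<SeverityInfo>::new(0, 0).unwrap();
/// assert_eq!(info_event.severity(), Severity::INFO);
/// ```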
|
||||
pub fn new(
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Option<Self> {
|
||||
let event = EventU32::new(SEVERITY::SEVERITY, group_id, unique_id)?;
|
||||
Some(Self {
|
||||
event,
|
||||
phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
/// Const version of [Self::new], but panics on invalid group ID input values.
|
||||
pub const fn const_new(
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Self {
|
||||
let event = EventU32::const_new(SEVERITY::SEVERITY, group_id, unique_id);
|
||||
Self {
|
||||
event,
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn try_from_generic(expected: Severity, raw: u32) -> Result<Self, Severity> {
|
||||
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
|
||||
if severity != expected {
|
||||
return Err(severity);
|
||||
}
|
||||
Ok(Self::const_new(
|
||||
((raw >> 16) & 0x3FFF) as u16,
|
||||
(raw & 0xFFFF) as u16,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<u32> for EventU32 {
|
||||
fn from(raw: u32) -> Self {
|
||||
// Severity conversion from u8 should never fail
|
||||
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
|
||||
let group_id = ((raw >> 16) & 0x3FFF) as u16;
|
||||
let unique_id = (raw & 0xFFFF) as u16;
|
||||
// Sanitized input, should never fail
|
||||
Self::const_new(severity, group_id, unique_id)
|
||||
}
|
||||
}
|
||||
|
||||
try_from_impls!(SeverityInfo, Severity::INFO, u32, EventU32TypedSev);
|
||||
try_from_impls!(SeverityLow, Severity::LOW, u32, EventU32TypedSev);
|
||||
try_from_impls!(SeverityMedium, Severity::MEDIUM, u32, EventU32TypedSev);
|
||||
try_from_impls!(SeverityHigh, Severity::HIGH, u32, EventU32TypedSev);
|
||||
|
||||
impl UnsignedEnum for EventU32 {
|
||||
fn size(&self) -> usize {
|
||||
core::mem::size_of::<u32>()
|
||||
}
|
||||
|
||||
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
|
||||
self.base.write_to_bytes(self.raw(), buf, self.size())
|
||||
}
|
||||
}
|
||||
|
||||
impl EcssEnumeration for EventU32 {
|
||||
fn pfc(&self) -> u8 {
|
||||
u32::BITS as u8
|
||||
}
|
||||
}
|
||||
|
||||
//noinspection RsTraitImplementation
|
||||
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU32TypedSev<SEVERITY> {
|
||||
delegate!(to self.event {
|
||||
fn size(&self) -> usize;
|
||||
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
|
||||
});
|
||||
}
|
||||
|
||||
//noinspection RsTraitImplementation
|
||||
impl<SEVERITY: HasSeverity> EcssEnumeration for EventU32TypedSev<SEVERITY> {
|
||||
delegate!(to self.event {
|
||||
fn pfc(&self) -> u8;
|
||||
});
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct EventU16 {
|
||||
base: EventBase<u16, u8, u8>,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct EventU16TypedSev<SEVERITY> {
|
||||
event: EventU16,
|
||||
phantom: PhantomData<SEVERITY>,
|
||||
}
|
||||
|
||||
impl<Severity: HasSeverity> AsRef<EventU16> for EventU16TypedSev<Severity> {
|
||||
fn as_ref(&self) -> &EventU16 {
|
||||
&self.event
|
||||
}
|
||||
}
|
||||
|
||||
impl<Severity: HasSeverity> AsMut<EventU16> for EventU16TypedSev<Severity> {
|
||||
fn as_mut(&mut self) -> &mut EventU16 {
|
||||
&mut self.event
|
||||
}
|
||||
}
|
||||
|
||||
impl EventU16 {
|
||||
/// Generate a small event. The raw representation of a small event has 16 bits.
|
||||
/// If the passed group ID is invalid (too large), [None] will be returned
|
||||
///
|
||||
/// # Parameter
|
||||
///
|
||||
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
|
||||
/// be stored inside the uppermost 2 bits of the raw event ID
|
||||
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
|
||||
/// next 6 bits after the severity. Therefore, the group ID is limited to a maximum value of 63 (0x3F).
|
||||
/// * `unique_id`: Each event has a unique 8 bit ID occupying the last 8 bits of the
|
||||
/// raw event ID
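///
/// # Example
///
/// A small worked example of the resulting raw 16 bit value, following the bit layout
/// described above:
///
/// ```ignore
/// let event = EventU16::const_new(Severity::HIGH, 2, 1);
/// // Severity HIGH (3) in the top 2 bits, group ID 2 in the next 6 bits, unique ID 1 at
/// // the bottom.
/// assert_eq!(event.raw(), (3 << 14) | (2 << 8) | 1);
/// ```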
|
||||
pub fn new(
|
||||
severity: Severity,
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Option<Self> {
|
||||
if group_id > (2u8.pow(6) - 1) {
|
||||
return None;
|
||||
}
|
||||
Some(Self {
|
||||
base: EventBase {
|
||||
severity,
|
||||
group_id,
|
||||
unique_id,
|
||||
phantom: Default::default(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
/// Const version of [Self::new], but panics on invalid group ID input values.
|
||||
pub const fn const_new(
|
||||
severity: Severity,
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Self {
|
||||
if group_id > (2u8.pow(6) - 1) {
|
||||
panic!("Group ID too large");
|
||||
}
|
||||
Self {
|
||||
base: EventBase {
|
||||
severity,
|
||||
group_id,
|
||||
unique_id,
|
||||
phantom: PhantomData,
|
||||
},
|
||||
}
|
||||
}
|
||||
const_from_fn!(const_from_info, EventU16TypedSev, SeverityInfo);
|
||||
const_from_fn!(const_from_low, EventU16TypedSev, SeverityLow);
|
||||
const_from_fn!(const_from_medium, EventU16TypedSev, SeverityMedium);
|
||||
const_from_fn!(const_from_high, EventU16TypedSev, SeverityHigh);
|
||||
}
|
||||
|
||||
impl<SEVERITY: HasSeverity> EventU16TypedSev<SEVERITY> {
|
||||
/// This is similar to [EventU16::new] but the severity is a type generic, which allows
|
||||
/// having distinct types for events with different severities.
|
||||
pub fn new(
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Option<Self> {
|
||||
let event = EventU16::new(SEVERITY::SEVERITY, group_id, unique_id)?;
|
||||
Some(Self {
|
||||
event,
|
||||
phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
/// Const version of [Self::new], but panics on invalid group ID input values.
|
||||
pub const fn const_new(
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Self {
|
||||
let event = EventU16::const_new(SEVERITY::SEVERITY, group_id, unique_id);
|
||||
Self {
|
||||
event,
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn try_from_generic(expected: Severity, raw: u16) -> Result<Self, Severity> {
|
||||
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
|
||||
if severity != expected {
|
||||
return Err(severity);
|
||||
}
|
||||
Ok(Self::const_new(
|
||||
((raw >> 8) & 0x3F) as u8,
|
||||
(raw & 0xFF) as u8,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl_event_provider!(EventU16, EventU16TypedSev, u16, u8, u8);
|
||||
|
||||
impl UnsignedEnum for EventU16 {
|
||||
fn size(&self) -> usize {
|
||||
core::mem::size_of::<u16>()
|
||||
}
|
||||
|
||||
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
|
||||
self.base.write_to_bytes(self.raw(), buf, self.size())
|
||||
}
|
||||
}
|
||||
impl EcssEnumeration for EventU16 {
|
||||
#[inline]
|
||||
fn pfc(&self) -> u8 {
|
||||
u16::BITS as u8
|
||||
}
|
||||
}
|
||||
|
||||
//noinspection RsTraitImplementation
|
||||
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU16TypedSev<SEVERITY> {
|
||||
delegate!(to self.event {
|
||||
fn size(&self) -> usize;
|
||||
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
|
||||
});
|
||||
}
|
||||
|
||||
//noinspection RsTraitImplementation
|
||||
impl<SEVERITY: HasSeverity> EcssEnumeration for EventU16TypedSev<SEVERITY> {
|
||||
delegate!(to self.event {
|
||||
fn pfc(&self) -> u8;
|
||||
});
|
||||
}
|
||||
|
||||
impl From<u16> for EventU16 {
|
||||
fn from(raw: <Self as GenericEvent>::Raw) -> Self {
|
||||
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
|
||||
let group_id = ((raw >> 8) & 0x3F) as u8;
|
||||
let unique_id = (raw & 0xFF) as u8;
|
||||
// Sanitized input, new call should never fail
|
||||
Self::const_new(severity, group_id, unique_id)
|
||||
}
|
||||
}
|
||||
|
||||
try_from_impls!(SeverityInfo, Severity::INFO, u16, EventU16TypedSev);
|
||||
try_from_impls!(SeverityLow, Severity::LOW, u16, EventU16TypedSev);
|
||||
try_from_impls!(SeverityMedium, Severity::MEDIUM, u16, EventU16TypedSev);
|
||||
try_from_impls!(SeverityHigh, Severity::HIGH, u16, EventU16TypedSev);
|
||||
|
||||
impl<Severity: HasSeverity> PartialEq<EventU32> for EventU32TypedSev<Severity> {
|
||||
#[inline]
|
||||
fn eq(&self, other: &EventU32) -> bool {
|
||||
self.raw() == other.raw()
|
||||
}
|
||||
}
|
||||
|
||||
impl<Severity: HasSeverity> PartialEq<EventU32TypedSev<Severity>> for EventU32 {
|
||||
#[inline]
|
||||
fn eq(&self, other: &EventU32TypedSev<Severity>) -> bool {
|
||||
self.raw() == other.raw()
|
||||
}
|
||||
}
|
||||
|
||||
impl<Severity: HasSeverity> PartialEq<EventU16> for EventU16TypedSev<Severity> {
|
||||
#[inline]
|
||||
fn eq(&self, other: &EventU16) -> bool {
|
||||
self.raw() == other.raw()
|
||||
}
|
||||
}
|
||||
|
||||
impl<Severity: HasSeverity> PartialEq<EventU16TypedSev<Severity>> for EventU16 {
|
||||
#[inline]
|
||||
fn eq(&self, other: &EventU16TypedSev<Severity>) -> bool {
|
||||
self.raw() == other.raw()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::EventU32TypedSev;
|
||||
use super::*;
|
||||
use spacepackets::ByteConversionError;
|
||||
use std::mem::size_of;
|
||||
|
||||
fn assert_size<T>(_: T, val: usize) {
|
||||
assert_eq!(size_of::<T>(), val);
|
||||
}
|
||||
|
||||
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::const_new(0, 0);
|
||||
const INFO_EVENT_SMALL: EventU16TypedSev<SeverityInfo> = EventU16TypedSev::const_new(0, 0);
|
||||
const HIGH_SEV_EVENT: EventU32TypedSev<SeverityHigh> =
|
||||
EventU32TypedSev::const_new(0x3FFF, 0xFFFF);
|
||||
const HIGH_SEV_EVENT_SMALL: EventU16TypedSev<SeverityHigh> =
|
||||
EventU16TypedSev::const_new(0x3F, 0xff);
|
||||
|
||||
/// The fact that this constant conversion compiles is a test in itself.
|
||||
const INFO_REDUCED: EventU32 = EventU32::const_from_info(INFO_EVENT);
|
||||
|
||||
#[test]
|
||||
fn test_normal_from_raw_conversion() {
|
||||
let conv_from_raw = EventU32TypedSev::<SeverityInfo>::try_from(INFO_EVENT.raw())
|
||||
.expect("Creating typed EventU32 failed");
|
||||
assert_eq!(conv_from_raw, INFO_EVENT);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_small_from_raw_conversion() {
|
||||
let conv_from_raw = EventU16TypedSev::<SeverityInfo>::try_from(INFO_EVENT_SMALL.raw())
|
||||
.expect("Creating typed EventU16 failed");
|
||||
assert_eq!(conv_from_raw, INFO_EVENT_SMALL);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn verify_normal_size() {
|
||||
assert_size(INFO_EVENT.raw(), 4)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn verify_small_size() {
|
||||
assert_size(INFO_EVENT_SMALL.raw(), 2)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_normal_event_getters() {
|
||||
assert_eq!(INFO_EVENT.severity(), Severity::INFO);
|
||||
assert_eq!(INFO_EVENT.unique_id(), 0);
|
||||
assert_eq!(INFO_EVENT.group_id(), 0);
|
||||
let raw_event = INFO_EVENT.raw();
|
||||
assert_eq!(raw_event, 0x00000000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_small_event_getters() {
|
||||
assert_eq!(INFO_EVENT_SMALL.severity(), Severity::INFO);
|
||||
assert_eq!(INFO_EVENT_SMALL.unique_id(), 0);
|
||||
assert_eq!(INFO_EVENT_SMALL.group_id(), 0);
|
||||
let raw_event = INFO_EVENT_SMALL.raw();
|
||||
assert_eq!(raw_event, 0x00000000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn all_ones_event_regular() {
|
||||
assert_eq!(HIGH_SEV_EVENT.severity(), Severity::HIGH);
|
||||
assert_eq!(HIGH_SEV_EVENT.group_id(), 0x3FFF);
|
||||
assert_eq!(HIGH_SEV_EVENT.unique_id(), 0xFFFF);
|
||||
let raw_event = HIGH_SEV_EVENT.raw();
|
||||
assert_eq!(raw_event, 0xFFFFFFFF);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn all_ones_event_small() {
|
||||
assert_eq!(HIGH_SEV_EVENT_SMALL.severity(), Severity::HIGH);
|
||||
assert_eq!(HIGH_SEV_EVENT_SMALL.group_id(), 0x3F);
|
||||
assert_eq!(HIGH_SEV_EVENT_SMALL.unique_id(), 0xFF);
|
||||
let raw_event = HIGH_SEV_EVENT_SMALL.raw();
|
||||
assert_eq!(raw_event, 0xFFFF);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_group_id_normal() {
|
||||
assert!(EventU32TypedSev::<SeverityMedium>::new(2_u16.pow(14), 0).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_group_id_small() {
|
||||
assert!(EventU16TypedSev::<SeverityMedium>::new(2_u8.pow(6), 0).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn regular_new() {
|
||||
assert_eq!(
|
||||
EventU32TypedSev::<SeverityInfo>::new(0, 0).expect("Creating regular event failed"),
|
||||
INFO_EVENT
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn small_new() {
|
||||
assert_eq!(
|
||||
EventU16TypedSev::<SeverityInfo>::new(0, 0).expect("Creating regular event failed"),
|
||||
INFO_EVENT_SMALL
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn as_largest_type() {
|
||||
let event_raw = HIGH_SEV_EVENT.raw_as_largest_type();
|
||||
assert_size(event_raw, 4);
|
||||
assert_eq!(event_raw, 0xFFFFFFFF);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn as_largest_type_for_small_event() {
|
||||
let event_raw = HIGH_SEV_EVENT_SMALL.raw_as_largest_type();
|
||||
assert_size(event_raw, 4);
|
||||
assert_eq!(event_raw, 0xFFFF);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn as_largest_group_id() {
|
||||
let group_id = HIGH_SEV_EVENT.group_id_as_largest_type();
|
||||
assert_size(group_id, 2);
|
||||
assert_eq!(group_id, 0x3FFF);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn as_largest_group_id_small_event() {
|
||||
let group_id = HIGH_SEV_EVENT_SMALL.group_id_as_largest_type();
|
||||
assert_size(group_id, 2);
|
||||
assert_eq!(group_id, 0x3F);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_to_buf() {
|
||||
let mut buf: [u8; 4] = [0; 4];
|
||||
assert!(HIGH_SEV_EVENT.write_to_be_bytes(&mut buf).is_ok());
|
||||
let val_from_raw = u32::from_be_bytes(buf);
|
||||
assert_eq!(val_from_raw, 0xFFFFFFFF);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_to_buf_small() {
|
||||
let mut buf: [u8; 2] = [0; 2];
|
||||
assert!(HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf).is_ok());
|
||||
let val_from_raw = u16::from_be_bytes(buf);
|
||||
assert_eq!(val_from_raw, 0xFFFF);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_to_buf_insufficient_buf() {
|
||||
let mut buf: [u8; 3] = [0; 3];
|
||||
let err = HIGH_SEV_EVENT.write_to_be_bytes(&mut buf);
|
||||
assert!(err.is_err());
|
||||
let err = err.unwrap_err();
|
||||
if let ByteConversionError::ToSliceTooSmall { found, expected } = err {
|
||||
assert_eq!(expected, 4);
|
||||
assert_eq!(found, 3);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_to_buf_small_insufficient_buf() {
|
||||
let mut buf: [u8; 1] = [0; 1];
|
||||
let err = HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf);
|
||||
assert!(err.is_err());
|
||||
let err = err.unwrap_err();
|
||||
if let ByteConversionError::ToSliceTooSmall { found, expected } = err {
|
||||
assert_eq!(expected, 2);
|
||||
assert_eq!(found, 1);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn severity_from_invalid_raw_val() {
|
||||
let invalid = 0xFF;
|
||||
assert!(Severity::try_from(invalid).is_err());
|
||||
let invalid = Severity::HIGH as u8 + 1;
|
||||
assert!(Severity::try_from(invalid).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reduction() {
|
||||
let event = EventU32TypedSev::<SeverityInfo>::const_new(1, 1);
|
||||
let raw = event.raw();
|
||||
let reduced: EventU32 = event.into();
|
||||
assert_eq!(reduced.group_id(), 1);
|
||||
assert_eq!(reduced.unique_id(), 1);
|
||||
assert_eq!(raw, reduced.raw());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn const_reduction() {
|
||||
assert_eq!(INFO_REDUCED.raw(), INFO_EVENT.raw());
|
||||
}
|
||||
}
|
516
satrs/src/executable.rs
Normal file
@ -0,0 +1,516 @@
|
||||
//! Task scheduling module
|
||||
use alloc::string::String;
|
||||
use bus::BusReader;
|
||||
use std::boxed::Box;
|
||||
use std::sync::mpsc::TryRecvError;
|
||||
use std::thread::JoinHandle;
|
||||
use std::time::Duration;
|
||||
use std::vec;
|
||||
use std::vec::Vec;
|
||||
use std::{io, thread};
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum OpResult {
|
||||
Ok,
|
||||
TerminationRequested,
|
||||
}
|
||||
|
||||
pub enum ExecutionType {
|
||||
Infinite,
|
||||
Cycles(u32),
|
||||
OneShot,
|
||||
}
|
||||
|
||||
pub trait Executable: Send {
|
||||
type Error;
|
||||
|
||||
fn exec_type(&self) -> ExecutionType;
|
||||
fn task_name(&self) -> &'static str;
|
||||
fn periodic_op(&mut self, op_code: i32) -> Result<OpResult, Self::Error>;
|
||||
}
|
||||
|
||||
/// This function allows executing one task which implements the [Executable] trait
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `executable`: Executable task
|
||||
/// * `task_freq`: Optional frequency of task. Required for periodic and fixed cycle tasks.
|
||||
/// If [None] is passed, no sleeping will be performed.
|
||||
/// * `op_code`: Operation code which is passed to the executable task
|
||||
/// [operation call][Executable::periodic_op]
|
||||
/// * `termination`: Optional termination handler which can cancel threads with a broadcast
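///
/// # Example
///
/// A minimal sketch mirroring the unit tests below, where `MyTask` stands in for any user
/// type implementing [Executable]:
///
/// ```ignore
/// let task = Box::new(MyTask::default());
/// let handle = exec_sched_single(task, Some(Duration::from_millis(100)), 0, None)
///     .expect("thread creation failed");
/// let task_result = handle.join().expect("task thread panicked");
/// assert!(task_result.is_ok());
/// ```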
|
||||
pub fn exec_sched_single<T: Executable<Error = E> + Send + 'static + ?Sized, E: Send + 'static>(
|
||||
mut executable: Box<T>,
|
||||
task_freq: Option<Duration>,
|
||||
op_code: i32,
|
||||
mut termination: Option<BusReader<()>>,
|
||||
) -> Result<JoinHandle<Result<OpResult, E>>, io::Error> {
|
||||
let mut cycle_count = 0;
|
||||
thread::Builder::new()
|
||||
.name(String::from(executable.task_name()))
|
||||
.spawn(move || loop {
|
||||
if let Some(ref mut terminator) = termination {
|
||||
match terminator.try_recv() {
|
||||
Ok(_) | Err(TryRecvError::Disconnected) => {
|
||||
return Ok(OpResult::Ok);
|
||||
}
|
||||
Err(TryRecvError::Empty) => (),
|
||||
}
|
||||
}
|
||||
match executable.exec_type() {
|
||||
ExecutionType::OneShot => {
|
||||
executable.periodic_op(op_code)?;
|
||||
return Ok(OpResult::Ok);
|
||||
}
|
||||
ExecutionType::Infinite => {
|
||||
executable.periodic_op(op_code)?;
|
||||
}
|
||||
ExecutionType::Cycles(cycles) => {
|
||||
executable.periodic_op(op_code)?;
|
||||
cycle_count += 1;
|
||||
if cycle_count == cycles {
|
||||
return Ok(OpResult::Ok);
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Some(freq) = task_freq {
|
||||
thread::sleep(freq);
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// This function allows executing multiple tasks as long as the tasks implement the
|
||||
/// [Executable] trait
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `executable_vec`: Vector of executable objects
|
||||
/// * `task_freq`: Optional frequency of task. Required for periodic and fixed cycle tasks
|
||||
/// * `op_code`: Operation code which is passed to the executable task [operation call][Executable::periodic_op]
|
||||
/// * `termination`: Optional termination handler which can cancel threads with a broadcast
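///
/// # Example
///
/// A minimal sketch mirroring the unit tests below; `MyTask` and `MyError` are hypothetical
/// user types implementing [Executable] and its error type:
///
/// ```ignore
/// let tasks: Vec<Box<dyn Executable<Error = MyError>>> =
///     vec![Box::new(MyTask::default()), Box::new(MyTask::default())];
/// let handle = exec_sched_multi("task-pool", tasks, Some(Duration::from_millis(100)), 0, None)
///     .expect("thread creation failed");
/// ```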
|
||||
pub fn exec_sched_multi<T: Executable<Error = E> + Send + 'static + ?Sized, E: Send + 'static>(
|
||||
task_name: &'static str,
|
||||
mut executable_vec: Vec<Box<T>>,
|
||||
task_freq: Option<Duration>,
|
||||
op_code: i32,
|
||||
mut termination: Option<BusReader<()>>,
|
||||
) -> Result<JoinHandle<Result<OpResult, E>>, io::Error> {
|
||||
let mut cycle_counts = vec![0; executable_vec.len()];
|
||||
let mut removal_flags = vec![false; executable_vec.len()];
|
||||
|
||||
thread::Builder::new()
|
||||
.name(String::from(task_name))
|
||||
.spawn(move || loop {
|
||||
if let Some(ref mut terminator) = termination {
|
||||
match terminator.try_recv() {
|
||||
Ok(_) | Err(TryRecvError::Disconnected) => {
|
||||
removal_flags.iter_mut().for_each(|x| *x = true);
|
||||
}
|
||||
Err(TryRecvError::Empty) => (),
|
||||
}
|
||||
}
|
||||
for (idx, executable) in executable_vec.iter_mut().enumerate() {
|
||||
match executable.exec_type() {
|
||||
ExecutionType::OneShot => {
|
||||
executable.periodic_op(op_code)?;
|
||||
removal_flags[idx] = true;
|
||||
}
|
||||
ExecutionType::Infinite => {
|
||||
executable.periodic_op(op_code)?;
|
||||
}
|
||||
ExecutionType::Cycles(cycles) => {
|
||||
executable.periodic_op(op_code)?;
|
||||
cycle_counts[idx] += 1;
|
||||
if cycle_counts[idx] == cycles {
|
||||
removal_flags[idx] = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
let mut removal_iter = removal_flags.iter();
|
||||
executable_vec.retain(|_| !*removal_iter.next().unwrap());
|
||||
removal_iter = removal_flags.iter();
|
||||
cycle_counts.retain(|_| !*removal_iter.next().unwrap());
|
||||
removal_flags.retain(|&i| !i);
|
||||
if executable_vec.is_empty() {
|
||||
return Ok(OpResult::Ok);
|
||||
}
|
||||
let freq = task_freq.unwrap_or_else(|| panic!("No task frequency specified"));
|
||||
thread::sleep(freq);
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{exec_sched_multi, exec_sched_single, Executable, ExecutionType, OpResult};
|
||||
use bus::Bus;
|
||||
use std::boxed::Box;
|
||||
use std::error::Error;
|
||||
use std::string::{String, ToString};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Duration;
|
||||
use std::vec::Vec;
|
||||
use std::{fmt, thread, vec};
|
||||
|
||||
struct TestInfo {
|
||||
exec_num: u32,
|
||||
op_code: i32,
|
||||
}
|
||||
struct OneShotTask {
|
||||
exec_num: Arc<Mutex<TestInfo>>,
|
||||
}
|
||||
struct FixedCyclesTask {
|
||||
cycles: u32,
|
||||
exec_num: Arc<Mutex<TestInfo>>,
|
||||
}
|
||||
struct PeriodicTask {
|
||||
exec_num: Arc<Mutex<TestInfo>>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct ExampleError {
|
||||
kind: ErrorKind,
|
||||
}
|
||||
|
||||
/// The kind of an error that can occur.
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum ErrorKind {
|
||||
Generic(String, i32),
|
||||
}
|
||||
|
||||
impl ExampleError {
|
||||
fn new(msg: &str, code: i32) -> ExampleError {
|
||||
ExampleError {
|
||||
kind: ErrorKind::Generic(msg.to_string(), code),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the kind of this error.
|
||||
pub fn kind(&self) -> &ErrorKind {
|
||||
&self.kind
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ExampleError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self.kind() {
|
||||
ErrorKind::Generic(str, code) => {
|
||||
write!(f, "{str} with code {code}")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Error for ExampleError {}
|
||||
|
||||
const ONE_SHOT_TASK_NAME: &str = "One Shot Task";
|
||||
|
||||
impl Executable for OneShotTask {
|
||||
type Error = ExampleError;
|
||||
|
||||
fn exec_type(&self) -> ExecutionType {
|
||||
ExecutionType::OneShot
|
||||
}
|
||||
|
||||
fn task_name(&self) -> &'static str {
|
||||
ONE_SHOT_TASK_NAME
|
||||
}
|
||||
|
||||
fn periodic_op(&mut self, op_code: i32) -> Result<OpResult, ExampleError> {
|
||||
let mut data = self.exec_num.lock().expect("Locking Mutex failed");
|
||||
data.exec_num += 1;
|
||||
data.op_code = op_code;
|
||||
std::mem::drop(data);
|
||||
if op_code >= 0 {
|
||||
Ok(OpResult::Ok)
|
||||
} else {
|
||||
Err(ExampleError::new("One Shot Task Failure", op_code))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const CYCLE_TASK_NAME: &str = "Fixed Cycles Task";
|
||||
|
||||
impl Executable for FixedCyclesTask {
|
||||
type Error = ExampleError;
|
||||
|
||||
fn exec_type(&self) -> ExecutionType {
|
||||
ExecutionType::Cycles(self.cycles)
|
||||
}
|
||||
|
||||
fn task_name(&self) -> &'static str {
|
||||
CYCLE_TASK_NAME
|
||||
}
|
||||
|
||||
fn periodic_op(&mut self, op_code: i32) -> Result<OpResult, ExampleError> {
|
||||
let mut data = self.exec_num.lock().expect("Locking Mutex failed");
|
||||
data.exec_num += 1;
|
||||
data.op_code = op_code;
|
||||
std::mem::drop(data);
|
||||
if op_code >= 0 {
|
||||
Ok(OpResult::Ok)
|
||||
} else {
|
||||
Err(ExampleError::new("Fixed Cycle Task Failure", op_code))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const PERIODIC_TASK_NAME: &str = "Periodic Task";
|
||||
|
||||
impl Executable for PeriodicTask {
|
||||
type Error = ExampleError;
|
||||
|
||||
fn exec_type(&self) -> ExecutionType {
|
||||
ExecutionType::Infinite
|
||||
}
|
||||
|
||||
fn task_name(&self) -> &'static str {
|
||||
PERIODIC_TASK_NAME
|
||||
}
|
||||
|
||||
fn periodic_op(&mut self, op_code: i32) -> Result<OpResult, ExampleError> {
|
||||
let mut data = self.exec_num.lock().expect("Locking Mutex failed");
|
||||
data.exec_num += 1;
|
||||
data.op_code = op_code;
|
||||
std::mem::drop(data);
|
||||
if op_code >= 0 {
|
||||
Ok(OpResult::Ok)
|
||||
} else {
|
||||
Err(ExampleError::new("Example Task Failure", op_code))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_one_shot() {
|
||||
let expected_op_code = 42;
|
||||
let shared = Arc::new(Mutex::new(TestInfo {
|
||||
exec_num: 0,
|
||||
op_code: 0,
|
||||
}));
|
||||
let exec_task = OneShotTask {
|
||||
exec_num: shared.clone(),
|
||||
};
|
||||
let task = Box::new(exec_task);
|
||||
let jhandle = exec_sched_single(
|
||||
task,
|
||||
Some(Duration::from_millis(100)),
|
||||
expected_op_code,
|
||||
None,
|
||||
)
|
||||
.expect("thread creation failed");
|
||||
let thread_res = jhandle.join().expect("One Shot Task failed");
|
||||
assert!(thread_res.is_ok());
|
||||
assert_eq!(thread_res.unwrap(), OpResult::Ok);
|
||||
let data = shared.lock().expect("Locking Mutex failed");
|
||||
assert_eq!(data.exec_num, 1);
|
||||
assert_eq!(data.op_code, expected_op_code);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_failed_one_shot() {
|
||||
let op_code_inducing_failure = -1;
|
||||
let shared = Arc::new(Mutex::new(TestInfo {
|
||||
exec_num: 0,
|
||||
op_code: 0,
|
||||
}));
|
||||
let exec_task = OneShotTask {
|
||||
exec_num: shared.clone(),
|
||||
};
|
||||
let task = Box::new(exec_task);
|
||||
let jhandle = exec_sched_single(
|
||||
task,
|
||||
Some(Duration::from_millis(100)),
|
||||
op_code_inducing_failure,
|
||||
None,
|
||||
)
|
||||
.expect("thread creation failed");
|
||||
let thread_res = jhandle.join().expect("One Shot Task failed");
|
||||
assert!(thread_res.is_err());
|
||||
let error = thread_res.unwrap_err();
|
||||
let err = error.kind();
|
||||
assert!(matches!(err, &ErrorKind::Generic { .. }));
|
||||
match err {
|
||||
ErrorKind::Generic(str, op_code) => {
|
||||
assert_eq!(str, &String::from("One Shot Task Failure"));
|
||||
assert_eq!(op_code, &op_code_inducing_failure);
|
||||
}
|
||||
}
|
||||
let error_display = error.to_string();
|
||||
assert_eq!(error_display, "One Shot Task Failure with code -1");
|
||||
let data = shared.lock().expect("Locking Mutex failed");
|
||||
assert_eq!(data.exec_num, 1);
|
||||
assert_eq!(data.op_code, op_code_inducing_failure);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_multi_one_shot() {
|
||||
let expected_op_code = 43;
|
||||
let shared = Arc::new(Mutex::new(TestInfo {
|
||||
exec_num: 0,
|
||||
op_code: 0,
|
||||
}));
|
||||
let exec_task_0 = OneShotTask {
|
||||
exec_num: shared.clone(),
|
||||
};
|
||||
let exec_task_1 = OneShotTask {
|
||||
exec_num: shared.clone(),
|
||||
};
|
||||
let task_vec = vec![Box::new(exec_task_0), Box::new(exec_task_1)];
|
||||
for task in task_vec.iter() {
|
||||
assert_eq!(task.task_name(), ONE_SHOT_TASK_NAME);
|
||||
}
|
||||
let jhandle = exec_sched_multi(
|
||||
"multi-task-name",
|
||||
task_vec,
|
||||
Some(Duration::from_millis(100)),
|
||||
expected_op_code,
|
||||
None,
|
||||
)
|
||||
.expect("thread creation failed");
|
||||
let thread_res = jhandle.join().expect("One Shot Task failed");
|
||||
assert!(thread_res.is_ok());
|
||||
assert_eq!(thread_res.unwrap(), OpResult::Ok);
|
||||
let data = shared.lock().expect("Locking Mutex failed");
|
||||
assert_eq!(data.exec_num, 2);
|
||||
assert_eq!(data.op_code, expected_op_code);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cycles_single() {
|
||||
let expected_op_code = 44;
|
||||
let shared = Arc::new(Mutex::new(TestInfo {
|
||||
exec_num: 0,
|
||||
op_code: 0,
|
||||
}));
|
||||
let cycled_task = Box::new(FixedCyclesTask {
|
||||
exec_num: shared.clone(),
|
||||
            cycles: 1,
        });
        assert_eq!(cycled_task.task_name(), CYCLE_TASK_NAME);
        let jh = exec_sched_single(
            cycled_task,
            Some(Duration::from_millis(100)),
            expected_op_code,
            None,
        )
        .expect("thread creation failed");
        let thread_res = jh.join().expect("Cycles Task failed");
        assert!(thread_res.is_ok());
        let data = shared.lock().expect("Locking Mutex failed");
        assert_eq!(thread_res.unwrap(), OpResult::Ok);
        assert_eq!(data.exec_num, 1);
        assert_eq!(data.op_code, expected_op_code);
    }

    #[test]
    fn test_single_and_cycles() {
        let expected_op_code = 50;
        let shared = Arc::new(Mutex::new(TestInfo {
            exec_num: 0,
            op_code: 0,
        }));
        let one_shot_task = Box::new(OneShotTask {
            exec_num: shared.clone(),
        });
        let cycled_task_0 = Box::new(FixedCyclesTask {
            exec_num: shared.clone(),
            cycles: 1,
        });
        let cycled_task_1 = Box::new(FixedCyclesTask {
            exec_num: shared.clone(),
            cycles: 1,
        });
        assert_eq!(cycled_task_0.task_name(), CYCLE_TASK_NAME);
        assert_eq!(one_shot_task.task_name(), ONE_SHOT_TASK_NAME);
        let task_vec: Vec<Box<dyn Executable<Error = ExampleError>>> =
            vec![one_shot_task, cycled_task_0, cycled_task_1];
        let jh = exec_sched_multi(
            "multi-task-name",
            task_vec,
            Some(Duration::from_millis(100)),
            expected_op_code,
            None,
        )
        .expect("thread creation failed");
        let thread_res = jh.join().expect("Cycles Task failed");
        assert!(thread_res.is_ok());
        let data = shared.lock().expect("Locking Mutex failed");
        assert_eq!(thread_res.unwrap(), OpResult::Ok);
        assert_eq!(data.exec_num, 3);
        assert_eq!(data.op_code, expected_op_code);
    }

    #[test]
    #[ignore]
    fn test_periodic_single() {
        let mut terminator = Bus::new(5);
        let expected_op_code = 45;
        let shared = Arc::new(Mutex::new(TestInfo {
            exec_num: 0,
            op_code: 0,
        }));
        let periodic_task = Box::new(PeriodicTask {
            exec_num: shared.clone(),
        });
        assert_eq!(periodic_task.task_name(), PERIODIC_TASK_NAME);
        let jh = exec_sched_single(
            periodic_task,
            Some(Duration::from_millis(20)),
            expected_op_code,
            Some(terminator.add_rx()),
        )
        .expect("thread creation failed");
        thread::sleep(Duration::from_millis(40));
        terminator.broadcast(());
        let thread_res = jh.join().expect("Periodic Task failed");
        assert!(thread_res.is_ok());
        let data = shared.lock().expect("Locking Mutex failed");
        assert_eq!(thread_res.unwrap(), OpResult::Ok);
        let range = 2..4;
        assert!(range.contains(&data.exec_num));
        assert_eq!(data.op_code, expected_op_code);
    }

    #[test]
    #[ignore]
    fn test_periodic_multi() {
        let mut terminator = Bus::new(5);
        let expected_op_code = 46;
        let shared = Arc::new(Mutex::new(TestInfo {
            exec_num: 0,
            op_code: 0,
        }));
        let cycled_task = Box::new(FixedCyclesTask {
            exec_num: shared.clone(),
            cycles: 1,
        });
        let periodic_task_0 = Box::new(PeriodicTask {
            exec_num: shared.clone(),
        });
        let periodic_task_1 = Box::new(PeriodicTask {
            exec_num: shared.clone(),
        });
        assert_eq!(periodic_task_0.task_name(), PERIODIC_TASK_NAME);
        assert_eq!(periodic_task_1.task_name(), PERIODIC_TASK_NAME);
        let task_vec: Vec<Box<dyn Executable<Error = ExampleError>>> =
            vec![cycled_task, periodic_task_0, periodic_task_1];
        let jh = exec_sched_multi(
            "multi-task-name",
            task_vec,
            Some(Duration::from_millis(20)),
            expected_op_code,
            Some(terminator.add_rx()),
        )
        .expect("thread creation failed");
        thread::sleep(Duration::from_millis(60));
        terminator.broadcast(());
        let thread_res = jh.join().expect("Periodic Task failed");
        assert!(thread_res.is_ok());
        let data = shared.lock().expect("Locking Mutex failed");
        assert_eq!(thread_res.unwrap(), OpResult::Ok);
        let range = 7..11;
        assert!(range.contains(&data.exec_num));
        assert_eq!(data.op_code, expected_op_code);
    }
}
4
satrs/src/hal/mod.rs
Normal file
4
satrs/src/hal/mod.rs
Normal file
@ -0,0 +1,4 @@
//! # Hardware Abstraction Layer module
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod std;
6
satrs/src/hal/std/mod.rs
Normal file
6
satrs/src/hal/std/mod.rs
Normal file
@ -0,0 +1,6 @@
//! Helper modules intended to be used on systems with a full [std] runtime.
pub mod tcp_server;
pub mod udp_server;

mod tcp_cobs_server;
mod tcp_spacepackets_server;
379
satrs/src/hal/std/tcp_cobs_server.rs
Normal file
379
satrs/src/hal/std/tcp_cobs_server.rs
Normal file
@ -0,0 +1,379 @@
use alloc::vec;
use cobs::encode;
use delegate::delegate;
use std::io::Write;
use std::net::SocketAddr;
use std::net::TcpListener;
use std::net::TcpStream;
use std::vec::Vec;

use crate::encoding::parse_buffer_for_cobs_encoded_packets;
use crate::tmtc::ReceivesTc;
use crate::tmtc::TmPacketSource;

use crate::hal::std::tcp_server::{
    ConnectionResult, ServerConfig, TcpTcParser, TcpTmSender, TcpTmtcError, TcpTmtcGenericServer,
};

/// Concrete [TcpTcParser] implementation for the [TcpTmtcInCobsServer].
#[derive(Default)]
pub struct CobsTcParser {}

impl<TmError, TcError: 'static> TcpTcParser<TmError, TcError> for CobsTcParser {
    fn handle_tc_parsing(
        &mut self,
        tc_buffer: &mut [u8],
        tc_receiver: &mut (impl ReceivesTc<Error = TcError> + ?Sized),
        conn_result: &mut ConnectionResult,
        current_write_idx: usize,
        next_write_idx: &mut usize,
    ) -> Result<(), TcpTmtcError<TmError, TcError>> {
        conn_result.num_received_tcs += parse_buffer_for_cobs_encoded_packets(
            &mut tc_buffer[..current_write_idx],
            tc_receiver.upcast_mut(),
            next_write_idx,
        )
        .map_err(|e| TcpTmtcError::TcError(e))?;
        Ok(())
    }
}

/// Concrete [TcpTmSender] implementation for the [TcpTmtcInCobsServer].
pub struct CobsTmSender {
    tm_encoding_buffer: Vec<u8>,
}

impl CobsTmSender {
    fn new(tm_buffer_size: usize) -> Self {
        Self {
            // The buffer should be large enough to hold the maximum expected TM size encoded with
            // COBS.
            tm_encoding_buffer: vec![0; cobs::max_encoding_length(tm_buffer_size)],
        }
    }
}

impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
    fn handle_tm_sending(
        &mut self,
        tm_buffer: &mut [u8],
        tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized),
        conn_result: &mut ConnectionResult,
        stream: &mut TcpStream,
    ) -> Result<bool, TcpTmtcError<TmError, TcError>> {
        let mut tm_was_sent = false;
        loop {
            // Write TM until TM source is exhausted. For now, there is no limit for the amount
            // of TM written this way.
            let read_tm_len = tm_source
                .retrieve_packet(tm_buffer)
                .map_err(|e| TcpTmtcError::TmError(e))?;

            if read_tm_len == 0 {
                return Ok(tm_was_sent);
            }
            tm_was_sent = true;
            conn_result.num_sent_tms += 1;

            // Encode into COBS and send it to the client.
            let mut current_idx = 0;
            self.tm_encoding_buffer[current_idx] = 0;
            current_idx += 1;
            current_idx += encode(
                &tm_buffer[..read_tm_len],
                &mut self.tm_encoding_buffer[current_idx..],
            );
            self.tm_encoding_buffer[current_idx] = 0;
            current_idx += 1;
            stream.write_all(&self.tm_encoding_buffer[..current_idx])?;
        }
    }
}

/// TCP TMTC server implementation for exchange of generic TMTC packets which are framed with the
/// [COBS protocol](https://en.wikipedia.org/wiki/Consistent_Overhead_Byte_Stuffing).
///
/// Telemetry will be encoded with the COBS protocol using [cobs::encode] in addition to being
/// wrapped with the sentinel value 0 as the packet delimiter before being sent back to
/// the client. Please note that the server will send as much data as it can retrieve from the
/// [TmPacketSource] in its current implementation.
///
/// Using a framing protocol like COBS imposes minimal restrictions on the type of TMTC data
/// exchanged while also allowing packets with flexible size and a reliable way to reconstruct full
/// packets even from a data stream which is split up. The server will use the
/// [parse_buffer_for_cobs_encoded_packets] function to parse for packets and pass them to a
/// generic TC receiver. The user can use [crate::encoding::encode_packet_with_cobs] to encode
/// telecommands sent to the server.
///
/// ## Example
///
/// The [TCP integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-core/tests/tcp_servers.rs)
/// also serve as the example application for this module.
pub struct TcpTmtcInCobsServer<
    TmError,
    TcError: 'static,
    TmSource: TmPacketSource<Error = TmError>,
    TcReceiver: ReceivesTc<Error = TcError>,
> {
    generic_server:
        TcpTmtcGenericServer<TmError, TcError, TmSource, TcReceiver, CobsTmSender, CobsTcParser>,
}

impl<
        TmError: 'static,
        TcError: 'static,
        TmSource: TmPacketSource<Error = TmError>,
        TcReceiver: ReceivesTc<Error = TcError>,
    > TcpTmtcInCobsServer<TmError, TcError, TmSource, TcReceiver>
{
    /// Create a new TCP TMTC server which exchanges TMTC packets encoded with
    /// [COBS protocol](https://en.wikipedia.org/wiki/Consistent_Overhead_Byte_Stuffing).
    ///
    /// ## Parameter
    ///
    /// * `cfg` - Configuration of the server.
    /// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
    ///     then sent back to the client.
    /// * `tc_receiver` - Any received telecommands which were decoded successfully will be
    ///     forwarded to this TC receiver.
    pub fn new(
        cfg: ServerConfig,
        tm_source: TmSource,
        tc_receiver: TcReceiver,
    ) -> Result<Self, std::io::Error> {
        Ok(Self {
            generic_server: TcpTmtcGenericServer::new(
                cfg,
                CobsTcParser::default(),
                CobsTmSender::new(cfg.tm_buffer_size),
                tm_source,
                tc_receiver,
            )?,
        })
    }

    delegate! {
        to self.generic_server {
            pub fn listener(&mut self) -> &mut TcpListener;

            /// Can be used to retrieve the local assigned address of the TCP server. This is especially
            /// useful if using the port number 0 for OS auto-assignment.
            pub fn local_addr(&self) -> std::io::Result<SocketAddr>;

            /// Delegation to the [TcpTmtcGenericServer::handle_next_connection] call.
            pub fn handle_next_connection(
                &mut self,
            ) -> Result<ConnectionResult, TcpTmtcError<TmError, TcError>>;
        }
    }
}

#[cfg(test)]
mod tests {
    use core::{
        sync::atomic::{AtomicBool, Ordering},
        time::Duration,
    };
    use std::{
        io::{Read, Write},
        net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream},
        thread,
    };

    use crate::{
        encoding::tests::{INVERTED_PACKET, SIMPLE_PACKET},
        hal::std::tcp_server::{
            tests::{SyncTcCacher, SyncTmSource},
            ServerConfig,
        },
    };
    use alloc::sync::Arc;
    use cobs::encode;

    use super::TcpTmtcInCobsServer;

    fn encode_simple_packet(encoded_buf: &mut [u8], current_idx: &mut usize) {
        encode_packet(&SIMPLE_PACKET, encoded_buf, current_idx)
    }

    fn encode_inverted_packet(encoded_buf: &mut [u8], current_idx: &mut usize) {
        encode_packet(&INVERTED_PACKET, encoded_buf, current_idx)
    }

    fn encode_packet(packet: &[u8], encoded_buf: &mut [u8], current_idx: &mut usize) {
        encoded_buf[*current_idx] = 0;
        *current_idx += 1;
        *current_idx += encode(packet, &mut encoded_buf[*current_idx..]);
        encoded_buf[*current_idx] = 0;
        *current_idx += 1;
    }

    fn generic_tmtc_server(
        addr: &SocketAddr,
        tc_receiver: SyncTcCacher,
        tm_source: SyncTmSource,
    ) -> TcpTmtcInCobsServer<(), (), SyncTmSource, SyncTcCacher> {
        TcpTmtcInCobsServer::new(
            ServerConfig::new(*addr, Duration::from_millis(2), 1024, 1024),
            tm_source,
            tc_receiver,
        )
        .expect("TCP server generation failed")
    }

    #[test]
    fn test_server_basic_no_tm() {
        let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
        let tc_receiver = SyncTcCacher::default();
        let tm_source = SyncTmSource::default();
        let mut tcp_server = generic_tmtc_server(&auto_port_addr, tc_receiver.clone(), tm_source);
        let dest_addr = tcp_server
            .local_addr()
            .expect("retrieving dest addr failed");
        let conn_handled: Arc<AtomicBool> = Default::default();
        let set_if_done = conn_handled.clone();
        // Call the connection handler in a separate thread; it blocks.
        thread::spawn(move || {
            let result = tcp_server.handle_next_connection();
            if result.is_err() {
                panic!("handling connection failed: {:?}", result.unwrap_err());
            }
            let conn_result = result.unwrap();
            assert_eq!(conn_result.num_received_tcs, 1);
            assert_eq!(conn_result.num_sent_tms, 0);
            set_if_done.store(true, Ordering::Relaxed);
        });
        // Send TC to server now.
        let mut encoded_buf: [u8; 16] = [0; 16];
        let mut current_idx = 0;
        encode_simple_packet(&mut encoded_buf, &mut current_idx);
        let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
        stream
            .write_all(&encoded_buf[..current_idx])
            .expect("writing to TCP server failed");
        drop(stream);
        // A certain amount of time is allowed for the transaction to complete.
        for _ in 0..3 {
            if !conn_handled.load(Ordering::Relaxed) {
                thread::sleep(Duration::from_millis(5));
            }
        }
        if !conn_handled.load(Ordering::Relaxed) {
            panic!("connection was not handled properly");
        }
        // Check that the packet was received and decoded successfully.
        let mut tc_queue = tc_receiver
            .tc_queue
            .lock()
            .expect("locking tc queue failed");
        assert_eq!(tc_queue.len(), 1);
        assert_eq!(tc_queue.pop_front().unwrap(), &SIMPLE_PACKET);
        drop(tc_queue);
    }

    #[test]
    fn test_server_basic_multi_tm_multi_tc() {
        let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
        let tc_receiver = SyncTcCacher::default();
        let mut tm_source = SyncTmSource::default();
        tm_source.add_tm(&INVERTED_PACKET);
        tm_source.add_tm(&SIMPLE_PACKET);
        let mut tcp_server =
            generic_tmtc_server(&auto_port_addr, tc_receiver.clone(), tm_source.clone());
        let dest_addr = tcp_server
            .local_addr()
            .expect("retrieving dest addr failed");
        let conn_handled: Arc<AtomicBool> = Default::default();
        let set_if_done = conn_handled.clone();
        // Call the connection handler in a separate thread; it blocks.
        thread::spawn(move || {
            let result = tcp_server.handle_next_connection();
            if result.is_err() {
                panic!("handling connection failed: {:?}", result.unwrap_err());
            }
            let conn_result = result.unwrap();
            assert_eq!(conn_result.num_received_tcs, 2, "Not enough TCs received");
            assert_eq!(conn_result.num_sent_tms, 2, "Not enough TMs received");
            set_if_done.store(true, Ordering::Relaxed);
        });
        // Send TC to server now.
        let mut encoded_buf: [u8; 32] = [0; 32];
        let mut current_idx = 0;
        encode_simple_packet(&mut encoded_buf, &mut current_idx);
        encode_inverted_packet(&mut encoded_buf, &mut current_idx);
        let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
        stream
            .set_read_timeout(Some(Duration::from_millis(10)))
            .expect("setting read timeout failed");
        stream
            .write_all(&encoded_buf[..current_idx])
            .expect("writing to TCP server failed");
        // Done with writing.
        stream
            .shutdown(std::net::Shutdown::Write)
            .expect("shutting down write failed");
        let mut read_buf: [u8; 16] = [0; 16];
        let mut read_len_total = 0;
        // Timeout ensures this does not block forever.
        while read_len_total < 16 {
            let read_len = stream.read(&mut read_buf).expect("read failed");
            read_len_total += read_len;
            // Read until full expected size is available.
            if read_len == 16 {
                // Read first TM packet.
                current_idx = 0;
                assert_eq!(read_len, 16);
                assert_eq!(read_buf[0], 0);
                current_idx += 1;
                let mut dec_report = cobs::decode_in_place_report(&mut read_buf[current_idx..])
                    .expect("COBS decoding failed");
                assert_eq!(dec_report.dst_used, 5);
                // Skip first sentinel byte.
                assert_eq!(
                    &read_buf[current_idx..current_idx + INVERTED_PACKET.len()],
                    &INVERTED_PACKET
                );
                current_idx += dec_report.src_used;
                // End sentinel.
                assert_eq!(read_buf[current_idx], 0, "invalid sentinel end byte");
                current_idx += 1;

                // Read second TM packet.
                assert_eq!(read_buf[current_idx], 0);
                current_idx += 1;
                dec_report = cobs::decode_in_place_report(&mut read_buf[current_idx..])
                    .expect("COBS decoding failed");
                assert_eq!(dec_report.dst_used, 5);
                // Skip first sentinel byte.
                assert_eq!(
                    &read_buf[current_idx..current_idx + SIMPLE_PACKET.len()],
                    &SIMPLE_PACKET
                );
                current_idx += dec_report.src_used;
                // End sentinel.
                assert_eq!(read_buf[current_idx], 0);
                break;
            }
        }
        drop(stream);

        // A certain amount of time is allowed for the transaction to complete.
        for _ in 0..3 {
            if !conn_handled.load(Ordering::Relaxed) {
                thread::sleep(Duration::from_millis(5));
            }
        }
        if !conn_handled.load(Ordering::Relaxed) {
            panic!("connection was not handled properly");
        }
        // Check that the packets were received and decoded successfully.
        let mut tc_queue = tc_receiver
            .tc_queue
            .lock()
            .expect("locking tc queue failed");
        assert_eq!(tc_queue.len(), 2);
        assert_eq!(tc_queue.pop_front().unwrap(), &SIMPLE_PACKET);
        assert_eq!(tc_queue.pop_front().unwrap(), &INVERTED_PACKET);
        drop(tc_queue);
    }
}
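A note on usage: the COBS server above is built from a ServerConfig plus a user supplied TM source and TC receiver, exactly as the unit tests do with SyncTmSource and SyncTcCacher. The following is a minimal sketch of how an application might wire it up. EchoTcHandler and EmptyTmSource are hypothetical placeholder types, and the sketch assumes that types implementing ReceivesTcCore and TmPacketSourceCore can be used where ReceivesTc and TmPacketSource are required, as the tests above rely on.

// Sketch only: mirrors the test setup of this commit. EchoTcHandler and
// EmptyTmSource are hypothetical application types, not part of the commit.
use core::time::Duration;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

use satrs::hal::std::tcp_server::{ServerConfig, TcpTmtcInCobsServer};
use satrs::tmtc::{ReceivesTcCore, TmPacketSourceCore};

#[derive(Default)]
struct EchoTcHandler;

impl ReceivesTcCore for EchoTcHandler {
    type Error = ();
    fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
        // Application specific handling of one decoded telecommand frame.
        println!("received {} byte TC", tc_raw.len());
        Ok(())
    }
}

#[derive(Default)]
struct EmptyTmSource;

impl TmPacketSourceCore for EmptyTmSource {
    type Error = ();
    fn retrieve_packet(&mut self, _buffer: &mut [u8]) -> Result<usize, Self::Error> {
        // No telemetry to send in this sketch.
        Ok(0)
    }
}

fn main() -> std::io::Result<()> {
    let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7301);
    let cfg = ServerConfig::new(addr, Duration::from_millis(2), 1024, 1024);
    let mut server: TcpTmtcInCobsServer<(), (), EmptyTmSource, EchoTcHandler> =
        TcpTmtcInCobsServer::new(cfg, EmptyTmSource::default(), EchoTcHandler::default())?;
    loop {
        // Blocks until a client connects, then exchanges COBS framed TMTC.
        if let Ok(conn) = server.handle_next_connection() {
            println!(
                "handled connection: {} TCs received, {} TMs sent",
                conn.num_received_tcs, conn.num_sent_tms
            );
        }
    }
}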
359
satrs/src/hal/std/tcp_server.rs
Normal file
359
satrs/src/hal/std/tcp_server.rs
Normal file
@ -0,0 +1,359 @@
//! Generic TCP TMTC servers with different TMTC format flavours.
use alloc::vec;
use alloc::vec::Vec;
use core::time::Duration;
use socket2::{Domain, Socket, Type};
use std::io::Read;
use std::net::TcpListener;
use std::net::{SocketAddr, TcpStream};
use std::thread;

use crate::tmtc::{ReceivesTc, TmPacketSource};
use thiserror::Error;

// Re-export the TMTC in COBS server.
pub use crate::hal::std::tcp_cobs_server::{CobsTcParser, CobsTmSender, TcpTmtcInCobsServer};
pub use crate::hal::std::tcp_spacepackets_server::{
    SpacepacketsTcParser, SpacepacketsTmSender, TcpSpacepacketsServer,
};

/// Configuration struct for the generic TCP TMTC server
///
/// ## Parameters
///
/// * `addr` - Address of the TCP server.
/// * `inner_loop_delay` - If a client connects for a longer period, but no TC is received or
///     no TM needs to be sent, the TCP server will delay for the specified amount of time
///     to reduce CPU load.
/// * `tm_buffer_size` - Size of the TM buffer used to read TM from the [TmPacketSource] and
///     encoding of that data. This buffer should be large enough to hold the maximum expected
///     TM size read from the packet source.
/// * `tc_buffer_size` - Size of the TC buffer used to read encoded telecommands sent from
///     the client. It is recommended to make this buffer larger to allow reading multiple
///     consecutive packets as well, for example by using common buffer sizes like 4096 or 8192
///     bytes. The buffer should at the very least be large enough to hold the maximum expected
///     telecommand size.
/// * `reuse_addr` - Can be used to set the `SO_REUSEADDR` option on the raw socket. This is
///     especially useful if the address and port are static for the server. Set to false by
///     default.
/// * `reuse_port` - Can be used to set the `SO_REUSEPORT` option on the raw socket. This is
///     especially useful if the address and port are static for the server. Set to false by
///     default.
#[derive(Debug, Copy, Clone)]
pub struct ServerConfig {
    pub addr: SocketAddr,
    pub inner_loop_delay: Duration,
    pub tm_buffer_size: usize,
    pub tc_buffer_size: usize,
    pub reuse_addr: bool,
    pub reuse_port: bool,
}

impl ServerConfig {
    pub fn new(
        addr: SocketAddr,
        inner_loop_delay: Duration,
        tm_buffer_size: usize,
        tc_buffer_size: usize,
    ) -> Self {
        Self {
            addr,
            inner_loop_delay,
            tm_buffer_size,
            tc_buffer_size,
            reuse_addr: false,
            reuse_port: false,
        }
    }
}

#[derive(Error, Debug)]
pub enum TcpTmtcError<TmError, TcError> {
    #[error("TM retrieval error: {0}")]
    TmError(TmError),
    #[error("TC retrieval error: {0}")]
    TcError(TcError),
    #[error("io error: {0}")]
    Io(#[from] std::io::Error),
}

/// Result of one connection attempt. Contains the client address if a connection was established,
/// in addition to the number of telecommands and telemetry packets exchanged.
#[derive(Debug, Default)]
pub struct ConnectionResult {
    pub addr: Option<SocketAddr>,
    pub num_received_tcs: u32,
    pub num_sent_tms: u32,
}

/// Generic parser abstraction for an object which can parse for telecommands given a raw
/// bytestream received from a TCP socket and send them to a generic [ReceivesTc] telecommand
/// receiver. This allows different encoding schemes for telecommands.
pub trait TcpTcParser<TmError, TcError> {
    fn handle_tc_parsing(
        &mut self,
        tc_buffer: &mut [u8],
        tc_receiver: &mut (impl ReceivesTc<Error = TcError> + ?Sized),
        conn_result: &mut ConnectionResult,
        current_write_idx: usize,
        next_write_idx: &mut usize,
    ) -> Result<(), TcpTmtcError<TmError, TcError>>;
}

/// Generic sender abstraction for an object which can pull telemetry from a given TM source
/// using a [TmPacketSource] and then send them back to a client using a given [TcpStream].
/// The concrete implementation can also perform any encoding steps which are necessary before
/// sending back the data to a client.
pub trait TcpTmSender<TmError, TcError> {
    fn handle_tm_sending(
        &mut self,
        tm_buffer: &mut [u8],
        tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized),
        conn_result: &mut ConnectionResult,
        stream: &mut TcpStream,
    ) -> Result<bool, TcpTmtcError<TmError, TcError>>;
}

/// TCP TMTC server implementation for exchange of generic TMTC packets in a generic way which
/// stays agnostic to the encoding scheme and format used for both telecommands and telemetry.
///
/// This server implements a generic TMTC handling logic and allows modifying its behaviour
/// through the following 4 core abstractions:
///
/// 1. [TcpTcParser] to parse for telecommands from the raw bytestream received from a client.
/// 2. Parsed telecommands will be sent to the [ReceivesTc] telecommand receiver.
/// 3. [TcpTmSender] to send telemetry pulled from a TM source back to the client.
/// 4. [TmPacketSource] as a generic TM source used by the [TcpTmSender].
///
/// It is possible to specify custom abstractions to build a dedicated TCP TMTC server without
/// having to re-implement common logic.
///
/// Currently, this framework offers the following concrete implementations:
///
/// 1. [TcpTmtcInCobsServer] to exchange TMTC wrapped inside the COBS framing protocol.
pub struct TcpTmtcGenericServer<
    TmError,
    TcError,
    TmSource: TmPacketSource<Error = TmError>,
    TcReceiver: ReceivesTc<Error = TcError>,
    TmSender: TcpTmSender<TmError, TcError>,
    TcParser: TcpTcParser<TmError, TcError>,
> {
    pub(crate) listener: TcpListener,
    pub(crate) inner_loop_delay: Duration,
    pub(crate) tm_source: TmSource,
    pub(crate) tm_buffer: Vec<u8>,
    pub(crate) tc_receiver: TcReceiver,
    pub(crate) tc_buffer: Vec<u8>,
    tc_handler: TcParser,
    tm_handler: TmSender,
}

impl<
        TmError: 'static,
        TcError: 'static,
        TmSource: TmPacketSource<Error = TmError>,
        TcReceiver: ReceivesTc<Error = TcError>,
        TmSender: TcpTmSender<TmError, TcError>,
        TcParser: TcpTcParser<TmError, TcError>,
    > TcpTmtcGenericServer<TmError, TcError, TmSource, TcReceiver, TmSender, TcParser>
{
    /// Create a new generic TMTC server instance.
    ///
    /// ## Parameter
    ///
    /// * `cfg` - Configuration of the server.
    /// * `tc_parser` - Parser which extracts telecommands from the raw bytestream received from
    ///     the client.
    /// * `tm_sender` - Sends back telemetry to the client using the specified TM source.
    /// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
    ///     then sent back to the client.
    /// * `tc_receiver` - Any received telecommand which was decoded successfully will be forwarded
    ///     to this TC receiver.
    pub fn new(
        cfg: ServerConfig,
        tc_parser: TcParser,
        tm_sender: TmSender,
        tm_source: TmSource,
        tc_receiver: TcReceiver,
    ) -> Result<Self, std::io::Error> {
        // Create a TCP listener bound to the configured address.
        let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
        socket.set_reuse_address(cfg.reuse_addr)?;
        #[cfg(unix)]
        socket.set_reuse_port(cfg.reuse_port)?;
        let addr = (cfg.addr).into();
        socket.bind(&addr)?;
        socket.listen(128)?;
        Ok(Self {
            tc_handler: tc_parser,
            tm_handler: tm_sender,
            listener: socket.into(),
            inner_loop_delay: cfg.inner_loop_delay,
            tm_source,
            tm_buffer: vec![0; cfg.tm_buffer_size],
            tc_receiver,
            tc_buffer: vec![0; cfg.tc_buffer_size],
        })
    }

    /// Retrieve the internal [TcpListener] class.
    pub fn listener(&mut self) -> &mut TcpListener {
        &mut self.listener
    }

    /// Can be used to retrieve the local assigned address of the TCP server. This is especially
    /// useful if using the port number 0 for OS auto-assignment.
    pub fn local_addr(&self) -> std::io::Result<SocketAddr> {
        self.listener.local_addr()
    }

    /// This call is used to handle the next connection to a client. Right now, it performs
    /// the following steps:
    ///
    /// 1. It calls the [std::net::TcpListener::accept] method internally using the blocking API
    ///     until a client connects.
    /// 2. It reads all the telecommands from the client and parses all received data using the
    ///     user specified [TcpTcParser].
    /// 3. After reading and parsing all telecommands, it sends back all telemetry using the
    ///     user specified [TcpTmSender].
    ///
    /// The server will delay for a user-specified period if the client stays connected for a
    /// prolonged time without any traffic for the server. This is the case if the client does
    /// not send any telecommands and no telemetry needs to be sent back to the client.
    pub fn handle_next_connection(
        &mut self,
    ) -> Result<ConnectionResult, TcpTmtcError<TmError, TcError>> {
        let mut connection_result = ConnectionResult::default();
        let mut current_write_idx;
        let mut next_write_idx = 0;
        let (mut stream, addr) = self.listener.accept()?;
        stream.set_nonblocking(true)?;
        connection_result.addr = Some(addr);
        current_write_idx = next_write_idx;
        loop {
            let read_result = stream.read(&mut self.tc_buffer[current_write_idx..]);
            match read_result {
                Ok(0) => {
                    // Connection closed by client. If any TC was read, parse for complete packets.
                    // After that, break the outer loop.
                    if current_write_idx > 0 {
                        self.tc_handler.handle_tc_parsing(
                            &mut self.tc_buffer,
                            &mut self.tc_receiver,
                            &mut connection_result,
                            current_write_idx,
                            &mut next_write_idx,
                        )?;
                    }
                    break;
                }
                Ok(read_len) => {
                    current_write_idx += read_len;
                    // TC buffer is full, we must parse for complete packets now.
                    if current_write_idx == self.tc_buffer.capacity() {
                        self.tc_handler.handle_tc_parsing(
                            &mut self.tc_buffer,
                            &mut self.tc_receiver,
                            &mut connection_result,
                            current_write_idx,
                            &mut next_write_idx,
                        )?;
                        current_write_idx = next_write_idx;
                    }
                }
                Err(e) => match e.kind() {
                    // As per [TcpStream::set_read_timeout] documentation, this should work for
                    // both UNIX and Windows.
                    std::io::ErrorKind::WouldBlock | std::io::ErrorKind::TimedOut => {
                        self.tc_handler.handle_tc_parsing(
                            &mut self.tc_buffer,
                            &mut self.tc_receiver,
                            &mut connection_result,
                            current_write_idx,
                            &mut next_write_idx,
                        )?;
                        current_write_idx = next_write_idx;

                        if !self.tm_handler.handle_tm_sending(
                            &mut self.tm_buffer,
                            &mut self.tm_source,
                            &mut connection_result,
                            &mut stream,
                        )? {
                            // No TC read, no TM was sent, but the client has not disconnected.
                            // Perform an inner delay to avoid burning CPU time.
                            thread::sleep(self.inner_loop_delay);
                        }
                    }
                    _ => {
                        return Err(TcpTmtcError::Io(e));
                    }
                },
            }
        }
        self.tm_handler.handle_tm_sending(
            &mut self.tm_buffer,
            &mut self.tm_source,
            &mut connection_result,
            &mut stream,
        )?;
        Ok(connection_result)
    }
}

#[cfg(test)]
pub(crate) mod tests {
    use std::sync::Mutex;

    use alloc::{collections::VecDeque, sync::Arc, vec::Vec};

    use crate::tmtc::{ReceivesTcCore, TmPacketSourceCore};

    #[derive(Default, Clone)]
    pub(crate) struct SyncTcCacher {
        pub(crate) tc_queue: Arc<Mutex<VecDeque<Vec<u8>>>>,
    }
    impl ReceivesTcCore for SyncTcCacher {
        type Error = ();

        fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
            let mut tc_queue = self.tc_queue.lock().expect("tc forwarder failed");
            tc_queue.push_back(tc_raw.to_vec());
            Ok(())
        }
    }

    #[derive(Default, Clone)]
    pub(crate) struct SyncTmSource {
        tm_queue: Arc<Mutex<VecDeque<Vec<u8>>>>,
    }

    impl SyncTmSource {
        pub(crate) fn add_tm(&mut self, tm: &[u8]) {
            let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
            tm_queue.push_back(tm.to_vec());
        }
    }

    impl TmPacketSourceCore for SyncTmSource {
        type Error = ();

        fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
            let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
            if !tm_queue.is_empty() {
                let next_vec = tm_queue.front().unwrap();
                if buffer.len() < next_vec.len() {
                    panic!(
                        "provided buffer too small, must be at least {} bytes",
                        next_vec.len()
                    );
                }
                let next_vec = tm_queue.pop_front().unwrap();
                buffer[0..next_vec.len()].copy_from_slice(&next_vec);
                return Ok(next_vec.len());
            }
            Ok(0)
        }
    }
}
362
satrs/src/hal/std/tcp_spacepackets_server.rs
Normal file
362
satrs/src/hal/std/tcp_spacepackets_server.rs
Normal file
@ -0,0 +1,362 @@
use delegate::delegate;
use std::{
    io::Write,
    net::{SocketAddr, TcpListener, TcpStream},
};

use alloc::boxed::Box;

use crate::{
    encoding::{ccsds::PacketIdLookup, parse_buffer_for_ccsds_space_packets},
    tmtc::{ReceivesTc, TmPacketSource},
};

use super::tcp_server::{
    ConnectionResult, ServerConfig, TcpTcParser, TcpTmSender, TcpTmtcError, TcpTmtcGenericServer,
};

/// Concrete [TcpTcParser] implementation for the [TcpSpacepacketsServer].
pub struct SpacepacketsTcParser {
    packet_id_lookup: Box<dyn PacketIdLookup + Send>,
}

impl SpacepacketsTcParser {
    pub fn new(packet_id_lookup: Box<dyn PacketIdLookup + Send>) -> Self {
        Self { packet_id_lookup }
    }
}

impl<TmError, TcError: 'static> TcpTcParser<TmError, TcError> for SpacepacketsTcParser {
    fn handle_tc_parsing(
        &mut self,
        tc_buffer: &mut [u8],
        tc_receiver: &mut (impl ReceivesTc<Error = TcError> + ?Sized),
        conn_result: &mut ConnectionResult,
        current_write_idx: usize,
        next_write_idx: &mut usize,
    ) -> Result<(), TcpTmtcError<TmError, TcError>> {
        // Reader vec full, need to parse for packets.
        conn_result.num_received_tcs += parse_buffer_for_ccsds_space_packets(
            &mut tc_buffer[..current_write_idx],
            self.packet_id_lookup.as_ref(),
            tc_receiver.upcast_mut(),
            next_write_idx,
        )
        .map_err(|e| TcpTmtcError::TcError(e))?;
        Ok(())
    }
}

/// Concrete [TcpTmSender] implementation for the [TcpSpacepacketsServer].
#[derive(Default)]
pub struct SpacepacketsTmSender {}

impl<TmError, TcError> TcpTmSender<TmError, TcError> for SpacepacketsTmSender {
    fn handle_tm_sending(
        &mut self,
        tm_buffer: &mut [u8],
        tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized),
        conn_result: &mut ConnectionResult,
        stream: &mut TcpStream,
    ) -> Result<bool, TcpTmtcError<TmError, TcError>> {
        let mut tm_was_sent = false;
        loop {
            // Write TM until TM source is exhausted. For now, there is no limit for the amount
            // of TM written this way.
            let read_tm_len = tm_source
                .retrieve_packet(tm_buffer)
                .map_err(|e| TcpTmtcError::TmError(e))?;

            if read_tm_len == 0 {
                return Ok(tm_was_sent);
            }
            tm_was_sent = true;
            conn_result.num_sent_tms += 1;

            stream.write_all(&tm_buffer[..read_tm_len])?;
        }
    }
}

/// TCP TMTC server implementation for exchange of tightly stuffed
/// [CCSDS space packets](https://public.ccsds.org/Pubs/133x0b2e1.pdf).
///
/// This server only works if
/// [CCSDS 133.0-B-2 space packets](https://public.ccsds.org/Pubs/133x0b2e1.pdf) are the only
/// packet type being exchanged. It uses the CCSDS [spacepackets::PacketId] as the packet delimiter
/// and start marker when parsing for packets. The user specifies a set of expected
/// [spacepackets::PacketId]s as part of the server configuration for that purpose.
///
/// ## Example
/// The [TCP server integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-core/tests/tcp_servers.rs)
/// also serve as the example application for this module.
pub struct TcpSpacepacketsServer<
    TmError,
    TcError: 'static,
    TmSource: TmPacketSource<Error = TmError>,
    TcReceiver: ReceivesTc<Error = TcError>,
> {
    generic_server: TcpTmtcGenericServer<
        TmError,
        TcError,
        TmSource,
        TcReceiver,
        SpacepacketsTmSender,
        SpacepacketsTcParser,
    >,
}

impl<
        TmError: 'static,
        TcError: 'static,
        TmSource: TmPacketSource<Error = TmError>,
        TcReceiver: ReceivesTc<Error = TcError>,
    > TcpSpacepacketsServer<TmError, TcError, TmSource, TcReceiver>
{
    ///
    /// ## Parameter
    ///
    /// * `cfg` - Configuration of the server.
    /// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
    ///     then sent back to the client.
    /// * `tc_receiver` - Any received telecommands which were decoded successfully will be
    ///     forwarded to this TC receiver.
    /// * `packet_id_lookup` - This lookup table contains the relevant packet IDs for packet
    ///     parsing. This mechanism is used to have a start marker for finding CCSDS packets.
    pub fn new(
        cfg: ServerConfig,
        tm_source: TmSource,
        tc_receiver: TcReceiver,
        packet_id_lookup: Box<dyn PacketIdLookup + Send>,
    ) -> Result<Self, std::io::Error> {
        Ok(Self {
            generic_server: TcpTmtcGenericServer::new(
                cfg,
                SpacepacketsTcParser::new(packet_id_lookup),
                SpacepacketsTmSender::default(),
                tm_source,
                tc_receiver,
            )?,
        })
    }

    delegate! {
        to self.generic_server {
            pub fn listener(&mut self) -> &mut TcpListener;

            /// Can be used to retrieve the local assigned address of the TCP server. This is especially
            /// useful if using the port number 0 for OS auto-assignment.
            pub fn local_addr(&self) -> std::io::Result<SocketAddr>;

            /// Delegation to the [TcpTmtcGenericServer::handle_next_connection] call.
            pub fn handle_next_connection(
                &mut self,
            ) -> Result<ConnectionResult, TcpTmtcError<TmError, TcError>>;
        }
    }
}

#[cfg(test)]
mod tests {
    use core::{
        sync::atomic::{AtomicBool, Ordering},
        time::Duration,
    };
    #[allow(unused_imports)]
    use std::println;
    use std::{
        io::{Read, Write},
        net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream},
        thread,
    };

    use alloc::{boxed::Box, sync::Arc};
    use hashbrown::HashSet;
    use spacepackets::{
        ecss::{tc::PusTcCreator, WritablePusPacket},
        PacketId, SpHeader,
    };

    use crate::hal::std::tcp_server::{
        tests::{SyncTcCacher, SyncTmSource},
        ServerConfig,
    };

    use super::TcpSpacepacketsServer;

    const TEST_APID_0: u16 = 0x02;
    const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0);
    const TEST_APID_1: u16 = 0x10;
    const TEST_PACKET_ID_1: PacketId = PacketId::const_tc(true, TEST_APID_1);

    fn generic_tmtc_server(
        addr: &SocketAddr,
        tc_receiver: SyncTcCacher,
        tm_source: SyncTmSource,
        packet_id_lookup: HashSet<PacketId>,
    ) -> TcpSpacepacketsServer<(), (), SyncTmSource, SyncTcCacher> {
        TcpSpacepacketsServer::new(
            ServerConfig::new(*addr, Duration::from_millis(2), 1024, 1024),
            tm_source,
            tc_receiver,
            Box::new(packet_id_lookup),
        )
        .expect("TCP server generation failed")
    }

    #[test]
    fn test_basic_tc_only() {
        let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
        let tc_receiver = SyncTcCacher::default();
        let tm_source = SyncTmSource::default();
        let mut packet_id_lookup = HashSet::new();
        packet_id_lookup.insert(TEST_PACKET_ID_0);
        let mut tcp_server = generic_tmtc_server(
            &auto_port_addr,
            tc_receiver.clone(),
            tm_source,
            packet_id_lookup,
        );
        let dest_addr = tcp_server
            .local_addr()
            .expect("retrieving dest addr failed");
        let conn_handled: Arc<AtomicBool> = Default::default();
        let set_if_done = conn_handled.clone();
        // Call the connection handler in a separate thread; it blocks.
        thread::spawn(move || {
            let result = tcp_server.handle_next_connection();
            if result.is_err() {
                panic!("handling connection failed: {:?}", result.unwrap_err());
            }
            let conn_result = result.unwrap();
            assert_eq!(conn_result.num_received_tcs, 1);
            assert_eq!(conn_result.num_sent_tms, 0);
            set_if_done.store(true, Ordering::Relaxed);
        });
        let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
        let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
        let tc_0 = ping_tc.to_vec().expect("packet generation failed");
        let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
        stream
            .write_all(&tc_0)
            .expect("writing to TCP server failed");
        drop(stream);

        // A certain amount of time is allowed for the transaction to complete.
        for _ in 0..3 {
            if !conn_handled.load(Ordering::Relaxed) {
                thread::sleep(Duration::from_millis(5));
            }
        }
        if !conn_handled.load(Ordering::Relaxed) {
            panic!("connection was not handled properly");
        }
        // Check that the TC has arrived.
        let mut tc_queue = tc_receiver.tc_queue.lock().unwrap();
        assert_eq!(tc_queue.len(), 1);
        assert_eq!(tc_queue.pop_front().unwrap(), tc_0);
    }

    #[test]
    fn test_multi_tc_multi_tm() {
        let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
        let tc_receiver = SyncTcCacher::default();
        let mut tm_source = SyncTmSource::default();

        // Add telemetry
        let mut total_tm_len = 0;
        let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
        let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 1, None, true);
        let tm_0 = verif_tm.to_vec().expect("writing packet failed");
        total_tm_len += tm_0.len();
        tm_source.add_tm(&tm_0);
        let mut sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
        let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 3, None, true);
        let tm_1 = verif_tm.to_vec().expect("writing packet failed");
        total_tm_len += tm_1.len();
        tm_source.add_tm(&tm_1);

        // Set up server
        let mut packet_id_lookup = HashSet::new();
        packet_id_lookup.insert(TEST_PACKET_ID_0);
        packet_id_lookup.insert(TEST_PACKET_ID_1);
        let mut tcp_server = generic_tmtc_server(
            &auto_port_addr,
            tc_receiver.clone(),
            tm_source,
            packet_id_lookup,
        );
        let dest_addr = tcp_server
            .local_addr()
            .expect("retrieving dest addr failed");
        let conn_handled: Arc<AtomicBool> = Default::default();
        let set_if_done = conn_handled.clone();

        // Call the connection handler in a separate thread; it blocks.
        thread::spawn(move || {
            let result = tcp_server.handle_next_connection();
            if result.is_err() {
                panic!("handling connection failed: {:?}", result.unwrap_err());
            }
            let conn_result = result.unwrap();
            assert_eq!(
                conn_result.num_received_tcs, 2,
                "wrong number of received TCs"
            );
            assert_eq!(conn_result.num_sent_tms, 2, "wrong number of sent TMs");
            set_if_done.store(true, Ordering::Relaxed);
        });
        let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
        stream
            .set_read_timeout(Some(Duration::from_millis(10)))
            .expect("setting read timeout failed");

        // Send telecommands
        let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
        let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
        let tc_0 = ping_tc.to_vec().expect("ping tc creation failed");
        stream
            .write_all(&tc_0)
            .expect("writing to TCP server failed");
        let mut sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
        let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
        let tc_1 = action_tc.to_vec().expect("action tc creation failed");
        stream
            .write_all(&tc_1)
            .expect("writing to TCP server failed");

        // Done with writing.
        stream
            .shutdown(std::net::Shutdown::Write)
            .expect("shutting down write failed");
        let mut read_buf: [u8; 32] = [0; 32];
        let mut current_idx = 0;
        let mut read_len_total = 0;
        // Timeout ensures this does not block forever.
        while read_len_total < total_tm_len {
            let read_len = stream
                .read(&mut read_buf[current_idx..])
                .expect("read failed");
            current_idx += read_len;
            read_len_total += read_len;
        }
        drop(stream);
        assert_eq!(read_buf[..tm_0.len()], tm_0);
        assert_eq!(read_buf[tm_0.len()..tm_0.len() + tm_1.len()], tm_1);

        // A certain amount of time is allowed for the transaction to complete.
        for _ in 0..3 {
            if !conn_handled.load(Ordering::Relaxed) {
                thread::sleep(Duration::from_millis(5));
            }
        }
        if !conn_handled.load(Ordering::Relaxed) {
            panic!("connection was not handled properly");
        }
        // Check that the TCs have arrived.
        let mut tc_queue = tc_receiver.tc_queue.lock().unwrap();
        assert_eq!(tc_queue.len(), 2);
        assert_eq!(tc_queue.pop_front().unwrap(), tc_0);
        assert_eq!(tc_queue.pop_front().unwrap(), tc_1);
    }
}
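Client side, this server expects raw, back-to-back CCSDS space packets on the stream; the packet IDs configured in the lookup table act as the start markers used by parse_buffer_for_ccsds_space_packets, and no additional framing layer is involved. A small client sketch along the lines of the test above, with the server address passed in as a placeholder parameter:

// Sketch only: a client sending one PUS ping TC to a running TcpSpacepacketsServer.
// The server address is a placeholder; APID 0x02 is assumed to be part of the
// packet ID lookup configured on the server side.
use std::io::Write;
use std::net::TcpStream;

use spacepackets::ecss::tc::PusTcCreator;
use spacepackets::ecss::WritablePusPacket;
use spacepackets::SpHeader;

fn send_ping(server_addr: &str) -> std::io::Result<()> {
    let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
    let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
    let raw_tc = ping_tc.to_vec().expect("creating ping TC failed");
    let mut stream = TcpStream::connect(server_addr)?;
    // No extra framing: the CCSDS header itself delimits the packet.
    stream.write_all(&raw_tc)?;
    // Shut down the write direction so the server knows no more TCs will follow.
    stream.shutdown(std::net::Shutdown::Write)?;
    Ok(())
}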
215
satrs/src/hal/std/udp_server.rs
Normal file
215
satrs/src/hal/std/udp_server.rs
Normal file
@ -0,0 +1,215 @@
//! Generic UDP TC server.
use crate::tmtc::{ReceivesTc, ReceivesTcCore};
use std::boxed::Box;
use std::io::{Error, ErrorKind};
use std::net::{SocketAddr, ToSocketAddrs, UdpSocket};
use std::vec;
use std::vec::Vec;

/// This UDP server can be used to receive CCSDS space packet telecommands or any other telecommand
/// format.
///
/// It caches all received telecommands into a vector. The maximum expected telecommand size should
/// be declared upfront. This avoids dynamic allocation during run-time. The user can specify a TC
/// receiver in form of a special trait object which implements [ReceivesTc]. Please note that the
/// receiver should copy out the received data if the data is required past the
/// [ReceivesTcCore::pass_tc] call.
///
/// # Examples
///
/// ```
/// use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
/// use spacepackets::ecss::WritablePusPacket;
/// use satrs::hal::std::udp_server::UdpTcServer;
/// use satrs::tmtc::{ReceivesTc, ReceivesTcCore};
/// use spacepackets::SpHeader;
/// use spacepackets::ecss::tc::PusTcCreator;
///
/// #[derive(Default)]
/// struct PingReceiver {}
/// impl ReceivesTcCore for PingReceiver {
///     type Error = ();
///     fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
///         assert_eq!(tc_raw.len(), 13);
///         Ok(())
///     }
/// }
///
/// let mut buf = [0; 32];
/// let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7777);
/// let ping_receiver = PingReceiver::default();
/// let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver))
///     .expect("Creating UDP TMTC server failed");
/// let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
/// let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
/// let len = pus_tc
///     .write_to_bytes(&mut buf)
///     .expect("Error writing PUS TC packet");
/// assert_eq!(len, 13);
/// let client = UdpSocket::bind("127.0.0.1:7778").expect("Connecting to UDP server failed");
/// client
///     .send_to(&buf[0..len], dest_addr)
///     .expect("Error sending PUS TC via UDP");
/// ```
///
/// The [satrs-example crate](https://egit.irs.uni-stuttgart.de/rust/fsrc-launchpad/src/branch/main/satrs-example)
/// server code also includes
/// [example code](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example/src/tmtc.rs#L67)
/// on how to use this TC server. It uses the server to receive PUS telecommands on a specific port
/// and then forwards them to a generic CCSDS packet receiver.
pub struct UdpTcServer<E> {
    pub socket: UdpSocket,
    recv_buf: Vec<u8>,
    sender_addr: Option<SocketAddr>,
    tc_receiver: Box<dyn ReceivesTc<Error = E>>,
}

#[derive(Debug)]
pub enum ReceiveResult<E> {
    NothingReceived,
    IoError(Error),
    ReceiverError(E),
}

impl<E> From<Error> for ReceiveResult<E> {
    fn from(e: Error) -> Self {
        ReceiveResult::IoError(e)
    }
}

impl<E: PartialEq> PartialEq for ReceiveResult<E> {
    fn eq(&self, other: &Self) -> bool {
        use ReceiveResult::*;
        match (self, other) {
            (IoError(ref e), IoError(ref other_e)) => e.kind() == other_e.kind(),
            (NothingReceived, NothingReceived) => true,
            (ReceiverError(e), ReceiverError(other_e)) => e == other_e,
            _ => false,
        }
    }
}

impl<E: Eq + PartialEq> Eq for ReceiveResult<E> {}

impl<E: 'static> ReceivesTcCore for UdpTcServer<E> {
    type Error = E;

    fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
        self.tc_receiver.pass_tc(tc_raw)
    }
}

impl<E: 'static> UdpTcServer<E> {
    pub fn new<A: ToSocketAddrs>(
        addr: A,
        max_recv_size: usize,
        tc_receiver: Box<dyn ReceivesTc<Error = E>>,
    ) -> Result<Self, Error> {
        let server = Self {
            socket: UdpSocket::bind(addr)?,
            recv_buf: vec![0; max_recv_size],
            sender_addr: None,
            tc_receiver,
        };
        server.socket.set_nonblocking(true)?;
        Ok(server)
    }

    pub fn try_recv_tc(&mut self) -> Result<(usize, SocketAddr), ReceiveResult<E>> {
        let res = match self.socket.recv_from(&mut self.recv_buf) {
            Ok(res) => res,
            Err(e) => {
                return if e.kind() == ErrorKind::WouldBlock || e.kind() == ErrorKind::TimedOut {
                    Err(ReceiveResult::NothingReceived)
                } else {
                    Err(e.into())
                }
            }
        };
        let (num_bytes, from) = res;
        self.sender_addr = Some(from);
        self.tc_receiver
            .pass_tc(&self.recv_buf[0..num_bytes])
            .map_err(|e| ReceiveResult::ReceiverError(e))?;
        Ok(res)
    }

    pub fn last_sender(&self) -> Option<SocketAddr> {
        self.sender_addr
    }
}

#[cfg(test)]
mod tests {
    use crate::hal::std::udp_server::{ReceiveResult, UdpTcServer};
    use crate::tmtc::ReceivesTcCore;
    use spacepackets::ecss::tc::PusTcCreator;
    use spacepackets::ecss::WritablePusPacket;
    use spacepackets::SpHeader;
    use std::boxed::Box;
    use std::collections::VecDeque;
    use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
    use std::vec::Vec;

    fn is_send<T: Send>(_: &T) {}

    #[derive(Default)]
    struct PingReceiver {
        pub sent_cmds: VecDeque<Vec<u8>>,
    }

    impl ReceivesTcCore for PingReceiver {
        type Error = ();

        fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
            let mut sent_data = Vec::new();
            sent_data.extend_from_slice(tc_raw);
            self.sent_cmds.push_back(sent_data);
            Ok(())
        }
    }

    #[test]
    fn basic_test() {
        let mut buf = [0; 32];
        let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7777);
        let ping_receiver = PingReceiver::default();
        is_send(&ping_receiver);
        let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver))
            .expect("Creating UDP TMTC server failed");
        is_send(&udp_tc_server);
        let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
        let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
        let len = pus_tc
            .write_to_bytes(&mut buf)
            .expect("Error writing PUS TC packet");
        let client = UdpSocket::bind("127.0.0.1:7778").expect("Connecting to UDP server failed");
        client
            .send_to(&buf[0..len], dest_addr)
            .expect("Error sending PUS TC via UDP");
        let local_addr = client.local_addr().unwrap();
        udp_tc_server
            .try_recv_tc()
            .expect("Error receiving sent telecommand");
        assert_eq!(
            udp_tc_server.last_sender().expect("No sender set"),
            local_addr
        );
        let ping_receiver: &mut PingReceiver = udp_tc_server.tc_receiver.downcast_mut().unwrap();
        assert_eq!(ping_receiver.sent_cmds.len(), 1);
        let sent_cmd = ping_receiver.sent_cmds.pop_front().unwrap();
        assert_eq!(sent_cmd, buf[0..len]);
    }

    #[test]
    fn test_nothing_received() {
        let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7779);
        let ping_receiver = PingReceiver::default();
        let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver))
            .expect("Creating UDP TMTC server failed");
        let res = udp_tc_server.try_recv_tc();
        assert!(res.is_err());
        let err = res.unwrap_err();
        assert_eq!(err, ReceiveResult::NothingReceived);
    }
}
16
satrs/src/hk.rs
Normal file
16
satrs/src/hk.rs
Normal file
@ -0,0 +1,16 @@
pub type CollectionIntervalFactor = u32;
pub type UniqueId = u32;

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum HkRequest {
    OneShot(UniqueId),
    Enable(UniqueId),
    Disable(UniqueId),
    ModifyCollectionInterval(UniqueId, CollectionIntervalFactor),
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct TargetedHkRequest {
    target: u32,
    hk_request: HkRequest,
}
50
satrs/src/lib.rs
Normal file
50
satrs/src/lib.rs
Normal file
@ -0,0 +1,50 @@
//! # sat-rs: A framework to build on-board software for remote systems
//!
//! You can find more information about the sat-rs framework on the
//! [homepage](https://egit.irs.uni-stuttgart.de/rust/sat-rs).
//!
//! ## Overview
//!
//! The core modules of this crate include
//!
//!  - The [event manager][event_man] module which provides a publish and
//!    subscribe mechanism to route events.
//!  - The [pus] module which provides special support for projects using
//!    the [ECSS PUS C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
#![no_std]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
extern crate downcast_rs;
#[cfg(any(feature = "std", test))]
extern crate std;

#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod cfdp;
pub mod encoding;
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod event_man;
pub mod events;
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod executable;
pub mod hal;
pub mod hk;
pub mod mode;
pub mod objects;
pub mod params;
pub mod pool;
pub mod power;
pub mod pus;
pub mod request;
pub mod res_code;
pub mod seq_count;
pub mod tmtc;

pub use spacepackets;

// Generic channel ID type.
pub type ChannelId = u32;
94
satrs/src/mode.rs
Normal file
94
satrs/src/mode.rs
Normal file
@ -0,0 +1,94 @@
use crate::tmtc::TargetId;
use core::mem::size_of;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use spacepackets::ByteConversionError;

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ModeAndSubmode {
    mode: u32,
    submode: u16,
}

impl ModeAndSubmode {
    pub const fn new_mode_only(mode: u32) -> Self {
        Self { mode, submode: 0 }
    }

    pub const fn new(mode: u32, submode: u16) -> Self {
        Self { mode, submode }
    }

    pub fn raw_len() -> usize {
        size_of::<u32>() + size_of::<u16>()
    }

    pub fn from_be_bytes(buf: &[u8]) -> Result<Self, ByteConversionError> {
        if buf.len() < 6 {
            return Err(ByteConversionError::FromSliceTooSmall {
                expected: 6,
                found: buf.len(),
            });
        }
        Ok(Self {
            mode: u32::from_be_bytes(buf[0..4].try_into().unwrap()),
            submode: u16::from_be_bytes(buf[4..6].try_into().unwrap()),
        })
    }

    pub fn mode(&self) -> u32 {
        self.mode
    }

    pub fn submode(&self) -> u16 {
        self.submode
    }
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ModeCommand {
    pub address: TargetId,
    pub mode_submode: ModeAndSubmode,
}

impl ModeCommand {
    pub const fn new(address: TargetId, mode_submode: ModeAndSubmode) -> Self {
        Self {
            address,
            mode_submode,
        }
    }

    pub fn address(&self) -> TargetId {
        self.address
    }

    pub fn mode_submode(&self) -> ModeAndSubmode {
        self.mode_submode
    }

    pub fn mode(&self) -> u32 {
        self.mode_submode.mode
    }

    pub fn submode(&self) -> u16 {
        self.mode_submode.submode
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum ModeRequest {
    SetMode(ModeAndSubmode),
    ReadMode,
    AnnounceMode,
    AnnounceModeRecursive,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct TargetedModeRequest {
    target_id: TargetId,
    mode_request: ModeRequest,
}
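A minimal usage sketch for the mode primitives above. The target address value is hypothetical, and `TargetId` is assumed to be the integer type alias from `satrs::tmtc`:

```rust
use satrs::mode::{ModeAndSubmode, ModeCommand, ModeRequest};

fn main() {
    // Build a mode/submode pair and command a (hypothetical) target with it.
    let safe_mode = ModeAndSubmode::new(1, 0);
    let cmd = ModeCommand::new(0x0102, safe_mode);
    assert_eq!(cmd.mode(), 1);
    assert_eq!(cmd.submode(), 0);

    // Deserialize a mode/submode pair from a big endian byte stream.
    let raw: [u8; 6] = [0, 0, 0, 1, 0, 2];
    let from_raw = ModeAndSubmode::from_be_bytes(&raw).expect("deserialization failed");
    assert_eq!(from_raw.mode(), 1);
    assert_eq!(from_raw.submode(), 2);

    // Requests are plain enums which the application can route to its mode handlers.
    let _request = ModeRequest::SetMode(safe_mode);
}
```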
307
satrs/src/objects.rs
Normal file
@ -0,0 +1,307 @@
|
||||
//! # Module providing addressable object support and a manager for them
|
||||
//!
|
||||
//! Each addressable object can be identified using an [object ID][ObjectId].
|
||||
//! The [system object][ManagedSystemObject] trait also allows storing these objects into the
|
||||
//! [object manager][ObjectManager]. They can then be retrieved and cast back to a known type
|
||||
//! using the object ID.
|
||||
//!
|
||||
//! # Examples
|
||||
//!
|
||||
//! ```rust
|
||||
//! use std::any::Any;
|
||||
//! use std::error::Error;
|
||||
//! use satrs::objects::{ManagedSystemObject, ObjectId, ObjectManager, SystemObject};
|
||||
//!
|
||||
//! struct ExampleSysObj {
|
||||
//! id: ObjectId,
|
||||
//! dummy: u32,
|
||||
//! was_initialized: bool,
|
||||
//! }
|
||||
//!
|
||||
//! impl ExampleSysObj {
|
||||
//! fn new(id: ObjectId, dummy: u32) -> ExampleSysObj {
|
||||
//! ExampleSysObj {
|
||||
//! id,
|
||||
//! dummy,
|
||||
//! was_initialized: false,
|
||||
//! }
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! impl SystemObject for ExampleSysObj {
|
||||
//! type Error = ();
|
||||
//! fn get_object_id(&self) -> &ObjectId {
|
||||
//! &self.id
|
||||
//! }
|
||||
//!
|
||||
//! fn initialize(&mut self) -> Result<(), Self::Error> {
|
||||
//! self.was_initialized = true;
|
||||
//! Ok(())
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! impl ManagedSystemObject for ExampleSysObj {}
|
||||
//!
|
||||
//! let mut obj_manager = ObjectManager::default();
|
||||
//! let obj_id = ObjectId { id: 0, name: "Example 0"};
|
||||
//! let example_obj = ExampleSysObj::new(obj_id, 42);
|
||||
//! obj_manager.insert(Box::new(example_obj));
|
||||
//! let obj_back_casted: Option<&ExampleSysObj> = obj_manager.get_ref(&obj_id);
|
||||
//! let example_obj = obj_back_casted.unwrap();
|
||||
//! assert_eq!(example_obj.id, obj_id);
|
||||
//! assert_eq!(example_obj.dummy, 42);
|
||||
//! ```
|
||||
use crate::tmtc::TargetId;
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::boxed::Box;
|
||||
#[cfg(feature = "alloc")]
|
||||
pub use alloc_mod::*;
|
||||
#[cfg(feature = "alloc")]
|
||||
use downcast_rs::Downcast;
|
||||
#[cfg(feature = "alloc")]
|
||||
use hashbrown::HashMap;
|
||||
#[cfg(feature = "std")]
|
||||
use std::error::Error;
|
||||
|
||||
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
|
||||
pub struct ObjectId {
|
||||
pub id: TargetId,
|
||||
pub name: &'static str,
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub mod alloc_mod {
|
||||
use super::*;
|
||||
|
||||
/// Each object which is stored inside the [object manager][ObjectManager] needs to implement
/// this trait.
|
||||
pub trait SystemObject: Downcast {
|
||||
type Error;
|
||||
fn get_object_id(&self) -> &ObjectId;
|
||||
fn initialize(&mut self) -> Result<(), Self::Error>;
|
||||
}
|
||||
downcast_rs::impl_downcast!(SystemObject assoc Error);
|
||||
|
||||
pub trait ManagedSystemObject: SystemObject + Send {}
|
||||
downcast_rs::impl_downcast!(ManagedSystemObject assoc Error);
|
||||
|
||||
/// Helper module to manage multiple [ManagedSystemObjects][ManagedSystemObject] by mapping them
|
||||
/// using an [object ID][ObjectId]
|
||||
#[cfg(feature = "alloc")]
|
||||
pub struct ObjectManager<E> {
|
||||
obj_map: HashMap<ObjectId, Box<dyn ManagedSystemObject<Error = E>>>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl<E: 'static> Default for ObjectManager<E> {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl<E: 'static> ObjectManager<E> {
|
||||
pub fn new() -> Self {
|
||||
ObjectManager {
|
||||
obj_map: HashMap::new(),
|
||||
}
|
||||
}
|
||||
pub fn insert(&mut self, sys_obj: Box<dyn ManagedSystemObject<Error = E>>) -> bool {
|
||||
let obj_id = sys_obj.get_object_id();
|
||||
if self.obj_map.contains_key(obj_id) {
|
||||
return false;
|
||||
}
|
||||
self.obj_map.insert(*obj_id, sys_obj).is_none()
|
||||
}
|
||||
|
||||
/// Initializes all System Objects in the hash map and returns the number of successful
|
||||
/// initializations
|
||||
pub fn initialize(&mut self) -> Result<u32, Box<dyn Error>> {
|
||||
let mut init_success = 0;
|
||||
for val in self.obj_map.values_mut() {
|
||||
if val.initialize().is_ok() {
|
||||
init_success += 1
|
||||
}
|
||||
}
|
||||
Ok(init_success)
|
||||
}
|
||||
|
||||
/// Retrieve a reference to an object stored inside the manager. The type to retrieve needs to
|
||||
/// be explicitly passed as a generic parameter or specified on the left-hand side of the
|
||||
/// expression.
|
||||
pub fn get_ref<T: ManagedSystemObject<Error = E>>(&self, key: &ObjectId) -> Option<&T> {
|
||||
self.obj_map.get(key).and_then(|o| o.downcast_ref::<T>())
|
||||
}
|
||||
|
||||
/// Retrieve a mutable reference to an object stored inside the manager. The type to retrieve
|
||||
/// needs to be explicitly passed as a generic parameter or specified on the left-hand side
|
||||
/// of the expression.
|
||||
pub fn get_mut<T: ManagedSystemObject<Error = E>>(
|
||||
&mut self,
|
||||
key: &ObjectId,
|
||||
) -> Option<&mut T> {
|
||||
self.obj_map
|
||||
.get_mut(key)
|
||||
.and_then(|o| o.downcast_mut::<T>())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::objects::{ManagedSystemObject, ObjectId, ObjectManager, SystemObject};
|
||||
use std::boxed::Box;
|
||||
use std::string::String;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread;
|
||||
|
||||
struct ExampleSysObj {
|
||||
id: ObjectId,
|
||||
dummy: u32,
|
||||
was_initialized: bool,
|
||||
}
|
||||
|
||||
impl ExampleSysObj {
|
||||
fn new(id: ObjectId, dummy: u32) -> ExampleSysObj {
|
||||
ExampleSysObj {
|
||||
id,
|
||||
dummy,
|
||||
was_initialized: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemObject for ExampleSysObj {
|
||||
type Error = ();
|
||||
fn get_object_id(&self) -> &ObjectId {
|
||||
&self.id
|
||||
}
|
||||
|
||||
fn initialize(&mut self) -> Result<(), Self::Error> {
|
||||
self.was_initialized = true;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ManagedSystemObject for ExampleSysObj {}
|
||||
|
||||
struct OtherExampleObject {
|
||||
id: ObjectId,
|
||||
string: String,
|
||||
was_initialized: bool,
|
||||
}
|
||||
|
||||
impl SystemObject for OtherExampleObject {
|
||||
type Error = ();
|
||||
fn get_object_id(&self) -> &ObjectId {
|
||||
&self.id
|
||||
}
|
||||
|
||||
fn initialize(&mut self) -> Result<(), Self::Error> {
|
||||
self.was_initialized = true;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ManagedSystemObject for OtherExampleObject {}
|
||||
|
||||
#[test]
|
||||
fn test_obj_manager_simple() {
|
||||
let mut obj_manager = ObjectManager::default();
|
||||
let expl_obj_id = ObjectId {
|
||||
id: 0,
|
||||
name: "Example 0",
|
||||
};
|
||||
let example_obj = ExampleSysObj::new(expl_obj_id, 42);
|
||||
assert!(obj_manager.insert(Box::new(example_obj)));
|
||||
let res = obj_manager.initialize();
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap(), 1);
|
||||
let obj_back_casted: Option<&ExampleSysObj> = obj_manager.get_ref(&expl_obj_id);
|
||||
assert!(obj_back_casted.is_some());
|
||||
let expl_obj_back_casted = obj_back_casted.unwrap();
|
||||
assert_eq!(expl_obj_back_casted.dummy, 42);
|
||||
assert!(expl_obj_back_casted.was_initialized);
|
||||
|
||||
let second_obj_id = ObjectId {
|
||||
id: 12,
|
||||
name: "Example 1",
|
||||
};
|
||||
let second_example_obj = OtherExampleObject {
|
||||
id: second_obj_id,
|
||||
string: String::from("Hello Test"),
|
||||
was_initialized: false,
|
||||
};
|
||||
|
||||
assert!(obj_manager.insert(Box::new(second_example_obj)));
|
||||
let res = obj_manager.initialize();
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap(), 2);
|
||||
let obj_back_casted: Option<&OtherExampleObject> = obj_manager.get_ref(&second_obj_id);
|
||||
assert!(obj_back_casted.is_some());
|
||||
let expl_obj_back_casted = obj_back_casted.unwrap();
|
||||
assert_eq!(expl_obj_back_casted.string, String::from("Hello Test"));
|
||||
assert!(expl_obj_back_casted.was_initialized);
|
||||
|
||||
let existing_obj_id = ObjectId {
|
||||
id: 12,
|
||||
name: "Example 1",
|
||||
};
|
||||
let invalid_obj = OtherExampleObject {
|
||||
id: existing_obj_id,
|
||||
string: String::from("Hello Test"),
|
||||
was_initialized: false,
|
||||
};
|
||||
|
||||
assert!(!obj_manager.insert(Box::new(invalid_obj)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn object_man_threaded() {
|
||||
let obj_manager = Arc::new(Mutex::new(ObjectManager::new()));
|
||||
let expl_obj_id = ObjectId {
|
||||
id: 0,
|
||||
name: "Example 0",
|
||||
};
|
||||
let example_obj = ExampleSysObj::new(expl_obj_id, 42);
|
||||
let second_obj_id = ObjectId {
|
||||
id: 12,
|
||||
name: "Example 1",
|
||||
};
|
||||
let second_example_obj = OtherExampleObject {
|
||||
id: second_obj_id,
|
||||
string: String::from("Hello Test"),
|
||||
was_initialized: false,
|
||||
};
|
||||
|
||||
let mut obj_man_handle = obj_manager.lock().expect("Mutex lock failed");
|
||||
assert!(obj_man_handle.insert(Box::new(example_obj)));
|
||||
assert!(obj_man_handle.insert(Box::new(second_example_obj)));
|
||||
let res = obj_man_handle.initialize();
|
||||
std::mem::drop(obj_man_handle);
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap(), 2);
|
||||
let obj_man_0 = obj_manager.clone();
|
||||
let jh0 = thread::spawn(move || {
|
||||
let locked_man = obj_man_0.lock().expect("Mutex lock failed");
|
||||
let obj_back_casted: Option<&ExampleSysObj> = locked_man.get_ref(&expl_obj_id);
|
||||
assert!(obj_back_casted.is_some());
|
||||
let expl_obj_back_casted = obj_back_casted.unwrap();
|
||||
assert_eq!(expl_obj_back_casted.dummy, 42);
|
||||
assert!(expl_obj_back_casted.was_initialized);
|
||||
std::mem::drop(locked_man)
|
||||
});
|
||||
|
||||
let jh1 = thread::spawn(move || {
|
||||
let locked_man = obj_manager.lock().expect("Mutex lock failed");
|
||||
let obj_back_casted: Option<&OtherExampleObject> = locked_man.get_ref(&second_obj_id);
|
||||
assert!(obj_back_casted.is_some());
|
||||
let expl_obj_back_casted = obj_back_casted.unwrap();
|
||||
assert_eq!(expl_obj_back_casted.string, String::from("Hello Test"));
|
||||
assert!(expl_obj_back_casted.was_initialized);
|
||||
std::mem::drop(locked_man)
|
||||
});
|
||||
jh0.join().expect("Joining thread 0 failed");
|
||||
jh1.join().expect("Joining thread 1 failed");
|
||||
}
|
||||
}
|
679
satrs/src/params.rs
Normal file
@ -0,0 +1,679 @@
|
||||
//! Parameter types and enums.
|
||||
//!
|
||||
//! This module contains various helper types.
|
||||
//!
|
||||
//! # Primitive Parameter Wrappers and Enumeration
//!
//! This module includes wrappers for primitive Rust types using the newtype pattern.
//! This was also done for pairs and triplets of these primitive types.
//! The [WritableToBeBytes] trait is implemented for all of these types, which allows them to be
//! easily converted into a network-friendly raw byte format. The [ParamsRaw] enumeration groups
//! all newtypes and implements the [WritableToBeBytes] trait itself.
//!
|
||||
//! ## Example for primitive type wrapper
|
||||
//!
|
||||
//! ```
|
||||
//! use satrs::params::{ParamsRaw, ToBeBytes, U32Pair, WritableToBeBytes};
|
||||
//!
|
||||
//! let u32_pair = U32Pair(0x1010, 25);
|
||||
//! assert_eq!(u32_pair.0, 0x1010);
|
||||
//! assert_eq!(u32_pair.1, 25);
|
||||
//! // Convert to raw stream
|
||||
//! let raw_buf = u32_pair.to_be_bytes();
|
||||
//! assert_eq!(raw_buf, [0, 0, 0x10, 0x10, 0, 0, 0, 25]);
|
||||
//!
|
||||
//! // Convert to enum variant
|
||||
//! let params_raw: ParamsRaw = u32_pair.into();
|
||||
//! assert_eq!(params_raw, (0x1010_u32, 25_u32).into());
|
||||
//!
|
||||
//! // Convert to stream using the enum variant
|
||||
//! let mut other_raw_buf: [u8; 8] = [0; 8];
|
||||
//! params_raw.write_to_be_bytes(&mut other_raw_buf).expect("Writing parameter to buffer failed");
|
||||
//! assert_eq!(other_raw_buf, [0, 0, 0x10, 0x10, 0, 0, 0, 25]);
|
||||
//!
|
||||
//! // Create a pair from a raw stream
|
||||
//! let u32_pair_from_stream: U32Pair = raw_buf.as_slice().try_into().unwrap();
|
||||
//! assert_eq!(u32_pair_from_stream.0, 0x1010);
|
||||
//! assert_eq!(u32_pair_from_stream.1, 25);
|
||||
//! ```
|
||||
//!
|
||||
//! # Generic Parameter Enumeration
|
||||
//!
|
||||
//! The module also contains generic parameter enumerations.
//! This includes the [ParamsHeapless] enumeration for values which do not require heap
//! allocation, and the [Params] enumeration, which wraps [ParamsHeapless] and adds some
//! types which require [alloc] support but allow for more flexibility.
#[cfg(feature = "alloc")]
|
||||
use crate::pool::StoreAddr;
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::string::{String, ToString};
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::vec::Vec;
|
||||
use core::fmt::Debug;
|
||||
use core::mem::size_of;
|
||||
use paste::paste;
|
||||
use spacepackets::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU64, EcssEnumU8};
|
||||
use spacepackets::util::UnsignedEnum;
|
||||
use spacepackets::ByteConversionError;
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub use alloc_mod::*;
|
||||
pub use spacepackets::util::ToBeBytes;
|
||||
|
||||
/// Generic trait which is used for objects which can be converted into a raw network (big) endian
|
||||
/// byte format.
|
||||
pub trait WritableToBeBytes {
|
||||
fn raw_len(&self) -> usize;
|
||||
/// Writes the object to a raw buffer in network endianness (big)
|
||||
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
|
||||
}
|
||||
|
||||
macro_rules! param_to_be_bytes_impl {
|
||||
($Newtype: ident) => {
|
||||
impl WritableToBeBytes for $Newtype {
|
||||
#[inline]
|
||||
fn raw_len(&self) -> usize {
|
||||
size_of::<<Self as ToBeBytes>::ByteArray>()
|
||||
}
|
||||
|
||||
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
|
||||
let raw_len = self.raw_len();
|
||||
if buf.len() < raw_len {
|
||||
return Err(ByteConversionError::ToSliceTooSmall {
|
||||
found: buf.len(),
|
||||
expected: raw_len,
|
||||
});
|
||||
}
|
||||
buf[0..raw_len].copy_from_slice(&self.to_be_bytes());
|
||||
Ok(raw_len)
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! primitive_newtypes_with_eq {
|
||||
($($ty: ty,)+) => {
|
||||
$(
|
||||
paste! {
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub struct [<$ty:upper>](pub $ty);
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub struct [<$ty:upper Pair>](pub $ty, pub $ty);
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub struct [<$ty:upper Triplet>](pub $ty, pub $ty, pub $ty);
|
||||
|
||||
param_to_be_bytes_impl!([<$ty:upper>]);
|
||||
param_to_be_bytes_impl!([<$ty:upper Pair>]);
|
||||
param_to_be_bytes_impl!([<$ty:upper Triplet>]);
|
||||
|
||||
impl From<$ty> for [<$ty:upper>] {
|
||||
fn from(v: $ty) -> Self {
|
||||
Self(v)
|
||||
}
|
||||
}
|
||||
impl From<($ty, $ty)> for [<$ty:upper Pair>] {
|
||||
fn from(v: ($ty, $ty)) -> Self {
|
||||
Self(v.0, v.1)
|
||||
}
|
||||
}
|
||||
impl From<($ty, $ty, $ty)> for [<$ty:upper Triplet>] {
|
||||
fn from(v: ($ty, $ty, $ty)) -> Self {
|
||||
Self(v.0, v.1, v.2)
|
||||
}
|
||||
}
|
||||
}
|
||||
)+
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! primitive_newtypes {
|
||||
($($ty: ty,)+) => {
|
||||
$(
|
||||
paste! {
|
||||
#[derive(Debug, Copy, Clone, PartialEq)]
|
||||
pub struct [<$ty:upper>](pub $ty);
|
||||
#[derive(Debug, Copy, Clone, PartialEq)]
|
||||
pub struct [<$ty:upper Pair>](pub $ty, pub $ty);
|
||||
#[derive(Debug, Copy, Clone, PartialEq)]
|
||||
pub struct [<$ty:upper Triplet>](pub $ty, pub $ty, pub $ty);
|
||||
|
||||
param_to_be_bytes_impl!([<$ty:upper>]);
|
||||
param_to_be_bytes_impl!([<$ty:upper Pair>]);
|
||||
param_to_be_bytes_impl!([<$ty:upper Triplet>]);
|
||||
|
||||
impl From<$ty> for [<$ty:upper>] {
|
||||
fn from(v: $ty) -> Self {
|
||||
Self(v)
|
||||
}
|
||||
}
|
||||
impl From<($ty, $ty)> for [<$ty:upper Pair>] {
|
||||
fn from(v: ($ty, $ty)) -> Self {
|
||||
Self(v.0, v.1)
|
||||
}
|
||||
}
|
||||
impl From<($ty, $ty, $ty)> for [<$ty:upper Triplet>] {
|
||||
fn from(v: ($ty, $ty, $ty)) -> Self {
|
||||
Self(v.0, v.1, v.2)
|
||||
}
|
||||
}
|
||||
}
|
||||
)+
|
||||
}
|
||||
}
|
||||
|
||||
primitive_newtypes_with_eq!(u8, u16, u32, u64, i8, i16, i32, i64,);
|
||||
primitive_newtypes!(f32, f64,);
|
||||
|
||||
macro_rules! scalar_byte_conversions_impl {
|
||||
($($ty: ty,)+) => {
|
||||
$(
|
||||
paste! {
|
||||
impl ToBeBytes for [<$ty:upper>] {
|
||||
type ByteArray = [u8; size_of::<$ty>()];
|
||||
|
||||
fn written_len(&self) -> usize {
|
||||
size_of::<Self::ByteArray>()
|
||||
}
|
||||
|
||||
fn to_be_bytes(&self) -> Self::ByteArray {
|
||||
self.0.to_be_bytes()
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&[u8]> for [<$ty:upper>] {
|
||||
type Error = ByteConversionError;
|
||||
|
||||
fn try_from(v: &[u8]) -> Result<Self, Self::Error> {
|
||||
if v.len() < size_of::<$ty>() {
|
||||
return Err(ByteConversionError::FromSliceTooSmall{
|
||||
expected: size_of::<$ty>(),
|
||||
found: v.len()
|
||||
});
|
||||
}
|
||||
Ok([<$ty:upper>]($ty::from_be_bytes(v[0..size_of::<$ty>()].try_into().unwrap())))
|
||||
}
|
||||
}
|
||||
}
|
||||
)+
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! pair_byte_conversions_impl {
|
||||
($($ty: ty,)+) => {
|
||||
$(
|
||||
paste! {
|
||||
impl ToBeBytes for [<$ty:upper Pair>] {
|
||||
type ByteArray = [u8; size_of::<$ty>() * 2];
|
||||
|
||||
fn written_len(&self) -> usize {
|
||||
size_of::<Self::ByteArray>()
|
||||
}
|
||||
|
||||
fn to_be_bytes(&self) -> Self::ByteArray {
|
||||
let mut array = [0; size_of::<$ty>() * 2];
|
||||
array[0..size_of::<$ty>()].copy_from_slice(&self.0.to_be_bytes());
|
||||
array[
|
||||
size_of::<$ty>()..2 * size_of::<$ty>()
|
||||
].copy_from_slice(&self.1.to_be_bytes());
|
||||
array
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&[u8]> for [<$ty:upper Pair>] {
|
||||
type Error = ByteConversionError;
|
||||
|
||||
fn try_from(v: &[u8]) -> Result<Self, Self::Error> {
|
||||
if v.len() < 2 * size_of::<$ty>() {
|
||||
return Err(ByteConversionError::FromSliceTooSmall{
|
||||
expected: 2 * size_of::<$ty>(),
|
||||
found: v.len()
|
||||
});
|
||||
}
|
||||
Ok([<$ty:upper Pair>](
|
||||
$ty::from_be_bytes(v[0..size_of::<$ty>()].try_into().unwrap()),
|
||||
$ty::from_be_bytes(v[size_of::<$ty>()..2 * size_of::<$ty>()].try_into().unwrap())
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
)+
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! triplet_to_be_bytes_impl {
|
||||
($($ty: ty,)+) => {
|
||||
$(
|
||||
paste! {
|
||||
impl ToBeBytes for [<$ty:upper Triplet>] {
|
||||
type ByteArray = [u8; size_of::<$ty>() * 3];
|
||||
|
||||
fn written_len(&self) -> usize {
|
||||
size_of::<Self::ByteArray>()
|
||||
}
|
||||
|
||||
fn to_be_bytes(&self) -> Self::ByteArray {
|
||||
let mut array = [0; size_of::<$ty>() * 3];
|
||||
array[0..size_of::<$ty>()].copy_from_slice(&self.0.to_be_bytes());
|
||||
array[
|
||||
size_of::<$ty>()..2 * size_of::<$ty>()
|
||||
].copy_from_slice(&self.1.to_be_bytes());
|
||||
array[
|
||||
2 * size_of::<$ty>()..3 * size_of::<$ty>()
|
||||
].copy_from_slice(&self.2.to_be_bytes());
|
||||
array
|
||||
}
|
||||
}
|
||||
impl TryFrom<&[u8]> for [<$ty:upper Triplet>] {
|
||||
type Error = ByteConversionError;
|
||||
|
||||
fn try_from(v: &[u8]) -> Result<Self, Self::Error> {
|
||||
if v.len() < 3 * size_of::<$ty>() {
|
||||
return Err(ByteConversionError::FromSliceTooSmall{
|
||||
expected: 3 * size_of::<$ty>(),
|
||||
found: v.len()
|
||||
});
|
||||
}
|
||||
Ok([<$ty:upper Triplet>](
|
||||
$ty::from_be_bytes(v[0..size_of::<$ty>()].try_into().unwrap()),
|
||||
$ty::from_be_bytes(v[size_of::<$ty>()..2 * size_of::<$ty>()].try_into().unwrap()),
|
||||
$ty::from_be_bytes(v[2 * size_of::<$ty>()..3 * size_of::<$ty>()].try_into().unwrap())
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
)+
|
||||
}
|
||||
}
|
||||
|
||||
scalar_byte_conversions_impl!(u8, u16, u32, u64, i8, i16, i32, i64, f32, f64,);
|
||||
|
||||
impl ToBeBytes for U8Pair {
|
||||
type ByteArray = [u8; 2];
|
||||
|
||||
fn written_len(&self) -> usize {
|
||||
size_of::<Self::ByteArray>()
|
||||
}
|
||||
|
||||
fn to_be_bytes(&self) -> Self::ByteArray {
|
||||
let mut array = [0; 2];
|
||||
array[0] = self.0;
|
||||
array[1] = self.1;
|
||||
array
|
||||
}
|
||||
}
|
||||
|
||||
impl ToBeBytes for I8Pair {
|
||||
type ByteArray = [u8; 2];
|
||||
|
||||
fn written_len(&self) -> usize {
|
||||
size_of::<Self::ByteArray>()
|
||||
}
|
||||
|
||||
fn to_be_bytes(&self) -> Self::ByteArray {
|
||||
let mut array = [0; 2];
|
||||
array[0] = self.0 as u8;
|
||||
array[1] = self.1 as u8;
|
||||
array
|
||||
}
|
||||
}
|
||||
|
||||
impl ToBeBytes for U8Triplet {
|
||||
type ByteArray = [u8; 3];
|
||||
|
||||
fn written_len(&self) -> usize {
|
||||
size_of::<Self::ByteArray>()
|
||||
}
|
||||
|
||||
fn to_be_bytes(&self) -> Self::ByteArray {
|
||||
let mut array = [0; 3];
|
||||
array[0] = self.0;
|
||||
array[1] = self.1;
|
||||
array[2] = self.2;
|
||||
array
|
||||
}
|
||||
}
|
||||
|
||||
impl ToBeBytes for I8Triplet {
|
||||
type ByteArray = [u8; 3];
|
||||
|
||||
fn written_len(&self) -> usize {
|
||||
size_of::<Self::ByteArray>()
|
||||
}
|
||||
|
||||
fn to_be_bytes(&self) -> Self::ByteArray {
|
||||
let mut array = [0; 3];
|
||||
array[0] = self.0 as u8;
|
||||
array[1] = self.1 as u8;
|
||||
array[2] = self.2 as u8;
|
||||
array
|
||||
}
|
||||
}
|
||||
|
||||
pair_byte_conversions_impl!(u16, u32, u64, i16, i32, i64, f32, f64,);
|
||||
triplet_to_be_bytes_impl!(u16, u32, u64, i16, i32, i64, f32, f64,);
|
||||
|
||||
/// Generic enumeration for additional parameters only consisting of primitive data types.
|
||||
///
|
||||
/// All contained variants and the enum itself implement the [WritableToBeBytes] trait, which
/// allows converting them easily into a network-friendly format.
|
||||
#[derive(Debug, Copy, Clone, PartialEq)]
|
||||
pub enum ParamsRaw {
|
||||
U8(U8),
|
||||
U8Pair(U8Pair),
|
||||
U8Triplet(U8Triplet),
|
||||
I8(I8),
|
||||
I8Pair(I8Pair),
|
||||
I8Triplet(I8Triplet),
|
||||
U16(U16),
|
||||
U16Pair(U16Pair),
|
||||
U16Triplet(U16Triplet),
|
||||
I16(I16),
|
||||
I16Pair(I16Pair),
|
||||
I16Triplet(I16Triplet),
|
||||
U32(U32),
|
||||
U32Pair(U32Pair),
|
||||
U32Triplet(U32Triplet),
|
||||
I32(I32),
|
||||
I32Pair(I32Pair),
|
||||
I32Triplet(I32Triplet),
|
||||
F32(F32),
|
||||
F32Pair(F32Pair),
|
||||
F32Triplet(F32Triplet),
|
||||
U64(U64),
|
||||
I64(I64),
|
||||
F64(F64),
|
||||
}
|
||||
|
||||
impl WritableToBeBytes for ParamsRaw {
|
||||
fn raw_len(&self) -> usize {
|
||||
match self {
|
||||
ParamsRaw::U8(v) => v.raw_len(),
|
||||
ParamsRaw::U8Pair(v) => v.raw_len(),
|
||||
ParamsRaw::U8Triplet(v) => v.raw_len(),
|
||||
ParamsRaw::I8(v) => v.raw_len(),
|
||||
ParamsRaw::I8Pair(v) => v.raw_len(),
|
||||
ParamsRaw::I8Triplet(v) => v.raw_len(),
|
||||
ParamsRaw::U16(v) => v.raw_len(),
|
||||
ParamsRaw::U16Pair(v) => v.raw_len(),
|
||||
ParamsRaw::U16Triplet(v) => v.raw_len(),
|
||||
ParamsRaw::I16(v) => v.raw_len(),
|
||||
ParamsRaw::I16Pair(v) => v.raw_len(),
|
||||
ParamsRaw::I16Triplet(v) => v.raw_len(),
|
||||
ParamsRaw::U32(v) => v.raw_len(),
|
||||
ParamsRaw::U32Pair(v) => v.raw_len(),
|
||||
ParamsRaw::U32Triplet(v) => v.raw_len(),
|
||||
ParamsRaw::I32(v) => v.raw_len(),
|
||||
ParamsRaw::I32Pair(v) => v.raw_len(),
|
||||
ParamsRaw::I32Triplet(v) => v.raw_len(),
|
||||
ParamsRaw::F32(v) => v.raw_len(),
|
||||
ParamsRaw::F32Pair(v) => v.raw_len(),
|
||||
ParamsRaw::F32Triplet(v) => v.raw_len(),
|
||||
ParamsRaw::U64(v) => v.raw_len(),
|
||||
ParamsRaw::I64(v) => v.raw_len(),
|
||||
ParamsRaw::F64(v) => v.raw_len(),
|
||||
}
|
||||
}
|
||||
|
||||
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
|
||||
match self {
|
||||
ParamsRaw::U8(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::U8Pair(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::U8Triplet(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::I8(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::I8Pair(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::I8Triplet(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::U16(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::U16Pair(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::U16Triplet(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::I16(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::I16Pair(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::I16Triplet(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::U32(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::U32Pair(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::U32Triplet(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::I32(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::I32Pair(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::I32Triplet(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::F32(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::F32Pair(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::F32Triplet(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::U64(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::I64(v) => v.write_to_be_bytes(buf),
|
||||
ParamsRaw::F64(v) => v.write_to_be_bytes(buf),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! params_raw_from_newtype {
|
||||
($($newtype: ident,)+) => {
|
||||
$(
|
||||
impl From<$newtype> for ParamsRaw {
|
||||
fn from(v: $newtype) -> Self {
|
||||
Self::$newtype(v)
|
||||
}
|
||||
}
|
||||
)+
|
||||
}
|
||||
}
|
||||
|
||||
params_raw_from_newtype!(
|
||||
U8, U8Pair, U8Triplet, U16, U16Pair, U16Triplet, U32, U32Pair, U32Triplet, I8, I8Pair,
|
||||
I8Triplet, I16, I16Pair, I16Triplet, I32, I32Pair, I32Triplet, F32, F32Pair, F32Triplet, U64,
|
||||
I64, F64,
|
||||
);
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub enum EcssEnumParams {
|
||||
U8(EcssEnumU8),
|
||||
U16(EcssEnumU16),
|
||||
U32(EcssEnumU32),
|
||||
U64(EcssEnumU64),
|
||||
}
|
||||
|
||||
macro_rules! writable_as_be_bytes_ecss_enum_impl {
|
||||
($EnumIdent: ident) => {
|
||||
impl WritableToBeBytes for $EnumIdent {
|
||||
fn raw_len(&self) -> usize {
|
||||
self.size()
|
||||
}
|
||||
|
||||
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
|
||||
<Self as UnsignedEnum>::write_to_be_bytes(self, buf).map(|_| self.raw_len())
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU8);
|
||||
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU16);
|
||||
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU32);
|
||||
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU64);
|
||||
|
||||
impl WritableToBeBytes for EcssEnumParams {
|
||||
fn raw_len(&self) -> usize {
|
||||
match self {
|
||||
EcssEnumParams::U8(e) => e.raw_len(),
|
||||
EcssEnumParams::U16(e) => e.raw_len(),
|
||||
EcssEnumParams::U32(e) => e.raw_len(),
|
||||
EcssEnumParams::U64(e) => e.raw_len(),
|
||||
}
|
||||
}
|
||||
|
||||
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
|
||||
match self {
|
||||
EcssEnumParams::U8(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
|
||||
EcssEnumParams::U16(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
|
||||
EcssEnumParams::U32(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
|
||||
EcssEnumParams::U64(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Generic enumeration for parameters which do not rely on heap allocations.
|
||||
#[derive(Debug, Copy, Clone, PartialEq)]
|
||||
pub enum ParamsHeapless {
|
||||
Raw(ParamsRaw),
|
||||
EcssEnum(EcssEnumParams),
|
||||
}
|
||||
|
||||
macro_rules! from_conversions_for_raw {
|
||||
($(($raw_ty: ty, $TargetPath: path),)+) => {
|
||||
$(
|
||||
impl From<$raw_ty> for ParamsRaw {
|
||||
fn from(val: $raw_ty) -> Self {
|
||||
$TargetPath(val.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<$raw_ty> for ParamsHeapless {
|
||||
fn from(val: $raw_ty) -> Self {
|
||||
ParamsHeapless::Raw(val.into())
|
||||
}
|
||||
}
|
||||
)+
|
||||
};
|
||||
}
|
||||
|
||||
from_conversions_for_raw!(
|
||||
(u8, Self::U8),
|
||||
((u8, u8), Self::U8Pair),
|
||||
((u8, u8, u8), Self::U8Triplet),
|
||||
(i8, Self::I8),
|
||||
((i8, i8), Self::I8Pair),
|
||||
((i8, i8, i8), Self::I8Triplet),
|
||||
(u16, Self::U16),
|
||||
((u16, u16), Self::U16Pair),
|
||||
((u16, u16, u16), Self::U16Triplet),
|
||||
(i16, Self::I16),
|
||||
((i16, i16), Self::I16Pair),
|
||||
((i16, i16, i16), Self::I16Triplet),
|
||||
(u32, Self::U32),
|
||||
((u32, u32), Self::U32Pair),
|
||||
((u32, u32, u32), Self::U32Triplet),
|
||||
(i32, Self::I32),
|
||||
((i32, i32), Self::I32Pair),
|
||||
((i32, i32, i32), Self::I32Triplet),
|
||||
(f32, Self::F32),
|
||||
((f32, f32), Self::F32Pair),
|
||||
((f32, f32, f32), Self::F32Triplet),
|
||||
(u64, Self::U64),
|
||||
(f64, Self::F64),
|
||||
);
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
mod alloc_mod {
|
||||
use super::*;
|
||||
/// Generic enumeration for additional parameters, including parameters which rely on heap
|
||||
/// allocations.
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Params {
|
||||
Heapless(ParamsHeapless),
|
||||
Store(StoreAddr),
|
||||
Vec(Vec<u8>),
|
||||
String(String),
|
||||
}
|
||||
|
||||
impl From<StoreAddr> for Params {
|
||||
fn from(x: StoreAddr) -> Self {
|
||||
Self::Store(x)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ParamsHeapless> for Params {
|
||||
fn from(x: ParamsHeapless) -> Self {
|
||||
Self::Heapless(x)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Vec<u8>> for Params {
|
||||
fn from(val: Vec<u8>) -> Self {
|
||||
Self::Vec(val)
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts a byte slice into the [Params::Vec] variant
|
||||
impl From<&[u8]> for Params {
|
||||
fn from(val: &[u8]) -> Self {
|
||||
Self::Vec(val.to_vec())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for Params {
|
||||
fn from(val: String) -> Self {
|
||||
Self::String(val)
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts a string slice into the [Params::String] variant
|
||||
impl From<&str> for Params {
|
||||
fn from(val: &str) -> Self {
|
||||
Self::String(val.to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_basic_u32_pair() {
|
||||
let u32_pair = U32Pair(4, 8);
|
||||
assert_eq!(u32_pair.0, 4);
|
||||
assert_eq!(u32_pair.1, 8);
|
||||
let raw = u32_pair.to_be_bytes();
|
||||
let mut u32_conv_back = u32::from_be_bytes(raw[0..4].try_into().unwrap());
|
||||
assert_eq!(u32_conv_back, 4);
|
||||
u32_conv_back = u32::from_be_bytes(raw[4..8].try_into().unwrap());
|
||||
assert_eq!(u32_conv_back, 8);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_signed_test_pair() {
|
||||
let i8_pair = I8Pair(-3, -16);
|
||||
assert_eq!(i8_pair.0, -3);
|
||||
assert_eq!(i8_pair.1, -16);
|
||||
let raw = i8_pair.to_be_bytes();
|
||||
let mut i8_conv_back = i8::from_be_bytes(raw[0..1].try_into().unwrap());
|
||||
assert_eq!(i8_conv_back, -3);
|
||||
i8_conv_back = i8::from_be_bytes(raw[1..2].try_into().unwrap());
|
||||
assert_eq!(i8_conv_back, -16);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_signed_test_triplet() {
|
||||
let i8_triplet = I8Triplet(-3, -16, -126);
|
||||
assert_eq!(i8_triplet.0, -3);
|
||||
assert_eq!(i8_triplet.1, -16);
|
||||
assert_eq!(i8_triplet.2, -126);
|
||||
let raw = i8_triplet.to_be_bytes();
|
||||
let mut i8_conv_back = i8::from_be_bytes(raw[0..1].try_into().unwrap());
|
||||
assert_eq!(i8_conv_back, -3);
|
||||
i8_conv_back = i8::from_be_bytes(raw[1..2].try_into().unwrap());
|
||||
assert_eq!(i8_conv_back, -16);
|
||||
i8_conv_back = i8::from_be_bytes(raw[2..3].try_into().unwrap());
|
||||
assert_eq!(i8_conv_back, -126);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conversion_test_string() {
|
||||
let param: Params = "Test String".into();
|
||||
if let Params::String(str) = param {
|
||||
assert_eq!(str, String::from("Test String"));
|
||||
} else {
|
||||
panic!("Params type is not String")
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conversion_from_slice() {
|
||||
let test_slice: [u8; 5] = [0; 5];
|
||||
let vec_param: Params = test_slice.as_slice().into();
|
||||
if let Params::Vec(vec) = vec_param {
|
||||
assert_eq!(vec, test_slice.to_vec());
|
||||
} else {
|
||||
panic!("Params type is not a vector")
|
||||
}
|
||||
}
|
||||
}
|
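A short usage sketch for the parameter types above, assuming the `alloc` feature is enabled; the concrete values are arbitrary:

```rust
use satrs::params::{Params, ParamsHeapless, WritableToBeBytes};

fn main() {
    // A pair of primitives converts directly into the heapless parameter enumeration.
    let heapless: ParamsHeapless = (-2_i16, 300_i16).into();
    if let ParamsHeapless::Raw(raw) = heapless {
        let mut buf = [0u8; 4];
        let written = raw.write_to_be_bytes(&mut buf).expect("buffer too small");
        assert_eq!(written, 4);
    }

    // The alloc-enabled Params enumeration additionally covers owned byte and string data.
    let byte_data: [u8; 3] = [1, 2, 3];
    let vec_param: Params = byte_data.as_slice().into();
    let string_param: Params = "device overcurrent".into();
    assert!(matches!(vec_param, Params::Vec(_)));
    assert!(matches!(string_param, Params::String(_)));
}
```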
1043
satrs/src/pool.rs
Normal file
File diff suppressed because it is too large
102
satrs/src/power.rs
Normal file
@ -0,0 +1,102 @@
|
||||
#[cfg(feature = "serde")]
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Generic trait for a device capable of switching itself on or off.
|
||||
pub trait PowerSwitch {
|
||||
type Error;
|
||||
|
||||
fn switch_on(&mut self) -> Result<(), Self::Error>;
|
||||
fn switch_off(&mut self) -> Result<(), Self::Error>;
|
||||
|
||||
fn is_switch_on(&self) -> bool {
|
||||
self.switch_state() == SwitchState::On
|
||||
}
|
||||
|
||||
fn switch_state(&self) -> SwitchState;
|
||||
}
|
||||
|
||||
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub enum SwitchState {
|
||||
Off = 0,
|
||||
On = 1,
|
||||
Unknown = 2,
|
||||
Faulty = 3,
|
||||
}
|
||||
|
||||
pub type SwitchId = u16;
|
||||
|
||||
/// Generic trait for a device capable of turning switches on and off.
|
||||
pub trait PowerSwitcherCommandSender {
|
||||
type Error;
|
||||
|
||||
fn send_switch_on_cmd(&mut self, switch_id: SwitchId) -> Result<(), Self::Error>;
|
||||
fn send_switch_off_cmd(&mut self, switch_id: SwitchId) -> Result<(), Self::Error>;
|
||||
}
|
||||
|
||||
pub trait PowerSwitchInfo {
|
||||
type Error;
|
||||
|
||||
/// Retrieve the switch state
|
||||
fn get_switch_state(&mut self, switch_id: SwitchId) -> Result<SwitchState, Self::Error>;
|
||||
|
||||
fn get_is_switch_on(&mut self, switch_id: SwitchId) -> Result<bool, Self::Error> {
|
||||
Ok(self.get_switch_state(switch_id)? == SwitchState::On)
|
||||
}
|
||||
|
||||
/// The maximum delay it will take to change a switch.
|
||||
///
|
||||
/// This may take into account the time to send a command, wait for it to be executed, and
/// observe the switch change.
|
||||
fn switch_delay_ms(&self) -> u32;
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#![allow(dead_code)]
|
||||
use super::*;
|
||||
use std::boxed::Box;
|
||||
|
||||
struct Pcdu {
|
||||
switch_rx: std::sync::mpsc::Receiver<(SwitchId, u16)>,
|
||||
}
|
||||
|
||||
#[derive(Eq, PartialEq)]
|
||||
enum DeviceState {
|
||||
OFF,
|
||||
SwitchingPower,
|
||||
ON,
|
||||
SETUP,
|
||||
IDLE,
|
||||
}
|
||||
struct MyComplexDevice {
|
||||
power_switcher: Box<dyn PowerSwitcherCommandSender<Error = ()>>,
|
||||
power_info: Box<dyn PowerSwitchInfo<Error = ()>>,
|
||||
switch_id: SwitchId,
|
||||
some_state: u16,
|
||||
dev_state: DeviceState,
|
||||
mode: u32,
|
||||
submode: u16,
|
||||
}
|
||||
|
||||
impl MyComplexDevice {
|
||||
pub fn periodic_op(&mut self) {
|
||||
// .. mode command coming in
|
||||
let mode = 1;
|
||||
if mode == 1 {
|
||||
if self.dev_state == DeviceState::OFF {
|
||||
self.power_switcher
|
||||
.send_switch_on_cmd(self.switch_id)
|
||||
.expect("sending siwthc cmd failed");
|
||||
self.dev_state = DeviceState::SwitchingPower;
|
||||
}
|
||||
if self.dev_state == DeviceState::SwitchingPower {
|
||||
if self.power_info.get_is_switch_on(0).unwrap() {
|
||||
self.dev_state = DeviceState::ON;
|
||||
self.mode = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
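A sketch of how the PowerSwitch trait above might be implemented; `DummySwitch` is a made-up type used purely for illustration:

```rust
use satrs::power::{PowerSwitch, SwitchState};

// Hypothetical dummy switch used only to illustrate the trait contract.
struct DummySwitch {
    state: SwitchState,
}

impl PowerSwitch for DummySwitch {
    type Error = ();

    fn switch_on(&mut self) -> Result<(), Self::Error> {
        self.state = SwitchState::On;
        Ok(())
    }

    fn switch_off(&mut self) -> Result<(), Self::Error> {
        self.state = SwitchState::Off;
        Ok(())
    }

    fn switch_state(&self) -> SwitchState {
        self.state
    }
}

fn main() {
    let mut switch = DummySwitch { state: SwitchState::Off };
    switch.switch_on().expect("switching on failed");
    // is_switch_on is a provided method which builds on switch_state.
    assert!(switch.is_switch_on());
}
```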
449
satrs/src/pus/event.rs
Normal file
@ -0,0 +1,449 @@
|
||||
use crate::pus::{source_buffer_large_enough, EcssTmtcError};
|
||||
use spacepackets::ecss::tm::PusTmCreator;
|
||||
use spacepackets::ecss::tm::PusTmSecondaryHeader;
|
||||
use spacepackets::ecss::{EcssEnumeration, PusError};
|
||||
use spacepackets::{SpHeader, MAX_APID};
|
||||
|
||||
use crate::pus::EcssTmSenderCore;
|
||||
#[cfg(feature = "alloc")]
|
||||
pub use alloc_mod::EventReporter;
|
||||
pub use spacepackets::ecss::event::*;
|
||||
|
||||
pub struct EventReporterBase {
|
||||
msg_count: u16,
|
||||
apid: u16,
|
||||
pub dest_id: u16,
|
||||
}
|
||||
|
||||
impl EventReporterBase {
|
||||
pub fn new(apid: u16) -> Option<Self> {
|
||||
if apid > MAX_APID {
|
||||
return None;
|
||||
}
|
||||
Some(Self {
|
||||
msg_count: 0,
|
||||
dest_id: 0,
|
||||
apid,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn event_info(
|
||||
&mut self,
|
||||
buf: &mut [u8],
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
self.generate_and_send_generic_tm(
|
||||
buf,
|
||||
Subservice::TmInfoReport,
|
||||
sender,
|
||||
time_stamp,
|
||||
event_id,
|
||||
aux_data,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn event_low_severity(
|
||||
&mut self,
|
||||
buf: &mut [u8],
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
self.generate_and_send_generic_tm(
|
||||
buf,
|
||||
Subservice::TmLowSeverityReport,
|
||||
sender,
|
||||
time_stamp,
|
||||
event_id,
|
||||
aux_data,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn event_medium_severity(
|
||||
&mut self,
|
||||
buf: &mut [u8],
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
self.generate_and_send_generic_tm(
|
||||
buf,
|
||||
Subservice::TmMediumSeverityReport,
|
||||
sender,
|
||||
time_stamp,
|
||||
event_id,
|
||||
aux_data,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn event_high_severity(
|
||||
&mut self,
|
||||
buf: &mut [u8],
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
self.generate_and_send_generic_tm(
|
||||
buf,
|
||||
Subservice::TmHighSeverityReport,
|
||||
sender,
|
||||
time_stamp,
|
||||
event_id,
|
||||
aux_data,
|
||||
)
|
||||
}
|
||||
|
||||
fn generate_and_send_generic_tm(
|
||||
&mut self,
|
||||
buf: &mut [u8],
|
||||
subservice: Subservice,
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
let tm = self.generate_generic_event_tm(buf, subservice, time_stamp, event_id, aux_data)?;
|
||||
sender.send_tm(tm.into())?;
|
||||
self.msg_count += 1;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn generate_generic_event_tm<'a>(
|
||||
&'a self,
|
||||
buf: &'a mut [u8],
|
||||
subservice: Subservice,
|
||||
time_stamp: &'a [u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<PusTmCreator, EcssTmtcError> {
|
||||
let mut src_data_len = event_id.size();
|
||||
if let Some(aux_data) = aux_data {
|
||||
src_data_len += aux_data.len();
|
||||
}
|
||||
source_buffer_large_enough(buf.len(), src_data_len)?;
|
||||
let mut sp_header = SpHeader::tm_unseg(self.apid, 0, 0).unwrap();
|
||||
let sec_header = PusTmSecondaryHeader::new(
|
||||
5,
|
||||
subservice.into(),
|
||||
self.msg_count,
|
||||
self.dest_id,
|
||||
Some(time_stamp),
|
||||
);
|
||||
let mut current_idx = 0;
|
||||
event_id
|
||||
.write_to_be_bytes(&mut buf[0..event_id.size()])
|
||||
.map_err(PusError::ByteConversion)?;
|
||||
current_idx += event_id.size();
|
||||
if let Some(aux_data) = aux_data {
|
||||
buf[current_idx..current_idx + aux_data.len()].copy_from_slice(aux_data);
|
||||
current_idx += aux_data.len();
|
||||
}
|
||||
Ok(PusTmCreator::new(
|
||||
&mut sp_header,
|
||||
sec_header,
|
||||
&buf[0..current_idx],
|
||||
true,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
mod alloc_mod {
|
||||
use super::*;
|
||||
use alloc::vec;
|
||||
use alloc::vec::Vec;
|
||||
|
||||
pub struct EventReporter {
|
||||
source_data_buf: Vec<u8>,
|
||||
pub reporter: EventReporterBase,
|
||||
}
|
||||
|
||||
impl EventReporter {
|
||||
pub fn new(apid: u16, max_event_id_and_aux_data_size: usize) -> Option<Self> {
|
||||
let reporter = EventReporterBase::new(apid)?;
|
||||
Some(Self {
|
||||
source_data_buf: vec![0; max_event_id_and_aux_data_size],
|
||||
reporter,
|
||||
})
|
||||
}
|
||||
pub fn event_info(
|
||||
&mut self,
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
self.reporter.event_info(
|
||||
self.source_data_buf.as_mut_slice(),
|
||||
sender,
|
||||
time_stamp,
|
||||
event_id,
|
||||
aux_data,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn event_low_severity(
|
||||
&mut self,
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
self.reporter.event_low_severity(
|
||||
self.source_data_buf.as_mut_slice(),
|
||||
sender,
|
||||
time_stamp,
|
||||
event_id,
|
||||
aux_data,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn event_medium_severity(
|
||||
&mut self,
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
self.reporter.event_medium_severity(
|
||||
self.source_data_buf.as_mut_slice(),
|
||||
sender,
|
||||
time_stamp,
|
||||
event_id,
|
||||
aux_data,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn event_high_severity(
|
||||
&mut self,
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
self.reporter.event_high_severity(
|
||||
self.source_data_buf.as_mut_slice(),
|
||||
sender,
|
||||
time_stamp,
|
||||
event_id,
|
||||
aux_data,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::events::{EventU32, Severity};
|
||||
use crate::pus::tests::CommonTmInfo;
|
||||
use crate::pus::{EcssChannel, PusTmWrapper};
|
||||
use crate::ChannelId;
|
||||
use spacepackets::ByteConversionError;
|
||||
use std::cell::RefCell;
|
||||
use std::collections::VecDeque;
|
||||
use std::vec::Vec;
|
||||
|
||||
const EXAMPLE_APID: u16 = 0xee;
|
||||
const EXAMPLE_GROUP_ID: u16 = 2;
|
||||
const EXAMPLE_EVENT_ID_0: u16 = 1;
|
||||
#[allow(dead_code)]
|
||||
const EXAMPLE_EVENT_ID_1: u16 = 2;
|
||||
|
||||
#[derive(Debug, Eq, PartialEq, Clone)]
|
||||
struct TmInfo {
|
||||
pub common: CommonTmInfo,
|
||||
pub event: EventU32,
|
||||
pub aux_data: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
struct TestSender {
|
||||
pub service_queue: RefCell<VecDeque<TmInfo>>,
|
||||
}
|
||||
|
||||
impl EcssChannel for TestSender {
|
||||
fn id(&self) -> ChannelId {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
impl EcssTmSenderCore for TestSender {
|
||||
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
|
||||
match tm {
|
||||
PusTmWrapper::InStore(_) => {
|
||||
panic!("TestSender: unexpected call with address");
|
||||
}
|
||||
PusTmWrapper::Direct(tm) => {
|
||||
assert!(!tm.source_data().is_empty());
|
||||
let src_data = tm.source_data();
|
||||
assert!(src_data.len() >= 4);
|
||||
let event =
|
||||
EventU32::from(u32::from_be_bytes(src_data[0..4].try_into().unwrap()));
|
||||
let mut aux_data = Vec::new();
|
||||
if src_data.len() > 4 {
|
||||
aux_data.extend_from_slice(&src_data[4..]);
|
||||
}
|
||||
self.service_queue.borrow_mut().push_back(TmInfo {
|
||||
common: CommonTmInfo::new_from_tm(&tm),
|
||||
event,
|
||||
aux_data,
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn severity_to_subservice(severity: Severity) -> Subservice {
|
||||
match severity {
|
||||
Severity::INFO => Subservice::TmInfoReport,
|
||||
Severity::LOW => Subservice::TmLowSeverityReport,
|
||||
Severity::MEDIUM => Subservice::TmMediumSeverityReport,
|
||||
Severity::HIGH => Subservice::TmHighSeverityReport,
|
||||
}
|
||||
}
|
||||
|
||||
fn report_basic_event(
|
||||
reporter: &mut EventReporter,
|
||||
sender: &mut TestSender,
|
||||
time_stamp: &[u8],
|
||||
event: EventU32,
|
||||
severity: Severity,
|
||||
aux_data: Option<&[u8]>,
|
||||
) {
|
||||
match severity {
|
||||
Severity::INFO => {
|
||||
reporter
|
||||
.event_info(sender, time_stamp, event, aux_data)
|
||||
.expect("Error reporting info event");
|
||||
}
|
||||
Severity::LOW => {
|
||||
reporter
|
||||
.event_low_severity(sender, time_stamp, event, aux_data)
|
||||
.expect("Error reporting low event");
|
||||
}
|
||||
Severity::MEDIUM => {
|
||||
reporter
|
||||
.event_medium_severity(sender, time_stamp, event, aux_data)
|
||||
.expect("Error reporting medium event");
|
||||
}
|
||||
Severity::HIGH => {
|
||||
reporter
|
||||
.event_high_severity(sender, time_stamp, event, aux_data)
|
||||
.expect("Error reporting high event");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn basic_event_test(
|
||||
max_event_aux_data_buf: usize,
|
||||
severity: Severity,
|
||||
error_data: Option<&[u8]>,
|
||||
) {
|
||||
let mut sender = TestSender::default();
|
||||
let reporter = EventReporter::new(EXAMPLE_APID, max_event_aux_data_buf);
|
||||
assert!(reporter.is_some());
|
||||
let mut reporter = reporter.unwrap();
|
||||
let time_stamp_empty: [u8; 7] = [0; 7];
|
||||
let mut error_copy = Vec::new();
|
||||
if let Some(err_data) = error_data {
|
||||
error_copy.extend_from_slice(err_data);
|
||||
}
|
||||
let event = EventU32::new(severity, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
|
||||
.expect("Error creating example event");
|
||||
report_basic_event(
|
||||
&mut reporter,
|
||||
&mut sender,
|
||||
&time_stamp_empty,
|
||||
event,
|
||||
severity,
|
||||
error_data,
|
||||
);
|
||||
let mut service_queue = sender.service_queue.borrow_mut();
|
||||
assert_eq!(service_queue.len(), 1);
|
||||
let tm_info = service_queue.pop_front().unwrap();
|
||||
assert_eq!(
|
||||
tm_info.common.subservice,
|
||||
severity_to_subservice(severity) as u8
|
||||
);
|
||||
assert_eq!(tm_info.common.dest_id, 0);
|
||||
assert_eq!(tm_info.common.time_stamp, time_stamp_empty);
|
||||
assert_eq!(tm_info.common.msg_counter, 0);
|
||||
assert_eq!(tm_info.common.apid, EXAMPLE_APID);
|
||||
assert_eq!(tm_info.event, event);
|
||||
assert_eq!(tm_info.aux_data, error_copy);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_info_event_generation() {
|
||||
basic_event_test(4, Severity::INFO, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_low_severity_event() {
|
||||
basic_event_test(4, Severity::LOW, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_medium_severity_event() {
|
||||
basic_event_test(4, Severity::MEDIUM, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_high_severity_event() {
|
||||
basic_event_test(4, Severity::HIGH, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn event_with_info_string() {
|
||||
let info_string = "Test Information";
|
||||
basic_event_test(32, Severity::INFO, Some(info_string.as_bytes()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn low_severity_with_raw_err_data() {
|
||||
let raw_err_param: i32 = -1;
|
||||
let raw_err = raw_err_param.to_be_bytes();
|
||||
basic_event_test(8, Severity::LOW, Some(&raw_err))
|
||||
}
|
||||
|
||||
fn check_buf_too_small(
|
||||
reporter: &mut EventReporter,
|
||||
sender: &mut TestSender,
|
||||
expected_found_len: usize,
|
||||
) {
|
||||
let time_stamp_empty: [u8; 7] = [0; 7];
|
||||
let event = EventU32::new(Severity::INFO, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
|
||||
.expect("Error creating example event");
|
||||
let err = reporter.event_info(sender, &time_stamp_empty, event, None);
|
||||
assert!(err.is_err());
|
||||
let err = err.unwrap_err();
|
||||
if let EcssTmtcError::Pus(PusError::ByteConversion(
|
||||
ByteConversionError::ToSliceTooSmall { found, expected },
|
||||
)) = err
|
||||
{
|
||||
assert_eq!(expected, 4);
|
||||
assert_eq!(found, expected_found_len);
|
||||
} else {
|
||||
panic!("Unexpected error {:?}", err);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn insufficient_buffer() {
|
||||
let mut sender = TestSender::default();
|
||||
for i in 0..3 {
|
||||
let reporter = EventReporter::new(EXAMPLE_APID, i);
|
||||
assert!(reporter.is_some());
|
||||
let mut reporter = reporter.unwrap();
|
||||
check_buf_too_small(&mut reporter, &mut sender, i);
|
||||
}
|
||||
}
|
||||
}
|
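A sketch of how the EventReporter above could be driven on a host system, assuming the `std` feature and the mpsc-based `MpscTmAsVecSender` helper from `satrs::pus`; the APID, event IDs and buffer size are arbitrary example values:

```rust
use satrs::events::{EventU32, Severity};
use satrs::pus::event::EventReporter;
use satrs::pus::MpscTmAsVecSender;
use std::sync::mpsc::channel;

fn main() {
    // APID and maximum source data size are example values.
    let mut reporter = EventReporter::new(0x02, 64).expect("invalid APID");
    let (tm_tx, tm_rx) = channel();
    let mut sender = MpscTmAsVecSender::new(0, "event_sender", tm_tx);

    let event = EventU32::new(Severity::INFO, 1, 1).expect("invalid event");
    let time_stamp: [u8; 7] = [0; 7];
    reporter
        .event_info(&mut sender, &time_stamp, event, None)
        .expect("generating event TM failed");

    // The generated PUS service 5 telemetry is now available on the receiving end.
    let _tm = tm_rx.recv().expect("no TM received");
}
```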
309
satrs/src/pus/event_man.rs
Normal file
@ -0,0 +1,309 @@
|
||||
use crate::events::{EventU32, GenericEvent, Severity};
|
||||
#[cfg(feature = "alloc")]
|
||||
use crate::events::{EventU32TypedSev, HasSeverity};
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::boxed::Box;
|
||||
#[cfg(feature = "alloc")]
|
||||
use core::hash::Hash;
|
||||
#[cfg(feature = "alloc")]
|
||||
use hashbrown::HashSet;
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub use crate::pus::event::EventReporter;
|
||||
use crate::pus::verification::TcStateToken;
|
||||
#[cfg(feature = "alloc")]
|
||||
use crate::pus::EcssTmSenderCore;
|
||||
use crate::pus::EcssTmtcError;
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
|
||||
pub use alloc_mod::*;
|
||||
#[cfg(feature = "heapless")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "heapless")))]
|
||||
pub use heapless_mod::*;
|
||||
|
||||
/// This trait allows the PUS event manager implementation to stay generic over various types
|
||||
/// of backend containers.
|
||||
///
|
||||
/// These backend containers keep track of whether a particular event is enabled or disabled for
|
||||
/// reporting and also expose a simple API to enable or disable the event reporting.
|
||||
///
|
||||
/// For example, a straightforward implementation for host systems could use a
/// [hash set](https://docs.rs/hashbrown/latest/hashbrown/struct.HashSet.html)
/// structure to track disabled events. A more primitive and embedded-friendly
/// solution could track this information in a static or pre-allocated list which contains
/// the disabled events.
|
||||
pub trait PusEventMgmtBackendProvider<Provider: GenericEvent> {
|
||||
type Error;
|
||||
|
||||
fn event_enabled(&self, event: &Provider) -> bool;
|
||||
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error>;
|
||||
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error>;
|
||||
}
|
||||
|
||||
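As a sketch of the "static or pre-allocated list" approach mentioned in the trait documentation above, disabled events could be tracked in a fixed-size array. `FixedListBackend` is a hypothetical type, not part of the crate:

```rust
use satrs::events::{EventU32, GenericEvent, LargestEventRaw, Severity};
use satrs::pus::event_man::PusEventMgmtBackendProvider;

// Hypothetical backend which stores disabled events in a fixed-size slot array.
struct FixedListBackend<const N: usize> {
    disabled: [Option<LargestEventRaw>; N],
}

impl<const N: usize> FixedListBackend<N> {
    const fn new() -> Self {
        Self { disabled: [None; N] }
    }
}

impl<const N: usize, Provider: GenericEvent> PusEventMgmtBackendProvider<Provider>
    for FixedListBackend<N>
{
    type Error = ();

    fn event_enabled(&self, event: &Provider) -> bool {
        !self
            .disabled
            .iter()
            .flatten()
            .any(|raw| *raw == event.raw_as_largest_type())
    }

    fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
        for slot in self.disabled.iter_mut() {
            if *slot == Some(event.raw_as_largest_type()) {
                *slot = None;
                return Ok(true);
            }
        }
        Ok(false)
    }

    fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
        for slot in self.disabled.iter_mut() {
            if slot.is_none() {
                *slot = Some(event.raw_as_largest_type());
                return Ok(true);
            }
        }
        // No free slot left.
        Err(())
    }
}

fn main() {
    let mut backend: FixedListBackend<8> = FixedListBackend::new();
    let event = EventU32::const_new(Severity::LOW, 1, 5);
    assert!(backend.event_enabled(&event));
    backend.disable_event_reporting(&event).unwrap();
    assert!(!backend.event_enabled(&event));
}
```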
#[cfg(feature = "heapless")]
|
||||
pub mod heapless_mod {
|
||||
use super::*;
|
||||
use crate::events::{GenericEvent, LargestEventRaw};
|
||||
use std::marker::PhantomData;
|
||||
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "heapless")))]
|
||||
// TODO: After a new version of heapless is released which uses hash32 version 0.3, try using
|
||||
// regular Event type again.
|
||||
#[derive(Default)]
|
||||
pub struct HeaplessPusMgmtBackendProvider<const N: usize, Provider: GenericEvent> {
|
||||
disabled: heapless::FnvIndexSet<LargestEventRaw, N>,
|
||||
phantom: PhantomData<Provider>,
|
||||
}
|
||||
|
||||
/// Safety: All contained fields are [Send] as well
|
||||
unsafe impl<const N: usize, Event: GenericEvent + Send> Send
|
||||
for HeaplessPusMgmtBackendProvider<N, Event>
|
||||
{
|
||||
}
|
||||
|
||||
impl<const N: usize, Provider: GenericEvent> PusEventMgmtBackendProvider<Provider>
|
||||
for HeaplessPusMgmtBackendProvider<N, Provider>
|
||||
{
|
||||
type Error = ();
|
||||
|
||||
fn event_enabled(&self, event: &Provider) -> bool {
|
||||
self.disabled.contains(&event.raw_as_largest_type())
|
||||
}
|
||||
|
||||
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
|
||||
self.disabled
|
||||
.insert(event.raw_as_largest_type())
|
||||
.map_err(|_| ())
|
||||
}
|
||||
|
||||
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
|
||||
Ok(self.disabled.remove(&event.raw_as_largest_type()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum EventRequest<Event: GenericEvent = EventU32> {
|
||||
Enable(Event),
|
||||
Disable(Event),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct EventRequestWithToken<Event: GenericEvent = EventU32> {
|
||||
pub request: EventRequest<Event>,
|
||||
pub token: TcStateToken,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum EventManError {
|
||||
EcssTmtcError(EcssTmtcError),
|
||||
SeverityMissmatch(Severity, Severity),
|
||||
}
|
||||
|
||||
impl From<EcssTmtcError> for EventManError {
|
||||
fn from(v: EcssTmtcError) -> Self {
|
||||
Self::EcssTmtcError(v)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub mod alloc_mod {
|
||||
use super::*;
|
||||
|
||||
/// Default backend provider which uses a hash set as the event reporting status container,
/// as mentioned in the example of the [PusEventMgmtBackendProvider] documentation.
|
||||
///
|
||||
/// This provider is a good option for host systems or larger embedded systems where
|
||||
/// the expected occasional memory allocation performed by the [HashSet] is not an issue.
|
||||
pub struct DefaultPusMgmtBackendProvider<Event: GenericEvent = EventU32> {
|
||||
disabled: HashSet<Event>,
|
||||
}
|
||||
|
||||
/// Safety: All contained fields are [Send] as well
|
||||
unsafe impl<Event: GenericEvent + Send> Send for DefaultPusMgmtBackendProvider<Event> {}
|
||||
|
||||
impl<Event: GenericEvent> Default for DefaultPusMgmtBackendProvider<Event> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
disabled: HashSet::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Provider: GenericEvent + PartialEq + Eq + Hash + Copy + Clone>
|
||||
PusEventMgmtBackendProvider<Provider> for DefaultPusMgmtBackendProvider<Provider>
|
||||
{
|
||||
type Error = ();
|
||||
fn event_enabled(&self, event: &Provider) -> bool {
|
||||
!self.disabled.contains(event)
|
||||
}
|
||||
|
||||
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
|
||||
Ok(self.disabled.remove(event))
|
||||
}
|
||||
|
||||
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
|
||||
Ok(self.disabled.insert(*event))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct PusEventDispatcher<BackendError, Provider: GenericEvent> {
|
||||
reporter: EventReporter,
|
||||
backend: Box<dyn PusEventMgmtBackendProvider<Provider, Error = BackendError>>,
|
||||
}
|
||||
|
||||
/// Safety: All contained fields are [Send] as well.
|
||||
unsafe impl<E: Send, Event: GenericEvent + Send> Send for PusEventDispatcher<E, Event> {}
|
||||
|
||||
impl<BackendError, Provider: GenericEvent> PusEventDispatcher<BackendError, Provider> {
|
||||
pub fn new(
|
||||
reporter: EventReporter,
|
||||
backend: Box<dyn PusEventMgmtBackendProvider<Provider, Error = BackendError>>,
|
||||
) -> Self {
|
||||
Self { reporter, backend }
|
||||
}
|
||||
}
|
||||
|
||||
impl<BackendError, Event: GenericEvent> PusEventDispatcher<BackendError, Event> {
|
||||
pub fn enable_tm_for_event(&mut self, event: &Event) -> Result<bool, BackendError> {
|
||||
self.backend.enable_event_reporting(event)
|
||||
}
|
||||
|
||||
pub fn disable_tm_for_event(&mut self, event: &Event) -> Result<bool, BackendError> {
|
||||
self.backend.disable_event_reporting(event)
|
||||
}
|
||||
|
||||
pub fn generate_pus_event_tm_generic(
|
||||
&mut self,
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event: Event,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<bool, EventManError> {
|
||||
if !self.backend.event_enabled(&event) {
|
||||
return Ok(false);
|
||||
}
|
||||
match event.severity() {
|
||||
Severity::INFO => self
|
||||
.reporter
|
||||
.event_info(sender, time_stamp, event, aux_data)
|
||||
.map(|_| true)
|
||||
.map_err(|e| e.into()),
|
||||
Severity::LOW => self
|
||||
.reporter
|
||||
.event_low_severity(sender, time_stamp, event, aux_data)
|
||||
.map(|_| true)
|
||||
.map_err(|e| e.into()),
|
||||
Severity::MEDIUM => self
|
||||
.reporter
|
||||
.event_medium_severity(sender, time_stamp, event, aux_data)
|
||||
.map(|_| true)
|
||||
.map_err(|e| e.into()),
|
||||
Severity::HIGH => self
|
||||
.reporter
|
||||
.event_high_severity(sender, time_stamp, event, aux_data)
|
||||
.map(|_| true)
|
||||
.map_err(|e| e.into()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<BackendError> PusEventDispatcher<BackendError, EventU32> {
|
||||
pub fn enable_tm_for_event_with_sev<Severity: HasSeverity>(
|
||||
&mut self,
|
||||
event: &EventU32TypedSev<Severity>,
|
||||
) -> Result<bool, BackendError> {
|
||||
self.backend.enable_event_reporting(event.as_ref())
|
||||
}
|
||||
|
||||
pub fn disable_tm_for_event_with_sev<Severity: HasSeverity>(
|
||||
&mut self,
|
||||
event: &EventU32TypedSev<Severity>,
|
||||
) -> Result<bool, BackendError> {
|
||||
self.backend.disable_event_reporting(event.as_ref())
|
||||
}
|
||||
|
||||
pub fn generate_pus_event_tm<Severity: HasSeverity>(
|
||||
&mut self,
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event: EventU32TypedSev<Severity>,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<bool, EventManError> {
|
||||
self.generate_pus_event_tm_generic(sender, time_stamp, event.into(), aux_data)
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::events::SeverityInfo;
|
||||
use crate::pus::MpscTmAsVecSender;
|
||||
use std::sync::mpsc::{channel, TryRecvError};
|
||||
|
||||
const INFO_EVENT: EventU32TypedSev<SeverityInfo> =
|
||||
EventU32TypedSev::<SeverityInfo>::const_new(1, 0);
|
||||
const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5);
|
||||
const EMPTY_STAMP: [u8; 7] = [0; 7];
|
||||
|
||||
fn create_basic_man() -> PusEventDispatcher<(), EventU32> {
|
||||
        let reporter = EventReporter::new(0x02, 128).expect("Creating event reporter failed");
|
||||
let backend = DefaultPusMgmtBackendProvider::<EventU32>::default();
|
||||
PusEventDispatcher::new(reporter, Box::new(backend))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic() {
|
||||
let mut event_man = create_basic_man();
|
||||
let (event_tx, event_rx) = channel();
|
||||
let mut sender = MpscTmAsVecSender::new(0, "test_sender", event_tx);
|
||||
let event_sent = event_man
|
||||
.generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
|
||||
.expect("Sending info event failed");
|
||||
|
||||
assert!(event_sent);
|
||||
        // The packet itself is not checked here; its correctness is covered by other tests.
|
||||
event_rx.try_recv().expect("Receiving event TM failed");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_disable_event() {
|
||||
let mut event_man = create_basic_man();
|
||||
let (event_tx, event_rx) = channel();
|
||||
let mut sender = MpscTmAsVecSender::new(0, "test", event_tx);
|
||||
let res = event_man.disable_tm_for_event(&LOW_SEV_EVENT);
|
||||
assert!(res.is_ok());
|
||||
assert!(res.unwrap());
|
||||
let mut event_sent = event_man
|
||||
.generate_pus_event_tm_generic(&mut sender, &EMPTY_STAMP, LOW_SEV_EVENT, None)
|
||||
.expect("Sending low severity event failed");
|
||||
assert!(!event_sent);
|
||||
let res = event_rx.try_recv();
|
||||
assert!(res.is_err());
|
||||
assert!(matches!(res.unwrap_err(), TryRecvError::Empty));
|
||||
// Check that only the low severity event was disabled
|
||||
event_sent = event_man
|
||||
.generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
|
||||
.expect("Sending info event failed");
|
||||
assert!(event_sent);
|
||||
event_rx.try_recv().expect("No info event received");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_reenable_event() {
|
||||
let mut event_man = create_basic_man();
|
||||
let (event_tx, event_rx) = channel();
|
||||
let mut sender = MpscTmAsVecSender::new(0, "test", event_tx);
|
||||
let mut res = event_man.disable_tm_for_event_with_sev(&INFO_EVENT);
|
||||
assert!(res.is_ok());
|
||||
assert!(res.unwrap());
|
||||
res = event_man.enable_tm_for_event_with_sev(&INFO_EVENT);
|
||||
assert!(res.is_ok());
|
||||
assert!(res.unwrap());
|
||||
let event_sent = event_man
|
||||
.generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
|
||||
.expect("Sending info event failed");
|
||||
assert!(event_sent);
|
||||
event_rx.try_recv().expect("No info event received");
|
||||
}
|
||||
}
|
280
satrs/src/pus/event_srv.rs
Normal file
@@ -0,0 +1,280 @@
|
||||
use crate::events::EventU32;
|
||||
use crate::pus::event_man::{EventRequest, EventRequestWithToken};
|
||||
use crate::pus::verification::TcStateToken;
|
||||
use crate::pus::{PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError};
|
||||
use spacepackets::ecss::event::Subservice;
|
||||
use spacepackets::ecss::PusPacket;
|
||||
use std::sync::mpsc::Sender;
|
||||
|
||||
use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
|
||||
|
||||
pub struct PusService5EventHandler<TcInMemConverter: EcssTcInMemConverter> {
|
||||
pub service_helper: PusServiceHelper<TcInMemConverter>,
|
||||
event_request_tx: Sender<EventRequestWithToken>,
|
||||
}
|
||||
|
||||
impl<TcInMemConverter: EcssTcInMemConverter> PusService5EventHandler<TcInMemConverter> {
|
||||
pub fn new(
|
||||
service_handler: PusServiceHelper<TcInMemConverter>,
|
||||
event_request_tx: Sender<EventRequestWithToken>,
|
||||
) -> Self {
|
||||
Self {
|
||||
service_helper: service_handler,
|
||||
event_request_tx,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
|
||||
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
|
||||
if possible_packet.is_none() {
|
||||
return Ok(PusPacketHandlerResult::Empty);
|
||||
}
|
||||
let ecss_tc_and_token = possible_packet.unwrap();
|
||||
let tc = self
|
||||
.service_helper
|
||||
.tc_in_mem_converter
|
||||
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
|
||||
let subservice = tc.subservice();
|
||||
let srv = Subservice::try_from(subservice);
|
||||
if srv.is_err() {
|
||||
return Ok(PusPacketHandlerResult::CustomSubservice(
|
||||
tc.subservice(),
|
||||
ecss_tc_and_token.token,
|
||||
));
|
||||
}
|
||||
let handle_enable_disable_request = |enable: bool, stamp: [u8; 7]| {
|
||||
if tc.user_data().len() < 4 {
|
||||
return Err(PusPacketHandlingError::NotEnoughAppData(
|
||||
"at least 4 bytes event ID expected".into(),
|
||||
));
|
||||
}
|
||||
let user_data = tc.user_data();
|
||||
let event_u32 = EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap()));
|
||||
let start_token = self
|
||||
.service_helper
|
||||
.common
|
||||
.verification_handler
|
||||
.borrow_mut()
|
||||
.start_success(ecss_tc_and_token.token, Some(&stamp))
|
||||
.map_err(|_| PartialPusHandlingError::Verification);
|
||||
let partial_error = start_token.clone().err();
|
||||
let mut token: TcStateToken = ecss_tc_and_token.token.into();
|
||||
if let Ok(start_token) = start_token {
|
||||
token = start_token.into();
|
||||
}
|
||||
let event_req_with_token = if enable {
|
||||
EventRequestWithToken {
|
||||
request: EventRequest::Enable(event_u32),
|
||||
token,
|
||||
}
|
||||
} else {
|
||||
EventRequestWithToken {
|
||||
request: EventRequest::Disable(event_u32),
|
||||
token,
|
||||
}
|
||||
};
|
||||
self.event_request_tx
|
||||
.send(event_req_with_token)
|
||||
.map_err(|_| {
|
||||
PusPacketHandlingError::Other("Forwarding event request failed".into())
|
||||
})?;
|
||||
if let Some(partial_error) = partial_error {
|
||||
return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
|
||||
partial_error,
|
||||
));
|
||||
}
|
||||
Ok(PusPacketHandlerResult::RequestHandled)
|
||||
};
|
||||
let mut partial_error = None;
|
||||
let time_stamp = PusServiceBase::get_current_timestamp(&mut partial_error);
|
||||
match srv.unwrap() {
|
||||
Subservice::TmInfoReport
|
||||
| Subservice::TmLowSeverityReport
|
||||
| Subservice::TmMediumSeverityReport
|
||||
| Subservice::TmHighSeverityReport => {
|
||||
return Err(PusPacketHandlingError::InvalidSubservice(tc.subservice()))
|
||||
}
|
||||
Subservice::TcEnableEventGeneration => {
|
||||
handle_enable_disable_request(true, time_stamp)?;
|
||||
}
|
||||
Subservice::TcDisableEventGeneration => {
|
||||
handle_enable_disable_request(false, time_stamp)?;
|
||||
}
|
||||
Subservice::TcReportDisabledList | Subservice::TmDisabledEventsReport => {
|
||||
return Ok(PusPacketHandlerResult::SubserviceNotImplemented(
|
||||
subservice,
|
||||
ecss_tc_and_token.token,
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(PusPacketHandlerResult::RequestHandled)
|
||||
}
|
||||
}
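// A minimal wiring sketch (the function name is an illustrative assumption): the user
// creates the event request channel, hands the sending end to the service handler and
// polls the receiving end in the component owning the PUS event management backend,
// where the received [EventRequest::Enable] and [EventRequest::Disable] requests are
// applied.
#[allow(dead_code)]
fn event_service_wiring_sketch<TcInMemConverter: EcssTcInMemConverter>(
    service_helper: PusServiceHelper<TcInMemConverter>,
) -> (
    PusService5EventHandler<TcInMemConverter>,
    std::sync::mpsc::Receiver<EventRequestWithToken>,
) {
    let (event_request_tx, event_request_rx) = std::sync::mpsc::channel();
    (
        PusService5EventHandler::new(service_helper, event_request_tx),
        event_request_rx,
    )
}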
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use delegate::delegate;
|
||||
use spacepackets::ecss::event::Subservice;
|
||||
use spacepackets::util::UnsignedEnum;
|
||||
use spacepackets::{
|
||||
ecss::{
|
||||
tc::{PusTcCreator, PusTcSecondaryHeader},
|
||||
tm::PusTmReader,
|
||||
},
|
||||
SequenceFlags, SpHeader,
|
||||
};
|
||||
use std::sync::mpsc::{self, Sender};
|
||||
|
||||
use crate::pus::event_man::EventRequest;
|
||||
use crate::pus::tests::SimplePusPacketHandler;
|
||||
use crate::pus::verification::RequestId;
|
||||
use crate::{
|
||||
events::EventU32,
|
||||
pus::{
|
||||
event_man::EventRequestWithToken,
|
||||
tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness, TEST_APID},
|
||||
verification::{TcStateAccepted, VerificationToken},
|
||||
EcssTcInSharedStoreConverter, PusPacketHandlerResult, PusPacketHandlingError,
|
||||
},
|
||||
};
|
||||
|
||||
use super::PusService5EventHandler;
|
||||
|
||||
const TEST_EVENT_0: EventU32 = EventU32::const_new(crate::events::Severity::INFO, 5, 25);
|
||||
|
||||
struct Pus5HandlerWithStoreTester {
|
||||
common: PusServiceHandlerWithSharedStoreCommon,
|
||||
handler: PusService5EventHandler<EcssTcInSharedStoreConverter>,
|
||||
}
|
||||
|
||||
impl Pus5HandlerWithStoreTester {
|
||||
pub fn new(event_request_tx: Sender<EventRequestWithToken>) -> Self {
|
||||
let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
|
||||
Self {
|
||||
common,
|
||||
handler: PusService5EventHandler::new(srv_handler, event_request_tx),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PusTestHarness for Pus5HandlerWithStoreTester {
|
||||
delegate! {
|
||||
to self.common {
|
||||
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
|
||||
fn read_next_tm(&mut self) -> PusTmReader<'_>;
|
||||
fn check_no_tm_available(&self) -> bool;
|
||||
fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
impl SimplePusPacketHandler for Pus5HandlerWithStoreTester {
|
||||
delegate! {
|
||||
to self.handler {
|
||||
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn event_test(
|
||||
test_harness: &mut (impl PusTestHarness + SimplePusPacketHandler),
|
||||
subservice: Subservice,
|
||||
expected_event_req: EventRequest,
|
||||
event_req_receiver: mpsc::Receiver<EventRequestWithToken>,
|
||||
) {
|
||||
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(5, subservice as u8);
|
||||
let mut app_data = [0; 4];
|
||||
TEST_EVENT_0
|
||||
.write_to_be_bytes(&mut app_data)
|
||||
.expect("writing test event failed");
|
||||
let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true);
|
||||
let token = test_harness.send_tc(&ping_tc);
|
||||
let request_id = token.req_id();
|
||||
test_harness.handle_one_tc().unwrap();
|
||||
test_harness.check_next_verification_tm(1, request_id);
|
||||
test_harness.check_next_verification_tm(3, request_id);
|
||||
        // The completion TM is not generated by the event service handler itself.
|
||||
assert!(test_harness.check_no_tm_available());
|
||||
let event_request = event_req_receiver
|
||||
.try_recv()
|
||||
.expect("no event request received");
|
||||
assert_eq!(expected_event_req, event_request.request);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_enabling_event_reporting() {
|
||||
let (event_request_tx, event_request_rx) = mpsc::channel();
|
||||
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
|
||||
event_test(
|
||||
&mut test_harness,
|
||||
Subservice::TcEnableEventGeneration,
|
||||
EventRequest::Enable(TEST_EVENT_0),
|
||||
event_request_rx,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_disabling_event_reporting() {
|
||||
let (event_request_tx, event_request_rx) = mpsc::channel();
|
||||
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
|
||||
event_test(
|
||||
&mut test_harness,
|
||||
Subservice::TcDisableEventGeneration,
|
||||
EventRequest::Disable(TEST_EVENT_0),
|
||||
event_request_rx,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_tc_queue() {
|
||||
let (event_request_tx, _) = mpsc::channel();
|
||||
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
|
||||
let result = test_harness.handle_one_tc();
|
||||
assert!(result.is_ok());
|
||||
let result = result.unwrap();
|
||||
if let PusPacketHandlerResult::Empty = result {
|
||||
} else {
|
||||
panic!("unexpected result type {result:?}")
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sending_custom_subservice() {
|
||||
let (event_request_tx, _) = mpsc::channel();
|
||||
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
|
||||
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(5, 200);
|
||||
let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
|
||||
test_harness.send_tc(&ping_tc);
|
||||
let result = test_harness.handle_one_tc();
|
||||
assert!(result.is_ok());
|
||||
let result = result.unwrap();
|
||||
if let PusPacketHandlerResult::CustomSubservice(subservice, _) = result {
|
||||
assert_eq!(subservice, 200);
|
||||
} else {
|
||||
panic!("unexpected result type {result:?}")
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sending_invalid_app_data() {
|
||||
let (event_request_tx, _) = mpsc::channel();
|
||||
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
|
||||
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
|
||||
let sec_header =
|
||||
PusTcSecondaryHeader::new_simple(5, Subservice::TcEnableEventGeneration as u8);
|
||||
let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &[0, 1, 2], true);
|
||||
test_harness.send_tc(&ping_tc);
|
||||
let result = test_harness.handle_one_tc();
|
||||
assert!(result.is_err());
|
||||
let result = result.unwrap_err();
|
||||
if let PusPacketHandlingError::NotEnoughAppData(string) = result {
|
||||
assert_eq!(string, "at least 4 bytes event ID expected");
|
||||
} else {
|
||||
panic!("unexpected result type {result:?}")
|
||||
}
|
||||
}
|
||||
}
|
1
satrs/src/pus/hk.rs
Normal file
@@ -0,0 +1 @@
pub use spacepackets::ecss::hk::*;
1194
satrs/src/pus/mod.rs
Normal file
File diff suppressed because it is too large
16
satrs/src/pus/mode.rs
Normal file
@@ -0,0 +1,16 @@
use num_enum::{IntoPrimitive, TryFromPrimitive};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[repr(u8)]
pub enum Subservice {
    TcSetMode = 1,
    TcReadMode = 3,
    TcAnnounceMode = 4,
    TcAnnounceModeRecursive = 5,
    TmModeReply = 6,
    TmCantReachMode = 7,
    TmWrongModeReply = 8,
}
2026
satrs/src/pus/scheduler.rs
Normal file
File diff suppressed because it is too large
355
satrs/src/pus/scheduler_srv.rs
Normal file
@@ -0,0 +1,355 @@
|
||||
use super::scheduler::PusSchedulerProvider;
|
||||
use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
|
||||
use crate::pool::PoolProvider;
|
||||
use crate::pus::{PusPacketHandlerResult, PusPacketHandlingError};
|
||||
use alloc::string::ToString;
|
||||
use spacepackets::ecss::{scheduling, PusPacket};
|
||||
use spacepackets::time::cds::TimeProvider;
|
||||
|
||||
/// This is a helper class for [std] environments to handle generic PUS 11 (scheduling service)
|
||||
/// packets. This handler is able to handle the most important PUS requests for a scheduling
|
||||
/// service built on top of a [PusSchedulerProvider].
|
||||
///
|
||||
/// Please note that this class does not do the regular periodic handling like releasing any
|
||||
/// telecommands inside the scheduler. The user can retrieve the wrapped scheduler via the
|
||||
/// [Self::scheduler] and [Self::scheduler_mut] functions and then use the scheduler API to release
|
||||
/// telecommands when applicable.
|
||||
pub struct PusService11SchedHandler<
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
PusScheduler: PusSchedulerProvider,
|
||||
> {
|
||||
pub service_helper: PusServiceHelper<TcInMemConverter>,
|
||||
scheduler: PusScheduler,
|
||||
}
|
||||
|
||||
impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerProvider>
|
||||
PusService11SchedHandler<TcInMemConverter, Scheduler>
|
||||
{
|
||||
pub fn new(service_helper: PusServiceHelper<TcInMemConverter>, scheduler: Scheduler) -> Self {
|
||||
Self {
|
||||
service_helper,
|
||||
scheduler,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn scheduler_mut(&mut self) -> &mut Scheduler {
|
||||
&mut self.scheduler
|
||||
}
|
||||
|
||||
pub fn scheduler(&self) -> &Scheduler {
|
||||
&self.scheduler
|
||||
}
|
||||
|
||||
pub fn handle_one_tc(
|
||||
&mut self,
|
||||
sched_tc_pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
|
||||
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
|
||||
if possible_packet.is_none() {
|
||||
return Ok(PusPacketHandlerResult::Empty);
|
||||
}
|
||||
let ecss_tc_and_token = possible_packet.unwrap();
|
||||
let tc = self
|
||||
.service_helper
|
||||
.tc_in_mem_converter
|
||||
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
|
||||
let subservice = PusPacket::subservice(&tc);
|
||||
let standard_subservice = scheduling::Subservice::try_from(subservice);
|
||||
if standard_subservice.is_err() {
|
||||
return Ok(PusPacketHandlerResult::CustomSubservice(
|
||||
subservice,
|
||||
ecss_tc_and_token.token,
|
||||
));
|
||||
}
|
||||
let mut partial_error = None;
|
||||
let time_stamp = PusServiceBase::get_current_timestamp(&mut partial_error);
|
||||
match standard_subservice.unwrap() {
|
||||
scheduling::Subservice::TcEnableScheduling => {
|
||||
let start_token = self
|
||||
.service_helper
|
||||
.common
|
||||
.verification_handler
|
||||
.get_mut()
|
||||
.start_success(ecss_tc_and_token.token, Some(&time_stamp))
|
||||
.expect("Error sending start success");
|
||||
|
||||
self.scheduler.enable();
|
||||
if self.scheduler.is_enabled() {
|
||||
self.service_helper
|
||||
.common
|
||||
.verification_handler
|
||||
.get_mut()
|
||||
.completion_success(start_token, Some(&time_stamp))
|
||||
.expect("Error sending completion success");
|
||||
} else {
|
||||
                    return Err(PusPacketHandlingError::Other(
                        "failed to enable scheduler".to_string(),
|
||||
));
|
||||
}
|
||||
}
|
||||
scheduling::Subservice::TcDisableScheduling => {
|
||||
let start_token = self
|
||||
.service_helper
|
||||
.common
|
||||
.verification_handler
|
||||
.get_mut()
|
||||
.start_success(ecss_tc_and_token.token, Some(&time_stamp))
|
||||
.expect("Error sending start success");
|
||||
|
||||
self.scheduler.disable();
|
||||
if !self.scheduler.is_enabled() {
|
||||
self.service_helper
|
||||
.common
|
||||
.verification_handler
|
||||
.get_mut()
|
||||
.completion_success(start_token, Some(&time_stamp))
|
||||
.expect("Error sending completion success");
|
||||
} else {
|
||||
return Err(PusPacketHandlingError::Other(
|
||||
"failed to disable scheduler".to_string(),
|
||||
));
|
||||
}
|
||||
}
|
||||
scheduling::Subservice::TcResetScheduling => {
|
||||
let start_token = self
|
||||
.service_helper
|
||||
.common
|
||||
.verification_handler
|
||||
.get_mut()
|
||||
.start_success(ecss_tc_and_token.token, Some(&time_stamp))
|
||||
.expect("Error sending start success");
|
||||
|
||||
self.scheduler
|
||||
.reset(sched_tc_pool)
|
||||
.expect("Error resetting TC Pool");
|
||||
|
||||
self.service_helper
|
||||
.common
|
||||
.verification_handler
|
||||
.get_mut()
|
||||
.completion_success(start_token, Some(&time_stamp))
|
||||
.expect("Error sending completion success");
|
||||
}
|
||||
scheduling::Subservice::TcInsertActivity => {
|
||||
let start_token = self
|
||||
.service_helper
|
||||
.common
|
||||
.verification_handler
|
||||
.get_mut()
|
||||
.start_success(ecss_tc_and_token.token, Some(&time_stamp))
|
||||
.expect("error sending start success");
|
||||
|
||||
// let mut pool = self.sched_tc_pool.write().expect("locking pool failed");
|
||||
self.scheduler
|
||||
.insert_wrapped_tc::<TimeProvider>(&tc, sched_tc_pool)
|
||||
.expect("insertion of activity into pool failed");
|
||||
|
||||
self.service_helper
|
||||
.common
|
||||
.verification_handler
|
||||
.get_mut()
|
||||
.completion_success(start_token, Some(&time_stamp))
|
||||
.expect("sending completion success failed");
|
||||
}
|
||||
_ => {
|
||||
// Treat unhandled standard subservices as custom subservices for now.
|
||||
return Ok(PusPacketHandlerResult::CustomSubservice(
|
||||
subservice,
|
||||
ecss_tc_and_token.token,
|
||||
));
|
||||
}
|
||||
}
|
||||
if let Some(partial_error) = partial_error {
|
||||
return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
|
||||
partial_error,
|
||||
));
|
||||
}
|
||||
Ok(PusPacketHandlerResult::RequestHandled)
|
||||
}
|
||||
}
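// A minimal polling sketch, assuming the handler is driven by a dedicated service
// thread (the function name is illustrative): the TC queue is drained until it is
// empty. Releasing telecommands which have become due is not done here and has to be
// performed separately through [PusService11SchedHandler::scheduler_mut] using the
// concrete scheduler API.
#[allow(dead_code)]
fn handle_all_pending_tcs<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerProvider>(
    handler: &mut PusService11SchedHandler<TcInMemConverter, Scheduler>,
    sched_tc_pool: &mut (impl PoolProvider + ?Sized),
) -> Result<(), PusPacketHandlingError> {
    loop {
        match handler.handle_one_tc(&mut *sched_tc_pool)? {
            // No more telecommands in the queue.
            PusPacketHandlerResult::Empty => return Ok(()),
            // All other Ok variants mean that a packet was processed in some form.
            _ => (),
        }
    }
}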
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::pool::{StaticMemoryPool, StaticPoolConfig};
|
||||
use crate::pus::tests::TEST_APID;
|
||||
use crate::pus::{
|
||||
scheduler::{self, PusSchedulerProvider, TcInfo},
|
||||
tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness},
|
||||
verification::{RequestId, TcStateAccepted, VerificationToken},
|
||||
EcssTcInSharedStoreConverter,
|
||||
};
|
||||
use alloc::collections::VecDeque;
|
||||
use delegate::delegate;
|
||||
use spacepackets::ecss::scheduling::Subservice;
|
||||
use spacepackets::ecss::tc::PusTcSecondaryHeader;
|
||||
use spacepackets::ecss::WritablePusPacket;
|
||||
use spacepackets::time::TimeWriter;
|
||||
use spacepackets::SpHeader;
|
||||
use spacepackets::{
|
||||
ecss::{tc::PusTcCreator, tm::PusTmReader},
|
||||
time::cds,
|
||||
};
|
||||
|
||||
use super::PusService11SchedHandler;
|
||||
|
||||
struct Pus11HandlerWithStoreTester {
|
||||
common: PusServiceHandlerWithSharedStoreCommon,
|
||||
handler: PusService11SchedHandler<EcssTcInSharedStoreConverter, TestScheduler>,
|
||||
sched_tc_pool: StaticMemoryPool,
|
||||
}
|
||||
|
||||
impl Pus11HandlerWithStoreTester {
|
||||
pub fn new() -> Self {
|
||||
let test_scheduler = TestScheduler::default();
|
||||
let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)], false);
|
||||
let sched_tc_pool = StaticMemoryPool::new(pool_cfg.clone());
|
||||
let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
|
||||
Self {
|
||||
common,
|
||||
handler: PusService11SchedHandler::new(srv_handler, test_scheduler),
|
||||
sched_tc_pool,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PusTestHarness for Pus11HandlerWithStoreTester {
|
||||
delegate! {
|
||||
to self.common {
|
||||
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
|
||||
fn read_next_tm(&mut self) -> PusTmReader<'_>;
|
||||
fn check_no_tm_available(&self) -> bool;
|
||||
fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct TestScheduler {
|
||||
reset_count: u32,
|
||||
enabled: bool,
|
||||
enabled_count: u32,
|
||||
disabled_count: u32,
|
||||
inserted_tcs: VecDeque<TcInfo>,
|
||||
}
|
||||
|
||||
impl PusSchedulerProvider for TestScheduler {
|
||||
type TimeProvider = cds::TimeProvider;
|
||||
|
||||
fn reset(
|
||||
&mut self,
|
||||
_store: &mut (impl crate::pool::PoolProvider + ?Sized),
|
||||
) -> Result<(), crate::pool::StoreError> {
|
||||
self.reset_count += 1;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn is_enabled(&self) -> bool {
|
||||
self.enabled
|
||||
}
|
||||
|
||||
fn enable(&mut self) {
|
||||
self.enabled_count += 1;
|
||||
self.enabled = true;
|
||||
}
|
||||
|
||||
fn disable(&mut self) {
|
||||
self.disabled_count += 1;
|
||||
self.enabled = false;
|
||||
}
|
||||
|
||||
fn insert_unwrapped_and_stored_tc(
|
||||
&mut self,
|
||||
_time_stamp: spacepackets::time::UnixTimestamp,
|
||||
info: crate::pus::scheduler::TcInfo,
|
||||
) -> Result<(), crate::pus::scheduler::ScheduleError> {
|
||||
self.inserted_tcs.push_back(info);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn generic_subservice_send(
|
||||
test_harness: &mut Pus11HandlerWithStoreTester,
|
||||
subservice: Subservice,
|
||||
) {
|
||||
let mut reply_header = SpHeader::tm_unseg(TEST_APID, 0, 0).unwrap();
|
||||
let tc_header = PusTcSecondaryHeader::new_simple(11, subservice as u8);
|
||||
let enable_scheduling = PusTcCreator::new(&mut reply_header, tc_header, &[0; 7], true);
|
||||
let token = test_harness.send_tc(&enable_scheduling);
|
||||
|
||||
let request_id = token.req_id();
|
||||
test_harness
|
||||
.handler
|
||||
.handle_one_tc(&mut test_harness.sched_tc_pool)
|
||||
.unwrap();
|
||||
test_harness.check_next_verification_tm(1, request_id);
|
||||
test_harness.check_next_verification_tm(3, request_id);
|
||||
test_harness.check_next_verification_tm(7, request_id);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scheduling_enabling_tc() {
|
||||
let mut test_harness = Pus11HandlerWithStoreTester::new();
|
||||
test_harness.handler.scheduler_mut().disable();
|
||||
assert!(!test_harness.handler.scheduler().is_enabled());
|
||||
generic_subservice_send(&mut test_harness, Subservice::TcEnableScheduling);
|
||||
assert!(test_harness.handler.scheduler().is_enabled());
|
||||
assert_eq!(test_harness.handler.scheduler().enabled_count, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scheduling_disabling_tc() {
|
||||
let mut test_harness = Pus11HandlerWithStoreTester::new();
|
||||
test_harness.handler.scheduler_mut().enable();
|
||||
assert!(test_harness.handler.scheduler().is_enabled());
|
||||
generic_subservice_send(&mut test_harness, Subservice::TcDisableScheduling);
|
||||
assert!(!test_harness.handler.scheduler().is_enabled());
|
||||
assert_eq!(test_harness.handler.scheduler().disabled_count, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_reset_scheduler_tc() {
|
||||
let mut test_harness = Pus11HandlerWithStoreTester::new();
|
||||
generic_subservice_send(&mut test_harness, Subservice::TcResetScheduling);
|
||||
assert_eq!(test_harness.handler.scheduler().reset_count, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_insert_activity_tc() {
|
||||
let mut test_harness = Pus11HandlerWithStoreTester::new();
|
||||
let mut reply_header = SpHeader::tm_unseg(TEST_APID, 0, 0).unwrap();
|
||||
let mut sec_header = PusTcSecondaryHeader::new_simple(17, 1);
|
||||
let ping_tc = PusTcCreator::new(&mut reply_header, sec_header, &[], true);
|
||||
let req_id_ping_tc = scheduler::RequestId::from_tc(&ping_tc);
|
||||
let stamper = cds::TimeProvider::from_now_with_u16_days().expect("time provider failed");
|
||||
let mut sched_app_data: [u8; 64] = [0; 64];
|
||||
let mut written_len = stamper.write_to_bytes(&mut sched_app_data).unwrap();
|
||||
let ping_raw = ping_tc.to_vec().expect("generating raw tc failed");
|
||||
sched_app_data[written_len..written_len + ping_raw.len()].copy_from_slice(&ping_raw);
|
||||
written_len += ping_raw.len();
|
||||
reply_header = SpHeader::tm_unseg(TEST_APID, 1, 0).unwrap();
|
||||
sec_header = PusTcSecondaryHeader::new_simple(11, Subservice::TcInsertActivity as u8);
|
||||
let enable_scheduling = PusTcCreator::new(
|
||||
&mut reply_header,
|
||||
sec_header,
|
||||
&sched_app_data[..written_len],
|
||||
true,
|
||||
);
|
||||
let token = test_harness.send_tc(&enable_scheduling);
|
||||
|
||||
let request_id = token.req_id();
|
||||
test_harness
|
||||
.handler
|
||||
.handle_one_tc(&mut test_harness.sched_tc_pool)
|
||||
.unwrap();
|
||||
test_harness.check_next_verification_tm(1, request_id);
|
||||
test_harness.check_next_verification_tm(3, request_id);
|
||||
test_harness.check_next_verification_tm(7, request_id);
|
||||
let tc_info = test_harness
|
||||
.handler
|
||||
.scheduler_mut()
|
||||
.inserted_tcs
|
||||
.pop_front()
|
||||
.unwrap();
|
||||
assert_eq!(tc_info.request_id(), req_id_ping_tc);
|
||||
}
|
||||
}
|
272
satrs/src/pus/test.rs
Normal file
@@ -0,0 +1,272 @@
|
||||
use crate::pus::{
|
||||
PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, PusTmWrapper,
|
||||
};
|
||||
use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
|
||||
use spacepackets::ecss::PusPacket;
|
||||
use spacepackets::SpHeader;
|
||||
|
||||
use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
|
||||
|
||||
/// This is a helper class for [std] environments to handle generic PUS 17 (test service) packets.
|
||||
/// This handler only processes ping requests and generates a ping reply for them accordingly.
|
||||
pub struct PusService17TestHandler<TcInMemConverter: EcssTcInMemConverter> {
|
||||
pub service_helper: PusServiceHelper<TcInMemConverter>,
|
||||
}
|
||||
|
||||
impl<TcInMemConverter: EcssTcInMemConverter> PusService17TestHandler<TcInMemConverter> {
|
||||
pub fn new(service_helper: PusServiceHelper<TcInMemConverter>) -> Self {
|
||||
Self { service_helper }
|
||||
}
|
||||
|
||||
pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
|
||||
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
|
||||
if possible_packet.is_none() {
|
||||
return Ok(PusPacketHandlerResult::Empty);
|
||||
}
|
||||
let ecss_tc_and_token = possible_packet.unwrap();
|
||||
let tc = self
|
||||
.service_helper
|
||||
.tc_in_mem_converter
|
||||
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
|
||||
if tc.service() != 17 {
|
||||
return Err(PusPacketHandlingError::WrongService(tc.service()));
|
||||
}
|
||||
if tc.subservice() == 1 {
|
||||
let mut partial_error = None;
|
||||
let time_stamp = PusServiceBase::get_current_timestamp(&mut partial_error);
|
||||
let result = self
|
||||
.service_helper
|
||||
.common
|
||||
.verification_handler
|
||||
.get_mut()
|
||||
.start_success(ecss_tc_and_token.token, Some(&time_stamp))
|
||||
.map_err(|_| PartialPusHandlingError::Verification);
|
||||
let start_token = if let Ok(result) = result {
|
||||
Some(result)
|
||||
} else {
|
||||
partial_error = Some(result.unwrap_err());
|
||||
None
|
||||
};
|
||||
            // The sequence count will be handled centrally in the TM funnel.
|
||||
let mut reply_header =
|
||||
SpHeader::tm_unseg(self.service_helper.common.tm_apid, 0, 0).unwrap();
|
||||
let tc_header = PusTmSecondaryHeader::new_simple(17, 2, &time_stamp);
|
||||
let ping_reply = PusTmCreator::new(&mut reply_header, tc_header, &[], true);
|
||||
let result = self
|
||||
.service_helper
|
||||
.common
|
||||
.tm_sender
|
||||
.send_tm(PusTmWrapper::Direct(ping_reply))
|
||||
.map_err(PartialPusHandlingError::TmSend);
|
||||
if let Err(err) = result {
|
||||
partial_error = Some(err);
|
||||
}
|
||||
|
||||
if let Some(start_token) = start_token {
|
||||
if self
|
||||
.service_helper
|
||||
.common
|
||||
.verification_handler
|
||||
.get_mut()
|
||||
.completion_success(start_token, Some(&time_stamp))
|
||||
.is_err()
|
||||
{
|
||||
partial_error = Some(PartialPusHandlingError::Verification)
|
||||
}
|
||||
}
|
||||
if let Some(partial_error) = partial_error {
|
||||
return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
|
||||
partial_error,
|
||||
));
|
||||
};
|
||||
} else {
|
||||
return Ok(PusPacketHandlerResult::CustomSubservice(
|
||||
tc.subservice(),
|
||||
ecss_tc_and_token.token,
|
||||
));
|
||||
}
|
||||
Ok(PusPacketHandlerResult::RequestHandled)
|
||||
}
|
||||
}
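// A minimal polling sketch (illustrative only): the ping handler is typically drained
// by its service thread until the queue runs empty. Partial successes are returned
// inside the Ok variant, so the caller can log them without aborting the polling loop.
#[allow(dead_code)]
fn poll_test_service<TcInMemConverter: EcssTcInMemConverter>(
    handler: &mut PusService17TestHandler<TcInMemConverter>,
) -> Result<(), PusPacketHandlingError> {
    loop {
        match handler.handle_one_tc()? {
            PusPacketHandlerResult::Empty => return Ok(()),
            PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
                // A ping reply was still generated, but one step (for example a
                // verification report) failed. A real system would log `e` here.
                let _ = e;
            }
            _ => (),
        }
    }
}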
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::pus::tests::{
|
||||
PusServiceHandlerWithSharedStoreCommon, PusServiceHandlerWithVecCommon, PusTestHarness,
|
||||
SimplePusPacketHandler, TEST_APID,
|
||||
};
|
||||
use crate::pus::verification::RequestId;
|
||||
use crate::pus::verification::{TcStateAccepted, VerificationToken};
|
||||
use crate::pus::{
|
||||
EcssTcInSharedStoreConverter, EcssTcInVecConverter, PusPacketHandlerResult,
|
||||
PusPacketHandlingError,
|
||||
};
|
||||
use delegate::delegate;
|
||||
use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
|
||||
use spacepackets::ecss::tm::PusTmReader;
|
||||
use spacepackets::ecss::PusPacket;
|
||||
use spacepackets::{SequenceFlags, SpHeader};
|
||||
|
||||
use super::PusService17TestHandler;
|
||||
|
||||
struct Pus17HandlerWithStoreTester {
|
||||
common: PusServiceHandlerWithSharedStoreCommon,
|
||||
handler: PusService17TestHandler<EcssTcInSharedStoreConverter>,
|
||||
}
|
||||
|
||||
impl Pus17HandlerWithStoreTester {
|
||||
pub fn new() -> Self {
|
||||
let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
|
||||
let pus_17_handler = PusService17TestHandler::new(srv_handler);
|
||||
Self {
|
||||
common,
|
||||
handler: pus_17_handler,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PusTestHarness for Pus17HandlerWithStoreTester {
|
||||
delegate! {
|
||||
to self.common {
|
||||
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
|
||||
fn read_next_tm(&mut self) -> PusTmReader<'_>;
|
||||
fn check_no_tm_available(&self) -> bool;
|
||||
fn check_next_verification_tm(
|
||||
&self,
|
||||
subservice: u8,
|
||||
expected_request_id: RequestId
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
impl SimplePusPacketHandler for Pus17HandlerWithStoreTester {
|
||||
delegate! {
|
||||
to self.handler {
|
||||
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct Pus17HandlerWithVecTester {
|
||||
common: PusServiceHandlerWithVecCommon,
|
||||
handler: PusService17TestHandler<EcssTcInVecConverter>,
|
||||
}
|
||||
|
||||
impl Pus17HandlerWithVecTester {
|
||||
pub fn new() -> Self {
|
||||
let (common, srv_handler) = PusServiceHandlerWithVecCommon::new();
|
||||
Self {
|
||||
common,
|
||||
handler: PusService17TestHandler::new(srv_handler),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PusTestHarness for Pus17HandlerWithVecTester {
|
||||
delegate! {
|
||||
to self.common {
|
||||
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
|
||||
fn read_next_tm(&mut self) -> PusTmReader<'_>;
|
||||
fn check_no_tm_available(&self) -> bool;
|
||||
fn check_next_verification_tm(
|
||||
&self,
|
||||
subservice: u8,
|
||||
expected_request_id: RequestId,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
impl SimplePusPacketHandler for Pus17HandlerWithVecTester {
|
||||
delegate! {
|
||||
to self.handler {
|
||||
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn ping_test(test_harness: &mut (impl PusTestHarness + SimplePusPacketHandler)) {
|
||||
// Create a ping TC, verify acceptance.
|
||||
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
|
||||
let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
|
||||
let token = test_harness.send_tc(&ping_tc);
|
||||
let request_id = token.req_id();
|
||||
let result = test_harness.handle_one_tc();
|
||||
assert!(result.is_ok());
|
||||
// We should see 4 replies in the TM queue now: Acceptance TM, Start TM, ping reply and
|
||||
// Completion TM
|
||||
|
||||
// Acceptance TM
|
||||
test_harness.check_next_verification_tm(1, request_id);
|
||||
|
||||
// Start TM
|
||||
test_harness.check_next_verification_tm(3, request_id);
|
||||
|
||||
// Ping reply
|
||||
let tm = test_harness.read_next_tm();
|
||||
assert_eq!(tm.service(), 17);
|
||||
assert_eq!(tm.subservice(), 2);
|
||||
assert!(tm.user_data().is_empty());
|
||||
|
||||
// TM completion
|
||||
test_harness.check_next_verification_tm(7, request_id);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_ping_processing_using_store() {
|
||||
let mut test_harness = Pus17HandlerWithStoreTester::new();
|
||||
ping_test(&mut test_harness);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_ping_processing_using_vec() {
|
||||
let mut test_harness = Pus17HandlerWithVecTester::new();
|
||||
ping_test(&mut test_harness);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_tc_queue() {
|
||||
let mut test_harness = Pus17HandlerWithStoreTester::new();
|
||||
let result = test_harness.handle_one_tc();
|
||||
assert!(result.is_ok());
|
||||
let result = result.unwrap();
|
||||
if let PusPacketHandlerResult::Empty = result {
|
||||
} else {
|
||||
panic!("unexpected result type {result:?}")
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sending_unsupported_service() {
|
||||
let mut test_harness = Pus17HandlerWithStoreTester::new();
|
||||
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(3, 1);
|
||||
let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
|
||||
test_harness.send_tc(&ping_tc);
|
||||
let result = test_harness.handle_one_tc();
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let PusPacketHandlingError::WrongService(num) = error {
|
||||
assert_eq!(num, 3);
|
||||
} else {
|
||||
panic!("unexpected error type {error}")
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sending_custom_subservice() {
|
||||
let mut test_harness = Pus17HandlerWithStoreTester::new();
|
||||
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(17, 200);
|
||||
let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
|
||||
test_harness.send_tc(&ping_tc);
|
||||
let result = test_harness.handle_one_tc();
|
||||
assert!(result.is_ok());
|
||||
let result = result.unwrap();
|
||||
if let PusPacketHandlerResult::CustomSubservice(subservice, _) = result {
|
||||
assert_eq!(subservice, 200);
|
||||
} else {
|
||||
panic!("unexpected result type {result:?}")
|
||||
}
|
||||
}
|
||||
}
|
2194
satrs/src/pus/verification.rs
Normal file
File diff suppressed because it is too large
1
satrs/src/request.rs
Normal file
@@ -0,0 +1 @@
|
||||
|
1
satrs/src/res_code.rs
Normal file
@@ -0,0 +1 @@
pub use satrs_shared::res_code::*;
250
satrs/src/seq_count.rs
Normal file
@@ -0,0 +1,250 @@
|
||||
use core::cell::Cell;
|
||||
#[cfg(feature = "alloc")]
|
||||
use dyn_clone::DynClone;
|
||||
use paste::paste;
|
||||
use spacepackets::MAX_SEQ_COUNT;
|
||||
#[cfg(feature = "std")]
|
||||
pub use stdmod::*;
|
||||
|
||||
/// Core trait for objects which can provide a sequence count.
|
||||
///
|
||||
/// The core functions are not mutable on purpose to allow easier usage with
|
||||
/// static structs when using the interior mutability pattern. This can be achieved by using
|
||||
/// [Cell], [core::cell::RefCell] or atomic types.
|
||||
pub trait SequenceCountProviderCore<Raw> {
|
||||
fn get(&self) -> Raw;
|
||||
|
||||
fn increment(&self);
|
||||
|
||||
fn get_and_increment(&self) -> Raw {
|
||||
let val = self.get();
|
||||
self.increment();
|
||||
val
|
||||
}
|
||||
}
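// A minimal sketch of why the `&self` based API matters (the type name is purely
// illustrative and not part of the crate API): an implementation backed by an atomic
// integer can be used from a `static` without any locking. Note that the default
// `get_and_increment` is not atomic as a combined operation, which is usually fine
// for a single producer.
#[allow(dead_code)]
struct AtomicSeqCountProviderU16(core::sync::atomic::AtomicU16);

impl SequenceCountProviderCore<u16> for AtomicSeqCountProviderU16 {
    fn get(&self) -> u16 {
        self.0.load(core::sync::atomic::Ordering::Relaxed)
    }

    fn increment(&self) {
        // Wraps at the native u16 boundary; wrapping at [MAX_SEQ_COUNT] instead would
        // require a compare-and-swap loop.
        self.0.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
    }
}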
|
||||
|
||||
/// Extension trait which allows cloning a sequence count provider after it was turned into
|
||||
/// a trait object.
|
||||
#[cfg(feature = "alloc")]
|
||||
pub trait SequenceCountProvider<Raw>: SequenceCountProviderCore<Raw> + DynClone {}
|
||||
#[cfg(feature = "alloc")]
|
||||
dyn_clone::clone_trait_object!(SequenceCountProvider<u16>);
|
||||
#[cfg(feature = "alloc")]
|
||||
impl<T, Raw> SequenceCountProvider<Raw> for T where T: SequenceCountProviderCore<Raw> + Clone {}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub struct SeqCountProviderSimple<T: Copy> {
|
||||
seq_count: Cell<T>,
|
||||
max_val: T,
|
||||
}
|
||||
|
||||
macro_rules! impl_for_primitives {
|
||||
($($ty: ident,)+) => {
|
||||
$(
|
||||
paste! {
|
||||
impl SeqCountProviderSimple<$ty> {
|
||||
pub fn [<new_ $ty _max_val>](max_val: $ty) -> Self {
|
||||
Self {
|
||||
seq_count: Cell::new(0),
|
||||
max_val,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn [<new_ $ty>]() -> Self {
|
||||
Self {
|
||||
seq_count: Cell::new(0),
|
||||
max_val: $ty::MAX
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SequenceCountProviderCore<$ty> for SeqCountProviderSimple<$ty> {
|
||||
fn get(&self) -> $ty {
|
||||
self.seq_count.get()
|
||||
}
|
||||
|
||||
fn increment(&self) {
|
||||
self.get_and_increment();
|
||||
}
|
||||
|
||||
fn get_and_increment(&self) -> $ty {
|
||||
let curr_count = self.seq_count.get();
|
||||
|
||||
if curr_count == self.max_val {
|
||||
self.seq_count.set(0);
|
||||
} else {
|
||||
self.seq_count.set(curr_count + 1);
|
||||
}
|
||||
curr_count
|
||||
}
|
||||
}
|
||||
}
|
||||
)+
|
||||
}
|
||||
}
|
||||
|
||||
impl_for_primitives!(u8, u16, u32, u64,);
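// A small sketch of the alloc-only extension trait in action (the function name is
// illustrative): a simple provider boxed as a trait object can still be cloned thanks
// to [DynClone], and the clone carries over the current counter state.
#[cfg(feature = "alloc")]
#[allow(dead_code)]
fn boxed_seq_count_provider_sketch() {
    use alloc::boxed::Box;

    let provider: Box<dyn SequenceCountProvider<u16>> =
        Box::new(SeqCountProviderSimple::new_u16());
    provider.get_and_increment();
    let cloned = provider.clone();
    assert_eq!(cloned.get(), provider.get());
}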
|
||||
|
||||
/// This is a sequence count provider which wraps around at [MAX_SEQ_COUNT].
|
||||
pub struct CcsdsSimpleSeqCountProvider {
|
||||
provider: SeqCountProviderSimple<u16>,
|
||||
}
|
||||
|
||||
impl CcsdsSimpleSeqCountProvider {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
provider: SeqCountProviderSimple::new_u16_max_val(MAX_SEQ_COUNT),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CcsdsSimpleSeqCountProvider {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl SequenceCountProviderCore<u16> for CcsdsSimpleSeqCountProvider {
|
||||
delegate::delegate! {
|
||||
to self.provider {
|
||||
fn get(&self) -> u16;
|
||||
fn increment(&self);
|
||||
fn get_and_increment(&self) -> u16;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub mod stdmod {
|
||||
use super::*;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
macro_rules! sync_clonable_seq_counter_impl {
|
||||
($($ty: ident,)+) => {
|
||||
$(paste! {
|
||||
/// These sequence counters can be shared between threads and can also be
|
||||
/// configured to wrap around at specified maximum values. Please note that
|
||||
                /// the API provided by this class will not panic on [Mutex] lock errors,
|
||||
/// but it will yield 0 for the getter functions.
|
||||
#[derive(Clone, Default)]
|
||||
pub struct [<SeqCountProviderSync $ty:upper>] {
|
||||
seq_count: Arc<Mutex<$ty>>,
|
||||
max_val: $ty
|
||||
}
|
||||
|
||||
impl [<SeqCountProviderSync $ty:upper>] {
|
||||
pub fn new() -> Self {
|
||||
Self::new_with_max_val($ty::MAX)
|
||||
}
|
||||
|
||||
pub fn new_with_max_val(max_val: $ty) -> Self {
|
||||
Self {
|
||||
seq_count: Arc::default(),
|
||||
max_val
|
||||
}
|
||||
}
|
||||
}
|
||||
impl SequenceCountProviderCore<$ty> for [<SeqCountProviderSync $ty:upper>] {
|
||||
fn get(&self) -> $ty {
|
||||
match self.seq_count.lock() {
|
||||
Ok(counter) => *counter,
|
||||
Err(_) => 0
|
||||
}
|
||||
}
|
||||
|
||||
fn increment(&self) {
|
||||
self.get_and_increment();
|
||||
}
|
||||
|
||||
fn get_and_increment(&self) -> $ty {
|
||||
match self.seq_count.lock() {
|
||||
Ok(mut counter) => {
|
||||
let val = *counter;
|
||||
if val == self.max_val {
|
||||
*counter = 0;
|
||||
} else {
|
||||
*counter += 1;
|
||||
}
|
||||
val
|
||||
}
|
||||
Err(_) => 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
})+
|
||||
}
|
||||
}
|
||||
sync_clonable_seq_counter_impl!(u8, u16, u32, u64,);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::seq_count::{
|
||||
CcsdsSimpleSeqCountProvider, SeqCountProviderSimple, SeqCountProviderSyncU8,
|
||||
SequenceCountProviderCore,
|
||||
};
|
||||
use spacepackets::MAX_SEQ_COUNT;
|
||||
|
||||
#[test]
|
||||
fn test_u8_counter() {
|
||||
let u8_counter = SeqCountProviderSimple::new_u8();
|
||||
assert_eq!(u8_counter.get(), 0);
|
||||
assert_eq!(u8_counter.get_and_increment(), 0);
|
||||
assert_eq!(u8_counter.get_and_increment(), 1);
|
||||
assert_eq!(u8_counter.get(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_u8_counter_overflow() {
|
||||
let u8_counter = SeqCountProviderSimple::new_u8();
|
||||
for _ in 0..256 {
|
||||
u8_counter.increment();
|
||||
}
|
||||
assert_eq!(u8_counter.get(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ccsds_counter() {
|
||||
let ccsds_counter = CcsdsSimpleSeqCountProvider::default();
|
||||
assert_eq!(ccsds_counter.get(), 0);
|
||||
assert_eq!(ccsds_counter.get_and_increment(), 0);
|
||||
assert_eq!(ccsds_counter.get_and_increment(), 1);
|
||||
assert_eq!(ccsds_counter.get(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ccsds_counter_overflow() {
|
||||
let ccsds_counter = CcsdsSimpleSeqCountProvider::default();
|
||||
for _ in 0..MAX_SEQ_COUNT + 1 {
|
||||
ccsds_counter.increment();
|
||||
}
|
||||
assert_eq!(ccsds_counter.get(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_atomic_ref_counters() {
|
||||
let sync_u8_counter = SeqCountProviderSyncU8::new();
|
||||
assert_eq!(sync_u8_counter.get(), 0);
|
||||
assert_eq!(sync_u8_counter.get_and_increment(), 0);
|
||||
assert_eq!(sync_u8_counter.get_and_increment(), 1);
|
||||
assert_eq!(sync_u8_counter.get(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_atomic_ref_counters_overflow() {
|
||||
let sync_u8_counter = SeqCountProviderSyncU8::new();
|
||||
for _ in 0..u8::MAX as u16 + 1 {
|
||||
sync_u8_counter.increment();
|
||||
}
|
||||
assert_eq!(sync_u8_counter.get(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_atomic_ref_counters_overflow_custom_max_val() {
|
||||
let sync_u8_counter = SeqCountProviderSyncU8::new_with_max_val(128);
|
||||
for _ in 0..129 {
|
||||
sync_u8_counter.increment();
|
||||
}
|
||||
assert_eq!(sync_u8_counter.get(), 0);
|
||||
}
|
||||
}
|
367
satrs/src/tmtc/ccsds_distrib.rs
Normal file
@@ -0,0 +1,367 @@
|
||||
//! CCSDS packet routing components.
|
||||
//!
|
||||
//! The routing support consists of two core components:
|
||||
//! 1. [CcsdsDistributor] component which dispatches received packets to a user-provided handler
|
||||
//! 2. [CcsdsPacketHandler] trait which should be implemented by the user-provided packet handler.
|
||||
//!
|
||||
//! The [CcsdsDistributor] implements the [ReceivesCcsdsTc] and [ReceivesTcCore] traits, which allow users to
|
||||
//! pass raw or CCSDS packets to it. Upon receiving a packet, it performs the following steps:
|
||||
//!
|
||||
//! 1. It tries to identify the target Application Process Identifier (APID) based on the
|
||||
//! respective CCSDS space packet header field. If that process fails, a [ByteConversionError] is
|
||||
//! returned to the user
|
||||
//! 2. If a valid APID is found and matches one of the APIDs provided by
|
||||
//! [CcsdsPacketHandler::valid_apids], it will pass the packet to the user provided
|
||||
//! [CcsdsPacketHandler::handle_known_apid] function. If no valid APID is found, the packet
|
||||
//! will be passed to the [CcsdsPacketHandler::handle_unknown_apid] function.
|
||||
//!
|
||||
//! # Example
|
||||
//!
|
||||
//! ```rust
|
||||
//! use satrs::tmtc::ccsds_distrib::{CcsdsPacketHandler, CcsdsDistributor};
|
||||
//! use satrs::tmtc::{ReceivesTc, ReceivesTcCore};
|
||||
//! use spacepackets::{CcsdsPacket, SpHeader};
|
||||
//! use spacepackets::ecss::WritablePusPacket;
|
||||
//! use spacepackets::ecss::tc::{PusTc, PusTcCreator};
|
||||
//!
|
||||
//! #[derive (Default)]
|
||||
//! struct ConcreteApidHandler {
|
||||
//! known_call_count: u32,
|
||||
//! unknown_call_count: u32
|
||||
//! }
|
||||
//!
|
||||
//! impl ConcreteApidHandler {
|
||||
//! fn mutable_foo(&mut self) {}
|
||||
//! }
|
||||
//!
|
||||
//! impl CcsdsPacketHandler for ConcreteApidHandler {
|
||||
//! type Error = ();
|
||||
//! fn valid_apids(&self) -> &'static [u16] { &[0x002] }
|
||||
//! fn handle_known_apid(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
//! assert_eq!(sp_header.apid(), 0x002);
|
||||
//! assert_eq!(tc_raw.len(), 13);
|
||||
//! self.known_call_count += 1;
|
||||
//! Ok(())
|
||||
//! }
|
||||
//! fn handle_unknown_apid(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
//! assert_eq!(sp_header.apid(), 0x003);
|
||||
//! assert_eq!(tc_raw.len(), 13);
|
||||
//! self.unknown_call_count += 1;
|
||||
//! Ok(())
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! let apid_handler = ConcreteApidHandler::default();
|
||||
//! let mut ccsds_distributor = CcsdsDistributor::new(Box::new(apid_handler));
|
||||
//!
|
||||
//! // Create and pass PUS telecommand with a valid APID
|
||||
//! let mut space_packet_header = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
|
||||
//! let mut pus_tc = PusTcCreator::new_simple(&mut space_packet_header, 17, 1, None, true);
|
||||
//! let mut test_buf: [u8; 32] = [0; 32];
|
||||
//! let mut size = pus_tc
|
||||
//! .write_to_bytes(test_buf.as_mut_slice())
|
||||
//! .expect("Error writing TC to buffer");
|
||||
//! let tc_slice = &test_buf[0..size];
|
||||
//! ccsds_distributor.pass_tc(&tc_slice).expect("Passing TC slice failed");
|
||||
//!
|
||||
//! // Now pass a packet with an unknown APID to the distributor
|
||||
//! pus_tc.set_apid(0x003);
|
||||
//! size = pus_tc
|
||||
//! .write_to_bytes(test_buf.as_mut_slice())
|
||||
//! .expect("Error writing TC to buffer");
|
||||
//! let tc_slice = &test_buf[0..size];
|
||||
//! ccsds_distributor.pass_tc(&tc_slice).expect("Passing TC slice failed");
|
||||
//!
|
||||
//! // User helper function to retrieve concrete class
|
||||
//! let concrete_handler_ref: &ConcreteApidHandler = ccsds_distributor
|
||||
//! .apid_handler_ref()
|
||||
//! .expect("Casting back to concrete type failed");
|
||||
//! assert_eq!(concrete_handler_ref.known_call_count, 1);
|
||||
//! assert_eq!(concrete_handler_ref.unknown_call_count, 1);
|
||||
//!
|
||||
//! // It's also possible to retrieve a mutable reference
|
||||
//! let mutable_ref: &mut ConcreteApidHandler = ccsds_distributor
|
||||
//! .apid_handler_mut()
|
||||
//! .expect("Casting back to concrete type failed");
|
||||
//! mutable_ref.mutable_foo();
|
||||
//! ```
|
||||
use crate::tmtc::{ReceivesCcsdsTc, ReceivesTcCore};
|
||||
use alloc::boxed::Box;
|
||||
use core::fmt::{Display, Formatter};
|
||||
use downcast_rs::Downcast;
|
||||
use spacepackets::{ByteConversionError, CcsdsPacket, SpHeader};
|
||||
#[cfg(feature = "std")]
|
||||
use std::error::Error;
|
||||
|
||||
/// Generic trait for a handler or dispatcher object handling CCSDS packets.
|
||||
///
|
||||
/// Users should implement this trait on their custom CCSDS packet handler and then pass a boxed
|
||||
/// instance of this handler to the [CcsdsDistributor]. The distributor will use the trait
|
||||
/// interface to dispatch received packets to the user based on the Application Process Identifier
|
||||
/// (APID) field of the CCSDS packet.
|
||||
///
|
||||
/// This trait automatically implements [downcast_rs::Downcast] to allow a more convenient API
|
||||
/// to cast trait objects back to their concrete type after the handler was passed to the
|
||||
/// distributor.
|
||||
pub trait CcsdsPacketHandler: Downcast {
|
||||
type Error;
|
||||
|
||||
fn valid_apids(&self) -> &'static [u16];
|
||||
fn handle_known_apid(&mut self, sp_header: &SpHeader, tc_raw: &[u8])
|
||||
-> Result<(), Self::Error>;
|
||||
fn handle_unknown_apid(
|
||||
&mut self,
|
||||
sp_header: &SpHeader,
|
||||
tc_raw: &[u8],
|
||||
) -> Result<(), Self::Error>;
|
||||
}
|
||||
|
||||
downcast_rs::impl_downcast!(CcsdsPacketHandler assoc Error);
|
||||
|
||||
pub trait SendableCcsdsPacketHandler: CcsdsPacketHandler + Send {}
|
||||
|
||||
impl<T: CcsdsPacketHandler + Send> SendableCcsdsPacketHandler for T {}
|
||||
|
||||
downcast_rs::impl_downcast!(SendableCcsdsPacketHandler assoc Error);
|
||||
|
||||
/// The CCSDS distributor dispatches received CCSDS packets to a user provided packet handler.
|
||||
///
|
||||
/// The passed APID handler is required to be [Send]able to allow more ergonomic usage with
|
||||
/// threads.
|
||||
pub struct CcsdsDistributor<E> {
|
||||
/// User provided APID handler stored as a generic trait object.
|
||||
/// It can be cast back to the original concrete type using the [Self::apid_handler_ref] or
|
||||
/// the [Self::apid_handler_mut] method.
|
||||
pub apid_handler: Box<dyn SendableCcsdsPacketHandler<Error = E>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub enum CcsdsError<E> {
|
||||
CustomError(E),
|
||||
ByteConversionError(ByteConversionError),
|
||||
}
|
||||
|
||||
impl<E: Display> Display for CcsdsError<E> {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
|
||||
match self {
|
||||
Self::CustomError(e) => write!(f, "{e}"),
|
||||
Self::ByteConversionError(e) => write!(f, "{e}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl<E: Error> Error for CcsdsError<E> {
|
||||
fn source(&self) -> Option<&(dyn Error + 'static)> {
|
||||
match self {
|
||||
Self::CustomError(e) => e.source(),
|
||||
Self::ByteConversionError(e) => e.source(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: 'static> ReceivesCcsdsTc for CcsdsDistributor<E> {
|
||||
type Error = CcsdsError<E>;
|
||||
|
||||
fn pass_ccsds(&mut self, header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
self.dispatch_ccsds(header, tc_raw)
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: 'static> ReceivesTcCore for CcsdsDistributor<E> {
|
||||
type Error = CcsdsError<E>;
|
||||
|
||||
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
if tc_raw.len() < 7 {
|
||||
return Err(CcsdsError::ByteConversionError(
|
||||
ByteConversionError::FromSliceTooSmall {
|
||||
found: tc_raw.len(),
|
||||
expected: 7,
|
||||
},
|
||||
));
|
||||
}
|
||||
let (sp_header, _) =
|
||||
SpHeader::from_be_bytes(tc_raw).map_err(|e| CcsdsError::ByteConversionError(e))?;
|
||||
self.dispatch_ccsds(&sp_header, tc_raw)
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: 'static> CcsdsDistributor<E> {
|
||||
pub fn new(apid_handler: Box<dyn SendableCcsdsPacketHandler<Error = E>>) -> Self {
|
||||
CcsdsDistributor { apid_handler }
|
||||
}
|
||||
|
||||
/// This function can be used to retrieve a reference to the concrete instance of the APID
|
||||
/// handler after it was passed to the distributor. See the
|
||||
    /// [module documentation][crate::tmtc::ccsds_distrib] for an example.
|
||||
pub fn apid_handler_ref<T: SendableCcsdsPacketHandler<Error = E>>(&self) -> Option<&T> {
|
||||
self.apid_handler.downcast_ref::<T>()
|
||||
}
|
||||
|
||||
/// This function can be used to retrieve a mutable reference to the concrete instance of the
|
||||
/// APID handler after it was passed to the distributor.
|
||||
pub fn apid_handler_mut<T: SendableCcsdsPacketHandler<Error = E>>(&mut self) -> Option<&mut T> {
|
||||
self.apid_handler.downcast_mut::<T>()
|
||||
}
|
||||
|
||||
fn dispatch_ccsds(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), CcsdsError<E>> {
|
||||
let apid = sp_header.apid();
|
||||
let valid_apids = self.apid_handler.valid_apids();
|
||||
for &valid_apid in valid_apids {
|
||||
if valid_apid == apid {
|
||||
return self
|
||||
.apid_handler
|
||||
.handle_known_apid(sp_header, tc_raw)
|
||||
.map_err(|e| CcsdsError::CustomError(e));
|
||||
}
|
||||
}
|
||||
self.apid_handler
|
||||
.handle_unknown_apid(sp_header, tc_raw)
|
||||
.map_err(|e| CcsdsError::CustomError(e))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use super::*;
|
||||
use crate::tmtc::ccsds_distrib::{CcsdsDistributor, CcsdsPacketHandler};
|
||||
use spacepackets::ecss::tc::PusTcCreator;
|
||||
use spacepackets::ecss::WritablePusPacket;
|
||||
use spacepackets::CcsdsPacket;
|
||||
use std::collections::VecDeque;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::vec::Vec;
|
||||
|
||||
fn is_send<T: Send>(_: &T) {}
|
||||
|
||||
pub fn generate_ping_tc(buf: &mut [u8]) -> &[u8] {
|
||||
let mut sph = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
|
||||
let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let size = pus_tc
|
||||
.write_to_bytes(buf)
|
||||
.expect("Error writing TC to buffer");
|
||||
assert_eq!(size, 13);
|
||||
&buf[0..size]
|
||||
}
|
||||
|
||||
type SharedPacketQueue = Arc<Mutex<VecDeque<(u16, Vec<u8>)>>>;
|
||||
pub struct BasicApidHandlerSharedQueue {
|
||||
pub known_packet_queue: SharedPacketQueue,
|
||||
pub unknown_packet_queue: SharedPacketQueue,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct BasicApidHandlerOwnedQueue {
|
||||
pub known_packet_queue: VecDeque<(u16, Vec<u8>)>,
|
||||
pub unknown_packet_queue: VecDeque<(u16, Vec<u8>)>,
|
||||
}
|
||||
|
||||
impl CcsdsPacketHandler for BasicApidHandlerSharedQueue {
|
||||
type Error = ();
|
||||
fn valid_apids(&self) -> &'static [u16] {
|
||||
&[0x000, 0x002]
|
||||
}
|
||||
|
||||
fn handle_known_apid(
|
||||
&mut self,
|
||||
sp_header: &SpHeader,
|
||||
tc_raw: &[u8],
|
||||
) -> Result<(), Self::Error> {
|
||||
let mut vec = Vec::new();
|
||||
vec.extend_from_slice(tc_raw);
|
||||
self.known_packet_queue
|
||||
.lock()
|
||||
.unwrap()
|
||||
.push_back((sp_header.apid(), vec));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_unknown_apid(
|
||||
&mut self,
|
||||
sp_header: &SpHeader,
|
||||
tc_raw: &[u8],
|
||||
) -> Result<(), Self::Error> {
|
||||
let mut vec = Vec::new();
|
||||
vec.extend_from_slice(tc_raw);
|
||||
self.unknown_packet_queue
|
||||
.lock()
|
||||
.unwrap()
|
||||
.push_back((sp_header.apid(), vec));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl CcsdsPacketHandler for BasicApidHandlerOwnedQueue {
|
||||
type Error = ();
|
||||
|
||||
fn valid_apids(&self) -> &'static [u16] {
|
||||
&[0x000, 0x002]
|
||||
}
|
||||
|
||||
fn handle_known_apid(
|
||||
&mut self,
|
||||
sp_header: &SpHeader,
|
||||
tc_raw: &[u8],
|
||||
) -> Result<(), Self::Error> {
|
||||
let mut vec = Vec::new();
|
||||
vec.extend_from_slice(tc_raw);
|
||||
Ok(self.known_packet_queue.push_back((sp_header.apid(), vec)))
|
||||
}
|
||||
|
||||
fn handle_unknown_apid(
|
||||
&mut self,
|
||||
sp_header: &SpHeader,
|
||||
tc_raw: &[u8],
|
||||
) -> Result<(), Self::Error> {
|
||||
let mut vec = Vec::new();
|
||||
vec.extend_from_slice(tc_raw);
|
||||
Ok(self.unknown_packet_queue.push_back((sp_header.apid(), vec)))
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_distribs_known_apid() {
|
||||
let known_packet_queue = Arc::new(Mutex::default());
|
||||
let unknown_packet_queue = Arc::new(Mutex::default());
|
||||
let apid_handler = BasicApidHandlerSharedQueue {
|
||||
known_packet_queue: known_packet_queue.clone(),
|
||||
unknown_packet_queue: unknown_packet_queue.clone(),
|
||||
};
|
||||
let mut ccsds_distrib = CcsdsDistributor::new(Box::new(apid_handler));
|
||||
is_send(&ccsds_distrib);
|
||||
let mut test_buf: [u8; 32] = [0; 32];
|
||||
let tc_slice = generate_ping_tc(test_buf.as_mut_slice());
|
||||
|
||||
ccsds_distrib.pass_tc(tc_slice).expect("Passing TC failed");
|
||||
let recvd = known_packet_queue.lock().unwrap().pop_front();
|
||||
assert!(unknown_packet_queue.lock().unwrap().is_empty());
|
||||
assert!(recvd.is_some());
|
||||
let (apid, packet) = recvd.unwrap();
|
||||
assert_eq!(apid, 0x002);
|
||||
assert_eq!(packet, tc_slice);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_distribs_unknown_apid() {
|
||||
let known_packet_queue = Arc::new(Mutex::default());
|
||||
let unknown_packet_queue = Arc::new(Mutex::default());
|
||||
let apid_handler = BasicApidHandlerSharedQueue {
|
||||
known_packet_queue: known_packet_queue.clone(),
|
||||
unknown_packet_queue: unknown_packet_queue.clone(),
|
||||
};
|
||||
let mut ccsds_distrib = CcsdsDistributor::new(Box::new(apid_handler));
|
||||
let mut sph = SpHeader::tc_unseg(0x004, 0x34, 0).unwrap();
|
||||
let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let mut test_buf: [u8; 32] = [0; 32];
|
||||
pus_tc
|
||||
.write_to_bytes(test_buf.as_mut_slice())
|
||||
.expect("Error writing TC to buffer");
|
||||
ccsds_distrib.pass_tc(&test_buf).expect("Passing TC failed");
|
||||
let recvd = unknown_packet_queue.lock().unwrap().pop_front();
|
||||
assert!(known_packet_queue.lock().unwrap().is_empty());
|
||||
assert!(recvd.is_some());
|
||||
let (apid, packet) = recvd.unwrap();
|
||||
assert_eq!(apid, 0x004);
|
||||
assert_eq!(packet.as_slice(), test_buf);
|
||||
}
|
||||
}
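To make the routing API added above easier to follow, here is a minimal usage sketch which is not part of the commit. The handler type, its queue field and the APID value are illustrative assumptions; only the CcsdsPacketHandler trait methods, CcsdsDistributor::new and pass_tc mirror the code in this diff, and the tests above use the same pattern.

use satrs::tmtc::ccsds_distrib::{CcsdsDistributor, CcsdsPacketHandler};
use satrs::tmtc::ReceivesTcCore;
use spacepackets::{CcsdsPacket, SpHeader};
use std::collections::VecDeque;

// Hypothetical handler which stores packets with a known APID in an owned queue
// and silently drops everything else.
#[derive(Default)]
struct ExampleApidHandler {
    known_packets: VecDeque<(u16, Vec<u8>)>,
}

impl CcsdsPacketHandler for ExampleApidHandler {
    type Error = ();

    fn valid_apids(&self) -> &'static [u16] {
        &[0x002]
    }

    fn handle_known_apid(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> {
        self.known_packets.push_back((sp_header.apid(), tc_raw.to_vec()));
        Ok(())
    }

    fn handle_unknown_apid(&mut self, _sp_header: &SpHeader, _tc_raw: &[u8]) -> Result<(), Self::Error> {
        Ok(())
    }
}

fn route_raw_tc(raw_tc: &[u8]) {
    let mut distributor = CcsdsDistributor::new(Box::new(ExampleApidHandler::default()));
    // The distributor extracts the space packet header itself and dispatches based on the APID.
    distributor.pass_tc(raw_tc).expect("routing TC failed");
}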
|
117
satrs/src/tmtc/mod.rs
Normal file
@ -0,0 +1,117 @@
|
||||
//! Telemetry and Telecommanding (TMTC) module. Contains packet routing components with special
|
||||
//! support for CCSDS and ECSS packets.
|
||||
//!
|
||||
//! The distributor modules provided by this module use trait objects provided by the user to
|
||||
//! directly dispatch received packets to packet listeners based on packet fields like the CCSDS
|
||||
//! Application Process ID (APID) or the ECSS PUS service type. This allows for fast packet
|
||||
//! routing without the overhead and complication of using message queues. However, it also requires
//! the user to provide the packet handlers as boxed trait objects which must be [Send].
|
||||
#[cfg(feature = "alloc")]
|
||||
use downcast_rs::{impl_downcast, Downcast};
|
||||
use spacepackets::SpHeader;
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub mod ccsds_distrib;
|
||||
#[cfg(feature = "alloc")]
|
||||
pub mod pus_distrib;
|
||||
pub mod tm_helper;
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub use ccsds_distrib::{CcsdsDistributor, CcsdsError, CcsdsPacketHandler};
|
||||
#[cfg(feature = "alloc")]
|
||||
pub use pus_distrib::{PusDistributor, PusServiceProvider};
|
||||
|
||||
pub type TargetId = u32;
|
||||
|
||||
/// Generic trait for objects which can receive any telecommand in the form of a raw bytestream, with
|
||||
/// no assumptions about the received protocol.
|
||||
///
|
||||
/// This trait is implemented by both the [crate::tmtc::pus_distrib::PusDistributor] and the
|
||||
/// [crate::tmtc::ccsds_distrib::CcsdsDistributor] which allows to pass the respective packets in
|
||||
/// raw byte format into them.
|
||||
pub trait ReceivesTcCore {
|
||||
type Error;
|
||||
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error>;
|
||||
}
|
||||
|
||||
/// Extension trait of [ReceivesTcCore] which allows downcasting by implementing [Downcast] and
|
||||
/// is also sendable.
|
||||
#[cfg(feature = "alloc")]
|
||||
pub trait ReceivesTc: ReceivesTcCore + Downcast + Send {
|
||||
// Remove this once trait upcasting coercion has been implemented.
|
||||
// Tracking issue: https://github.com/rust-lang/rust/issues/65991
|
||||
fn upcast(&self) -> &dyn ReceivesTcCore<Error = Self::Error>;
|
||||
// Remove this once trait upcasting coercion has been implemented.
|
||||
// Tracking issue: https://github.com/rust-lang/rust/issues/65991
|
||||
fn upcast_mut(&mut self) -> &mut dyn ReceivesTcCore<Error = Self::Error>;
|
||||
}
|
||||
|
||||
/// Blanket implementation to automatically implement [ReceivesTc] when the [alloc] feature
|
||||
/// is enabled.
|
||||
#[cfg(feature = "alloc")]
|
||||
impl<T> ReceivesTc for T
|
||||
where
|
||||
T: ReceivesTcCore + Send + 'static,
|
||||
{
|
||||
// Remove this once trait upcasting coercion has been implemented.
|
||||
// Tracking issue: https://github.com/rust-lang/rust/issues/65991
|
||||
fn upcast(&self) -> &dyn ReceivesTcCore<Error = Self::Error> {
|
||||
self
|
||||
}
|
||||
// Remove this once trait upcasting coercion has been implemented.
|
||||
// Tracking issue: https://github.com/rust-lang/rust/issues/65991
|
||||
fn upcast_mut(&mut self) -> &mut dyn ReceivesTcCore<Error = Self::Error> {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl_downcast!(ReceivesTc assoc Error);
|
||||
|
||||
/// Generic trait for objects which can receive CCSDS space packets, for example ECSS PUS packets
|
||||
/// or CCSDS File Delivery Protocol (CFDP) packets.
|
||||
///
|
||||
/// This trait is implemented by both the [crate::tmtc::pus_distrib::PusDistributor] and the
|
||||
/// [crate::tmtc::ccsds_distrib::CcsdsDistributor] which allows
|
||||
/// to pass the respective packets in raw byte format or in CCSDS format into them.
|
||||
pub trait ReceivesCcsdsTc {
|
||||
type Error;
|
||||
fn pass_ccsds(&mut self, header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error>;
|
||||
}
|
||||
|
||||
/// Generic trait for a TM packet source, with no restrictions on the type of TM.
|
||||
/// Implementors write the telemetry into the provided buffer and return the size of the telemetry.
|
||||
pub trait TmPacketSourceCore {
|
||||
type Error;
|
||||
fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error>;
|
||||
}
|
||||
|
||||
/// Extension trait of [TmPacketSourceCore] which allows downcasting by implementing [Downcast] and
|
||||
/// is also sendable.
|
||||
#[cfg(feature = "alloc")]
|
||||
pub trait TmPacketSource: TmPacketSourceCore + Downcast + Send {
|
||||
// Remove this once trait upcasting coercion has been implemented.
|
||||
// Tracking issue: https://github.com/rust-lang/rust/issues/65991
|
||||
fn upcast(&self) -> &dyn TmPacketSourceCore<Error = Self::Error>;
|
||||
// Remove this once trait upcasting coercion has been implemented.
|
||||
// Tracking issue: https://github.com/rust-lang/rust/issues/65991
|
||||
fn upcast_mut(&mut self) -> &mut dyn TmPacketSourceCore<Error = Self::Error>;
|
||||
}
|
||||
|
||||
/// Blanket implementation to automatically implement [TmPacketSource] when the [alloc] feature
|
||||
/// is enabled.
|
||||
#[cfg(feature = "alloc")]
|
||||
impl<T> TmPacketSource for T
|
||||
where
|
||||
T: TmPacketSourceCore + Send + 'static,
|
||||
{
|
||||
// Remove this once trait upcasting coercion has been implemented.
|
||||
// Tracking issue: https://github.com/rust-lang/rust/issues/65991
|
||||
fn upcast(&self) -> &dyn TmPacketSourceCore<Error = Self::Error> {
|
||||
self
|
||||
}
|
||||
// Remove this once trait upcasting coercion has been implemented.
|
||||
// Tracking issue: https://github.com/rust-lang/rust/issues/65991
|
||||
fn upcast_mut(&mut self) -> &mut dyn TmPacketSourceCore<Error = Self::Error> {
|
||||
self
|
||||
}
|
||||
}
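As a quick illustration of the two core traits in this new module, the following sketch, which is not part of the commit, shows queue-backed implementors. The type names are made up; only the ReceivesTcCore and TmPacketSourceCore signatures come from the module above, and the TCP server integration tests later in this commit rely on the same pattern.

use satrs::tmtc::{ReceivesTcCore, TmPacketSourceCore};
use std::collections::VecDeque;

#[derive(Default)]
struct QueueBackedTcReceiver {
    tc_queue: VecDeque<Vec<u8>>,
}

impl ReceivesTcCore for QueueBackedTcReceiver {
    type Error = ();

    fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
        // Store the raw telecommand for later processing.
        self.tc_queue.push_back(tc_raw.to_vec());
        Ok(())
    }
}

#[derive(Default)]
struct QueueBackedTmSource {
    tm_queue: VecDeque<Vec<u8>>,
}

impl TmPacketSourceCore for QueueBackedTmSource {
    type Error = ();

    fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
        // Copy the next pending telemetry packet into the provided buffer and return its
        // length, or return 0 if no packet is pending.
        if let Some(tm) = self.tm_queue.front() {
            if buffer.len() < tm.len() {
                // A real implementation should use a dedicated error variant here.
                return Err(());
            }
        }
        if let Some(tm) = self.tm_queue.pop_front() {
            buffer[..tm.len()].copy_from_slice(&tm);
            return Ok(tm.len());
        }
        Ok(0)
    }
}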
|
369
satrs/src/tmtc/pus_distrib.rs
Normal file
@ -0,0 +1,369 @@
|
||||
//! ECSS PUS packet routing components.
|
||||
//!
|
||||
//! The routing components consist of two core components:
|
||||
//! 1. [PusDistributor] component which dispatches received packets to a user-provided handler.
|
||||
//! 2. [PusServiceProvider] trait which should be implemented by the user-provided PUS packet
|
||||
//! handler.
|
||||
//!
|
||||
//! The [PusDistributor] implements the [ReceivesEcssPusTc], [ReceivesCcsdsTc] and the
|
||||
//! [ReceivesTcCore] traits, which allow passing raw packets, CCSDS packets and PUS TC packets into
|
||||
//! it. Upon receiving a packet, it performs the following steps:
|
||||
//!
|
||||
//! 1. It tries to extract the [SpHeader] and [spacepackets::ecss::tc::PusTcReader] objects from
|
||||
//! the raw bytestream. If this process fails, a [PusDistribError::PusError] is returned to the
|
||||
//! user.
|
||||
//! 2. If it was possible to extract both components, the packet will be passed to the
|
||||
//! [PusServiceProvider::handle_pus_tc_packet] method provided by the user.
|
||||
//!
|
||||
//! # Example
|
||||
//!
|
||||
//! ```rust
|
||||
//! use spacepackets::ecss::WritablePusPacket;
|
||||
//! use satrs::tmtc::pus_distrib::{PusDistributor, PusServiceProvider};
|
||||
//! use satrs::tmtc::{ReceivesTc, ReceivesTcCore};
|
||||
//! use spacepackets::SpHeader;
|
||||
//! use spacepackets::ecss::tc::{PusTcCreator, PusTcReader};
|
||||
//! struct ConcretePusHandler {
|
||||
//! handler_call_count: u32
|
||||
//! }
|
||||
//!
|
||||
//! // This is a very simple possible service provider. It increments an internal call count field,
|
||||
//! // which is used to verify the handler was called
|
||||
//! impl PusServiceProvider for ConcretePusHandler {
|
||||
//! type Error = ();
|
||||
//! fn handle_pus_tc_packet(&mut self, service: u8, header: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error> {
|
||||
//! assert_eq!(service, 17);
|
||||
//! assert_eq!(pus_tc.len_packed(), 13);
|
||||
//! self.handler_call_count += 1;
|
||||
//! Ok(())
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! let service_handler = ConcretePusHandler {
|
||||
//! handler_call_count: 0
|
||||
//! };
|
||||
//! let mut pus_distributor = PusDistributor::new(Box::new(service_handler));
|
||||
//!
|
||||
//! // Create and pass PUS ping telecommand with a valid APID
|
||||
//! let mut space_packet_header = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
|
||||
//! let mut pus_tc = PusTcCreator::new_simple(&mut space_packet_header, 17, 1, None, true);
|
||||
//! let mut test_buf: [u8; 32] = [0; 32];
|
||||
//! let mut size = pus_tc
|
||||
//! .write_to_bytes(test_buf.as_mut_slice())
|
||||
//! .expect("Error writing TC to buffer");
|
||||
//! let tc_slice = &test_buf[0..size];
|
||||
//!
|
||||
//! pus_distributor.pass_tc(tc_slice).expect("Passing PUS telecommand failed");
|
||||
//!
|
||||
//! // Use the helper function to retrieve the concrete handler type. We check the call count here to verify
|
||||
//! // that the PUS ping telecommand was routed successfully.
|
||||
//! let concrete_handler_ref: &ConcretePusHandler = pus_distributor
|
||||
//! .service_provider_ref()
|
||||
//! .expect("Casting back to concrete type failed");
|
||||
//! assert_eq!(concrete_handler_ref.handler_call_count, 1);
|
||||
//! ```
|
||||
use crate::pus::ReceivesEcssPusTc;
|
||||
use crate::tmtc::{ReceivesCcsdsTc, ReceivesTcCore};
|
||||
use alloc::boxed::Box;
|
||||
use core::fmt::{Display, Formatter};
|
||||
use downcast_rs::Downcast;
|
||||
use spacepackets::ecss::tc::PusTcReader;
|
||||
use spacepackets::ecss::{PusError, PusPacket};
|
||||
use spacepackets::SpHeader;
|
||||
#[cfg(feature = "std")]
|
||||
use std::error::Error;
|
||||
|
||||
pub trait PusServiceProvider: Downcast {
|
||||
type Error;
|
||||
fn handle_pus_tc_packet(
|
||||
&mut self,
|
||||
service: u8,
|
||||
header: &SpHeader,
|
||||
pus_tc: &PusTcReader,
|
||||
) -> Result<(), Self::Error>;
|
||||
}
|
||||
downcast_rs::impl_downcast!(PusServiceProvider assoc Error);
|
||||
|
||||
pub trait SendablePusServiceProvider: PusServiceProvider + Send {}
|
||||
|
||||
impl<T: Send + PusServiceProvider> SendablePusServiceProvider for T {}
|
||||
|
||||
downcast_rs::impl_downcast!(SendablePusServiceProvider assoc Error);
|
||||
|
||||
/// Generic distributor object which dispatches received packets to a user provided handler.
|
||||
///
|
||||
/// This distributor expects the passed trait object to be [Send]able to allow more ergonomic
|
||||
/// usage with threads.
|
||||
pub struct PusDistributor<E> {
|
||||
pub service_provider: Box<dyn SendablePusServiceProvider<Error = E>>,
|
||||
}
|
||||
|
||||
impl<E> PusDistributor<E> {
|
||||
pub fn new(service_provider: Box<dyn SendablePusServiceProvider<Error = E>>) -> Self {
|
||||
PusDistributor { service_provider }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub enum PusDistribError<E> {
|
||||
CustomError(E),
|
||||
PusError(PusError),
|
||||
}
|
||||
|
||||
impl<E: Display> Display for PusDistribError<E> {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
|
||||
match self {
|
||||
PusDistribError::CustomError(e) => write!(f, "{e}"),
|
||||
PusDistribError::PusError(e) => write!(f, "{e}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl<E: Error> Error for PusDistribError<E> {
|
||||
fn source(&self) -> Option<&(dyn Error + 'static)> {
|
||||
match self {
|
||||
Self::CustomError(e) => e.source(),
|
||||
Self::PusError(e) => e.source(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: 'static> ReceivesTcCore for PusDistributor<E> {
|
||||
type Error = PusDistribError<E>;
|
||||
fn pass_tc(&mut self, tm_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
// Convert to ccsds and call pass_ccsds
|
||||
let (sp_header, _) = SpHeader::from_be_bytes(tm_raw)
|
||||
.map_err(|e| PusDistribError::PusError(PusError::ByteConversion(e)))?;
|
||||
self.pass_ccsds(&sp_header, tm_raw)
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: 'static> ReceivesCcsdsTc for PusDistributor<E> {
|
||||
type Error = PusDistribError<E>;
|
||||
fn pass_ccsds(&mut self, header: &SpHeader, tm_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
let (tc, _) = PusTcReader::new(tm_raw).map_err(|e| PusDistribError::PusError(e))?;
|
||||
self.pass_pus_tc(header, &tc)
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: 'static> ReceivesEcssPusTc for PusDistributor<E> {
|
||||
type Error = PusDistribError<E>;
|
||||
fn pass_pus_tc(&mut self, header: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error> {
|
||||
self.service_provider
|
||||
.handle_pus_tc_packet(pus_tc.service(), header, pus_tc)
|
||||
.map_err(|e| PusDistribError::CustomError(e))
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: 'static> PusDistributor<E> {
|
||||
pub fn service_provider_ref<T: SendablePusServiceProvider<Error = E>>(&self) -> Option<&T> {
|
||||
self.service_provider.downcast_ref::<T>()
|
||||
}
|
||||
|
||||
pub fn service_provider_mut<T: SendablePusServiceProvider<Error = E>>(
|
||||
&mut self,
|
||||
) -> Option<&mut T> {
|
||||
self.service_provider.downcast_mut::<T>()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::tmtc::ccsds_distrib::tests::{
|
||||
generate_ping_tc, BasicApidHandlerOwnedQueue, BasicApidHandlerSharedQueue,
|
||||
};
|
||||
use crate::tmtc::ccsds_distrib::{CcsdsDistributor, CcsdsPacketHandler};
|
||||
use alloc::vec::Vec;
|
||||
use spacepackets::ecss::PusError;
|
||||
use spacepackets::CcsdsPacket;
|
||||
#[cfg(feature = "std")]
|
||||
use std::collections::VecDeque;
|
||||
#[cfg(feature = "std")]
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
fn is_send<T: Send>(_: &T) {}
|
||||
|
||||
struct PusHandlerSharedQueue {
|
||||
pub pus_queue: Arc<Mutex<VecDeque<(u8, u16, Vec<u8>)>>>,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct PusHandlerOwnedQueue {
|
||||
pub pus_queue: VecDeque<(u8, u16, Vec<u8>)>,
|
||||
}
|
||||
|
||||
impl PusServiceProvider for PusHandlerSharedQueue {
|
||||
type Error = PusError;
|
||||
fn handle_pus_tc_packet(
|
||||
&mut self,
|
||||
service: u8,
|
||||
sp_header: &SpHeader,
|
||||
pus_tc: &PusTcReader,
|
||||
) -> Result<(), Self::Error> {
|
||||
let mut vec: Vec<u8> = Vec::new();
|
||||
vec.extend_from_slice(pus_tc.raw_data());
|
||||
Ok(self
|
||||
.pus_queue
|
||||
.lock()
|
||||
.expect("Mutex lock failed")
|
||||
.push_back((service, sp_header.apid(), vec)))
|
||||
}
|
||||
}
|
||||
|
||||
impl PusServiceProvider for PusHandlerOwnedQueue {
|
||||
type Error = PusError;
|
||||
fn handle_pus_tc_packet(
|
||||
&mut self,
|
||||
service: u8,
|
||||
sp_header: &SpHeader,
|
||||
pus_tc: &PusTcReader,
|
||||
) -> Result<(), Self::Error> {
|
||||
let mut vec: Vec<u8> = Vec::new();
|
||||
vec.extend_from_slice(pus_tc.raw_data());
|
||||
Ok(self.pus_queue.push_back((service, sp_header.apid(), vec)))
|
||||
}
|
||||
}
|
||||
|
||||
struct ApidHandlerShared {
|
||||
pub pus_distrib: PusDistributor<PusError>,
|
||||
pub handler_base: BasicApidHandlerSharedQueue,
|
||||
}
|
||||
|
||||
struct ApidHandlerOwned {
|
||||
pub pus_distrib: PusDistributor<PusError>,
|
||||
handler_base: BasicApidHandlerOwnedQueue,
|
||||
}
|
||||
|
||||
macro_rules! apid_handler_impl {
|
||||
() => {
|
||||
type Error = PusError;
|
||||
|
||||
fn valid_apids(&self) -> &'static [u16] {
|
||||
&[0x000, 0x002]
|
||||
}
|
||||
|
||||
fn handle_known_apid(
|
||||
&mut self,
|
||||
sp_header: &SpHeader,
|
||||
tc_raw: &[u8],
|
||||
) -> Result<(), Self::Error> {
|
||||
self.handler_base
|
||||
.handle_known_apid(&sp_header, tc_raw)
|
||||
.ok()
|
||||
.expect("Unexpected error");
|
||||
match self.pus_distrib.pass_ccsds(&sp_header, tc_raw) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(e) => match e {
|
||||
PusDistribError::CustomError(_) => Ok(()),
|
||||
PusDistribError::PusError(e) => Err(e),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_unknown_apid(
|
||||
&mut self,
|
||||
sp_header: &SpHeader,
|
||||
tc_raw: &[u8],
|
||||
) -> Result<(), Self::Error> {
|
||||
self.handler_base
|
||||
.handle_unknown_apid(&sp_header, tc_raw)
|
||||
.ok()
|
||||
.expect("Unexpected error");
|
||||
Ok(())
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl CcsdsPacketHandler for ApidHandlerOwned {
|
||||
apid_handler_impl!();
|
||||
}
|
||||
|
||||
impl CcsdsPacketHandler for ApidHandlerShared {
|
||||
apid_handler_impl!();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg(feature = "std")]
|
||||
fn test_pus_distribution() {
|
||||
let known_packet_queue = Arc::new(Mutex::default());
|
||||
let unknown_packet_queue = Arc::new(Mutex::default());
|
||||
let pus_queue = Arc::new(Mutex::default());
|
||||
let pus_handler = PusHandlerSharedQueue {
|
||||
pus_queue: pus_queue.clone(),
|
||||
};
|
||||
let handler_base = BasicApidHandlerSharedQueue {
|
||||
known_packet_queue: known_packet_queue.clone(),
|
||||
unknown_packet_queue: unknown_packet_queue.clone(),
|
||||
};
|
||||
|
||||
let pus_distrib = PusDistributor {
|
||||
service_provider: Box::new(pus_handler),
|
||||
};
|
||||
is_send(&pus_distrib);
|
||||
let apid_handler = ApidHandlerShared {
|
||||
pus_distrib,
|
||||
handler_base,
|
||||
};
|
||||
let mut ccsds_distrib = CcsdsDistributor::new(Box::new(apid_handler));
|
||||
let mut test_buf: [u8; 32] = [0; 32];
|
||||
let tc_slice = generate_ping_tc(test_buf.as_mut_slice());
|
||||
|
||||
// Pass packet to distributor
|
||||
ccsds_distrib
|
||||
.pass_tc(tc_slice)
|
||||
.expect("Passing TC slice failed");
|
||||
let recvd_ccsds = known_packet_queue.lock().unwrap().pop_front();
|
||||
assert!(unknown_packet_queue.lock().unwrap().is_empty());
|
||||
assert!(recvd_ccsds.is_some());
|
||||
let (apid, packet) = recvd_ccsds.unwrap();
|
||||
assert_eq!(apid, 0x002);
|
||||
assert_eq!(packet.as_slice(), tc_slice);
|
||||
let recvd_pus = pus_queue.lock().unwrap().pop_front();
|
||||
assert!(recvd_pus.is_some());
|
||||
let (service, apid, tc_raw) = recvd_pus.unwrap();
|
||||
assert_eq!(service, 17);
|
||||
assert_eq!(apid, 0x002);
|
||||
assert_eq!(tc_raw, tc_slice);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_as_any_cast() {
|
||||
let pus_handler = PusHandlerOwnedQueue::default();
|
||||
let handler_base = BasicApidHandlerOwnedQueue::default();
|
||||
let pus_distrib = PusDistributor {
|
||||
service_provider: Box::new(pus_handler),
|
||||
};
|
||||
|
||||
let apid_handler = ApidHandlerOwned {
|
||||
pus_distrib,
|
||||
handler_base,
|
||||
};
|
||||
let mut ccsds_distrib = CcsdsDistributor::new(Box::new(apid_handler));
|
||||
|
||||
let mut test_buf: [u8; 32] = [0; 32];
|
||||
let tc_slice = generate_ping_tc(test_buf.as_mut_slice());
|
||||
|
||||
ccsds_distrib
|
||||
.pass_tc(tc_slice)
|
||||
.expect("Passing TC slice failed");
|
||||
|
||||
let apid_handler_casted_back: &mut ApidHandlerOwned = ccsds_distrib
|
||||
.apid_handler_mut()
|
||||
.expect("Cast to concrete type ApidHandler failed");
|
||||
assert!(!apid_handler_casted_back
|
||||
.handler_base
|
||||
.known_packet_queue
|
||||
.is_empty());
|
||||
let handler_casted_back: &mut PusHandlerOwnedQueue = apid_handler_casted_back
|
||||
.pus_distrib
|
||||
.service_provider_mut()
|
||||
.expect("Cast to concrete type PusHandlerOwnedQueue failed");
|
||||
assert!(!handler_casted_back.pus_queue.is_empty());
|
||||
let (service, apid, packet_raw) = handler_casted_back.pus_queue.pop_front().unwrap();
|
||||
assert_eq!(service, 17);
|
||||
assert_eq!(apid, 0x002);
|
||||
assert_eq!(packet_raw.as_slice(), tc_slice);
|
||||
}
|
||||
}
|
127
satrs/src/tmtc/tm_helper.rs
Normal file
@ -0,0 +1,127 @@
|
||||
use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
|
||||
use spacepackets::time::cds::TimeProvider;
|
||||
use spacepackets::time::TimeWriter;
|
||||
use spacepackets::SpHeader;
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub use std_mod::*;
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub mod std_mod {
|
||||
use crate::pool::{PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StoreAddr};
|
||||
use crate::pus::EcssTmtcError;
|
||||
use spacepackets::ecss::tm::PusTmCreator;
|
||||
use spacepackets::ecss::WritablePusPacket;
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct SharedTmPool(pub SharedStaticMemoryPool);
|
||||
|
||||
impl SharedTmPool {
|
||||
pub fn new(shared_pool: StaticMemoryPool) -> Self {
|
||||
Self(Arc::new(RwLock::new(shared_pool)))
|
||||
}
|
||||
|
||||
pub fn clone_backing_pool(&self) -> SharedStaticMemoryPool {
|
||||
self.0.clone()
|
||||
}
|
||||
pub fn shared_pool(&self) -> &SharedStaticMemoryPool {
|
||||
&self.0
|
||||
}
|
||||
|
||||
pub fn shared_pool_mut(&mut self) -> &mut SharedStaticMemoryPool {
|
||||
&mut self.0
|
||||
}
|
||||
|
||||
pub fn add_pus_tm(&self, pus_tm: &PusTmCreator) -> Result<StoreAddr, EcssTmtcError> {
|
||||
let mut pg = self.0.write().map_err(|_| EcssTmtcError::StoreLock)?;
|
||||
let addr = pg.free_element(pus_tm.len_written(), |buf| {
|
||||
pus_tm
|
||||
.write_to_bytes(buf)
|
||||
.expect("writing PUS TM to store failed");
|
||||
})?;
|
||||
Ok(addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct PusTmWithCdsShortHelper {
|
||||
apid: u16,
|
||||
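// A CDS short timestamp is 7 bytes long: 1 P-field byte, 2 bytes for the day segment and 4 bytes for the milliseconds of the day.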
cds_short_buf: [u8; 7],
|
||||
}
|
||||
|
||||
impl PusTmWithCdsShortHelper {
|
||||
pub fn new(apid: u16) -> Self {
|
||||
Self {
|
||||
apid,
|
||||
cds_short_buf: [0; 7],
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub fn create_pus_tm_timestamp_now<'a>(
|
||||
&'a mut self,
|
||||
service: u8,
|
||||
subservice: u8,
|
||||
source_data: &'a [u8],
|
||||
seq_count: u16,
|
||||
) -> PusTmCreator {
|
||||
let time_stamp = TimeProvider::from_now_with_u16_days().unwrap();
|
||||
time_stamp.write_to_bytes(&mut self.cds_short_buf).unwrap();
|
||||
self.create_pus_tm_common(service, subservice, source_data, seq_count)
|
||||
}
|
||||
|
||||
pub fn create_pus_tm_with_stamper<'a>(
|
||||
&'a mut self,
|
||||
service: u8,
|
||||
subservice: u8,
|
||||
source_data: &'a [u8],
|
||||
stamper: &TimeProvider,
|
||||
seq_count: u16,
|
||||
) -> PusTmCreator {
|
||||
stamper.write_to_bytes(&mut self.cds_short_buf).unwrap();
|
||||
self.create_pus_tm_common(service, subservice, source_data, seq_count)
|
||||
}
|
||||
|
||||
fn create_pus_tm_common<'a>(
|
||||
&'a self,
|
||||
service: u8,
|
||||
subservice: u8,
|
||||
source_data: &'a [u8],
|
||||
seq_count: u16,
|
||||
) -> PusTmCreator {
|
||||
let mut reply_header = SpHeader::tm_unseg(self.apid, seq_count, 0).unwrap();
|
||||
let tc_header = PusTmSecondaryHeader::new_simple(service, subservice, &self.cds_short_buf);
|
||||
PusTmCreator::new(&mut reply_header, tc_header, source_data, true)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use spacepackets::{ecss::PusPacket, time::cds::TimeProvider, CcsdsPacket};
|
||||
|
||||
use super::PusTmWithCdsShortHelper;
|
||||
|
||||
#[test]
|
||||
fn test_helper_with_stamper() {
|
||||
let mut pus_tm_helper = PusTmWithCdsShortHelper::new(0x123);
|
||||
let stamper = TimeProvider::new_with_u16_days(0, 0);
|
||||
let tm = pus_tm_helper.create_pus_tm_with_stamper(17, 1, &[1, 2, 3, 4], &stamper, 25);
|
||||
assert_eq!(tm.service(), 17);
|
||||
assert_eq!(tm.subservice(), 1);
|
||||
assert_eq!(tm.user_data(), &[1, 2, 3, 4]);
|
||||
assert_eq!(tm.seq_count(), 25);
|
||||
assert_eq!(tm.timestamp(), [64, 0, 0, 0, 0, 0, 0])
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_helper_from_now() {
|
||||
let mut pus_tm_helper = PusTmWithCdsShortHelper::new(0x123);
|
||||
let tm = pus_tm_helper.create_pus_tm_timestamp_now(17, 1, &[1, 2, 3, 4], 25);
|
||||
assert_eq!(tm.service(), 17);
|
||||
assert_eq!(tm.subservice(), 1);
|
||||
assert_eq!(tm.user_data(), &[1, 2, 3, 4]);
|
||||
assert_eq!(tm.seq_count(), 25);
|
||||
assert_eq!(tm.timestamp().len(), 7);
|
||||
}
|
||||
}
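A short sketch, not part of the commit, of how the helper and the shared pool added above could be combined. The pool sizing, APID and function name are arbitrary assumptions for illustration; PusTmWithCdsShortHelper, SharedTmPool::add_pus_tm and the static pool types are taken from this diff.

use satrs::pool::{StaticMemoryPool, StaticPoolConfig};
use satrs::tmtc::tm_helper::{PusTmWithCdsShortHelper, SharedTmPool};
use spacepackets::time::cds::TimeProvider;

fn store_ping_reply() {
    // Pool with eight 64 byte buckets; the sizing is an arbitrary assumption for this sketch.
    let pool_cfg = StaticPoolConfig::new(vec![(8, 64)], false);
    let shared_tm_pool = SharedTmPool::new(StaticMemoryPool::new(pool_cfg));

    let mut tm_helper = PusTmWithCdsShortHelper::new(0x123);
    let stamper = TimeProvider::new_with_u16_days(0, 0);
    // TM[17,2] is the ping reply.
    let ping_reply = tm_helper.create_pus_tm_with_stamper(17, 2, &[], &stamper, 0);

    // Write the generated TM into the shared pool; the returned store address could then be
    // sent to a downlink handler over a channel.
    let addr = shared_tm_pool
        .add_pus_tm(&ping_reply)
        .expect("storing TM failed");
    let _ = addr;
}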
|
164
satrs/tests/hk_helpers.rs
Normal file
@ -0,0 +1,164 @@
|
||||
#![allow(dead_code)]
|
||||
use core::mem::size_of;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use spacepackets::ecss::{PfcReal, PfcUnsigned, Ptc};
|
||||
use spacepackets::time::cds::TimeProvider;
|
||||
use spacepackets::time::{CcsdsTimeProvider, TimeWriter};
|
||||
|
||||
enum NumOfParamsInfo {
|
||||
/// The parameter entry is a scalar field
|
||||
Scalar = 0b00,
|
||||
/// The parameter entry is a vector, and its length field is one byte wide (max. 255 entries)
|
||||
VecLenFieldOneByte = 0b01,
|
||||
/// The parameter entry is a vector, and its length field is two bytes wide (max. 65535 entries)
|
||||
VecLenFieldTwoBytes = 0b10,
|
||||
/// The parameter entry is a matrix, and its length field contains a one byte row number
|
||||
/// and a one byte column number.
|
||||
MatrixRowsAndColumns = 0b11,
|
||||
}
|
||||
|
||||
const HAS_VALIDITY_MASK: u8 = 1 << 7;
|
||||
|
||||
struct ParamWithValidity<T> {
|
||||
valid: bool,
|
||||
val: T,
|
||||
}
|
||||
|
||||
struct TestMgmHk {
|
||||
temp: f32,
|
||||
mgm_vals: [u16; 3],
|
||||
}
|
||||
|
||||
struct TestMgmHkWithIndividualValidity {
|
||||
temp: ParamWithValidity<f32>,
|
||||
mgm_vals: ParamWithValidity<[u16; 3]>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
struct TestMgmHkWithGroupValidity {
|
||||
last_valid_stamp: TimeProvider,
|
||||
valid: bool,
|
||||
temp: f32,
|
||||
mgm_vals: [u16; 3],
|
||||
}
|
||||
|
||||
impl TestMgmHk {
|
||||
pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ()> {
|
||||
let mut curr_idx = 0;
|
||||
buf[curr_idx..curr_idx + size_of::<f32>()].copy_from_slice(&self.temp.to_be_bytes());
|
||||
curr_idx += size_of::<f32>();
|
||||
for val in self.mgm_vals {
|
||||
buf[curr_idx..curr_idx + size_of::<u16>()].copy_from_slice(&val.to_be_bytes());
|
||||
curr_idx += size_of::<u16>();
|
||||
}
|
||||
Ok(curr_idx)
|
||||
}
|
||||
}
|
||||
|
||||
/// This could in principle be auto-generated.
|
||||
impl TestMgmHkWithIndividualValidity {
|
||||
pub fn write_to_be_bytes_self_describing(&self, buf: &mut [u8]) -> Result<usize, ()> {
|
||||
let mut curr_idx = 0;
|
||||
buf[curr_idx] = 0;
|
||||
buf[curr_idx] |= HAS_VALIDITY_MASK | (self.temp.valid as u8) << 6;
|
||||
curr_idx += 1;
|
||||
buf[curr_idx] = Ptc::Real as u8;
|
||||
curr_idx += 1;
|
||||
buf[curr_idx] = PfcReal::Float as u8;
|
||||
curr_idx += 1;
|
||||
buf[curr_idx..curr_idx + size_of::<f32>()].copy_from_slice(&self.temp.val.to_be_bytes());
|
||||
curr_idx += size_of::<f32>();
|
||||
buf[curr_idx] = 0;
|
||||
buf[curr_idx] |= HAS_VALIDITY_MASK
|
||||
| (self.mgm_vals.valid as u8) << 6
|
||||
| (NumOfParamsInfo::VecLenFieldOneByte as u8) << 4;
|
||||
curr_idx += 1;
|
||||
buf[curr_idx] = Ptc::UnsignedInt as u8;
|
||||
curr_idx += 1;
|
||||
buf[curr_idx] = PfcUnsigned::TwoBytes as u8;
|
||||
curr_idx += 1;
|
||||
buf[curr_idx] = 3;
|
||||
curr_idx += 1;
|
||||
for val in self.mgm_vals.val {
|
||||
buf[curr_idx..curr_idx + size_of::<u16>()].copy_from_slice(&val.to_be_bytes());
|
||||
curr_idx += size_of::<u16>();
|
||||
}
|
||||
Ok(curr_idx)
|
||||
}
|
||||
}
|
||||
|
||||
impl TestMgmHkWithGroupValidity {
|
||||
pub fn write_to_be_bytes_self_describing(&self, buf: &mut [u8]) -> Result<usize, ()> {
|
||||
let mut curr_idx = 0;
|
||||
buf[curr_idx] = self.valid as u8;
|
||||
curr_idx += 1;
|
||||
self.last_valid_stamp
|
||||
.write_to_bytes(&mut buf[curr_idx..curr_idx + self.last_valid_stamp.len_as_bytes()])
|
||||
.unwrap();
|
||||
curr_idx += self.last_valid_stamp.len_as_bytes();
|
||||
buf[curr_idx] = 0;
|
||||
curr_idx += 1;
|
||||
buf[curr_idx] = Ptc::Real as u8;
|
||||
curr_idx += 1;
|
||||
buf[curr_idx] = PfcReal::Float as u8;
|
||||
curr_idx += 1;
|
||||
buf[curr_idx..curr_idx + size_of::<f32>()].copy_from_slice(&self.temp.to_be_bytes());
|
||||
curr_idx += size_of::<f32>();
|
||||
buf[curr_idx] = 0;
|
||||
buf[curr_idx] |= (NumOfParamsInfo::VecLenFieldOneByte as u8) << 4;
|
||||
curr_idx += 1;
|
||||
buf[curr_idx] = Ptc::UnsignedInt as u8;
|
||||
curr_idx += 1;
|
||||
buf[curr_idx] = PfcUnsigned::TwoBytes as u8;
|
||||
curr_idx += 1;
|
||||
buf[curr_idx] = 3;
curr_idx += 1;
|
||||
for val in self.mgm_vals {
|
||||
buf[curr_idx..curr_idx + size_of::<u16>()].copy_from_slice(&val.to_be_bytes());
|
||||
curr_idx += size_of::<u16>();
|
||||
}
|
||||
Ok(curr_idx)
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn main() {
|
||||
let mut raw_buf: [u8; 32] = [0; 32];
|
||||
let mgm_hk = TestMgmHk {
|
||||
temp: 20.0,
|
||||
mgm_vals: [0x1f1f, 0x2f2f, 0x3f3f],
|
||||
};
|
||||
// 4 byte float + 3 * 2 bytes MGM values
|
||||
let written = mgm_hk.write_to_be_bytes(&mut raw_buf).unwrap();
|
||||
assert_eq!(written, 10);
|
||||
|
||||
let mgm_hk_individual_validity = TestMgmHkWithIndividualValidity {
|
||||
temp: ParamWithValidity {
|
||||
valid: true,
|
||||
val: 20.0,
|
||||
},
|
||||
mgm_vals: ParamWithValidity {
|
||||
valid: true,
|
||||
val: [0x1f1f, 0x2f2f, 0x3f3f],
|
||||
},
|
||||
};
|
||||
let written = mgm_hk_individual_validity
|
||||
.write_to_be_bytes_self_describing(&mut raw_buf)
|
||||
.unwrap();
|
||||
// 3 byte float description, 4 byte float, 4 byte MGM val description, 3 * 2 bytes MGM values
|
||||
assert_eq!(written, 17);
|
||||
|
||||
// The easiest and probably most flexible approach, at the cost of TM downlink capacity:
|
||||
// Use a JSON format
|
||||
let mgm_hk_group_validity = TestMgmHkWithGroupValidity {
|
||||
last_valid_stamp: TimeProvider::from_now_with_u16_days().unwrap(),
|
||||
valid: false,
|
||||
temp: 20.0,
|
||||
mgm_vals: [0x1f1f, 0x2f2f, 0x3f3f],
|
||||
};
|
||||
let mgm_as_json_str = serde_json::to_string(&mgm_hk_group_validity).unwrap();
|
||||
println!(
|
||||
"JSON string with length {}: {}",
|
||||
mgm_as_json_str.len(),
|
||||
mgm_as_json_str
|
||||
);
|
||||
}
|
38
satrs/tests/pools.rs
Normal file
@ -0,0 +1,38 @@
|
||||
use satrs::pool::{PoolGuard, PoolProvider, StaticMemoryPool, StaticPoolConfig, StoreAddr};
|
||||
use std::ops::DerefMut;
|
||||
use std::sync::mpsc;
|
||||
use std::sync::mpsc::{Receiver, Sender};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread;
|
||||
|
||||
const DUMMY_DATA: [u8; 4] = [0, 1, 2, 3];
|
||||
|
||||
#[test]
|
||||
fn threaded_usage() {
|
||||
let pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)], false);
|
||||
let shared_pool = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
|
||||
let shared_clone = shared_pool.clone();
|
||||
let (tx, rx): (Sender<StoreAddr>, Receiver<StoreAddr>) = mpsc::channel();
|
||||
let jh0 = thread::spawn(move || {
|
||||
let mut dummy = shared_pool.write().unwrap();
|
||||
let addr = dummy.add(&DUMMY_DATA).expect("Writing data failed");
|
||||
tx.send(addr).expect("Sending store address failed");
|
||||
});
|
||||
|
||||
let jh1 = thread::spawn(move || {
|
||||
let addr;
|
||||
{
|
||||
addr = rx.recv().expect("Receiving store address failed");
|
||||
let mut pool_access = shared_clone.write().unwrap();
|
||||
let pg = PoolGuard::new(pool_access.deref_mut(), addr);
|
||||
let mut read_buf: [u8; 4] = [0; 4];
|
||||
let read_bytes = pg.read(&mut read_buf).expect("Reading failed");
|
||||
assert_eq!(read_buf, DUMMY_DATA);
|
||||
assert_eq!(read_bytes, 4);
|
||||
}
|
||||
let pool_access = shared_clone.read().unwrap();
|
||||
assert!(!pool_access.has_element_at(&addr).expect("Invalid address"));
|
||||
});
|
||||
jh0.join().unwrap();
|
||||
jh1.join().unwrap();
|
||||
}
|
94
satrs/tests/pus_autogen_events.rs
Normal file
@ -0,0 +1,94 @@
|
||||
#![allow(dead_code, unused_imports)]
|
||||
|
||||
use satrs::events::{
|
||||
EventU32, EventU32TypedSev, GenericEvent, HasSeverity, LargestEventRaw, LargestGroupIdRaw,
|
||||
Severity, SeverityInfo, SeverityLow, SeverityMedium,
|
||||
};
|
||||
use std::convert::AsRef;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct GroupIdIntrospection {
|
||||
name: &'static str,
|
||||
id: LargestGroupIdRaw,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct EventIntrospection {
|
||||
name: &'static str,
|
||||
group_id: GroupIdIntrospection,
|
||||
event: &'static EventU32,
|
||||
info: &'static str,
|
||||
}
|
||||
|
||||
//#[event(descr="This is some info event")]
|
||||
const INFO_EVENT_0: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::const_new(0, 0);
|
||||
const INFO_EVENT_0_ERASED: EventU32 = EventU32::const_from_info(INFO_EVENT_0);
|
||||
|
||||
// This is ideally auto-generated
|
||||
const INFO_EVENT_0_INTROSPECTION: EventIntrospection = EventIntrospection {
|
||||
name: "INFO_EVENT_0",
|
||||
group_id: GroupIdIntrospection {
|
||||
id: 0,
|
||||
name: "Group ID 0 without name",
|
||||
},
|
||||
event: &INFO_EVENT_0_ERASED,
|
||||
info: "This is some info event",
|
||||
};
|
||||
|
||||
//#[event(descr="This is some low severity event")]
|
||||
const SOME_LOW_SEV_EVENT: EventU32TypedSev<SeverityLow> = EventU32TypedSev::const_new(0, 12);
|
||||
|
||||
//const EVENT_LIST: [&'static Event; 2] = [&INFO_EVENT_0, &SOME_LOW_SEV_EVENT];
|
||||
|
||||
//#[event_group]
|
||||
const TEST_GROUP_NAME: u16 = 1;
|
||||
// Auto-generated?
|
||||
const TEST_GROUP_NAME_NAME: &str = "TEST_GROUP_NAME";
|
||||
|
||||
//#[event(desc="Some medium severity event")]
|
||||
const MEDIUM_SEV_EVENT_IN_OTHER_GROUP: EventU32TypedSev<SeverityMedium> =
|
||||
EventU32TypedSev::const_new(TEST_GROUP_NAME, 0);
|
||||
const MEDIUM_SEV_EVENT_IN_OTHER_GROUP_REDUCED: EventU32 =
|
||||
EventU32::const_from_medium(MEDIUM_SEV_EVENT_IN_OTHER_GROUP);
|
||||
|
||||
// Also auto-generated
|
||||
const MEDIUM_SEV_EVENT_IN_OTHER_GROUP_INTROSPECTION: EventIntrospection = EventIntrospection {
|
||||
name: "MEDIUM_SEV_EVENT_IN_OTHER_GROUP",
|
||||
group_id: GroupIdIntrospection {
|
||||
name: TEST_GROUP_NAME_NAME,
|
||||
id: TEST_GROUP_NAME,
|
||||
},
|
||||
event: &MEDIUM_SEV_EVENT_IN_OTHER_GROUP_REDUCED,
|
||||
info: "Some medium severity event",
|
||||
};
|
||||
|
||||
const CONST_SLICE: &'static [u8] = &[0, 1, 2, 3];
|
||||
const INTROSPECTION_FOR_TEST_GROUP_0: [&EventIntrospection; 2] =
|
||||
[&INFO_EVENT_0_INTROSPECTION, &INFO_EVENT_0_INTROSPECTION];
|
||||
|
||||
//const INTROSPECTION_FOR_TABLE: &'static [&EventIntrospection] = &INTROSPECTION_FOR_TEST_GROUP_0;
|
||||
|
||||
const INTROSPECTION_FOR_TEST_GROUP_NAME: [&EventIntrospection; 1] =
|
||||
[&MEDIUM_SEV_EVENT_IN_OTHER_GROUP_INTROSPECTION];
|
||||
//const BLAH: &'static [&EventIntrospection] = &INTROSPECTION_FOR_TEST_GROUP_NAME;
|
||||
|
||||
const ALL_EVENTS: [&[&EventIntrospection]; 2] = [
|
||||
&INTROSPECTION_FOR_TEST_GROUP_0,
|
||||
&INTROSPECTION_FOR_TEST_GROUP_NAME,
|
||||
];
|
||||
|
||||
#[test]
|
||||
fn main() {
|
||||
//let test = stringify!(INFO_EVENT);
|
||||
//println!("{:?}", test);
|
||||
//for event in EVENT_LIST {
|
||||
// println!("{:?}", event);
|
||||
//}
|
||||
//for events in ALL_EVENTS.into_iter().flatten() {
|
||||
// dbg!("{:?}", events);
|
||||
//}
|
||||
//for introspection_info in INTROSPECTION_FOR_TEST_GROUP {
|
||||
// dbg!("{:?}", introspection_info);
|
||||
//}
|
||||
//let test_struct =
|
||||
}
|
158
satrs/tests/pus_events.rs
Normal file
@ -0,0 +1,158 @@
|
||||
use satrs::event_man::{
|
||||
EventManagerWithMpscQueue, MpscEventU32Receiver, MpscEventU32SendProvider, SendEventProvider,
|
||||
};
|
||||
use satrs::events::{EventU32, EventU32TypedSev, Severity, SeverityInfo};
|
||||
use satrs::params::U32Pair;
|
||||
use satrs::params::{Params, ParamsHeapless, WritableToBeBytes};
|
||||
use satrs::pus::event_man::{DefaultPusMgmtBackendProvider, EventReporter, PusEventDispatcher};
|
||||
use satrs::pus::MpscTmAsVecSender;
|
||||
use spacepackets::ecss::tm::PusTmReader;
|
||||
use spacepackets::ecss::{PusError, PusPacket};
|
||||
use std::sync::mpsc::{channel, SendError, TryRecvError};
|
||||
use std::thread;
|
||||
|
||||
const INFO_EVENT: EventU32TypedSev<SeverityInfo> =
|
||||
EventU32TypedSev::<SeverityInfo>::const_new(1, 0);
|
||||
const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5);
|
||||
const EMPTY_STAMP: [u8; 7] = [0; 7];
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum CustomTmSenderError {
|
||||
SendError(SendError<Vec<u8>>),
|
||||
PusError(PusError),
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_threaded_usage() {
|
||||
let (event_sender, event_man_receiver) = channel();
|
||||
let event_receiver = MpscEventU32Receiver::new(event_man_receiver);
|
||||
let mut event_man = EventManagerWithMpscQueue::new(Box::new(event_receiver));
|
||||
|
||||
let (pus_event_man_tx, pus_event_man_rx) = channel();
|
||||
let pus_event_man_send_provider = MpscEventU32SendProvider::new(1, pus_event_man_tx);
|
||||
event_man.subscribe_all(pus_event_man_send_provider.id());
|
||||
event_man.add_sender(pus_event_man_send_provider);
|
||||
let (event_tx, event_rx) = channel();
|
||||
let reporter = EventReporter::new(0x02, 128).expect("Creating event reporter failed");
|
||||
let backend = DefaultPusMgmtBackendProvider::<EventU32>::default();
|
||||
let mut pus_event_man = PusEventDispatcher::new(reporter, Box::new(backend));
|
||||
// PUS + Generic event manager thread
|
||||
let jh0 = thread::spawn(move || {
|
||||
let mut sender = MpscTmAsVecSender::new(0, "event_sender", event_tx);
|
||||
let mut event_cnt = 0;
|
||||
let mut params_array: [u8; 128] = [0; 128];
|
||||
loop {
|
||||
let res = event_man.try_event_handling();
|
||||
assert!(res.is_ok());
|
||||
match pus_event_man_rx.try_recv() {
|
||||
Ok((event, aux_data)) => {
|
||||
let mut gen_event = |aux_data| {
|
||||
pus_event_man.generate_pus_event_tm_generic(
|
||||
&mut sender,
|
||||
&EMPTY_STAMP,
|
||||
event,
|
||||
aux_data,
|
||||
)
|
||||
};
|
||||
let res = if let Some(aux_data) = aux_data {
|
||||
match aux_data {
|
||||
Params::Heapless(heapless) => match heapless {
|
||||
ParamsHeapless::Raw(raw) => {
|
||||
raw.write_to_be_bytes(&mut params_array)
|
||||
.expect("Writing raw parameter failed");
|
||||
gen_event(Some(¶ms_array[0..raw.raw_len()]))
|
||||
}
|
||||
ParamsHeapless::EcssEnum(e) => {
|
||||
e.write_to_be_bytes(&mut params_array)
|
||||
.expect("Writing ECSS enum failed");
|
||||
gen_event(Some(¶ms_array[0..e.raw_len()]))
|
||||
}
|
||||
},
|
||||
Params::Vec(vec) => gen_event(Some(vec.as_slice())),
|
||||
Params::String(str) => gen_event(Some(str.as_bytes())),
|
||||
Params::Store(_) => gen_event(None),
|
||||
}
|
||||
} else {
|
||||
gen_event(None)
|
||||
};
|
||||
event_cnt += 1;
|
||||
assert!(res.is_ok());
|
||||
assert!(res.unwrap());
|
||||
if event_cnt == 2 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
if let TryRecvError::Disconnected = e {
|
||||
panic!("Event receiver disconnected!")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Event sender and TM checker thread
|
||||
let jh1 = thread::spawn(move || {
|
||||
event_sender
|
||||
.send((INFO_EVENT.into(), None))
|
||||
.expect("Sending info event failed");
|
||||
loop {
|
||||
match event_rx.try_recv() {
|
||||
// Event TM received successfully
|
||||
Ok(event_tm) => {
|
||||
let tm =
|
||||
PusTmReader::new(event_tm.as_slice(), 7).expect("Deserializing TM failed");
|
||||
assert_eq!(tm.0.service(), 5);
|
||||
assert_eq!(tm.0.subservice(), 1);
|
||||
let src_data = tm.0.source_data();
|
||||
assert!(!src_data.is_empty());
|
||||
assert_eq!(src_data.len(), 4);
|
||||
let event =
|
||||
EventU32::from(u32::from_be_bytes(src_data[0..4].try_into().unwrap()));
|
||||
assert_eq!(event, INFO_EVENT);
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
if let TryRecvError::Disconnected = e {
|
||||
panic!("Event sender disconnected!")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
event_sender
|
||||
.send((
|
||||
LOW_SEV_EVENT.into(),
|
||||
Some(Params::Heapless((2_u32, 3_u32).into())),
|
||||
))
|
||||
.expect("Sending low severity event failed");
|
||||
loop {
|
||||
match event_rx.try_recv() {
|
||||
// Event TM received successfully
|
||||
Ok(event_tm) => {
|
||||
let tm =
|
||||
PusTmReader::new(event_tm.as_slice(), 7).expect("Deserializing TM failed");
|
||||
assert_eq!(tm.0.service(), 5);
|
||||
assert_eq!(tm.0.subservice(), 2);
|
||||
let src_data = tm.0.source_data();
|
||||
assert!(!src_data.is_empty());
|
||||
assert_eq!(src_data.len(), 12);
|
||||
let event =
|
||||
EventU32::from(u32::from_be_bytes(src_data[0..4].try_into().unwrap()));
|
||||
assert_eq!(event, LOW_SEV_EVENT);
|
||||
let u32_pair: U32Pair =
|
||||
src_data[4..].try_into().expect("Creating U32Pair failed");
|
||||
assert_eq!(u32_pair.0, 2);
|
||||
assert_eq!(u32_pair.1, 3);
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
if let TryRecvError::Disconnected = e {
|
||||
panic!("Event sender disconnected!")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
jh0.join().expect("Joining manager thread failed");
|
||||
jh1.join().expect("Joining creator thread failed");
|
||||
}
|
190
satrs/tests/pus_verification.rs
Normal file
@ -0,0 +1,190 @@
|
||||
//#[cfg(feature = "crossbeam")]
|
||||
pub mod crossbeam_test {
|
||||
use hashbrown::HashMap;
|
||||
use satrs::pool::{PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};
|
||||
use satrs::pus::verification::{
|
||||
FailParams, RequestId, VerificationReporterCfg, VerificationReporterWithSender,
|
||||
};
|
||||
use satrs::pus::CrossbeamTmInStoreSender;
|
||||
use satrs::tmtc::tm_helper::SharedTmPool;
|
||||
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader};
|
||||
use spacepackets::ecss::tm::PusTmReader;
|
||||
use spacepackets::ecss::{EcssEnumU16, EcssEnumU8, PusPacket, WritablePusPacket};
|
||||
use spacepackets::SpHeader;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
const TEST_APID: u16 = 0x03;
|
||||
const FIXED_STAMP: [u8; 7] = [0; 7];
|
||||
const PACKETS_SENT: u8 = 8;
|
||||
|
||||
/// This test also shows how the verification report could be used in a multi-threaded context,
|
||||
/// wrapping it into an [Arc] and [Mutex] and then passing it to two threads.
|
||||
///
|
||||
/// - The first thread generates an acceptance, a start, two step and one completion report
|
||||
/// - The second generates an acceptance and start success report and a completion failure
|
||||
/// - The third thread is the verification receiver. In the test case, it verifies the other two
|
||||
/// threads have sent the correct expected verification reports
|
||||
#[test]
|
||||
fn test_shared_reporter() {
|
||||
// We use a synced sequence count provider here because both verification reporters have the
|
||||
// same APID. If they had distinct APIDs, the more correct approach would be to have
|
||||
// each reporter use its own sequence count provider.
|
||||
let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
|
||||
// Shared pool object to store the verification PUS telemetry
|
||||
let pool_cfg =
|
||||
StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false);
|
||||
let shared_tm_pool = SharedTmPool::new(StaticMemoryPool::new(pool_cfg.clone()));
|
||||
let shared_tc_pool_0 = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
|
||||
let shared_tc_pool_1 = shared_tc_pool_0.clone();
|
||||
let (tx, rx) = crossbeam_channel::bounded(10);
|
||||
let sender =
|
||||
CrossbeamTmInStoreSender::new(0, "verif_sender", shared_tm_pool.clone(), tx.clone());
|
||||
let mut reporter_with_sender_0 =
|
||||
VerificationReporterWithSender::new(&cfg, Box::new(sender));
|
||||
let mut reporter_with_sender_1 = reporter_with_sender_0.clone();
|
||||
// For test purposes, we retrieve the request ID from the TCs and pass them to the receiver
|
||||
// thread.
|
||||
let req_id_0;
|
||||
let req_id_1;
|
||||
|
||||
let (tx_tc_0, rx_tc_0) = crossbeam_channel::bounded(3);
|
||||
let (tx_tc_1, rx_tc_1) = crossbeam_channel::bounded(3);
|
||||
{
|
||||
let mut tc_guard = shared_tc_pool_0.write().unwrap();
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap();
|
||||
let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
|
||||
let pus_tc_0 = PusTcCreator::new_no_app_data(&mut sph, tc_header, true);
|
||||
req_id_0 = RequestId::new(&pus_tc_0);
|
||||
let addr = tc_guard
|
||||
.free_element(pus_tc_0.len_written(), |buf| {
|
||||
pus_tc_0.write_to_bytes(buf).unwrap();
|
||||
})
|
||||
.unwrap();
|
||||
tx_tc_0.send(addr).unwrap();
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID, 1, 0).unwrap();
|
||||
let tc_header = PusTcSecondaryHeader::new_simple(5, 1);
|
||||
let pus_tc_1 = PusTcCreator::new_no_app_data(&mut sph, tc_header, true);
|
||||
req_id_1 = RequestId::new(&pus_tc_1);
|
||||
let addr = tc_guard
|
||||
.free_element(pus_tc_1.len_written(), |buf| {
|
||||
pus_tc_1.write_to_bytes(buf).unwrap();
|
||||
})
|
||||
.unwrap();
|
||||
tx_tc_1.send(addr).unwrap();
|
||||
}
|
||||
let verif_sender_0 = thread::spawn(move || {
|
||||
let mut tc_buf: [u8; 1024] = [0; 1024];
|
||||
let tc_addr = rx_tc_0
|
||||
.recv_timeout(Duration::from_millis(20))
|
||||
.expect("Receive timeout");
|
||||
let tc_len;
|
||||
{
|
||||
let mut tc_guard = shared_tc_pool_0.write().unwrap();
|
||||
let pg = tc_guard.read_with_guard(tc_addr);
|
||||
tc_len = pg.read(&mut tc_buf).unwrap();
|
||||
}
|
||||
let (_tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
|
||||
|
||||
let token = reporter_with_sender_0.add_tc_with_req_id(req_id_0);
|
||||
let accepted_token = reporter_with_sender_0
|
||||
.acceptance_success(token, Some(&FIXED_STAMP))
|
||||
.expect("Acceptance success failed");
|
||||
|
||||
// Do some start handling here
|
||||
let started_token = reporter_with_sender_0
|
||||
.start_success(accepted_token, Some(&FIXED_STAMP))
|
||||
.expect("Start success failed");
|
||||
// Do some step handling here
|
||||
reporter_with_sender_0
|
||||
.step_success(&started_token, Some(&FIXED_STAMP), EcssEnumU8::new(0))
|
||||
.expect("Start success failed");
|
||||
|
||||
// Finish up
|
||||
reporter_with_sender_0
|
||||
.step_success(&started_token, Some(&FIXED_STAMP), EcssEnumU8::new(1))
|
||||
.expect("Start success failed");
|
||||
reporter_with_sender_0
|
||||
.completion_success(started_token, Some(&FIXED_STAMP))
|
||||
.expect("Completion success failed");
|
||||
});
|
||||
|
||||
let verif_sender_1 = thread::spawn(move || {
|
||||
let mut tc_buf: [u8; 1024] = [0; 1024];
|
||||
let tc_addr = rx_tc_1
|
||||
.recv_timeout(Duration::from_millis(20))
|
||||
.expect("Receive timeout");
|
||||
let tc_len;
|
||||
{
|
||||
let mut tc_guard = shared_tc_pool_1.write().unwrap();
|
||||
let pg = tc_guard.read_with_guard(tc_addr);
|
||||
tc_len = pg.read(&mut tc_buf).unwrap();
|
||||
}
|
||||
let (tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
|
||||
let token = reporter_with_sender_1.add_tc(&tc);
|
||||
let accepted_token = reporter_with_sender_1
|
||||
.acceptance_success(token, Some(&FIXED_STAMP))
|
||||
.expect("Acceptance success failed");
|
||||
let started_token = reporter_with_sender_1
|
||||
.start_success(accepted_token, Some(&FIXED_STAMP))
|
||||
.expect("Start success failed");
|
||||
let fail_code = EcssEnumU16::new(2);
|
||||
let params = FailParams::new(Some(&FIXED_STAMP), &fail_code, None);
|
||||
reporter_with_sender_1
|
||||
.completion_failure(started_token, params)
|
||||
.expect("Completion success failed");
|
||||
});
|
||||
|
||||
let verif_receiver = thread::spawn(move || {
|
||||
let mut packet_counter = 0;
|
||||
let mut tm_buf: [u8; 1024] = [0; 1024];
|
||||
let mut verif_map = HashMap::new();
|
||||
while packet_counter < PACKETS_SENT {
|
||||
let verif_addr = rx
|
||||
.recv_timeout(Duration::from_millis(50))
|
||||
.expect("Packet reception timeout");
|
||||
let tm_len;
|
||||
let shared_tm_store = shared_tm_pool.clone_backing_pool();
|
||||
{
|
||||
let mut rg = shared_tm_store.write().expect("Error locking shared pool");
|
||||
let store_guard = rg.read_with_guard(verif_addr);
|
||||
tm_len = store_guard
|
||||
.read(&mut tm_buf)
|
||||
.expect("Error reading TM slice");
|
||||
}
|
||||
let (pus_tm, _) =
|
||||
PusTmReader::new(&tm_buf[0..tm_len], 7).expect("Error reading verification TM");
|
||||
let req_id =
|
||||
RequestId::from_bytes(&pus_tm.source_data()[0..RequestId::SIZE_AS_BYTES])
|
||||
.expect("reading request ID from PUS TM source data failed");
|
||||
if !verif_map.contains_key(&req_id) {
|
||||
let content = vec![pus_tm.subservice()];
|
||||
verif_map.insert(req_id, content);
|
||||
} else {
|
||||
let content = verif_map.get_mut(&req_id).unwrap();
|
||||
content.push(pus_tm.subservice())
|
||||
}
|
||||
packet_counter += 1;
|
||||
}
|
||||
for (req_id, content) in verif_map {
|
||||
if req_id == req_id_1 {
|
||||
assert_eq!(content[0], 1);
|
||||
assert_eq!(content[1], 3);
|
||||
assert_eq!(content[2], 8);
|
||||
} else if req_id == req_id_0 {
|
||||
assert_eq!(content[0], 1);
|
||||
assert_eq!(content[1], 3);
|
||||
assert_eq!(content[2], 5);
|
||||
assert_eq!(content[3], 5);
|
||||
assert_eq!(content[4], 7);
|
||||
} else {
|
||||
panic!("Unexpected request ID {:?}", req_id);
|
||||
}
|
||||
}
|
||||
});
|
||||
verif_sender_0.join().expect("Joining thread 0 failed");
|
||||
verif_sender_1.join().expect("Joining thread 1 failed");
|
||||
verif_receiver.join().expect("Joining thread 2 failed");
|
||||
}
|
||||
}
|
241
satrs/tests/tcp_servers.rs
Normal file
@ -0,0 +1,241 @@
|
||||
//! This serves as both an integration test and an example application showcasing all major
|
||||
//! features of the TCP COBS server by performing the following steps:
|
||||
//!
|
||||
//! 1. It defines both a TC receiver and a TM source which are [Sync].
|
||||
//! 2. A telemetry packet is inserted into the TM source. The packet will be handled by the
|
||||
//! TCP server after handling all TCs.
|
||||
//! 3. It instantiates the TCP server on localhost with automatic port assignment and assigns
|
||||
//! the TC receiver and TM source created previously.
|
||||
//! 4. It moves the TCP server to a different thread and calls the
|
||||
//! [TcpTmtcInCobsServer::handle_next_connection] call inside that thread
|
||||
//! 5. The main thread connects to the server, sends a test telecommand and then reads back
|
||||
//! the test telemetry inserted into the TM source previously.
|
||||
use core::{
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
time::Duration,
|
||||
};
|
||||
use std::{
|
||||
io::{Read, Write},
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream},
|
||||
sync::Mutex,
|
||||
thread,
|
||||
};
|
||||
|
||||
use hashbrown::HashSet;
|
||||
use satrs::{
|
||||
encoding::cobs::encode_packet_with_cobs,
|
||||
hal::std::tcp_server::{ServerConfig, TcpSpacepacketsServer, TcpTmtcInCobsServer},
|
||||
tmtc::{ReceivesTcCore, TmPacketSourceCore},
|
||||
};
|
||||
use spacepackets::{
|
||||
ecss::{tc::PusTcCreator, WritablePusPacket},
|
||||
PacketId, SpHeader,
|
||||
};
|
||||
use std::{boxed::Box, collections::VecDeque, sync::Arc, vec::Vec};
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
struct SyncTcCacher {
|
||||
tc_queue: Arc<Mutex<VecDeque<Vec<u8>>>>,
|
||||
}
|
||||
impl ReceivesTcCore for SyncTcCacher {
|
||||
type Error = ();
|
||||
|
||||
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
let mut tc_queue = self.tc_queue.lock().expect("tc forwarder failed");
|
||||
println!("Received TC: {:x?}", tc_raw);
|
||||
tc_queue.push_back(tc_raw.to_vec());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
struct SyncTmSource {
|
||||
tm_queue: Arc<Mutex<VecDeque<Vec<u8>>>>,
|
||||
}
|
||||
|
||||
impl SyncTmSource {
|
||||
pub(crate) fn add_tm(&mut self, tm: &[u8]) {
|
||||
let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
|
||||
tm_queue.push_back(tm.to_vec());
|
||||
}
|
||||
}
|
||||
|
||||
impl TmPacketSourceCore for SyncTmSource {
|
||||
type Error = ();
|
||||
|
||||
fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
|
||||
let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
|
||||
if !tm_queue.is_empty() {
|
||||
let next_vec = tm_queue.front().unwrap();
|
||||
if buffer.len() < next_vec.len() {
|
||||
panic!(
|
||||
"provided buffer too small, must be at least {} bytes",
|
||||
next_vec.len()
|
||||
);
|
||||
}
|
||||
println!("Sending and encoding TM: {:x?}", next_vec);
|
||||
let next_vec = tm_queue.pop_front().unwrap();
|
||||
buffer[0..next_vec.len()].copy_from_slice(&next_vec);
|
||||
return Ok(next_vec.len());
|
||||
}
|
||||
Ok(0)
|
||||
}
|
||||
}
|
||||
|
||||
const SIMPLE_PACKET: [u8; 5] = [1, 2, 3, 4, 5];
const INVERTED_PACKET: [u8; 5] = [5, 4, 3, 4, 1];
const AUTO_PORT_ADDR: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);

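// Illustration only, not part of the committed test: a minimal sketch of the framing the
// COBS server speaks on the wire. A payload is COBS-encoded and wrapped in 0x00 sentinel
// bytes, so a 5 byte packet like SIMPLE_PACKET ends up as 5 + 1 (COBS overhead) + 2
// (sentinels) = 8 bytes, which is exactly what the read-back assertion in the test below
// checks. The helper name `frame_with_cobs` is made up for this sketch.
#[allow(dead_code)]
fn frame_with_cobs(payload: &[u8]) -> Vec<u8> {
    let mut body = vec![0u8; cobs::max_encoding_length(payload.len())];
    let encoded_len = cobs::encode(payload, &mut body);
    let mut frame = Vec::with_capacity(encoded_len + 2);
    frame.push(0); // opening sentinel
    frame.extend_from_slice(&body[..encoded_len]);
    frame.push(0); // closing sentinel
    frame
}
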
#[test]
fn test_cobs_server() {
    let tc_receiver = SyncTcCacher::default();
    let mut tm_source = SyncTmSource::default();
    // Insert a telemetry packet which will be read back by the client at a later stage.
    tm_source.add_tm(&INVERTED_PACKET);
    let mut tcp_server = TcpTmtcInCobsServer::new(
        ServerConfig::new(AUTO_PORT_ADDR, Duration::from_millis(2), 1024, 1024),
        tm_source,
        tc_receiver.clone(),
    )
    .expect("TCP server generation failed");
    let dest_addr = tcp_server
        .local_addr()
        .expect("retrieving dest addr failed");
    let conn_handled: Arc<AtomicBool> = Default::default();
    let set_if_done = conn_handled.clone();

    // Call the connection handler in a separate thread, because the call blocks.
    thread::spawn(move || {
        let result = tcp_server.handle_next_connection();
        if result.is_err() {
            panic!("handling connection failed: {:?}", result.unwrap_err());
        }
        let conn_result = result.unwrap();
        assert_eq!(conn_result.num_received_tcs, 1, "No TC received");
        assert_eq!(conn_result.num_sent_tms, 1, "No TM sent");
        // Signal the main thread we are done.
        set_if_done.store(true, Ordering::Relaxed);
    });

    // Send a TC to the server now.
    let mut encoded_buf: [u8; 16] = [0; 16];
    let mut current_idx = 0;
    encode_packet_with_cobs(&SIMPLE_PACKET, &mut encoded_buf, &mut current_idx);
    let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
    stream
        .write_all(&encoded_buf[..current_idx])
        .expect("writing to TCP server failed");
    // Done with writing.
    stream
        .shutdown(std::net::Shutdown::Write)
        .expect("shutting down write failed");
    let mut read_buf: [u8; 16] = [0; 16];
    let read_len = stream.read(&mut read_buf).expect("read failed");
    drop(stream);

    // 1 byte COBS encoding overhead, 2 sentinel bytes.
    assert_eq!(read_len, 8);
    assert_eq!(read_buf[0], 0);
    assert_eq!(read_buf[read_len - 1], 0);
    let decoded_len =
        cobs::decode_in_place(&mut read_buf[1..read_len]).expect("COBS decoding failed");
    assert_eq!(decoded_len, 5);
    // Skip the first sentinel byte.
    assert_eq!(&read_buf[1..1 + INVERTED_PACKET.len()], &INVERTED_PACKET);
    // A certain amount of time is allowed for the transaction to complete.
    for _ in 0..3 {
        if !conn_handled.load(Ordering::Relaxed) {
            thread::sleep(Duration::from_millis(5));
        }
    }
    if !conn_handled.load(Ordering::Relaxed) {
        panic!("connection was not handled properly");
    }
    // Check that the packet was received and decoded successfully.
    let mut tc_queue = tc_receiver
        .tc_queue
        .lock()
        .expect("locking tc queue failed");
    assert_eq!(tc_queue.len(), 1);
    assert_eq!(tc_queue.pop_front().unwrap(), &SIMPLE_PACKET);
    drop(tc_queue);
}

const TEST_APID_0: u16 = 0x02;
const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0);

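// Illustration only, not part of the committed test: the packet ID lookup set handed to
// the space packets server below is used to detect packet starts in the raw TCP stream.
// A check for a known packet ID at a candidate offset might look roughly like this; the
// helper name `starts_with_known_packet_id` is made up, and the sketch assumes the
// spacepackets PacketId::raw() accessor for the 16 bit representation.
#[allow(dead_code)]
fn starts_with_known_packet_id(raw: &[u8], known_ids: &HashSet<PacketId>) -> bool {
    if raw.len() < 2 {
        return false;
    }
    // The packet ID (packet type, sec. header flag and APID) occupies the lower 13 bits
    // of the first two primary header bytes; the upper 3 bits carry the CCSDS version.
    let raw_id = u16::from_be_bytes([raw[0], raw[1]]) & 0x1FFF;
    known_ids.iter().any(|id| id.raw() == raw_id)
}
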
#[test]
fn test_ccsds_server() {
    let tc_receiver = SyncTcCacher::default();
    let mut tm_source = SyncTmSource::default();
    let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
    let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 1, None, true);
    let tm_0 = verif_tm.to_vec().expect("tm generation failed");
    tm_source.add_tm(&tm_0);
    let mut packet_id_lookup = HashSet::new();
    packet_id_lookup.insert(TEST_PACKET_ID_0);
    let mut tcp_server = TcpSpacepacketsServer::new(
        ServerConfig::new(AUTO_PORT_ADDR, Duration::from_millis(2), 1024, 1024),
        tm_source,
        tc_receiver.clone(),
        Box::new(packet_id_lookup),
    )
    .expect("TCP server generation failed");
    let dest_addr = tcp_server
        .local_addr()
        .expect("retrieving dest addr failed");
    let conn_handled: Arc<AtomicBool> = Default::default();
    let set_if_done = conn_handled.clone();
    // Call the connection handler in a separate thread, because the call blocks.
    thread::spawn(move || {
        let result = tcp_server.handle_next_connection();
        if result.is_err() {
            panic!("handling connection failed: {:?}", result.unwrap_err());
        }
        let conn_result = result.unwrap();
        assert_eq!(conn_result.num_received_tcs, 1);
        assert_eq!(conn_result.num_sent_tms, 1);
        set_if_done.store(true, Ordering::Relaxed);
    });
    let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
    stream
        .set_read_timeout(Some(Duration::from_millis(10)))
        .expect("setting read timeout failed");

    // Send a ping telecommand.
    let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
    let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
    let tc_0 = ping_tc.to_vec().expect("packet creation failed");
    stream
        .write_all(&tc_0)
        .expect("writing to TCP server failed");
    // Done with writing.
    stream
        .shutdown(std::net::Shutdown::Write)
        .expect("shutting down write failed");

    // Now read back all the telemetry from the server.
    let mut read_buf: [u8; 16] = [0; 16];
    let mut read_len_total = 0;
    // The read timeout ensures this loop does not block forever.
    while read_len_total < tm_0.len() {
        let read_len = stream.read(&mut read_buf).expect("read failed");
        read_len_total += read_len;
        assert_eq!(read_buf[..read_len], tm_0);
    }
    drop(stream);

    // A certain amount of time is allowed for the transaction to complete.
    for _ in 0..3 {
        if !conn_handled.load(Ordering::Relaxed) {
            thread::sleep(Duration::from_millis(5));
        }
    }
    if !conn_handled.load(Ordering::Relaxed) {
        panic!("connection was not handled properly");
    }
    // Check that the TC has arrived.
    let mut tc_queue = tc_receiver.tc_queue.lock().unwrap();
    assert_eq!(tc_queue.len(), 1);
    assert_eq!(tc_queue.pop_front().unwrap(), tc_0);
}