init commit

Robin Müller 2024-08-20 11:50:13 +02:00
commit b87ccde07d
24 changed files with 9299 additions and 0 deletions

72
.github/workflows/ci.yml vendored Normal file

@@ -0,0 +1,72 @@
name: ci
on: [push, pull_request]
jobs:
check:
name: Check build
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- run: cargo check --release
test:
name: Run Tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Install nextest
uses: taiki-e/install-action@nextest
- run: cargo nextest run --all-features
- run: cargo test --doc
msrv:
name: Check MSRV
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@1.75.0
- run: cargo check --release
cross-check:
name: Check Cross-Compilation
runs-on: ubuntu-latest
strategy:
matrix:
target:
- armv7-unknown-linux-gnueabihf
- thumbv7em-none-eabihf
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
with:
targets: "armv7-unknown-linux-gnueabihf, thumbv7em-none-eabihf"
- run: cargo check --release --target=${{matrix.target}} --no-default-features
fmt:
name: Check formatting
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- run: cargo fmt --all -- --check
docs:
name: Check Documentation Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@nightly
- run: RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options" cargo +nightly doc --all-features
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- run: cargo clippy -- -D warnings

7
.gitignore vendored Normal file

@@ -0,0 +1,7 @@
# Rust
/target
/Cargo.lock
# CLion
/.idea/*
!/.idea/runConfigurations

9
CHANGELOG.md Normal file

@@ -0,0 +1,9 @@
Change Log
=======
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]

67
Cargo.toml Normal file

@@ -0,0 +1,67 @@
[package]
name = "cfdp-rs"
version = "0.1.0"
edition = "2021"
rust-version = "1.75.0"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
description = "High level CCSDS File Delivery Protocol components"
homepage = "https://egit.irs.uni-stuttgart.de/rust/cfdp"
repository = "https://egit.irs.uni-stuttgart.de/rust/cfdp"
license = "Apache-2.0"
keywords = ["no-std", "space", "packets", "ccsds", "ecss"]
categories = ["aerospace", "aerospace::space-protocols", "no-std", "filesystem"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
name = "cfdp"
[dependencies]
crc = "3"
smallvec = "1"
derive-new = "0.6"
[dependencies.thiserror]
version = "1"
optional = true
[dependencies.hashbrown]
version = "0.14"
optional = true
[dependencies.serde]
version = "1"
optional = true
[dependencies.spacepackets]
version = "0.12"
default-features = false
[dependencies.defmt]
version = "0.3"
optional = true
[features]
default = ["std"]
std = [
"alloc",
"thiserror",
"spacepackets/std"
]
alloc = [
"hashbrown",
"spacepackets/alloc"
]
serde = ["dep:serde", "spacepackets/serde", "hashbrown/serde"]
defmt = ["dep:defmt", "spacepackets/defmt"]
[dev-dependencies]
tempfile = "3"
rand = "0.8"
log = "0.4"
fern = "0.6"
chrono = "0.4"
clap = { version = "4", features = ["derive"] }
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--generate-link-to-definition"]

201
LICENSE-APACHE Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

1
NOTICE Normal file

@@ -0,0 +1 @@
This software contains code developed at the University of Stuttgart's Institute of Space Systems.

49
README.md Normal file

@@ -0,0 +1,49 @@
[![Crates.io](https://img.shields.io/crates/v/cfdp-rs)](https://crates.io/crates/cfdp-rs)
[![docs.rs](https://img.shields.io/docsrs/cfdp-rs)](https://docs.rs/cfdp-rs)
[![ci](https://github.com/us-irs/cfdp-rs/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/us-irs/cfdp-rs/actions/workflows/ci.yml)
[![coverage](https://shields.io/endpoint?url=https://absatsw.irs.uni-stuttgart.de/projects/cfdp/coverage-rs/latest/coverage.json)](https://absatsw.irs.uni-stuttgart.de/projects/cfdp/coverage-rs/latest/index.html)
cfdp-rs - High level Rust crate for CFDP components
======================
The `cfdp-rs` Rust crate offers some high-level CCSDS File Delivery Protocol (CFDP) components to
perform file transfers according to the [CCSDS Blue Book 727.0-B-5](https://public.ccsds.org/Pubs/727x0b5.pdf).
The packets to be sent are generated using the underlying
[spacepackets](https://egit.irs.uni-stuttgart.de/rust/spacepackets) base packet library.
# Features
`cfdp-rs` supports various runtime environments and is also suitable for `no_std` environments.
It is recommended to activate the `alloc` feature at the very least to allow using the primary
components provided by this crate. These components will only allocate memory at initialization
time and thus are still viable for systems where run-time allocation is prohibited.
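For instance, a `no_std` user with allocator support might declare the dependency roughly like
this (a sketch; the version number is illustrative):

```toml
[dependencies.cfdp-rs]
# Illustrative version, use the current release.
version = "0.1"
# Disable the default `std` feature for no_std builds.
default-features = false
# The primary components only allocate memory at initialization time.
features = ["alloc"]
```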
## Default features
- [`std`](https://doc.rust-lang.org/std/): Enables functionality relying on the standard library.
- [`alloc`](https://doc.rust-lang.org/alloc/): Enables features which require allocation support.
Enabled by the `std` feature.
## Optional Features
- [`serde`](https://serde.rs/): Adds `serde` support for most types by adding `Serialize` and `Deserialize` derives.
- [`defmt`](https://defmt.ferrous-systems.com/): Adds support for the `defmt` crate by adding the
[`defmt::Format`](https://defmt.ferrous-systems.com/format) derive on many types.
# Examples
You can check the [documentation](https://docs.rs/cfdp-rs) of individual modules for various usage
examples.
# Coverage
Coverage was generated using [`grcov`](https://github.com/mozilla/grcov). If you have not done so
already, install the `llvm-tools-preview` component and `grcov`:
```sh
rustup component add llvm-tools-preview
cargo install grcov --locked
```
After that, you can simply run `coverage.py` to test the project with coverage. You can optionally
supply the `--open` flag to open the coverage report in your web browser.
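For example, to generate and open the HTML report, or to produce `lcov` output instead (the
latter is converted to HTML with `genhtml`):

```sh
./coverage.py --open
./coverage.py --format lcov
```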

28
automation/Dockerfile Normal file

@@ -0,0 +1,28 @@
# Run the following commands from root directory to build and run locally
# docker build -f automation/Dockerfile -t <NAME> .
# docker run -it <NAME>
FROM rust:latest
RUN apt-get update
RUN apt-get --yes upgrade
# tzdata is a dependency which won't install without a non-interactive frontend
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get --yes install rsync curl
# set CROSS_CONTAINER_IN_CONTAINER to inform `cross` that it is executed from within a container
ENV CROSS_CONTAINER_IN_CONTAINER=true
RUN rustup install nightly && \
rustup target add thumbv7em-none-eabihf armv7-unknown-linux-gnueabihf && \
rustup component add rustfmt clippy llvm-tools-preview
# Get grcov
RUN curl -sSL https://github.com/mozilla/grcov/releases/download/v0.8.19/grcov-x86_64-unknown-linux-gnu.tar.bz2 | tar -xj --directory /usr/local/bin
# Get nextest
RUN curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin
# SSH stuff to allow deployment to doc server
RUN adduser --uid 114 jenkins
# Add documentation server to known hosts
RUN echo "|1|/LzCV4BuTmTb2wKnD146l9fTKgQ=|NJJtVjvWbtRt8OYqFgcYRnMQyVw= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNL8ssTonYtgiR/6RRlSIK9WU1ywOcJmxFTLcEblAwH7oifZzmYq3XRfwXrgfMpylEfMFYfCU8JRqtmi19xc21A=" >> /etc/ssh/ssh_known_hosts
RUN echo "|1|CcBvBc3EG03G+XM5rqRHs6gK/Gg=|oGeJQ+1I8NGI2THIkJsW92DpTzs= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNL8ssTonYtgiR/6RRlSIK9WU1ywOcJmxFTLcEblAwH7oifZzmYq3XRfwXrgfMpylEfMFYfCU8JRqtmi19xc21A=" >> /etc/ssh/ssh_known_hosts

81
automation/Jenkinsfile vendored Normal file

@@ -0,0 +1,81 @@
pipeline {
agent {
dockerfile {
dir 'automation'
reuseNode true
args '--network host'
}
}
stages {
stage('Rust Toolchain Info') {
steps {
sh 'rustc --version'
}
}
stage('Clippy') {
steps {
sh 'cargo clippy'
}
}
stage('Docs') {
steps {
sh """
RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options" cargo +nightly doc --all-features
"""
}
}
stage('Rustfmt') {
steps {
sh 'cargo fmt --all --check'
}
}
stage('Test') {
steps {
sh 'cargo nextest r --all-features'
sh 'cargo test --doc'
}
}
stage('Check with all features') {
steps {
sh 'cargo check --all-features'
}
}
stage('Check with no features') {
steps {
sh 'cargo check --no-default-features'
}
}
stage('Check Cross Embedded Bare Metal') {
steps {
sh 'cargo check --target thumbv7em-none-eabihf --no-default-features'
}
}
stage('Check Cross Embedded Linux') {
steps {
sh 'cargo check --target armv7-unknown-linux-gnueabihf'
}
}
stage('Run test with Coverage') {
when {
anyOf {
branch 'main';
branch pattern: 'cov-deployment*'
}
}
steps {
withEnv(['RUSTFLAGS=-Cinstrument-coverage', 'LLVM_PROFILE_FILE=target/coverage/%p-%m.profraw']) {
echo "Executing tests with coverage"
sh 'cargo clean'
sh 'cargo test --all-features'
sh 'grcov . -s . --binary-path ./target/debug -t html --branch --ignore-not-existing -o ./target/debug/coverage/'
sshagent(credentials: ['documentation-buildfix']) {
// Deploy to Apache webserver
sh 'rsync --mkpath -r --delete ./target/debug/coverage/ buildfix@documentation.irs.uni-stuttgart.de:/projects/cfdp/coverage-rs/latest/'
}
}
}
}
}
}

54
coverage.py Executable file

@@ -0,0 +1,54 @@
#!/usr/bin/env python3
import os
import logging
import argparse
import webbrowser
_LOGGER = logging.getLogger()
def generate_cov_report(open_report: bool, format: str):
logging.basicConfig(level=logging.INFO)
os.environ["RUSTFLAGS"] = "-Cinstrument-coverage"
os.environ["LLVM_PROFILE_FILE"] = "target/coverage/%p-%m.profraw"
_LOGGER.info("Executing tests with coverage")
os.system("cargo test --all-features")
out_path = "./target/debug/coverage"
if format == "lcov":
out_path = "./target/debug/lcov.info"
os.system(
f"grcov . -s . --binary-path ./target/debug/ -t {format} --branch --ignore-not-existing "
f"--ignore \"examples/*\" -o {out_path}"
)
if format == "lcov":
os.system(
"genhtml -o ./target/debug/coverage/ --show-details --highlight --ignore-errors source "
"--legend ./target/debug/lcov.info"
)
if open_report:
coverage_report_path = os.path.abspath("./target/debug/coverage/index.html")
webbrowser.open_new_tab(coverage_report_path)
_LOGGER.info("Done")
def main():
parser = argparse.ArgumentParser(
description="Generate coverage report and optionally open it in a browser"
)
parser.add_argument(
"--open", action="store_true", help="Open the coverage report in a browser"
)
parser.add_argument(
"--format",
choices=["html", "lcov"],
default="html",
help="Choose report format (html or lcov)",
)
args = parser.parse_args()
generate_cov_report(args.open, args.format)
if __name__ == "__main__":
main()

1
examples/python-interop/.gitignore vendored Normal file

@@ -0,0 +1 @@
/venv

37
examples/python-interop/README.md Normal file

@@ -0,0 +1,37 @@
Python Interoperability Example for cfdp-rs
=======
This example application showcases the interoperability of the CFDP handlers written in Rust
with a Python implementation which uses the [cfdp-py](https://github.com/us-irs/cfdp-py) library.
Both the Rust and the Python app exchange packet data units via a UDP interface, and each
launches both a destination and a source handler. As such, both apps are able to send and
receive files.
Both applications can be started with the command line argument `-f` to initiate a file transfer.
You can run both applications with `-h` to get more information about the available options.
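For example, one possible way to trigger a transfer from the Rust entity to the Python entity is
to start both applications as described in the sections below and pass `-f` to the Rust side:

```sh
# Terminal 1: Python entity
./main.py
# Terminal 2: Rust entity, initiating a file copy operation
cargo run --example python-interop -- -f
```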
## Running the Python App
It is recommended to run the Python App in a dedicated virtual environment. For example, on a
Unix system you can use `python3 -m venv venv` and then `source venv/bin/activate` to create
and activate a virtual environment.
After that, you can install the required dependencies using
```sh
pip install -r requirements.txt
```
and then run the application using `./main.py` or `python3 main.py`.
It is recommended to run `./main.py -h` first to get an overview of some possible options.
Running the Python App with `./main.py -f` will cause the Python App to start a file copy operation
with fixed temporary paths.
## Running the Rust App
You can run the Rust application using `cargo`, for example `cargo run --example python-interop`.
It is recommended to run `cargo run --example python-interop -- -h` to get an overview of some
possible launch options.
Running the Rust App with `cargo run --example python-interop -- -f` will cause the Rust app to
start a file copy operation with fixed temporary paths.

682
examples/python-interop/main.py Executable file

@@ -0,0 +1,682 @@
#!/usr/bin/env python3
from datetime import timedelta
from pathlib import Path
import os
import ipaddress
import tempfile
import socket
import select
import threading
import argparse
import logging
import time
import copy
from threading import Thread, Event
from typing import Any, Dict, List, Tuple, Optional
from multiprocessing import Queue
from queue import Empty
from cfdppy.handler import DestHandler, RemoteEntityCfgTable, SourceHandler
from cfdppy.exceptions import InvalidDestinationId, SourceFileDoesNotExist
from cfdppy import (
CfdpUserBase,
LocalEntityCfg,
PacketDestination,
PutRequest,
TransactionId,
get_packet_destination,
CfdpState,
)
from cfdppy.mib import (
CheckTimerProvider,
DefaultFaultHandlerBase,
EntityType,
IndicationCfg,
RemoteEntityCfg,
)
from cfdppy.user import (
FileSegmentRecvdParams,
MetadataRecvParams,
TransactionFinishedParams,
TransactionParams,
)
from spacepackets.cfdp import ChecksumType, ConditionCode, TransmissionMode
from spacepackets.cfdp.pdu import AbstractFileDirectiveBase, PduFactory, PduHolder
from spacepackets.cfdp.tlv import (
MessageToUserTlv,
OriginatingTransactionId,
ProxyMessageType,
ProxyPutResponse,
ReservedCfdpMessage,
)
from spacepackets.cfdp.tlv.msg_to_user import ProxyPutResponseParams
from spacepackets.countdown import Countdown
from spacepackets.seqcount import SeqCountProvider
from spacepackets.util import ByteFieldU16, UnsignedByteField
PYTHON_ENTITY_ID = ByteFieldU16(1)
RUST_ENTITY_ID = ByteFieldU16(2)
# Enable all indications for both local and remote entity.
INDICATION_CFG = IndicationCfg()
BASE_STR_SRC = "PY SRC"
BASE_STR_DEST = "PY DEST"
FILE_CONTENT = "Hello World!\n"
FILE_SEGMENT_SIZE = 256
MAX_PACKET_LEN = 512
# This queue is used to send put requests.
PUT_REQ_QUEUE = Queue()
# All telecommands which should go to the source handler should be put into this queue by
# the UDP server.
SOURCE_ENTITY_QUEUE = Queue()
# All telecommands which should go to the destination handler should be put into this queue by
# the UDP server.
DEST_ENTITY_QUEUE = Queue()
# All telemetry which should be sent to the remote entity is put into this queue and will then
# be sent by the UDP server.
TM_QUEUE = Queue()
REMOTE_CFG_OF_PY_ENTITY = RemoteEntityCfg(
entity_id=PYTHON_ENTITY_ID,
max_packet_len=MAX_PACKET_LEN,
max_file_segment_len=FILE_SEGMENT_SIZE,
closure_requested=True,
crc_on_transmission=False,
default_transmission_mode=TransmissionMode.ACKNOWLEDGED,
crc_type=ChecksumType.CRC_32,
)
REMOTE_CFG_OF_REMOTE_ENTITY = copy.copy(REMOTE_CFG_OF_PY_ENTITY)
REMOTE_CFG_OF_REMOTE_ENTITY.entity_id = RUST_ENTITY_ID
RUST_PORT = 5111
PY_PORT = 5222
_LOGGER = logging.getLogger(__name__)
class UdpServer(Thread):
def __init__(
self,
sleep_time: float,
addr: Tuple[str, int],
explicit_remote_addr: Optional[Tuple[str, int]],
tx_queue: Queue,
source_entity_rx_queue: Queue,
dest_entity_rx_queue: Queue,
stop_signal: Event,
):
super().__init__()
self.sleep_time = sleep_time
self.udp_socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
self.addr = addr
self.explicit_remote_addr = explicit_remote_addr
self.udp_socket.bind(addr)
self.tm_queue = tx_queue
self.last_sender = None
self.stop_signal = stop_signal
self.source_entity_queue = source_entity_rx_queue
self.dest_entity_queue = dest_entity_rx_queue
def run(self):
_LOGGER.info(f"Starting UDP server on {self.addr}")
while True:
if self.stop_signal.is_set():
break
self.periodic_operation()
time.sleep(self.sleep_time)
def periodic_operation(self):
while True:
next_packet = self.poll_next_udp_packet()
if next_packet is None or next_packet.pdu is None:
break
# Perform PDU routing.
packet_dest = get_packet_destination(next_packet.pdu)
_LOGGER.debug(f"UDP server: Routing {next_packet} to {packet_dest}")
if packet_dest == PacketDestination.DEST_HANDLER:
self.dest_entity_queue.put(next_packet.pdu)
elif packet_dest == PacketDestination.SOURCE_HANDLER:
self.source_entity_queue.put(next_packet.pdu)
self.send_packets()
def poll_next_udp_packet(self) -> Optional[PduHolder]:
ready = select.select([self.udp_socket], [], [], 0)
if ready[0]:
data, self.last_sender = self.udp_socket.recvfrom(4096)
return PduFactory.from_raw_to_holder(data)
return None
def send_packets(self):
while True:
try:
next_tm = self.tm_queue.get(False)
if not isinstance(next_tm, bytes) and not isinstance(
next_tm, bytearray
):
_LOGGER.error(
f"UDP server can only sent bytearray, received {next_tm}"
)
continue
if self.explicit_remote_addr is not None:
self.udp_socket.sendto(next_tm, self.explicit_remote_addr)
elif self.last_sender is not None:
self.udp_socket.sendto(next_tm, self.last_sender)
else:
_LOGGER.warning(
"UDP Server: No packet destination found, dropping TM"
)
except Empty:
break
class SourceEntityHandler(Thread):
def __init__(
self,
base_str: str,
verbose_level: int,
source_handler: SourceHandler,
put_req_queue: Queue,
source_entity_queue: Queue,
tm_queue: Queue,
stop_signal: Event,
):
super().__init__()
self.base_str = base_str
self.verbose_level = verbose_level
self.source_handler = source_handler
self.put_req_queue = put_req_queue
self.source_entity_queue = source_entity_queue
self.tm_queue = tm_queue
self.stop_signal = stop_signal
def _idle_handling(self) -> bool:
try:
put_req: PutRequest = self.put_req_queue.get(False)
_LOGGER.info(f"{self.base_str}: Handling Put Request: {put_req}")
if put_req.destination_id not in [PYTHON_ENTITY_ID, RUST_ENTITY_ID]:
_LOGGER.warning(
f"can only handle put requests target towards {RUST_ENTITY_ID} or "
f"{PYTHON_ENTITY_ID}"
)
else:
try:
self.source_handler.put_request(put_req)
except SourceFileDoesNotExist as e:
_LOGGER.warning(
f"can not handle put request, source file {e.file} does not exist"
)
return True
except Empty:
return False
def _busy_handling(self):
# We are getting the packets from a Queue here, they could for example also be polled
# from a network.
packet_received = False
packet = None
try:
# We are getting the packets from a Queue here, they could for example also be polled
# from a network.
packet = self.source_entity_queue.get(False)
packet_received = True
except Empty:
pass
try:
packet_sent = self._call_source_state_machine(packet)
# If there is no work to do, put the thread to sleep.
if not packet_received and not packet_sent:
return False
except SourceFileDoesNotExist:
_LOGGER.warning("Source file does not exist")
self.source_handler.reset()
def _call_source_state_machine(
self, packet: Optional[AbstractFileDirectiveBase]
) -> bool:
"""Returns whether a packet was sent."""
if packet is not None:
_LOGGER.debug(f"{self.base_str}: Inserting {packet}")
try:
fsm_result = self.source_handler.state_machine(packet)
except InvalidDestinationId as e:
_LOGGER.warning(
f"invalid destination ID {e.found_dest_id} on packet {packet}, expected "
f"{e.expected_dest_id}"
)
fsm_result = self.source_handler.state_machine(None)
packet_sent = False
if fsm_result.states.num_packets_ready > 0:
while fsm_result.states.num_packets_ready > 0:
next_pdu_wrapper = self.source_handler.get_next_packet()
assert next_pdu_wrapper is not None
if self.verbose_level >= 1:
_LOGGER.debug(
f"{self.base_str}: Sending packet {next_pdu_wrapper.pdu}"
)
# Send all packets which need to be sent.
self.tm_queue.put(next_pdu_wrapper.pack())
packet_sent = True
return packet_sent
def run(self):
_LOGGER.info(f"Starting {self.base_str}")
while True:
if self.stop_signal.is_set():
break
if self.source_handler.state == CfdpState.IDLE:
if not self._idle_handling():
time.sleep(0.2)
continue
if self.source_handler.state == CfdpState.BUSY:
if not self._busy_handling():
time.sleep(0.2)
class DestEntityHandler(Thread):
def __init__(
self,
base_str: str,
verbose_level: int,
dest_handler: DestHandler,
dest_entity_queue: Queue,
tm_queue: Queue,
stop_signal: Event,
):
super().__init__()
self.base_str = base_str
self.verbose_level = verbose_level
self.dest_handler = dest_handler
self.dest_entity_queue = dest_entity_queue
self.tm_queue = tm_queue
self.stop_signal = stop_signal
def run(self):
_LOGGER.info(
f"Starting {self.base_str}. Local ID {self.dest_handler.cfg.local_entity_id}"
)
while True:
packet_received = False
packet = None
if self.stop_signal.is_set():
break
try:
packet = self.dest_entity_queue.get(False)
packet_received = True
except Empty:
pass
if packet is not None:
_LOGGER.debug(f"{self.base_str}: Inserting {packet}")
fsm_result = self.dest_handler.state_machine(packet)
packet_sent = False
if fsm_result.states.num_packets_ready > 0:
while fsm_result.states.num_packets_ready > 0:
next_pdu_wrapper = self.dest_handler.get_next_packet()
assert next_pdu_wrapper is not None
if self.verbose_level >= 1:
_LOGGER.debug(
f"{self.base_str}: Sending packet {next_pdu_wrapper.pdu}"
)
self.tm_queue.put(next_pdu_wrapper.pack())
packet_sent = True
# If there is no work to do, put the thread to sleep.
if not packet_received and not packet_sent:
time.sleep(0.5)
class CfdpFaultHandler(DefaultFaultHandlerBase):
def __init__(self, base_str: str):
self.base_str = base_str
super().__init__()
def notice_of_suspension_cb(
self, transaction_id: TransactionId, cond: ConditionCode, progress: int
):
_LOGGER.warning(
f"{self.base_str}: Received Notice of Suspension for transaction {transaction_id!r} "
f"with condition code {cond!r}. Progress: {progress}"
)
def notice_of_cancellation_cb(
self, transaction_id: TransactionId, cond: ConditionCode, progress: int
):
_LOGGER.warning(
f"{self.base_str}: Received Notice of Cancellation for transaction {transaction_id!r} "
f"with condition code {cond!r}. Progress: {progress}"
)
def abandoned_cb(
self, transaction_id: TransactionId, cond: ConditionCode, progress: int
):
_LOGGER.warning(
f"{self.base_str}: Abandoned fault for transaction {transaction_id!r} "
f"with condition code {cond!r}. Progress: {progress}"
)
def ignore_cb(
self, transaction_id: TransactionId, cond: ConditionCode, progress: int
):
_LOGGER.warning(
f"{self.base_str}: Ignored fault for transaction {transaction_id!r} "
f"with condition code {cond!r}. Progress: {progress}"
)
class CfdpUser(CfdpUserBase):
def __init__(self, base_str: str, put_req_queue: Queue):
self.base_str = base_str
self.put_req_queue = put_req_queue
# This is a dictionary where the key is the current transaction ID for a transaction which
# was triggered by a proxy request with an originating ID.
self.active_proxy_put_reqs: Dict[TransactionId, TransactionId] = {}
super().__init__()
def transaction_indication(
self,
transaction_indication_params: TransactionParams,
):
"""This indication is used to report the transaction ID to the CFDP user"""
_LOGGER.info(
f"{self.base_str}: Transaction.indication for {transaction_indication_params.transaction_id}"
)
if transaction_indication_params.originating_transaction_id is not None:
_LOGGER.info(
f"Originating Transaction ID: {transaction_indication_params.originating_transaction_id}"
)
self.active_proxy_put_reqs.update(
{
transaction_indication_params.transaction_id: transaction_indication_params.originating_transaction_id
}
)
def eof_sent_indication(self, transaction_id: TransactionId):
_LOGGER.info(f"{self.base_str}: EOF-Sent.indication for {transaction_id}")
def transaction_finished_indication(self, params: TransactionFinishedParams):
_LOGGER.info(
f"{self.base_str}: Transaction-Finished.indication for {params.transaction_id}."
)
_LOGGER.info(f"Condition Code: {params.finished_params.condition_code!r}")
_LOGGER.info(f"Delivery Code: {params.finished_params.delivery_code!r}")
_LOGGER.info(f"File Status: {params.finished_params.file_status!r}")
if params.transaction_id in self.active_proxy_put_reqs:
proxy_put_response = ProxyPutResponse(
ProxyPutResponseParams.from_finished_params(params.finished_params)
).to_generic_msg_to_user_tlv()
originating_id = self.active_proxy_put_reqs.get(params.transaction_id)
assert originating_id is not None
put_req = PutRequest(
destination_id=originating_id.source_id,
source_file=None,
dest_file=None,
trans_mode=None,
closure_requested=None,
msgs_to_user=[
proxy_put_response,
OriginatingTransactionId(
originating_id
).to_generic_msg_to_user_tlv(),
],
)
_LOGGER.info(
f"Requesting Proxy Put Response concluding Proxy Put originating from "
f"{originating_id}"
)
self.put_req_queue.put(put_req)
self.active_proxy_put_reqs.pop(params.transaction_id)
def metadata_recv_indication(self, params: MetadataRecvParams):
_LOGGER.info(
f"{self.base_str}: Metadata-Recv.indication for {params.transaction_id}."
)
if params.msgs_to_user is not None:
self._handle_msgs_to_user(params.transaction_id, params.msgs_to_user)
def _handle_msgs_to_user(
self, transaction_id: TransactionId, msgs_to_user: List[MessageToUserTlv]
):
for msg_to_user in msgs_to_user:
if msg_to_user.is_reserved_cfdp_message():
reserved_msg_tlv = msg_to_user.to_reserved_msg_tlv()
assert reserved_msg_tlv is not None
self._handle_reserved_cfdp_message(transaction_id, reserved_msg_tlv)
else:
_LOGGER.info(f"Received custom message to user: {msg_to_user}")
def _handle_reserved_cfdp_message(
self, transaction_id: TransactionId, reserved_cfdp_msg: ReservedCfdpMessage
):
if reserved_cfdp_msg.is_cfdp_proxy_operation():
self._handle_cfdp_proxy_operation(transaction_id, reserved_cfdp_msg)
elif reserved_cfdp_msg.is_originating_transaction_id():
_LOGGER.info(
f"Received originating transaction ID: "
f"{reserved_cfdp_msg.get_originating_transaction_id()}"
)
def _handle_cfdp_proxy_operation(
self, transaction_id: TransactionId, reserved_cfdp_msg: ReservedCfdpMessage
):
if (
reserved_cfdp_msg.get_cfdp_proxy_message_type()
== ProxyMessageType.PUT_REQUEST
):
put_req_params = reserved_cfdp_msg.get_proxy_put_request_params()
_LOGGER.info(f"Received Proxy Put Request: {put_req_params}")
assert put_req_params is not None
put_req = PutRequest(
destination_id=put_req_params.dest_entity_id,
source_file=Path(put_req_params.source_file_as_path),
dest_file=Path(put_req_params.dest_file_as_path),
trans_mode=None,
closure_requested=None,
msgs_to_user=[
OriginatingTransactionId(
transaction_id
).to_generic_msg_to_user_tlv()
],
)
self.put_req_queue.put(put_req)
elif (
reserved_cfdp_msg.get_cfdp_proxy_message_type()
== ProxyMessageType.PUT_RESPONSE
):
put_response_params = reserved_cfdp_msg.get_proxy_put_response_params()
_LOGGER.info(f"Received Proxy Put Response: {put_response_params}")
def file_segment_recv_indication(self, params: FileSegmentRecvdParams):
_LOGGER.info(
f"{self.base_str}: File-Segment-Recv.indication for {params.transaction_id}."
)
def report_indication(self, transaction_id: TransactionId, status_report: Any):
# TODO: p.28 of the CFDP standard specifies what information the status report parameter
# could contain. I think it would be better to not hardcode the type of the status
# report here, but something like Union[any, CfdpStatusReport] with CfdpStatusReport
# being an implementation which supports all three information suggestions would be
# nice
pass
def suspended_indication(
self, transaction_id: TransactionId, cond_code: ConditionCode
):
_LOGGER.info(
f"{self.base_str}: Suspended.indication for {transaction_id} | Condition Code: {cond_code}"
)
def resumed_indication(self, transaction_id: TransactionId, progress: int):
_LOGGER.info(
f"{self.base_str}: Resumed.indication for {transaction_id} | Progress: {progress} bytes"
)
def fault_indication(
self, transaction_id: TransactionId, cond_code: ConditionCode, progress: int
):
_LOGGER.info(
f"{self.base_str}: Fault.indication for {transaction_id} | Condition Code: {cond_code} | "
f"Progress: {progress} bytes"
)
def abandoned_indication(
self, transaction_id: TransactionId, cond_code: ConditionCode, progress: int
):
_LOGGER.info(
f"{self.base_str}: Abandoned.indication for {transaction_id} | Condition Code: {cond_code} |"
f" Progress: {progress} bytes"
)
def eof_recv_indication(self, transaction_id: TransactionId):
_LOGGER.info(f"{self.base_str}: EOF-Recv.indication for {transaction_id}")
class CustomCheckTimerProvider(CheckTimerProvider):
def provide_check_timer(
self,
local_entity_id: UnsignedByteField,
remote_entity_id: UnsignedByteField,
entity_type: EntityType,
) -> Countdown:
return Countdown(timedelta(seconds=5.0))
def main():
parser = argparse.ArgumentParser(
prog="CFDP Local Entity Application",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument(
"-f",
help="Perform a file-copy operation",
action="store_true",
dest="file_copy",
)
parser.add_argument(
"-m",
"--mode",
dest="transmission_mode",
help=(
f"Specify the transfer type{os.linesep}"
f' - "0" or "ack" for unacknowledged (Class 0) transfers{os.linesep}'
f' - "1" or "nak" for acknowledged (Class 1) transfers. Default value'
),
default="nak",
)
# Optional Boolean argument where you can specify True/False
parser.add_argument(
"-c",
type=bool,
nargs="?",
const=True,
default=None,
dest="closure_requested",
help="Request transaction closure for the unacknowledged mode",
)
args = parser.parse_args()
stop_signal = threading.Event()
logging_level = logging.INFO
if args.verbose >= 1:
logging_level = logging.DEBUG
logging.basicConfig(level=logging_level)
remote_cfg_table = RemoteEntityCfgTable()
remote_cfg_table.add_config(REMOTE_CFG_OF_REMOTE_ENTITY)
src_fault_handler = CfdpFaultHandler(BASE_STR_SRC)
# 16 bit sequence count for transactions.
src_seq_count_provider = SeqCountProvider(16)
src_user = CfdpUser(BASE_STR_SRC, PUT_REQ_QUEUE)
check_timer_provider = CustomCheckTimerProvider()
source_handler = SourceHandler(
cfg=LocalEntityCfg(PYTHON_ENTITY_ID, INDICATION_CFG, src_fault_handler),
seq_num_provider=src_seq_count_provider,
remote_cfg_table=remote_cfg_table,
user=src_user,
check_timer_provider=check_timer_provider,
)
source_entity_task = SourceEntityHandler(
BASE_STR_SRC,
logging_level,
source_handler,
PUT_REQ_QUEUE,
SOURCE_ENTITY_QUEUE,
TM_QUEUE,
stop_signal,
)
# Enable all indications.
dest_fault_handler = CfdpFaultHandler(BASE_STR_DEST)
dest_user = CfdpUser(BASE_STR_DEST, PUT_REQ_QUEUE)
dest_handler = DestHandler(
cfg=LocalEntityCfg(PYTHON_ENTITY_ID, INDICATION_CFG, dest_fault_handler),
user=dest_user,
remote_cfg_table=remote_cfg_table,
check_timer_provider=check_timer_provider,
)
dest_entity_task = DestEntityHandler(
BASE_STR_DEST,
logging_level,
dest_handler,
DEST_ENTITY_QUEUE,
TM_QUEUE,
stop_signal,
)
# Bind to the Any address to accept CFDP packets from addresses other than localhost.
local_addr = ipaddress.ip_address("0.0.0.0")
# Localhost as default.
remote_addr = ipaddress.ip_address("127.0.0.1")
udp_server = UdpServer(
sleep_time=0.1,
addr=(str(local_addr), PY_PORT),
explicit_remote_addr=(str(remote_addr), RUST_PORT),
tx_queue=TM_QUEUE,
source_entity_rx_queue=SOURCE_ENTITY_QUEUE,
dest_entity_rx_queue=DEST_ENTITY_QUEUE,
stop_signal=stop_signal,
)
# Prepare a put request / file copy operation if the user specifies it.
if args.file_copy:
_LOGGER.info("Performing file copy operation")
transmission_mode = None
if args.transmission_mode == "ack":
transmission_mode = TransmissionMode.ACKNOWLEDGED
elif args.transmission_mode == "nak":
transmission_mode = TransmissionMode.UNACKNOWLEDGED
with tempfile.NamedTemporaryFile(delete=False) as srcfile:
srcfile.write(FILE_CONTENT.encode())
srcfile_path = srcfile.name
tempdir = tempfile.TemporaryDirectory()
put_req = PutRequest(
destination_id=RUST_ENTITY_ID,
source_file=Path(srcfile_path),
dest_file=Path(tempdir.name).joinpath("test.txt"),
closure_requested=args.closure_requested,
trans_mode=transmission_mode,
)
PUT_REQ_QUEUE.put(put_req)
source_entity_task.start()
dest_entity_task.start()
udp_server.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
stop_signal.set()
source_entity_task.join()
dest_entity_task.join()
udp_server.join()
if __name__ == "__main__":
main()

520
examples/python-interop/main.rs Normal file

@@ -0,0 +1,520 @@
use std::{
fmt::Debug,
fs::OpenOptions,
io::{self, ErrorKind, Write},
net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs, UdpSocket},
sync::mpsc,
thread,
time::Duration,
};
use cfdp::{
dest::DestinationHandler,
filestore::NativeFilestore,
request::{PutRequestOwned, StaticPutRequestCacher},
source::SourceHandler,
user::{CfdpUser, FileSegmentRecvdParams, MetadataReceivedParams, TransactionFinishedParams},
EntityType, IndicationConfig, LocalEntityConfig, PduOwnedWithInfo, PduProvider,
RemoteEntityConfig, StdTimerCreator, TransactionId, UserFaultHookProvider,
};
use clap::Parser;
use log::{debug, info, warn};
use spacepackets::{
cfdp::{
pdu::{file_data::FileDataPdu, metadata::MetadataPduReader, PduError},
ChecksumType, ConditionCode, TransmissionMode,
},
seq_count::SeqCountProviderSyncU16,
util::{UnsignedByteFieldU16, UnsignedEnum},
};
const PYTHON_ID: UnsignedByteFieldU16 = UnsignedByteFieldU16::new(1);
const RUST_ID: UnsignedByteFieldU16 = UnsignedByteFieldU16::new(2);
const RUST_PORT: u16 = 5111;
const PY_PORT: u16 = 5222;
const LOG_LEVEL: log::LevelFilter = log::LevelFilter::Info;
const FILE_DATA: &str = "Hello World!";
#[derive(Debug, Copy, Clone, clap::ValueEnum)]
pub enum TransmissionModeCli {
Nak,
Ack,
}
#[derive(clap::Parser)]
#[command(about = "Arguments for executing a file copy operation")]
pub struct Cli {
#[arg(short, help = "Perform a file copy operation")]
file_copy: bool,
#[arg(short, default_value = "nak")]
mode: Option<TransmissionModeCli>,
#[arg(short)]
closure_requested: Option<bool>,
}
#[derive(Default)]
pub struct ExampleFaultHandler {}
impl UserFaultHookProvider for ExampleFaultHandler {
fn notice_of_suspension_cb(
&mut self,
transaction_id: TransactionId,
cond: ConditionCode,
progress: u64,
) {
panic!(
"unexpected suspension of transaction {:?}, condition code {:?}, progress {}",
transaction_id, cond, progress
);
}
fn notice_of_cancellation_cb(
&mut self,
transaction_id: TransactionId,
cond: ConditionCode,
progress: u64,
) {
panic!(
"unexpected cancellation of transaction {:?}, condition code {:?}, progress {}",
transaction_id, cond, progress
);
}
fn abandoned_cb(&mut self, transaction_id: TransactionId, cond: ConditionCode, progress: u64) {
panic!(
"unexpected abandonment of transaction {:?}, condition code {:?}, progress {}",
transaction_id, cond, progress
);
}
fn ignore_cb(&mut self, transaction_id: TransactionId, cond: ConditionCode, progress: u64) {
panic!(
"ignoring unexpected error in transaction {:?}, condition code {:?}, progress {}",
transaction_id, cond, progress
);
}
}
pub struct ExampleCfdpUser {
entity_type: EntityType,
}
impl ExampleCfdpUser {
pub fn new(entity_type: EntityType) -> Self {
Self { entity_type }
}
}
impl CfdpUser for ExampleCfdpUser {
fn transaction_indication(&mut self, id: &crate::TransactionId) {
println!(
"{:?} entity: Transaction indication for {:?}",
self.entity_type, id
);
}
fn eof_sent_indication(&mut self, id: &crate::TransactionId) {
println!(
"{:?} entity: EOF sent for transaction {:?}",
self.entity_type, id
);
}
fn transaction_finished_indication(&mut self, finished_params: &TransactionFinishedParams) {
println!(
"{:?} entity: Transaction finished: {:?}",
self.entity_type, finished_params
);
}
fn metadata_recvd_indication(&mut self, md_recvd_params: &MetadataReceivedParams) {
println!(
"{:?} entity: Metadata received: {:?}",
self.entity_type, md_recvd_params
);
}
fn file_segment_recvd_indication(&mut self, segment_recvd_params: &FileSegmentRecvdParams) {
println!(
"{:?} entity: File segment {:?} received",
self.entity_type, segment_recvd_params
);
}
fn report_indication(&mut self, _id: &crate::TransactionId) {}
fn suspended_indication(&mut self, _id: &crate::TransactionId, _condition_code: ConditionCode) {
panic!("unexpected suspended indication");
}
fn resumed_indication(&mut self, _id: &crate::TransactionId, _progress: u64) {}
fn fault_indication(
&mut self,
_id: &crate::TransactionId,
_condition_code: ConditionCode,
_progress: u64,
) {
panic!("unexpected fault indication");
}
fn abandoned_indication(
&mut self,
_id: &crate::TransactionId,
_condition_code: ConditionCode,
_progress: u64,
) {
panic!("unexpected abandoned indication");
}
fn eof_recvd_indication(&mut self, id: &crate::TransactionId) {
println!(
"{:?} entity: EOF received for transaction {:?}",
self.entity_type, id
);
}
}
pub struct UdpServer {
pub socket: UdpSocket,
recv_buf: Vec<u8>,
remote_addr: SocketAddr,
source_tc_tx: mpsc::Sender<PduOwnedWithInfo>,
dest_tc_tx: mpsc::Sender<PduOwnedWithInfo>,
source_tm_rx: mpsc::Receiver<PduOwnedWithInfo>,
dest_tm_rx: mpsc::Receiver<PduOwnedWithInfo>,
}
#[derive(Debug, thiserror::Error)]
pub enum UdpServerError {
#[error(transparent)]
Io(#[from] io::Error),
#[error("pdu error: {0}")]
Pdu(#[from] PduError),
#[error("send error")]
Send,
}
impl UdpServer {
pub fn new<A: ToSocketAddrs>(
addr: A,
remote_addr: SocketAddr,
max_recv_size: usize,
source_tc_tx: mpsc::Sender<PduOwnedWithInfo>,
dest_tc_tx: mpsc::Sender<PduOwnedWithInfo>,
source_tm_rx: mpsc::Receiver<PduOwnedWithInfo>,
dest_tm_rx: mpsc::Receiver<PduOwnedWithInfo>,
) -> Result<Self, io::Error> {
let server = Self {
socket: UdpSocket::bind(addr)?,
recv_buf: vec![0; max_recv_size],
source_tc_tx,
dest_tc_tx,
remote_addr,
source_tm_rx,
dest_tm_rx,
};
server.socket.set_nonblocking(true)?;
Ok(server)
}
pub fn try_recv_tc(
&mut self,
) -> Result<Option<(PduOwnedWithInfo, SocketAddr)>, UdpServerError> {
let res = match self.socket.recv_from(&mut self.recv_buf) {
Ok(res) => res,
Err(e) => {
return if e.kind() == ErrorKind::WouldBlock || e.kind() == ErrorKind::TimedOut {
Ok(None)
}