Compare commits
30 commits: main ... serializat
| SHA1 |
|---|
| 2eaa78dfbc |
| a6d9bee5df |
| a77bbfa953 |
| 4c67bcdde1 |
| a710b30013 |
| 29783b2b07 |
| 2a2a3a3eab |
| 2507469e68 |
| b4febefa33 |
| fe60cb9ccf |
| 27e88ed7f7 |
| 295fed9a72 |
| 8e89c8dd66 |
| cb0a65c4d4 |
| 3db54da3df |
| 15fcb17363 |
| 8728c7ebea |
| 7606767f63 |
| 37b32a9008 |
| 9e096193dd |
| 43bd77eef0 |
| a4888bce01 |
| 6e5b70af34 |
| d1476eb770 |
| 783388aa6f |
| 4a8db6b26a |
| b86c2eb1d1 |
| fe4126f7e2 |
| c20163b10a |
| b970154488 |
@@ -11,4 +11,6 @@ members = [

exclude = [
    "embedded-examples/stm32f3-disco-rtic",
    "embedded-examples/stm32h7-rtic",
    "serialization-prototyping",
]
README.md
@@ -37,16 +37,13 @@ This project currently contains the following crates:

* [`satrs-example`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example):
  Example of a simple on-board software using various sat-rs components which can be run
  on a host computer or on any system with a standard runtime like a Raspberry Pi.
* [`satrs-minisim`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-minisim):
  Mini-Simulator based on [asynchronix](https://github.com/asynchronics/asynchronix) which
  simulates some physical devices for the `satrs-example` application device handlers.
* [`satrs-mib`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-mib):
  Components to build a mission information base from the on-board software directly.
* [`satrs-stm32f3-disco-rtic`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/embedded-examples/stm32f3-disco-rtic):
* [`satrs-stm32f3-disco-rtic`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/embedded-examples/satrs-stm32f3-disco-rtic):
  Example of a simple application using low-level sat-rs components on a bare-metal system
  with constrained resources. This example uses the [RTIC](https://github.com/rtic-rs/rtic)
  framework on the STM32F3-Discovery device.
* [`satrs-stm32h-nucleo-rtic`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/embedded-examples/stm32h7-nucleo-rtic):
* [`satrs-stm32h-nucleo-rtic`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/embedded-examples/satrs-stm32h7-nucleo-rtic):
  Example of a simple application using sat-rs components on a bare-metal system
  with constrained resources. This example uses the [RTIC](https://github.com/rtic-rs/rtic)
  framework on the STM32H743ZIT device.
@@ -72,10 +69,6 @@ Currently this library has the following flight heritage:

  [flown on the satellite](https://blogs.esa.int/rocketscience/2024/05/21/ops-sat-reentry-tomorrow-final-experiments-continue/).
  The application is strongly based on the sat-rs example application. You can find the repository
  of the experiment [here](https://egit.irs.uni-stuttgart.de/rust/ops-sat-rs).
- Development and use of a sat-rs-based [demonstration on-board software](https://egit.irs.uni-stuttgart.de/rust/eurosim-obsw)
  alongside a Flight System Simulator in the context of a
  [Bachelors Thesis](https://www.researchgate.net/publication/380785984_Design_and_Development_of_a_Hardware-in-the-Loop_EuroSim_Demonstrator)
  at [Airbus Netherlands](https://www.airbusdefenceandspacenetherlands.nl/).

# Coverage
@@ -1,4 +1,4 @@

sat-rs example for the STM32H73ZI-Nucleo board
sat-rs example for the STM32F3-Discovery board
=======

This example application shows how the [sat-rs library](https://egit.irs.uni-stuttgart.de/rust/sat-rs)
@@ -1,260 +0,0 @@

[Deleted file: the yEd graphml source (created with yEd 3.23.2) of the mini-simulator architecture
diagram. Nodes: a "Simulation" group containing the PCDU, Magnetometer and Magnetorquer models,
plus SimController, UDP TC Receiver, UDP TM Sender and Client nodes. Edges: SimController to the
PCDU/Magnetometer/Magnetorquer models (one labelled "schedule_event") and SimController to
Simulation ("step"); UDP TC Receiver to SimController ("SimRequest"); the device models to the
UDP TM Sender (one labelled "SimReply"); Client to UDP TC Receiver ("SimRequest in UDP");
UDP TM Sender to Client ("SimReply in UDP"). The full machine-generated XML markup is omitted here.]

Binary file not shown.
Before: 98 KiB image.
@@ -5,8 +5,10 @@ This book is the primary information resource for the [sat-rs library](https://e

in addition to the regular API documentation. It contains the following resources:

1. Architecture information and considerations which would exceed the scope of the regular API.
2. General information on how to build on-board Software and how `sat-rs` can help to fulfill
2. General information on how to build On-Board Software and how `sat-rs` can help to fulfill
   the unique requirements of writing software for remote systems.
3. A Getting-Started workshop where a small On-Board Software is built from scratch using
   sat-rs components.

# Introduction
@@ -29,9 +31,7 @@ and [EIVE](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-

The [`satrs-example`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example)
provides various practical usage examples of the `sat-rs` framework. If you are more interested in
the practical application of `sat-rs` inside an application, it is recommended to have a look at
the example application. The [`satrs-minisim`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-minisim)
application complements the example application and can be used to simulate some physical devices
for the `satrs-example` device handlers.
the example application.

# Flight Heritage
@@ -43,7 +43,3 @@ Currently this library has the following flight heritage:

  [flown on the satellite](https://blogs.esa.int/rocketscience/2024/05/21/ops-sat-reentry-tomorrow-final-experiments-continue/).
  The application is strongly based on the sat-rs example application. You can find the repository
  of the experiment [here](https://egit.irs.uni-stuttgart.de/rust/ops-sat-rs).
- Development and use of a sat-rs-based [demonstration on-board software](https://egit.irs.uni-stuttgart.de/rust/eurosim-obsw)
  alongside a Flight System Simulator in the context of a
  [Bachelors Thesis](https://www.researchgate.net/publication/380785984_Design_and_Development_of_a_Hardware-in-the-Loop_EuroSim_Demonstrator)
  at [Airbus Netherlands](https://www.airbusdefenceandspacenetherlands.nl/).
@@ -7,13 +7,3 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/)

and this project adheres to [Semantic Versioning](http://semver.org/).

# [unreleased]

# [v0.1.1] 2024-02-21

satrs v0.2.0-rc.0
satrs-mib v0.1.1

# [v0.1.0] 2024-02-13

satrs v0.1.1
satrs-mib v0.1.0
@@ -8,18 +8,18 @@ homepage = "https://egit.irs.uni-stuttgart.de/rust/sat-rs"

repository = "https://egit.irs.uni-stuttgart.de/rust/sat-rs"

[dependencies]
fern = "0.7"
fern = "0.6"
chrono = "0.4"
log = "0.4"
crossbeam-channel = "0.5"
delegate = "0.13"
zerocopy = "0.8"
delegate = "0.10"
zerocopy = "0.6"
csv = "1"
num_enum = "0.7"
thiserror = "2"
thiserror = "1"
lazy_static = "1"
strum = { version = "0.26", features = ["derive"] }
derive-new = "0.7"
derive-new = "0.5"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
@@ -73,22 +73,3 @@ the `simpleclient`:

```

You can also simply call the script without any arguments to view the command tree.

## Adding the mini simulator application

This example application features a few device handlers. The
[`satrs-minisim`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-minisim)
can be used to simulate the physical devices managed by these device handlers.

The example application will attempt communication with the mini simulator on UDP port 7303.
If this works, the device handlers will use communication interfaces dedicated to the communication
with the mini simulator. Otherwise, they will be replaced by dummy interfaces which either
return constant values or behave like ideal devices.

In summary, you can use the following command to run the mini-simulator first:

```sh
cargo run -p satrs-minisim
```

and then start the example using `cargo run -p satrs-example`.
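For reference, the full workflow from this README section in one place (both crate names are taken from the text above; run each command in its own terminal):

```sh
# Terminal 1: start the mini simulator first so that UDP port 7303 is served.
cargo run -p satrs-minisim

# Terminal 2: start the example application; it will detect the running simulator
# and use the simulator-backed communication interfaces instead of the dummy ones.
cargo run -p satrs-example
```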
@@ -19,7 +19,7 @@ use satrs::mode::{

};
use satrs::pus::{EcssTmSender, PusTmVariant};
use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId};
use satrs_example::config::components::{NO_SENDER, PUS_MODE_SERVICE};
use satrs_example::config::components::PUS_MODE_SERVICE;

use crate::hk::PusHkHelper;
use crate::pus::hk::{HkReply, HkReplyVariant};

@@ -132,7 +132,6 @@ pub struct MgmData {

pub struct MpscModeLeafInterface {
    pub request_rx: mpsc::Receiver<GenericMessage<ModeRequest>>,
    pub reply_to_pus_tx: mpsc::Sender<GenericMessage<ModeReply>>,
    #[allow(dead_code)]
    pub reply_to_parent_tx: mpsc::SyncSender<GenericMessage<ModeReply>>,
}

@@ -422,12 +421,9 @@ impl<

        self.mode_helpers.target = None;
        self.announce_mode(requestor, false);
        if let Some(requestor) = requestor {
            if requestor.sender_id() == NO_SENDER {
                return Ok(());
            }
            if requestor.sender_id() != PUS_MODE_SERVICE.id() {
                log::warn!(
                    "can not send back mode reply to sender {:x}",
                    "can not send back mode reply to sender {}",
                    requestor.sender_id()
                );
            } else {

@@ -536,7 +532,7 @@ mod tests {

            hk_reply_rx,
            handler: MgmHandlerLis3Mdl::new(
                UniqueApidTargetId::new(Apid::Acs as u16, 1),
                "TEST_MGM",
                "test-mgm",
                mode_interface,
                composite_request_rx,
                hk_reply_tx,
@@ -3,7 +3,7 @@

use crossbeam_channel::{bounded, Receiver, Sender};
use std::sync::atomic::{AtomicU16, Ordering};
use std::thread;
use zerocopy::{FromBytes, Immutable, IntoBytes, NetworkEndian, Unaligned, U16};
use zerocopy::{AsBytes, FromBytes, NetworkEndian, Unaligned, U16};

trait FieldDataProvider: Send {
    fn get_data(&self) -> &[u8];

@@ -35,7 +35,7 @@ struct ExampleMgmSet {

    temperature: u16,
}

#[derive(FromBytes, IntoBytes, Immutable, Unaligned)]
#[derive(FromBytes, AsBytes, Unaligned)]
#[repr(C)]
struct ExampleMgmSetZc {
    mgm_vec: [u8; 12],
@@ -122,7 +122,7 @@ pub mod mode_err {

}

pub mod components {
    use satrs::{request::UniqueApidTargetId, ComponentId};
    use satrs::request::UniqueApidTargetId;
    use strum::EnumIter;

    #[derive(Copy, Clone, PartialEq, Eq, EnumIter)]

@@ -184,7 +184,6 @@ pub mod components {

        UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::UdpServer as u32);
    pub const TCP_SERVER: UniqueApidTargetId =
        UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::TcpServer as u32);
    pub const NO_SENDER: ComponentId = ComponentId::MAX;
}

pub mod pool {
@@ -88,7 +88,6 @@ impl PowerSwitcherCommandSender<PcduSwitch> for PowerSwitchHelper {

    }
}

#[allow(dead_code)]
#[derive(new)]
pub struct SwitchRequestInfo {
    pub requestor_info: MessageMetadata,

@@ -100,7 +99,6 @@ pub struct SwitchRequestInfo {

pub struct TestSwitchHelper {
    pub switch_requests: RefCell<VecDeque<SwitchRequestInfo>>,
    pub switch_info_requests: RefCell<VecDeque<PcduSwitch>>,
    #[allow(dead_code)]
    pub switch_delay_request_count: u32,
    pub next_switch_delay: Duration,
    pub switch_map: RefCell<SwitchMapWrapper>,
@@ -15,10 +15,7 @@ use satrs::{

    request::{GenericMessage, MessageMetadata, UniqueApidTargetId},
    spacepackets::ByteConversionError,
};
use satrs_example::{
    config::components::{NO_SENDER, PUS_MODE_SERVICE},
    DeviceMode, TimestampHelper,
};
use satrs_example::{config::components::PUS_MODE_SERVICE, DeviceMode, TimestampHelper};
use satrs_minisim::{
    eps::{
        PcduReply, PcduRequest, PcduSwitch, SwitchMap, SwitchMapBinaryWrapper, SwitchMapWrapper,

@@ -63,9 +60,9 @@ impl SerialInterface for SerialInterfaceToSim {

    type Error = ();

    fn send(&self, data: &[u8]) -> Result<(), Self::Error> {
        let request: PcduRequest = serde_json::from_slice(data).expect("expected a PCDU request");
        let request: SimRequest = serde_json::from_slice(data).unwrap();
        self.sim_request_tx
            .send(SimRequest::new_with_epoch_time(request))
            .send(request)
            .expect("failed to send request to simulation");
        Ok(())
    }

@@ -104,7 +101,9 @@ impl SerialInterface for SerialInterfaceDummy {

    type Error = ();

    fn send(&self, data: &[u8]) -> Result<(), Self::Error> {
        let pcdu_req: PcduRequest = serde_json::from_slice(data).unwrap();
        let sim_req: SimRequest = serde_json::from_slice(data).unwrap();
        let pcdu_req =
            PcduRequest::from_sim_message(&sim_req).expect("PCDU request creation failed");
        let switch_map_mut = &mut self.switch_map.borrow_mut().0;
        match pcdu_req {
            PcduRequest::SwitchDevice { switch, state } => {

@@ -120,7 +119,7 @@ impl SerialInterface for SerialInterfaceDummy {

            PcduRequest::RequestSwitchInfo => {
                let mut reply_deque_mut = self.reply_deque.borrow_mut();
                reply_deque_mut.push_back(SimReply::new(&PcduReply::SwitchInfo(
                    switch_map_mut.clone(),
                    self.switch_map.borrow().0.clone(),
                )));
            }
        };

@@ -131,13 +130,15 @@ impl SerialInterface for SerialInterfaceDummy {

        &self,
        mut f: ReplyHandler,
    ) -> Result<(), Self::Error> {
        if self.reply_queue_empty() {
        if self.reply_deque.borrow().is_empty() {
            return Ok(());
        }
        loop {
            let reply = self.get_next_reply_as_string();
            let mut reply_deque_mut = self.reply_deque.borrow_mut();
            let next_reply = reply_deque_mut.pop_front().unwrap();
            let reply = serde_json::to_string(&next_reply).unwrap();
            f(reply.as_bytes());
            if self.reply_queue_empty() {
            if reply_deque_mut.is_empty() {
                break;
            }
        }

@@ -145,18 +146,6 @@ impl SerialInterface for SerialInterfaceDummy {

    }
}

impl SerialInterfaceDummy {
    fn get_next_reply_as_string(&self) -> String {
        let mut reply_deque_mut = self.reply_deque.borrow_mut();
        let next_reply = reply_deque_mut.pop_front().unwrap();
        serde_json::to_string(&next_reply).unwrap()
    }

    fn reply_queue_empty(&self) -> bool {
        self.reply_deque.borrow().is_empty()
    }
}

pub enum SerialSimInterfaceWrapper {
    Dummy(SerialInterfaceDummy),
    Sim(SerialInterfaceToSim),

@@ -382,12 +371,10 @@ impl<ComInterface: SerialInterface, TmSender: EcssTmSender> PcduHandler<ComInter

                PcduReply::SwitchInfo(switch_info) => {
                    let switch_map_wrapper =
                        SwitchMapWrapper::from_binary_switch_map_ref(&switch_info);
                    let mut shared_switch_map = self
                        .shared_switch_map
                    self.shared_switch_map
                        .lock()
                        .expect("failed to lock switch map");
                    shared_switch_map.switch_map = switch_map_wrapper.0;
                    shared_switch_map.valid = true;
                        .expect("failed to lock switch map")
                        .switch_map = switch_map_wrapper.0;
                }
            }
        }) {

@@ -440,9 +427,6 @@ impl<ComInterface: SerialInterface, TmSender: EcssTmSender> ModeRequestHandler

    ) -> Result<(), Self::Error> {
        self.announce_mode(requestor, false);
        if let Some(requestor) = requestor {
            if requestor.sender_id() == NO_SENDER {
                return Ok(());
            }
            if requestor.sender_id() != PUS_MODE_SERVICE.id() {
                log::warn!(
                    "can not send back mode reply to sender {}",

@@ -481,242 +465,3 @@ impl<ComInterface: SerialInterface, TmSender: EcssTmSender> ModeRequestHandler

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use std::sync::mpsc;

    use satrs::{
        mode::ModeRequest, power::SwitchStateBinary, request::GenericMessage, tmtc::PacketAsVec,
    };
    use satrs_example::config::components::{Apid, MGM_HANDLER_0};
    use satrs_minisim::eps::SwitchMapBinary;

    use super::*;

    #[derive(Default)]
    pub struct SerialInterfaceTest {
        pub inner: SerialInterfaceDummy,
        pub send_queue: RefCell<VecDeque<Vec<u8>>>,
        pub reply_queue: RefCell<VecDeque<String>>,
    }

    impl SerialInterface for SerialInterfaceTest {
        type Error = ();

        fn send(&self, data: &[u8]) -> Result<(), Self::Error> {
            let mut send_queue_mut = self.send_queue.borrow_mut();
            send_queue_mut.push_back(data.to_vec());
            self.inner.send(data)
        }

        fn try_recv_replies<ReplyHandler: FnMut(&[u8])>(
            &self,
            mut f: ReplyHandler,
        ) -> Result<(), Self::Error> {
            if self.inner.reply_queue_empty() {
                return Ok(());
            }
            loop {
                let reply = self.inner.get_next_reply_as_string();
                self.reply_queue.borrow_mut().push_back(reply.clone());
                f(reply.as_bytes());
                if self.inner.reply_queue_empty() {
                    break;
                }
            }
            Ok(())
        }
    }

    pub struct PcduTestbench {
        pub mode_request_tx: mpsc::Sender<GenericMessage<ModeRequest>>,
        pub mode_reply_rx_to_pus: mpsc::Receiver<GenericMessage<ModeReply>>,
        pub mode_reply_rx_to_parent: mpsc::Receiver<GenericMessage<ModeReply>>,
        pub composite_request_tx: mpsc::Sender<GenericMessage<CompositeRequest>>,
        pub hk_reply_rx: mpsc::Receiver<GenericMessage<HkReply>>,
        pub tm_rx: mpsc::Receiver<PacketAsVec>,
        pub switch_request_tx: mpsc::Sender<GenericMessage<SwitchRequest>>,
        pub handler: PcduHandler<SerialInterfaceTest, mpsc::Sender<PacketAsVec>>,
    }

    impl PcduTestbench {
        pub fn new() -> Self {
            let (mode_request_tx, mode_request_rx) = mpsc::channel();
            let (mode_reply_tx_to_pus, mode_reply_rx_to_pus) = mpsc::channel();
            let (mode_reply_tx_to_parent, mode_reply_rx_to_parent) = mpsc::sync_channel(5);
            let mode_interface = MpscModeLeafInterface {
                request_rx: mode_request_rx,
                reply_to_pus_tx: mode_reply_tx_to_pus,
                reply_to_parent_tx: mode_reply_tx_to_parent,
            };
            let (composite_request_tx, composite_request_rx) = mpsc::channel();
            let (hk_reply_tx, hk_reply_rx) = mpsc::channel();
            let (tm_tx, tm_rx) = mpsc::channel::<PacketAsVec>();
            let (switch_request_tx, switch_reqest_rx) = mpsc::channel();
            let shared_switch_map = Arc::new(Mutex::new(SwitchSet::default()));
            Self {
                mode_request_tx,
                mode_reply_rx_to_pus,
                mode_reply_rx_to_parent,
                composite_request_tx,
                hk_reply_rx,
                tm_rx,
                switch_request_tx,
                handler: PcduHandler::new(
                    UniqueApidTargetId::new(Apid::Eps as u16, 0),
                    "TEST_PCDU",
                    mode_interface,
                    composite_request_rx,
                    hk_reply_tx,
                    switch_reqest_rx,
                    tm_tx,
                    SerialInterfaceTest::default(),
                    shared_switch_map,
                ),
            }
        }

        pub fn verify_switch_info_req_was_sent(&self, expected_queue_len: usize) {
            // Check that there is now communication happening.
            let mut send_queue_mut = self.handler.com_interface.send_queue.borrow_mut();
            assert_eq!(send_queue_mut.len(), expected_queue_len);
            let packet_sent = send_queue_mut.pop_front().unwrap();
            drop(send_queue_mut);
            let pcdu_req: PcduRequest = serde_json::from_slice(&packet_sent).unwrap();
            assert_eq!(pcdu_req, PcduRequest::RequestSwitchInfo);
        }

        pub fn verify_switch_req_was_sent(
            &self,
            expected_queue_len: usize,
            switch_id: PcduSwitch,
            target_state: SwitchStateBinary,
        ) {
            // Check that there is now communication happening.
            let mut send_queue_mut = self.handler.com_interface.send_queue.borrow_mut();
            assert_eq!(send_queue_mut.len(), expected_queue_len);
            let packet_sent = send_queue_mut.pop_front().unwrap();
            drop(send_queue_mut);
            let pcdu_req: PcduRequest = serde_json::from_slice(&packet_sent).unwrap();
            assert_eq!(
                pcdu_req,
                PcduRequest::SwitchDevice {
                    switch: switch_id,
                    state: target_state
                }
            )
        }

        pub fn verify_switch_reply_received(
            &self,
            expected_queue_len: usize,
            expected_map: SwitchMapBinary,
        ) {
            // Check that a switch reply was read back.
            let mut reply_received_mut = self.handler.com_interface.reply_queue.borrow_mut();
            assert_eq!(reply_received_mut.len(), expected_queue_len);
            let reply_received = reply_received_mut.pop_front().unwrap();
            let sim_reply: SimReply = serde_json::from_str(&reply_received).unwrap();
            let pcdu_reply = PcduReply::from_sim_message(&sim_reply).unwrap();
            assert_eq!(pcdu_reply, PcduReply::SwitchInfo(expected_map));
        }
    }

    #[test]
    fn test_basic_handler() {
        let mut testbench = PcduTestbench::new();
        assert_eq!(testbench.handler.com_interface.send_queue.borrow().len(), 0);
        assert_eq!(
            testbench.handler.com_interface.reply_queue.borrow().len(),
            0
        );
        assert_eq!(
            testbench.handler.mode_and_submode().mode(),
            DeviceMode::Off as u32
        );
        assert_eq!(testbench.handler.mode_and_submode().submode(), 0_u16);
        testbench.handler.periodic_operation(OpCode::RegularOp);
        testbench
            .handler
            .periodic_operation(OpCode::PollAndRecvReplies);
        // Handler is OFF, no changes expected.
        assert_eq!(testbench.handler.com_interface.send_queue.borrow().len(), 0);
        assert_eq!(
            testbench.handler.com_interface.reply_queue.borrow().len(),
            0
        );
        assert_eq!(
            testbench.handler.mode_and_submode().mode(),
            DeviceMode::Off as u32
        );
        assert_eq!(testbench.handler.mode_and_submode().submode(), 0_u16);
    }

    #[test]
    fn test_normal_mode() {
        let mut testbench = PcduTestbench::new();
        testbench
            .mode_request_tx
            .send(GenericMessage::new(
                MessageMetadata::new(0, PUS_MODE_SERVICE.id()),
                ModeRequest::SetMode(ModeAndSubmode::new(DeviceMode::Normal as u32, 0)),
            ))
            .expect("failed to send mode request");
        let switch_map_shared = testbench.handler.shared_switch_map.lock().unwrap();
        assert!(!switch_map_shared.valid);
        drop(switch_map_shared);
        testbench.handler.periodic_operation(OpCode::RegularOp);
        testbench
            .handler
            .periodic_operation(OpCode::PollAndRecvReplies);
        // Check correctness of mode.
        assert_eq!(
            testbench.handler.mode_and_submode().mode(),
            DeviceMode::Normal as u32
        );
        assert_eq!(testbench.handler.mode_and_submode().submode(), 0);

        testbench.verify_switch_info_req_was_sent(1);
        testbench.verify_switch_reply_received(1, SwitchMapBinaryWrapper::default().0);

        let switch_map_shared = testbench.handler.shared_switch_map.lock().unwrap();
        assert!(switch_map_shared.valid);
        drop(switch_map_shared);
    }

    #[test]
    fn test_switch_request_handling() {
        let mut testbench = PcduTestbench::new();
        testbench
            .mode_request_tx
            .send(GenericMessage::new(
                MessageMetadata::new(0, PUS_MODE_SERVICE.id()),
                ModeRequest::SetMode(ModeAndSubmode::new(DeviceMode::Normal as u32, 0)),
            ))
            .expect("failed to send mode request");
        testbench
            .switch_request_tx
            .send(GenericMessage::new(
                MessageMetadata::new(0, MGM_HANDLER_0.id()),
                SwitchRequest::new(0, SwitchStateBinary::On),
            ))
            .expect("failed to send switch request");
        testbench.handler.periodic_operation(OpCode::RegularOp);
        testbench
            .handler
            .periodic_operation(OpCode::PollAndRecvReplies);

        testbench.verify_switch_req_was_sent(2, PcduSwitch::Mgm, SwitchStateBinary::On);
        testbench.verify_switch_info_req_was_sent(1);
        let mut switch_map = SwitchMapBinaryWrapper::default().0;
        *switch_map
            .get_mut(&PcduSwitch::Mgm)
            .expect("switch state setting failed") = SwitchStateBinary::On;
        testbench.verify_switch_reply_received(1, switch_map);

        let switch_map_shared = testbench.handler.shared_switch_map.lock().unwrap();
        assert!(switch_map_shared.valid);
        drop(switch_map_shared);
    }
}
@@ -22,14 +22,13 @@ use pus::test::create_test_service_dynamic;

use satrs::hal::std::tcp_server::ServerConfig;
use satrs::hal::std::udp_server::UdpTcServer;
use satrs::pus::HandlingStatus;
use satrs::request::{GenericMessage, MessageMetadata};
use satrs::request::GenericMessage;
use satrs::tmtc::{PacketSenderWithSharedPool, SharedPacketPool};
use satrs_example::config::pool::{create_sched_tc_pool, create_static_pools};
use satrs_example::config::tasks::{
    FREQ_MS_AOCS, FREQ_MS_PUS_STACK, FREQ_MS_UDP_TMTC, SIM_CLIENT_IDLE_DELAY_MS,
};
use satrs_example::config::{OBSW_SERVER_ADDR, PACKET_ID_VALIDATOR, SERVER_PORT};
use satrs_example::DeviceMode;

use crate::acs::mgm::{
    MgmHandlerLis3Mdl, MpscModeLeafInterface, SpiDummyInterface, SpiSimInterface,

@@ -47,12 +46,10 @@ use crate::pus::scheduler::{create_scheduler_service_dynamic, create_scheduler_s

use crate::pus::test::create_test_service_static;
use crate::pus::{PusTcDistributor, PusTcMpscRouter};
use crate::requests::{CompositeRequest, GenericRequestRouter};
use satrs::mode::{Mode, ModeAndSubmode, ModeRequest};
use satrs::mode::ModeRequest;
use satrs::pus::event_man::EventRequestWithToken;
use satrs::spacepackets::{time::cds::CdsTime, time::TimeWriter};
use satrs_example::config::components::{
    MGM_HANDLER_0, NO_SENDER, PCDU_HANDLER, TCP_SERVER, UDP_SERVER,
};
use satrs_example::config::components::{MGM_HANDLER_0, PCDU_HANDLER, TCP_SERVER, UDP_SERVER};
use std::net::{IpAddr, SocketAddr};
use std::sync::{mpsc, Mutex};
use std::sync::{Arc, RwLock};

@@ -101,7 +98,7 @@ fn static_tmtc_pool_main() {

        .insert(PCDU_HANDLER.id(), pcdu_handler_composite_tx);
    request_map
        .mode_router_map
        .insert(PCDU_HANDLER.id(), pcdu_handler_mode_tx.clone());
        .insert(PCDU_HANDLER.id(), pcdu_handler_mode_tx);

    // This helper structure is used by all telecommand providers which need to send telecommands
    // to the TC source.

@@ -275,7 +272,6 @@ fn static_tmtc_pool_main() {

    } else {
        SerialSimInterfaceWrapper::Dummy(SerialInterfaceDummy::default())
    };

    let mut pcdu_handler = PcduHandler::new(
        PCDU_HANDLER,
        "PCDU",

@@ -287,13 +283,6 @@ fn static_tmtc_pool_main() {

        pcdu_serial_interface,
        shared_switch_set,
    );
    // The PCDU is a critical component which should be in normal mode immediately.
    pcdu_handler_mode_tx
        .send(GenericMessage::new(
            MessageMetadata::new(0, NO_SENDER),
            ModeRequest::SetMode(ModeAndSubmode::new(DeviceMode::Normal as Mode, 0)),
        ))
        .expect("sending initial mode request failed");

    info!("Starting TMTC and UDP task");
    let jh_udp_tmtc = thread::Builder::new()

@@ -431,7 +420,7 @@ fn dyn_tmtc_pool_main() {

        .insert(PCDU_HANDLER.id(), pcdu_handler_composite_tx);
    request_map
        .mode_router_map
        .insert(PCDU_HANDLER.id(), pcdu_handler_mode_tx.clone());
        .insert(PCDU_HANDLER.id(), pcdu_handler_mode_tx);

    // Create event handling components
    // These sender handles are used to send event requests, for example to enable or disable

@@ -594,13 +583,6 @@ fn dyn_tmtc_pool_main() {

        pcdu_serial_interface,
        shared_switch_set,
    );
    // The PCDU is a critical component which should be in normal mode immediately.
    pcdu_handler_mode_tx
        .send(GenericMessage::new(
            MessageMetadata::new(0, NO_SENDER),
            ModeRequest::SetMode(ModeAndSubmode::new(DeviceMode::Normal as Mode, 0)),
        ))
        .expect("sending initial mode request failed");

    info!("Starting TMTC and UDP task");
    let jh_udp_tmtc = thread::Builder::new()
@@ -33,7 +33,6 @@ pub struct HkReply {

}

#[derive(Clone, PartialEq, Debug)]
#[allow(dead_code)]
pub enum HkReplyVariant {
    Ack,
    Failed(ResultU16),

@@ -44,13 +44,11 @@ pub struct PusTcMpscRouter {

    pub event_tc_sender: Sender<EcssTcAndToken>,
    pub sched_tc_sender: Sender<EcssTcAndToken>,
    pub hk_tc_sender: Sender<EcssTcAndToken>,
    #[allow(dead_code)]
    pub action_tc_sender: Sender<EcssTcAndToken>,
    pub mode_tc_sender: Sender<EcssTcAndToken>,
}

pub struct PusTcDistributor<TmSender: EcssTmSender> {
    #[allow(dead_code)]
    pub id: ComponentId,
    pub tm_sender: TmSender,
    pub verif_reporter: VerificationReporter,

@@ -26,7 +26,6 @@ pub enum CompositeRequest {

#[derive(Clone)]
pub struct GenericRequestRouter {
    #[allow(dead_code)]
    pub id: ComponentId,
    // All messages which do not have a dedicated queue.
    pub composite_router_map:
@@ -4,19 +4,16 @@ use std::{

};

use log::info;
use satrs::tmtc::{PacketAsVec, PacketInPool, SharedPacketPool};
use satrs::{
    pool::PoolProvider,
    seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore},
    spacepackets::{
        ecss::{tm::PusTmZeroCopyWriter, PusPacket},
        seq_count::CcsdsSimpleSeqCountProvider,
        time::cds::MIN_CDS_FIELD_LEN,
        CcsdsPacket,
    },
};
use satrs::{
    spacepackets::seq_count::SequenceCountProvider,
    tmtc::{PacketAsVec, PacketInPool, SharedPacketPool},
};

use crate::interface::tcp::SyncTcpTmSource;
@@ -23,7 +23,7 @@ version = "1"

optional = true

[dependencies.satrs-shared]
version = ">=0.1.3, <=0.2"
version = ">=0.1.3, <0.2"
features = ["serde"]

[dependencies.satrs-mib-codegen]
@@ -28,7 +28,7 @@ features = ["full"]

trybuild = { version = "1", features = ["diff"] }

[dev-dependencies.satrs-shared]
version = ">=0.1.3, <=0.2"
version = ">=0.1.3, <0.2"

[dev-dependencies.satrs-mib]
path = ".."
@@ -1,4 +1,6 @@

#![no_std]
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(any(feature = "std", test))]
extern crate std;
@@ -9,20 +9,20 @@ edition = "2021"

serde = { version = "1", features = ["derive"] }
serde_json = "1"
log = "0.4"
thiserror = "2"
fern = "0.7"
thiserror = "1"
fern = "0.5"
strum = { version = "0.26", features = ["derive"] }
num_enum = "0.7"
humantime = "2"

[dependencies.asynchronix]
version = "0.2.2"
# git = "https://github.com/asynchronics/asynchronix.git"
# branch = "main"
version = "0.2.1"
git = "https://github.com/asynchronics/asynchronix.git"
branch = "main"
features = ["serde"]

[dependencies.satrs]
path = "../satrs"

[dev-dependencies]
delegate = "0.13"
delegate = "0.12"
@@ -1,32 +0,0 @@

sat-rs minisim
======

This crate contains a mini-simulator based on the open-source discrete-event simulation framework
[asynchronix](https://github.com/asynchronics/asynchronix).

Right now, this crate is primarily used together with the
[`satrs-example` application](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example)
to simulate the devices connected to the example application.

You can simply run this application using

```sh
cargo run
```

or

```sh
cargo run -p satrs-minisim
```

in the workspace. The mini simulator uses the UDP port 7303 to exchange simulation requests and
simulation replies with any other application.

The simulator was designed in a modular way to be scalable and adaptable to other communication
schemes. This might allow it to serve as a mini-simulator for other example applications which
still have similar device handlers.

The following graph shows the high-level architecture of the mini-simulator.

<img src="../images/minisim-arch/minisim-arch.png" alt="Mini simulator architecture" width="500" class="center"/>
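To make the request/reply exchange described above concrete, the following is a minimal sketch of an external client talking to the simulator. The UDP port (7303) and the JSON encoding via `serde_json` are taken from the README and the diffs above; the request payload shown here is a hypothetical placeholder, not the real `SimRequest` wire format.

```rust
use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    // The mini simulator serves UDP port 7303; bind to an ephemeral local port.
    let socket = UdpSocket::bind("127.0.0.1:0")?;
    socket.connect("127.0.0.1:7303")?;

    // Hypothetical request body: the real applications serialize a `SimRequest`
    // structure with serde_json before sending it over the socket.
    let request = r#"{"component":"Pcdu","request":"RequestSwitchInfo"}"#;
    socket.send(request.as_bytes())?;

    // Replies come back as JSON-serialized `SimReply` structures.
    let mut buf = [0u8; 4096];
    let read = socket.recv(&mut buf)?;
    println!("simulation reply: {}", String::from_utf8_lossy(&buf[..read]));
    Ok(())
}
```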
@@ -31,7 +31,6 @@ const PHASE_Z: f32 = 0.2;

/// might still be possible and is probably sufficient for many OBSW needs.
pub struct MagnetometerModel<ReplyProvider: MgmReplyProvider> {
    pub switch_state: SwitchStateBinary,
    #[allow(dead_code)]
    pub periodicity: Duration,
    pub external_mag_field: Option<MgmSensorValuesMicroTesla>,
    pub reply_sender: mpsc::Sender<SimReply>,
@@ -16,12 +16,10 @@ use crate::{

    eps::PcduModel,
};

const WARNING_FOR_STALE_DATA: bool = false;

const SIM_CTRL_REQ_WIRETAPPING: bool = false;
const MGM_REQ_WIRETAPPING: bool = false;
const PCDU_REQ_WIRETAPPING: bool = false;
const MGT_REQ_WIRETAPPING: bool = false;
const SIM_CTRL_REQ_WIRETAPPING: bool = true;
const MGM_REQ_WIRETAPPING: bool = true;
const PCDU_REQ_WIRETAPPING: bool = true;
const MGT_REQ_WIRETAPPING: bool = true;

// The simulation controller processes requests and drives the simulation.
pub struct SimController {

@@ -74,7 +72,7 @@ impl SimController {

        loop {
            match self.request_receiver.try_recv() {
                Ok(request) => {
                    if request.timestamp < old_timestamp && WARNING_FOR_STALE_DATA {
                    if request.timestamp < old_timestamp {
                        log::warn!("stale data with timestamp {:?} received", request.timestamp);
                    }
                    if let Err(e) = match request.component() {
@ -236,7 +236,7 @@ pub mod eps {
|
||||
RequestSwitchInfo = 1,
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
|
||||
pub enum PcduRequest {
|
||||
SwitchDevice {
|
||||
switch: PcduSwitch,
|
||||
|
@ -8,14 +8,6 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
|
||||
|
||||
# [unreleased]
|
||||
|
||||
# [v0.2.1] 2024-11-15
|
||||
|
||||
Increased allowed spacepackets to v0.13
|
||||
|
||||
# [v0.2.0] 2024-11-04
|
||||
|
||||
Semver bump, due to added features in v0.1.4
|
||||
|
||||
# [v0.1.4] 2024-04-24
|
||||
|
||||
## Added
|
||||
|
@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "satrs-shared"
|
||||
description = "Components shared by multiple sat-rs crates"
|
||||
version = "0.2.1"
|
||||
version = "0.1.4"
|
||||
edition = "2021"
|
||||
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
|
||||
homepage = "https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/"
|
||||
@ -22,13 +22,12 @@ version = "0.3"
|
||||
optional = true
|
||||
|
||||
[dependencies.spacepackets]
|
||||
version = ">0.9, <=0.13"
|
||||
version = ">0.9, <=0.11"
|
||||
default-features = false
|
||||
|
||||
[features]
|
||||
serde = ["dep:serde", "spacepackets/serde"]
|
||||
defmt = ["dep:defmt", "spacepackets/defmt"]
|
||||
spacepackets = ["dep:defmt", "spacepackets/defmt"]
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = ["--generate-link-to-definition"]
|
||||
rustdoc-args = ["--cfg", "docs_rs", "--generate-link-to-definition"]
|
||||
|
@ -1,3 +0,0 @@
|
||||
#!/bin/sh
|
||||
export RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options"
|
||||
cargo +nightly doc --all-features --open
|
@ -1,23 +0,0 @@
|
||||
Checklist for new releases
|
||||
=======
|
||||
|
||||
# Pre-Release
|
||||
|
||||
1. Make sure any new modules are documented sufficiently enough and check docs by running
|
||||
`docs.sh`.
|
||||
2. Bump version specifier in `Cargo.toml`.
|
||||
3. Update `CHANGELOG.md`: Convert `unreleased` section into version section with date and add new
|
||||
`unreleased` section.
|
||||
4. Run `cargo test --all-features` or `cargo nextest r --all-features` and `cargo test --doc`.
|
||||
5. Run `cargo fmt` and `cargo clippy`. Check `cargo msrv` against MSRV in `Cargo.toml`.
|
||||
6. Wait for CI/CD results for EGit and Github. These also check cross-compilation for bare-metal
|
||||
targets.
|
||||
|
||||
# Release
|
||||
|
||||
1. `cargo publish`
|
||||
|
||||
# Post-Release
|
||||
|
||||
1. Create a new release on `EGit` with the name `satrs-<version>`.
|
||||
|
@ -1,4 +1,4 @@
|
||||
//! This crates contains modules shared among other sat-rs framework crates.
|
||||
#![no_std]
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![cfg_attr(docs_rs, feature(doc_auto_cfg))]
|
||||
pub mod res_code;
|
||||
|
@ -12,14 +12,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
|
||||
|
||||
- Renamed `StaticPoolConfig::new` to `StaticPoolConfig::new_from_subpool_cfg_tuples`. The new
|
||||
`new` implementation expects a type struct instead of tuples.
|
||||
- Moved `cfdp` module to [dedicated crate](https://egit.irs.uni-stuttgart.de/rust/cfdp)
|
||||
- Moved `seq_count` module to [spacepackets](https://egit.irs.uni-stuttgart.de/rust/spacepackets)
|
||||
crate
|
||||
|
||||
## Added
|
||||
|
||||
- `StaticHeaplessMemoryPool` which can be grown with user-provided static buffers.
|
||||
- Scheduling table for systems with a standard runtime
|
||||
|
||||
# [v0.2.1] 2024-05-19
|
||||
|
||||
|
@ -13,21 +13,21 @@ keywords = ["no-std", "space", "aerospace"]
|
||||
categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-support", "embedded"]
|
||||
|
||||
[dependencies]
|
||||
delegate = ">0.7, <=0.13"
|
||||
delegate = ">0.7, <=0.10"
|
||||
paste = "1"
|
||||
derive-new = ">=0.6, <=0.7"
|
||||
derive-new = "0.6"
|
||||
smallvec = "1"
|
||||
crc = "3"
|
||||
|
||||
[dependencies.satrs-shared]
|
||||
version = ">=0.1.3, <=0.2"
|
||||
version = ">=0.1.3, <0.2"
|
||||
|
||||
[dependencies.num_enum]
|
||||
version = ">0.5, <=0.7"
|
||||
default-features = false
|
||||
|
||||
[dependencies.spacepackets]
|
||||
version = "0.13"
|
||||
version = "0.11"
|
||||
default-features = false
|
||||
|
||||
[dependencies.cobs]
|
||||
@ -45,11 +45,11 @@ version = "1"
|
||||
optional = true
|
||||
|
||||
[dependencies.hashbrown]
|
||||
version = ">=0.14, <=0.15"
|
||||
version = "0.14"
|
||||
optional = true
|
||||
|
||||
[dependencies.heapless]
|
||||
version = "0.8"
|
||||
version = "0.7"
|
||||
optional = true
|
||||
|
||||
[dependencies.downcast-rs]
|
||||
@ -67,8 +67,8 @@ default-features = false
|
||||
optional = true
|
||||
|
||||
[dependencies.thiserror]
|
||||
version = "2"
|
||||
default-features = false
|
||||
version = "1"
|
||||
optional = true
|
||||
|
||||
[dependencies.serde]
|
||||
version = "1"
|
||||
@ -81,7 +81,7 @@ features = ["all"]
|
||||
optional = true
|
||||
|
||||
[dependencies.mio]
|
||||
version = "1"
|
||||
version = "0.8"
|
||||
features = ["os-poll", "net"]
|
||||
optional = true
|
||||
|
||||
@ -91,7 +91,7 @@ optional = true
|
||||
|
||||
[dev-dependencies]
|
||||
serde = "1"
|
||||
zerocopy = "0.8"
|
||||
zerocopy = "0.7"
|
||||
once_cell = "1"
|
||||
serde_json = "1"
|
||||
rand = "0.8"
|
||||
@ -111,7 +111,7 @@ std = [
|
||||
"serde/std",
|
||||
"spacepackets/std",
|
||||
"num_enum/std",
|
||||
"thiserror/std",
|
||||
"thiserror",
|
||||
"socket2",
|
||||
"mio"
|
||||
]
|
||||
@ -127,7 +127,8 @@ crossbeam = ["crossbeam-channel"]
|
||||
heapless = ["dep:heapless"]
|
||||
defmt = ["dep:defmt", "spacepackets/defmt"]
|
||||
test_util = []
|
||||
doc-images = []
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = ["--generate-link-to-definition"]
|
||||
rustdoc-args = ["--cfg", "docs_rs", "--generate-link-to-definition"]
|
||||
|
@ -1,3 +0,0 @@
|
||||
#!/bin/sh
|
||||
export RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options"
|
||||
cargo +nightly doc --all-features --open
|
@ -3,7 +3,8 @@ Checklist for new releases
|
||||
|
||||
# Pre-Release
|
||||
|
||||
1. Make sure any new modules are documented sufficiently enough and check docs by running `docs.sh`.
|
||||
1. Make sure any new modules are documented sufficiently enough and check docs with
|
||||
`cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docs_rs"]' --open`.
|
||||
2. Bump version specifier in `Cargo.toml`.
|
||||
3. Update `CHANGELOG.md`: Convert `unreleased` section into version section with date and add new
|
||||
`unreleased` section.
|
||||
|
1603  satrs/src/cfdp/dest.rs  (new file; diff suppressed because it is too large)
769   satrs/src/cfdp/filestore.rs  (new file)
@ -0,0 +1,769 @@
use alloc::string::{String, ToString};
use core::fmt::Display;
use crc::{Crc, CRC_32_CKSUM};
use spacepackets::cfdp::ChecksumType;
use spacepackets::ByteConversionError;
#[cfg(feature = "std")]
use std::error::Error;
use std::path::Path;
#[cfg(feature = "std")]
pub use std_mod::*;

pub const CRC_32: Crc<u32> = Crc::<u32>::new(&CRC_32_CKSUM);

#[derive(Debug, Clone)]
pub enum FilestoreError {
    FileDoesNotExist,
    FileAlreadyExists,
    DirDoesNotExist,
    Permission,
    IsNotFile,
    IsNotDirectory,
    ByteConversion(ByteConversionError),
    Io {
        raw_errno: Option<i32>,
        string: String,
    },
    ChecksumTypeNotImplemented(ChecksumType),
}

impl From<ByteConversionError> for FilestoreError {
    fn from(value: ByteConversionError) -> Self {
        Self::ByteConversion(value)
    }
}

impl Display for FilestoreError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            FilestoreError::FileDoesNotExist => {
                write!(f, "file does not exist")
            }
            FilestoreError::FileAlreadyExists => {
                write!(f, "file already exists")
            }
            FilestoreError::DirDoesNotExist => {
                write!(f, "directory does not exist")
            }
            FilestoreError::Permission => {
                write!(f, "permission error")
            }
            FilestoreError::IsNotFile => {
                write!(f, "is not a file")
            }
            FilestoreError::IsNotDirectory => {
                write!(f, "is not a directory")
            }
            FilestoreError::ByteConversion(e) => {
                write!(f, "filestore error: {e}")
            }
            FilestoreError::Io { raw_errno, string } => {
                write!(
                    f,
                    "filestore generic IO error with raw errno {:?}: {}",
                    raw_errno, string
                )
            }
            FilestoreError::ChecksumTypeNotImplemented(checksum_type) => {
                write!(f, "checksum {:?} not implemented", checksum_type)
            }
        }
    }
}

impl Error for FilestoreError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match self {
            FilestoreError::ByteConversion(e) => Some(e),
            _ => None,
        }
    }
}

#[cfg(feature = "std")]
impl From<std::io::Error> for FilestoreError {
    fn from(value: std::io::Error) -> Self {
        Self::Io {
            raw_errno: value.raw_os_error(),
            string: value.to_string(),
        }
    }
}

pub trait VirtualFilestore {
    fn create_file(&self, file_path: &str) -> Result<(), FilestoreError>;

    fn remove_file(&self, file_path: &str) -> Result<(), FilestoreError>;

    /// Truncating a file means deleting all its data so the resulting file is empty.
    /// This can be more efficient than removing and re-creating a file.
    fn truncate_file(&self, file_path: &str) -> Result<(), FilestoreError>;

    fn remove_dir(&self, dir_path: &str, all: bool) -> Result<(), FilestoreError>;
    fn create_dir(&self, dir_path: &str) -> Result<(), FilestoreError>;

    fn read_data(
        &self,
        file_path: &str,
        offset: u64,
        read_len: u64,
        buf: &mut [u8],
    ) -> Result<(), FilestoreError>;

    fn write_data(&self, file: &str, offset: u64, buf: &[u8]) -> Result<(), FilestoreError>;

    fn filename_from_full_path(path: &str) -> Option<&str>
    where
        Self: Sized,
    {
        // Convert the path string to a Path
        let path = Path::new(path);

        // Extract the file name using the file_name() method
        path.file_name().and_then(|name| name.to_str())
    }

    fn is_file(&self, path: &str) -> bool;

    fn is_dir(&self, path: &str) -> bool {
        !self.is_file(path)
    }

    fn exists(&self, path: &str) -> bool;

    /// This special function is the CFDP-specific abstraction to verify the checksum of a file.
    /// This allows keeping OS-specific details, like reading the whole file in the most efficient
    /// manner, inside the file system abstraction.
    ///
    /// The passed verification buffer argument will be used by the specific implementation as
    /// a buffer to read the file into. It is recommended to use common buffer sizes like
    /// 4096 or 8192 bytes.
    fn checksum_verify(
        &self,
        file_path: &str,
        checksum_type: ChecksumType,
        expected_checksum: u32,
        verification_buf: &mut [u8],
    ) -> Result<bool, FilestoreError>;
}

#[cfg(feature = "std")]
pub mod std_mod {
    use super::*;
    use std::{
        fs::{self, File, OpenOptions},
        io::{BufReader, Read, Seek, SeekFrom, Write},
    };

    #[derive(Default)]
    pub struct NativeFilestore {}

    impl VirtualFilestore for NativeFilestore {
        fn create_file(&self, file_path: &str) -> Result<(), FilestoreError> {
            if self.exists(file_path) {
                return Err(FilestoreError::FileAlreadyExists);
            }
            File::create(file_path)?;
            Ok(())
        }

        fn remove_file(&self, file_path: &str) -> Result<(), FilestoreError> {
            if !self.exists(file_path) {
                return Err(FilestoreError::FileDoesNotExist);
            }
            if !self.is_file(file_path) {
                return Err(FilestoreError::IsNotFile);
            }
            fs::remove_file(file_path)?;
            Ok(())
        }

        fn truncate_file(&self, file_path: &str) -> Result<(), FilestoreError> {
            if !self.exists(file_path) {
                return Err(FilestoreError::FileDoesNotExist);
            }
            if !self.is_file(file_path) {
                return Err(FilestoreError::IsNotFile);
            }
            OpenOptions::new()
                .write(true)
                .truncate(true)
                .open(file_path)?;
            Ok(())
        }

        fn create_dir(&self, dir_path: &str) -> Result<(), FilestoreError> {
            fs::create_dir(dir_path).map_err(|e| FilestoreError::Io {
                raw_errno: e.raw_os_error(),
                string: e.to_string(),
            })?;
            Ok(())
        }

        fn remove_dir(&self, dir_path: &str, all: bool) -> Result<(), FilestoreError> {
            if !self.exists(dir_path) {
                return Err(FilestoreError::DirDoesNotExist);
            }
            if !self.is_dir(dir_path) {
                return Err(FilestoreError::IsNotDirectory);
            }
            if !all {
                fs::remove_dir(dir_path)?;
                return Ok(());
            }
            fs::remove_dir_all(dir_path)?;
            Ok(())
        }

        fn read_data(
            &self,
            file_name: &str,
            offset: u64,
            read_len: u64,
            buf: &mut [u8],
        ) -> Result<(), FilestoreError> {
            if buf.len() < read_len as usize {
                return Err(ByteConversionError::ToSliceTooSmall {
                    found: buf.len(),
                    expected: read_len as usize,
                }
                .into());
            }
            if !self.exists(file_name) {
                return Err(FilestoreError::FileDoesNotExist);
            }
            if !self.is_file(file_name) {
                return Err(FilestoreError::IsNotFile);
            }
            let mut file = File::open(file_name)?;
            file.seek(SeekFrom::Start(offset))?;
            file.read_exact(&mut buf[0..read_len as usize])?;
            Ok(())
        }

        fn write_data(&self, file: &str, offset: u64, buf: &[u8]) -> Result<(), FilestoreError> {
            if !self.exists(file) {
                return Err(FilestoreError::FileDoesNotExist);
            }
            if !self.is_file(file) {
                return Err(FilestoreError::IsNotFile);
            }
            let mut file = OpenOptions::new().write(true).open(file)?;
            file.seek(SeekFrom::Start(offset))?;
            file.write_all(buf)?;
            Ok(())
        }

        fn is_file(&self, path: &str) -> bool {
            let path = Path::new(path);
            path.is_file()
        }

        fn exists(&self, path: &str) -> bool {
            let path = Path::new(path);
            if !path.exists() {
                return false;
            }
            true
        }

        fn checksum_verify(
            &self,
            file_path: &str,
            checksum_type: ChecksumType,
            expected_checksum: u32,
            verification_buf: &mut [u8],
        ) -> Result<bool, FilestoreError> {
            match checksum_type {
                ChecksumType::Modular => {
                    if self.calc_modular_checksum(file_path)? == expected_checksum {
                        return Ok(true);
                    }
                    Ok(false)
                }
                ChecksumType::Crc32 => {
                    let mut digest = CRC_32.digest();
                    let file_to_check = File::open(file_path)?;
                    let mut buf_reader = BufReader::new(file_to_check);
                    loop {
                        let bytes_read = buf_reader.read(verification_buf)?;
                        if bytes_read == 0 {
                            break;
                        }
                        digest.update(&verification_buf[0..bytes_read]);
                    }
                    if digest.finalize() == expected_checksum {
                        return Ok(true);
                    }
                    Ok(false)
                }
                ChecksumType::NullChecksum => Ok(true),
                _ => Err(FilestoreError::ChecksumTypeNotImplemented(checksum_type)),
            }
        }
    }

    impl NativeFilestore {
        pub fn calc_modular_checksum(&self, file_path: &str) -> Result<u32, FilestoreError> {
            let mut checksum: u32 = 0;
            let file = File::open(file_path)?;
            let mut buf_reader = BufReader::new(file);
            let mut buffer = [0; 4];

            loop {
                let bytes_read = buf_reader.read(&mut buffer)?;
                if bytes_read == 0 {
                    break;
                }
                // Perform padding directly in the buffer
                (bytes_read..4).for_each(|i| {
                    buffer[i] = 0;
                });

                checksum = checksum.wrapping_add(u32::from_be_bytes(buffer));
            }
            Ok(checksum)
        }
    }
}

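// Illustrative usage sketch, not part of the original file: create a file in the OS temp
// directory, write and read a few bytes through the VirtualFilestore API shown above and
// verify its CRC32 checksum with the same CRC_32 constant used by NativeFilestore.
// The file name and buffer sizes are arbitrary choices for the example.
#[cfg(feature = "std")]
pub fn native_filestore_demo() -> Result<(), FilestoreError> {
    let fs = NativeFilestore::default();
    let path_buf = std::env::temp_dir().join("vfs-demo.bin");
    let path = path_buf.to_str().unwrap();
    if fs.exists(path) {
        fs.remove_file(path)?;
    }
    fs.create_file(path)?;

    let data = [1, 2, 3, 4];
    fs.write_data(path, 0, &data)?;

    let mut read_back = [0; 4];
    fs.read_data(path, 0, 4, &mut read_back)?;
    assert_eq!(read_back, data);

    // 4096 bytes is a typical size for the verification buffer.
    let mut verif_buf = [0; 4096];
    let expected = CRC_32.checksum(&data);
    assert!(fs.checksum_verify(path, ChecksumType::Crc32, expected, &mut verif_buf)?);
    Ok(())
}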
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{fs, path::Path, println};
|
||||
|
||||
use super::*;
|
||||
use alloc::format;
|
||||
use tempfile::tempdir;
|
||||
|
||||
const EXAMPLE_DATA_CFDP: [u8; 15] = [
|
||||
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
|
||||
];
|
||||
|
||||
const NATIVE_FS: NativeFilestore = NativeFilestore {};
|
||||
|
||||
#[test]
|
||||
fn test_basic_native_filestore_create() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
let result =
|
||||
NATIVE_FS.create_file(file_path.to_str().expect("getting str for file failed"));
|
||||
assert!(result.is_ok());
|
||||
let path = Path::new(&file_path);
|
||||
assert!(path.exists());
|
||||
assert!(NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
assert!(NATIVE_FS.is_file(file_path.to_str().unwrap()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_native_fs_file_exists() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
assert!(!NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
assert!(NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
assert!(NATIVE_FS.is_file(file_path.to_str().unwrap()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_native_fs_dir_exists() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let dir_path = tmpdir.path().join("testdir");
|
||||
assert!(!NATIVE_FS.exists(dir_path.to_str().unwrap()));
|
||||
NATIVE_FS
|
||||
.create_dir(dir_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
assert!(NATIVE_FS.exists(dir_path.to_str().unwrap()));
|
||||
assert!(NATIVE_FS.is_dir(dir_path.as_path().to_str().unwrap()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_native_fs_remove_file() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.expect("creating file failed");
|
||||
assert!(NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
NATIVE_FS
|
||||
.remove_file(file_path.to_str().unwrap())
|
||||
.expect("removing file failed");
|
||||
assert!(!NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_native_fs_write() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
assert!(!NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
assert!(NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
assert!(NATIVE_FS.is_file(file_path.to_str().unwrap()));
|
||||
println!("{}", file_path.to_str().unwrap());
|
||||
let write_data = "hello world\n";
|
||||
NATIVE_FS
|
||||
.write_data(file_path.to_str().unwrap(), 0, write_data.as_bytes())
|
||||
.expect("writing to file failed");
|
||||
let read_back = fs::read_to_string(file_path).expect("reading back data failed");
|
||||
assert_eq!(read_back, write_data);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_native_fs_read() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
assert!(!NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
assert!(NATIVE_FS.exists(file_path.to_str().unwrap()));
|
||||
assert!(NATIVE_FS.is_file(file_path.to_str().unwrap()));
|
||||
println!("{}", file_path.to_str().unwrap());
|
||||
let write_data = "hello world\n";
|
||||
NATIVE_FS
|
||||
.write_data(file_path.to_str().unwrap(), 0, write_data.as_bytes())
|
||||
.expect("writing to file failed");
|
||||
let read_back = fs::read_to_string(file_path).expect("reading back data failed");
|
||||
assert_eq!(read_back, write_data);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_truncate_file() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.expect("creating file failed");
|
||||
fs::write(file_path.clone(), [1, 2, 3, 4]).unwrap();
|
||||
assert_eq!(fs::read(file_path.clone()).unwrap(), [1, 2, 3, 4]);
|
||||
NATIVE_FS
|
||||
.truncate_file(file_path.to_str().unwrap())
|
||||
.unwrap();
|
||||
assert_eq!(fs::read(file_path.clone()).unwrap(), []);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_dir() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let dir_path = tmpdir.path().join("testdir");
|
||||
assert!(!NATIVE_FS.exists(dir_path.to_str().unwrap()));
|
||||
NATIVE_FS
|
||||
.create_dir(dir_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
assert!(NATIVE_FS.exists(dir_path.to_str().unwrap()));
|
||||
NATIVE_FS
|
||||
.remove_dir(dir_path.to_str().unwrap(), false)
|
||||
.unwrap();
|
||||
assert!(!NATIVE_FS.exists(dir_path.to_str().unwrap()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_file() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.expect("creating file failed");
|
||||
fs::write(file_path.clone(), [1, 2, 3, 4]).unwrap();
|
||||
let read_buf: &mut [u8] = &mut [0; 4];
|
||||
NATIVE_FS
|
||||
.read_data(file_path.to_str().unwrap(), 0, 4, read_buf)
|
||||
.unwrap();
|
||||
assert_eq!([1, 2, 3, 4], read_buf);
|
||||
NATIVE_FS
|
||||
.write_data(file_path.to_str().unwrap(), 4, &[5, 6, 7, 8])
|
||||
.expect("writing to file failed");
|
||||
NATIVE_FS
|
||||
.read_data(file_path.to_str().unwrap(), 2, 4, read_buf)
|
||||
.unwrap();
|
||||
assert_eq!([3, 4, 5, 6], read_buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_which_does_not_exist() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
let result = NATIVE_FS.read_data(file_path.to_str().unwrap(), 0, 4, &mut [0; 4]);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::FileDoesNotExist = error {
|
||||
assert_eq!(error.to_string(), "file does not exist");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_file_already_exists() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
let result =
|
||||
NATIVE_FS.create_file(file_path.to_str().expect("getting str for file failed"));
|
||||
assert!(result.is_ok());
|
||||
let result =
|
||||
NATIVE_FS.create_file(file_path.to_str().expect("getting str for file failed"));
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::FileAlreadyExists = error {
|
||||
assert_eq!(error.to_string(), "file already exists");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_file_with_dir_api() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
let result = NATIVE_FS.remove_dir(file_path.to_str().unwrap(), true);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::IsNotDirectory = error {
|
||||
assert_eq!(error.to_string(), "is not a directory");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_dir_remove_all() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let dir_path = tmpdir.path().join("test");
|
||||
NATIVE_FS
|
||||
.create_dir(dir_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
let file_path = dir_path.as_path().join("test.txt");
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
let result = NATIVE_FS.remove_dir(dir_path.to_str().unwrap(), true);
|
||||
assert!(result.is_ok());
|
||||
assert!(!NATIVE_FS.exists(dir_path.to_str().unwrap()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_dir_with_file_api() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test");
|
||||
NATIVE_FS
|
||||
.create_dir(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
let result = NATIVE_FS.remove_file(file_path.to_str().unwrap());
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::IsNotFile = error {
|
||||
assert_eq!(error.to_string(), "is not a file");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_dir_which_does_not_exist() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test");
|
||||
let result = NATIVE_FS.remove_dir(file_path.to_str().unwrap(), true);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::DirDoesNotExist = error {
|
||||
assert_eq!(error.to_string(), "directory does not exist");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_file_which_does_not_exist() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
let result = NATIVE_FS.remove_file(file_path.to_str().unwrap());
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::FileDoesNotExist = error {
|
||||
assert_eq!(error.to_string(), "file does not exist");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_truncate_file_which_does_not_exist() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
let result = NATIVE_FS.truncate_file(file_path.to_str().unwrap());
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::FileDoesNotExist = error {
|
||||
assert_eq!(error.to_string(), "file does not exist");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_truncate_file_on_directory() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test");
|
||||
NATIVE_FS.create_dir(file_path.to_str().unwrap()).unwrap();
|
||||
let result = NATIVE_FS.truncate_file(file_path.to_str().unwrap());
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::IsNotFile = error {
|
||||
assert_eq!(error.to_string(), "is not a file");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_byte_conversion_error_when_reading() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
let result = NATIVE_FS.read_data(file_path.to_str().unwrap(), 0, 2, &mut []);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::ByteConversion(byte_conv_error) = error {
|
||||
if let ByteConversionError::ToSliceTooSmall { found, expected } = byte_conv_error {
|
||||
assert_eq!(found, 0);
|
||||
assert_eq!(expected, 2);
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
assert_eq!(
|
||||
error.to_string(),
|
||||
format!("filestore error: {}", byte_conv_error)
|
||||
);
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_file_on_dir() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let dir_path = tmpdir.path().join("test");
|
||||
NATIVE_FS
|
||||
.create_dir(dir_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
let result = NATIVE_FS.read_data(dir_path.to_str().unwrap(), 0, 4, &mut [0; 4]);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::IsNotFile = error {
|
||||
assert_eq!(error.to_string(), "is not a file");
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_file_non_existing() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
let result = NATIVE_FS.write_data(file_path.to_str().unwrap(), 0, &[]);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::FileDoesNotExist = error {
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_file_on_dir() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test");
|
||||
NATIVE_FS.create_dir(file_path.to_str().unwrap()).unwrap();
|
||||
let result = NATIVE_FS.write_data(file_path.to_str().unwrap(), 0, &[]);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::IsNotFile = error {
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filename_extraction() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("test.txt");
|
||||
NATIVE_FS
|
||||
.create_file(file_path.to_str().expect("getting str for file failed"))
|
||||
.unwrap();
|
||||
NativeFilestore::filename_from_full_path(file_path.to_str().unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_modular_checksum() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("mod-crc.bin");
|
||||
fs::write(file_path.as_path(), EXAMPLE_DATA_CFDP).expect("writing test file failed");
|
||||
// Kind of re-writing the modular checksum impl here which we are trying to test, but the
|
||||
// numbers/correctness were verified manually using calculators, so this is okay.
|
||||
let mut checksum: u32 = 0;
|
||||
let mut buffer: [u8; 4] = [0; 4];
|
||||
for i in 0..3 {
|
||||
buffer = EXAMPLE_DATA_CFDP[i * 4..(i + 1) * 4].try_into().unwrap();
|
||||
checksum = checksum.wrapping_add(u32::from_be_bytes(buffer));
|
||||
}
|
||||
buffer[0..3].copy_from_slice(&EXAMPLE_DATA_CFDP[12..15]);
|
||||
buffer[3] = 0;
|
||||
checksum = checksum.wrapping_add(u32::from_be_bytes(buffer));
|
||||
let mut verif_buf: [u8; 32] = [0; 32];
|
||||
let result = NATIVE_FS.checksum_verify(
|
||||
file_path.to_str().unwrap(),
|
||||
ChecksumType::Modular,
|
||||
checksum,
|
||||
&mut verif_buf,
|
||||
);
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_null_checksum_impl() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("mod-crc.bin");
|
||||
// The file to check does not even need to exist, and the verification buffer can be
|
||||
// empty: the null checksum is always yields the same result.
|
||||
let result = NATIVE_FS.checksum_verify(
|
||||
file_path.to_str().unwrap(),
|
||||
ChecksumType::NullChecksum,
|
||||
0,
|
||||
&mut [],
|
||||
);
|
||||
assert!(result.is_ok());
|
||||
assert!(result.unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_checksum_not_implemented() {
|
||||
let tmpdir = tempdir().expect("creating tmpdir failed");
|
||||
let file_path = tmpdir.path().join("mod-crc.bin");
|
||||
// The file to check does not even need to exist, and the verification buffer can be
|
||||
// empty: the null checksum is always yields the same result.
|
||||
let result = NATIVE_FS.checksum_verify(
|
||||
file_path.to_str().unwrap(),
|
||||
ChecksumType::Crc32Proximity1,
|
||||
0,
|
||||
&mut [],
|
||||
);
|
||||
assert!(result.is_err());
|
||||
let error = result.unwrap_err();
|
||||
if let FilestoreError::ChecksumTypeNotImplemented(cksum_type) = error {
|
||||
assert_eq!(
|
||||
error.to_string(),
|
||||
format!("checksum {:?} not implemented", cksum_type)
|
||||
);
|
||||
} else {
|
||||
panic!("unexpected error");
|
||||
}
|
||||
}
|
||||
}
|
668   satrs/src/cfdp/mod.rs  (new file)
@ -0,0 +1,668 @@
//! This module contains the implementation of the CFDP high level classes as specified in the
//! CCSDS 727.0-B-5.
use core::{cell::RefCell, fmt::Debug, hash::Hash};

use crc::{Crc, CRC_32_CKSUM};
use hashbrown::HashMap;
use spacepackets::{
    cfdp::{
        pdu::{FileDirectiveType, PduError, PduHeader},
        ChecksumType, ConditionCode, FaultHandlerCode, PduType, TransmissionMode,
    },
    util::{UnsignedByteField, UnsignedEnum},
};

#[cfg(feature = "alloc")]
use alloc::boxed::Box;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

use crate::time::CountdownProvider;

#[cfg(feature = "std")]
pub mod dest;
#[cfg(feature = "alloc")]
pub mod filestore;
#[cfg(feature = "std")]
pub mod source;
pub mod user;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EntityType {
    Sending,
    Receiving,
}

pub enum TimerContext {
    CheckLimit {
        local_id: UnsignedByteField,
        remote_id: UnsignedByteField,
        entity_type: EntityType,
    },
    NakActivity {
        expiry_time_seconds: f32,
    },
    PositiveAck {
        expiry_time_seconds: f32,
    },
}

/// A generic trait which allows CFDP entities to create check timers which are required to
/// implement special procedures in unacknowledged transmission mode, as specified in 4.6.3.2
/// and 4.6.3.3.
///
/// This trait also allows the creation of different check timers depending on context and purpose
/// of the timer, the runtime environment (e.g. standard clock timer vs. timer using an RTC) or
/// other factors.
///
/// The countdown timer is used by three mechanisms of the CFDP protocol.
///
/// ## 1. Check limit handling
///
/// The first mechanism is the check limit handling for unacknowledged transfers as specified
/// in 4.6.3.2 and 4.6.3.3 of the CFDP standard.
/// For this mechanism, the timer has different functionality depending on whether
/// the using entity is the sending entity or the receiving entity for the unacknowledged
/// transmission mode.
///
/// For the sending entity, this timer determines the expiry period for declaring a check limit
/// fault after sending an EOF PDU with requested closure. This allows a timeout of the transfer.
/// Also see 4.6.3.2 of the CFDP standard.
///
/// For the receiving entity, this timer determines the expiry period for incrementing a check
/// counter after an EOF PDU is received for an incomplete file transfer. This allows out-of-order
/// reception of file data PDUs and EOF PDUs. Also see 4.6.3.3 of the CFDP standard.
///
/// ## 2. NAK activity limit
///
/// The timer will be used to perform the NAK activity check as specified in 4.6.4.7 of the CFDP
/// standard. The expiration period will be provided by the NAK timer expiration limit of the
/// remote entity configuration.
///
/// ## 3. Positive ACK procedures
///
/// The timer will be used to perform the Positive Acknowledgement Procedures as specified in
/// 4.7.1 of the CFDP standard. The expiration period will be provided by the Positive ACK timer
/// interval of the remote entity configuration.
#[cfg(feature = "alloc")]
pub trait CheckTimerCreator {
    fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box<dyn CountdownProvider>;
}

/// Simple implementation of the [CheckTimerCreator] trait assuming a standard runtime.
/// It also assumes that a second accuracy of the check timer period is sufficient.
#[cfg(feature = "std")]
#[derive(Debug)]
pub struct StdCheckTimer {
    expiry_time_seconds: u64,
    start_time: std::time::Instant,
}

#[cfg(feature = "std")]
impl StdCheckTimer {
    pub fn new(expiry_time_seconds: u64) -> Self {
        Self {
            expiry_time_seconds,
            start_time: std::time::Instant::now(),
        }
    }
}

#[cfg(feature = "std")]
impl CountdownProvider for StdCheckTimer {
    fn has_expired(&self) -> bool {
        let elapsed_time = self.start_time.elapsed();
        if elapsed_time.as_secs() > self.expiry_time_seconds {
            return true;
        }
        false
    }

    fn reset(&mut self) {
        self.start_time = std::time::Instant::now();
    }
}

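// Illustrative sketch, not part of the original file: a CheckTimerCreator backed by the
// StdCheckTimer shown above. The timeout values are made-up placeholders; a real integration
// would derive them from the local and remote entity configurations.
#[cfg(feature = "std")]
pub struct StdCheckTimerCreatorSketch {
    pub check_limit_timeout_secs: u64,
}

#[cfg(feature = "std")]
impl CheckTimerCreator for StdCheckTimerCreatorSketch {
    fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box<dyn CountdownProvider> {
        // Pick a timeout depending on what the timer is used for.
        let timeout_secs = match timer_context {
            TimerContext::CheckLimit { .. } => self.check_limit_timeout_secs,
            TimerContext::NakActivity { expiry_time_seconds }
            | TimerContext::PositiveAck { expiry_time_seconds } => expiry_time_seconds as u64,
        };
        Box::new(StdCheckTimer::new(timeout_secs))
    }
}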
/// This structure models the remote entity configuration information as specified in chapter 8.3
/// of the CFDP standard.
///
/// Some of the fields which were not considered necessary for the Rust implementation
/// were omitted. Some other fields which are not contained inside the standard but are considered
/// necessary for the Rust implementation are included.
///
/// ## Notes on Positive Acknowledgment Procedures
///
/// The `positive_ack_timer_interval_seconds` and `positive_ack_timer_expiration_limit` will
/// be used for positive acknowledgement procedures as specified in CFDP chapter 4.7. The sending
/// entity will start the timer for any PDUs where an acknowledgment is required (e.g. EOF PDU).
/// If the expected ACK response has not been received within that interval, a counter will be
/// incremented and the timer will be reset. Once the counter exceeds the
/// `positive_ack_timer_expiration_limit`, a Positive ACK Limit Reached fault will be declared.
///
/// ## Notes on Deferred Lost Segment Procedures
///
/// This procedure will be active if an EOF (No Error) PDU is received in acknowledged mode. After
/// issuing the NAK sequence which has the whole file scope, a timer will be started. The timer is
/// reset when missing segments or missing metadata is received. The timer will be deactivated if
/// all missing data is received. If the timer expires, a new NAK sequence will be issued and a
/// counter will be incremented, which can lead to a NAK Limit Reached fault being declared.
///
/// ## Fields
///
/// * `entity_id` - The ID of the remote entity.
/// * `max_packet_len` - This determines the maximum length of all PDUs generated for that remote
///   entity, in addition to the `max_file_segment_len` attribute which also determines the size
///   of file data PDUs.
/// * `max_file_segment_len` - The maximum file segment length which determines the maximum size
///   of file data PDUs in addition to the `max_packet_len` attribute. If this field is set
///   to None, the maximum file segment length will be derived from the maximum packet length.
///   If this has some value which is smaller than the segment value derived from
///   `max_packet_len`, this value will be picked.
/// * `closure_requested_by_default` - If the closure requested field is not supplied as part of
///   the Put Request, it will be determined from this field in the remote configuration.
/// * `crc_on_transmission_by_default` - If the CRC option is not supplied as part of the Put
///   Request, it will be determined from this field in the remote configuration.
/// * `default_transmission_mode` - If the transmission mode is not supplied as part of the
///   Put Request, it will be determined from this field in the remote configuration.
/// * `disposition_on_cancellation` - Determines whether an incompletely received file is
///   discarded on transaction cancellation. Defaults to false.
/// * `default_crc_type` - Default checksum type used to calculate the file checksum for all file
///   transmissions to this remote entity.
/// * `check_limit` - This timer determines the expiry period for incrementing a check counter
///   after an EOF PDU is received for an incomplete file transfer. This allows out-of-order
///   reception of file data PDUs and EOF PDUs. Also see 4.6.3.3 of the CFDP standard. Defaults to
///   2, so the check limit timer may expire twice.
/// * `positive_ack_timer_interval_seconds` - See the notes on the Positive Acknowledgment
///   Procedures inside the struct documentation. Expected as floating point seconds. Defaults to
///   10 seconds.
/// * `positive_ack_timer_expiration_limit` - See the notes on the Positive Acknowledgment
///   Procedures inside the struct documentation. Defaults to 2, so the timer may expire twice.
/// * `immediate_nak_mode` - Specifies whether a NAK sequence should be issued immediately when a
///   file data gap or lost metadata is detected in the acknowledged mode. Defaults to true.
/// * `nak_timer_interval_seconds` - See the notes on the Deferred Lost Segment Procedure inside
///   the struct documentation. Expected as floating point seconds. Defaults to 10 seconds.
/// * `nak_timer_expiration_limit` - See the notes on the Deferred Lost Segment Procedure inside
///   the struct documentation. Defaults to 2, so the timer may expire twice.
#[derive(Debug, Copy, Clone)]
pub struct RemoteEntityConfig {
    pub entity_id: UnsignedByteField,
    pub max_packet_len: usize,
    pub max_file_segment_len: usize,
    pub closure_requested_by_default: bool,
    pub crc_on_transmission_by_default: bool,
    pub default_transmission_mode: TransmissionMode,
    pub default_crc_type: ChecksumType,
    pub positive_ack_timer_interval_seconds: f32,
    pub positive_ack_timer_expiration_limit: u32,
    pub check_limit: u32,
    pub disposition_on_cancellation: bool,
    pub immediate_nak_mode: bool,
    pub nak_timer_interval_seconds: f32,
    pub nak_timer_expiration_limit: u32,
}

impl RemoteEntityConfig {
    pub fn new_with_default_values(
        entity_id: UnsignedByteField,
        max_file_segment_len: usize,
        max_packet_len: usize,
        closure_requested_by_default: bool,
        crc_on_transmission_by_default: bool,
        default_transmission_mode: TransmissionMode,
        default_crc_type: ChecksumType,
    ) -> Self {
        Self {
            entity_id,
            max_file_segment_len,
            max_packet_len,
            closure_requested_by_default,
            crc_on_transmission_by_default,
            default_transmission_mode,
            default_crc_type,
            check_limit: 2,
            positive_ack_timer_interval_seconds: 10.0,
            positive_ack_timer_expiration_limit: 2,
            disposition_on_cancellation: false,
            immediate_nak_mode: true,
            nak_timer_interval_seconds: 10.0,
            nak_timer_expiration_limit: 2,
        }
    }
}

pub trait RemoteEntityConfigProvider {
    /// Retrieve the remote entity configuration for the given remote ID.
    fn get_remote_config(&self, remote_id: u64) -> Option<&RemoteEntityConfig>;
    fn get_remote_config_mut(&mut self, remote_id: u64) -> Option<&mut RemoteEntityConfig>;
    /// Add a new remote configuration. Returns [true] if the configuration was
    /// inserted successfully, and [false] if a configuration already exists.
    fn add_config(&mut self, cfg: &RemoteEntityConfig) -> bool;
    /// Remove a configuration. Returns [true] if the configuration was removed successfully,
    /// and [false] if no configuration exists for the given remote ID.
    fn remove_config(&mut self, remote_id: u64) -> bool;
}

#[cfg(feature = "std")]
#[derive(Default)]
pub struct StdRemoteEntityConfigProvider {
    remote_cfg_table: HashMap<u64, RemoteEntityConfig>,
}

#[cfg(feature = "std")]
impl RemoteEntityConfigProvider for StdRemoteEntityConfigProvider {
    fn get_remote_config(&self, remote_id: u64) -> Option<&RemoteEntityConfig> {
        self.remote_cfg_table.get(&remote_id)
    }
    fn get_remote_config_mut(&mut self, remote_id: u64) -> Option<&mut RemoteEntityConfig> {
        self.remote_cfg_table.get_mut(&remote_id)
    }
    fn add_config(&mut self, cfg: &RemoteEntityConfig) -> bool {
        self.remote_cfg_table
            .insert(cfg.entity_id.value(), *cfg)
            .is_some()
    }
    fn remove_config(&mut self, remote_id: u64) -> bool {
        self.remote_cfg_table.remove(&remote_id).is_some()
    }
}

|
||||
/// handler is called.
|
||||
///
|
||||
/// It is passed into the CFDP handlers as part of the [DefaultFaultHandler] and the local entity
|
||||
/// configuration and provides a way to specify custom user error handlers. This allows to
|
||||
/// implement some CFDP features like fault handler logging, which would not be possible
|
||||
/// generically otherwise.
|
||||
///
|
||||
/// For each error reported by the [DefaultFaultHandler], the appropriate fault handler callback
|
||||
/// will be called depending on the [FaultHandlerCode].
|
||||
pub trait UserFaultHandler {
|
||||
fn notice_of_suspension_cb(
|
||||
&mut self,
|
||||
transaction_id: TransactionId,
|
||||
cond: ConditionCode,
|
||||
progress: u64,
|
||||
);
|
||||
|
||||
fn notice_of_cancellation_cb(
|
||||
&mut self,
|
||||
transaction_id: TransactionId,
|
||||
cond: ConditionCode,
|
||||
progress: u64,
|
||||
);
|
||||
|
||||
fn abandoned_cb(&mut self, transaction_id: TransactionId, cond: ConditionCode, progress: u64);
|
||||
|
||||
fn ignore_cb(&mut self, transaction_id: TransactionId, cond: ConditionCode, progress: u64);
|
||||
}
|
||||
|
||||
/// This structure is used to implement the fault handling as specified in chapter 4.8 of the CFDP
|
||||
/// standard.
|
||||
///
|
||||
/// It does so by mapping each applicable [spacepackets::cfdp::ConditionCode] to a fault handler
|
||||
/// which is denoted by the four [spacepackets::cfdp::FaultHandlerCode]s. This code is used
|
||||
/// to select the error handling inside the CFDP handler itself in addition to dispatching to a
|
||||
/// user-provided callback function provided by the [UserFaultHandler].
|
||||
///
|
||||
/// Some note on the provided default settings:
|
||||
///
|
||||
/// - Checksum failures will be ignored by default. This is because for unacknowledged transfers,
|
||||
/// cancelling the transfer immediately would interfere with the check limit mechanism specified
|
||||
/// in chapter 4.6.3.3.
|
||||
/// - Unsupported checksum types will also be ignored by default. Even if the checksum type is
|
||||
/// not supported the file transfer might still have worked properly.
|
||||
///
|
||||
/// For all other faults, the default fault handling operation will be to cancel the transaction.
|
||||
/// These defaults can be overriden by using the [Self::set_fault_handler] method.
|
||||
/// Please note that in any case, fault handler overrides can be specified by the sending CFDP
|
||||
/// entity.
|
||||
pub struct DefaultFaultHandler {
|
||||
handler_array: [FaultHandlerCode; 10],
|
||||
// Could also change the user fault handler trait to have non mutable methods, but that limits
|
||||
// flexbility on the user side..
|
||||
user_fault_handler: RefCell<Box<dyn UserFaultHandler + Send>>,
|
||||
}
|
||||
|
||||
impl DefaultFaultHandler {
|
||||
fn condition_code_to_array_index(conditon_code: ConditionCode) -> Option<usize> {
|
||||
Some(match conditon_code {
|
||||
ConditionCode::PositiveAckLimitReached => 0,
|
||||
ConditionCode::KeepAliveLimitReached => 1,
|
||||
ConditionCode::InvalidTransmissionMode => 2,
|
||||
ConditionCode::FilestoreRejection => 3,
|
||||
ConditionCode::FileChecksumFailure => 4,
|
||||
ConditionCode::FileSizeError => 5,
|
||||
ConditionCode::NakLimitReached => 6,
|
||||
ConditionCode::InactivityDetected => 7,
|
||||
ConditionCode::CheckLimitReached => 8,
|
||||
ConditionCode::UnsupportedChecksumType => 9,
|
||||
_ => return None,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn set_fault_handler(
|
||||
&mut self,
|
||||
condition_code: ConditionCode,
|
||||
fault_handler: FaultHandlerCode,
|
||||
) {
|
||||
let array_idx = Self::condition_code_to_array_index(condition_code);
|
||||
if array_idx.is_none() {
|
||||
return;
|
||||
}
|
||||
self.handler_array[array_idx.unwrap()] = fault_handler;
|
||||
}
|
||||
|
||||
pub fn new(user_fault_handler: Box<dyn UserFaultHandler + Send>) -> Self {
|
||||
let mut init_array = [FaultHandlerCode::NoticeOfCancellation; 10];
|
||||
init_array
|
||||
[Self::condition_code_to_array_index(ConditionCode::FileChecksumFailure).unwrap()] =
|
||||
FaultHandlerCode::IgnoreError;
|
||||
init_array[Self::condition_code_to_array_index(ConditionCode::UnsupportedChecksumType)
|
||||
.unwrap()] = FaultHandlerCode::IgnoreError;
|
||||
Self {
|
||||
handler_array: init_array,
|
||||
user_fault_handler: RefCell::new(user_fault_handler),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_fault_handler(&self, condition_code: ConditionCode) -> FaultHandlerCode {
|
||||
let array_idx = Self::condition_code_to_array_index(condition_code);
|
||||
if array_idx.is_none() {
|
||||
return FaultHandlerCode::IgnoreError;
|
||||
}
|
||||
self.handler_array[array_idx.unwrap()]
|
||||
}
|
||||
|
||||
pub fn report_fault(
|
||||
&self,
|
||||
transaction_id: TransactionId,
|
||||
condition: ConditionCode,
|
||||
progress: u64,
|
||||
) -> FaultHandlerCode {
|
||||
let array_idx = Self::condition_code_to_array_index(condition);
|
||||
if array_idx.is_none() {
|
||||
return FaultHandlerCode::IgnoreError;
|
||||
}
|
||||
let fh_code = self.handler_array[array_idx.unwrap()];
|
||||
let mut handler_mut = self.user_fault_handler.borrow_mut();
|
||||
match fh_code {
|
||||
FaultHandlerCode::NoticeOfCancellation => {
|
||||
handler_mut.notice_of_cancellation_cb(transaction_id, condition, progress);
|
||||
}
|
||||
FaultHandlerCode::NoticeOfSuspension => {
|
||||
handler_mut.notice_of_suspension_cb(transaction_id, condition, progress);
|
||||
}
|
||||
FaultHandlerCode::IgnoreError => {
|
||||
handler_mut.ignore_cb(transaction_id, condition, progress);
|
||||
}
|
||||
FaultHandlerCode::AbandonTransaction => {
|
||||
handler_mut.abandoned_cb(transaction_id, condition, progress);
|
||||
}
|
||||
}
|
||||
fh_code
|
||||
}
|
||||
}
|
||||
|
||||
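// Illustrative sketch, not part of the original file: a minimal UserFaultHandler which only
// counts how often each callback fired. A flight implementation would typically emit telemetry
// or log the fault instead. It can be passed to DefaultFaultHandler::new via Box::new(..).
#[derive(Default)]
pub struct CountingFaultHandler {
    pub cancellations: u32,
    pub suspensions: u32,
    pub abandoned: u32,
    pub ignored: u32,
}

impl UserFaultHandler for CountingFaultHandler {
    fn notice_of_suspension_cb(&mut self, _id: TransactionId, _cond: ConditionCode, _progress: u64) {
        self.suspensions += 1;
    }
    fn notice_of_cancellation_cb(&mut self, _id: TransactionId, _cond: ConditionCode, _progress: u64) {
        self.cancellations += 1;
    }
    fn abandoned_cb(&mut self, _id: TransactionId, _cond: ConditionCode, _progress: u64) {
        self.abandoned += 1;
    }
    fn ignore_cb(&mut self, _id: TransactionId, _cond: ConditionCode, _progress: u64) {
        self.ignored += 1;
    }
}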
pub struct IndicationConfig {
|
||||
pub eof_sent: bool,
|
||||
pub eof_recv: bool,
|
||||
pub file_segment_recv: bool,
|
||||
pub transaction_finished: bool,
|
||||
pub suspended: bool,
|
||||
pub resumed: bool,
|
||||
}
|
||||
|
||||
impl Default for IndicationConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
eof_sent: true,
|
||||
eof_recv: true,
|
||||
file_segment_recv: true,
|
||||
transaction_finished: true,
|
||||
suspended: true,
|
||||
resumed: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct LocalEntityConfig {
|
||||
pub id: UnsignedByteField,
|
||||
pub indication_cfg: IndicationConfig,
|
||||
pub default_fault_handler: DefaultFaultHandler,
|
||||
}
|
||||
|
||||
/// The CFDP transaction ID of a CFDP transaction consists of the source entity ID and the sequence
|
||||
/// number of that transfer which is also determined by the CFDP source entity.
|
||||
#[derive(Debug, Eq, Copy, Clone)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub struct TransactionId {
|
||||
source_id: UnsignedByteField,
|
||||
seq_num: UnsignedByteField,
|
||||
}
|
||||
|
||||
impl TransactionId {
|
||||
pub fn new(source_id: UnsignedByteField, seq_num: UnsignedByteField) -> Self {
|
||||
Self { source_id, seq_num }
|
||||
}
|
||||
|
||||
pub fn source_id(&self) -> &UnsignedByteField {
|
||||
&self.source_id
|
||||
}
|
||||
|
||||
pub fn seq_num(&self) -> &UnsignedByteField {
|
||||
&self.seq_num
|
||||
}
|
||||
}
|
||||
|
||||
impl Hash for TransactionId {
|
||||
fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
|
||||
self.source_id.value().hash(state);
|
||||
self.seq_num.value().hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for TransactionId {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.source_id.value() == other.source_id.value()
|
||||
&& self.seq_num.value() == other.seq_num.value()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub enum TransactionStep {
|
||||
Idle = 0,
|
||||
TransactionStart = 1,
|
||||
ReceivingFileDataPdus = 2,
|
||||
ReceivingFileDataPdusWithCheckLimitHandling = 3,
|
||||
SendingAckPdu = 4,
|
||||
TransferCompletion = 5,
|
||||
SendingFinishedPdu = 6,
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub enum State {
|
||||
Idle = 0,
|
||||
Busy = 1,
|
||||
Suspended = 2,
|
||||
}
|
||||
|
||||
pub const CRC_32: Crc<u32> = Crc::<u32>::new(&CRC_32_CKSUM);
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub enum PacketTarget {
|
||||
SourceEntity,
|
||||
DestEntity,
|
||||
}
|
||||
|
||||
/// This is a helper struct which contains base information about a particular PDU packet.
|
||||
/// This is also necessary information for CFDP packet routing. For example, some packet types
|
||||
/// like file data PDUs can only be used by CFDP source entities.
|
||||
pub struct PacketInfo<'raw_packet> {
|
||||
pdu_type: PduType,
|
||||
pdu_directive: Option<FileDirectiveType>,
|
||||
target: PacketTarget,
|
||||
raw_packet: &'raw_packet [u8],
|
||||
}
|
||||
|
||||
impl<'raw> PacketInfo<'raw> {
|
||||
pub fn new(raw_packet: &'raw [u8]) -> Result<Self, PduError> {
|
||||
let (pdu_header, header_len) = PduHeader::from_bytes(raw_packet)?;
|
||||
if pdu_header.pdu_type() == PduType::FileData {
|
||||
return Ok(Self {
|
||||
pdu_type: pdu_header.pdu_type(),
|
||||
pdu_directive: None,
|
||||
target: PacketTarget::DestEntity,
|
||||
raw_packet,
|
||||
});
|
||||
}
|
||||
if pdu_header.pdu_datafield_len() < 1 {
|
||||
return Err(PduError::FormatError);
|
||||
}
|
||||
// Route depending on PDU type and directive type if applicable. Retrieve directive type
|
||||
// from the raw stream for better performance (with sanity and directive code check).
|
||||
// The routing is based on section 4.5 of the CFDP standard which specifies the PDU forwarding
|
||||
// procedure.
|
||||
let directive = FileDirectiveType::try_from(raw_packet[header_len]).map_err(|_| {
|
||||
PduError::InvalidDirectiveType {
|
||||
found: raw_packet[header_len],
|
||||
expected: None,
|
||||
}
|
||||
})?;
|
||||
let packet_target = match directive {
|
||||
// Section c) of 4.5.3: These PDUs should always be targeted towards the file sender a.k.a.
|
||||
// the source handler
|
||||
FileDirectiveType::NakPdu
|
||||
| FileDirectiveType::FinishedPdu
|
||||
| FileDirectiveType::KeepAlivePdu => PacketTarget::SourceEntity,
|
||||
// Section b) of 4.5.3: These PDUs should always be targeted towards the file receiver a.k.a.
|
||||
// the destination handler
|
||||
FileDirectiveType::MetadataPdu
|
||||
| FileDirectiveType::EofPdu
|
||||
| FileDirectiveType::PromptPdu => PacketTarget::DestEntity,
|
||||
// Section a): Recipient depends on the type of PDU that is being acknowledged. We can simply
|
||||
// extract the PDU type from the raw stream. If it is an EOF PDU, this packet is passed to
|
||||
// the source handler, for a Finished PDU, it is passed to the destination handler.
|
||||
FileDirectiveType::AckPdu => {
|
||||
let acked_directive = FileDirectiveType::try_from(raw_packet[header_len + 1])
|
||||
.map_err(|_| PduError::InvalidDirectiveType {
|
||||
found: raw_packet[header_len],
|
||||
expected: None,
|
||||
})?;
|
||||
if acked_directive == FileDirectiveType::EofPdu {
|
||||
PacketTarget::SourceEntity
|
||||
} else if acked_directive == FileDirectiveType::FinishedPdu {
|
||||
PacketTarget::DestEntity
|
||||
} else {
|
||||
// TODO: Consider a more specific error variant; re-using InvalidDirectiveType might be confusing here.
|
||||
return Err(PduError::InvalidDirectiveType {
|
||||
found: raw_packet[header_len + 1],
|
||||
expected: None,
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
Ok(Self {
|
||||
pdu_type: pdu_header.pdu_type(),
|
||||
pdu_directive: Some(directive),
|
||||
target: packet_target,
|
||||
raw_packet,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn pdu_type(&self) -> PduType {
|
||||
self.pdu_type
|
||||
}
|
||||
|
||||
pub fn pdu_directive(&self) -> Option<FileDirectiveType> {
|
||||
self.pdu_directive
|
||||
}
|
||||
|
||||
pub fn target(&self) -> PacketTarget {
|
||||
self.target
|
||||
}
|
||||
|
||||
pub fn raw_packet(&self) -> &[u8] {
|
||||
self.raw_packet
|
||||
}
|
||||
}
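A rough sketch of how the routing information carried by `PacketInfo` might be consumed by an entity multiplexer; the handler hand-over is only indicated by placeholder `println!` calls, not an API provided by this branch:

```rust
use satrs::cfdp::{PacketInfo, PacketTarget};
use spacepackets::cfdp::pdu::PduError;

// Hypothetical routing step: inspect a raw PDU and decide which handler owns it.
fn route_raw_pdu(raw_pdu: &[u8]) -> Result<(), PduError> {
    let packet_info = PacketInfo::new(raw_pdu)?;
    match packet_info.target() {
        // e.g. pass the raw packet on to the source handler instance
        PacketTarget::SourceEntity => {
            println!("routing {:?} to source handler", packet_info.pdu_type())
        }
        // e.g. pass the raw packet on to the destination handler instance
        PacketTarget::DestEntity => {
            println!("routing {:?} to destination handler", packet_info.pdu_type())
        }
    }
    Ok(())
}
```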
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use spacepackets::cfdp::{
|
||||
lv::Lv,
|
||||
pdu::{
|
||||
eof::EofPdu,
|
||||
file_data::FileDataPdu,
|
||||
metadata::{MetadataGenericParams, MetadataPduCreator},
|
||||
CommonPduConfig, FileDirectiveType, PduHeader, WritablePduPacket,
|
||||
},
|
||||
PduType,
|
||||
};
|
||||
|
||||
use crate::cfdp::PacketTarget;
|
||||
|
||||
use super::PacketInfo;
|
||||
|
||||
fn generic_pdu_header() -> PduHeader {
|
||||
let pdu_conf = CommonPduConfig::default();
|
||||
PduHeader::new_no_file_data(pdu_conf, 0)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_metadata_pdu_info() {
|
||||
let mut buf: [u8; 128] = [0; 128];
|
||||
let pdu_header = generic_pdu_header();
|
||||
let metadata_params = MetadataGenericParams::default();
|
||||
let src_file_name = "hello.txt";
|
||||
let dest_file_name = "hello-dest.txt";
|
||||
let src_lv = Lv::new_from_str(src_file_name).unwrap();
|
||||
let dest_lv = Lv::new_from_str(dest_file_name).unwrap();
|
||||
let metadata_pdu =
|
||||
MetadataPduCreator::new_no_opts(pdu_header, metadata_params, src_lv, dest_lv);
|
||||
metadata_pdu
|
||||
.write_to_bytes(&mut buf)
|
||||
.expect("writing metadata PDU failed");
|
||||
|
||||
let packet_info = PacketInfo::new(&buf).expect("creating packet info failed");
|
||||
assert_eq!(packet_info.pdu_type(), PduType::FileDirective);
|
||||
assert!(packet_info.pdu_directive().is_some());
|
||||
assert_eq!(
|
||||
packet_info.pdu_directive().unwrap(),
|
||||
FileDirectiveType::MetadataPdu
|
||||
);
|
||||
assert_eq!(packet_info.target(), PacketTarget::DestEntity);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filedata_pdu_info() {
|
||||
let mut buf: [u8; 128] = [0; 128];
|
||||
let pdu_header = generic_pdu_header();
|
||||
let file_data_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 0, &[]);
|
||||
file_data_pdu
|
||||
.write_to_bytes(&mut buf)
|
||||
.expect("writing file data PDU failed");
|
||||
let packet_info = PacketInfo::new(&buf).expect("creating packet info failed");
|
||||
assert_eq!(packet_info.pdu_type(), PduType::FileData);
|
||||
assert!(packet_info.pdu_directive().is_none());
|
||||
assert_eq!(packet_info.target(), PacketTarget::DestEntity);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_eof_pdu_info() {
|
||||
let mut buf: [u8; 128] = [0; 128];
|
||||
let pdu_header = generic_pdu_header();
|
||||
let eof_pdu = EofPdu::new_no_error(pdu_header, 0, 0);
|
||||
eof_pdu
|
||||
.write_to_bytes(&mut buf)
|
||||
.expect("writing file data PDU failed");
|
||||
let packet_info = PacketInfo::new(&buf).expect("creating packet info failed");
|
||||
assert_eq!(packet_info.pdu_type(), PduType::FileDirective);
|
||||
assert!(packet_info.pdu_directive().is_some());
|
||||
assert_eq!(
|
||||
packet_info.pdu_directive().unwrap(),
|
||||
FileDirectiveType::EofPdu
|
||||
);
|
||||
}
|
||||
}
|
satrs/src/cfdp/source.rs (new file, 15 lines)
@ -0,0 +1,15 @@
|
||||
#![allow(dead_code)]
|
||||
use spacepackets::util::UnsignedByteField;
|
||||
|
||||
pub struct SourceHandler {
|
||||
id: UnsignedByteField,
|
||||
}
|
||||
|
||||
impl SourceHandler {
|
||||
pub fn new(id: impl Into<UnsignedByteField>) -> Self {
|
||||
Self { id: id.into() }
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {}
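The source handler is still a stub on this branch, but the constructor already shows the intended entity ID handling: anything convertible into an `UnsignedByteField` can be passed. A tiny sketch; the `satrs::cfdp::source` path and the `UnsignedByteFieldU8` helper from `spacepackets` are assumptions:

```rust
use satrs::cfdp::source::SourceHandler;
use spacepackets::util::UnsignedByteFieldU8;

fn main() {
    // Local entity ID 5, encoded as a one-byte field.
    let _source_handler = SourceHandler::new(UnsignedByteFieldU8::new(5));
}
```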
|
satrs/src/cfdp/user.rs (new file, 96 lines)
@ -0,0 +1,96 @@
|
||||
use spacepackets::{
|
||||
cfdp::{
|
||||
pdu::{
|
||||
file_data::SegmentMetadata,
|
||||
finished::{DeliveryCode, FileStatus},
|
||||
},
|
||||
tlv::{msg_to_user::MsgToUserTlv, WritableTlv},
|
||||
ConditionCode,
|
||||
},
|
||||
util::UnsignedByteField,
|
||||
};
|
||||
|
||||
use super::TransactionId;
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct TransactionFinishedParams {
|
||||
pub id: TransactionId,
|
||||
pub condition_code: ConditionCode,
|
||||
pub delivery_code: DeliveryCode,
|
||||
pub file_status: FileStatus,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct MetadataReceivedParams<'src_file, 'dest_file, 'msgs_to_user> {
|
||||
pub id: TransactionId,
|
||||
pub source_id: UnsignedByteField,
|
||||
pub file_size: u64,
|
||||
pub src_file_name: &'src_file str,
|
||||
pub dest_file_name: &'dest_file str,
|
||||
pub msgs_to_user: &'msgs_to_user [MsgToUserTlv<'msgs_to_user>],
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
#[derive(Debug)]
|
||||
pub struct OwnedMetadataRecvdParams {
|
||||
pub id: TransactionId,
|
||||
pub source_id: UnsignedByteField,
|
||||
pub file_size: u64,
|
||||
pub src_file_name: alloc::string::String,
|
||||
pub dest_file_name: alloc::string::String,
|
||||
pub msgs_to_user: alloc::vec::Vec<alloc::vec::Vec<u8>>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl From<MetadataReceivedParams<'_, '_, '_>> for OwnedMetadataRecvdParams {
|
||||
fn from(value: MetadataReceivedParams) -> Self {
|
||||
Self::from(&value)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl From<&MetadataReceivedParams<'_, '_, '_>> for OwnedMetadataRecvdParams {
|
||||
fn from(value: &MetadataReceivedParams) -> Self {
|
||||
Self {
|
||||
id: value.id,
|
||||
source_id: value.source_id,
|
||||
file_size: value.file_size,
|
||||
src_file_name: value.src_file_name.into(),
|
||||
dest_file_name: value.dest_file_name.into(),
|
||||
msgs_to_user: value.msgs_to_user.iter().map(|tlv| tlv.to_vec()).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct FileSegmentRecvdParams<'seg_meta> {
|
||||
pub id: TransactionId,
|
||||
pub offset: u64,
|
||||
pub length: usize,
|
||||
pub segment_metadata: Option<&'seg_meta SegmentMetadata<'seg_meta>>,
|
||||
}
|
||||
|
||||
pub trait CfdpUser {
|
||||
fn transaction_indication(&mut self, id: &TransactionId);
|
||||
fn eof_sent_indication(&mut self, id: &TransactionId);
|
||||
fn transaction_finished_indication(&mut self, finished_params: &TransactionFinishedParams);
|
||||
fn metadata_recvd_indication(&mut self, md_recvd_params: &MetadataReceivedParams);
|
||||
fn file_segment_recvd_indication(&mut self, segment_recvd_params: &FileSegmentRecvdParams);
|
||||
// TODO: The standard does not strictly specify how the report information looks..
|
||||
fn report_indication(&mut self, id: &TransactionId);
|
||||
fn suspended_indication(&mut self, id: &TransactionId, condition_code: ConditionCode);
|
||||
fn resumed_indication(&mut self, id: &TransactionId, progress: u64);
|
||||
fn fault_indication(
|
||||
&mut self,
|
||||
id: &TransactionId,
|
||||
condition_code: ConditionCode,
|
||||
progress: u64,
|
||||
);
|
||||
fn abandoned_indication(
|
||||
&mut self,
|
||||
id: &TransactionId,
|
||||
condition_code: ConditionCode,
|
||||
progress: u64,
|
||||
);
|
||||
fn eof_recvd_indication(&mut self, id: &TransactionId);
|
||||
}
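A minimal sketch of a `CfdpUser` implementation which simply prints every indication. The module paths (`satrs::cfdp::user`) are assumptions, and a real user component would forward these indications to telemetry or housekeeping instead of `println!`:

```rust
use satrs::cfdp::user::{
    CfdpUser, FileSegmentRecvdParams, MetadataReceivedParams, TransactionFinishedParams,
};
use satrs::cfdp::TransactionId;
use spacepackets::cfdp::ConditionCode;

// Hypothetical indication sink which just logs what happened.
pub struct PrintingCfdpUser;

impl CfdpUser for PrintingCfdpUser {
    fn transaction_indication(&mut self, id: &TransactionId) {
        println!("transaction started: {:?}", id);
    }
    fn eof_sent_indication(&mut self, id: &TransactionId) {
        println!("EOF sent for {:?}", id);
    }
    fn transaction_finished_indication(&mut self, finished_params: &TransactionFinishedParams) {
        println!("transaction finished: {:?}", finished_params);
    }
    fn metadata_recvd_indication(&mut self, md_recvd_params: &MetadataReceivedParams) {
        println!("metadata received: {:?}", md_recvd_params);
    }
    fn file_segment_recvd_indication(&mut self, segment_recvd_params: &FileSegmentRecvdParams) {
        println!("file segment received: {:?}", segment_recvd_params);
    }
    fn report_indication(&mut self, id: &TransactionId) {
        println!("report requested for {:?}", id);
    }
    fn suspended_indication(&mut self, id: &TransactionId, condition_code: ConditionCode) {
        println!("{:?} suspended: {:?}", id, condition_code);
    }
    fn resumed_indication(&mut self, id: &TransactionId, progress: u64) {
        println!("{:?} resumed at progress {}", id, progress);
    }
    fn fault_indication(&mut self, id: &TransactionId, condition_code: ConditionCode, progress: u64) {
        println!("fault for {:?}: {:?} at progress {}", id, condition_code, progress);
    }
    fn abandoned_indication(&mut self, id: &TransactionId, condition_code: ConditionCode, progress: u64) {
        println!("{:?} abandoned: {:?} at progress {}", id, condition_code, progress);
    }
    fn eof_recvd_indication(&mut self, id: &TransactionId) {
        println!("EOF received for {:?}", id);
    }
}
```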
|
@ -15,24 +15,20 @@ pub enum OpResult {
|
||||
TerminationRequested,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum ExecutionType {
|
||||
Infinite,
|
||||
Cycles(u32),
|
||||
OneShot,
|
||||
}
|
||||
|
||||
pub trait Executable {
|
||||
pub trait Executable: Send {
|
||||
type Error;
|
||||
|
||||
fn exec_type(&self) -> ExecutionType;
|
||||
fn task_name(&self) -> &'static str;
|
||||
fn periodic_op(&mut self, op_code: i32) -> Result<OpResult, Self::Error>;
|
||||
}
|
||||
|
||||
pub trait ExecutableWithType: Executable {
|
||||
fn exec_type(&self) -> ExecutionType;
|
||||
}
|
||||
|
||||
/// This function allows executing one task which implements the [Executable] trait
|
||||
///
|
||||
/// # Arguments
|
||||
@ -43,10 +39,7 @@ pub trait ExecutableWithType: Executable {
|
||||
/// * `op_code`: Operation code which is passed to the executable task
|
||||
/// [operation call][Executable::periodic_op]
|
||||
/// * `termination`: Optional termination handler which can cancel threads with a broadcast
|
||||
pub fn exec_sched_single<
|
||||
T: ExecutableWithType<Error = E> + Send + 'static + ?Sized,
|
||||
E: Send + 'static,
|
||||
>(
|
||||
pub fn exec_sched_single<T: Executable<Error = E> + Send + 'static + ?Sized, E: Send + 'static>(
|
||||
mut executable: Box<T>,
|
||||
task_freq: Option<Duration>,
|
||||
op_code: i32,
|
||||
@ -95,10 +88,7 @@ pub fn exec_sched_single<
|
||||
/// * `task_freq`: Optional frequency of task. Required for periodic and fixed cycle tasks
|
||||
/// * `op_code`: Operation code which is passed to the executable task [operation call][Executable::periodic_op]
|
||||
/// * `termination`: Optional termination handler which can cancel threads with a broadcast
|
||||
pub fn exec_sched_multi<
|
||||
T: ExecutableWithType<Error = E> + Send + 'static + ?Sized,
|
||||
E: Send + 'static,
|
||||
>(
|
||||
pub fn exec_sched_multi<T: Executable<Error = E> + Send + 'static + ?Sized, E: Send + 'static>(
|
||||
task_name: &'static str,
|
||||
mut executable_vec: Vec<Box<T>>,
|
||||
task_freq: Option<Duration>,
|
||||
@ -152,10 +142,7 @@ pub fn exec_sched_multi<
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{
|
||||
exec_sched_multi, exec_sched_single, Executable, ExecutableWithType, ExecutionType,
|
||||
OpResult,
|
||||
};
|
||||
use super::{exec_sched_multi, exec_sched_single, Executable, ExecutionType, OpResult};
|
||||
use bus::Bus;
|
||||
use std::boxed::Box;
|
||||
use std::error::Error;
|
||||
@ -221,6 +208,10 @@ mod tests {
|
||||
impl Executable for OneShotTask {
|
||||
type Error = ExampleError;
|
||||
|
||||
fn exec_type(&self) -> ExecutionType {
|
||||
ExecutionType::OneShot
|
||||
}
|
||||
|
||||
fn task_name(&self) -> &'static str {
|
||||
ONE_SHOT_TASK_NAME
|
||||
}
|
||||
@ -238,17 +229,15 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
impl ExecutableWithType for OneShotTask {
|
||||
fn exec_type(&self) -> ExecutionType {
|
||||
ExecutionType::OneShot
|
||||
}
|
||||
}
|
||||
|
||||
const CYCLE_TASK_NAME: &str = "Fixed Cycles Task";
|
||||
|
||||
impl Executable for FixedCyclesTask {
|
||||
type Error = ExampleError;
|
||||
|
||||
fn exec_type(&self) -> ExecutionType {
|
||||
ExecutionType::Cycles(self.cycles)
|
||||
}
|
||||
|
||||
fn task_name(&self) -> &'static str {
|
||||
CYCLE_TASK_NAME
|
||||
}
|
||||
@ -266,17 +255,15 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
impl ExecutableWithType for FixedCyclesTask {
|
||||
fn exec_type(&self) -> ExecutionType {
|
||||
ExecutionType::Cycles(self.cycles)
|
||||
}
|
||||
}
|
||||
|
||||
const PERIODIC_TASK_NAME: &str = "Periodic Task";
|
||||
|
||||
impl Executable for PeriodicTask {
|
||||
type Error = ExampleError;
|
||||
|
||||
fn exec_type(&self) -> ExecutionType {
|
||||
ExecutionType::Infinite
|
||||
}
|
||||
|
||||
fn task_name(&self) -> &'static str {
|
||||
PERIODIC_TASK_NAME
|
||||
}
|
||||
@ -294,12 +281,6 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
impl ExecutableWithType for PeriodicTask {
|
||||
fn exec_type(&self) -> ExecutionType {
|
||||
ExecutionType::Infinite
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_one_shot() {
|
||||
let expected_op_code = 42;
|
||||
@ -442,7 +423,7 @@ mod tests {
|
||||
});
|
||||
assert_eq!(cycled_task_0.task_name(), CYCLE_TASK_NAME);
|
||||
assert_eq!(one_shot_task.task_name(), ONE_SHOT_TASK_NAME);
|
||||
let task_vec: Vec<Box<dyn ExecutableWithType<Error = ExampleError> + Send>> =
|
||||
let task_vec: Vec<Box<dyn Executable<Error = ExampleError>>> =
|
||||
vec![one_shot_task, cycled_task_0, cycled_task_1];
|
||||
let jh = exec_sched_multi(
|
||||
"multi-task-name",
|
||||
@ -512,7 +493,7 @@ mod tests {
|
||||
});
|
||||
assert_eq!(periodic_task_0.task_name(), PERIODIC_TASK_NAME);
|
||||
assert_eq!(periodic_task_1.task_name(), PERIODIC_TASK_NAME);
|
||||
let task_vec: Vec<Box<dyn ExecutableWithType<Error = ExampleError> + Send>> =
|
||||
let task_vec: Vec<Box<dyn Executable<Error = ExampleError>>> =
|
||||
vec![cycled_task, periodic_task_0, periodic_task_1];
|
||||
let jh = exec_sched_multi(
|
||||
"multi-task-name",
|
||||
|
@ -1,4 +1,4 @@
|
||||
//! # sat-rs: A library to build on-board software for remote systems
|
||||
//! # sat-rs: A framework to build on-board software for remote systems
|
||||
//!
|
||||
//! You can find more information about the sat-rs framework on the
|
||||
//! [homepage](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/).
|
||||
@ -14,7 +14,7 @@
|
||||
//! - The [pus] module which provides special support for projects using
|
||||
//! the [ECSS PUS C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
|
||||
#![no_std]
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![cfg_attr(docs_rs, feature(doc_auto_cfg))]
|
||||
#[cfg(feature = "alloc")]
|
||||
extern crate alloc;
|
||||
#[cfg(feature = "alloc")]
|
||||
@ -22,6 +22,8 @@ extern crate downcast_rs;
|
||||
#[cfg(any(feature = "std", test))]
|
||||
extern crate std;
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub mod cfdp;
|
||||
pub mod encoding;
|
||||
pub mod event_man;
|
||||
pub mod events;
|
||||
@ -36,10 +38,9 @@ pub mod pus;
|
||||
pub mod queue;
|
||||
pub mod request;
|
||||
pub mod res_code;
|
||||
pub mod seq_count;
|
||||
pub mod time;
|
||||
pub mod tmtc;
|
||||
#[cfg(feature = "alloc")]
|
||||
pub mod scheduling;
|
||||
|
||||
pub mod action;
|
||||
pub mod hk;
|
||||
|
@ -1584,21 +1584,23 @@ mod tests {
|
||||
mod heapless_tests {
|
||||
use super::*;
|
||||
use crate::static_subpool;
|
||||
use core::ptr::addr_of_mut;
|
||||
use core::mem::MaybeUninit;
|
||||
|
||||
const SUBPOOL_1_BLOCK_SIZE: usize = 4;
|
||||
const SUBPOOL_1_NUM_ELEMENTS: u16 = 4;
|
||||
static mut SUBPOOL_1: [u8; SUBPOOL_1_NUM_ELEMENTS as usize * SUBPOOL_1_BLOCK_SIZE] =
|
||||
[0; SUBPOOL_1_NUM_ELEMENTS as usize * SUBPOOL_1_BLOCK_SIZE];
|
||||
static mut SUBPOOL_1_SIZES: [usize; SUBPOOL_1_NUM_ELEMENTS as usize] =
|
||||
[STORE_FREE; SUBPOOL_1_NUM_ELEMENTS as usize];
|
||||
static mut SUBPOOL_1: MaybeUninit<
|
||||
[u8; SUBPOOL_1_NUM_ELEMENTS as usize * SUBPOOL_1_BLOCK_SIZE],
|
||||
> = MaybeUninit::new([0; SUBPOOL_1_NUM_ELEMENTS as usize * SUBPOOL_1_BLOCK_SIZE]);
|
||||
static mut SUBPOOL_1_SIZES: MaybeUninit<[usize; SUBPOOL_1_NUM_ELEMENTS as usize]> =
|
||||
MaybeUninit::new([STORE_FREE; SUBPOOL_1_NUM_ELEMENTS as usize]);
|
||||
|
||||
const SUBPOOL_2_NUM_ELEMENTS: u16 = 2;
|
||||
const SUBPOOL_2_BLOCK_SIZE: usize = 8;
|
||||
static mut SUBPOOL_2: [u8; SUBPOOL_2_NUM_ELEMENTS as usize * SUBPOOL_2_BLOCK_SIZE] =
|
||||
[0; SUBPOOL_2_NUM_ELEMENTS as usize * SUBPOOL_2_BLOCK_SIZE];
|
||||
static mut SUBPOOL_2_SIZES: [usize; SUBPOOL_2_NUM_ELEMENTS as usize] =
|
||||
[STORE_FREE; SUBPOOL_2_NUM_ELEMENTS as usize];
|
||||
static mut SUBPOOL_2: MaybeUninit<
|
||||
[u8; SUBPOOL_2_NUM_ELEMENTS as usize * SUBPOOL_2_BLOCK_SIZE],
|
||||
> = MaybeUninit::new([0; SUBPOOL_2_NUM_ELEMENTS as usize * SUBPOOL_2_BLOCK_SIZE]);
|
||||
static mut SUBPOOL_2_SIZES: MaybeUninit<[usize; SUBPOOL_2_NUM_ELEMENTS as usize]> =
|
||||
MaybeUninit::new([STORE_FREE; SUBPOOL_2_NUM_ELEMENTS as usize]);
|
||||
|
||||
const SUBPOOL_3_NUM_ELEMENTS: u16 = 1;
|
||||
const SUBPOOL_3_BLOCK_SIZE: usize = 16;
|
||||
@ -1641,18 +1643,18 @@ mod tests {
|
||||
StaticHeaplessMemoryPool::new(false);
|
||||
assert!(heapless_pool
|
||||
.grow(
|
||||
unsafe { &mut *addr_of_mut!(SUBPOOL_1) },
|
||||
unsafe { &mut *addr_of_mut!(SUBPOOL_1_SIZES) },
|
||||
unsafe { SUBPOOL_1.assume_init_mut() },
|
||||
unsafe { SUBPOOL_1_SIZES.assume_init_mut() },
|
||||
SUBPOOL_1_NUM_ELEMENTS,
|
||||
true
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
assert!(heapless_pool
|
||||
.grow(
|
||||
unsafe { &mut *addr_of_mut!(SUBPOOL_2) },
|
||||
unsafe { &mut *addr_of_mut!(SUBPOOL_2_SIZES) },
|
||||
unsafe { SUBPOOL_2.assume_init_mut() },
|
||||
unsafe { SUBPOOL_2_SIZES.assume_init_mut() },
|
||||
SUBPOOL_2_NUM_ELEMENTS,
|
||||
true
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
assert!(heapless_pool
|
||||
@ -1660,7 +1662,7 @@ mod tests {
|
||||
unsafe { SUBPOOL_3.assume_init_mut() },
|
||||
unsafe { SUBPOOL_3_SIZES.assume_init_mut() },
|
||||
SUBPOOL_3_NUM_ELEMENTS,
|
||||
true
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
heapless_pool
|
||||
@ -1780,10 +1782,10 @@ mod tests {
|
||||
StaticHeaplessMemoryPool::new(true);
|
||||
assert!(heapless_pool
|
||||
.grow(
|
||||
unsafe { &mut *addr_of_mut!(SUBPOOL_2) },
|
||||
unsafe { &mut *addr_of_mut!(SUBPOOL_2_SIZES) },
|
||||
unsafe { SUBPOOL_2.assume_init_mut() },
|
||||
unsafe { SUBPOOL_2_SIZES.assume_init_mut() },
|
||||
SUBPOOL_2_NUM_ELEMENTS,
|
||||
true
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
assert!(heapless_pool
|
||||
@ -1791,7 +1793,7 @@ mod tests {
|
||||
unsafe { SUBPOOL_4.assume_init_mut() },
|
||||
unsafe { SUBPOOL_4_SIZES.assume_init_mut() },
|
||||
SUBPOOL_4_NUM_ELEMENTS,
|
||||
true
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
generic_test_spills_to_higher_subpools(&mut heapless_pool);
|
||||
@ -1806,7 +1808,7 @@ mod tests {
|
||||
unsafe { SUBPOOL_5.assume_init_mut() },
|
||||
unsafe { SUBPOOL_5_SIZES.assume_init_mut() },
|
||||
SUBPOOL_5_NUM_ELEMENTS,
|
||||
true
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
assert!(heapless_pool
|
||||
@ -1814,7 +1816,7 @@ mod tests {
|
||||
unsafe { SUBPOOL_3.assume_init_mut() },
|
||||
unsafe { SUBPOOL_3_SIZES.assume_init_mut() },
|
||||
SUBPOOL_3_NUM_ELEMENTS,
|
||||
true
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
generic_test_spillage_fails_as_well(&mut heapless_pool);
|
||||
@ -1829,7 +1831,7 @@ mod tests {
|
||||
unsafe { SUBPOOL_5.assume_init_mut() },
|
||||
unsafe { SUBPOOL_5_SIZES.assume_init_mut() },
|
||||
SUBPOOL_5_NUM_ELEMENTS,
|
||||
true
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
assert!(heapless_pool
|
||||
@ -1837,7 +1839,7 @@ mod tests {
|
||||
unsafe { SUBPOOL_6.assume_init_mut() },
|
||||
unsafe { SUBPOOL_6_SIZES.assume_init_mut() },
|
||||
SUBPOOL_6_NUM_ELEMENTS,
|
||||
true
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
assert!(heapless_pool
|
||||
@ -1845,7 +1847,7 @@ mod tests {
|
||||
unsafe { SUBPOOL_3.assume_init_mut() },
|
||||
unsafe { SUBPOOL_3_SIZES.assume_init_mut() },
|
||||
SUBPOOL_3_NUM_ELEMENTS,
|
||||
true
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
generic_test_spillage_works_across_multiple_subpools(&mut heapless_pool);
|
||||
@ -1860,7 +1862,7 @@ mod tests {
|
||||
unsafe { SUBPOOL_5.assume_init_mut() },
|
||||
unsafe { SUBPOOL_5_SIZES.assume_init_mut() },
|
||||
SUBPOOL_5_NUM_ELEMENTS,
|
||||
true
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
assert!(heapless_pool
|
||||
@ -1868,7 +1870,7 @@ mod tests {
|
||||
unsafe { SUBPOOL_6.assume_init_mut() },
|
||||
unsafe { SUBPOOL_6_SIZES.assume_init_mut() },
|
||||
SUBPOOL_6_NUM_ELEMENTS,
|
||||
true
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
assert!(heapless_pool
|
||||
@ -1876,7 +1878,7 @@ mod tests {
|
||||
unsafe { SUBPOOL_3.assume_init_mut() },
|
||||
unsafe { SUBPOOL_3_SIZES.assume_init_mut() },
|
||||
SUBPOOL_3_NUM_ELEMENTS,
|
||||
true
|
||||
false
|
||||
)
|
||||
.is_ok());
|
||||
generic_test_spillage_fails_across_multiple_subpools(&mut heapless_pool);
|
||||
|
@ -20,7 +20,7 @@
|
||||
//! VerificationReportingProvider, VerificationReporterCfg, VerificationReporter
|
||||
//! };
|
||||
//! use satrs::tmtc::{SharedStaticMemoryPool, PacketSenderWithSharedPool};
|
||||
//! use satrs::spacepackets::seq_count::SeqCountProviderSimple;
|
||||
//! use satrs::seq_count::SeqCountProviderSimple;
|
||||
//! use satrs::request::UniqueApidTargetId;
|
||||
//! use spacepackets::ecss::PusPacket;
|
||||
//! use spacepackets::SpHeader;
|
||||
@ -97,8 +97,8 @@ use spacepackets::ecss::EcssEnumeration;
|
||||
use spacepackets::{ByteConversionError, CcsdsPacket, PacketId, PacketSequenceCtrl};
|
||||
use spacepackets::{SpHeader, MAX_APID};
|
||||
|
||||
pub use crate::seq_count::SeqCountProviderSimple;
|
||||
pub use spacepackets::ecss::verification::*;
|
||||
pub use spacepackets::seq_count::SeqCountProviderSimple;
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub use alloc_mod::*;
|
||||
@ -1702,7 +1702,7 @@ pub mod tests {
|
||||
};
|
||||
use crate::pus::{ChannelWithId, PusTmVariant};
|
||||
use crate::request::MessageMetadata;
|
||||
use crate::spacepackets::seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProvider};
|
||||
use crate::seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore};
|
||||
use crate::tmtc::{PacketSenderWithSharedPool, SharedPacketPool};
|
||||
use crate::ComponentId;
|
||||
use alloc::format;
|
||||
|
@ -1,416 +0,0 @@
|
||||
use core::{convert::Infallible, fmt::Debug, time::Duration};
|
||||
use std::time::Instant;
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::executable::Executable;
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub use std_mod::*;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct SchedulingTable {
|
||||
execution_frequency: Duration,
|
||||
pub table: alloc::vec::Vec<u32>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum InvalidSlotError {
|
||||
#[error("slot time is larger than the execution frequency")]
|
||||
SlotTimeLargerThanFrequency,
|
||||
#[error("slot time is smaller than previous slot")]
|
||||
SmallerThanPreviousSlot {
|
||||
slot_time_ms: u32,
|
||||
prev_slot_time_ms: u32,
|
||||
},
|
||||
}
|
||||
|
||||
impl SchedulingTable {
|
||||
pub fn new(execution_frequency: Duration) -> Self {
|
||||
Self {
|
||||
execution_frequency,
|
||||
table: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_slot(&mut self, relative_execution_time_ms: u32) -> Result<(), InvalidSlotError> {
|
||||
if relative_execution_time_ms > self.execution_frequency.as_millis() as u32 {
|
||||
return Err(InvalidSlotError::SlotTimeLargerThanFrequency);
|
||||
}
|
||||
|
||||
if !self.table.is_empty() {
|
||||
let prev_slot_ms = *self.table.last().unwrap();
|
||||
if relative_execution_time_ms < prev_slot_ms {
|
||||
return Err(InvalidSlotError::SmallerThanPreviousSlot {
|
||||
slot_time_ms: relative_execution_time_ms,
|
||||
prev_slot_time_ms: *self.table.last().unwrap(),
|
||||
});
|
||||
}
|
||||
}
|
||||
self.table.push(relative_execution_time_ms);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum TaskWithSchedulingTableError {
|
||||
#[error("scheudlig table error: {0}")]
|
||||
InvalidSlot(#[from] InvalidSlotError),
|
||||
#[error("task lock error")]
|
||||
LockError,
|
||||
#[error("task borrow error")]
|
||||
BorrowError,
|
||||
}
|
||||
|
||||
pub trait DeadlineMissedHandler {
|
||||
fn deadline_missed_callback(&mut self, task_name: &'static str, op_code: i32);
|
||||
}
|
||||
|
||||
pub trait TaskExecutor {
|
||||
fn with_task<F: FnOnce(&mut dyn Executable<Error = Infallible>)>(&self, f: F);
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub mod std_mod {
|
||||
use core::cell::RefCell;
|
||||
use std::{
|
||||
rc::Rc,
|
||||
sync::{Arc, Mutex},
|
||||
vec::Vec,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
impl TaskExecutor for Arc<Mutex<dyn Executable<Error = Infallible> + Send>> {
|
||||
fn with_task<F: FnOnce(&mut dyn Executable<Error = Infallible>)>(&self, f: F) {
|
||||
let mut task = self.lock().unwrap();
|
||||
f(&mut *task);
|
||||
}
|
||||
}
|
||||
|
||||
impl TaskExecutor for Rc<RefCell<dyn Executable<Error = Infallible>>> {
|
||||
fn with_task<F: FnOnce(&mut dyn Executable<Error = Infallible>)>(&self, f: F) {
|
||||
let mut task = self.borrow_mut();
|
||||
f(&mut *task);
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TaskWithOpCode<T: TaskExecutor> {
|
||||
task: T,
|
||||
op_code: i32,
|
||||
}
|
||||
|
||||
pub struct TaskWithSchedulingTable<T: TaskExecutor> {
|
||||
start_of_slot: Instant,
|
||||
end_of_slot: Instant,
|
||||
deadline_missed_ms_count: u32,
|
||||
table: SchedulingTable,
|
||||
tasks: Vec<TaskWithOpCode<T>>,
|
||||
}
|
||||
|
||||
impl TaskWithSchedulingTable<Rc<RefCell<dyn Executable<Error = Infallible>>>> {
|
||||
/// Add a new task to the scheduling table
|
||||
///
|
||||
/// The task needs to be wrapped inside [Rc] and [RefCell]. The task is not sendable and
|
||||
/// needs to be created inside the target thread.
|
||||
pub fn add_task(
|
||||
&mut self,
|
||||
relative_execution_time_ms: u32,
|
||||
task: Rc<RefCell<dyn Executable<Error = Infallible>>>,
|
||||
op_code: i32,
|
||||
) -> Result<(), TaskWithSchedulingTableError> {
|
||||
self.table.add_slot(relative_execution_time_ms)?;
|
||||
self.tasks.push(TaskWithOpCode { task, op_code });
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl TaskWithSchedulingTable<Arc<Mutex<dyn Executable<Error = Infallible> + Send>>> {
|
||||
/// Add a new task to the scheduling table
|
||||
///
|
||||
/// The task needs to be wrapped inside [Arc] and [Mutex], but the task can be sent to
|
||||
/// a different thread.
|
||||
pub fn add_task_sendable(
|
||||
&mut self,
|
||||
relative_execution_time_ms: u32,
|
||||
task: Arc<Mutex<dyn Executable<Error = Infallible> + Send>>,
|
||||
op_code: i32,
|
||||
) -> Result<(), TaskWithSchedulingTableError> {
|
||||
self.table.add_slot(relative_execution_time_ms)?;
|
||||
self.tasks.push(TaskWithOpCode { task, op_code });
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: TaskExecutor> TaskWithSchedulingTable<T> {
|
||||
pub fn new(execution_frequency: Duration) -> Self {
|
||||
Self {
|
||||
start_of_slot: Instant::now(),
|
||||
end_of_slot: Instant::now(),
|
||||
deadline_missed_ms_count: 10,
|
||||
table: SchedulingTable::new(execution_frequency),
|
||||
tasks: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Can be used to set the start of the slot to the current time. This is useful if a custom
|
||||
/// runner implementation is used instead of the [Self::start] method.
|
||||
pub fn init_start_of_slot(&mut self) {
|
||||
self.start_of_slot = Instant::now();
|
||||
}
|
||||
|
||||
pub fn run_one_task_cycle(
|
||||
&mut self,
|
||||
deadline_missed_cb: &mut impl DeadlineMissedHandler,
|
||||
) -> Result<(), TaskWithSchedulingTableError> {
|
||||
self.end_of_slot = self.start_of_slot + self.table.execution_frequency;
|
||||
|
||||
for (&relative_execution_time_ms, task_with_op_code) in
|
||||
self.table.table.iter().zip(self.tasks.iter_mut())
|
||||
{
|
||||
let scheduled_execution_time = self.start_of_slot
|
||||
+ core::time::Duration::from_millis(relative_execution_time_ms as u64);
|
||||
let now = Instant::now();
|
||||
|
||||
if now < scheduled_execution_time {
|
||||
std::thread::sleep(scheduled_execution_time - now);
|
||||
} else if (now - scheduled_execution_time).as_millis()
|
||||
> self.deadline_missed_ms_count.into()
|
||||
{
|
||||
task_with_op_code.task.with_task(|task| {
|
||||
deadline_missed_cb
|
||||
.deadline_missed_callback(task.task_name(), task_with_op_code.op_code);
|
||||
});
|
||||
}
|
||||
|
||||
task_with_op_code.task.with_task(|task| {
|
||||
// Unwrapping is okay here because we constrain the tasks to be infallible.
|
||||
task.periodic_op(task_with_op_code.op_code).unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
if now <= self.end_of_slot {
|
||||
let diff = self.end_of_slot - now;
|
||||
std::thread::sleep(diff);
|
||||
self.start_of_slot = self.end_of_slot;
|
||||
} else if now > self.end_of_slot + self.table.execution_frequency {
|
||||
// We are getting significantly out of sync. Set the new start time to now.
|
||||
self.start_of_slot = now;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use core::{cell::RefCell, convert::Infallible, time::Duration};
|
||||
use std::{
|
||||
println,
|
||||
rc::Rc,
|
||||
sync::{
|
||||
mpsc::{self, TryRecvError},
|
||||
Arc, Mutex,
|
||||
},
|
||||
time::Instant,
|
||||
};
|
||||
|
||||
use crate::executable::{Executable, OpResult};
|
||||
|
||||
use super::{DeadlineMissedHandler, TaskWithSchedulingTable};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct CallInfo {
|
||||
time: std::time::Instant,
|
||||
op_code: i32,
|
||||
}
|
||||
|
||||
pub struct Task1 {
|
||||
called_queue: mpsc::Sender<CallInfo>,
|
||||
}
|
||||
|
||||
impl Executable for Task1 {
|
||||
type Error = Infallible;
|
||||
fn task_name(&self) -> &'static str {
|
||||
"Task1"
|
||||
}
|
||||
fn periodic_op(&mut self, op_code: i32) -> Result<OpResult, Self::Error> {
|
||||
self.called_queue
|
||||
.send(CallInfo {
|
||||
time: Instant::now(),
|
||||
op_code,
|
||||
})
|
||||
.unwrap();
|
||||
Ok(OpResult::Ok)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Task2 {
|
||||
called_queue: mpsc::Sender<CallInfo>,
|
||||
}
|
||||
|
||||
impl Executable for Task2 {
|
||||
type Error = Infallible;
|
||||
fn task_name(&self) -> &'static str {
|
||||
"Task2"
|
||||
}
|
||||
fn periodic_op(&mut self, op_code: i32) -> Result<OpResult, Self::Error> {
|
||||
self.called_queue
|
||||
.send(CallInfo {
|
||||
time: Instant::now(),
|
||||
op_code,
|
||||
})
|
||||
.unwrap();
|
||||
Ok(OpResult::Ok)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct DeadlineMissed {
|
||||
call_count: u32,
|
||||
}
|
||||
|
||||
impl DeadlineMissedHandler for DeadlineMissed {
|
||||
fn deadline_missed_callback(&mut self, task_name: &'static str, _op_code: i32) {
|
||||
println!("task name {task_name} missed the deadline");
|
||||
self.call_count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn basic_test() {
|
||||
let (tx_t1, rx_t1) = mpsc::channel();
|
||||
let (tx_t2, rx_t2) = mpsc::channel();
|
||||
let t1 = Task1 {
|
||||
called_queue: tx_t1,
|
||||
};
|
||||
let t2 = Task2 {
|
||||
called_queue: tx_t2,
|
||||
};
|
||||
let mut deadline_missed_cb = DeadlineMissed::default();
|
||||
let mut exec_task = TaskWithSchedulingTable::new(Duration::from_millis(200));
|
||||
let t1_first_slot = Rc::new(RefCell::new(t1));
|
||||
let t1_second_slot = t1_first_slot.clone();
|
||||
let t2_first_slot = Rc::new(RefCell::new(t2));
|
||||
let t2_second_slot = t2_first_slot.clone();
|
||||
|
||||
exec_task.add_task(0, t1_first_slot, 0).unwrap();
|
||||
exec_task.add_task(50, t1_second_slot, -1).unwrap();
|
||||
exec_task.add_task(100, t2_first_slot, 1).unwrap();
|
||||
exec_task.add_task(150, t2_second_slot, 2).unwrap();
|
||||
let now = Instant::now();
|
||||
exec_task.init_start_of_slot();
|
||||
exec_task
|
||||
.run_one_task_cycle(&mut deadline_missed_cb)
|
||||
.unwrap();
|
||||
let mut call_info = rx_t1.try_recv().unwrap();
|
||||
assert_eq!(call_info.op_code, 0);
|
||||
let diff_call_to_start = call_info.time - now;
|
||||
assert!(diff_call_to_start.as_millis() < 30);
|
||||
call_info = rx_t1.try_recv().unwrap();
|
||||
assert_eq!(call_info.op_code, -1);
|
||||
let diff_call_to_start = call_info.time - now;
|
||||
assert!(diff_call_to_start.as_millis() < 80);
|
||||
assert!(diff_call_to_start.as_millis() >= 50);
|
||||
matches!(rx_t1.try_recv().unwrap_err(), TryRecvError::Empty);
|
||||
|
||||
call_info = rx_t2.try_recv().unwrap();
|
||||
assert_eq!(call_info.op_code, 1);
|
||||
let diff_call_to_start = call_info.time - now;
|
||||
assert!(diff_call_to_start.as_millis() < 120);
|
||||
assert!(diff_call_to_start.as_millis() >= 100);
|
||||
call_info = rx_t2.try_recv().unwrap();
|
||||
assert_eq!(call_info.op_code, 2);
|
||||
let diff_call_to_start = call_info.time - now;
|
||||
assert!(diff_call_to_start.as_millis() < 180);
|
||||
assert!(diff_call_to_start.as_millis() >= 150);
|
||||
matches!(rx_t2.try_recv().unwrap_err(), TryRecvError::Empty);
|
||||
assert_eq!(deadline_missed_cb.call_count, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn basic_test_with_arc_mutex() {
|
||||
let (tx_t1, rx_t1) = mpsc::channel();
|
||||
let (tx_t2, rx_t2) = mpsc::channel();
|
||||
let t1 = Task1 {
|
||||
called_queue: tx_t1,
|
||||
};
|
||||
let t2 = Task2 {
|
||||
called_queue: tx_t2,
|
||||
};
|
||||
let mut deadline_missed_cb = DeadlineMissed::default();
|
||||
let mut exec_task = TaskWithSchedulingTable::new(Duration::from_millis(200));
|
||||
let t1_first_slot = Arc::new(Mutex::new(t1));
|
||||
let t1_second_slot = t1_first_slot.clone();
|
||||
let t2_first_slot = Arc::new(Mutex::new(t2));
|
||||
let t2_second_slot = t2_first_slot.clone();
|
||||
|
||||
exec_task.add_task_sendable(0, t1_first_slot, 0).unwrap();
|
||||
exec_task.add_task_sendable(50, t1_second_slot, -1).unwrap();
|
||||
exec_task.add_task_sendable(100, t2_first_slot, 1).unwrap();
|
||||
exec_task.add_task_sendable(150, t2_second_slot, 2).unwrap();
|
||||
let now = Instant::now();
|
||||
exec_task.init_start_of_slot();
|
||||
exec_task
|
||||
.run_one_task_cycle(&mut deadline_missed_cb)
|
||||
.unwrap();
|
||||
let mut call_info = rx_t1.try_recv().unwrap();
|
||||
assert_eq!(call_info.op_code, 0);
|
||||
let diff_call_to_start = call_info.time - now;
|
||||
assert!(diff_call_to_start.as_millis() < 30);
|
||||
call_info = rx_t1.try_recv().unwrap();
|
||||
assert_eq!(call_info.op_code, -1);
|
||||
let diff_call_to_start = call_info.time - now;
|
||||
assert!(diff_call_to_start.as_millis() < 80);
|
||||
assert!(diff_call_to_start.as_millis() >= 50);
|
||||
matches!(rx_t1.try_recv().unwrap_err(), TryRecvError::Empty);
|
||||
|
||||
call_info = rx_t2.try_recv().unwrap();
|
||||
assert_eq!(call_info.op_code, 1);
|
||||
let diff_call_to_start = call_info.time - now;
|
||||
assert!(diff_call_to_start.as_millis() < 120);
|
||||
assert!(diff_call_to_start.as_millis() >= 100);
|
||||
call_info = rx_t2.try_recv().unwrap();
|
||||
assert_eq!(call_info.op_code, 2);
|
||||
let diff_call_to_start = call_info.time - now;
|
||||
assert!(diff_call_to_start.as_millis() < 180);
|
||||
assert!(diff_call_to_start.as_millis() >= 150);
|
||||
matches!(rx_t2.try_recv().unwrap_err(), TryRecvError::Empty);
|
||||
assert_eq!(deadline_missed_cb.call_count, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn basic_test_in_thread() {
|
||||
let mut deadline_missed_cb = DeadlineMissed::default();
|
||||
std::thread::spawn(move || {
|
||||
let (tx_t1, _rx_t1) = mpsc::channel();
|
||||
let t1 = Task1 {
|
||||
called_queue: tx_t1,
|
||||
};
|
||||
// Needs to be constructed inside the thread because the task table is not [Send]
|
||||
let mut exec_task = TaskWithSchedulingTable::new(Duration::from_millis(200));
|
||||
let t1_wrapper = Rc::new(RefCell::new(t1));
|
||||
exec_task.add_task(0, t1_wrapper, 0).unwrap();
|
||||
exec_task
|
||||
.run_one_task_cycle(&mut deadline_missed_cb)
|
||||
.unwrap();
|
||||
});
|
||||
|
||||
let mut deadline_missed_cb = DeadlineMissed::default();
|
||||
let (tx_t1, _rx_t1) = mpsc::channel();
|
||||
let t1 = Task1 {
|
||||
called_queue: tx_t1,
|
||||
};
|
||||
let mut exec_task_sendable = TaskWithSchedulingTable::new(Duration::from_millis(200));
|
||||
exec_task_sendable
|
||||
.add_task_sendable(0, Arc::new(Mutex::new(t1)), 1)
|
||||
.unwrap();
|
||||
std::thread::spawn(move || {
|
||||
exec_task_sendable
|
||||
.run_one_task_cycle(&mut deadline_missed_cb)
|
||||
.unwrap();
|
||||
});
|
||||
}
|
||||
}
|
satrs/src/seq_count.rs (new file, 250 lines)
@ -0,0 +1,250 @@
|
||||
use core::cell::Cell;
|
||||
#[cfg(feature = "alloc")]
|
||||
use dyn_clone::DynClone;
|
||||
use paste::paste;
|
||||
use spacepackets::MAX_SEQ_COUNT;
|
||||
#[cfg(feature = "std")]
|
||||
pub use stdmod::*;
|
||||
|
||||
/// Core trait for objects which can provide a sequence count.
|
||||
///
|
||||
/// The core functions are not mutable on purpose to allow easier usage with
|
||||
/// static structs when using the interior mutability pattern. This can be achieved by using
|
||||
/// [Cell], [core::cell::RefCell] or atomic types.
|
||||
pub trait SequenceCountProviderCore<Raw> {
|
||||
fn get(&self) -> Raw;
|
||||
|
||||
fn increment(&self);
|
||||
|
||||
fn get_and_increment(&self) -> Raw {
|
||||
let val = self.get();
|
||||
self.increment();
|
||||
val
|
||||
}
|
||||
}
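Because the core functions take `&self`, an implementation backed by an atomic also fits the interior-mutability pattern described above. A small sketch (not part of this branch) using `AtomicU16`:

```rust
use core::sync::atomic::{AtomicU16, Ordering};

use satrs::seq_count::SequenceCountProviderCore;

// Hypothetical atomic-backed provider, usable from a static without a lock.
#[derive(Default)]
pub struct AtomicSeqCountProviderU16 {
    seq_count: AtomicU16,
}

impl SequenceCountProviderCore<u16> for AtomicSeqCountProviderU16 {
    fn get(&self) -> u16 {
        self.seq_count.load(Ordering::Relaxed)
    }

    fn increment(&self) {
        // Atomic addition wraps, so the counter rolls over at u16::MAX.
        self.seq_count.fetch_add(1, Ordering::Relaxed);
    }

    fn get_and_increment(&self) -> u16 {
        self.seq_count.fetch_add(1, Ordering::Relaxed)
    }
}
```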
|
||||
|
||||
/// Extension trait which allows cloning a sequence count provider after it was turned into
|
||||
/// a trait object.
|
||||
#[cfg(feature = "alloc")]
|
||||
pub trait SequenceCountProvider<Raw>: SequenceCountProviderCore<Raw> + DynClone {}
|
||||
#[cfg(feature = "alloc")]
|
||||
dyn_clone::clone_trait_object!(SequenceCountProvider<u16>);
|
||||
#[cfg(feature = "alloc")]
|
||||
impl<T, Raw> SequenceCountProvider<Raw> for T where T: SequenceCountProviderCore<Raw> + Clone {}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct SeqCountProviderSimple<T: Copy> {
|
||||
seq_count: Cell<T>,
|
||||
max_val: T,
|
||||
}
|
||||
|
||||
macro_rules! impl_for_primitives {
|
||||
($($ty: ident,)+) => {
|
||||
$(
|
||||
paste! {
|
||||
impl SeqCountProviderSimple<$ty> {
|
||||
pub fn [<new_custom_max_val_ $ty>](max_val: $ty) -> Self {
|
||||
Self {
|
||||
seq_count: Cell::new(0),
|
||||
max_val,
|
||||
}
|
||||
}
|
||||
pub fn [<new_ $ty>]() -> Self {
|
||||
Self {
|
||||
seq_count: Cell::new(0),
|
||||
max_val: $ty::MAX
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for SeqCountProviderSimple<$ty> {
|
||||
fn default() -> Self {
|
||||
Self::[<new_ $ty>]()
|
||||
}
|
||||
}
|
||||
|
||||
impl SequenceCountProviderCore<$ty> for SeqCountProviderSimple<$ty> {
|
||||
fn get(&self) -> $ty {
|
||||
self.seq_count.get()
|
||||
}
|
||||
|
||||
fn increment(&self) {
|
||||
self.get_and_increment();
|
||||
}
|
||||
|
||||
fn get_and_increment(&self) -> $ty {
|
||||
let curr_count = self.seq_count.get();
|
||||
|
||||
if curr_count == self.max_val {
|
||||
self.seq_count.set(0);
|
||||
} else {
|
||||
self.seq_count.set(curr_count + 1);
|
||||
}
|
||||
curr_count
|
||||
}
|
||||
}
|
||||
}
|
||||
)+
|
||||
}
|
||||
}
|
||||
|
||||
impl_for_primitives!(u8, u16, u32, u64,);
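The macro invocation above generates per-primitive constructors such as `new_u8` and `new_custom_max_val_u16`. A short usage sketch of the generated API; the wrap-around values shown are just examples:

```rust
use satrs::seq_count::{SeqCountProviderSimple, SequenceCountProviderCore};

fn main() {
    // Wraps around at u8::MAX by default.
    let counter = SeqCountProviderSimple::new_u8();
    assert_eq!(counter.get_and_increment(), 0);
    assert_eq!(counter.get(), 1);

    // Custom wrap-around value of 3: yields 0, 1, 2, 3 and then 0 again.
    let small = SeqCountProviderSimple::new_custom_max_val_u8(3);
    for expected in [0, 1, 2, 3, 0] {
        assert_eq!(small.get_and_increment(), expected);
    }
}
```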
|
||||
|
||||
/// This is a sequence count provider which wraps around at [MAX_SEQ_COUNT].
|
||||
#[derive(Clone)]
|
||||
pub struct CcsdsSimpleSeqCountProvider {
|
||||
provider: SeqCountProviderSimple<u16>,
|
||||
}
|
||||
|
||||
impl Default for CcsdsSimpleSeqCountProvider {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
provider: SeqCountProviderSimple::new_custom_max_val_u16(MAX_SEQ_COUNT),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SequenceCountProviderCore<u16> for CcsdsSimpleSeqCountProvider {
|
||||
delegate::delegate! {
|
||||
to self.provider {
|
||||
fn get(&self) -> u16;
|
||||
fn increment(&self);
|
||||
fn get_and_increment(&self) -> u16;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub mod stdmod {
|
||||
use super::*;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
macro_rules! sync_clonable_seq_counter_impl {
|
||||
($($ty: ident,)+) => {
|
||||
$(paste! {
|
||||
/// These sequence counters can be shared between threads and can also be
/// configured to wrap around at specified maximum values. Please note that
/// the API provided by this struct will not panic on [Mutex] lock errors,
/// but will instead yield 0 from the getter functions.
|
||||
#[derive(Clone, Default)]
|
||||
pub struct [<SeqCountProviderSync $ty:upper>] {
|
||||
seq_count: Arc<Mutex<$ty>>,
|
||||
max_val: $ty
|
||||
}
|
||||
|
||||
impl [<SeqCountProviderSync $ty:upper>] {
|
||||
pub fn new() -> Self {
|
||||
Self::new_with_max_val($ty::MAX)
|
||||
}
|
||||
|
||||
pub fn new_with_max_val(max_val: $ty) -> Self {
|
||||
Self {
|
||||
seq_count: Arc::default(),
|
||||
max_val
|
||||
}
|
||||
}
|
||||
}
|
||||
impl SequenceCountProviderCore<$ty> for [<SeqCountProviderSync $ty:upper>] {
|
||||
fn get(&self) -> $ty {
|
||||
match self.seq_count.lock() {
|
||||
Ok(counter) => *counter,
|
||||
Err(_) => 0
|
||||
}
|
||||
}
|
||||
|
||||
fn increment(&self) {
|
||||
self.get_and_increment();
|
||||
}
|
||||
|
||||
fn get_and_increment(&self) -> $ty {
|
||||
match self.seq_count.lock() {
|
||||
Ok(mut counter) => {
|
||||
let val = *counter;
|
||||
if val == self.max_val {
|
||||
*counter = 0;
|
||||
} else {
|
||||
*counter += 1;
|
||||
}
|
||||
val
|
||||
}
|
||||
Err(_) => 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
})+
|
||||
}
|
||||
}
|
||||
sync_clonable_seq_counter_impl!(u8, u16, u32, u64,);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::seq_count::{
|
||||
CcsdsSimpleSeqCountProvider, SeqCountProviderSimple, SeqCountProviderSyncU8,
|
||||
SequenceCountProviderCore,
|
||||
};
|
||||
use spacepackets::MAX_SEQ_COUNT;
|
||||
|
||||
#[test]
|
||||
fn test_u8_counter() {
|
||||
let u8_counter = SeqCountProviderSimple::<u8>::default();
|
||||
assert_eq!(u8_counter.get(), 0);
|
||||
assert_eq!(u8_counter.get_and_increment(), 0);
|
||||
assert_eq!(u8_counter.get_and_increment(), 1);
|
||||
assert_eq!(u8_counter.get(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_u8_counter_overflow() {
|
||||
let u8_counter = SeqCountProviderSimple::new_u8();
|
||||
for _ in 0..256 {
|
||||
u8_counter.increment();
|
||||
}
|
||||
assert_eq!(u8_counter.get(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ccsds_counter() {
|
||||
let ccsds_counter = CcsdsSimpleSeqCountProvider::default();
|
||||
assert_eq!(ccsds_counter.get(), 0);
|
||||
assert_eq!(ccsds_counter.get_and_increment(), 0);
|
||||
assert_eq!(ccsds_counter.get_and_increment(), 1);
|
||||
assert_eq!(ccsds_counter.get(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ccsds_counter_overflow() {
|
||||
let ccsds_counter = CcsdsSimpleSeqCountProvider::default();
|
||||
for _ in 0..MAX_SEQ_COUNT + 1 {
|
||||
ccsds_counter.increment();
|
||||
}
|
||||
assert_eq!(ccsds_counter.get(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_atomic_ref_counters() {
|
||||
let sync_u8_counter = SeqCountProviderSyncU8::new();
|
||||
assert_eq!(sync_u8_counter.get(), 0);
|
||||
assert_eq!(sync_u8_counter.get_and_increment(), 0);
|
||||
assert_eq!(sync_u8_counter.get_and_increment(), 1);
|
||||
assert_eq!(sync_u8_counter.get(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_atomic_ref_counters_overflow() {
|
||||
let sync_u8_counter = SeqCountProviderSyncU8::new();
|
||||
for _ in 0..u8::MAX as u16 + 1 {
|
||||
sync_u8_counter.increment();
|
||||
}
|
||||
assert_eq!(sync_u8_counter.get(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_atomic_ref_counters_overflow_custom_max_val() {
|
||||
let sync_u8_counter = SeqCountProviderSyncU8::new_with_max_val(128);
|
||||
for _ in 0..129 {
|
||||
sync_u8_counter.increment();
|
||||
}
|
||||
assert_eq!(sync_u8_counter.get(), 0);
|
||||
}
|
||||
}
|
serialization-prototyping/.gitignore (new vendored file, 1 line)
@ -0,0 +1 @@
|
||||
/target
|
serialization-prototyping/Cargo.lock (new generated file, 1242 lines; diff suppressed because it is too large)
serialization-prototyping/Cargo.toml (new file, 14 lines)
@ -0,0 +1,14 @@
|
||||
[package]
|
||||
name = "msg-pack-test"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
satrs-minisim = { version = "0.1", path = "../satrs-minisim" }
|
||||
satrs = { version = "0.2", path = "../satrs" }
|
||||
rmp-serde = "1"
|
||||
rmpv = { version = "1", features = ["with-serde"] }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1"
|
serialization-prototyping/python-test/.gitignore (new vendored file, 1 line)
@ -0,0 +1 @@
|
||||
/venv
|
serialization-prototyping/python-test/main.py (new executable file, 51 lines)
@ -0,0 +1,51 @@
|
||||
#!/usr/bin/env python3
|
||||
import enum
|
||||
from socket import AF_INET, SOCK_DGRAM, socket
|
||||
from pydantic import BaseModel
|
||||
import msgpack
|
||||
|
||||
|
||||
TEST_PERSON = {
|
||||
"age": 24,
|
||||
"name": "Nadine",
|
||||
}
|
||||
|
||||
|
||||
class Devices(str, enum.Enum):
|
||||
MGT = "Mgt"
|
||||
MGM = "Mgm"
|
||||
|
||||
|
||||
class SwitchState(str, enum.Enum):
|
||||
OFF = "Off"
|
||||
ON = "On"
|
||||
UNKNOWN = "Unknown"
|
||||
FAULTY = "Faulty"
|
||||
|
||||
|
||||
class SwitchMap(BaseModel):
|
||||
valid: bool
|
||||
switch_map: dict[Devices, SwitchState]
|
||||
|
||||
|
||||
def msg_pack_unloading(recv_back: bytes):
|
||||
unpacked = msgpack.unpackb(recv_back)
|
||||
print(f"unpacked: {unpacked}")
|
||||
loaded_back = msgpack.loads(recv_back)
|
||||
print(loaded_back)
|
||||
|
||||
|
||||
def main():
|
||||
server_socket = socket(AF_INET, SOCK_DGRAM)
|
||||
target_address = "localhost", 7301
|
||||
msg_pack_stuff = msgpack.packb(TEST_PERSON)
|
||||
assert msg_pack_stuff is not None
|
||||
_ = server_socket.sendto(msg_pack_stuff, target_address)
|
||||
recv_back = server_socket.recv(4096)
|
||||
print(f"recv back: {recv_back}")
|
||||
switch_map = SwitchMap.model_validate_json(recv_back)
|
||||
print(f"switch map: {switch_map}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
serialization-prototyping/python-test/requirements.txt (new file, 2 lines)
@ -0,0 +1,2 @@
|
||||
msgpack==1.0.8
|
||||
pydantic==2.7
|
serialization-prototyping/src/archive.rs (new file, 88 lines)
@ -0,0 +1,88 @@
|
||||
use std::collections::HashMap;
use std::net::{SocketAddr, UdpSocket};

use rmp_serde::Serializer;
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Copy, PartialEq, Deserialize, Serialize)]
|
||||
pub enum Color {
|
||||
Red = 0,
|
||||
Green = 1,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
|
||||
struct Human {
|
||||
age: u16,
|
||||
name: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
|
||||
struct HumanAdvanced {
|
||||
id: u32,
|
||||
age: u16,
|
||||
name: String,
|
||||
fav_color: Color,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
|
||||
struct HumanGroup {
|
||||
humans: Vec<HumanAdvanced>,
|
||||
bank: HashMap<u32, usize>,
|
||||
}
|
||||
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn random_testing() {
|
||||
let mut buf = Vec::new();
|
||||
let john = HumanAdvanced {
|
||||
id: 0,
|
||||
age: 42,
|
||||
name: "John".into(),
|
||||
fav_color: Color::Green,
|
||||
};
|
||||
|
||||
john.serialize(&mut Serializer::new(&mut buf)).unwrap();
|
||||
|
||||
println!("{:?}", buf);
|
||||
let new_val: HumanAdvanced = rmp_serde::from_slice(&buf).expect("deserialization failed");
|
||||
let rmpv_val: rmpv::Value = rmp_serde::from_slice(&buf).expect("serialization into val failed");
|
||||
println!("RMPV value: {:?}", rmpv_val);
|
||||
let json_str = serde_json::to_string(&rmpv_val).expect("creating json failed");
|
||||
assert_eq!(john, new_val);
|
||||
println!("JSON str: {}", json_str);
|
||||
let val_test: HumanAdvanced = serde_json::from_str(&json_str).expect("wild");
|
||||
println!("val test: {:?}", val_test);
|
||||
|
||||
let nadine = HumanAdvanced {
|
||||
id: 1,
|
||||
age: 24,
|
||||
name: "Nadine".into(),
|
||||
fav_color: Color::Red,
|
||||
};
|
||||
let mut bank = HashMap::default();
|
||||
bank.insert(john.id, 1000000);
|
||||
bank.insert(nadine.id, 1);
|
||||
|
||||
let human_group = HumanGroup {
|
||||
humans: vec![john, nadine.clone()],
|
||||
bank,
|
||||
};
|
||||
let json_str = serde_json::to_string(&nadine).unwrap();
|
||||
println!("Nadine as JSON: {}", json_str);
|
||||
|
||||
let nadine_is_back: HumanAdvanced = serde_json::from_str(&json_str).unwrap();
|
||||
println!("nadine deserialized: {:?}", nadine_is_back);
|
||||
|
||||
let human_group_json = serde_json::to_string(&human_group).unwrap();
|
||||
println!("human group: {}", human_group_json);
|
||||
println!("human group json size: {}", human_group_json.len());
|
||||
|
||||
let human_group_rmp_vec = rmp_serde::to_vec_named(&human_group_json).unwrap();
|
||||
println!("human group msg pack size: {:?}", human_group_rmp_vec.len());
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn send_back_weird_stuff(buf: &[u8], received: usize, socket: &UdpSocket, src: SocketAddr) {
|
||||
let human_from_python: rmpv::Value =
    rmp_serde::from_slice(&buf[..received]).expect("deserializing generic rmpv value failed");
let human_attempt_2: Human =
    rmp_serde::from_slice(&buf[..received]).expect("deserializing Human failed");
println!("human from python: {}", human_from_python);
println!("human 2 from python: {:?}", human_attempt_2);
let send_back_human =
    rmp_serde::to_vec_named(&human_attempt_2).expect("serializing Human back to MessagePack failed");
|
||||
socket
|
||||
.send_to(&send_back_human, src)
|
||||
.expect("sending back failed");
|
||||
}
|
serialization-prototyping/src/main.rs (new file, 61 lines)
@ -0,0 +1,61 @@
|
||||
#![allow(unused_imports)]
|
||||
use rmp_serde::{Deserializer, Serializer};
|
||||
use satrs_minisim::eps::{SwitchMap, SwitchMapWrapper};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
net::{SocketAddr, UdpSocket},
|
||||
};
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Default, Serialize, Deserialize)]
|
||||
pub struct SwitchSet {
|
||||
pub valid: bool,
|
||||
pub switch_map: SwitchMap,
|
||||
}
|
||||
|
||||
pub struct UdpServer {
|
||||
socket: UdpSocket,
|
||||
last_sender: Option<SocketAddr>,
|
||||
}
|
||||
|
||||
impl Default for UdpServer {
|
||||
fn default() -> Self {
|
||||
UdpServer {
|
||||
socket: UdpSocket::bind("127.0.0.1:7301").expect("binding UDP socket failed"),
|
||||
last_sender: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl UdpServer {
|
||||
pub fn send_back_reply(&self, reply: &[u8]) {
|
||||
self.socket
|
||||
.send_to(reply, self.last_sender.expect("last sender not set"))
|
||||
.expect("sending back failed");
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let mut udp_server = UdpServer::default();
|
||||
|
||||
loop {
|
||||
// Receives a single datagram message on the socket. If `buf` is too small to hold
|
||||
// the message, it will be cut off.
|
||||
let mut buf = [0; 4096];
|
||||
let (received, src) = udp_server
|
||||
.socket
|
||||
.recv_from(&mut buf)
|
||||
.expect("receive call failed");
|
||||
udp_server.last_sender = Some(src);
|
||||
println!("received {} bytes from {:?}", received, src);
|
||||
let switch_map_off = SwitchMapWrapper::default();
|
||||
let switch_set = SwitchSet {
|
||||
valid: true,
|
||||
switch_map: switch_map_off.0.clone(),
|
||||
};
|
||||
let switch_map_off_json =
|
||||
serde_json::to_string(&switch_set).expect("json serialization failed");
|
||||
println!("sending back reply: {}", switch_map_off_json);
|
||||
udp_server.send_back_reply(switch_map_off_json.as_bytes());
|
||||
}
|
||||
}
|