15 Commits

Author SHA1 Message Date
20d5212710 Merge pull request 'satrs-core v0.1.0-alpha.3' (#108) from core-v0.1.0-alpha.3 into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #108
2024-02-12 13:13:12 +01:00
2dd38c163f better name for new trait
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-02-12 12:54:25 +01:00
79d095b1f7 some doc improvements
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-02-12 12:48:28 +01:00
377ffc052c bump release
Some checks are pending
Rust/sat-rs/pipeline/head Build started...
2024-02-12 12:43:17 +01:00
c39ce99e2c Merge pull request 'Add Static Pool Spillover feature' (#107) from add-pool-spillover into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #107
2024-02-12 12:21:00 +01:00
c0692a3523 Added static pool spillover feature
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
- Allows utilizing the full pool even if some subpools are full.
2024-02-12 11:35:10 +01:00
712dc718f9 Merge pull request 'Pool docs improvements' (#105) from pool-docs-improvements into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #105
2024-02-12 11:30:48 +01:00
a69347af7b Merge remote-tracking branch 'origin/main' into pool-docs-improvements
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-02-12 10:56:13 +01:00
3c7113c231 Merge pull request 'Refactor and improve pool abstraction' (#104) from refactor-pool-impl into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #104
2024-02-10 15:39:38 +01:00
9c310e7a36 Merge branch 'main' into refactor-pool-impl
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-02-10 12:54:36 +01:00
0bab5799e5 Merge pull request 'Graph for static pools' (#103) from graph-for-static-pools into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #103
2024-02-10 12:54:29 +01:00
b56bbc8c41 update pool docs
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-02-10 12:54:13 +01:00
12ac5913aa Merge branch 'graph-for-static-pools' into refactor-pool-impl 2024-02-10 12:30:35 +01:00
d017b9c179 Refactored pool abstraction
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
- Redesigned PoolProvider and PoolProviderWithGuards to allow
  easier optimizations and increase flexibility
2024-02-10 11:59:26 +01:00
f3ca570e53 Update pool docs in sat-rs book
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
- Add simple graph to show how it works
2024-02-10 11:57:19 +01:00
20 changed files with 2079 additions and 1180 deletions

File diff suppressed because it is too large.

View File

@ -0,0 +1,348 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:java="http://www.yworks.com/xml/yfiles-common/1.0/java" xmlns:sys="http://www.yworks.com/xml/yfiles-common/markup/primitives/2.0" xmlns:x="http://www.yworks.com/xml/yfiles-common/markup/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:y="http://www.yworks.com/xml/graphml" xmlns:yed="http://www.yworks.com/xml/yed/3" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd">
<!--Created by yEd 3.23.2-->
<key attr.name="Description" attr.type="string" for="graph" id="d0"/>
<key for="port" id="d1" yfiles.type="portgraphics"/>
<key for="port" id="d2" yfiles.type="portgeometry"/>
<key for="port" id="d3" yfiles.type="portuserdata"/>
<key attr.name="url" attr.type="string" for="node" id="d4"/>
<key attr.name="description" attr.type="string" for="node" id="d5"/>
<key for="node" id="d6" yfiles.type="nodegraphics"/>
<key for="graphml" id="d7" yfiles.type="resources"/>
<key attr.name="url" attr.type="string" for="edge" id="d8"/>
<key attr.name="description" attr.type="string" for="edge" id="d9"/>
<key for="edge" id="d10" yfiles.type="edgegraphics"/>
<graph edgedefault="directed" id="G">
<data key="d0"/>
<node id="n0">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="174.17919999999998" width="273.1071999999999" x="1000.3040000000003" y="433.61760000000004"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="67.380859375" x="12.06892812499973" xml:space="preserve" y="7.247609374999911">Static Pool<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="-0.5" labelRatioY="-0.5" nodeRatioX="-0.45580882479480683" nodeRatioY="-0.45838992615076934" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n1">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="120.0" x="1141.8400000000001" y="536.1087687499992"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" lineColor="#000000" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="86.142578125" x="16.9287109375" xml:space="preserve" y="21.015625">1 x 128 bytes<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n2" yfiles.foldertype="group">
<data key="d4" xml:space="preserve"/>
<data key="d5"/>
<data key="d6">
<y:ProxyAutoBoundsNode>
<y:Realizers active="0">
<y:GroupNode>
<y:Geometry height="90.0" width="164.73919999999998" x="1113.1776" y="451.01919999999996"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle hasColor="false" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="false" width="164.73919999999998" x="0.0" xml:space="preserve" y="0.0">Group 1</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="false" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="15" bottomF="15.0" left="15" leftF="15.0" right="15" rightF="15.0" top="15" topF="15.0"/>
<y:BorderInsets bottom="0" bottomF="0.0" left="15" leftF="14.739199999999983" right="0" rightF="0.0" top="0" topF="0.0"/>
</y:GroupNode>
<y:GroupNode>
<y:Geometry height="50.0" width="50.0" x="0.0" y="60.0"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="65.201171875" x="-7.6005859375" xml:space="preserve" y="0.0">Folder 1</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="true" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="5" bottomF="5.0" left="5" leftF="5.0" right="5" rightF="5.0" top="5" topF="5.0"/>
<y:BorderInsets bottom="0" bottomF="0.0" left="0" leftF="0.0" right="0" rightF="0.0" top="0" topF="0.0"/>
</y:GroupNode>
</y:Realizers>
</y:ProxyAutoBoundsNode>
</data>
<graph edgedefault="directed" id="n2:">
<node id="n2::n0">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="60.0" x="1142.9168" y="466.01919999999996"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" hasText="false" height="4.0" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="4.0" x="28.0" y="28.0">
<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n2::n1">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="120.0" x="1142.9168" y="466.01919999999996"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" backgroundColor="#FFFFFF" fontFamily="Dialog" fontSize="12" fontStyle="plain" height="17.96875" horizontalTextPosition="center" iconTextGap="4" lineColor="#000000" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="78.5078125" x="20.746093749999773" xml:space="preserve" y="4.862813159989173">2 x 64 bytes<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="-0.5" nodeRatioX="-1.1657341758564144E-15" nodeRatioY="-0.41895311400018" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
</graph>
</node>
<node id="n3" yfiles.foldertype="group">
<data key="d4" xml:space="preserve"/>
<data key="d5"/>
<data key="d6">
<y:ProxyAutoBoundsNode>
<y:Realizers active="0">
<y:GroupNode>
<y:Geometry height="89.99999999999994" width="150.0" x="997.9168" y="451.0192"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle hasColor="false" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="false" width="150.0" x="0.0" xml:space="preserve" y="0.0">Group 2</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="false" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="15" bottomF="15.0" left="15" leftF="15.0" right="15" rightF="15.0" top="15" topF="15.0"/>
<y:BorderInsets bottom="0" bottomF="0.0" left="0" leftF="0.0" right="0" rightF="0.0" top="0" topF="0.0"/>
</y:GroupNode>
<y:GroupNode>
<y:Geometry height="50.0" width="50.0" x="997.9168" y="451.01919999999996"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="65.201171875" x="-7.6005859375" xml:space="preserve" y="0.0">Folder 2</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="true" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="5" bottomF="5.0" left="5" leftF="5.0" right="5" rightF="5.0" top="5" topF="5.0"/>
<y:BorderInsets bottom="0" bottomF="0.0" left="0" leftF="0.0" right="0" rightF="0.0" top="0" topF="0.0"/>
</y:GroupNode>
</y:Realizers>
</y:ProxyAutoBoundsNode>
</data>
<graph edgedefault="directed" id="n3:">
<node id="n3::n0">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="20.0" x="1092.9168" y="466.0192"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" hasText="false" height="4.0" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="4.0" x="8.0" y="28.0">
<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n3::n1">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="20.0" x="1072.9168" y="466.0192"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" hasText="false" height="4.0" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="4.0" x="8.0" y="28.0">
<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n3::n2">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="20.0" x="1032.9168" y="466.0192"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" hasText="false" height="4.0" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="4.0" x="8.0" y="28.0">
<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n3::n3">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="20.0" x="1052.9168" y="466.0192"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" hasText="false" height="4.0" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="4.0" x="8.0" y="28.0">
<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n3::n4">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="20.0" x="1012.9168" y="466.0192"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" hasText="false" height="4.0" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="4.0" x="8.0" y="28.0">
<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n3::n5">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="120.0" x="1012.9168" y="466.0192"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" backgroundColor="#FFFFFF" fontFamily="Dialog" fontSize="12" fontStyle="plain" height="17.96875" horizontalTextPosition="center" iconTextGap="4" lineColor="#000000" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="78.5078125" x="14.814773932043863" xml:space="preserve" y="4.392671444094276">6 x 16 bytes<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="-0.5" nodeRatioX="-0.04942766514963404" nodeRatioY="-0.426788809265095" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
</graph>
</node>
<node id="n4" yfiles.foldertype="group">
<data key="d4" xml:space="preserve"/>
<data key="d5"/>
<data key="d6">
<y:ProxyAutoBoundsNode>
<y:Realizers active="0">
<y:GroupNode>
<y:Geometry height="90.0" width="150.0" x="997.9168" y="521.1759843749999"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle hasColor="false" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="false" width="150.0" x="0.0" xml:space="preserve" y="0.0">Group 3</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="false" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="15" bottomF="15.0" left="15" leftF="15.0" right="15" rightF="15.0" top="15" topF="15.0"/>
<y:BorderInsets bottom="0" bottomF="0.0" left="0" leftF="0.0" right="0" rightF="0.0" top="0" topF="0.0"/>
</y:GroupNode>
<y:GroupNode>
<y:Geometry height="50.0" width="50.0" x="0.0" y="60.0"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="65.201171875" x="-7.6005859375" xml:space="preserve" y="0.0">Folder 3</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="true" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="5" bottomF="5.0" left="5" leftF="5.0" right="5" rightF="5.0" top="5" topF="5.0"/>
<y:BorderInsets bottom="0" bottomF="0.0" left="0" leftF="0.0" right="0" rightF="0.0" top="0" topF="0.0"/>
</y:GroupNode>
</y:Realizers>
</y:ProxyAutoBoundsNode>
</data>
<graph edgedefault="directed" id="n4:">
<node id="n4::n0">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="30.0" x="1012.9168" y="536.1759843749999"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" hasText="false" height="4.0" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="4.0" x="13.0" y="28.0">
<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n4::n1">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="30.0" x="1042.9168" y="536.1759843749999"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" hasText="false" height="4.0" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="4.0" x="13.0" y="28.0">
<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n4::n2">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="30.0" x="1072.9168" y="536.1759843749999"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" hasText="false" height="4.0" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="4.0" x="13.0" y="28.0">
<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n4::n3">
<data key="d5"/>
<data key="d6">
<y:ShapeNode>
<y:Geometry height="60.0" width="120.0" x="1012.9168" y="536.1759843749999"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" backgroundColor="#FFFFFF" fontFamily="Dialog" fontSize="12" fontStyle="plain" height="17.96875" horizontalTextPosition="center" iconTextGap="4" lineColor="#000000" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="78.5078125" x="20.74609375" xml:space="preserve" y="4.392671444094276">4 x 32 bytes<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="-0.5" nodeRatioX="0.0" nodeRatioY="-0.426788809265095" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
</graph>
</node>
</graph>
<data key="d7">
<y:Resources/>
</data>
</graphml>

View File

Binary file not shown (PDF exported from yEd).

View File

@ -5,7 +5,14 @@ High-level documentation of the [sat-rs project](https://absatsw.irs.uni-stuttga
## Building
If you have not done so, install `mdbook` using `cargo install mdbook --locked`.
If you have not done so, install the prerequisites first:
```sh
cargo install mdbook --locked
cargo install mdbook-linkcheck --locked
```
After that, you can build the book with:
```sh
mdbook build

View File

@ -11,7 +11,8 @@ time where the OBSW might be running on Linux based systems with hundreds of MBs
A useful pattern used commonly in space systems is to limit heap allocations to program
initialization time and avoid frequent run-time allocations. This prevents issues like
running out of memory (something even Rust can not protect from) or heap fragmentation.
running out of memory (something even Rust can not protect from) or heap fragmentation on systems
without an MMU.
# Using pre-allocated pool structures
@ -19,15 +20,45 @@ A huge candidate for heap allocations is the TMTC handling. TC, TMs and IPC data are all
candidates where the data size might vary greatly. The regular solution for host systems
might be to send around this data as a `Vec<u8>` until it is dropped. `sat-rs` provides
another solution to avoid run-time allocations by offering pre-allocated static
pools.
pools. These pools are split into subpools where each subpool can have different page sizes.
For example, a very small telecommand (TC) pool might look like this:
These pools are split into subpools where each subpool can have different page sizes.
For example, a very small TC pool might look like this:
![Example Pool](images/pools/static-pools.png)
TODO: Add image
The core of the pool abstractions is the
[PoolProvider trait](https://docs.rs/satrs-core/0.1.0-alpha.3/satrs_core/pool/trait.PoolProvider.html).
This trait specifies the general API a pool structure should have without making assumptions
about how the data is stored.
This trait is implemented by a static memory pool implementation.
The code to generate this static pool would look like this:
```rust
use satrs_core::pool::{StaticMemoryPool, StaticPoolConfig};
let tc_pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![
(6, 16),
(4, 32),
(2, 64),
(1, 128)
], false));
```
It should be noted that the buckets only show the maximum size of data being stored inside them.
The store will keep a separate structure to track the actual size of the data being stored.
A TC entry inside this pool has a store address which can then be sent around without having
to dynamically allocate memory. The same principle can also be applied to the TM and IPC data.
to dynamically allocate memory. The same principle can also be applied to the telemetry (TM) and
inter-process communication (IPC) data.
You can read
- [`StaticPoolConfig` API](https://docs.rs/satrs-core/0.1.0-alpha.3/satrs_core/pool/struct.StaticPoolConfig.html)
- [`StaticMemoryPool` API](https://docs.rs/satrs-core/0.1.0-alpha.3/satrs_core/pool/struct.StaticMemoryPool.html)
for more details.
In the future, optimized pool structures which use standard containers or are
[`Sync`](https://doc.rust-lang.org/std/marker/trait.Sync.html) by default might be added as well.
# Using special crates to prevent smaller allocations
@ -35,7 +66,7 @@ Another common way to use the heap on host systems is using containers like `Str
to work with data where the size is not known beforehand. The most common solution for embedded
systems is to determine the maximum expected size and then use a pre-allocated `u8` buffer and a
size variable. Alternatively, you can use the following crates for more convenience or a smart
behaviour which at the very least reduce heap allocations:
behaviour which at the very least reduces heap allocations:
1. [`smallvec`](https://docs.rs/smallvec/latest/smallvec/).
2. [`arrayvec`](https://docs.rs/arrayvec/latest/arrayvec/index.html) which also contains an

Binary file not shown (image, 38 KiB).

View File

@ -1,6 +1,6 @@
[package]
name = "satrs-core"
version = "0.1.0-alpha.2"
version = "0.1.0-alpha.3"
edition = "2021"
rust-version = "1.61"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]

View File

@ -1,13 +1,21 @@
//! # Pool implementation providing memory pools for packet storage.
//!
//! This module provides generic abstractions for memory pools which provide a storage
//! mechanism for variable sized data like Telemetry and Telecommand (TMTC) packets. The core
//! abstraction for this is the [PoolProvider] trait.
//!
//! It also contains the [StaticMemoryPool] as a concrete implementation which can be used to avoid
//! dynamic run-time allocations for the storage of TMTC packets.
//!
//! # Example for the [StaticMemoryPool]
//!
//! ```
//! use satrs_core::pool::{PoolProviderMemInPlace, StaticMemoryPool, StaticPoolConfig};
//! use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};
//!
//! // 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes
//! let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)]);
//! let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)], false);
//! let mut local_pool = StaticMemoryPool::new(pool_cfg);
//! let mut read_buf: [u8; 16] = [0; 16];
//! let mut addr;
//! {
//! // Add new data to the pool
@ -20,25 +28,25 @@
//!
//! {
//! // Read the store data back
//! let res = local_pool.read(&addr);
//! let res = local_pool.read(&addr, &mut read_buf);
//! assert!(res.is_ok());
//! let buf_read_back = res.unwrap();
//! assert_eq!(buf_read_back.len(), 4);
//! assert_eq!(buf_read_back[0], 42);
//! let read_bytes = res.unwrap();
//! assert_eq!(read_bytes, 4);
//! assert_eq!(read_buf[0], 42);
//! // Modify the stored data
//! let res = local_pool.modify(&addr);
//! let res = local_pool.modify(&addr, |buf| {
//! buf[0] = 12;
//! });
//! assert!(res.is_ok());
//! let buf_read_back = res.unwrap();
//! buf_read_back[0] = 12;
//! }
//!
//! {
//! // Read the modified data back
//! let res = local_pool.read(&addr);
//! let res = local_pool.read(&addr, &mut read_buf);
//! assert!(res.is_ok());
//! let buf_read_back = res.unwrap();
//! assert_eq!(buf_read_back.len(), 4);
//! assert_eq!(buf_read_back[0], 12);
//! let read_bytes = res.unwrap();
//! assert_eq!(read_bytes, 4);
//! assert_eq!(read_buf[0], 12);
//! }
//!
//! // Delete the stored data
@ -46,21 +54,21 @@
//!
//! // Get a free element in the pool with an appropriate size
//! {
//! let res = local_pool.free_element(12);
//! let res = local_pool.free_element(12, |buf| {
//! buf[0] = 7;
//! });
//! assert!(res.is_ok());
//! let (tmp, mut_buf) = res.unwrap();
//! addr = tmp;
//! mut_buf[0] = 7;
//! addr = res.unwrap();
//! }
//!
//! // Read back the data
//! {
//! // Read the store data back
//! let res = local_pool.read(&addr);
//! let res = local_pool.read(&addr, &mut read_buf);
//! assert!(res.is_ok());
//! let buf_read_back = res.unwrap();
//! assert_eq!(buf_read_back.len(), 12);
//! assert_eq!(buf_read_back[0], 7);
//! let read_bytes = res.unwrap();
//! assert_eq!(read_bytes, 12);
//! assert_eq!(read_buf[0], 7);
//! }
//! ```
#[cfg(feature = "alloc")]
@ -70,6 +78,7 @@ use core::fmt::{Display, Formatter};
use delegate::delegate;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use spacepackets::ByteConversionError;
#[cfg(feature = "std")]
use std::error::Error;
@ -151,6 +160,8 @@ pub enum StoreError {
InvalidStoreId(StoreIdError, Option<StoreAddr>),
/// Valid subpool and packet index, but no data is stored at the given address
DataDoesNotExist(StoreAddr),
ByteConversionError(spacepackets::ByteConversionError),
LockError,
/// Internal or configuration errors
InternalError(u32),
}
@ -173,10 +184,22 @@ impl Display for StoreError {
StoreError::InternalError(e) => {
write!(f, "internal error: {e}")
}
StoreError::ByteConversionError(e) => {
write!(f, "store error: {e}")
}
StoreError::LockError => {
write!(f, "lock error")
}
}
}
}
impl From<ByteConversionError> for StoreError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value)
}
}
#[cfg(feature = "std")]
impl Error for StoreError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
@ -187,41 +210,61 @@ impl Error for StoreError {
}
}
/// Generic trait for pool providers where the data can be modified and read in-place. This
/// generally means that a shared pool structure has to be wrapped inside a lock structure.
pub trait PoolProviderMemInPlace {
/// Generic trait for pool providers which provide memory pools for variable sized data.
///
/// It specifies a basic API to [Self::add], [Self::modify], [Self::read] and [Self::delete] data
/// in the store at its core. The API was designed so internal optimizations can be performed
/// more easily and that it is also possible to make the pool structure [Sync] without the whole
/// pool structure being wrapped inside a lock.
pub trait PoolProvider {
/// Add new data to the pool. The provider should attempt to reserve a memory block with the
/// appropriate size and then copy the given data to the block. Yields a [StoreAddr] which can
/// be used to access the data stored in the pool
fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError>;
/// The provider should attempt to reserve a free memory block with the appropriate size and
/// then return a mutable reference to it. Yields a [StoreAddr] which can be used to access
/// the data stored in the pool
fn free_element(&mut self, len: usize) -> Result<(StoreAddr, &mut [u8]), StoreError>;
/// The provider should attempt to reserve a free memory block with the appropriate size first.
/// It then executes a user-provided closure and passes a mutable reference to that memory
/// block to the closure. This allows the user to write data to the memory block.
/// The function should yield a [StoreAddr] which can be used to access the data stored in the
/// pool.
fn free_element<W: FnMut(&mut [u8])>(
&mut self,
len: usize,
writer: W,
) -> Result<StoreAddr, StoreError>;
/// Modify data added previously using a given [StoreAddr] by yielding a mutable reference
/// to it
fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError>;
/// Modify data added previously using a given [StoreAddr]. The provider should use the store
/// address to determine if a memory block exists for that address. If it does, it should
/// call the user-provided closure and pass a mutable reference to the memory block
/// to the closure. This allows the user to modify the memory block.
fn modify<U: FnMut(&mut [u8])>(
&mut self,
addr: &StoreAddr,
updater: U,
) -> Result<(), StoreError>;
/// Read data by yielding a read-only reference given a [StoreAddr]
fn read(&self, addr: &StoreAddr) -> Result<&[u8], StoreError>;
/// The provider should copy the data from the memory block to the user-provided buffer if
/// it exists.
fn read(&self, addr: &StoreAddr, buf: &mut [u8]) -> Result<usize, StoreError>;
/// Delete data inside the pool given a [StoreAddr]
/// Delete data inside the pool given a [StoreAddr].
fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError>;
fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError>;
/// Retrieve the length of the data at the given store address.
fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
if !self.has_element_at(addr)? {
return Err(StoreError::DataDoesNotExist(*addr));
}
Ok(self.read(addr)?.len())
fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError>;
#[cfg(feature = "alloc")]
fn read_as_vec(&self, addr: &StoreAddr) -> Result<alloc::vec::Vec<u8>, StoreError> {
let mut vec = alloc::vec![0; self.len_of_data(addr)?];
self.read(addr, &mut vec)?;
Ok(vec)
}
}
pub trait PoolProviderMemInPlaceWithGuards: PoolProviderMemInPlace {
/// This function behaves like [PoolProviderMemInPlace::read], but consumes the provided address
/// Extension trait which adds guarded pool access classes.
pub trait PoolProviderWithGuards: PoolProvider {
/// This function behaves like [PoolProvider::read], but consumes the provided address
/// and returns a RAII conformant guard object.
///
/// Unless the guard [PoolRwGuard::release] method is called, the data for the
@ -231,7 +274,7 @@ pub trait PoolProviderMemInPlaceWithGuards: PoolProviderMemInPlace {
/// manual deletion is necessary when returning from a processing function prematurely.
fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard<Self>;
/// This function behaves like [PoolProviderMemInPlace::modify], but consumes the provided
/// This function behaves like [PoolProvider::modify], but consumes the provided
/// address and returns a RAII conformant guard object.
///
/// Unless the guard [PoolRwGuard::release] method is called, the data for the
@ -242,15 +285,16 @@ pub trait PoolProviderMemInPlaceWithGuards: PoolProviderMemInPlace {
fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard<Self>;
}
pub struct PoolGuard<'a, MemProvider: PoolProviderMemInPlace + ?Sized> {
pub struct PoolGuard<'a, MemProvider: PoolProvider + ?Sized> {
pool: &'a mut MemProvider,
pub addr: StoreAddr,
no_deletion: bool,
deletion_failed_error: Option<StoreError>,
}
/// This helper object
impl<'a, MemProvider: PoolProviderMemInPlace> PoolGuard<'a, MemProvider> {
/// This helper object can be used to safely access pool data without worrying about memory
/// leaks.
impl<'a, MemProvider: PoolProvider> PoolGuard<'a, MemProvider> {
pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self {
Self {
pool,
@ -260,8 +304,13 @@ impl<'a, MemProvider: PoolProviderMemInPlace> PoolGuard<'a, MemProvider> {
}
}
pub fn read(&self) -> Result<&[u8], StoreError> {
self.pool.read(&self.addr)
pub fn read(&self, buf: &mut [u8]) -> Result<usize, StoreError> {
self.pool.read(&self.addr, buf)
}
#[cfg(feature = "alloc")]
pub fn read_as_vec(&self) -> Result<alloc::vec::Vec<u8>, StoreError> {
self.pool.read_as_vec(&self.addr)
}
/// Releasing the pool guard will disable the automatic deletion of the data when the guard
@ -271,7 +320,7 @@ impl<'a, MemProvider: PoolProviderMemInPlace> PoolGuard<'a, MemProvider> {
}
}
impl<MemProvider: PoolProviderMemInPlace + ?Sized> Drop for PoolGuard<'_, MemProvider> {
impl<MemProvider: PoolProvider + ?Sized> Drop for PoolGuard<'_, MemProvider> {
fn drop(&mut self) {
if !self.no_deletion {
if let Err(e) = self.pool.delete(self.addr) {
@ -281,24 +330,24 @@ impl<MemProvider: PoolProviderMemInPlace + ?Sized> Drop for PoolGuard<'_, MemPro
}
}
pub struct PoolRwGuard<'a, MemProvider: PoolProviderMemInPlace + ?Sized> {
pub struct PoolRwGuard<'a, MemProvider: PoolProvider + ?Sized> {
guard: PoolGuard<'a, MemProvider>,
}
impl<'a, MemProvider: PoolProviderMemInPlace> PoolRwGuard<'a, MemProvider> {
impl<'a, MemProvider: PoolProvider> PoolRwGuard<'a, MemProvider> {
pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self {
Self {
guard: PoolGuard::new(pool, addr),
}
}
pub fn modify(&mut self) -> Result<&mut [u8], StoreError> {
self.guard.pool.modify(&self.guard.addr)
pub fn update<U: FnMut(&mut [u8])>(&mut self, updater: &mut U) -> Result<(), StoreError> {
self.guard.pool.modify(&self.guard.addr, updater)
}
delegate!(
to self.guard {
pub fn read(&self) -> Result<&[u8], StoreError>;
pub fn read(&self, buf: &mut [u8]) -> Result<usize, StoreError>;
/// Releasing the pool guard will disable the automatic deletion of the data when the guard
/// is dropped.
pub fn release(&mut self);
@ -308,13 +357,11 @@ impl<'a, MemProvider: PoolProviderMemInPlace> PoolRwGuard<'a, MemProvider> {
#[cfg(feature = "alloc")]
mod alloc_mod {
use super::{
PoolGuard, PoolProviderMemInPlace, PoolProviderMemInPlaceWithGuards, PoolRwGuard,
StaticPoolAddr,
};
use super::{PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticPoolAddr};
use crate::pool::{NumBlocks, StoreAddr, StoreError, StoreIdError};
use alloc::vec;
use alloc::vec::Vec;
use spacepackets::ByteConversionError;
#[cfg(feature = "std")]
use std::sync::{Arc, RwLock};
@ -329,16 +376,24 @@ mod alloc_mod {
///
/// # Parameters
///
/// * `cfg`: Vector of tuples which represent a subpool. The first entry in the tuple specifies the
/// number of memory blocks in the subpool, the second entry the size of the blocks
/// * `cfg` - Vector of tuples which represent a subpool. The first entry in the tuple specifies
/// the number of memory blocks in the subpool, the second entry the size of the blocks
/// * `spill_to_higher_subpools` - Specifies whether data will be spilled to higher subpools
/// if the next fitting subpool is full. This is useful to ensure the pool remains useful
/// for all data sizes as long as possible. However, an undesirable side-effect might be
/// the choking of larger subpools by under-dimensioned smaller subpools.
#[derive(Clone)]
pub struct StaticPoolConfig {
cfg: Vec<(NumBlocks, usize)>,
spill_to_higher_subpools: bool,
}
impl StaticPoolConfig {
pub fn new(cfg: Vec<(NumBlocks, usize)>) -> Self {
StaticPoolConfig { cfg }
pub fn new(cfg: Vec<(NumBlocks, usize)>, spill_to_higher_subpools: bool) -> Self {
StaticPoolConfig {
cfg,
spill_to_higher_subpools,
}
}
pub fn cfg(&self) -> &Vec<(NumBlocks, usize)> {
@ -356,16 +411,21 @@ mod alloc_mod {
/// Pool implementation providing sub-pools with fixed size memory blocks.
///
/// This is a simple memory pool implementation which pre-allocates all sub-pools using a given pool
/// configuration. After the pre-allocation, no dynamic memory allocation will be performed
/// during run-time. This makes the implementation suitable for real-time applications and
/// embedded environments. The pool implementation will also track the size of the data stored
/// inside it.
/// This is a simple memory pool implementation which pre-allocates all subpools using a given
/// pool configuration. After the pre-allocation, no dynamic memory allocation will be
/// performed during run-time. This makes the implementation suitable for real-time
/// applications and embedded environments.
///
/// The subpool bucket sizes only denote the maximum possible data size being stored inside
/// them and the pool implementation will still track the size of the data stored inside it.
/// The implementation will generally determine the best fitting subpool for the data to
/// add. Spilling to larger subpools when the closest fitting subpool is full can be
/// enabled via the `spill_to_higher_subpools` option of the [StaticPoolConfig].
///
/// Transactions with the [pool][StaticMemoryPool] are done using a generic
/// [address][StoreAddr] type.
/// Adding any data to the pool will yield a store address. Modification and read operations are
/// done using a reference to a store address. Deletion will consume the store address.
/// [address][StoreAddr] type. Adding any data to the pool will yield a store address.
/// Modification and read operations are done using a reference to a store address. Deletion
/// will consume the store address.
pub struct StaticMemoryPool {
pool_cfg: StaticPoolConfig,
pool: Vec<Vec<u8>>,
@ -422,7 +482,17 @@ mod alloc_mod {
}
fn reserve(&mut self, data_len: usize) -> Result<StaticPoolAddr, StoreError> {
let subpool_idx = self.find_subpool(data_len, 0)?;
let mut subpool_idx = self.find_subpool(data_len, 0)?;
if self.pool_cfg.spill_to_higher_subpools {
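// The best fitting subpool is full: keep advancing to the next larger
// subpool until a free slot is found or the largest subpool has been
// checked as well.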
while let Err(StoreError::StoreFull(_)) = self.find_empty(subpool_idx) {
if (subpool_idx + 1) as usize == self.sizes_lists.len() {
return Err(StoreError::StoreFull(subpool_idx));
}
subpool_idx += 1;
}
}
let (slot, size_slot_ref) = self.find_empty(subpool_idx)?;
*size_slot_ref = data_len;
Ok(StaticPoolAddr {
@ -476,7 +546,7 @@ mod alloc_mod {
}
}
impl PoolProviderMemInPlace for StaticMemoryPool {
impl PoolProvider for StaticMemoryPool {
fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError> {
let data_len = data.len();
if data_len > POOL_MAX_SIZE {
@ -487,7 +557,11 @@ mod alloc_mod {
Ok(addr.into())
}
fn free_element(&mut self, len: usize) -> Result<(StoreAddr, &mut [u8]), StoreError> {
fn free_element<W: FnMut(&mut [u8])>(
&mut self,
len: usize,
mut writer: W,
) -> Result<StoreAddr, StoreError> {
if len > POOL_MAX_SIZE {
return Err(StoreError::DataTooLarge(len));
}
@ -495,25 +569,40 @@ mod alloc_mod {
let raw_pos = self.raw_pos(&addr).unwrap();
let block =
&mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + len];
Ok((addr.into(), block))
writer(block);
Ok(addr.into())
}
fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError> {
fn modify<U: FnMut(&mut [u8])>(
&mut self,
addr: &StoreAddr,
mut updater: U,
) -> Result<(), StoreError> {
let addr = StaticPoolAddr::from(*addr);
let curr_size = self.addr_check(&addr)?;
let raw_pos = self.raw_pos(&addr).unwrap();
let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()
[raw_pos..raw_pos + curr_size];
Ok(block)
updater(block);
Ok(())
}
fn read(&self, addr: &StoreAddr) -> Result<&[u8], StoreError> {
fn read(&self, addr: &StoreAddr, buf: &mut [u8]) -> Result<usize, StoreError> {
let addr = StaticPoolAddr::from(*addr);
let curr_size = self.addr_check(&addr)?;
if buf.len() < curr_size {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: curr_size,
}
.into());
}
let raw_pos = self.raw_pos(&addr).unwrap();
let block =
&self.pool.get(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + curr_size];
Ok(block)
buf[..curr_size].copy_from_slice(block);
Ok(curr_size)
}
fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError> {
@ -540,9 +629,21 @@ mod alloc_mod {
}
Ok(true)
}
fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
let addr = StaticPoolAddr::from(*addr);
self.validate_addr(&addr)?;
let pool_idx = addr.pool_idx as usize;
let size_list = self.sizes_lists.get(pool_idx).unwrap();
let size = size_list[addr.packet_idx as usize];
Ok(match size {
STORE_FREE => 0,
_ => size,
})
}
}
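The new len_of_data accessor enables the pattern used by the PUS handler further down in this diff: query the stored size first, check the target buffer, then read. A rough sketch:

use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};

fn main() {
    let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(2, 32)], false));
    let addr = pool.add(&[0; 12]).expect("adding data failed");
    let mut buf: [u8; 32] = [0; 32];
    // Query the stored length first so undersized buffers can be detected
    // before attempting the copy.
    let len = pool.len_of_data(&addr).expect("length query failed");
    assert!(len <= buf.len());
    pool.read(&addr, &mut buf[..len]).expect("read failed");
}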
impl PoolProviderMemInPlaceWithGuards for StaticMemoryPool {
impl PoolProviderWithGuards for StaticMemoryPool {
fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard<Self> {
PoolRwGuard::new(self, addr)
}
@ -556,30 +657,29 @@ mod alloc_mod {
#[cfg(test)]
mod tests {
use crate::pool::{
PoolGuard, PoolProviderMemInPlace, PoolProviderMemInPlaceWithGuards, PoolRwGuard,
StaticMemoryPool, StaticPoolAddr, StaticPoolConfig, StoreError, StoreIdError,
POOL_MAX_SIZE,
PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticMemoryPool,
StaticPoolAddr, StaticPoolConfig, StoreError, StoreIdError, POOL_MAX_SIZE,
};
use std::vec;
fn basic_small_pool() -> StaticMemoryPool {
// 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes
let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)]);
let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)], false);
StaticMemoryPool::new(pool_cfg)
}
#[test]
fn test_cfg() {
// Values where number of buckets is 0 or size is too large should be removed
let mut pool_cfg = StaticPoolConfig::new(vec![(0, 0), (1, 0), (2, POOL_MAX_SIZE)]);
let mut pool_cfg = StaticPoolConfig::new(vec![(0, 0), (1, 0), (2, POOL_MAX_SIZE)], false);
pool_cfg.sanitize();
assert_eq!(*pool_cfg.cfg(), vec![(1, 0)]);
// Entries should be ordered according to bucket size
pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)]);
pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)], false);
pool_cfg.sanitize();
assert_eq!(*pool_cfg.cfg(), vec![(32, 3), (16, 6), (8, 12)]);
// Unstable sort is used, so order of entries with same block length should not matter
pool_cfg = StaticPoolConfig::new(vec![(12, 12), (14, 16), (10, 12)]);
pool_cfg = StaticPoolConfig::new(vec![(12, 12), (14, 16), (10, 12)], false);
pool_cfg.sanitize();
assert!(
*pool_cfg.cfg() == vec![(12, 12), (10, 12), (14, 16)]
@ -594,13 +694,14 @@ mod tests {
for (i, val) in test_buf.iter_mut().enumerate() {
*val = i as u8;
}
let mut other_buf: [u8; 16] = [0; 16];
let addr = local_pool.add(&test_buf).expect("Adding data failed");
// Read back data and verify correctness
let res = local_pool.read(&addr);
let res = local_pool.read(&addr, &mut other_buf);
assert!(res.is_ok());
let buf_read_back = res.unwrap();
assert_eq!(buf_read_back.len(), 16);
for (i, &val) in buf_read_back.iter().enumerate() {
let read_len = res.unwrap();
assert_eq!(read_len, 16);
for (i, &val) in other_buf.iter().enumerate() {
assert_eq!(val, i as u8);
}
}
@ -610,8 +711,10 @@ mod tests {
let mut local_pool = basic_small_pool();
let test_buf: [u8; 12] = [0; 12];
let addr = local_pool.add(&test_buf).expect("Adding data failed");
let res = local_pool.read(&addr).expect("Read back failed");
assert_eq!(res.len(), 12);
let res = local_pool
.read(&addr, &mut [0; 12])
.expect("Read back failed");
assert_eq!(res, 12);
}
#[test]
@ -622,10 +725,13 @@ mod tests {
// Delete the data
let res = local_pool.delete(addr);
assert!(res.is_ok());
let mut writer = |buf: &mut [u8]| {
assert_eq!(buf.len(), 12);
};
// Verify that the slot is free again by requesting a free element of the same size
let res = local_pool.free_element(12);
let res = local_pool.free_element(12, &mut writer);
assert!(res.is_ok());
let (addr, buf_ref) = res.unwrap();
let addr = res.unwrap();
assert_eq!(
addr,
u64::from(StaticPoolAddr {
@ -633,7 +739,6 @@ mod tests {
packet_idx: 0
})
);
assert_eq!(buf_ref.len(), 12);
}
#[test]
@ -647,29 +752,34 @@ mod tests {
{
// Modify the data in place through the updater closure
let res = local_pool.modify(&addr).expect("Modifying data failed");
res[0] = 0;
res[1] = 0x42;
local_pool
.modify(&addr, &mut |buf: &mut [u8]| {
buf[0] = 0;
buf[1] = 0x42;
})
.expect("Modifying data failed");
}
let res = local_pool.read(&addr).expect("Reading back data failed");
assert_eq!(res[0], 0);
assert_eq!(res[1], 0x42);
assert_eq!(res[2], 2);
assert_eq!(res[3], 3);
local_pool
.read(&addr, &mut test_buf)
.expect("Reading back data failed");
assert_eq!(test_buf[0], 0);
assert_eq!(test_buf[1], 0x42);
assert_eq!(test_buf[2], 2);
assert_eq!(test_buf[3], 3);
}
#[test]
fn test_consecutive_reservation() {
let mut local_pool = basic_small_pool();
// Reserve two smaller blocks consecutively and verify that the third reservation fails
let res = local_pool.free_element(8);
let res = local_pool.free_element(8, |_| {});
assert!(res.is_ok());
let (addr0, _) = res.unwrap();
let res = local_pool.free_element(8);
let addr0 = res.unwrap();
let res = local_pool.free_element(8, |_| {});
assert!(res.is_ok());
let (addr1, _) = res.unwrap();
let res = local_pool.free_element(8);
let addr1 = res.unwrap();
let res = local_pool.free_element(8, |_| {});
assert!(res.is_err());
let err = res.unwrap_err();
assert_eq!(err, StoreError::StoreFull(1));
@ -689,6 +799,7 @@ mod tests {
pool_idx: 0,
}
.into(),
&mut [],
);
assert!(res.is_err());
assert!(matches!(
@ -720,7 +831,7 @@ mod tests {
packet_idx: 0,
}
.into();
let res = local_pool.read(&addr);
let res = local_pool.read(&addr, &mut []);
assert!(res.is_err());
let err = res.unwrap_err();
assert!(matches!(
@ -737,7 +848,7 @@ mod tests {
packet_idx: 1,
};
assert_eq!(addr.raw(), 0x00020001);
let res = local_pool.read(&addr.into());
let res = local_pool.read(&addr.into(), &mut []);
assert!(res.is_err());
let err = res.unwrap_err();
assert!(matches!(
@ -759,7 +870,7 @@ mod tests {
#[test]
fn test_data_too_large_1() {
let mut local_pool = basic_small_pool();
let res = local_pool.free_element(POOL_MAX_SIZE + 1);
let res = local_pool.free_element(POOL_MAX_SIZE + 1, |_| {});
assert!(res.is_err());
assert_eq!(
res.unwrap_err(),
@ -771,7 +882,7 @@ mod tests {
fn test_free_element_too_large() {
let mut local_pool = basic_small_pool();
// Try to request a slot which is too large
let res = local_pool.free_element(20);
let res = local_pool.free_element(20, |_| {});
assert!(res.is_err());
assert_eq!(res.unwrap_err(), StoreError::DataTooLarge(20));
}
@ -813,7 +924,7 @@ mod tests {
let test_buf: [u8; 16] = [0; 16];
let addr = local_pool.add(&test_buf).expect("Adding data failed");
let mut rw_guard = PoolRwGuard::new(&mut local_pool, addr);
let _ = rw_guard.modify().expect("modify failed");
rw_guard.update(&mut |_| {}).expect("modify failed");
drop(rw_guard);
assert!(!local_pool.has_element_at(&addr).expect("Invalid address"));
}
@ -824,7 +935,7 @@ mod tests {
let test_buf: [u8; 16] = [0; 16];
let addr = local_pool.add(&test_buf).expect("Adding data failed");
let mut rw_guard = local_pool.modify_with_guard(addr);
let _ = rw_guard.modify().expect("modify failed");
rw_guard.update(&mut |_| {}).expect("modify failed");
drop(rw_guard);
assert!(!local_pool.has_element_at(&addr).expect("Invalid address"));
}
@ -840,13 +951,93 @@ mod tests {
let addr1 = local_pool.add(&test_buf_1).expect("Adding data failed");
let addr2 = local_pool.add(&test_buf_2).expect("Adding data failed");
let addr3 = local_pool.add(&test_buf_3).expect("Adding data failed");
let tm0_raw = local_pool.modify(&addr0).expect("Modifying data failed");
assert_eq!(tm0_raw, test_buf_0);
let tm1_raw = local_pool.modify(&addr1).expect("Modifying data failed");
assert_eq!(tm1_raw, test_buf_1);
let tm2_raw = local_pool.modify(&addr2).expect("Modifying data failed");
assert_eq!(tm2_raw, test_buf_2);
let tm3_raw = local_pool.modify(&addr3).expect("Modifying data failed");
assert_eq!(tm3_raw, test_buf_3);
local_pool
.modify(&addr0, |buf| {
assert_eq!(buf, test_buf_0);
})
.expect("Modifying data failed");
local_pool
.modify(&addr1, |buf| {
assert_eq!(buf, test_buf_1);
})
.expect("Modifying data failed");
local_pool
.modify(&addr2, |buf| {
assert_eq!(buf, test_buf_2);
})
.expect("Modifying data failed");
local_pool
.modify(&addr3, |buf| {
assert_eq!(buf, test_buf_3);
})
.expect("Modifying data failed");
}
#[test]
fn test_spills_to_higher_subpools() {
let pool_cfg = StaticPoolConfig::new(vec![(2, 8), (2, 16)], true);
let mut local_pool = StaticMemoryPool::new(pool_cfg);
local_pool.free_element(8, |_| {}).unwrap();
local_pool.free_element(8, |_| {}).unwrap();
let mut in_larger_subpool_now = local_pool.free_element(8, |_| {});
assert!(in_larger_subpool_now.is_ok());
let generic_addr = in_larger_subpool_now.unwrap();
let pool_addr = StaticPoolAddr::from(generic_addr);
assert_eq!(pool_addr.pool_idx, 1);
assert_eq!(pool_addr.packet_idx, 0);
assert!(local_pool.has_element_at(&generic_addr).unwrap());
in_larger_subpool_now = local_pool.free_element(8, |_| {});
assert!(in_larger_subpool_now.is_ok());
let generic_addr = in_larger_subpool_now.unwrap();
let pool_addr = StaticPoolAddr::from(generic_addr);
assert_eq!(pool_addr.pool_idx, 1);
assert_eq!(pool_addr.packet_idx, 1);
assert!(local_pool.has_element_at(&generic_addr).unwrap());
}
#[test]
fn test_spillage_fails_as_well() {
let pool_cfg = StaticPoolConfig::new(vec![(1, 8), (1, 16)], true);
let mut local_pool = StaticMemoryPool::new(pool_cfg);
local_pool.free_element(8, |_| {}).unwrap();
local_pool.free_element(8, |_| {}).unwrap();
let should_fail = local_pool.free_element(8, |_| {});
assert!(should_fail.is_err());
if let Err(err) = should_fail {
assert_eq!(err, StoreError::StoreFull(1));
} else {
panic!("unexpected store address");
}
}
#[test]
fn test_spillage_works_across_multiple_subpools() {
let pool_cfg = StaticPoolConfig::new(vec![(1, 8), (1, 12), (1, 16)], true);
let mut local_pool = StaticMemoryPool::new(pool_cfg);
local_pool.free_element(8, |_| {}).unwrap();
local_pool.free_element(12, |_| {}).unwrap();
let in_larger_subpool_now = local_pool.free_element(8, |_| {});
assert!(in_larger_subpool_now.is_ok());
let generic_addr = in_larger_subpool_now.unwrap();
let pool_addr = StaticPoolAddr::from(generic_addr);
assert_eq!(pool_addr.pool_idx, 2);
assert_eq!(pool_addr.packet_idx, 0);
assert!(local_pool.has_element_at(&generic_addr).unwrap());
}
#[test]
fn test_spillage_fails_across_multiple_subpools() {
let pool_cfg = StaticPoolConfig::new(vec![(1, 8), (1, 12), (1, 16)], true);
let mut local_pool = StaticMemoryPool::new(pool_cfg);
local_pool.free_element(8, |_| {}).unwrap();
local_pool.free_element(12, |_| {}).unwrap();
local_pool.free_element(16, |_| {}).unwrap();
let should_fail = local_pool.free_element(8, |_| {});
assert!(should_fail.is_err());
if let Err(err) = should_fail {
assert_eq!(err, StoreError::StoreFull(2));
} else {
panic!("unexpected store address");
}
}
}

View File

@ -390,7 +390,7 @@ mod alloc_mod {
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod std_mod {
use crate::pool::{PoolProviderMemInPlaceWithGuards, SharedStaticMemoryPool, StoreAddr};
use crate::pool::{PoolProvider, PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr};
use crate::pus::verification::{
StdVerifReporterWithSender, TcStateAccepted, VerificationToken,
};
@ -789,12 +789,15 @@ pub mod std_mod {
.shared_tc_store
.write()
.map_err(|_| PusPacketHandlingError::EcssTmtc(EcssTmtcError::StoreLock))?;
let tc_guard = tc_pool.read_with_guard(addr);
let tc_raw = tc_guard.read().unwrap();
if tc_raw.len() > self.pus_buf.len() {
return Err(PusPacketHandlingError::PusPacketTooLarge(tc_raw.len()));
let tc_size = tc_pool
.len_of_data(&addr)
.map_err(|e| PusPacketHandlingError::EcssTmtc(EcssTmtcError::Store(e)))?;
if tc_size > self.pus_buf.len() {
return Err(PusPacketHandlingError::PusPacketTooLarge(tc_size));
}
self.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw);
let tc_guard = tc_pool.read_with_guard(addr);
// TODO: Proper error handling.
tc_guard.read(&mut self.pus_buf[0..tc_size]).unwrap();
Ok(())
}
}
@ -947,8 +950,7 @@ pub mod tests {
use spacepackets::CcsdsPacket;
use crate::pool::{
PoolProviderMemInPlace, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig,
StoreAddr,
PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig, StoreAddr,
};
use crate::pus::verification::RequestId;
use crate::tmtc::tm_helper::SharedTmPool;
@ -1015,7 +1017,7 @@ pub mod tests {
///
/// The PUS service handler is instantiated with an [EcssTcInSharedStoreConverter].
pub fn new() -> (Self, PusServiceHelper<EcssTcInSharedStoreConverter>) {
let pool_cfg = StaticPoolConfig::new(vec![(16, 16), (8, 32), (4, 64)]);
let pool_cfg = StaticPoolConfig::new(vec![(16, 16), (8, 32), (4, 64)], false);
let tc_pool = StaticMemoryPool::new(pool_cfg.clone());
let tm_pool = StaticMemoryPool::new(pool_cfg);
let shared_tc_pool = SharedStaticMemoryPool::new(RwLock::new(tc_pool));
@ -1078,8 +1080,8 @@ pub mod tests {
assert!(next_msg.is_ok());
let tm_addr = next_msg.unwrap();
let tm_pool = self.tm_pool.0.read().unwrap();
let tm_raw = tm_pool.read(&tm_addr).unwrap();
self.tm_buf[0..tm_raw.len()].copy_from_slice(tm_raw);
let tm_raw = tm_pool.read_as_vec(&tm_addr).unwrap();
self.tm_buf[0..tm_raw.len()].copy_from_slice(&tm_raw);
PusTmReader::new(&self.tm_buf, 7).unwrap().0
}
@ -1096,8 +1098,8 @@ pub mod tests {
assert!(next_msg.is_ok());
let tm_addr = next_msg.unwrap();
let tm_pool = self.tm_pool.0.read().unwrap();
let tm_raw = tm_pool.read(&tm_addr).unwrap();
let tm = PusTmReader::new(tm_raw, 7).unwrap().0;
let tm_raw = tm_pool.read_as_vec(&tm_addr).unwrap();
let tm = PusTmReader::new(&tm_raw, 7).unwrap().0;
assert_eq!(PusPacket::service(&tm), 1);
assert_eq!(PusPacket::subservice(&tm), subservice);
assert_eq!(tm.apid(), TEST_APID);

View File

@ -16,7 +16,7 @@ use spacepackets::{ByteConversionError, CcsdsPacket};
#[cfg(feature = "std")]
use std::error::Error;
use crate::pool::{PoolProviderMemInPlace, StoreError};
use crate::pool::{PoolProvider, StoreError};
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
@ -238,13 +238,11 @@ impl Error for ScheduleError {
}
}
pub trait PusSchedulerInterface {
/// Generic trait for scheduler objects which are able to schedule ECSS PUS TC packets.
pub trait PusSchedulerProvider {
type TimeProvider: CcsdsTimeProvider + TimeReader;
fn reset(
&mut self,
store: &mut (impl PoolProviderMemInPlace + ?Sized),
) -> Result<(), StoreError>;
fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), StoreError>;
fn is_enabled(&self) -> bool;
@ -267,7 +265,7 @@ pub trait PusSchedulerInterface {
fn insert_wrapped_tc<TimeProvider>(
&mut self,
pus_tc: &(impl IsPusTelecommand + PusPacket + GenericPusTcSecondaryHeader),
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<TcInfo, ScheduleError> {
if PusPacket::service(pus_tc) != 11 {
return Err(ScheduleError::WrongService(PusPacket::service(pus_tc)));
@ -293,7 +291,7 @@ pub trait PusSchedulerInterface {
&mut self,
time_stamp: UnixTimestamp,
tc: &[u8],
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<TcInfo, ScheduleError> {
let check_tc = PusTcReader::new(tc)?;
if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 {
@ -343,7 +341,7 @@ pub fn generate_insert_telecommand_app_data(
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use super::*;
use crate::pool::{PoolProviderMemInPlace, StoreAddr, StoreError};
use crate::pool::{PoolProvider, StoreAddr, StoreError};
use alloc::collections::btree_map::{Entry, Range};
use alloc::collections::BTreeMap;
use alloc::vec;
@ -379,7 +377,7 @@ pub mod alloc_mod {
/// This is the core data structure for scheduling PUS telecommands with [alloc] support.
///
/// It is assumed that the actual telecommand data is stored in a separate TC pool offering
/// a [crate::pool::PoolProviderMemInPlace] API. This data structure just tracks the store
/// a [crate::pool::PoolProvider] API. This data structure just tracks the store
/// addresses and their release times and offers a convenient API to insert and release
/// telecommands and perform other functionality specified by the ECSS standard in section 6.11.
/// The time is tracked as a [spacepackets::time::UnixTimestamp] but the only requirement to
@ -413,6 +411,8 @@ pub mod alloc_mod {
/// * `time_margin` - This time margin is used when inserting new telecommands into the
/// schedule. If the release time of a new telecommand is earlier than the time margin
/// added to the current time, it will not be inserted into the schedule.
/// * `tc_buf_size` - Buffer for temporary storage of telecommand packets. This buffer
/// should be large enough to accommodate the largest expected TC packets.
pub fn new(init_current_time: UnixTimestamp, time_margin: Duration) -> Self {
PusScheduler {
tc_map: Default::default(),
@ -476,7 +476,7 @@ pub mod alloc_mod {
&mut self,
time_stamp: UnixTimestamp,
tc: &[u8],
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<TcInfo, ScheduleError> {
let check_tc = PusTcReader::new(tc)?;
if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 {
@ -499,7 +499,7 @@ pub mod alloc_mod {
pub fn insert_wrapped_tc_cds_short(
&mut self,
pus_tc: &PusTc,
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<TcInfo, ScheduleError> {
self.insert_wrapped_tc::<cds::TimeProvider>(pus_tc, pool)
}
@ -509,7 +509,7 @@ pub mod alloc_mod {
pub fn insert_wrapped_tc_cds_long(
&mut self,
pus_tc: &PusTc,
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<TcInfo, ScheduleError> {
self.insert_wrapped_tc::<cds::TimeProvider<DaysLen24Bits>>(pus_tc, pool)
}
@ -525,7 +525,7 @@ pub mod alloc_mod {
pub fn delete_by_time_filter<TimeProvider: CcsdsTimeProvider + Clone>(
&mut self,
time_window: TimeWindow<TimeProvider>,
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<u64, (u64, StoreError)> {
let range = self.retrieve_by_time_filter(time_window);
let mut del_packets = 0;
@ -555,7 +555,7 @@ pub mod alloc_mod {
/// the last deletion will be supplied in addition to the number of deleted commands.
pub fn delete_all(
&mut self,
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<u64, (u64, StoreError)> {
self.delete_by_time_filter(TimeWindow::<cds::TimeProvider>::new_select_all(), pool)
}
@ -613,7 +613,7 @@ pub mod alloc_mod {
pub fn delete_by_request_id_and_from_pool(
&mut self,
req_id: &RequestId,
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<bool, StoreError> {
if let DeletionResult::WithStoreDeletion(v) =
self.delete_by_request_id_internal_with_store_deletion(req_id, pool)
@ -645,7 +645,7 @@ pub mod alloc_mod {
fn delete_by_request_id_internal_with_store_deletion(
&mut self,
req_id: &RequestId,
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
pool: &mut (impl PoolProvider + ?Sized),
) -> DeletionResult {
let mut idx_found = None;
for time_bucket in &mut self.tc_map {
@ -675,7 +675,8 @@ pub mod alloc_mod {
/// Utility method which calls [Self::telecommands_to_release] and then calls a releaser
/// closure for each telecommand which should be released. This function will also delete
/// the telecommands from the holding store after calling the release closure if the user
/// returns [true] from the release closure.
/// returns [true] from the release closure. A buffer must be provided to hold the
/// telecommands for the release process.
///
/// # Arguments
///
@ -685,18 +686,55 @@ pub mod alloc_mod {
/// note that returning false might lead to memory leaks if the TC is not cleared from
/// the store in some other way.
/// * `tc_store` - The holding store of the telecommands.
/// * `tc_buf` - Buffer to hold each telecommand being released.
pub fn release_telecommands_with_buffer<R: FnMut(bool, &TcInfo, &[u8]) -> bool>(
&mut self,
releaser: R,
tc_store: &mut (impl PoolProvider + ?Sized),
tc_buf: &mut [u8],
) -> Result<u64, (u64, StoreError)> {
self.release_telecommands_internal(releaser, tc_store, Some(tc_buf))
}
/// This function is almost identical to [Self::release_telecommands_with_buffer] but does
/// not require a user-provided TC buffer because it will always use the
/// [PoolProvider::read_as_vec] API to read the TC packets.
///
/// However, this might also perform frequent allocations, one for each telecommand being
/// released.
pub fn release_telecommands<R: FnMut(bool, &TcInfo, &[u8]) -> bool>(
&mut self,
releaser: R,
tc_store: &mut (impl PoolProvider + ?Sized),
) -> Result<u64, (u64, StoreError)> {
self.release_telecommands_internal(releaser, tc_store, None)
}
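A rough usage sketch for the buffered variant, assuming the scheduler and pool setup used by the tests below (the crate paths match those used elsewhere in this diff; the buffer size is illustrative):

use std::time::Duration;
use satrs_core::pool::{StaticMemoryPool, StaticPoolConfig};
use satrs_core::pus::scheduler::{PusScheduler, TcInfo};
use satrs_core::spacepackets::time::UnixTimestamp;

fn main() {
    let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32)], false));
    let mut scheduler =
        PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
    let mut tc_buf: [u8; 128] = [0; 128];
    // The releaser closure receives the scheduler enabled state, the TC metadata
    // and the raw TC bytes; returning true deletes the TC from the pool.
    let releaser = |enabled: bool, _info: &TcInfo, _tc: &[u8]| enabled;
    scheduler
        .release_telecommands_with_buffer(releaser, &mut pool, &mut tc_buf)
        .expect("releasing TCs failed");
}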
fn release_telecommands_internal<R: FnMut(bool, &TcInfo, &[u8]) -> bool>(
&mut self,
mut releaser: R,
tc_store: &mut (impl PoolProviderMemInPlace + ?Sized),
tc_store: &mut (impl PoolProvider + ?Sized),
mut tc_buf: Option<&mut [u8]>,
) -> Result<u64, (u64, StoreError)> {
let tcs_to_release = self.telecommands_to_release();
let mut released_tcs = 0;
let mut store_error = Ok(());
for tc in tcs_to_release {
for info in tc.1 {
let tc = tc_store.read(&info.addr).map_err(|e| (released_tcs, e))?;
let should_delete = releaser(self.enabled, info, tc);
let should_delete = match tc_buf.as_mut() {
Some(buf) => {
tc_store
.read(&info.addr, buf)
.map_err(|e| (released_tcs, e))?;
releaser(self.enabled, info, buf)
}
None => {
let tc = tc_store
.read_as_vec(&info.addr)
.map_err(|e| (released_tcs, e))?;
releaser(self.enabled, info, &tc)
}
};
released_tcs += 1;
if should_delete {
let res = tc_store.delete(info.addr);
@ -721,16 +759,17 @@ pub mod alloc_mod {
pub fn release_telecommands_no_deletion<R: FnMut(bool, &TcInfo, &[u8])>(
&mut self,
mut releaser: R,
tc_store: &(impl PoolProviderMemInPlace + ?Sized),
tc_store: &(impl PoolProvider + ?Sized),
tc_buf: &mut [u8],
) -> Result<Vec<TcInfo>, (Vec<TcInfo>, StoreError)> {
let tcs_to_release = self.telecommands_to_release();
let mut released_tcs = Vec::new();
for tc in tcs_to_release {
for info in tc.1 {
let tc = tc_store
.read(&info.addr)
tc_store
.read(&info.addr, tc_buf)
.map_err(|e| (released_tcs.clone(), e))?;
releaser(self.is_enabled(), info, tc);
releaser(self.is_enabled(), info, tc_buf);
released_tcs.push(*info);
}
}
@ -744,7 +783,7 @@ pub mod alloc_mod {
}
}
impl PusSchedulerInterface for PusScheduler {
impl PusSchedulerProvider for PusScheduler {
type TimeProvider = cds::TimeProvider;
/// This will disable the scheduler and clear the schedule as specified in 6.11.4.4.
@ -753,10 +792,7 @@ pub mod alloc_mod {
/// The holding store for the telecommands needs to be passed so all the stored telecommands
/// can be deleted to avoid a memory leak. If at last one deletion operation fails, the error
/// will be returned but the method will still try to delete all the commands in the schedule.
fn reset(
&mut self,
store: &mut (impl PoolProviderMemInPlace + ?Sized),
) -> Result<(), StoreError> {
fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), StoreError> {
self.enabled = false;
let mut deletion_ok = Ok(());
for tc_lists in &mut self.tc_map {
@ -814,8 +850,7 @@ pub mod alloc_mod {
mod tests {
use super::*;
use crate::pool::{
PoolProviderMemInPlace, StaticMemoryPool, StaticPoolAddr, StaticPoolConfig, StoreAddr,
StoreError,
PoolProvider, StaticMemoryPool, StaticPoolAddr, StaticPoolConfig, StoreAddr, StoreError,
};
use alloc::collections::btree_map::Range;
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader};
@ -911,7 +946,7 @@ mod tests {
#[test]
fn test_reset() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
@ -1063,7 +1098,7 @@ mod tests {
}
#[test]
fn test_release_telecommands() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
@ -1088,8 +1123,9 @@ mod tests {
// test 1: too early, no tcs
scheduler.update_time(UnixTimestamp::new_only_seconds(99));
let mut tc_buf: [u8; 128] = [0; 128];
scheduler
.release_telecommands(&mut test_closure_1, &mut pool)
.release_telecommands_with_buffer(&mut test_closure_1, &mut pool, &mut tc_buf)
.expect("deletion failed");
// test 2: exact time stamp of tc, releases 1 tc
@ -1111,7 +1147,7 @@ mod tests {
scheduler.update_time(UnixTimestamp::new_only_seconds(206));
released = scheduler
.release_telecommands(&mut test_closure_2, &mut pool)
.release_telecommands_with_buffer(&mut test_closure_2, &mut pool, &mut tc_buf)
.expect("deletion failed");
assert_eq!(released, 1);
// TC is deleted.
@ -1128,7 +1164,7 @@ mod tests {
#[test]
fn release_multi_with_same_time() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
@ -1157,9 +1193,10 @@ mod tests {
// test 1: too early, no tcs
scheduler.update_time(UnixTimestamp::new_only_seconds(99));
let mut tc_buf: [u8; 128] = [0; 128];
let mut released = scheduler
.release_telecommands(&mut test_closure, &mut pool)
.release_telecommands_with_buffer(&mut test_closure, &mut pool, &mut tc_buf)
.expect("deletion failed");
assert_eq!(released, 0);
@ -1185,7 +1222,7 @@ mod tests {
#[test]
fn release_with_scheduler_disabled() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
@ -1209,11 +1246,13 @@ mod tests {
true
};
let mut tc_buf: [u8; 128] = [0; 128];
// test 1: too early, no tcs
scheduler.update_time(UnixTimestamp::new_only_seconds(99));
scheduler
.release_telecommands(&mut test_closure_1, &mut pool)
.release_telecommands_with_buffer(&mut test_closure_1, &mut pool, &mut tc_buf)
.expect("deletion failed");
// test 2: exact time stamp of tc, releases 1 tc
@ -1253,7 +1292,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
@ -1267,8 +1306,9 @@ mod tests {
assert!(pool.has_element_at(&tc_info_0.addr()).unwrap());
let data = pool.read(&tc_info_0.addr()).unwrap();
let check_tc = PusTcReader::new(data).expect("incorrect Pus tc raw data");
let mut read_buf: [u8; 64] = [0; 64];
pool.read(&tc_info_0.addr(), &mut read_buf).unwrap();
let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
assert_eq!(scheduler.num_scheduled_telecommands(), 1);
@ -1289,8 +1329,9 @@ mod tests {
.release_telecommands(&mut test_closure, &mut pool)
.unwrap();
let data = pool.read(&addr_vec[0]).unwrap();
let check_tc = PusTcReader::new(data).expect("incorrect Pus tc raw data");
let read_len = pool.read(&addr_vec[0], &mut read_buf).unwrap();
let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
}
@ -1299,7 +1340,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut buf: [u8; 32] = [0; 32];
let tc = scheduled_tc(UnixTimestamp::new_only_seconds(100), &mut buf);
@ -1313,8 +1354,9 @@ mod tests {
assert!(pool.has_element_at(&info.addr).unwrap());
let data = pool.read(&info.addr).unwrap();
let check_tc = PusTcReader::new(data).expect("incorrect Pus tc raw data");
let read_len = pool.read(&info.addr, &mut buf).unwrap();
let check_tc = PusTcReader::new(&buf).expect("incorrect Pus tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
assert_eq!(scheduler.num_scheduled_telecommands(), 1);
@ -1331,12 +1373,15 @@ mod tests {
false
};
let mut tc_buf: [u8; 64] = [0; 64];
scheduler
.release_telecommands(&mut test_closure, &mut pool)
.release_telecommands_with_buffer(&mut test_closure, &mut pool, &mut tc_buf)
.unwrap();
let data = pool.read(&addr_vec[0]).unwrap();
let check_tc = PusTcReader::new(data).expect("incorrect PUS tc raw data");
let read_len = pool.read(&addr_vec[0], &mut buf).unwrap();
let check_tc = PusTcReader::new(&buf).expect("incorrect PUS tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
}
@ -1345,7 +1390,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut buf: [u8; 32] = [0; 32];
let tc = wrong_tc_service(UnixTimestamp::new_only_seconds(100), &mut buf);
@ -1368,7 +1413,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut buf: [u8; 32] = [0; 32];
let tc = wrong_tc_subservice(UnixTimestamp::new_only_seconds(100), &mut buf);
@ -1390,7 +1435,7 @@ mod tests {
fn insert_wrapped_tc_faulty_app_data() {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let tc = invalid_time_tagged_cmd();
let insert_res = scheduler.insert_wrapped_tc::<cds::TimeProvider>(&tc, &mut pool);
assert!(insert_res.is_err());
@ -1405,7 +1450,7 @@ mod tests {
fn insert_doubly_wrapped_time_tagged_cmd() {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut buf: [u8; 64] = [0; 64];
let tc = double_wrapped_time_tagged_tc(UnixTimestamp::new_only_seconds(50), &mut buf);
let insert_res = scheduler.insert_wrapped_tc::<cds::TimeProvider>(&tc, &mut pool);
@ -1441,7 +1486,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut buf: [u8; 32] = [0; 32];
@ -1465,7 +1510,7 @@ mod tests {
#[test]
fn test_store_error_propagation_release() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
@ -1500,7 +1545,7 @@ mod tests {
#[test]
fn test_store_error_propagation_reset() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
@ -1524,7 +1569,7 @@ mod tests {
#[test]
fn test_delete_by_req_id_simple_retrieve_addr() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
@ -1543,7 +1588,7 @@ mod tests {
#[test]
fn test_delete_by_req_id_simple_delete_all() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
@ -1562,7 +1607,7 @@ mod tests {
#[test]
fn test_delete_by_req_id_complex() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
@ -1609,7 +1654,7 @@ mod tests {
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(1, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(1, 64)], false));
let mut buf: [u8; 32] = [0; 32];
// Store is full after this.
@ -1648,7 +1693,7 @@ mod tests {
#[test]
fn test_time_window_retrieval_select_all() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@ -1679,7 +1724,7 @@ mod tests {
#[test]
fn test_time_window_retrieval_select_from_stamp() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@ -1710,7 +1755,7 @@ mod tests {
#[test]
fn test_time_window_retrieval_select_to_time() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@ -1741,7 +1786,7 @@ mod tests {
#[test]
fn test_time_window_retrieval_select_from_time_to_time() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@ -1776,7 +1821,7 @@ mod tests {
#[test]
fn test_deletion_all() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@ -1803,7 +1848,7 @@ mod tests {
#[test]
fn test_deletion_from_start_time() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@ -1824,7 +1869,7 @@ mod tests {
#[test]
fn test_deletion_to_end_time() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let cmd_0_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@ -1846,7 +1891,7 @@ mod tests {
#[test]
fn test_deletion_from_start_time_to_end_time() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
let cmd_out_of_range_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
@ -1875,7 +1920,7 @@ mod tests {
#[test]
fn test_release_without_deletion() {
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)]));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
let mut scheduler =
PusScheduler::new(UnixTimestamp::new_only_seconds(0), Duration::from_secs(5));
@ -1903,8 +1948,9 @@ mod tests {
scheduler.update_time(UnixTimestamp::new_only_seconds(205));
let mut tc_buf: [u8; 64] = [0; 64];
let tc_info_vec = scheduler
.release_telecommands_no_deletion(&mut test_closure_1, &pool)
.release_telecommands_no_deletion(&mut test_closure_1, &pool, &mut tc_buf)
.expect("deletion failed");
assert_eq!(tc_info_vec[0], tc_info_0);
assert_eq!(tc_info_vec[1], tc_info_1);

View File

@ -1,6 +1,6 @@
use super::scheduler::PusSchedulerInterface;
use super::scheduler::PusSchedulerProvider;
use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
use crate::pool::PoolProviderMemInPlace;
use crate::pool::PoolProvider;
use crate::pus::{PusPacketHandlerResult, PusPacketHandlingError};
use alloc::string::ToString;
use spacepackets::ecss::{scheduling, PusPacket};
@ -16,13 +16,13 @@ use spacepackets::time::cds::TimeProvider;
/// telecommands when applicable.
pub struct PusService11SchedHandler<
TcInMemConverter: EcssTcInMemConverter,
Scheduler: PusSchedulerInterface,
PusScheduler: PusSchedulerProvider,
> {
pub service_helper: PusServiceHelper<TcInMemConverter>,
scheduler: Scheduler,
scheduler: PusScheduler,
}
impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerInterface>
impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerProvider>
PusService11SchedHandler<TcInMemConverter, Scheduler>
{
pub fn new(service_helper: PusServiceHelper<TcInMemConverter>, scheduler: Scheduler) -> Self {
@ -42,7 +42,7 @@ impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerInterface>
pub fn handle_one_tc(
&mut self,
sched_tc_pool: &mut (impl PoolProviderMemInPlace + ?Sized),
sched_tc_pool: &mut (impl PoolProvider + ?Sized),
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
@ -173,7 +173,7 @@ mod tests {
use crate::pool::{StaticMemoryPool, StaticPoolConfig};
use crate::pus::tests::TEST_APID;
use crate::pus::{
scheduler::{self, PusSchedulerInterface, TcInfo},
scheduler::{self, PusSchedulerProvider, TcInfo},
tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness},
verification::{RequestId, TcStateAccepted, VerificationToken},
EcssTcInSharedStoreConverter,
@ -201,7 +201,7 @@ mod tests {
impl Pus11HandlerWithStoreTester {
pub fn new() -> Self {
let test_scheduler = TestScheduler::default();
let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)]);
let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)], false);
let sched_tc_pool = StaticMemoryPool::new(pool_cfg.clone());
let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
Self {
@ -232,12 +232,12 @@ mod tests {
inserted_tcs: VecDeque<TcInfo>,
}
impl PusSchedulerInterface for TestScheduler {
impl PusSchedulerProvider for TestScheduler {
type TimeProvider = cds::TimeProvider;
fn reset(
&mut self,
_store: &mut (impl crate::pool::PoolProviderMemInPlace + ?Sized),
_store: &mut (impl crate::pool::PoolProvider + ?Sized),
) -> Result<(), crate::pool::StoreError> {
self.reset_count += 1;
Ok(())

View File

@ -15,7 +15,7 @@
//! ```
//! use std::sync::{Arc, mpsc, RwLock};
//! use std::time::Duration;
//! use satrs_core::pool::{PoolProviderMemInPlaceWithGuards, StaticMemoryPool, StaticPoolConfig};
//! use satrs_core::pool::{PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};
//! use satrs_core::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender};
//! use satrs_core::seq_count::SeqCountProviderSimple;
//! use satrs_core::pus::MpscTmInSharedPoolSender;
@ -28,7 +28,7 @@
//! const EMPTY_STAMP: [u8; 7] = [0; 7];
//! const TEST_APID: u16 = 0x02;
//!
//! let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
//! let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false);
//! let tm_pool = StaticMemoryPool::new(pool_cfg.clone());
//! let shared_tm_store = SharedTmPool::new(tm_pool);
//! let tm_store = shared_tm_store.clone_backing_pool();
@ -56,9 +56,7 @@
//! {
//! let mut rg = tm_store.write().expect("Error locking shared pool");
//! let store_guard = rg.read_with_guard(addr);
//! let slice = store_guard.read().expect("Error reading TM slice");
//! tm_len = slice.len();
//! tm_buf[0..tm_len].copy_from_slice(slice);
//! tm_len = store_guard.read(&mut tm_buf).expect("Error reading TM slice");
//! }
//! let (pus_tm, _) = PusTmReader::new(&tm_buf[0..tm_len], 7)
//! .expect("Error reading verification TM");
@ -1325,7 +1323,7 @@ mod std_mod {
#[cfg(test)]
mod tests {
use crate::pool::{PoolProviderMemInPlaceWithGuards, StaticMemoryPool, StaticPoolConfig};
use crate::pool::{PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};
use crate::pus::tests::CommonTmInfo;
use crate::pus::verification::{
EcssTmSenderCore, EcssTmtcError, FailParams, FailParamsWithStep, RequestId, TcStateNone,
@ -1486,7 +1484,7 @@ mod tests {
#[test]
fn test_mpsc_verif_send_sync() {
let pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(8, 8)]));
let pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(8, 8)], false));
let shared_tm_store = SharedTmPool::new(pool);
let (tx, _) = mpsc::channel();
let mpsc_verif_sender =
@ -1547,7 +1545,7 @@ mod tests {
let mut sender = TestSender::default();
let fail_code = EcssEnumU16::new(2);
let fail_params = FailParams::new(Some(stamp_buf.as_slice()), &fail_code, None);
b.vr.acceptance_failure(tok, &mut sender, fail_params)
b.vr.acceptance_failure(tok, &sender, fail_params)
.expect("Sending acceptance success failed");
acceptance_fail_check(&mut sender, tok.req_id, stamp_buf);
}
@ -1682,12 +1680,10 @@ mod tests {
);
let accepted_token =
b.vr.acceptance_success(tok, &mut sender, Some(&EMPTY_STAMP))
b.vr.acceptance_success(tok, &sender, Some(&EMPTY_STAMP))
.expect("Sending acceptance success failed");
let empty =
b.vr.start_failure(accepted_token, &mut sender, fail_params)
.expect("Start failure failure");
assert_eq!(empty, ());
b.vr.start_failure(accepted_token, &mut sender, fail_params)
.expect("Start failure failure");
start_fail_check(&mut sender, tok.req_id, fail_data_raw);
}
@ -1779,23 +1775,23 @@ mod tests {
let mut sender = TestSender::default();
let accepted_token = b
.rep()
.acceptance_success(tok, &mut sender, Some(&EMPTY_STAMP))
.acceptance_success(tok, &sender, Some(&EMPTY_STAMP))
.expect("Sending acceptance success failed");
let started_token = b
.rep()
.start_success(accepted_token, &mut sender, Some(&[0, 1, 0, 1, 0, 1, 0]))
.start_success(accepted_token, &sender, Some(&[0, 1, 0, 1, 0, 1, 0]))
.expect("Sending start success failed");
b.rep()
.step_success(
&started_token,
&mut sender,
&sender,
Some(&EMPTY_STAMP),
EcssEnumU8::new(0),
)
.expect("Sending step 0 success failed");
b.vr.step_success(
&started_token,
&mut sender,
&sender,
Some(&EMPTY_STAMP),
EcssEnumU8::new(1),
)
@ -2139,9 +2135,9 @@ mod tests {
}
#[test]
// TODO: maybe a bit more extensive testing; this is all I have time for right now
fn test_seq_count_increment() {
let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
let pool_cfg =
StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false);
let tm_pool = StaticMemoryPool::new(pool_cfg.clone());
let shared_tm_store = SharedTmPool::new(tm_pool);
let shared_tm_pool = shared_tm_store.clone_backing_pool();
@ -2176,9 +2172,9 @@ mod tests {
{
let mut rg = shared_tm_pool.write().expect("Error locking shared pool");
let store_guard = rg.read_with_guard(addr);
let slice = store_guard.read().expect("Error reading TM slice");
tm_len = slice.len();
tm_buf[0..tm_len].copy_from_slice(slice);
tm_len = store_guard
.read(&mut tm_buf)
.expect("Error reading TM slice");
}
let (pus_tm, _) =
PusTmReader::new(&tm_buf[0..tm_len], 7).expect("Error reading verification TM");

View File

@ -8,9 +8,7 @@ pub use std_mod::*;
#[cfg(feature = "std")]
pub mod std_mod {
use crate::pool::{
PoolProviderMemInPlace, SharedStaticMemoryPool, StaticMemoryPool, StoreAddr,
};
use crate::pool::{PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StoreAddr};
use crate::pus::EcssTmtcError;
use spacepackets::ecss::tm::PusTmCreator;
use spacepackets::ecss::WritablePusPacket;
@ -37,10 +35,11 @@ pub mod std_mod {
pub fn add_pus_tm(&self, pus_tm: &PusTmCreator) -> Result<StoreAddr, EcssTmtcError> {
let mut pg = self.0.write().map_err(|_| EcssTmtcError::StoreLock)?;
let (addr, buf) = pg.free_element(pus_tm.len_written())?;
pus_tm
.write_to_bytes(buf)
.expect("writing PUS TM to store failed");
let addr = pg.free_element(pus_tm.len_written(), |buf| {
pus_tm
.write_to_bytes(buf)
.expect("writing PUS TM to store failed");
})?;
Ok(addr)
}
}

View File

@ -1,6 +1,4 @@
use satrs_core::pool::{
PoolGuard, PoolProviderMemInPlace, StaticMemoryPool, StaticPoolConfig, StoreAddr,
};
use satrs_core::pool::{PoolGuard, PoolProvider, StaticMemoryPool, StaticPoolConfig, StoreAddr};
use std::ops::DerefMut;
use std::sync::mpsc;
use std::sync::mpsc::{Receiver, Sender};
@ -11,7 +9,7 @@ const DUMMY_DATA: [u8; 4] = [0, 1, 2, 3];
#[test]
fn threaded_usage() {
let pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)]);
let pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)], false);
let shared_pool = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
let shared_clone = shared_pool.clone();
let (tx, rx): (Sender<StoreAddr>, Receiver<StoreAddr>) = mpsc::channel();
@ -27,8 +25,10 @@ fn threaded_usage() {
addr = rx.recv().expect("Receiving store address failed");
let mut pool_access = shared_clone.write().unwrap();
let pg = PoolGuard::new(pool_access.deref_mut(), addr);
let read_res = pg.read().expect("Reading failed");
assert_eq!(read_res, DUMMY_DATA);
let mut read_buf: [u8; 4] = [0; 4];
let read_bytes = pg.read(&mut read_buf).expect("Reading failed");
assert_eq!(read_buf, DUMMY_DATA);
assert_eq!(read_bytes, 4);
}
let pool_access = shared_clone.read().unwrap();
assert!(!pool_access.has_element_at(&addr).expect("Invalid address"));

View File

@ -2,8 +2,7 @@
pub mod crossbeam_test {
use hashbrown::HashMap;
use satrs_core::pool::{
PoolProviderMemInPlace, PoolProviderMemInPlaceWithGuards, StaticMemoryPool,
StaticPoolConfig,
PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig,
};
use satrs_core::pus::verification::{
FailParams, RequestId, VerificationReporterCfg, VerificationReporterWithSender,
@ -36,7 +35,8 @@ pub mod crossbeam_test {
// each reporter have an own sequence count provider.
let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
// Shared pool object to store the verification PUS telemetry
let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)]);
let pool_cfg =
StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false);
let shared_tm_pool = SharedTmPool::new(StaticMemoryPool::new(pool_cfg.clone()));
let shared_tc_pool_0 = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
let shared_tc_pool_1 = shared_tc_pool_0.clone();
@ -59,15 +59,21 @@ pub mod crossbeam_test {
let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
let pus_tc_0 = PusTcCreator::new_no_app_data(&mut sph, tc_header, true);
req_id_0 = RequestId::new(&pus_tc_0);
let (addr, buf) = tc_guard.free_element(pus_tc_0.len_written()).unwrap();
pus_tc_0.write_to_bytes(buf).unwrap();
let addr = tc_guard
.free_element(pus_tc_0.len_written(), |buf| {
pus_tc_0.write_to_bytes(buf).unwrap();
})
.unwrap();
tx_tc_0.send(addr).unwrap();
let mut sph = SpHeader::tc_unseg(TEST_APID, 1, 0).unwrap();
let tc_header = PusTcSecondaryHeader::new_simple(5, 1);
let pus_tc_1 = PusTcCreator::new_no_app_data(&mut sph, tc_header, true);
req_id_1 = RequestId::new(&pus_tc_1);
let (addr, buf) = tc_guard.free_element(pus_tc_0.len_written()).unwrap();
pus_tc_1.write_to_bytes(buf).unwrap();
let addr = tc_guard
.free_element(pus_tc_0.len_written(), |buf| {
pus_tc_1.write_to_bytes(buf).unwrap();
})
.unwrap();
tx_tc_1.send(addr).unwrap();
}
let verif_sender_0 = thread::spawn(move || {
@ -79,9 +85,7 @@ pub mod crossbeam_test {
{
let mut tc_guard = shared_tc_pool_0.write().unwrap();
let pg = tc_guard.read_with_guard(tc_addr);
let buf = pg.read().unwrap();
tc_len = buf.len();
tc_buf[0..tc_len].copy_from_slice(buf);
tc_len = pg.read(&mut tc_buf).unwrap();
}
let (_tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
@ -117,9 +121,7 @@ pub mod crossbeam_test {
{
let mut tc_guard = shared_tc_pool_1.write().unwrap();
let pg = tc_guard.read_with_guard(tc_addr);
let buf = pg.read().unwrap();
tc_len = buf.len();
tc_buf[0..tc_len].copy_from_slice(buf);
tc_len = pg.read(&mut tc_buf).unwrap();
}
let (tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
let token = reporter_with_sender_1.add_tc(&tc);
@ -149,9 +151,9 @@ pub mod crossbeam_test {
{
let mut rg = shared_tm_store.write().expect("Error locking shared pool");
let store_guard = rg.read_with_guard(verif_addr);
let slice = store_guard.read().expect("Error reading TM slice");
tm_len = slice.len();
tm_buf[0..tm_len].copy_from_slice(slice);
tm_len = store_guard
.read(&mut tm_buf)
.expect("Error reading TM slice");
}
let (pus_tm, _) =
PusTmReader::new(&tm_buf[0..tm_len], 7).expect("Error reading verification TM");

View File

@ -103,34 +103,43 @@ pub mod pool {
use super::*;
pub fn create_static_pools() -> (StaticMemoryPool, StaticMemoryPool) {
(
StaticMemoryPool::new(StaticPoolConfig::new(vec![
(30, 32),
(15, 64),
(15, 128),
(15, 256),
(15, 1024),
(15, 2048),
])),
StaticMemoryPool::new(StaticPoolConfig::new(vec![
(30, 32),
(15, 64),
(15, 128),
(15, 256),
(15, 1024),
(15, 2048),
])),
StaticMemoryPool::new(StaticPoolConfig::new(
vec![
(30, 32),
(15, 64),
(15, 128),
(15, 256),
(15, 1024),
(15, 2048),
],
true,
)),
StaticMemoryPool::new(StaticPoolConfig::new(
vec![
(30, 32),
(15, 64),
(15, 128),
(15, 256),
(15, 1024),
(15, 2048),
],
true,
)),
)
}
pub fn create_sched_tc_pool() -> StaticMemoryPool {
StaticMemoryPool::new(StaticPoolConfig::new(vec![
(30, 32),
(15, 64),
(15, 128),
(15, 256),
(15, 1024),
(15, 2048),
]))
StaticMemoryPool::new(StaticPoolConfig::new(
vec![
(30, 32),
(15, 64),
(15, 128),
(15, 256),
(15, 1024),
(15, 2048),
],
true,
))
}
}

View File

@ -2,7 +2,7 @@ use std::sync::mpsc;
use std::time::Duration;
use log::{error, info, warn};
use satrs_core::pool::{PoolProviderMemInPlace, StaticMemoryPool, StoreAddr};
use satrs_core::pool::{PoolProvider, StaticMemoryPool, StoreAddr};
use satrs_core::pus::scheduler::{PusScheduler, TcInfo};
use satrs_core::pus::scheduler_srv::PusService11SchedHandler;
use satrs_core::pus::verification::VerificationReporterWithSender;
@ -54,6 +54,7 @@ impl TcReleaser for mpsc::Sender<Vec<u8>> {
pub struct Pus11Wrapper<TcInMemConverter: EcssTcInMemConverter> {
pub pus_11_handler: PusService11SchedHandler<TcInMemConverter, PusScheduler>,
pub sched_tc_pool: StaticMemoryPool,
pub releaser_buf: [u8; 4096],
pub tc_releaser: Box<dyn TcReleaser + Send>,
}
@ -70,7 +71,11 @@ impl<TcInMemConverter: EcssTcInMemConverter> Pus11Wrapper<TcInMemConverter> {
let released_tcs = self
.pus_11_handler
.scheduler_mut()
.release_telecommands(releaser, &mut self.sched_tc_pool)
.release_telecommands_with_buffer(
releaser,
&mut self.sched_tc_pool,
&mut self.releaser_buf,
)
.expect("releasing TCs failed");
if released_tcs > 0 {
info!("{released_tcs} TC(s) released from scheduler");
@ -136,6 +141,7 @@ pub fn create_scheduler_service_static(
Pus11Wrapper {
pus_11_handler,
sched_tc_pool,
releaser_buf: [0; 4096],
tc_releaser: Box::new(tc_releaser),
}
}
@ -172,6 +178,7 @@ pub fn create_scheduler_service_dynamic(
Pus11Wrapper {
pus_11_handler,
sched_tc_pool,
releaser_buf: [0; 4096],
tc_releaser: Box::new(tc_source_sender),
}
}

View File

@ -3,8 +3,9 @@ use std::{
sync::mpsc::{Receiver, Sender},
};
use log::info;
use satrs_core::{
pool::{PoolProviderMemInPlace, StoreAddr},
pool::{PoolProvider, StoreAddr},
seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore},
spacepackets::{
ecss::{tm::PusTmZeroCopyWriter, PusPacket},
@ -63,9 +64,14 @@ impl TmFunnelCommon {
*entry += 1;
}
Self::packet_printout(&zero_copy_writer);
// This operation has to come last!
zero_copy_writer.finish();
}
fn packet_printout(tm: &PusTmZeroCopyWriter) {
info!("Sending PUS TM[{},{}]", tm.service(), tm.subservice());
}
}
pub struct TmFunnelStatic {
@ -96,16 +102,21 @@ impl TmFunnelStatic {
// the CRC.
let shared_pool = self.shared_tm_store.clone_backing_pool();
let mut pool_guard = shared_pool.write().expect("Locking TM pool failed");
let tm_raw = pool_guard
.modify(&addr)
let mut tm_copy = Vec::new();
pool_guard
.modify(&addr, |buf| {
let zero_copy_writer = PusTmZeroCopyWriter::new(buf, MIN_CDS_FIELD_LEN)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
tm_copy = buf.to_vec()
})
.expect("Reading TM from pool failed");
let zero_copy_writer = PusTmZeroCopyWriter::new(tm_raw, MIN_CDS_FIELD_LEN)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
self.tm_server_tx
.send(addr)
.expect("Sending TM to server failed");
self.common.sync_tm_tcp_source.add_tm(tm_raw);
// We could also do this step in the update closure, but I'd rather avoid that,
// as it could lead to nested locking.
self.common.sync_tm_tcp_source.add_tm(&tm_copy);
}
}
}

View File

@ -5,7 +5,7 @@ use std::sync::mpsc::{self, Receiver, SendError, Sender, TryRecvError};
use thiserror::Error;
use crate::pus::PusReceiver;
use satrs_core::pool::{PoolProviderMemInPlace, SharedStaticMemoryPool, StoreAddr, StoreError};
use satrs_core::pool::{PoolProvider, SharedStaticMemoryPool, StoreAddr, StoreError};
use satrs_core::spacepackets::ecss::tc::PusTcReader;
use satrs_core::spacepackets::ecss::PusPacket;
use satrs_core::tmtc::ReceivesCcsdsTc;
@ -28,8 +28,9 @@ pub struct SharedTcPool {
impl SharedTcPool {
pub fn add_pus_tc(&mut self, pus_tc: &PusTcReader) -> Result<StoreAddr, StoreError> {
let mut pg = self.pool.write().expect("error locking TC store");
let (addr, buf) = pg.free_element(pus_tc.len_packed())?;
buf[0..pus_tc.len_packed()].copy_from_slice(pus_tc.raw_data());
let addr = pg.free_element(pus_tc.len_packed(), |buf| {
buf[0..pus_tc.len_packed()].copy_from_slice(pus_tc.raw_data());
})?;
Ok(addr)
}
}
@ -125,8 +126,8 @@ impl TcSourceTaskStatic {
.pool
.read()
.expect("locking tc pool failed");
let data = pool.read(&addr).expect("reading pool failed");
self.tc_buf[0..data.len()].copy_from_slice(data);
pool.read(&addr, &mut self.tc_buf)
.expect("reading pool failed");
drop(pool);
match PusTcReader::new(&self.tc_buf) {
Ok((pus_tc, _)) => {

View File

@ -6,7 +6,7 @@ use std::{
use log::{info, warn};
use satrs_core::{
hal::std::udp_server::{ReceiveResult, UdpTcServer},
pool::{PoolProviderMemInPlaceWithGuards, SharedStaticMemoryPool, StoreAddr},
pool::{PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr},
tmtc::CcsdsError,
};
@ -29,20 +29,13 @@ impl UdpTmHandler for StaticUdpTmHandler {
}
let mut store_lock = store_lock.unwrap();
let pg = store_lock.read_with_guard(addr);
let read_res = pg.read();
let read_res = pg.read_as_vec();
if read_res.is_err() {
warn!("Error reading TM pool data");
continue;
}
let buf = read_res.unwrap();
if buf.len() > 9 {
let service = buf[7];
let subservice = buf[8];
info!("Sending PUS TM[{service},{subservice}]")
} else {
info!("Sending PUS TM");
}
let result = socket.send_to(buf, recv_addr);
let result = socket.send_to(&buf, recv_addr);
if let Err(e) = result {
warn!("Sending TM with UDP socket failed: {e}")
}