Merge branch 'main' into bump-sat-rs-core-deps
Some checks failed
Rust/sat-rs/pipeline/pr-main There was a failure building this commit
This commit is contained in: commit 80ed68bd6c
2 automation/Jenkinsfile (vendored)
@@ -67,7 +67,7 @@ pipeline {
sh 'mdbook build'
sshagent(credentials: ['documentation-buildfix']) {
// Deploy to Apache webserver
sh 'rsync -r --delete book buildfix@documentation.irs.uni-stuttgart.de:/projects/sat-rs'
sh 'rsync -r --delete book/html/ buildfix@documentation.irs.uni-stuttgart.de:/projects/sat-rs/book/'
}
}
}
File diff suppressed because it is too large
348 images/static-pools/static-pools.graphml (new file)
@@ -0,0 +1,348 @@
[yEd 3.23.2 GraphML source of the static-pools diagram: a rectangle labeled "Static Pool" containing subpool groups labeled "1 x 128 bytes", "2 x 64 bytes", "4 x 32 bytes" and "6 x 16 bytes". Raw XML omitted here.]
256 images/static-pools/static-pools.pdf (new file)
@@ -0,0 +1,256 @@
[PDF export of the same static-pools diagram (Creator: yExport 1.5, created 2024-02-09), a single 311 x 209 pt page whose ASCII85/Flate-encoded content stream is omitted here.]
@@ -19,15 +19,33 @@ A huge candidate for heap allocations is the TMTC and handling. TC, TMs and IPC
candidates where the data size might vary greatly. The regular solution for host systems
might be to send around this data as a `Vec<u8>` until it is dropped. `sat-rs` provides
another solution to avoid run-time allocations by offering pre-allocated static
pools.
pools. These pools are split into subpools where each subpool can have different page sizes.
For example, a very small telecommand (TC) pool might look like this:

These pools are split into subpools where each subpool can have different page sizes.
For example, a very small TC pool might look like this:


TODO: Add image
The code to generate this static pool would look like this:

```rust
use satrs_core::pool::{StaticMemoryPool, StaticPoolConfig};

let tc_pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![
    (6, 16),
    (4, 32),
    (2, 64),
    (1, 128)
]));
```

It should be noted that the buckets only show the maximum size of data being stored inside them.
The store will keep a separate structure to track the actual size of the data being stored.
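As a small illustration (a sketch against the `PoolProvider` API from this commit; the pool layout and byte values are made up), storing three bytes in a pool that only has 16-byte buckets still reads back as three bytes:

```rust
use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};

fn main() {
    // One subpool with four 16-byte buckets.
    let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(4, 16)]));
    // Store only three bytes; they occupy a 16-byte bucket.
    let addr = pool.add(&[1, 2, 3]).expect("adding data failed");
    // The pool tracks the real length separately from the bucket size.
    assert_eq!(pool.len_of_data(&addr).expect("length lookup failed"), 3);
    let mut buf: [u8; 16] = [0; 16];
    assert_eq!(pool.read(&addr, &mut buf).expect("read failed"), 3);
}
```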

<!-- TODO: Add explanation and references for used data-structures. Also explain core trait
to work with the pool. -->

A TC entry inside this pool has a store address which can then be sent around without having
to dynamically allocate memory. The same principle can also be applied to the TM and IPC data.
to dynamically allocate memory. The same principle can also be applied to the telemetry (TM) and
inter-process communication (IPC) data.
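A minimal sketch of that idea, assuming the `PoolProvider` trait and `StaticMemoryPool` from this commit (the channel setup and the dummy packet bytes are only illustrative):

```rust
use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};
use std::sync::mpsc;

fn main() {
    let mut tc_pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![
        (6, 16),
        (4, 32),
        (2, 64),
        (1, 128),
    ]));
    // Store a dummy TC packet and forward only its store address.
    let addr = tc_pool.add(&[0x18, 0x2b, 0xc0, 0x00]).expect("adding TC failed");
    let (tx, rx) = mpsc::channel();
    tx.send(addr).expect("sending store address failed");

    // The receiver copies the packet back into a stack buffer when it needs it.
    let received_addr = rx.recv().expect("receiving store address failed");
    let mut buf: [u8; 128] = [0; 128];
    let tc_len = tc_pool.read(&received_addr, &mut buf).expect("reading TC failed");
    assert_eq!(tc_len, 4);
}
```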
# Using special crates to prevent smaller allocations
BIN satrs-book/src/images/pools/static-pools.png (new file)
Binary file not shown. After: Width | Height | Size: 38 KiB
@@ -3,11 +3,12 @@
//! # Example for the [StaticMemoryPool]
//!
//! ```
//! use satrs_core::pool::{PoolProviderMemInPlace, StaticMemoryPool, StaticPoolConfig};
//! use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};
//!
//! // 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes
//! let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)]);
//! let mut local_pool = StaticMemoryPool::new(pool_cfg);
//! let mut read_buf: [u8; 16] = [0; 16];
//! let mut addr;
//! {
//! // Add new data to the pool
@@ -20,25 +21,25 @@
//!
//! {
//! // Read the store data back
//! let res = local_pool.read(&addr);
//! let res = local_pool.read(&addr, &mut read_buf);
//! assert!(res.is_ok());
//! let buf_read_back = res.unwrap();
//! assert_eq!(buf_read_back.len(), 4);
//! assert_eq!(buf_read_back[0], 42);
//! let read_bytes = res.unwrap();
//! assert_eq!(read_bytes, 4);
//! assert_eq!(read_buf[0], 42);
//! // Modify the stored data
//! let res = local_pool.modify(&addr);
//! let res = local_pool.modify(&addr, |buf| {
//! buf[0] = 12;
//! });
//! assert!(res.is_ok());
//! let buf_read_back = res.unwrap();
//! buf_read_back[0] = 12;
//! }
//!
//! {
//! // Read the modified data back
//! let res = local_pool.read(&addr);
//! let res = local_pool.read(&addr, &mut read_buf);
//! assert!(res.is_ok());
//! let buf_read_back = res.unwrap();
//! assert_eq!(buf_read_back.len(), 4);
//! assert_eq!(buf_read_back[0], 12);
//! let read_bytes = res.unwrap();
//! assert_eq!(read_bytes, 4);
//! assert_eq!(read_buf[0], 12);
//! }
//!
//! // Delete the stored data
@@ -46,21 +47,21 @@
//!
//! // Get a free element in the pool with an appropriate size
//! {
//! let res = local_pool.free_element(12);
//! let res = local_pool.free_element(12, |buf| {
//! buf[0] = 7;
//! });
//! assert!(res.is_ok());
//! let (tmp, mut_buf) = res.unwrap();
//! addr = tmp;
//! mut_buf[0] = 7;
//! addr = res.unwrap();
//! }
//!
//! // Read back the data
//! {
//! // Read the store data back
//! let res = local_pool.read(&addr);
//! let res = local_pool.read(&addr, &mut read_buf);
//! assert!(res.is_ok());
//! let buf_read_back = res.unwrap();
//! assert_eq!(buf_read_back.len(), 12);
//! assert_eq!(buf_read_back[0], 7);
//! let read_bytes = res.unwrap();
//! assert_eq!(read_bytes, 12);
//! assert_eq!(read_buf[0], 7);
//! }
//! ```
#[cfg(feature = "alloc")]
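Since the hunks above interleave the removed in-place API with the added closure-based one, the updated doc example reads roughly as follows when assembled in one piece (the initial `add` call is not part of this diff and is assumed to store four bytes starting with 42, matching the assertions):

```rust
use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};

fn main() {
    // 4 buckets of 4 bytes, 2 of 8 bytes and 1 of 16 bytes.
    let pool_cfg = StaticPoolConfig::new(vec![(4, 4), (2, 8), (1, 16)]);
    let mut local_pool = StaticMemoryPool::new(pool_cfg);
    let mut read_buf: [u8; 16] = [0; 16];

    // Add new data to the pool (payload assumed, not shown in the diff).
    let mut addr = local_pool.add(&[42, 0, 0, 0]).unwrap();

    // Read the stored data back into a caller-provided buffer.
    assert_eq!(local_pool.read(&addr, &mut read_buf).unwrap(), 4);
    assert_eq!(read_buf[0], 42);

    // Modify the stored data through a closure.
    local_pool.modify(&addr, |buf| {
        buf[0] = 12;
    })
    .unwrap();
    assert_eq!(local_pool.read(&addr, &mut read_buf).unwrap(), 4);
    assert_eq!(read_buf[0], 12);

    // Delete the entry, then reserve a free element and fill it in place.
    local_pool.delete(addr).unwrap();
    addr = local_pool.free_element(12, |buf| {
        buf[0] = 7;
    })
    .unwrap();
    assert_eq!(local_pool.read(&addr, &mut read_buf).unwrap(), 12);
    assert_eq!(read_buf[0], 7);
}
```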
@@ -70,6 +71,7 @@ use core::fmt::{Display, Formatter};
use delegate::delegate;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use spacepackets::ByteConversionError;
#[cfg(feature = "std")]
use std::error::Error;

@@ -151,6 +153,8 @@ pub enum StoreError {
InvalidStoreId(StoreIdError, Option<StoreAddr>),
/// Valid subpool and packet index, but no data is stored at the given address
DataDoesNotExist(StoreAddr),
ByteConversionError(spacepackets::ByteConversionError),
LockError,
/// Internal or configuration errors
InternalError(u32),
}
@@ -173,10 +177,22 @@ impl Display for StoreError {
StoreError::InternalError(e) => {
write!(f, "internal error: {e}")
}
StoreError::ByteConversionError(e) => {
write!(f, "store error: {e}")
}
StoreError::LockError => {
write!(f, "lock error")
}
}
}
}

impl From<ByteConversionError> for StoreError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value)
}
}

#[cfg(feature = "std")]
impl Error for StoreError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
@@ -189,39 +205,54 @@ impl Error for StoreError {

/// Generic trait for pool providers where the data can be modified and read in-place. This
/// generally means that a shared pool structure has to be wrapped inside a lock structure.
pub trait PoolProviderMemInPlace {
pub trait PoolProvider {
/// Add new data to the pool. The provider should attempt to reserve a memory block with the
/// appropriate size and then copy the given data to the block. Yields a [StoreAddr] which can
/// be used to access the data stored in the pool
fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError>;

/// The provider should attempt to reserve a free memory block with the appropriate size and
/// then return a mutable reference to it. Yields a [StoreAddr] which can be used to access
/// the data stored in the pool
fn free_element(&mut self, len: usize) -> Result<(StoreAddr, &mut [u8]), StoreError>;
/// The provider should attempt to reserve a free memory block with the appropriate size first.
/// It then executes a user-provided closure and passes a mutable reference to that memory
/// block to the closure. This allows the user to write data to the memory block.
/// The function should yield a [StoreAddr] which can be used to access the data stored in the
/// pool.
fn free_element<W: FnMut(&mut [u8])>(
&mut self,
len: usize,
writer: W,
) -> Result<StoreAddr, StoreError>;

/// Modify data added previously using a given [StoreAddr] by yielding a mutable reference
/// to it
fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError>;
/// Modify data added previously using a given [StoreAddr]. The provider should use the store
/// address to determine if a memory block exists for that address. If it does, it should
/// call the user-provided closure and pass a mutable reference to the memory block
/// to the closure. This allows the user to modify the memory block.
fn modify<U: FnMut(&mut [u8])>(
&mut self,
addr: &StoreAddr,
updater: U,
) -> Result<(), StoreError>;

/// Read data by yielding a read-only reference given a [StoreAddr]
fn read(&self, addr: &StoreAddr) -> Result<&[u8], StoreError>;
/// The provider should copy the data from the memory block to the user-provided buffer if
/// it exists.
fn read(&self, addr: &StoreAddr, buf: &mut [u8]) -> Result<usize, StoreError>;

/// Delete data inside the pool given a [StoreAddr]
/// Delete data inside the pool given a [StoreAddr].
fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError>;
fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError>;

/// Retrieve the length of the data at the given store address.
fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
if !self.has_element_at(addr)? {
return Err(StoreError::DataDoesNotExist(*addr));
}
Ok(self.read(addr)?.len())
fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError>;

#[cfg(feature = "alloc")]
fn read_as_vec(&self, addr: &StoreAddr) -> Result<alloc::vec::Vec<u8>, StoreError> {
let mut vec = alloc::vec![0; self.len_of_data(addr)?];
self.read(addr, &mut vec)?;
Ok(vec)
}
}

pub trait PoolProviderMemInPlaceWithGuards: PoolProviderMemInPlace {
/// This function behaves like [PoolProviderMemInPlace::read], but consumes the provided address
pub trait PoolProviderWithGuards: PoolProvider {
/// This function behaves like [PoolProvider::read], but consumes the provided address
/// and returns a RAII conformant guard object.
///
/// Unless the guard [PoolRwGuard::release] method is called, the data for the
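A short sketch of what code written against the reworked `PoolProvider` trait can look like (the generic helper is purely illustrative and not part of the crate; it relies on the implementation handing the closure a block of exactly `len` bytes, which the `StaticMemoryPool` implementation further down does):

```rust
use satrs_core::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig, StoreAddr, StoreError};

/// Hypothetical helper: store a packet in any pool implementing `PoolProvider`.
fn store_packet<P: PoolProvider>(pool: &mut P, packet: &[u8]) -> Result<StoreAddr, StoreError> {
    // Reserve a block of the right size and let the closure fill it.
    pool.free_element(packet.len(), |buf| {
        buf.copy_from_slice(packet);
    })
}

fn main() {
    let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(2, 8)]));
    let addr = store_packet(&mut pool, &[1, 2, 3, 4]).expect("storing packet failed");
    // `read_as_vec` is the alloc-gated convenience method provided by the trait.
    assert_eq!(pool.read_as_vec(&addr).expect("read failed"), vec![1, 2, 3, 4]);
}
```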
@@ -231,7 +262,7 @@ pub trait PoolProviderMemInPlaceWithGuards: PoolProviderMemInPlace {
/// manual deletion is necessary when returning from a processing function prematurely.
fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard<Self>;

/// This function behaves like [PoolProviderMemInPlace::modify], but consumes the provided
/// This function behaves like [PoolProvider::modify], but consumes the provided
/// address and returns a RAII conformant guard object.
///
/// Unless the guard [PoolRwGuard::release] method is called, the data for the
@@ -242,7 +273,7 @@ pub trait PoolProviderMemInPlaceWithGuards: PoolProviderMemInPlace {
fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard<Self>;
}

pub struct PoolGuard<'a, MemProvider: PoolProviderMemInPlace + ?Sized> {
pub struct PoolGuard<'a, MemProvider: PoolProvider + ?Sized> {
pool: &'a mut MemProvider,
pub addr: StoreAddr,
no_deletion: bool,
@@ -250,7 +281,7 @@ pub struct PoolGuard<'a, MemProvider: PoolProviderMemInPlace + ?Sized> {
}

/// This helper object
impl<'a, MemProvider: PoolProviderMemInPlace> PoolGuard<'a, MemProvider> {
impl<'a, MemProvider: PoolProvider> PoolGuard<'a, MemProvider> {
pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self {
Self {
pool,
@@ -260,8 +291,13 @@ impl<'a, MemProvider: PoolProviderMemInPlace> PoolGuard<'a, MemProvider> {
}
}

pub fn read(&self) -> Result<&[u8], StoreError> {
self.pool.read(&self.addr)
pub fn read(&self, buf: &mut [u8]) -> Result<usize, StoreError> {
self.pool.read(&self.addr, buf)
}

#[cfg(feature = "alloc")]
pub fn read_as_vec(&self) -> Result<alloc::vec::Vec<u8>, StoreError> {
self.pool.read_as_vec(&self.addr)
}

/// Releasing the pool guard will disable the automatic deletion of the data when the guard
@@ -271,7 +307,7 @@ impl<'a, MemProvider: PoolProviderMemInPlace> PoolGuard<'a, MemProvider> {
}
}

impl<MemProvider: PoolProviderMemInPlace + ?Sized> Drop for PoolGuard<'_, MemProvider> {
impl<MemProvider: PoolProvider + ?Sized> Drop for PoolGuard<'_, MemProvider> {
fn drop(&mut self) {
if !self.no_deletion {
if let Err(e) = self.pool.delete(self.addr) {
@@ -281,24 +317,24 @@ impl<MemProvider: PoolProviderMemInPlace + ?Sized> Drop for PoolGuard<'_, MemPro
}
}

pub struct PoolRwGuard<'a, MemProvider: PoolProviderMemInPlace + ?Sized> {
pub struct PoolRwGuard<'a, MemProvider: PoolProvider + ?Sized> {
guard: PoolGuard<'a, MemProvider>,
}

impl<'a, MemProvider: PoolProviderMemInPlace> PoolRwGuard<'a, MemProvider> {
impl<'a, MemProvider: PoolProvider> PoolRwGuard<'a, MemProvider> {
pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self {
Self {
guard: PoolGuard::new(pool, addr),
}
}

pub fn modify(&mut self) -> Result<&mut [u8], StoreError> {
self.guard.pool.modify(&self.guard.addr)
pub fn update<U: FnMut(&mut [u8])>(&mut self, updater: &mut U) -> Result<(), StoreError> {
self.guard.pool.modify(&self.guard.addr, updater)
}

delegate!(
to self.guard {
pub fn read(&self) -> Result<&[u8], StoreError>;
pub fn read(&self, buf: &mut [u8]) -> Result<usize, StoreError>;
/// Releasing the pool guard will disable the automatic deletion of the data when the guard
/// is dropped.
pub fn release(&mut self);
@@ -308,13 +344,11 @@ impl<'a, MemProvider: PoolProviderMemInPlace> PoolRwGuard<'a, MemProvider> {

#[cfg(feature = "alloc")]
mod alloc_mod {
use super::{
PoolGuard, PoolProviderMemInPlace, PoolProviderMemInPlaceWithGuards, PoolRwGuard,
StaticPoolAddr,
};
use super::{PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticPoolAddr};
use crate::pool::{NumBlocks, StoreAddr, StoreError, StoreIdError};
use alloc::vec;
use alloc::vec::Vec;
use spacepackets::ByteConversionError;
#[cfg(feature = "std")]
use std::sync::{Arc, RwLock};
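The guard types make the delete-on-drop behaviour explicit. A usage sketch (pool layout and payload are arbitrary; it assumes that dropping an unreleased guard deletes the entry, as described in the hunks above):

```rust
use satrs_core::pool::{PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};

fn main() {
    let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(2, 16)]));
    let addr = pool.add(&[1, 2, 3, 4]).expect("adding data failed");
    let mut buf: [u8; 16] = [0; 16];
    {
        // The guard borrows the pool and deletes the entry when it is dropped ...
        let guard = pool.read_with_guard(addr);
        assert_eq!(guard.read(&mut buf).expect("guarded read failed"), 4);
        // ... unless guard.release() is called before it goes out of scope.
    }
    // The guard drop consumed the entry, so a second read fails.
    assert!(pool.read(&addr, &mut buf).is_err());
}
```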
@@ -476,7 +510,7 @@ mod alloc_mod {
}
}

impl PoolProviderMemInPlace for StaticMemoryPool {
impl PoolProvider for StaticMemoryPool {
fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError> {
let data_len = data.len();
if data_len > POOL_MAX_SIZE {
@@ -487,7 +521,11 @@ mod alloc_mod {
Ok(addr.into())
}

fn free_element(&mut self, len: usize) -> Result<(StoreAddr, &mut [u8]), StoreError> {
fn free_element<W: FnMut(&mut [u8])>(
&mut self,
len: usize,
mut writer: W,
) -> Result<StoreAddr, StoreError> {
if len > POOL_MAX_SIZE {
return Err(StoreError::DataTooLarge(len));
}
@@ -495,25 +533,40 @@ mod alloc_mod {
let raw_pos = self.raw_pos(&addr).unwrap();
let block =
&mut self.pool.get_mut(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + len];
Ok((addr.into(), block))
writer(block);
Ok(addr.into())
}

fn modify(&mut self, addr: &StoreAddr) -> Result<&mut [u8], StoreError> {
fn modify<U: FnMut(&mut [u8])>(
&mut self,
addr: &StoreAddr,
mut updater: U,
) -> Result<(), StoreError> {
let addr = StaticPoolAddr::from(*addr);
let curr_size = self.addr_check(&addr)?;
let raw_pos = self.raw_pos(&addr).unwrap();
let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap()
[raw_pos..raw_pos + curr_size];
Ok(block)
updater(block);
Ok(())
}

fn read(&self, addr: &StoreAddr) -> Result<&[u8], StoreError> {
fn read(&self, addr: &StoreAddr, buf: &mut [u8]) -> Result<usize, StoreError> {
let addr = StaticPoolAddr::from(*addr);
let curr_size = self.addr_check(&addr)?;
if buf.len() < curr_size {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: curr_size,
}
.into());
}
let raw_pos = self.raw_pos(&addr).unwrap();
let block =
&self.pool.get(addr.pool_idx as usize).unwrap()[raw_pos..raw_pos + curr_size];
Ok(block)
//block.copy_from_slice(&src);
buf[..curr_size].copy_from_slice(block);
Ok(curr_size)
}

fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError> {
@ -540,9 +593,21 @@ mod alloc_mod {
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
|
||||
let addr = StaticPoolAddr::from(*addr);
|
||||
self.validate_addr(&addr)?;
|
||||
let pool_idx = addr.pool_idx as usize;
|
||||
let size_list = self.sizes_lists.get(pool_idx).unwrap();
|
||||
let size = size_list[addr.packet_idx as usize];
|
||||
Ok(match size {
|
||||
STORE_FREE => 0,
|
||||
_ => size,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl PoolProviderMemInPlaceWithGuards for StaticMemoryPool {
|
||||
impl PoolProviderWithGuards for StaticMemoryPool {
|
||||
fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard<Self> {
|
||||
PoolRwGuard::new(self, addr)
|
||||
}
|
||||
@ -556,9 +621,8 @@ mod alloc_mod {
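Example (illustration only, not part of this commit's diff): a round-trip sketch against the reworked PoolProvider trait, using only the method signatures visible in the implementation above. The pool is passed in generically because constructing a StaticMemoryPool is not covered by this hunk.

use satrs_core::pool::{PoolProvider, StoreError};

// Store a packet via the closure-based free_element, patch it in place with modify,
// and copy it back out with read, which now fills a caller-provided buffer.
fn pool_roundtrip(pool: &mut impl PoolProvider) -> Result<(), StoreError> {
    let packet = [1, 2, 3, 4, 5, 6, 7, 8];

    // The writer closure receives exactly the freshly reserved block.
    let addr = pool.free_element(packet.len(), |buf| {
        buf.copy_from_slice(&packet);
    })?;

    // In-place update through a closure instead of a returned mutable slice.
    pool.modify(&addr, |buf| {
        buf[0] = 0xff;
    })?;

    // Size the read buffer with len_of_data before copying the data out.
    let len = pool.len_of_data(&addr)?;
    let mut read_buf = [0u8; 64];
    let read_len = pool.read(&addr, &mut read_buf[..len])?;
    assert_eq!(read_len, len);
    assert_eq!(read_buf[0], 0xff);

    pool.delete(addr)
}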
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::pool::{
|
||||
PoolGuard, PoolProviderMemInPlace, PoolProviderMemInPlaceWithGuards, PoolRwGuard,
|
||||
StaticMemoryPool, StaticPoolAddr, StaticPoolConfig, StoreError, StoreIdError,
|
||||
POOL_MAX_SIZE,
|
||||
PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticMemoryPool,
|
||||
StaticPoolAddr, StaticPoolConfig, StoreError, StoreIdError, POOL_MAX_SIZE,
|
||||
};
|
||||
use std::vec;
|
||||
|
||||
@ -594,13 +658,14 @@ mod tests {
|
||||
for (i, val) in test_buf.iter_mut().enumerate() {
|
||||
*val = i as u8;
|
||||
}
|
||||
let mut other_buf: [u8; 16] = [0; 16];
|
||||
let addr = local_pool.add(&test_buf).expect("Adding data failed");
|
||||
// Read back data and verify correctness
|
||||
let res = local_pool.read(&addr);
|
||||
let res = local_pool.read(&addr, &mut other_buf);
|
||||
assert!(res.is_ok());
|
||||
let buf_read_back = res.unwrap();
|
||||
assert_eq!(buf_read_back.len(), 16);
|
||||
for (i, &val) in buf_read_back.iter().enumerate() {
|
||||
let read_len = res.unwrap();
|
||||
assert_eq!(read_len, 16);
|
||||
for (i, &val) in other_buf.iter().enumerate() {
|
||||
assert_eq!(val, i as u8);
|
||||
}
|
||||
}
|
||||
@ -610,8 +675,10 @@ mod tests {
|
||||
let mut local_pool = basic_small_pool();
|
||||
let test_buf: [u8; 12] = [0; 12];
|
||||
let addr = local_pool.add(&test_buf).expect("Adding data failed");
|
||||
let res = local_pool.read(&addr).expect("Read back failed");
|
||||
assert_eq!(res.len(), 12);
|
||||
let res = local_pool
|
||||
.read(&addr, &mut [0; 12])
|
||||
.expect("Read back failed");
|
||||
assert_eq!(res, 12);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -622,10 +689,13 @@ mod tests {
|
||||
// Delete the data
|
||||
let res = local_pool.delete(addr);
|
||||
assert!(res.is_ok());
|
||||
let mut writer = |buf: &mut [u8]| {
|
||||
assert_eq!(buf.len(), 12);
|
||||
};
|
||||
// Verify that the slot is free by trying to get a reference to it
|
||||
let res = local_pool.free_element(12);
|
||||
let res = local_pool.free_element(12, &mut writer);
|
||||
assert!(res.is_ok());
|
||||
let (addr, buf_ref) = res.unwrap();
|
||||
let addr = res.unwrap();
|
||||
assert_eq!(
|
||||
addr,
|
||||
u64::from(StaticPoolAddr {
|
||||
@ -633,7 +703,6 @@ mod tests {
|
||||
packet_idx: 0
|
||||
})
|
||||
);
|
||||
assert_eq!(buf_ref.len(), 12);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -647,29 +716,34 @@ mod tests {
|
||||
|
||||
{
|
||||
// Verify that the slot is free by trying to get a reference to it
|
||||
let res = local_pool.modify(&addr).expect("Modifying data failed");
|
||||
res[0] = 0;
|
||||
res[1] = 0x42;
|
||||
local_pool
|
||||
.modify(&addr, &mut |buf: &mut [u8]| {
|
||||
buf[0] = 0;
|
||||
buf[1] = 0x42;
|
||||
})
|
||||
.expect("Modifying data failed");
|
||||
}
|
||||
|
||||
let res = local_pool.read(&addr).expect("Reading back data failed");
|
||||
assert_eq!(res[0], 0);
|
||||
assert_eq!(res[1], 0x42);
|
||||
assert_eq!(res[2], 2);
|
||||
assert_eq!(res[3], 3);
|
||||
local_pool
|
||||
.read(&addr, &mut test_buf)
|
||||
.expect("Reading back data failed");
|
||||
assert_eq!(test_buf[0], 0);
|
||||
assert_eq!(test_buf[1], 0x42);
|
||||
assert_eq!(test_buf[2], 2);
|
||||
assert_eq!(test_buf[3], 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_consecutive_reservation() {
|
||||
let mut local_pool = basic_small_pool();
|
||||
// Reserve two smaller blocks consecutively and verify that the third reservation fails
|
||||
let res = local_pool.free_element(8);
|
||||
let res = local_pool.free_element(8, |_| {});
|
||||
assert!(res.is_ok());
|
||||
let (addr0, _) = res.unwrap();
|
||||
let res = local_pool.free_element(8);
|
||||
let addr0 = res.unwrap();
|
||||
let res = local_pool.free_element(8, |_| {});
|
||||
assert!(res.is_ok());
|
||||
let (addr1, _) = res.unwrap();
|
||||
let res = local_pool.free_element(8);
|
||||
let addr1 = res.unwrap();
|
||||
let res = local_pool.free_element(8, |_| {});
|
||||
assert!(res.is_err());
|
||||
let err = res.unwrap_err();
|
||||
assert_eq!(err, StoreError::StoreFull(1));
|
||||
@ -689,6 +763,7 @@ mod tests {
|
||||
pool_idx: 0,
|
||||
}
|
||||
.into(),
|
||||
&mut [],
|
||||
);
|
||||
assert!(res.is_err());
|
||||
assert!(matches!(
|
||||
@ -720,7 +795,7 @@ mod tests {
|
||||
packet_idx: 0,
|
||||
}
|
||||
.into();
|
||||
let res = local_pool.read(&addr);
|
||||
let res = local_pool.read(&addr, &mut []);
|
||||
assert!(res.is_err());
|
||||
let err = res.unwrap_err();
|
||||
assert!(matches!(
|
||||
@ -737,7 +812,7 @@ mod tests {
|
||||
packet_idx: 1,
|
||||
};
|
||||
assert_eq!(addr.raw(), 0x00020001);
|
||||
let res = local_pool.read(&addr.into());
|
||||
let res = local_pool.read(&addr.into(), &mut []);
|
||||
assert!(res.is_err());
|
||||
let err = res.unwrap_err();
|
||||
assert!(matches!(
|
||||
@ -759,7 +834,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_data_too_large_1() {
|
||||
let mut local_pool = basic_small_pool();
|
||||
let res = local_pool.free_element(POOL_MAX_SIZE + 1);
|
||||
let res = local_pool.free_element(POOL_MAX_SIZE + 1, |_| {});
|
||||
assert!(res.is_err());
|
||||
assert_eq!(
|
||||
res.unwrap_err(),
|
||||
@ -771,7 +846,7 @@ mod tests {
|
||||
fn test_free_element_too_large() {
|
||||
let mut local_pool = basic_small_pool();
|
||||
// Try to request a slot which is too large
|
||||
let res = local_pool.free_element(20);
|
||||
let res = local_pool.free_element(20, |_| {});
|
||||
assert!(res.is_err());
|
||||
assert_eq!(res.unwrap_err(), StoreError::DataTooLarge(20));
|
||||
}
|
||||
@ -813,7 +888,7 @@ mod tests {
|
||||
let test_buf: [u8; 16] = [0; 16];
|
||||
let addr = local_pool.add(&test_buf).expect("Adding data failed");
|
||||
let mut rw_guard = PoolRwGuard::new(&mut local_pool, addr);
|
||||
let _ = rw_guard.modify().expect("modify failed");
|
||||
rw_guard.update(&mut |_| {}).expect("modify failed");
|
||||
drop(rw_guard);
|
||||
assert!(!local_pool.has_element_at(&addr).expect("Invalid address"));
|
||||
}
|
||||
@ -824,7 +899,7 @@ mod tests {
|
||||
let test_buf: [u8; 16] = [0; 16];
|
||||
let addr = local_pool.add(&test_buf).expect("Adding data failed");
|
||||
let mut rw_guard = local_pool.modify_with_guard(addr);
|
||||
let _ = rw_guard.modify().expect("modify failed");
|
||||
rw_guard.update(&mut |_| {}).expect("modify failed");
|
||||
drop(rw_guard);
|
||||
assert!(!local_pool.has_element_at(&addr).expect("Invalid address"));
|
||||
}
|
||||
@ -840,13 +915,25 @@ mod tests {
|
||||
let addr1 = local_pool.add(&test_buf_1).expect("Adding data failed");
|
||||
let addr2 = local_pool.add(&test_buf_2).expect("Adding data failed");
|
||||
let addr3 = local_pool.add(&test_buf_3).expect("Adding data failed");
|
||||
let tm0_raw = local_pool.modify(&addr0).expect("Modifying data failed");
|
||||
assert_eq!(tm0_raw, test_buf_0);
|
||||
let tm1_raw = local_pool.modify(&addr1).expect("Modifying data failed");
|
||||
assert_eq!(tm1_raw, test_buf_1);
|
||||
let tm2_raw = local_pool.modify(&addr2).expect("Modifying data failed");
|
||||
assert_eq!(tm2_raw, test_buf_2);
|
||||
let tm3_raw = local_pool.modify(&addr3).expect("Modifying data failed");
|
||||
assert_eq!(tm3_raw, test_buf_3);
|
||||
local_pool
|
||||
.modify(&addr0, |buf| {
|
||||
assert_eq!(buf, test_buf_0);
|
||||
})
|
||||
.expect("Modifying data failed");
|
||||
local_pool
|
||||
.modify(&addr1, |buf| {
|
||||
assert_eq!(buf, test_buf_1);
|
||||
})
|
||||
.expect("Modifying data failed");
|
||||
local_pool
|
||||
.modify(&addr2, |buf| {
|
||||
assert_eq!(buf, test_buf_2);
|
||||
})
|
||||
.expect("Modifying data failed");
|
||||
local_pool
|
||||
.modify(&addr3, |buf| {
|
||||
assert_eq!(buf, test_buf_3);
|
||||
})
|
||||
.expect("Modifying data failed");
|
||||
}
|
||||
}
|
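Example (illustration only, not part of this commit's diff): besides the buffer-based read used in the tests above, the commit also calls a read_as_vec convenience method in later hunks. Its signature is assumed here, based purely on that usage, to be fn read_as_vec(&self, addr: &StoreAddr) -> Result<Vec<u8>, StoreError>.

use satrs_core::pool::{PoolProvider, StoreAddr, StoreError};

// Convenience read without a caller-provided buffer; allocates a Vec per call.
fn dump_entry(pool: &impl PoolProvider, addr: &StoreAddr) -> Result<Vec<u8>, StoreError> {
    let data = pool.read_as_vec(addr)?;
    println!("stored entry has {} bytes", data.len());
    Ok(data)
}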
||||
|
@@ -390,7 +390,7 @@ mod alloc_mod {
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod std_mod {
use crate::pool::{PoolProviderMemInPlaceWithGuards, SharedStaticMemoryPool, StoreAddr};
use crate::pool::{PoolProvider, PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr};
use crate::pus::verification::{
StdVerifReporterWithSender, TcStateAccepted, VerificationToken,
};
@@ -789,12 +789,15 @@ pub mod std_mod {
.shared_tc_store
.write()
.map_err(|_| PusPacketHandlingError::EcssTmtc(EcssTmtcError::StoreLock))?;
let tc_guard = tc_pool.read_with_guard(addr);
let tc_raw = tc_guard.read().unwrap();
if tc_raw.len() > self.pus_buf.len() {
return Err(PusPacketHandlingError::PusPacketTooLarge(tc_raw.len()));
let tc_size = tc_pool
.len_of_data(&addr)
.map_err(|e| PusPacketHandlingError::EcssTmtc(EcssTmtcError::Store(e)))?;
if tc_size > self.pus_buf.len() {
return Err(PusPacketHandlingError::PusPacketTooLarge(tc_size));
}
self.pus_buf[0..tc_raw.len()].copy_from_slice(tc_raw);
let tc_guard = tc_pool.read_with_guard(addr);
// TODO: Proper error handling.
tc_guard.read(&mut self.pus_buf[0..tc_size]).unwrap();
Ok(())
}
}
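Example (illustration only, not part of this commit's diff): the same check-the-length-then-read pattern as in the hunk above, reduced to a free function. It assumes that SharedStaticMemoryPool is the RwLock-protected shared pool it is used as throughout this commit and that read_with_guard lives on the same PoolProviderWithGuards trait as modify_with_guard; errors are simplified to StoreError instead of the handler's PusPacketHandlingError.

use satrs_core::pool::{
    PoolProvider, PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr, StoreError,
};

// Copy one stored TC into a fixed-size buffer, checking the stored length first so an
// oversized packet is rejected instead of failing during the copy.
fn copy_tc_to_buf(
    shared_tc_store: &SharedStaticMemoryPool,
    addr: StoreAddr,
    buf: &mut [u8],
) -> Result<usize, StoreError> {
    let mut tc_pool = shared_tc_store.write().expect("locking TC pool failed");
    let tc_size = tc_pool.len_of_data(&addr)?;
    if tc_size > buf.len() {
        return Err(StoreError::DataTooLarge(tc_size));
    }
    // The read guard deletes the TC from the pool when it is dropped.
    let tc_guard = tc_pool.read_with_guard(addr);
    tc_guard.read(&mut buf[0..tc_size])
}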
||||
@ -947,8 +950,7 @@ pub mod tests {
|
||||
use spacepackets::CcsdsPacket;
|
||||
|
||||
use crate::pool::{
|
||||
PoolProviderMemInPlace, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig,
|
||||
StoreAddr,
|
||||
PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig, StoreAddr,
|
||||
};
|
||||
use crate::pus::verification::RequestId;
|
||||
use crate::tmtc::tm_helper::SharedTmPool;
|
||||
@ -1078,8 +1080,8 @@ pub mod tests {
|
||||
assert!(next_msg.is_ok());
|
||||
let tm_addr = next_msg.unwrap();
|
||||
let tm_pool = self.tm_pool.0.read().unwrap();
|
||||
let tm_raw = tm_pool.read(&tm_addr).unwrap();
|
||||
self.tm_buf[0..tm_raw.len()].copy_from_slice(tm_raw);
|
||||
let tm_raw = tm_pool.read_as_vec(&tm_addr).unwrap();
|
||||
self.tm_buf[0..tm_raw.len()].copy_from_slice(&tm_raw);
|
||||
PusTmReader::new(&self.tm_buf, 7).unwrap().0
|
||||
}
|
||||
|
||||
@ -1096,8 +1098,8 @@ pub mod tests {
|
||||
assert!(next_msg.is_ok());
|
||||
let tm_addr = next_msg.unwrap();
|
||||
let tm_pool = self.tm_pool.0.read().unwrap();
|
||||
let tm_raw = tm_pool.read(&tm_addr).unwrap();
|
||||
let tm = PusTmReader::new(tm_raw, 7).unwrap().0;
|
||||
let tm_raw = tm_pool.read_as_vec(&tm_addr).unwrap();
|
||||
let tm = PusTmReader::new(&tm_raw, 7).unwrap().0;
|
||||
assert_eq!(PusPacket::service(&tm), 1);
|
||||
assert_eq!(PusPacket::subservice(&tm), subservice);
|
||||
assert_eq!(tm.apid(), TEST_APID);
|
||||
|
@ -16,7 +16,7 @@ use spacepackets::{ByteConversionError, CcsdsPacket};
|
||||
#[cfg(feature = "std")]
|
||||
use std::error::Error;
|
||||
|
||||
use crate::pool::{PoolProviderMemInPlace, StoreError};
|
||||
use crate::pool::{PoolProvider, StoreError};
|
||||
#[cfg(feature = "alloc")]
|
||||
pub use alloc_mod::*;
|
||||
|
||||
@ -241,10 +241,7 @@ impl Error for ScheduleError {
|
||||
pub trait PusSchedulerInterface {
|
||||
type TimeProvider: CcsdsTimeProvider + TimeReader;
|
||||
|
||||
fn reset(
|
||||
&mut self,
|
||||
store: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
) -> Result<(), StoreError>;
|
||||
fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), StoreError>;
|
||||
|
||||
fn is_enabled(&self) -> bool;
|
||||
|
||||
@ -267,7 +264,7 @@ pub trait PusSchedulerInterface {
|
||||
fn insert_wrapped_tc<TimeProvider>(
|
||||
&mut self,
|
||||
pus_tc: &(impl IsPusTelecommand + PusPacket + GenericPusTcSecondaryHeader),
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<TcInfo, ScheduleError> {
|
||||
if PusPacket::service(pus_tc) != 11 {
|
||||
return Err(ScheduleError::WrongService(PusPacket::service(pus_tc)));
|
||||
@ -293,7 +290,7 @@ pub trait PusSchedulerInterface {
|
||||
&mut self,
|
||||
time_stamp: UnixTimestamp,
|
||||
tc: &[u8],
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<TcInfo, ScheduleError> {
|
||||
let check_tc = PusTcReader::new(tc)?;
|
||||
if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 {
|
||||
@ -343,7 +340,7 @@ pub fn generate_insert_telecommand_app_data(
|
||||
#[cfg(feature = "alloc")]
|
||||
pub mod alloc_mod {
|
||||
use super::*;
|
||||
use crate::pool::{PoolProviderMemInPlace, StoreAddr, StoreError};
|
||||
use crate::pool::{PoolProvider, StoreAddr, StoreError};
|
||||
use alloc::collections::btree_map::{Entry, Range};
|
||||
use alloc::collections::BTreeMap;
|
||||
use alloc::vec;
|
||||
@ -379,7 +376,7 @@ pub mod alloc_mod {
|
||||
/// This is the core data structure for scheduling PUS telecommands with [alloc] support.
|
||||
///
|
||||
/// It is assumed that the actual telecommand data is stored in a separate TC pool offering
|
||||
/// a [crate::pool::PoolProviderMemInPlace] API. This data structure just tracks the store
|
||||
/// a [crate::pool::PoolProvider] API. This data structure just tracks the store
|
||||
/// addresses and their release times and offers a convenient API to insert and release
|
||||
/// telecommands and perform other functionality specified by the ECSS standard in section 6.11.
|
||||
/// The time is tracked as a [spacepackets::time::UnixTimestamp] but the only requirement to
|
||||
@ -413,6 +410,8 @@ pub mod alloc_mod {
|
||||
/// * `time_margin` - This time margin is used when inserting new telecommands into the
|
||||
/// schedule. If the release time of a new telecommand is earlier than the time margin
|
||||
/// added to the current time, it will not be inserted into the schedule.
|
||||
/// * `tc_buf_size` - Buffer for temporary storage of telecommand packets. This buffer
|
||||
/// should be large enough to accomodate the largest expected TC packets.
|
||||
pub fn new(init_current_time: UnixTimestamp, time_margin: Duration) -> Self {
|
||||
PusScheduler {
|
||||
tc_map: Default::default(),
|
||||
@ -476,7 +475,7 @@ pub mod alloc_mod {
|
||||
&mut self,
|
||||
time_stamp: UnixTimestamp,
|
||||
tc: &[u8],
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<TcInfo, ScheduleError> {
|
||||
let check_tc = PusTcReader::new(tc)?;
|
||||
if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 {
|
||||
@ -499,7 +498,7 @@ pub mod alloc_mod {
|
||||
pub fn insert_wrapped_tc_cds_short(
|
||||
&mut self,
|
||||
pus_tc: &PusTc,
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<TcInfo, ScheduleError> {
|
||||
self.insert_wrapped_tc::<cds::TimeProvider>(pus_tc, pool)
|
||||
}
|
||||
@ -509,7 +508,7 @@ pub mod alloc_mod {
|
||||
pub fn insert_wrapped_tc_cds_long(
|
||||
&mut self,
|
||||
pus_tc: &PusTc,
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<TcInfo, ScheduleError> {
|
||||
self.insert_wrapped_tc::<cds::TimeProvider<DaysLen24Bits>>(pus_tc, pool)
|
||||
}
|
||||
@ -525,7 +524,7 @@ pub mod alloc_mod {
|
||||
pub fn delete_by_time_filter<TimeProvider: CcsdsTimeProvider + Clone>(
|
||||
&mut self,
|
||||
time_window: TimeWindow<TimeProvider>,
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<u64, (u64, StoreError)> {
|
||||
let range = self.retrieve_by_time_filter(time_window);
|
||||
let mut del_packets = 0;
|
||||
@ -555,7 +554,7 @@ pub mod alloc_mod {
|
||||
/// the last deletion will be supplied in addition to the number of deleted commands.
|
||||
pub fn delete_all(
|
||||
&mut self,
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<u64, (u64, StoreError)> {
|
||||
self.delete_by_time_filter(TimeWindow::<cds::TimeProvider>::new_select_all(), pool)
|
||||
}
|
||||
@ -613,7 +612,7 @@ pub mod alloc_mod {
|
||||
pub fn delete_by_request_id_and_from_pool(
|
||||
&mut self,
|
||||
req_id: &RequestId,
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<bool, StoreError> {
|
||||
if let DeletionResult::WithStoreDeletion(v) =
|
||||
self.delete_by_request_id_internal_with_store_deletion(req_id, pool)
|
||||
@ -645,7 +644,7 @@ pub mod alloc_mod {
|
||||
fn delete_by_request_id_internal_with_store_deletion(
|
||||
&mut self,
|
||||
req_id: &RequestId,
|
||||
pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> DeletionResult {
|
||||
let mut idx_found = None;
|
||||
for time_bucket in &mut self.tc_map {
|
||||
@ -675,7 +674,8 @@ pub mod alloc_mod {
|
||||
/// Utility method which calls [Self::telecommands_to_release] and then calls a releaser
|
||||
/// closure for each telecommand which should be released. This function will also delete
|
||||
/// the telecommands from the holding store after calling the release closure if the user
|
||||
/// returns [true] from the release closure.
|
||||
/// returns [true] from the release closure. A buffer must be provided to hold the
|
||||
/// telecommands for the release process.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
@@ -685,18 +685,55 @@
/// note that returning false might lead to memory leaks if the TC is not cleared from
/// the store in some other way.
/// * `tc_store` - The holding store of the telecommands.
/// * `tc_buf` - Buffer to hold each telecommand being released.
pub fn release_telecommands_with_buffer<R: FnMut(bool, &TcInfo, &[u8]) -> bool>(
&mut self,
releaser: R,
tc_store: &mut (impl PoolProvider + ?Sized),
tc_buf: &mut [u8],
) -> Result<u64, (u64, StoreError)> {
self.release_telecommands_internal(releaser, tc_store, Some(tc_buf))
}

/// This functions is almost identical to [Self::release_telecommands_with_buffer] but does
/// not require a user provided TC buffer because it will always use the
/// [PoolProvider::read_as_vec] API to read the TC packets.
///
/// However, this might also perform frequent allocations for all telecommands being
/// released.
pub fn release_telecommands<R: FnMut(bool, &TcInfo, &[u8]) -> bool>(
&mut self,
releaser: R,
tc_store: &mut (impl PoolProvider + ?Sized),
) -> Result<u64, (u64, StoreError)> {
self.release_telecommands_internal(releaser, tc_store, None)
}

fn release_telecommands_internal<R: FnMut(bool, &TcInfo, &[u8]) -> bool>(
&mut self,
mut releaser: R,
tc_store: &mut (impl PoolProviderMemInPlace + ?Sized),
tc_store: &mut (impl PoolProvider + ?Sized),
mut tc_buf: Option<&mut [u8]>,
) -> Result<u64, (u64, StoreError)> {
let tcs_to_release = self.telecommands_to_release();
let mut released_tcs = 0;
let mut store_error = Ok(());
for tc in tcs_to_release {
for info in tc.1 {
let tc = tc_store.read(&info.addr).map_err(|e| (released_tcs, e))?;
let should_delete = releaser(self.enabled, info, tc);
let should_delete = match tc_buf.as_mut() {
Some(buf) => {
tc_store
.read(&info.addr, buf)
.map_err(|e| (released_tcs, e))?;
releaser(self.enabled, info, buf)
}
None => {
let tc = tc_store
.read_as_vec(&info.addr)
.map_err(|e| (released_tcs, e))?;
releaser(self.enabled, info, &tc)
}
};
released_tcs += 1;
if should_delete {
let res = tc_store.delete(info.addr);
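Example (illustration only, not part of this commit's diff): invoking the new buffer-based release API sketched above. Only signatures visible in this diff are used; the scheduler and the TC pool are taken as parameters because their construction is not part of this hunk, and the releaser closure body is a placeholder.

use satrs_core::pool::{PoolProvider, StoreError};
use satrs_core::pus::scheduler::{PusScheduler, TcInfo};

// Release all due TCs, handing each raw packet to a closure. Returning true from the
// closure tells the scheduler to also delete the TC from the backing pool.
fn release_due_tcs(
    scheduler: &mut PusScheduler,
    pool: &mut impl PoolProvider,
) -> Result<u64, (u64, StoreError)> {
    let mut release_buf: [u8; 4096] = [0; 4096];
    scheduler.release_telecommands_with_buffer(
        |enabled, info: &TcInfo, tc_raw: &[u8]| {
            // Forward or execute the raw TC here; `enabled` reflects the schedule state.
            let _ = (enabled, info, tc_raw);
            true
        },
        pool,
        &mut release_buf,
    )
}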
|
||||
@ -721,16 +758,17 @@ pub mod alloc_mod {
|
||||
pub fn release_telecommands_no_deletion<R: FnMut(bool, &TcInfo, &[u8])>(
|
||||
&mut self,
|
||||
mut releaser: R,
|
||||
tc_store: &(impl PoolProviderMemInPlace + ?Sized),
|
||||
tc_store: &(impl PoolProvider + ?Sized),
|
||||
tc_buf: &mut [u8],
|
||||
) -> Result<Vec<TcInfo>, (Vec<TcInfo>, StoreError)> {
|
||||
let tcs_to_release = self.telecommands_to_release();
|
||||
let mut released_tcs = Vec::new();
|
||||
for tc in tcs_to_release {
|
||||
for info in tc.1 {
|
||||
let tc = tc_store
|
||||
.read(&info.addr)
|
||||
tc_store
|
||||
.read(&info.addr, tc_buf)
|
||||
.map_err(|e| (released_tcs.clone(), e))?;
|
||||
releaser(self.is_enabled(), info, tc);
|
||||
releaser(self.is_enabled(), info, tc_buf);
|
||||
released_tcs.push(*info);
|
||||
}
|
||||
}
|
||||
@ -753,10 +791,7 @@ pub mod alloc_mod {
|
||||
/// The holding store for the telecommands needs to be passed so all the stored telecommands
|
||||
/// can be deleted to avoid a memory leak. If at last one deletion operation fails, the error
|
||||
/// will be returned but the method will still try to delete all the commands in the schedule.
|
||||
fn reset(
|
||||
&mut self,
|
||||
store: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
) -> Result<(), StoreError> {
|
||||
fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), StoreError> {
|
||||
self.enabled = false;
|
||||
let mut deletion_ok = Ok(());
|
||||
for tc_lists in &mut self.tc_map {
|
||||
@ -814,8 +849,7 @@ pub mod alloc_mod {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::pool::{
|
||||
PoolProviderMemInPlace, StaticMemoryPool, StaticPoolAddr, StaticPoolConfig, StoreAddr,
|
||||
StoreError,
|
||||
PoolProvider, StaticMemoryPool, StaticPoolAddr, StaticPoolConfig, StoreAddr, StoreError,
|
||||
};
|
||||
use alloc::collections::btree_map::Range;
|
||||
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader};
|
||||
@ -1088,8 +1122,9 @@ mod tests {
|
||||
// test 1: too early, no tcs
|
||||
scheduler.update_time(UnixTimestamp::new_only_seconds(99));
|
||||
|
||||
let mut tc_buf: [u8; 128] = [0; 128];
|
||||
scheduler
|
||||
.release_telecommands(&mut test_closure_1, &mut pool)
|
||||
.release_telecommands_with_buffer(&mut test_closure_1, &mut pool, &mut tc_buf)
|
||||
.expect("deletion failed");
|
||||
|
||||
// test 2: exact time stamp of tc, releases 1 tc
|
||||
@ -1111,7 +1146,7 @@ mod tests {
|
||||
scheduler.update_time(UnixTimestamp::new_only_seconds(206));
|
||||
|
||||
released = scheduler
|
||||
.release_telecommands(&mut test_closure_2, &mut pool)
|
||||
.release_telecommands_with_buffer(&mut test_closure_2, &mut pool, &mut tc_buf)
|
||||
.expect("deletion failed");
|
||||
assert_eq!(released, 1);
|
||||
// TC is deleted.
|
||||
@ -1157,9 +1192,10 @@ mod tests {
|
||||
|
||||
// test 1: too early, no tcs
|
||||
scheduler.update_time(UnixTimestamp::new_only_seconds(99));
|
||||
let mut tc_buf: [u8; 128] = [0; 128];
|
||||
|
||||
let mut released = scheduler
|
||||
.release_telecommands(&mut test_closure, &mut pool)
|
||||
.release_telecommands_with_buffer(&mut test_closure, &mut pool, &mut tc_buf)
|
||||
.expect("deletion failed");
|
||||
assert_eq!(released, 0);
|
||||
|
||||
@ -1209,11 +1245,13 @@ mod tests {
|
||||
true
|
||||
};
|
||||
|
||||
let mut tc_buf: [u8; 128] = [0; 128];
|
||||
|
||||
// test 1: too early, no tcs
|
||||
scheduler.update_time(UnixTimestamp::new_only_seconds(99));
|
||||
|
||||
scheduler
|
||||
.release_telecommands(&mut test_closure_1, &mut pool)
|
||||
.release_telecommands_with_buffer(&mut test_closure_1, &mut pool, &mut tc_buf)
|
||||
.expect("deletion failed");
|
||||
|
||||
// test 2: exact time stamp of tc, releases 1 tc
|
||||
@ -1267,8 +1305,9 @@ mod tests {
|
||||
|
||||
assert!(pool.has_element_at(&tc_info_0.addr()).unwrap());
|
||||
|
||||
let data = pool.read(&tc_info_0.addr()).unwrap();
|
||||
let check_tc = PusTcReader::new(data).expect("incorrect Pus tc raw data");
|
||||
let mut read_buf: [u8; 64] = [0; 64];
|
||||
pool.read(&tc_info_0.addr(), &mut read_buf).unwrap();
|
||||
let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
|
||||
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
|
||||
|
||||
assert_eq!(scheduler.num_scheduled_telecommands(), 1);
|
||||
@ -1289,8 +1328,9 @@ mod tests {
|
||||
.release_telecommands(&mut test_closure, &mut pool)
|
||||
.unwrap();
|
||||
|
||||
let data = pool.read(&addr_vec[0]).unwrap();
|
||||
let check_tc = PusTcReader::new(data).expect("incorrect Pus tc raw data");
|
||||
let read_len = pool.read(&addr_vec[0], &mut read_buf).unwrap();
|
||||
let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
|
||||
assert_eq!(read_len, check_tc.1);
|
||||
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
|
||||
}
|
||||
|
||||
@ -1313,8 +1353,9 @@ mod tests {
|
||||
|
||||
assert!(pool.has_element_at(&info.addr).unwrap());
|
||||
|
||||
let data = pool.read(&info.addr).unwrap();
|
||||
let check_tc = PusTcReader::new(data).expect("incorrect Pus tc raw data");
|
||||
let read_len = pool.read(&info.addr, &mut buf).unwrap();
|
||||
let check_tc = PusTcReader::new(&buf).expect("incorrect Pus tc raw data");
|
||||
assert_eq!(read_len, check_tc.1);
|
||||
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
|
||||
|
||||
assert_eq!(scheduler.num_scheduled_telecommands(), 1);
|
||||
@ -1331,12 +1372,15 @@ mod tests {
|
||||
false
|
||||
};
|
||||
|
||||
let mut tc_buf: [u8; 64] = [0; 64];
|
||||
|
||||
scheduler
|
||||
.release_telecommands(&mut test_closure, &mut pool)
|
||||
.release_telecommands_with_buffer(&mut test_closure, &mut pool, &mut tc_buf)
|
||||
.unwrap();
|
||||
|
||||
let data = pool.read(&addr_vec[0]).unwrap();
|
||||
let check_tc = PusTcReader::new(data).expect("incorrect PUS tc raw data");
|
||||
let read_len = pool.read(&addr_vec[0], &mut buf).unwrap();
|
||||
let check_tc = PusTcReader::new(&buf).expect("incorrect PUS tc raw data");
|
||||
assert_eq!(read_len, check_tc.1);
|
||||
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
|
||||
}
|
||||
|
||||
@ -1903,8 +1947,9 @@ mod tests {
|
||||
|
||||
scheduler.update_time(UnixTimestamp::new_only_seconds(205));
|
||||
|
||||
let mut tc_buf: [u8; 64] = [0; 64];
|
||||
let tc_info_vec = scheduler
|
||||
.release_telecommands_no_deletion(&mut test_closure_1, &pool)
|
||||
.release_telecommands_no_deletion(&mut test_closure_1, &pool, &mut tc_buf)
|
||||
.expect("deletion failed");
|
||||
assert_eq!(tc_info_vec[0], tc_info_0);
|
||||
assert_eq!(tc_info_vec[1], tc_info_1);
|
||||
|
@ -1,6 +1,6 @@
|
||||
use super::scheduler::PusSchedulerInterface;
|
||||
use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
|
||||
use crate::pool::PoolProviderMemInPlace;
|
||||
use crate::pool::PoolProvider;
|
||||
use crate::pus::{PusPacketHandlerResult, PusPacketHandlingError};
|
||||
use alloc::string::ToString;
|
||||
use spacepackets::ecss::{scheduling, PusPacket};
|
||||
@ -42,7 +42,7 @@ impl<TcInMemConverter: EcssTcInMemConverter, Scheduler: PusSchedulerInterface>
|
||||
|
||||
pub fn handle_one_tc(
|
||||
&mut self,
|
||||
sched_tc_pool: &mut (impl PoolProviderMemInPlace + ?Sized),
|
||||
sched_tc_pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
|
||||
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
|
||||
if possible_packet.is_none() {
|
||||
@ -237,7 +237,7 @@ mod tests {
|
||||
|
||||
fn reset(
|
||||
&mut self,
|
||||
_store: &mut (impl crate::pool::PoolProviderMemInPlace + ?Sized),
|
||||
_store: &mut (impl crate::pool::PoolProvider + ?Sized),
|
||||
) -> Result<(), crate::pool::StoreError> {
|
||||
self.reset_count += 1;
|
||||
Ok(())
|
||||
|
@ -15,7 +15,7 @@
|
||||
//! ```
|
||||
//! use std::sync::{Arc, mpsc, RwLock};
|
||||
//! use std::time::Duration;
|
||||
//! use satrs_core::pool::{PoolProviderMemInPlaceWithGuards, StaticMemoryPool, StaticPoolConfig};
|
||||
//! use satrs_core::pool::{PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};
|
||||
//! use satrs_core::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender};
|
||||
//! use satrs_core::seq_count::SeqCountProviderSimple;
|
||||
//! use satrs_core::pus::MpscTmInSharedPoolSender;
|
||||
@ -56,9 +56,7 @@
|
||||
//! {
|
||||
//! let mut rg = tm_store.write().expect("Error locking shared pool");
|
||||
//! let store_guard = rg.read_with_guard(addr);
|
||||
//! let slice = store_guard.read().expect("Error reading TM slice");
|
||||
//! tm_len = slice.len();
|
||||
//! tm_buf[0..tm_len].copy_from_slice(slice);
|
||||
//! tm_len = store_guard.read(&mut tm_buf).expect("Error reading TM slice");
|
||||
//! }
|
||||
//! let (pus_tm, _) = PusTmReader::new(&tm_buf[0..tm_len], 7)
|
||||
//! .expect("Error reading verification TM");
|
||||
@ -1325,7 +1323,7 @@ mod std_mod {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::pool::{PoolProviderMemInPlaceWithGuards, StaticMemoryPool, StaticPoolConfig};
|
||||
use crate::pool::{PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};
|
||||
use crate::pus::tests::CommonTmInfo;
|
||||
use crate::pus::verification::{
|
||||
EcssTmSenderCore, EcssTmtcError, FailParams, FailParamsWithStep, RequestId, TcStateNone,
|
||||
@ -1547,7 +1545,7 @@ mod tests {
|
||||
let mut sender = TestSender::default();
|
||||
let fail_code = EcssEnumU16::new(2);
|
||||
let fail_params = FailParams::new(Some(stamp_buf.as_slice()), &fail_code, None);
|
||||
b.vr.acceptance_failure(tok, &mut sender, fail_params)
|
||||
b.vr.acceptance_failure(tok, &sender, fail_params)
|
||||
.expect("Sending acceptance success failed");
|
||||
acceptance_fail_check(&mut sender, tok.req_id, stamp_buf);
|
||||
}
|
||||
@ -1682,12 +1680,10 @@ mod tests {
|
||||
);
|
||||
|
||||
let accepted_token =
|
||||
b.vr.acceptance_success(tok, &mut sender, Some(&EMPTY_STAMP))
|
||||
b.vr.acceptance_success(tok, &sender, Some(&EMPTY_STAMP))
|
||||
.expect("Sending acceptance success failed");
|
||||
let empty =
|
||||
b.vr.start_failure(accepted_token, &mut sender, fail_params)
|
||||
.expect("Start failure failure");
|
||||
assert_eq!(empty, ());
|
||||
b.vr.start_failure(accepted_token, &mut sender, fail_params)
|
||||
.expect("Start failure failure");
|
||||
start_fail_check(&mut sender, tok.req_id, fail_data_raw);
|
||||
}
|
||||
|
||||
@ -1779,23 +1775,23 @@ mod tests {
|
||||
let mut sender = TestSender::default();
|
||||
let accepted_token = b
|
||||
.rep()
|
||||
.acceptance_success(tok, &mut sender, Some(&EMPTY_STAMP))
|
||||
.acceptance_success(tok, &sender, Some(&EMPTY_STAMP))
|
||||
.expect("Sending acceptance success failed");
|
||||
let started_token = b
|
||||
.rep()
|
||||
.start_success(accepted_token, &mut sender, Some(&[0, 1, 0, 1, 0, 1, 0]))
|
||||
.start_success(accepted_token, &sender, Some(&[0, 1, 0, 1, 0, 1, 0]))
|
||||
.expect("Sending start success failed");
|
||||
b.rep()
|
||||
.step_success(
|
||||
&started_token,
|
||||
&mut sender,
|
||||
&sender,
|
||||
Some(&EMPTY_STAMP),
|
||||
EcssEnumU8::new(0),
|
||||
)
|
||||
.expect("Sending step 0 success failed");
|
||||
b.vr.step_success(
|
||||
&started_token,
|
||||
&mut sender,
|
||||
&sender,
|
||||
Some(&EMPTY_STAMP),
|
||||
EcssEnumU8::new(1),
|
||||
)
|
||||
@ -2176,9 +2172,9 @@ mod tests {
|
||||
{
|
||||
let mut rg = shared_tm_pool.write().expect("Error locking shared pool");
|
||||
let store_guard = rg.read_with_guard(addr);
|
||||
let slice = store_guard.read().expect("Error reading TM slice");
|
||||
tm_len = slice.len();
|
||||
tm_buf[0..tm_len].copy_from_slice(slice);
|
||||
tm_len = store_guard
|
||||
.read(&mut tm_buf)
|
||||
.expect("Error reading TM slice");
|
||||
}
|
||||
let (pus_tm, _) =
|
||||
PusTmReader::new(&tm_buf[0..tm_len], 7).expect("Error reading verification TM");
|
||||
|
@ -8,9 +8,7 @@ pub use std_mod::*;
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub mod std_mod {
|
||||
use crate::pool::{
|
||||
PoolProviderMemInPlace, SharedStaticMemoryPool, StaticMemoryPool, StoreAddr,
|
||||
};
|
||||
use crate::pool::{PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StoreAddr};
|
||||
use crate::pus::EcssTmtcError;
|
||||
use spacepackets::ecss::tm::PusTmCreator;
|
||||
use spacepackets::ecss::WritablePusPacket;
|
||||
@ -37,10 +35,11 @@ pub mod std_mod {
|
||||
|
||||
pub fn add_pus_tm(&self, pus_tm: &PusTmCreator) -> Result<StoreAddr, EcssTmtcError> {
|
||||
let mut pg = self.0.write().map_err(|_| EcssTmtcError::StoreLock)?;
|
||||
let (addr, buf) = pg.free_element(pus_tm.len_written())?;
|
||||
pus_tm
|
||||
.write_to_bytes(buf)
|
||||
.expect("writing PUS TM to store failed");
|
||||
let addr = pg.free_element(pus_tm.len_written(), |buf| {
|
||||
pus_tm
|
||||
.write_to_bytes(buf)
|
||||
.expect("writing PUS TM to store failed");
|
||||
})?;
|
||||
Ok(addr)
|
||||
}
|
||||
}
|
||||
|
@ -1,6 +1,4 @@
|
||||
use satrs_core::pool::{
|
||||
PoolGuard, PoolProviderMemInPlace, StaticMemoryPool, StaticPoolConfig, StoreAddr,
|
||||
};
|
||||
use satrs_core::pool::{PoolGuard, PoolProvider, StaticMemoryPool, StaticPoolConfig, StoreAddr};
|
||||
use std::ops::DerefMut;
|
||||
use std::sync::mpsc;
|
||||
use std::sync::mpsc::{Receiver, Sender};
|
||||
@ -27,8 +25,10 @@ fn threaded_usage() {
|
||||
addr = rx.recv().expect("Receiving store address failed");
|
||||
let mut pool_access = shared_clone.write().unwrap();
|
||||
let pg = PoolGuard::new(pool_access.deref_mut(), addr);
|
||||
let read_res = pg.read().expect("Reading failed");
|
||||
assert_eq!(read_res, DUMMY_DATA);
|
||||
let mut read_buf: [u8; 4] = [0; 4];
|
||||
let read_bytes = pg.read(&mut read_buf).expect("Reading failed");
|
||||
assert_eq!(read_buf, DUMMY_DATA);
|
||||
assert_eq!(read_bytes, 4);
|
||||
}
|
||||
let pool_access = shared_clone.read().unwrap();
|
||||
assert!(!pool_access.has_element_at(&addr).expect("Invalid address"));
|
||||
|
@ -2,8 +2,7 @@
|
||||
pub mod crossbeam_test {
|
||||
use hashbrown::HashMap;
|
||||
use satrs_core::pool::{
|
||||
PoolProviderMemInPlace, PoolProviderMemInPlaceWithGuards, StaticMemoryPool,
|
||||
StaticPoolConfig,
|
||||
PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig,
|
||||
};
|
||||
use satrs_core::pus::verification::{
|
||||
FailParams, RequestId, VerificationReporterCfg, VerificationReporterWithSender,
|
||||
@ -59,15 +58,21 @@ pub mod crossbeam_test {
|
||||
let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
|
||||
let pus_tc_0 = PusTcCreator::new_no_app_data(&mut sph, tc_header, true);
|
||||
req_id_0 = RequestId::new(&pus_tc_0);
|
||||
let (addr, buf) = tc_guard.free_element(pus_tc_0.len_written()).unwrap();
|
||||
pus_tc_0.write_to_bytes(buf).unwrap();
|
||||
let addr = tc_guard
|
||||
.free_element(pus_tc_0.len_written(), |buf| {
|
||||
pus_tc_0.write_to_bytes(buf).unwrap();
|
||||
})
|
||||
.unwrap();
|
||||
tx_tc_0.send(addr).unwrap();
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID, 1, 0).unwrap();
|
||||
let tc_header = PusTcSecondaryHeader::new_simple(5, 1);
|
||||
let pus_tc_1 = PusTcCreator::new_no_app_data(&mut sph, tc_header, true);
|
||||
req_id_1 = RequestId::new(&pus_tc_1);
|
||||
let (addr, buf) = tc_guard.free_element(pus_tc_0.len_written()).unwrap();
|
||||
pus_tc_1.write_to_bytes(buf).unwrap();
|
||||
let addr = tc_guard
|
||||
.free_element(pus_tc_0.len_written(), |buf| {
|
||||
pus_tc_1.write_to_bytes(buf).unwrap();
|
||||
})
|
||||
.unwrap();
|
||||
tx_tc_1.send(addr).unwrap();
|
||||
}
|
||||
let verif_sender_0 = thread::spawn(move || {
|
||||
@ -79,9 +84,7 @@ pub mod crossbeam_test {
|
||||
{
|
||||
let mut tc_guard = shared_tc_pool_0.write().unwrap();
|
||||
let pg = tc_guard.read_with_guard(tc_addr);
|
||||
let buf = pg.read().unwrap();
|
||||
tc_len = buf.len();
|
||||
tc_buf[0..tc_len].copy_from_slice(buf);
|
||||
tc_len = pg.read(&mut tc_buf).unwrap();
|
||||
}
|
||||
let (_tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
|
||||
|
||||
@ -117,9 +120,7 @@ pub mod crossbeam_test {
|
||||
{
|
||||
let mut tc_guard = shared_tc_pool_1.write().unwrap();
|
||||
let pg = tc_guard.read_with_guard(tc_addr);
|
||||
let buf = pg.read().unwrap();
|
||||
tc_len = buf.len();
|
||||
tc_buf[0..tc_len].copy_from_slice(buf);
|
||||
tc_len = pg.read(&mut tc_buf).unwrap();
|
||||
}
|
||||
let (tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
|
||||
let token = reporter_with_sender_1.add_tc(&tc);
|
||||
@ -149,9 +150,9 @@ pub mod crossbeam_test {
|
||||
{
|
||||
let mut rg = shared_tm_store.write().expect("Error locking shared pool");
|
||||
let store_guard = rg.read_with_guard(verif_addr);
|
||||
let slice = store_guard.read().expect("Error reading TM slice");
|
||||
tm_len = slice.len();
|
||||
tm_buf[0..tm_len].copy_from_slice(slice);
|
||||
tm_len = store_guard
|
||||
.read(&mut tm_buf)
|
||||
.expect("Error reading TM slice");
|
||||
}
|
||||
let (pus_tm, _) =
|
||||
PusTmReader::new(&tm_buf[0..tm_len], 7).expect("Error reading verification TM");
|
||||
|
@ -2,7 +2,7 @@ use std::sync::mpsc;
|
||||
use std::time::Duration;
|
||||
|
||||
use log::{error, info, warn};
|
||||
use satrs_core::pool::{PoolProviderMemInPlace, StaticMemoryPool, StoreAddr};
|
||||
use satrs_core::pool::{PoolProvider, StaticMemoryPool, StoreAddr};
|
||||
use satrs_core::pus::scheduler::{PusScheduler, TcInfo};
|
||||
use satrs_core::pus::scheduler_srv::PusService11SchedHandler;
|
||||
use satrs_core::pus::verification::VerificationReporterWithSender;
|
||||
@ -54,6 +54,7 @@ impl TcReleaser for mpsc::Sender<Vec<u8>> {
|
||||
pub struct Pus11Wrapper<TcInMemConverter: EcssTcInMemConverter> {
|
||||
pub pus_11_handler: PusService11SchedHandler<TcInMemConverter, PusScheduler>,
|
||||
pub sched_tc_pool: StaticMemoryPool,
|
||||
pub releaser_buf: [u8; 4096],
|
||||
pub tc_releaser: Box<dyn TcReleaser + Send>,
|
||||
}
|
||||
|
||||
@ -70,7 +71,11 @@ impl<TcInMemConverter: EcssTcInMemConverter> Pus11Wrapper<TcInMemConverter> {
|
||||
let released_tcs = self
|
||||
.pus_11_handler
|
||||
.scheduler_mut()
|
||||
.release_telecommands(releaser, &mut self.sched_tc_pool)
|
||||
.release_telecommands_with_buffer(
|
||||
releaser,
|
||||
&mut self.sched_tc_pool,
|
||||
&mut self.releaser_buf,
|
||||
)
|
||||
.expect("releasing TCs failed");
|
||||
if released_tcs > 0 {
|
||||
info!("{released_tcs} TC(s) released from scheduler");
|
||||
@ -136,6 +141,7 @@ pub fn create_scheduler_service_static(
|
||||
Pus11Wrapper {
|
||||
pus_11_handler,
|
||||
sched_tc_pool,
|
||||
releaser_buf: [0; 4096],
|
||||
tc_releaser: Box::new(tc_releaser),
|
||||
}
|
||||
}
|
||||
@ -172,6 +178,7 @@ pub fn create_scheduler_service_dynamic(
|
||||
Pus11Wrapper {
|
||||
pus_11_handler,
|
||||
sched_tc_pool,
|
||||
releaser_buf: [0; 4096],
|
||||
tc_releaser: Box::new(tc_source_sender),
|
||||
}
|
||||
}
|
||||
|
@ -3,8 +3,9 @@ use std::{
|
||||
sync::mpsc::{Receiver, Sender},
|
||||
};
|
||||
|
||||
use log::info;
|
||||
use satrs_core::{
|
||||
pool::{PoolProviderMemInPlace, StoreAddr},
|
||||
pool::{PoolProvider, StoreAddr},
|
||||
seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore},
|
||||
spacepackets::{
|
||||
ecss::{tm::PusTmZeroCopyWriter, PusPacket},
|
||||
@ -63,9 +64,14 @@ impl TmFunnelCommon {
|
||||
*entry += 1;
|
||||
}
|
||||
|
||||
Self::packet_printout(&zero_copy_writer);
|
||||
// This operation has to come last!
|
||||
zero_copy_writer.finish();
|
||||
}
|
||||
|
||||
fn packet_printout(tm: &PusTmZeroCopyWriter) {
|
||||
info!("Sending PUS TM[{},{}]", tm.service(), tm.subservice());
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TmFunnelStatic {
@@ -96,16 +102,21 @@ impl TmFunnelStatic {
// the CRC.
let shared_pool = self.shared_tm_store.clone_backing_pool();
let mut pool_guard = shared_pool.write().expect("Locking TM pool failed");
let tm_raw = pool_guard
.modify(&addr)
let mut tm_copy = Vec::new();
pool_guard
.modify(&addr, |buf| {
let zero_copy_writer = PusTmZeroCopyWriter::new(buf, MIN_CDS_FIELD_LEN)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
tm_copy = buf.to_vec()
})
.expect("Reading TM from pool failed");
let zero_copy_writer = PusTmZeroCopyWriter::new(tm_raw, MIN_CDS_FIELD_LEN)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
self.tm_server_tx
.send(addr)
.expect("Sending TM to server failed");
self.common.sync_tm_tcp_source.add_tm(tm_raw);
// We could also do this step in the update closure, but I'd rather avoid this, could
// lead to nested locking.
self.common.sync_tm_tcp_source.add_tm(&tm_copy);
}
}
}
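Example (illustration only, not part of this commit's diff): the copy-out-inside-the-closure pattern used above, reduced to its core. Any consumer that needs other locks runs only after the pool access has finished, which is the nested-locking concern mentioned in the comment; the forward callback and function name are placeholders.

use satrs_core::pool::{PoolProvider, StoreAddr, StoreError};

// Patch a stored TM packet in place and hand a copy to a consumer after the pool
// access is finished, so the consumer never runs while the pool is borrowed.
fn patch_and_forward(
    pool: &mut impl PoolProvider,
    addr: &StoreAddr,
    forward: impl FnOnce(&[u8]),
) -> Result<(), StoreError> {
    let mut tm_copy = Vec::new();
    pool.modify(addr, |buf| {
        // In-place processing (e.g. sequence count or CRC updates) would go here.
        tm_copy = buf.to_vec();
    })?;
    forward(&tm_copy);
    Ok(())
}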
|
||||
|
@@ -5,7 +5,7 @@ use std::sync::mpsc::{self, Receiver, SendError, Sender, TryRecvError};
use thiserror::Error;

use crate::pus::PusReceiver;
use satrs_core::pool::{PoolProviderMemInPlace, SharedStaticMemoryPool, StoreAddr, StoreError};
use satrs_core::pool::{PoolProvider, SharedStaticMemoryPool, StoreAddr, StoreError};
use satrs_core::spacepackets::ecss::tc::PusTcReader;
use satrs_core::spacepackets::ecss::PusPacket;
use satrs_core::tmtc::ReceivesCcsdsTc;
@@ -28,8 +25,10 @@ pub struct SharedTcPool {
impl SharedTcPool {
pub fn add_pus_tc(&mut self, pus_tc: &PusTcReader) -> Result<StoreAddr, StoreError> {
let mut pg = self.pool.write().expect("error locking TC store");
let (addr, buf) = pg.free_element(pus_tc.len_packed())?;
buf[0..pus_tc.len_packed()].copy_from_slice(pus_tc.raw_data());
let addr = pg.free_element(pus_tc.len_packed(), |buf| {
buf[0..pus_tc.len_packed()].copy_from_slice(pus_tc.raw_data());
})?;
Ok(addr)
}
}
@@ -125,8 +126,8 @@ impl TcSourceTaskStatic {
.pool
.read()
.expect("locking tc pool failed");
let data = pool.read(&addr).expect("reading pool failed");
self.tc_buf[0..data.len()].copy_from_slice(data);
pool.read(&addr, &mut self.tc_buf)
.expect("reading pool failed");
drop(pool);
match PusTcReader::new(&self.tc_buf) {
Ok((pus_tc, _)) => {
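Example (illustration only, not part of this commit's diff): the short-lock read pattern from the hunk above as a free function. It assumes the shared pool exposes an RwLock-style read() as used throughout this commit; the pool lock is dropped before the packet is parsed so parsing never runs under the lock.

use satrs_core::pool::{PoolProvider, SharedStaticMemoryPool, StoreAddr};

// Fetch a stored TC into a task-local buffer while holding the pool read lock only
// for the duration of the copy; returns the number of copied bytes.
fn fetch_tc(shared_pool: &SharedStaticMemoryPool, addr: StoreAddr, tc_buf: &mut [u8]) -> usize {
    let pool = shared_pool.read().expect("locking tc pool failed");
    let len = pool.read(&addr, tc_buf).expect("reading pool failed");
    drop(pool);
    len
}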
|
||||
|
@ -6,7 +6,7 @@ use std::{
|
||||
use log::{info, warn};
|
||||
use satrs_core::{
|
||||
hal::std::udp_server::{ReceiveResult, UdpTcServer},
|
||||
pool::{PoolProviderMemInPlaceWithGuards, SharedStaticMemoryPool, StoreAddr},
|
||||
pool::{PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr},
|
||||
tmtc::CcsdsError,
|
||||
};
|
||||
|
||||
@ -29,20 +29,13 @@ impl UdpTmHandler for StaticUdpTmHandler {
|
||||
}
|
||||
let mut store_lock = store_lock.unwrap();
|
||||
let pg = store_lock.read_with_guard(addr);
|
||||
let read_res = pg.read();
|
||||
let read_res = pg.read_as_vec();
|
||||
if read_res.is_err() {
|
||||
warn!("Error reading TM pool data");
|
||||
continue;
|
||||
}
|
||||
let buf = read_res.unwrap();
|
||||
if buf.len() > 9 {
|
||||
let service = buf[7];
|
||||
let subservice = buf[8];
|
||||
info!("Sending PUS TM[{service},{subservice}]")
|
||||
} else {
|
||||
info!("Sending PUS TM");
|
||||
}
|
||||
let result = socket.send_to(buf, recv_addr);
|
||||
let result = socket.send_to(&buf, recv_addr);
|
||||
if let Err(e) = result {
|
||||
warn!("Sending TM with UDP socket failed: {e}")
|
||||
}
|
||||
|