From df2733a176ef64b319ceb764e01eb2d9efa013ad Mon Sep 17 00:00:00 2001 From: Robin Mueller Date: Thu, 4 Apr 2024 15:18:53 +0200 Subject: [PATCH] Major refactoring and update of PUS module --- .gitignore | 1 + README.md | 9 +- coverage.py | 2 +- misc/satrs-logo-v2.png | Bin 0 -> 49931 bytes satrs-example/Cargo.toml | 9 +- satrs-example/satrs-tmtc/common.py | 13 +- satrs-example/satrs-tmtc/main.py | 26 +- satrs-example/satrs-tmtc/pus_tc.py | 94 +- satrs-example/src/acs.rs | 118 - satrs-example/src/acs/mgm.rs | 284 +++ satrs-example/src/acs/mod.rs | 1 + satrs-example/src/bin/simpleclient.rs | 3 +- satrs-example/src/ccsds.rs | 27 +- satrs-example/src/config.rs | 99 +- satrs-example/src/events.rs | 106 +- satrs-example/src/hk.rs | 18 +- satrs-example/src/lib.rs | 38 + satrs-example/src/main.rs | 277 ++- satrs-example/src/pus/action.rs | 756 +++++- satrs-example/src/pus/event.rs | 112 +- satrs-example/src/pus/hk.rs | 577 +++-- satrs-example/src/pus/mod.rs | 702 +++++- satrs-example/src/pus/mode.rs | 434 ++++ satrs-example/src/pus/scheduler.rs | 110 +- satrs-example/src/pus/stack.rs | 112 +- satrs-example/src/pus/test.rs | 146 +- satrs-example/src/requests.rs | 162 +- satrs-example/src/tcp.rs | 14 +- satrs-example/src/tm_funnel.rs | 31 +- satrs-example/src/tmtc.rs | 13 +- satrs-example/src/udp.rs | 31 +- satrs-mib/Cargo.toml | 2 +- satrs-mib/codegen/Cargo.toml | 2 +- satrs-shared/Cargo.toml | 4 +- satrs/CHANGELOG.md | 41 + satrs/Cargo.toml | 5 +- satrs/release-checklist.md | 4 +- satrs/src/action.rs | 105 +- satrs/src/cfdp/dest.rs | 17 +- satrs/src/cfdp/mod.rs | 29 +- satrs/src/encoding/ccsds.rs | 101 +- satrs/src/event_man.rs | 580 +++-- satrs/src/events.rs | 2 +- satrs/src/hal/mod.rs | 1 - satrs/src/hal/std/tcp_spacepackets_server.rs | 59 +- satrs/src/hal/std/udp_server.rs | 8 +- satrs/src/hk.rs | 44 +- satrs/src/lib.rs | 78 +- satrs/src/mode.rs | 539 ++++- satrs/src/mode_tree.rs | 37 + satrs/src/objects.rs | 308 --- satrs/src/params.rs | 612 ++++- satrs/src/pool.rs | 1 - satrs/src/pus/action.rs | 877 +++++-- satrs/src/pus/event.rs | 242 +- satrs/src/pus/event_man.rs | 60 +- satrs/src/pus/event_srv.rs | 191 +- satrs/src/pus/hk.rs | 406 ---- satrs/src/pus/mod.rs | 1140 +++++---- satrs/src/pus/mode.rs | 141 ++ satrs/src/pus/scheduler.rs | 121 +- satrs/src/pus/scheduler_srv.rs | 204 +- satrs/src/pus/test.rs | 189 +- satrs/src/pus/verification.rs | 2245 ++++++++++-------- satrs/src/queue.rs | 61 +- satrs/src/request.rs | 632 ++++- satrs/src/seq_count.rs | 26 +- satrs/src/time.rs | 7 + satrs/src/tmtc/ccsds_distrib.rs | 99 +- satrs/src/tmtc/pus_distrib.rs | 29 +- satrs/src/tmtc/tm_helper.rs | 10 +- satrs/tests/mode_tree.rs | 358 +++ satrs/tests/pus_events.rs | 59 +- satrs/tests/pus_verification.rs | 44 +- satrs/tests/tcp_servers.rs | 14 +- 75 files changed, 9295 insertions(+), 4764 deletions(-) create mode 100644 misc/satrs-logo-v2.png delete mode 100644 satrs-example/src/acs.rs create mode 100644 satrs-example/src/acs/mgm.rs create mode 100644 satrs-example/src/acs/mod.rs create mode 100644 satrs-example/src/pus/mode.rs create mode 100644 satrs/src/mode_tree.rs delete mode 100644 satrs/src/objects.rs delete mode 100644 satrs/src/pus/hk.rs create mode 100644 satrs/src/time.rs create mode 100644 satrs/tests/mode_tree.rs diff --git a/.gitignore b/.gitignore index fba2216..cf44893 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ target/ +output.log /Cargo.lock output.log diff --git a/README.md b/README.md index aaaee5c..b1e76c9 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -
<img src="misc/satrs-logo.png">
+<img src="misc/satrs-logo-v2.png">
[![sat-rs website](https://img.shields.io/badge/sat--rs-website-darkgreen?style=flat)](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/) [![sat-rs book](https://img.shields.io/badge/sat--rs-book-darkgreen?style=flat)](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/book/) @@ -24,6 +24,11 @@ A lot of the architecture and general design considerations are based on the through the 2 missions [FLP](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/flying-laptop/) and [EIVE](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/EIVE/). +This framework is in the early stages of development. Important features are missing. New releases +with breaking changes are released regularly, with all changes documented inside respective +changelog files. You should only use this framework if you are willing to work in this +environment. + # Overview This project currently contains following crates: @@ -40,7 +45,7 @@ This project currently contains following crates: * [`satrs-mib`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-mib): Components to build a mission information base from the on-board software directly. * [`satrs-example-stm32f3-disco`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example-stm32f3-disco): - Example of a simple example on-board software using sat-rs components on a bare-metal system + Example of a simple on-board software using low-level sat-rs components on a bare-metal system with constrained resources. Each project has its own `CHANGELOG.md`. diff --git a/coverage.py b/coverage.py index 3b1c3c4..c932d9a 100755 --- a/coverage.py +++ b/coverage.py @@ -47,7 +47,7 @@ def main(): parser.add_argument( "-p", "--package", - choices=["satrs", "satrs-minisim"], + choices=["satrs", "satrs-minisim", "satrs-example"], default="satrs", help="Choose project to generate coverage for", ) diff --git a/misc/satrs-logo-v2.png b/misc/satrs-logo-v2.png new file mode 100644 index 0000000000000000000000000000000000000000..76859f83627c78dfbfe42a6bf08cb2930f2b8eb0 GIT binary patch literal 49931 [49931 bytes of binary PNG image data omitted]
zX@{sa&0WzHH*jjOz(xKZRewmP{}5;+nJ@0^qYiC+JT5fLwl-Ycwyzb%)EE1Y_Si2X z=qjtmhiXXlDd)AjLg>5t=zp8u`{;a!#4H@M)6@hsVuQfoq03Kxo&O2>IAS2@~;NQ;z z`|yuBJ3{HOr7xAa6MG_S0TjF!x^*U>6JzD;agm>$)E^a;zNq=n`?L9?vQ|Bx{sfE%COMxc zTNOG_e7%x%!_H&{(RYE~Mi;k^yklht;k|o2rCR|%*`9+B=N_$KZ523!gDL0NHH??u zJD>TZ`^5t!C@ZZp3bAjkvi9qf%Iyi3pji9r4rlN#)|= zxUQ38ZibWn0fi-kFf|+%QJM6d43DA(ydAp?3i13xn%atn+IU#T)A0CFz>Py=PU#Qv zbB}oxLoS(^0?!$!OSK-!CR$h$`KP3pMfQL7df(b*k)ID0n-BS(UeMAm0j7|jxX~lg z$NtDjZIS;qNVq1%=puJLQ7uyYdP28w+JC`wg_eBypb)`aRw0l6p;M8CPe4|!1iymG zgsMY79y%<2R!+fHrFZ*hlg6*t@?`H&pY0y!y)ItGF8KeN>Alf@uMFf^^YMJXb zw19Ht4Vcy`LMsb!GDz2gQ+)$70akBFMgv?M6r>5J=iqr2Cr!t5Y@HB>;B%f|Ukcv9 zTZUAf1DrTv3w1mtwjf+iMr`LdBPjHwF`!tmW^Or?e&`40mLY!+JTJw<|0oJEV`#Tz zcqqC*!pzNG&m5VCeu0>k8u4Dz7My<@B7Ej-81KZNRg*75Kw(eyY@HEipbQ^8_QFaL zpsmRZda-FI1-+V_vuf}6SYSDuA=G^i+iz;cU(;w&B($!(rc1-Lf+LCsz#Ft>-Iy)k z0y!Ux#AbpeeN8L|WzTmG3&e%hr zFp|zI8LT|y;Cce$B)1wkN-7DLgI91lgPVln6g<%Nb> znfH|QG9LetX|#reXJCbssr~O+>LrW3wdS;ag%tS}LEd}|rqyA_0Y-Hr zryKgsVh@J|3K5VL7$Ha3W+1$cEK6rA0~=ugtac@+nAFEgNBm1_;2#j&Y=z*TT(Ief z+=G#{dtv`8x{d+KRS+i0!p*t?PR+ifJC-`6Z~uGc^qRUhRS-g%WQcO|!L%;7M}v9N zt2@viI6*lp!wD8QrIPW|IsSTcUm#OX_-Bk@V+-Y+43fE#HsC$i3Mk9UAk>Fz4Eb{k zd9hI%v{${2g|1wb`Ku z$3pHB!LKxgg^qtAj?_^}&sJz5nG~W+YV9XhFGPsKWGVoUgK~>rn?VH;fFrL#G5aG% zQaVX68Uo5nyQ1ceU|>&v?yxldOcbRZBzMO8A6V7ET@g0Gk^PynN&Z!&E@+T&#}V<@ zDp^Pi2Kn6<;ZGIDJOjA^0VeAAT7%`-Yuh^HW5NOg{5KM` z>l#LDNSeXDu3um{^9(EGCt)G)%K!Ji71SFzgd^b#upI<@aH@@G`I}dDnc`4N@MkCJ zS6@lRqSpvsi~=&`ebuvY5c+Ns$mkg#Y<5%-N7~>$>wgf8(Qr#QY`}fo& zMBEPoLl%HBDg*>wU4HbV9E0*mDbM8eoIE0M2I6#6Hkr8((E~w*sJ7rGDf|+zC(x?q zYO%_=0BXjf7F(Z5WkMJllM^tRQzB5vnI!Z@_+a?L30la<8h$5_H1Dj-;#f^P^J!;j zo%F1PAC%Owl(|?OMw39;FPxYwgRd%JGDUzH&Y=<)_rvvNZYZ5u*~A@;KG<&jHW4;< zjx_)iS@RB%@xk6QfbA&<43yM7QX&=K>;cIpx-L`rnF5J7Z@}OrY(SXprI)_ zKMem+-&O?dq#(tRL*Tz0aNQ>hs1C6t@uPc52IbA^q>cDS$e}v`-^kAk2A7*4X?>ru z1R)_HaR&2VN<8!f%v93KX&C^yR0LG9TJmH2^@pcWdU;4dRS1^MIG@j?_P*g zT3N}nW!wD+1ff7z{F?Cc6eaNYzXKhvG|WK%jemfNVO1Gw3#?sdhPvd0yZt#vOF+f@ zrUqb`hI_Hgzepcav2uru^-^YvA=i6rmtp2 z;R%K8;N)c$N(w(pAgy#Ma$CEadiGDTw0qgkDti~W14w1S|29c_V_D!VIX~Nmlp%zT z(}w?e0jDk}k+B7Jpiiq@Yg;*_`%A2$n2wx2YFbDT$j#7^Y5|r!ovebSVEjn79($ju z8Gsn3Q-D|L2IpN3X-Qb2lG{K4GVH*<)KSFNL~i^+8~)8a9HNCfFJyrA5oZ&LW*k}4 zP~;K!Y`Sc6!v&J@2czy+Mn60g*1CXhjEyWlqj?=r@e?R3`7CL-a=>14i7=^R-$Iit>2{RF7Se}Dt1(S;Ge5GWo|ADM27%ffzGDplW*+hC5^SS zxJivSx%Ia{9iKdpT6^OI13_VDjcYfcxElJlJx7a0xAuIBi$5NmIPo+1)i+HaZ-Woh gQ0d@^qB^FozUuztT3c)rILe@>@+yxpvW9Q}A1*yihyVZp literal 0 HcmV?d00001 diff --git a/satrs-example/Cargo.toml b/satrs-example/Cargo.toml index d874a9c..b22904b 100644 --- a/satrs-example/Cargo.toml +++ b/satrs-example/Cargo.toml @@ -17,11 +17,15 @@ zerocopy = "0.6" csv = "1" num_enum = "0.7" thiserror = "1" +lazy_static = "1" +strum = { version = "0.26", features = ["derive"] } derive-new = "0.5" +serde = { version = "1", features = ["derive"] } +serde_json = "1" [dependencies.satrs] -# version = "0.2.0-rc.0" path = "../satrs" +features = ["test_util"] [dependencies.satrs-mib] version = "0.1.1" @@ -30,3 +34,6 @@ path = "../satrs-mib" [features] dyn_tmtc = [] default = ["dyn_tmtc"] + +[dev-dependencies] +env_logger = "0.11" diff --git a/satrs-example/satrs-tmtc/common.py b/satrs-example/satrs-tmtc/common.py index 8f57e54..6f56604 100644 --- a/satrs-example/satrs-tmtc/common.py +++ b/satrs-example/satrs-tmtc/common.py @@ -4,11 +4,12 @@ import dataclasses import enum import struct -from spacepackets.ecss.tc import PacketId, PacketType -EXAMPLE_PUS_APID = 0x02 -EXAMPLE_PUS_PACKET_ID_TM = PacketId(PacketType.TM, True, EXAMPLE_PUS_APID) -TM_PACKET_IDS = 
[EXAMPLE_PUS_PACKET_ID_TM] +class Apid(enum.IntEnum): + SCHED = 1 + GENERIC_PUS = 2 + ACS = 3 + CFDP = 4 class EventSeverity(enum.IntEnum): @@ -36,8 +37,8 @@ class EventU32: ) -class RequestTargetId(enum.IntEnum): - ACS = 1 +class AcsId(enum.IntEnum): + MGM_0 = 0 class AcsHkIds(enum.IntEnum): diff --git a/satrs-example/satrs-tmtc/main.py b/satrs-example/satrs-tmtc/main.py index 66a41e4..a3e0caf 100755 --- a/satrs-example/satrs-tmtc/main.py +++ b/satrs-example/satrs-tmtc/main.py @@ -3,10 +3,11 @@ import logging import sys import time -from typing import Optional +from typing import Any, Optional from prompt_toolkit.history import History from prompt_toolkit.history import FileHistory +from spacepackets.ccsds import PacketId, PacketType import tmtccmd from spacepackets.ecss import PusTelemetry, PusVerificator from spacepackets.ecss.pus_17_test import Service17Tm @@ -16,7 +17,7 @@ from spacepackets.ccsds.time import CdsShortTimestamp from tmtccmd import TcHandlerBase, ProcedureParamsWrapper from tmtccmd.core.base import BackendRequest from tmtccmd.pus import VerificationWrapper -from tmtccmd.tmtc import CcsdsTmHandler, SpecificApidHandlerBase +from tmtccmd.tmtc import CcsdsTmHandler, GenericApidHandlerBase from tmtccmd.com import ComInterface from tmtccmd.config import ( CmdTreeNode, @@ -46,7 +47,7 @@ from tmtccmd.util.obj_id import ObjectIdDictT import pus_tc -from common import EXAMPLE_PUS_APID, TM_PACKET_IDS, EventU32 +from common import Apid, EventU32 _LOGGER = logging.getLogger() @@ -62,10 +63,13 @@ class SatRsConfigHook(HookBase): ) assert self.cfg_path is not None + packet_id_list = [] + for apid in Apid: + packet_id_list.append(PacketId(PacketType.TM, True, apid)) cfg = create_com_interface_cfg_default( com_if_key=com_if_key, json_cfg_path=self.cfg_path, - space_packet_ids=TM_PACKET_IDS, + space_packet_ids=packet_id_list, ) assert cfg is not None return create_com_interface_default(cfg) @@ -85,19 +89,19 @@ class SatRsConfigHook(HookBase): return get_core_object_ids() -class PusHandler(SpecificApidHandlerBase): +class PusHandler(GenericApidHandlerBase): def __init__( self, file_logger: logging.Logger, verif_wrapper: VerificationWrapper, raw_logger: RawTmtcTimedLogWrapper, ): - super().__init__(EXAMPLE_PUS_APID, None) + super().__init__(None) self.file_logger = file_logger self.raw_logger = raw_logger self.verif_wrapper = verif_wrapper - def handle_tm(self, packet: bytes, _user_args: any): + def handle_tm(self, apid: int, packet: bytes, _user_args: Any): try: pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty()) except ValueError as e: @@ -177,7 +181,7 @@ class TcHandler(TcHandlerBase): tc_sched_timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE, seq_cnt_provider=seq_count_provider, pus_verificator=self.verif_wrapper.pus_verificator, - default_pus_apid=EXAMPLE_PUS_APID, + default_pus_apid=None, ) def send_cb(self, send_params: SendCbParams): @@ -221,7 +225,6 @@ def main(): post_args_wrapper.set_params_without_prompts(proc_wrapper) else: post_args_wrapper.set_params_with_prompts(proc_wrapper) - params.apid = EXAMPLE_PUS_APID setup_args = SetupWrapper( hook_obj=hook_obj, setup_params=params, proc_param_wrapper=proc_wrapper ) @@ -233,8 +236,9 @@ def main(): verification_wrapper = VerificationWrapper(verificator, _LOGGER, file_logger) # Create primary TM handler and add it to the CCSDS Packet Handler tm_handler = PusHandler(file_logger, verification_wrapper, raw_logger) - ccsds_handler = CcsdsTmHandler(generic_handler=None) - ccsds_handler.add_apid_handler(tm_handler) + 
ccsds_handler = CcsdsTmHandler(generic_handler=tm_handler) + # TODO: We could add the CFDP handler for the CFDP APID at a later stage. + # ccsds_handler.add_apid_handler(tm_handler) # Create TC handler seq_count_provider = PusFileSeqCountProvider() diff --git a/satrs-example/satrs-tmtc/pus_tc.py b/satrs-example/satrs-tmtc/pus_tc.py index f73b755..b0febdc 100644 --- a/satrs-example/satrs-tmtc/pus_tc.py +++ b/satrs-example/satrs-tmtc/pus_tc.py @@ -1,27 +1,58 @@ import datetime +import struct import logging from spacepackets.ccsds import CdsShortTimestamp from spacepackets.ecss import PusTelecommand from tmtccmd.config import CmdTreeNode +from tmtccmd.pus.tc.s200_fsfw_mode import Mode from tmtccmd.tmtc import DefaultPusQueueHelper from tmtccmd.pus.s11_tc_sched import create_time_tagged_cmd -from tmtccmd.pus.tc.s3_fsfw_hk import create_request_one_hk_command +from tmtccmd.pus.s200_fsfw_mode import Subservice as ModeSubservice -from common import ( - EXAMPLE_PUS_APID, - make_addressable_id, - RequestTargetId, - AcsHkIds, -) +from common import AcsId, Apid _LOGGER = logging.getLogger(__name__) +def create_set_mode_cmd( + apid: int, unique_id: int, mode: int, submode: int +) -> PusTelecommand: + app_data = bytearray() + app_data.extend(struct.pack("!I", unique_id)) + app_data.extend(struct.pack("!I", mode)) + app_data.extend(struct.pack("!H", submode)) + return PusTelecommand( + service=200, + subservice=ModeSubservice.TC_MODE_COMMAND, + apid=apid, + app_data=app_data, + ) + + def create_cmd_definition_tree() -> CmdTreeNode: root_node = CmdTreeNode.root_node() + hk_node = CmdTreeNode("hk", "Housekeeping Node", hide_children_for_print=True) + hk_node.add_child(CmdTreeNode("one_shot_hk", "Request One Shot HK set")) + hk_node.add_child( + CmdTreeNode("enable", "Enable periodic housekeeping data generation") + ) + hk_node.add_child( + CmdTreeNode("disable", "Disable periodic housekeeping data generation") + ) + + mode_node = CmdTreeNode("mode", "Mode Node", hide_children_for_print=True) + set_mode_node = CmdTreeNode( + "set_mode", "Set Node", hide_children_which_are_leaves=True + ) + set_mode_node.add_child(CmdTreeNode("off", "Set OFF Mode")) + set_mode_node.add_child(CmdTreeNode("on", "Set ON Mode")) + set_mode_node.add_child(CmdTreeNode("normal", "Set NORMAL Mode")) + mode_node.add_child(set_mode_node) + mode_node.add_child(CmdTreeNode("read_mode", "Read Mode")) + test_node = CmdTreeNode("test", "Test Node") test_node.add_child(CmdTreeNode("ping", "Send PUS ping TC")) test_node.add_child(CmdTreeNode("trigger_event", "Send PUS test to trigger event")) @@ -37,7 +68,9 @@ def create_cmd_definition_tree() -> CmdTreeNode: acs_node = CmdTreeNode("acs", "ACS Subsystem Node") mgm_node = CmdTreeNode("mgms", "MGM devices node") - mgm_node.add_child(CmdTreeNode("one_shot_hk", "Request one shot HK")) + mgm_node.add_child(mode_node) + mgm_node.add_child(hk_node) + acs_node.add_child(mgm_node) root_node.add_child(acs_node) @@ -54,10 +87,14 @@ def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str): assert len(cmd_path_list) >= 2 if cmd_path_list[1] == "ping": q.add_log_cmd("Sending PUS ping telecommand") - return q.add_pus_tc(PusTelecommand(service=17, subservice=1)) + return q.add_pus_tc( + PusTelecommand(apid=Apid.GENERIC_PUS, service=17, subservice=1) + ) elif cmd_path_list[1] == "trigger_event": q.add_log_cmd("Triggering test event") - return q.add_pus_tc(PusTelecommand(service=17, subservice=128)) + return q.add_pus_tc( + PusTelecommand(apid=Apid.GENERIC_PUS, service=17, subservice=128) + ) if 
cmd_path_list[0] == "scheduler": assert len(cmd_path_list) >= 2 if cmd_path_list[1] == "schedule_ping_10_secs_ahead": @@ -69,17 +106,38 @@ def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str): create_time_tagged_cmd( time_stamp, PusTelecommand(service=17, subservice=1), - apid=EXAMPLE_PUS_APID, + apid=Apid.SCHED, ) ) if cmd_path_list[0] == "acs": assert len(cmd_path_list) >= 2 - if cmd_path_list[1] == "mgm": + if cmd_path_list[1] == "mgms": assert len(cmd_path_list) >= 3 - if cmd_path_list[2] == "one_shot_hk": - q.add_log_cmd("Sending HK one shot request") - q.add_pus_tc( - create_request_one_hk_command( - make_addressable_id(RequestTargetId.ACS, AcsHkIds.MGM_SET) + if cmd_path_list[2] == "hk": + if cmd_path_list[3] == "one_shot_hk": + q.add_log_cmd("Sending HK one shot request") + # TODO: Fix + # q.add_pus_tc( + # create_request_one_hk_command( + # make_addressable_id(Apid.ACS, AcsId.MGM_SET) + # ) + # ) + if cmd_path_list[2] == "mode": + if cmd_path_list[3] == "set_mode": + handle_set_mode_cmd( + q, "MGM 0", cmd_path_list[4], Apid.ACS, AcsId.MGM_0 ) - ) + + +def handle_set_mode_cmd( + q: DefaultPusQueueHelper, target_str: str, mode_str: str, apid: int, unique_id: int +): + if mode_str == "off": + q.add_log_cmd(f"Sending Mode OFF to {target_str}") + q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.OFF, 0)) + elif mode_str == "on": + q.add_log_cmd(f"Sending Mode ON to {target_str}") + q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.ON, 0)) + elif mode_str == "normal": + q.add_log_cmd(f"Sending Mode NORMAL to {target_str}") + q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.NORMAL, 0)) diff --git a/satrs-example/src/acs.rs b/satrs-example/src/acs.rs deleted file mode 100644 index d7add5e..0000000 --- a/satrs-example/src/acs.rs +++ /dev/null @@ -1,118 +0,0 @@ -use std::sync::mpsc::{self, TryRecvError}; - -use log::{info, warn}; -use satrs::pus::verification::VerificationReportingProvider; -use satrs::pus::{EcssTmSender, PusTmWrapper}; -use satrs::request::TargetAndApidId; -use satrs::spacepackets::ecss::hk::Subservice as HkSubservice; -use satrs::{ - hk::HkRequest, - spacepackets::{ - ecss::tm::{PusTmCreator, PusTmSecondaryHeader}, - time::cds::{CdsTime, DaysLen16Bits}, - SequenceFlags, SpHeader, - }, -}; -use satrs_example::config::{RequestTargetId, PUS_APID}; - -use crate::{ - hk::{AcsHkIds, HkUniqueId}, - requests::{Request, RequestWithToken}, - update_time, -}; - -pub struct AcsTask { - timestamp: [u8; 7], - time_provider: CdsTime, - verif_reporter: VerificationReporter, - tm_sender: Box, - request_rx: mpsc::Receiver, -} - -impl AcsTask { - pub fn new( - tm_sender: impl EcssTmSender, - request_rx: mpsc::Receiver, - verif_reporter: VerificationReporter, - ) -> Self { - Self { - timestamp: [0; 7], - time_provider: CdsTime::new_with_u16_days(0, 0), - verif_reporter, - tm_sender: Box::new(tm_sender), - request_rx, - } - } - - fn handle_hk_request(&mut self, target_id: u32, unique_id: u32) { - assert_eq!(target_id, RequestTargetId::AcsSubsystem as u32); - if unique_id == AcsHkIds::TestMgmSet as u32 { - let mut sp_header = SpHeader::tm(PUS_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); - let sec_header = PusTmSecondaryHeader::new_simple( - 3, - HkSubservice::TmHkPacket as u8, - &self.timestamp, - ); - let mut buf: [u8; 8] = [0; 8]; - let hk_id = HkUniqueId::new(target_id, unique_id); - hk_id.write_to_be_bytes(&mut buf).unwrap(); - let pus_tm = PusTmCreator::new(&mut sp_header, sec_header, &buf, true); - self.tm_sender - .send_tm(PusTmWrapper::Direct(pus_tm)) 
- .expect("Sending HK TM failed"); - } - // TODO: Verification failure for invalid unique IDs. - } - - pub fn try_reading_one_request(&mut self) -> bool { - match self.request_rx.try_recv() { - Ok(request) => { - info!( - "ACS thread: Received HK request {:?}", - request.targeted_request - ); - let target_and_apid_id = TargetAndApidId::from(request.targeted_request.target_id); - match request.targeted_request.request { - Request::Hk(hk_req) => match hk_req { - HkRequest::OneShot(unique_id) => { - self.handle_hk_request(target_and_apid_id.target(), unique_id) - } - HkRequest::Enable(_) => {} - HkRequest::Disable(_) => {} - HkRequest::ModifyCollectionInterval(_, _) => {} - }, - Request::Mode(_mode_req) => { - warn!("mode request handling not implemented yet") - } - Request::Action(_action_req) => { - warn!("action request handling not implemented yet") - } - } - let started_token = self - .verif_reporter - .start_success(request.token, &self.timestamp) - .expect("Sending start success failed"); - self.verif_reporter - .completion_success(started_token, &self.timestamp) - .expect("Sending completion success failed"); - true - } - Err(e) => match e { - TryRecvError::Empty => false, - TryRecvError::Disconnected => { - warn!("ACS thread: Message Queue TX disconnected!"); - false - } - }, - } - } - - pub fn periodic_operation(&mut self) { - update_time(&mut self.time_provider, &mut self.timestamp); - loop { - if !self.try_reading_one_request() { - break; - } - } - } -} diff --git a/satrs-example/src/acs/mgm.rs b/satrs-example/src/acs/mgm.rs new file mode 100644 index 0000000..1cb7eee --- /dev/null +++ b/satrs-example/src/acs/mgm.rs @@ -0,0 +1,284 @@ +use derive_new::new; +use satrs::hk::{HkRequest, HkRequestVariant}; +use satrs::queue::{GenericSendError, GenericTargetedMessagingError}; +use satrs::spacepackets::ecss::hk; +use satrs::spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader}; +use satrs::spacepackets::SpHeader; +use satrs_example::{DeviceMode, TimeStampHelper}; +use std::sync::mpsc::{self}; +use std::sync::{Arc, Mutex}; + +use satrs::mode::{ + ModeAndSubmode, ModeError, ModeProvider, ModeReply, ModeRequest, ModeRequestHandler, +}; +use satrs::pus::{EcssTmSenderCore, PusTmVariant}; +use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId}; +use satrs_example::config::components::PUS_MODE_SERVICE; + +use crate::pus::hk::{HkReply, HkReplyVariant}; +use crate::requests::CompositeRequest; + +use serde::{Deserialize, Serialize}; + +const GAUSS_TO_MICROTESLA_FACTOR: f32 = 100.0; +// This is the selected resoltion for the STM LIS3MDL device for the 4 Gauss sensitivity setting. 
+const FIELD_LSB_PER_GAUSS_4_SENS: f32 = 1.0 / 6842.0; + +pub trait SpiInterface { + type Error; + fn transfer(&mut self, tx: &[u8], rx: &mut [u8]) -> Result<(), Self::Error>; +} + +#[derive(Default)] +pub struct SpiDummyInterface { + pub dummy_val_0: i16, + pub dummy_val_1: i16, + pub dummy_val_2: i16, +} + +impl SpiInterface for SpiDummyInterface { + type Error = (); + + fn transfer(&mut self, _tx: &[u8], rx: &mut [u8]) -> Result<(), Self::Error> { + rx[0..2].copy_from_slice(&self.dummy_val_0.to_be_bytes()); + rx[2..4].copy_from_slice(&self.dummy_val_1.to_be_bytes()); + rx[4..6].copy_from_slice(&self.dummy_val_2.to_be_bytes()); + Ok(()) + } +} + +#[derive(Default, Debug, Copy, Clone, Serialize, Deserialize)] +pub struct MgmData { + pub valid: bool, + pub x: f32, + pub y: f32, + pub z: f32, +} + +pub struct MpscModeLeafInterface { + pub request_rx: mpsc::Receiver>, + pub reply_tx_to_pus: mpsc::Sender>, + pub reply_tx_to_parent: mpsc::Sender>, +} + +/// Example MGM device handler strongly based on the LIS3MDL MEMS device. +#[derive(new)] +#[allow(clippy::too_many_arguments)] +pub struct MgmHandlerLis3Mdl { + id: UniqueApidTargetId, + dev_str: &'static str, + mode_interface: MpscModeLeafInterface, + composite_request_receiver: mpsc::Receiver>, + hk_reply_sender: mpsc::Sender>, + tm_sender: TmSender, + com_interface: ComInterface, + shared_mgm_set: Arc>, + #[new(value = "ModeAndSubmode::new(satrs_example::DeviceMode::Off as u32, 0)")] + mode_and_submode: ModeAndSubmode, + #[new(default)] + tx_buf: [u8; 12], + #[new(default)] + rx_buf: [u8; 12], + #[new(default)] + tm_buf: [u8; 16], + #[new(default)] + stamp_helper: TimeStampHelper, +} + +impl + MgmHandlerLis3Mdl +{ + pub fn periodic_operation(&mut self) { + self.stamp_helper.update_from_now(); + // Handle requests. + self.handle_composite_requests(); + self.handle_mode_requests(); + if self.mode() == DeviceMode::Normal as u32 { + log::trace!("polling LIS3MDL sensor {}", self.dev_str); + // Communicate with the device. + let result = self.com_interface.transfer(&self.tx_buf, &mut self.rx_buf); + assert!(result.is_ok()); + // Actual data begins on the second byte, similarly to how a lot of SPI devices behave. + let x_raw = i16::from_be_bytes(self.rx_buf[1..3].try_into().unwrap()); + let y_raw = i16::from_be_bytes(self.rx_buf[3..5].try_into().unwrap()); + let z_raw = i16::from_be_bytes(self.rx_buf[5..7].try_into().unwrap()); + // Simple scaling to retrieve the float value, assuming a sensor resolution of + let mut mgm_guard = self.shared_mgm_set.lock().unwrap(); + mgm_guard.x = x_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS; + mgm_guard.y = y_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS; + mgm_guard.z = z_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS; + drop(mgm_guard); + } + } + + pub fn handle_composite_requests(&mut self) { + loop { + match self.composite_request_receiver.try_recv() { + Ok(ref msg) => match &msg.message { + CompositeRequest::Hk(hk_request) => { + self.handle_hk_request(&msg.requestor_info, hk_request) + } + // TODO: This object does not have actions (yet).. Still send back completion failure + // reply. 
+ CompositeRequest::Action(_action_req) => {} + }, + + Err(e) => { + if e != mpsc::TryRecvError::Empty { + log::warn!( + "{}: failed to receive composite request: {:?}", + self.dev_str, + e + ); + } else { + break; + } + } + } + } + } + + pub fn handle_hk_request(&mut self, requestor_info: &MessageMetadata, hk_request: &HkRequest) { + match hk_request.variant { + HkRequestVariant::OneShot => { + self.hk_reply_sender + .send(GenericMessage::new( + *requestor_info, + HkReply::new(hk_request.unique_id, HkReplyVariant::Ack), + )) + .expect("failed to send HK reply"); + let sec_header = PusTmSecondaryHeader::new( + 3, + hk::Subservice::TmHkPacket as u8, + 0, + 0, + self.stamp_helper.stamp(), + ); + let mgm_snapshot = *self.shared_mgm_set.lock().unwrap(); + // Use binary serialization here. We want the data to be tightly packed. + self.tm_buf[0] = mgm_snapshot.valid as u8; + self.tm_buf[1..5].copy_from_slice(&mgm_snapshot.x.to_be_bytes()); + self.tm_buf[5..9].copy_from_slice(&mgm_snapshot.y.to_be_bytes()); + self.tm_buf[9..13].copy_from_slice(&mgm_snapshot.z.to_be_bytes()); + let hk_tm = PusTmCreator::new( + SpHeader::new_from_apid(self.id.apid), + sec_header, + &self.tm_buf[0..12], + true, + ); + self.tm_sender + .send_tm(self.id.id(), PusTmVariant::Direct(hk_tm)) + .expect("failed to send HK TM"); + } + HkRequestVariant::EnablePeriodic => todo!(), + HkRequestVariant::DisablePeriodic => todo!(), + HkRequestVariant::ModifyCollectionInterval(_) => todo!(), + } + } + + pub fn handle_mode_requests(&mut self) { + loop { + // TODO: Only allow one set mode request per cycle? + match self.mode_interface.request_rx.try_recv() { + Ok(msg) => { + let result = self.handle_mode_request(msg); + // TODO: Trigger event? + if result.is_err() { + log::warn!( + "{}: mode request failed with error {:?}", + self.dev_str, + result.err().unwrap() + ); + } + } + Err(e) => { + if e != mpsc::TryRecvError::Empty { + log::warn!("{}: failed to receive mode request: {:?}", self.dev_str, e); + } else { + break; + } + } + } + } + } +} + +impl ModeProvider + for MgmHandlerLis3Mdl +{ + fn mode_and_submode(&self) -> ModeAndSubmode { + self.mode_and_submode + } +} + +impl ModeRequestHandler + for MgmHandlerLis3Mdl +{ + type Error = ModeError; + fn start_transition( + &mut self, + requestor: MessageMetadata, + mode_and_submode: ModeAndSubmode, + ) -> Result<(), satrs::mode::ModeError> { + log::info!( + "{}: transitioning to mode {:?}", + self.dev_str, + mode_and_submode + ); + self.mode_and_submode = mode_and_submode; + self.handle_mode_reached(Some(requestor))?; + Ok(()) + } + + fn announce_mode(&self, _requestor_info: Option, _recursive: bool) { + log::info!( + "{} announcing mode: {:?}", + self.dev_str, + self.mode_and_submode + ); + } + + fn handle_mode_reached( + &mut self, + requestor: Option, + ) -> Result<(), Self::Error> { + self.announce_mode(requestor, false); + if let Some(requestor) = requestor { + if requestor.sender_id() != PUS_MODE_SERVICE.id() { + log::warn!( + "can not send back mode reply to sender {}", + requestor.sender_id() + ); + } else { + self.send_mode_reply(requestor, ModeReply::ModeReply(self.mode_and_submode()))?; + } + } + Ok(()) + } + + fn send_mode_reply( + &self, + requestor: MessageMetadata, + reply: ModeReply, + ) -> Result<(), Self::Error> { + if requestor.sender_id() != PUS_MODE_SERVICE.id() { + log::warn!( + "can not send back mode reply to sender {}", + requestor.sender_id() + ); + } + self.mode_interface + .reply_tx_to_pus + .send(GenericMessage::new(requestor, reply)) + .map_err(|_| 
GenericTargetedMessagingError::Send(GenericSendError::RxDisconnected))?; + Ok(()) + } + + fn handle_mode_info( + &mut self, + _requestor_info: MessageMetadata, + _info: ModeAndSubmode, + ) -> Result<(), Self::Error> { + Ok(()) + } +} diff --git a/satrs-example/src/acs/mod.rs b/satrs-example/src/acs/mod.rs new file mode 100644 index 0000000..bd61e8b --- /dev/null +++ b/satrs-example/src/acs/mod.rs @@ -0,0 +1 @@ +pub mod mgm; diff --git a/satrs-example/src/bin/simpleclient.rs b/satrs-example/src/bin/simpleclient.rs index 04281ca..bbe7609 100644 --- a/satrs-example/src/bin/simpleclient.rs +++ b/satrs-example/src/bin/simpleclient.rs @@ -12,8 +12,7 @@ use std::time::Duration; fn main() { let mut buf = [0; 32]; let addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT); - let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap(); - let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); + let pus_tc = PusTcCreator::new_simple(SpHeader::new_from_apid(0x02), 17, 1, &[], true); let client = UdpSocket::bind("127.0.0.1:7302").expect("Connecting to UDP server failed"); let tc_req_id = RequestId::new(&pus_tc); println!("Packing and sending PUS ping command TC[17,1] with request ID {tc_req_id}"); diff --git a/satrs-example/src/ccsds.rs b/satrs-example/src/ccsds.rs index e61172e..1841d17 100644 --- a/satrs-example/src/ccsds.rs +++ b/satrs-example/src/ccsds.rs @@ -1,7 +1,9 @@ use satrs::pus::ReceivesEcssPusTc; use satrs::spacepackets::{CcsdsPacket, SpHeader}; use satrs::tmtc::{CcsdsPacketHandler, ReceivesCcsdsTc}; -use satrs_example::config::PUS_APID; +use satrs::ValidatorU16Id; +use satrs_example::config::components::Apid; +use satrs_example::config::APID_VALIDATOR; #[derive(Clone)] pub struct CcsdsReceiver< @@ -11,6 +13,16 @@ pub struct CcsdsReceiver< pub tc_source: TcSource, } +impl< + TcSource: ReceivesCcsdsTc + ReceivesEcssPusTc + Clone + 'static, + E: 'static, + > ValidatorU16Id for CcsdsReceiver +{ + fn validate(&self, apid: u16) -> bool { + APID_VALIDATOR.contains(&apid) + } +} + impl< TcSource: ReceivesCcsdsTc + ReceivesEcssPusTc + Clone + 'static, E: 'static, @@ -18,27 +30,24 @@ impl< { type Error = E; - fn valid_apids(&self) -> &'static [u16] { - &[PUS_APID] - } - - fn handle_known_apid( + fn handle_packet_with_valid_apid( &mut self, sp_header: &SpHeader, tc_raw: &[u8], ) -> Result<(), Self::Error> { - if sp_header.apid() == PUS_APID { + if sp_header.apid() == Apid::Cfdp as u16 { + } else { return self.tc_source.pass_ccsds(sp_header, tc_raw); } Ok(()) } - fn handle_unknown_apid( + fn handle_packet_with_unknown_apid( &mut self, sp_header: &SpHeader, _tc_raw: &[u8], ) -> Result<(), Self::Error> { - println!("Unknown APID 0x{:x?} detected", sp_header.apid()); + log::warn!("unknown APID 0x{:x?} detected", sp_header.apid()); Ok(()) } } diff --git a/satrs-example/src/config.rs b/satrs-example/src/config.rs index 9d04403..7e474e9 100644 --- a/satrs-example/src/config.rs +++ b/satrs-example/src/config.rs @@ -1,7 +1,12 @@ -use satrs::res_code::ResultU16; +use lazy_static::lazy_static; +use satrs::{ + res_code::ResultU16, + spacepackets::{PacketId, PacketType}, +}; use satrs_mib::res_code::ResultU16Info; use satrs_mib::resultcode; -use std::net::Ipv4Addr; +use std::{collections::HashSet, net::Ipv4Addr}; +use strum::IntoEnumIterator; use num_enum::{IntoPrimitive, TryFromPrimitive}; use satrs::{ @@ -9,8 +14,6 @@ use satrs::{ pool::{StaticMemoryPool, StaticPoolConfig}, }; -pub const PUS_APID: u16 = 0x02; - #[derive(Copy, Clone, PartialEq, Eq, Debug, TryFromPrimitive, IntoPrimitive)] 
#[repr(u8)] pub enum CustomPusServiceId { @@ -29,6 +32,7 @@ pub const AOCS_APID: u16 = 1; pub enum GroupId { Tmtc = 0, Hk = 1, + Mode = 2, } pub const OBSW_SERVER_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED; @@ -37,6 +41,23 @@ pub const SERVER_PORT: u16 = 7301; pub const TEST_EVENT: EventU32TypedSev = EventU32TypedSev::::const_new(0, 0); +lazy_static! { + pub static ref PACKET_ID_VALIDATOR: HashSet = { + let mut set = HashSet::new(); + for id in components::Apid::iter() { + set.insert(PacketId::new(PacketType::Tc, true, id as u16)); + } + set + }; + pub static ref APID_VALIDATOR: HashSet = { + let mut set = HashSet::new(); + for id in components::Apid::iter() { + set.insert(id as u16); + } + set + }; +} + pub mod tmtc_err { use super::*; @@ -53,6 +74,8 @@ pub mod tmtc_err { pub const UNKNOWN_TARGET_ID: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 4); #[resultcode] pub const ROUTING_ERROR: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 5); + #[resultcode(info = "Request timeout for targeted PUS request. P1: Request ID. P2: Target ID")] + pub const REQUEST_TIMEOUT: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 6); #[resultcode( info = "Not enough data inside the TC application data field. Optionally includes: \ @@ -92,27 +115,59 @@ pub mod hk_err { ]; } -#[allow(clippy::enum_variant_names)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub enum TmSenderId { - PusVerification = 0, - PusTest = 1, - PusEvent = 2, - PusHk = 3, - PusAction = 4, - PusSched = 5, - AllEvents = 6, - AcsSubsystem = 7, +pub mod mode_err { + use super::*; + + #[resultcode] + pub const WRONG_MODE: ResultU16 = ResultU16::new(GroupId::Mode as u8, 0); } -#[derive(Copy, Clone, PartialEq, Eq)] -pub enum TcReceiverId { - PusTest = 1, - PusEvent = 2, - PusHk = 3, - PusAction = 4, - PusSched = 5, +pub mod components { + use satrs::request::UniqueApidTargetId; + use strum::EnumIter; + + #[derive(Copy, Clone, PartialEq, Eq, EnumIter)] + pub enum Apid { + Sched = 1, + GenericPus = 2, + Acs = 3, + Cfdp = 4, + } + + // Component IDs for components with the PUS APID. 
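The validator sets defined earlier in this file are derived from the `Apid` enum via `strum`'s `EnumIter`. A reduced sketch showing just the APID set construction (enum variants mirror the ones in the `components` module below):

```rust
use std::collections::HashSet;
use strum::{EnumIter, IntoEnumIterator};

#[derive(Copy, Clone, PartialEq, Eq, EnumIter)]
pub enum Apid {
    Sched = 1,
    GenericPus = 2,
    Acs = 3,
    Cfdp = 4,
}

// Every enum variant becomes a valid APID; unknown APIDs are rejected by the receivers.
fn build_apid_validator() -> HashSet<u16> {
    Apid::iter().map(|apid| apid as u16).collect()
}

fn main() {
    let apid_validator = build_apid_validator();
    assert!(apid_validator.contains(&(Apid::GenericPus as u16)));
    assert!(!apid_validator.contains(&0x42));
}
```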
+ #[derive(Copy, Clone, PartialEq, Eq)] + pub enum PusId { + PusEventManagement = 0, + PusRouting = 1, + PusTest = 2, + PusAction = 3, + PusMode = 4, + PusHk = 5, + } + + #[derive(Copy, Clone, PartialEq, Eq)] + pub enum AcsId { + Mgm0 = 0, + } + + pub const PUS_ACTION_SERVICE: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusAction as u32); + pub const PUS_EVENT_MANAGEMENT: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::GenericPus as u16, 0); + pub const PUS_ROUTING_SERVICE: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusRouting as u32); + pub const PUS_TEST_SERVICE: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusTest as u32); + pub const PUS_MODE_SERVICE: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusMode as u32); + pub const PUS_HK_SERVICE: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusHk as u32); + pub const PUS_SCHED_SERVICE: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::Sched as u16, 0); + pub const MGM_HANDLER_0: UniqueApidTargetId = + UniqueApidTargetId::new(Apid::Acs as u16, AcsId::Mgm0 as u32); } + pub mod pool { use super::*; pub fn create_static_pools() -> (StaticMemoryPool, StaticMemoryPool) { diff --git a/satrs-example/src/events.rs b/satrs-example/src/events.rs index 4cb2cda..4d7ea9f 100644 --- a/satrs-example/src/events.rs +++ b/satrs-example/src/events.rs @@ -1,66 +1,87 @@ use std::sync::mpsc::{self}; +use crate::pus::create_verification_reporter; +use satrs::event_man::{EventMessageU32, EventRoutingError}; +use satrs::params::WritableToBeBytes; +use satrs::pus::event::EventTmHookProvider; +use satrs::pus::verification::VerificationReporter; +use satrs::pus::EcssTmSenderCore; +use satrs::request::UniqueApidTargetId; use satrs::{ event_man::{ EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded, MpscEventReceiver, }, - events::EventU32, - params::Params, pus::{ event_man::{ DefaultPusEventU32Dispatcher, EventReporter, EventRequest, EventRequestWithToken, }, verification::{TcStateStarted, VerificationReportingProvider, VerificationToken}, - EcssTmSender, }, - spacepackets::time::cds::{self, CdsTime}, + spacepackets::time::cds::CdsTime, }; -use satrs_example::config::PUS_APID; +use satrs_example::config::components::PUS_EVENT_MANAGEMENT; use crate::update_time; -pub struct PusEventHandler { +// This helper sets the APID of the event sender for the PUS telemetry. +#[derive(Default)] +pub struct EventApidSetter { + pub next_apid: u16, +} + +impl EventTmHookProvider for EventApidSetter { + fn modify_tm(&self, tm: &mut satrs::spacepackets::ecss::tm::PusTmCreator) { + tm.set_apid(self.next_apid); + } +} + +/// The PUS event handler subscribes for all events and converts them into ECSS PUS 5 event +/// packets. It also handles the verification completion of PUS event service requests. 
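Each component constant above combines an APID with a 32-bit unique ID into a `UniqueApidTargetId`, whose `id()`/`raw()` value serves as the `ComponentId` key in the request router maps. A small usage sketch (the literal values mirror `Apid::Acs` and `AcsId::Mgm0`):

```rust
use satrs::request::UniqueApidTargetId;

// Values mirror Apid::Acs (3) and AcsId::Mgm0 (0) from the components module.
pub const MGM_HANDLER_0: UniqueApidTargetId = UniqueApidTargetId::new(3, 0);

fn main() {
    // The u64 representation is what the request router maps and PUS services use as a key.
    println!("component ID: {}", MGM_HANDLER_0.id());
    println!(
        "APID: {}, unique ID: {}",
        MGM_HANDLER_0.apid, MGM_HANDLER_0.unique_id
    );
}
```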
+pub struct PusEventHandler { event_request_rx: mpsc::Receiver, pus_event_dispatcher: DefaultPusEventU32Dispatcher<()>, - pus_event_man_rx: mpsc::Receiver<(EventU32, Option)>, - tm_sender: Box, + pus_event_man_rx: mpsc::Receiver, + tm_sender: TmSender, time_provider: CdsTime, timestamp: [u8; 7], verif_handler: VerificationReporter, + event_apid_setter: EventApidSetter, } -/* -*/ -impl PusEventHandler { +impl PusEventHandler { pub fn new( + tm_sender: TmSender, verif_handler: VerificationReporter, event_manager: &mut EventManagerWithBoundedMpsc, event_request_rx: mpsc::Receiver, - tm_sender: impl EcssTmSender, ) -> Self { let event_queue_cap = 30; let (pus_event_man_tx, pus_event_man_rx) = mpsc::sync_channel(event_queue_cap); // All events sent to the manager are routed to the PUS event manager, which generates PUS event // telemetry for each event. - let event_reporter = EventReporter::new(PUS_APID, 128).unwrap(); + let event_reporter = EventReporter::new(PUS_EVENT_MANAGEMENT.raw(), 0, 0, 128).unwrap(); let pus_event_dispatcher = DefaultPusEventU32Dispatcher::new_with_default_backend(event_reporter); - let pus_event_man_send_provider = - EventU32SenderMpscBounded::new(1, pus_event_man_tx, event_queue_cap); + let pus_event_man_send_provider = EventU32SenderMpscBounded::new( + PUS_EVENT_MANAGEMENT.raw(), + pus_event_man_tx, + event_queue_cap, + ); - event_manager.subscribe_all(pus_event_man_send_provider.channel_id()); + event_manager.subscribe_all(pus_event_man_send_provider.target_id()); event_manager.add_sender(pus_event_man_send_provider); Self { event_request_rx, pus_event_dispatcher, pus_event_man_rx, - time_provider: cds::CdsTime::new_with_u16_days(0, 0), + time_provider: CdsTime::new_with_u16_days(0, 0), timestamp: [0; 7], verif_handler, - tm_sender: Box::new(tm_sender), + tm_sender, + event_apid_setter: EventApidSetter::default(), } } @@ -71,7 +92,7 @@ impl PusEventHandler PusEventHandler)>, + event_sender: mpsc::Sender, } impl EventManagerWrapper { @@ -121,14 +148,15 @@ impl EventManagerWrapper { // The sender handle is the primary sender handle for all components which want to create events. // The event manager will receive the RX handle to receive all the events. let (event_sender, event_man_rx) = mpsc::channel(); - let event_recv = MpscEventReceiver::::new(event_man_rx); + let event_recv = MpscEventReceiver::new(event_man_rx); Self { event_manager: EventManagerWithBoundedMpsc::new(event_recv), event_sender, } } - pub fn clone_event_sender(&self) -> mpsc::Sender<(EventU32, Option)> { + // Returns a cached event sender to send events to the event manager for routing. + pub fn clone_event_sender(&self) -> mpsc::Sender { self.event_sender.clone() } @@ -137,30 +165,34 @@ impl EventManagerWrapper { } pub fn try_event_routing(&mut self) { + let error_handler = |event_msg: &EventMessageU32, error: EventRoutingError| { + self.routing_error_handler(event_msg, error) + }; // Perform the event routing. 
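As the hunk below shows, event routing no longer aborts with a `Result`; routing problems are instead reported per event through a caller-provided callback. A stripped-down, standalone sketch of that callback pattern (all types here are hypothetical stand-ins, not satrs types):

```rust
#[derive(Debug)]
struct EventMessage {
    event: u32,
}

#[derive(Debug)]
enum EventRoutingError {
    NoSendersForEvent,
}

// Stand-in for the event manager: drains pending events and reports routing problems
// through the provided handler instead of returning an error for the whole call.
fn try_event_handling(mut error_handler: impl FnMut(&EventMessage, EventRoutingError)) {
    let pending = [EventMessage { event: 42 }];
    for event_msg in &pending {
        // Pretend no subscriber was found for this event.
        error_handler(event_msg, EventRoutingError::NoSendersForEvent);
    }
}

fn main() {
    try_event_handling(|event_msg, error| {
        eprintln!("event routing error for event {event_msg:?}: {error:?}");
    });
}
```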
- self.event_manager - .try_event_handling() - .expect("event handling failed"); + self.event_manager.try_event_handling(error_handler); + } + + pub fn routing_error_handler(&self, event_msg: &EventMessageU32, error: EventRoutingError) { + log::warn!("event routing error for event {event_msg:?}: {error:?}"); } } -pub struct EventHandler { +pub struct EventHandler { pub event_man_wrapper: EventManagerWrapper, - pub pus_event_handler: PusEventHandler, + pub pus_event_handler: PusEventHandler, } -impl EventHandler { +impl EventHandler { pub fn new( - tm_sender: impl EcssTmSender, - verif_handler: VerificationReporter, + tm_sender: TmSender, event_request_rx: mpsc::Receiver, ) -> Self { let mut event_man_wrapper = EventManagerWrapper::new(); let pus_event_handler = PusEventHandler::new( - verif_handler, + tm_sender, + create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid), event_man_wrapper.event_manager(), event_request_rx, - tm_sender, ); Self { event_man_wrapper, @@ -168,7 +200,7 @@ impl EventHandler mpsc::Sender<(EventU32, Option)> { + pub fn clone_event_sender(&self) -> mpsc::Sender { self.event_man_wrapper.clone_event_sender() } diff --git a/satrs-example/src/hk.rs b/satrs-example/src/hk.rs index 3147cbf..0852d04 100644 --- a/satrs-example/src/hk.rs +++ b/satrs-example/src/hk.rs @@ -1,27 +1,25 @@ use derive_new::new; +use satrs::hk::UniqueId; +use satrs::request::UniqueApidTargetId; use satrs::spacepackets::ByteConversionError; -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum AcsHkIds { - TestMgmSet = 1, -} - #[derive(Debug, new, Copy, Clone)] pub struct HkUniqueId { - target_id: u32, - set_id: u32, + target_id: UniqueApidTargetId, + set_id: UniqueId, } impl HkUniqueId { #[allow(dead_code)] - pub fn target_id(&self) -> u32 { + pub fn target_id(&self) -> UniqueApidTargetId { self.target_id } #[allow(dead_code)] - pub fn set_id(&self) -> u32 { + pub fn set_id(&self) -> UniqueId { self.set_id } + #[allow(dead_code)] pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result { if buf.len() < 8 { return Err(ByteConversionError::ToSliceTooSmall { @@ -29,7 +27,7 @@ impl HkUniqueId { expected: 8, }); } - buf[0..4].copy_from_slice(&self.target_id.to_be_bytes()); + buf[0..4].copy_from_slice(&self.target_id.unique_id.to_be_bytes()); buf[4..8].copy_from_slice(&self.set_id.to_be_bytes()); Ok(8) diff --git a/satrs-example/src/lib.rs b/satrs-example/src/lib.rs index ef68c36..a224fe5 100644 --- a/satrs-example/src/lib.rs +++ b/satrs-example/src/lib.rs @@ -1 +1,39 @@ +use satrs::spacepackets::time::{cds::CdsTime, TimeWriter}; + pub mod config; + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub enum DeviceMode { + Off = 0, + On = 1, + Normal = 2, +} + +pub struct TimeStampHelper { + stamper: CdsTime, + time_stamp: [u8; 7], +} + +impl TimeStampHelper { + pub fn stamp(&self) -> &[u8] { + &self.time_stamp + } + + pub fn update_from_now(&mut self) { + self.stamper + .update_from_now() + .expect("Updating timestamp failed"); + self.stamper + .write_to_bytes(&mut self.time_stamp) + .expect("Writing timestamp failed"); + } +} + +impl Default for TimeStampHelper { + fn default() -> Self { + Self { + stamper: CdsTime::now_with_u16_days().expect("creating time stamper failed"), + time_stamp: Default::default(), + } + } +} diff --git a/satrs-example/src/main.rs b/satrs-example/src/main.rs index b4cbe74..a6456d6 100644 --- a/satrs-example/src/main.rs +++ b/satrs-example/src/main.rs @@ -17,52 +17,44 @@ use log::info; use pus::test::create_test_service_dynamic; use 
satrs::hal::std::tcp_server::ServerConfig; use satrs::hal::std::udp_server::UdpTcServer; -use satrs::request::TargetAndApidId; +use satrs::request::GenericMessage; use satrs::tmtc::tm_helper::SharedTmPool; use satrs_example::config::pool::{create_sched_tc_pool, create_static_pools}; use satrs_example::config::tasks::{ FREQ_MS_AOCS, FREQ_MS_EVENT_HANDLING, FREQ_MS_PUS_STACK, FREQ_MS_UDP_TMTC, }; -use satrs_example::config::{RequestTargetId, TmSenderId, OBSW_SERVER_ADDR, PUS_APID, SERVER_PORT}; +use satrs_example::config::{OBSW_SERVER_ADDR, PACKET_ID_VALIDATOR, SERVER_PORT}; use tmtc::PusTcSourceProviderDynamic; use udp::DynamicUdpTmHandler; -use crate::acs::AcsTask; +use crate::acs::mgm::{MgmHandlerLis3Mdl, MpscModeLeafInterface, SpiDummyInterface}; use crate::ccsds::CcsdsReceiver; use crate::logger::setup_logger; use crate::pus::action::{create_action_service_dynamic, create_action_service_static}; use crate::pus::event::{create_event_service_dynamic, create_event_service_static}; use crate::pus::hk::{create_hk_service_dynamic, create_hk_service_static}; +use crate::pus::mode::{create_mode_service_dynamic, create_mode_service_static}; use crate::pus::scheduler::{create_scheduler_service_dynamic, create_scheduler_service_static}; use crate::pus::test::create_test_service_static; use crate::pus::{PusReceiver, PusTcMpscRouter}; -use crate::requests::{GenericRequestRouter, RequestWithToken}; +use crate::requests::{CompositeRequest, GenericRequestRouter}; use crate::tcp::{SyncTcpTmSource, TcpTask}; use crate::tmtc::{ PusTcSourceProviderSharedPool, SharedTcPool, TcSourceTaskDynamic, TcSourceTaskStatic, }; use crate::udp::{StaticUdpTmHandler, UdpTmtcServer}; +use satrs::mode::ModeRequest; use satrs::pus::event_man::EventRequestWithToken; -use satrs::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender}; -use satrs::pus::{EcssTmSender, TmAsVecSenderWithId, TmInSharedPoolSenderWithId}; +use satrs::pus::TmInSharedPoolSender; use satrs::spacepackets::{time::cds::CdsTime, time::TimeWriter}; use satrs::tmtc::CcsdsDistributor; -use satrs::ChannelId; +use satrs_example::config::components::MGM_HANDLER_0; use std::net::{IpAddr, SocketAddr}; -use std::sync::mpsc::{self, channel}; +use std::sync::mpsc; use std::sync::{Arc, RwLock}; use std::thread; use std::time::Duration; -fn create_verification_reporter( - verif_sender: Sender, -) -> VerificationReporterWithSender { - let verif_cfg = VerificationReporterCfg::new(PUS_APID, 1, 2, 8).unwrap(); - // Every software component which needs to generate verification telemetry, gets a cloned - // verification reporter. - VerificationReporterWithSender::new(&verif_cfg, verif_sender) -} - #[allow(dead_code)] fn static_tmtc_pool_main() { let (tm_pool, tc_pool) = create_static_pools(); @@ -74,20 +66,21 @@ fn static_tmtc_pool_main() { let (tm_funnel_tx, tm_funnel_rx) = mpsc::sync_channel(50); let (tm_server_tx, tm_server_rx) = mpsc::sync_channel(50); - // Every software component which needs to generate verification telemetry, receives a cloned - // verification reporter. 
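The hunks below replace the single ACS request channel with a generic router: each targetable component registers one sender per request category, keyed by its component ID. A simplified, self-contained sketch of that lookup-and-forward pattern (stand-in types, not the actual `GenericRequestRouter` or `CompositeRequest`):

```rust
use std::collections::HashMap;
use std::sync::mpsc;

type ComponentId = u64;

// Stand-in for the composite request type used in the example application.
#[derive(Debug)]
enum Request {
    HkOneShot { unique_id: u32 },
}

fn main() {
    const MGM_HANDLER_0: ComponentId = 0x0003_0000_0000; // arbitrary ID for the sketch
    let (mgm_request_tx, mgm_request_rx) = mpsc::channel::<Request>();

    let mut composite_router_map: HashMap<ComponentId, mpsc::Sender<Request>> = HashMap::new();
    composite_router_map.insert(MGM_HANDLER_0, mgm_request_tx);

    // A PUS service converts a TC into a request and forwards it to the target component.
    composite_router_map
        .get(&MGM_HANDLER_0)
        .expect("unknown target ID")
        .send(Request::HkOneShot { unique_id: 1 })
        .unwrap();

    // The device handler drains its request queue in its periodic operation.
    assert!(matches!(
        mgm_request_rx.try_recv().unwrap(),
        Request::HkOneShot { unique_id: 1 }
    ));
}
```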
- let verif_reporter = create_verification_reporter(TmInSharedPoolSenderWithId::new( - TmSenderId::PusVerification as ChannelId, - "verif_sender", - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - )); + let tm_funnel_tx_sender = + TmInSharedPoolSender::new(shared_tm_pool.clone(), tm_funnel_tx.clone()); + + let (mgm_handler_composite_tx, mgm_handler_composite_rx) = + mpsc::channel::>(); + let (mgm_handler_mode_tx, mgm_handler_mode_rx) = mpsc::channel::>(); - let acs_target_id = TargetAndApidId::new(PUS_APID, RequestTargetId::AcsSubsystem as u32); - let (acs_thread_tx, acs_thread_rx) = channel::(); // Some request are targetable. This map is used to retrieve sender handles based on a target ID. let mut request_map = GenericRequestRouter::default(); - request_map.0.insert(acs_target_id.into(), acs_thread_tx); + request_map + .composite_router_map + .insert(MGM_HANDLER_0.id(), mgm_handler_composite_tx); + request_map + .mode_router_map + .insert(MGM_HANDLER_0.id(), mgm_handler_mode_tx); // This helper structure is used by all telecommand providers which need to send telecommands // to the TC source. @@ -103,82 +96,80 @@ fn static_tmtc_pool_main() { // The event task is the core handler to perform the event routing and TM handling as specified // in the sat-rs documentation. - let mut event_handler = EventHandler::new( - TmInSharedPoolSenderWithId::new( - TmSenderId::AllEvents as ChannelId, - "ALL_EVENTS_TX", - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - ), - verif_reporter.clone(), - event_request_rx, - ); + let mut event_handler = EventHandler::new(tm_funnel_tx.clone(), event_request_rx); + + let (pus_test_tx, pus_test_rx) = mpsc::channel(); + let (pus_event_tx, pus_event_rx) = mpsc::channel(); + let (pus_sched_tx, pus_sched_rx) = mpsc::channel(); + let (pus_hk_tx, pus_hk_rx) = mpsc::channel(); + let (pus_action_tx, pus_action_rx) = mpsc::channel(); + let (pus_mode_tx, pus_mode_rx) = mpsc::channel(); + + let (_pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel(); + let (pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::channel(); + let (pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::channel(); - let (pus_test_tx, pus_test_rx) = channel(); - let (pus_event_tx, pus_event_rx) = channel(); - let (pus_sched_tx, pus_sched_rx) = channel(); - let (pus_hk_tx, pus_hk_rx) = channel(); - let (pus_action_tx, pus_action_rx) = channel(); let pus_router = PusTcMpscRouter { - test_service_receiver: pus_test_tx, - event_service_receiver: pus_event_tx, - sched_service_receiver: pus_sched_tx, - hk_service_receiver: pus_hk_tx, - action_service_receiver: pus_action_tx, + test_tc_sender: pus_test_tx, + event_tc_sender: pus_event_tx, + sched_tc_sender: pus_sched_tx, + hk_tc_sender: pus_hk_tx, + action_tc_sender: pus_action_tx, + mode_tc_sender: pus_mode_tx, }; let pus_test_service = create_test_service_static( - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - verif_reporter.clone(), + tm_funnel_tx_sender.clone(), shared_tc_pool.pool.clone(), event_handler.clone_event_sender(), pus_test_rx, ); let pus_scheduler_service = create_scheduler_service_static( - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - verif_reporter.clone(), + tm_funnel_tx_sender.clone(), tc_source.clone(), pus_sched_rx, create_sched_tc_pool(), ); let pus_event_service = create_event_service_static( - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - verif_reporter.clone(), + tm_funnel_tx_sender.clone(), shared_tc_pool.pool.clone(), pus_event_rx, event_request_tx, ); let pus_action_service = create_action_service_static( - 
shared_tm_pool.clone(), - tm_funnel_tx.clone(), - verif_reporter.clone(), + tm_funnel_tx_sender.clone(), shared_tc_pool.pool.clone(), pus_action_rx, request_map.clone(), + pus_action_reply_rx, ); let pus_hk_service = create_hk_service_static( - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - verif_reporter.clone(), + tm_funnel_tx_sender.clone(), shared_tc_pool.pool.clone(), pus_hk_rx, + request_map.clone(), + pus_hk_reply_rx, + ); + let pus_mode_service = create_mode_service_static( + tm_funnel_tx_sender.clone(), + shared_tc_pool.pool.clone(), + pus_mode_rx, request_map, + pus_mode_reply_rx, ); let mut pus_stack = PusStack::new( + pus_test_service, pus_hk_service, pus_event_service, pus_action_service, pus_scheduler_service, - pus_test_service, + pus_mode_service, ); let ccsds_receiver = CcsdsReceiver { tc_source }; let mut tmtc_task = TcSourceTaskStatic::new( shared_tc_pool.clone(), tc_source_rx, - PusReceiver::new(verif_reporter.clone(), pus_router), + PusReceiver::new(tm_funnel_tx_sender, pus_router), ); let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT); @@ -200,20 +191,10 @@ fn static_tmtc_pool_main() { tcp_server_cfg, sync_tm_tcp_source.clone(), tcp_ccsds_distributor, + PACKET_ID_VALIDATOR.clone(), ) .expect("tcp server creation failed"); - let mut acs_task = AcsTask::new( - TmInSharedPoolSenderWithId::new( - TmSenderId::AcsSubsystem as ChannelId, - "ACS_TASK_SENDER", - shared_tm_pool.clone(), - tm_funnel_tx.clone(), - ), - acs_thread_rx, - verif_reporter, - ); - let mut tm_funnel = TmFunnelStatic::new( shared_tm_pool, sync_tm_tcp_source, @@ -221,6 +202,27 @@ fn static_tmtc_pool_main() { tm_server_tx, ); + let (mgm_handler_mode_reply_to_parent_tx, _mgm_handler_mode_reply_to_parent_rx) = + mpsc::channel(); + + let dummy_spi_interface = SpiDummyInterface::default(); + let shared_mgm_set = Arc::default(); + let mode_leaf_interface = MpscModeLeafInterface { + request_rx: mgm_handler_mode_rx, + reply_tx_to_pus: pus_mode_reply_tx, + reply_tx_to_parent: mgm_handler_mode_reply_to_parent_tx, + }; + let mut mgm_handler = MgmHandlerLis3Mdl::new( + MGM_HANDLER_0, + "MGM_0", + mode_leaf_interface, + mgm_handler_composite_rx, + pus_hk_reply_tx, + tm_funnel_tx, + dummy_spi_interface, + shared_mgm_set, + ); + info!("Starting TMTC and UDP task"); let jh_udp_tmtc = thread::Builder::new() .name("TMTC and UDP".to_string()) @@ -266,7 +268,7 @@ fn static_tmtc_pool_main() { let jh_aocs = thread::Builder::new() .name("AOCS".to_string()) .spawn(move || loop { - acs_task.periodic_operation(); + mgm_handler.periodic_operation(); thread::sleep(Duration::from_millis(FREQ_MS_AOCS)); }) .unwrap(); @@ -300,22 +302,23 @@ fn static_tmtc_pool_main() { #[allow(dead_code)] fn dyn_tmtc_pool_main() { - let (tc_source_tx, tc_source_rx) = channel(); - let (tm_funnel_tx, tm_funnel_rx) = channel(); - let (tm_server_tx, tm_server_rx) = channel(); - // Every software component which needs to generate verification telemetry, gets a cloned - // verification reporter. - let verif_reporter = create_verification_reporter(TmAsVecSenderWithId::new( - TmSenderId::PusVerification as ChannelId, - "verif_sender", - tm_funnel_tx.clone(), - )); + let (tc_source_tx, tc_source_rx) = mpsc::channel(); + let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel(); + let (tm_server_tx, tm_server_rx) = mpsc::channel(); + + // Some request are targetable. This map is used to retrieve sender handles based on a target ID. 
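The threads spawned above all follow the same periodic task pattern: each component runs its `periodic_operation` in a loop with a fixed sleep. A minimal sketch of that pattern (the handler stub and the cycle time value are assumptions for illustration only):

```rust
use std::thread;
use std::time::Duration;

// Stand-in for the MGM handler; the real handler owns its channels and SPI interface.
struct MgmHandlerStub;

impl MgmHandlerStub {
    fn periodic_operation(&mut self) {
        // Request handling and device polling would go here.
    }
}

fn main() {
    // Assumed cycle time; the example application uses FREQ_MS_AOCS from its task config.
    const FREQ_MS_AOCS: u64 = 500;
    let mut mgm_handler = MgmHandlerStub;
    let _jh_aocs = thread::Builder::new()
        .name("AOCS".to_string())
        .spawn(move || loop {
            mgm_handler.periodic_operation();
            thread::sleep(Duration::from_millis(FREQ_MS_AOCS));
        })
        .unwrap();
    // The real application collects the join handles and joins all tasks at the end of main.
    thread::sleep(Duration::from_millis(2 * FREQ_MS_AOCS));
}
```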
+ let (mgm_handler_composite_tx, mgm_handler_composite_rx) = + mpsc::channel::>(); + let (mgm_handler_mode_tx, mgm_handler_mode_rx) = mpsc::channel::>(); - let acs_target_id = TargetAndApidId::new(PUS_APID, RequestTargetId::AcsSubsystem as u32); - let (acs_thread_tx, acs_thread_rx) = channel::(); // Some request are targetable. This map is used to retrieve sender handles based on a target ID. let mut request_map = GenericRequestRouter::default(); - request_map.0.insert(acs_target_id.into(), acs_thread_tx); + request_map + .composite_router_map + .insert(MGM_HANDLER_0.raw(), mgm_handler_composite_tx); + request_map + .mode_router_map + .insert(MGM_HANDLER_0.raw(), mgm_handler_mode_tx); let tc_source = PusTcSourceProviderDynamic(tc_source_tx); @@ -325,74 +328,74 @@ fn dyn_tmtc_pool_main() { let (event_request_tx, event_request_rx) = mpsc::channel::(); // The event task is the core handler to perform the event routing and TM handling as specified // in the sat-rs documentation. - let mut event_handler = EventHandler::new( - TmAsVecSenderWithId::new( - TmSenderId::AllEvents as ChannelId, - "ALL_EVENTS_TX", - tm_funnel_tx.clone(), - ), - verif_reporter.clone(), - event_request_rx, - ); + let mut event_handler = EventHandler::new(tm_funnel_tx.clone(), event_request_rx); + + let (pus_test_tx, pus_test_rx) = mpsc::channel(); + let (pus_event_tx, pus_event_rx) = mpsc::channel(); + let (pus_sched_tx, pus_sched_rx) = mpsc::channel(); + let (pus_hk_tx, pus_hk_rx) = mpsc::channel(); + let (pus_action_tx, pus_action_rx) = mpsc::channel(); + let (pus_mode_tx, pus_mode_rx) = mpsc::channel(); + + let (_pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel(); + let (pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::channel(); + let (pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::channel(); - let (pus_test_tx, pus_test_rx) = channel(); - let (pus_event_tx, pus_event_rx) = channel(); - let (pus_sched_tx, pus_sched_rx) = channel(); - let (pus_hk_tx, pus_hk_rx) = channel(); - let (pus_action_tx, pus_action_rx) = channel(); let pus_router = PusTcMpscRouter { - test_service_receiver: pus_test_tx, - event_service_receiver: pus_event_tx, - sched_service_receiver: pus_sched_tx, - hk_service_receiver: pus_hk_tx, - action_service_receiver: pus_action_tx, + test_tc_sender: pus_test_tx, + event_tc_sender: pus_event_tx, + sched_tc_sender: pus_sched_tx, + hk_tc_sender: pus_hk_tx, + action_tc_sender: pus_action_tx, + mode_tc_sender: pus_mode_tx, }; let pus_test_service = create_test_service_dynamic( tm_funnel_tx.clone(), - verif_reporter.clone(), event_handler.clone_event_sender(), pus_test_rx, ); let pus_scheduler_service = create_scheduler_service_dynamic( tm_funnel_tx.clone(), - verif_reporter.clone(), tc_source.0.clone(), pus_sched_rx, create_sched_tc_pool(), ); - let pus_event_service = create_event_service_dynamic( - tm_funnel_tx.clone(), - verif_reporter.clone(), - pus_event_rx, - event_request_tx, - ); + let pus_event_service = + create_event_service_dynamic(tm_funnel_tx.clone(), pus_event_rx, event_request_tx); let pus_action_service = create_action_service_dynamic( tm_funnel_tx.clone(), - verif_reporter.clone(), pus_action_rx, request_map.clone(), + pus_action_reply_rx, ); let pus_hk_service = create_hk_service_dynamic( tm_funnel_tx.clone(), - verif_reporter.clone(), pus_hk_rx, + request_map.clone(), + pus_hk_reply_rx, + ); + let pus_mode_service = create_mode_service_dynamic( + tm_funnel_tx.clone(), + pus_mode_rx, request_map, + pus_mode_reply_rx, ); let mut pus_stack = PusStack::new( + pus_test_service, 
pus_hk_service, pus_event_service, pus_action_service, pus_scheduler_service, - pus_test_service, + pus_mode_service, ); let ccsds_receiver = CcsdsReceiver { tc_source }; let mut tmtc_task = TcSourceTaskDynamic::new( tc_source_rx, - PusReceiver::new(verif_reporter.clone(), pus_router), + PusReceiver::new(tm_funnel_tx.clone(), pus_router), ); let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT); @@ -413,20 +416,32 @@ fn dyn_tmtc_pool_main() { tcp_server_cfg, sync_tm_tcp_source.clone(), tcp_ccsds_distributor, + PACKET_ID_VALIDATOR.clone(), ) .expect("tcp server creation failed"); - let mut acs_task = AcsTask::new( - TmAsVecSenderWithId::new( - TmSenderId::AcsSubsystem as ChannelId, - "ACS_TASK_SENDER", - tm_funnel_tx.clone(), - ), - acs_thread_rx, - verif_reporter, - ); let mut tm_funnel = TmFunnelDynamic::new(sync_tm_tcp_source, tm_funnel_rx, tm_server_tx); + let (mgm_handler_mode_reply_to_parent_tx, _mgm_handler_mode_reply_to_parent_rx) = + mpsc::channel(); + let dummy_spi_interface = SpiDummyInterface::default(); + let shared_mgm_set = Arc::default(); + let mode_leaf_interface = MpscModeLeafInterface { + request_rx: mgm_handler_mode_rx, + reply_tx_to_pus: pus_mode_reply_tx, + reply_tx_to_parent: mgm_handler_mode_reply_to_parent_tx, + }; + let mut mgm_handler = MgmHandlerLis3Mdl::new( + MGM_HANDLER_0, + "MGM_0", + mode_leaf_interface, + mgm_handler_composite_rx, + pus_hk_reply_tx, + tm_funnel_tx, + dummy_spi_interface, + shared_mgm_set, + ); + info!("Starting TMTC and UDP task"); let jh_udp_tmtc = thread::Builder::new() .name("TMTC and UDP".to_string()) @@ -472,7 +487,7 @@ fn dyn_tmtc_pool_main() { let jh_aocs = thread::Builder::new() .name("AOCS".to_string()) .spawn(move || loop { - acs_task.periodic_operation(); + mgm_handler.periodic_operation(); thread::sleep(Duration::from_millis(FREQ_MS_AOCS)); }) .unwrap(); diff --git a/satrs-example/src/pus/action.rs b/satrs-example/src/pus/action.rs index ef23786..22b6b93 100644 --- a/satrs-example/src/pus/action.rs +++ b/satrs-example/src/pus/action.rs @@ -1,181 +1,274 @@ use log::{error, warn}; -use satrs::action::ActionRequest; -use satrs::pool::{SharedStaticMemoryPool, StoreAddr}; -use satrs::pus::action::{PusActionToRequestConverter, PusService8ActionHandler}; -use satrs::pus::verification::std_mod::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, +use satrs::action::{ActionRequest, ActionRequestVariant}; +use satrs::params::WritableToBeBytes; +use satrs::pool::SharedStaticMemoryPool; +use satrs::pus::action::{ + ActionReplyVariant, ActivePusActionRequestStd, DefaultActiveActionRequestMap, PusActionReply, }; use satrs::pus::verification::{ - FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken, + FailParams, FailParamsWithStep, TcStateAccepted, TcStateStarted, VerificationReporter, + VerificationReportingProvider, VerificationToken, }; use satrs::pus::{ - EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, - EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, - PusPacketHandlingError, PusServiceHelper, TmAsVecSenderWithId, TmAsVecSenderWithMpsc, - TmInSharedPoolSenderWithBoundedMpsc, TmInSharedPoolSenderWithId, + ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, + EcssTcInVecConverter, EcssTmSenderCore, EcssTmtcError, GenericConversionError, MpscTcReceiver, + MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, 
PusReplyHandler, + PusServiceHelper, PusTcToRequestConverter, PusTmAsVec, PusTmInPool, TmInSharedPoolSender, }; -use satrs::request::TargetAndApidId; +use satrs::request::{GenericMessage, UniqueApidTargetId}; use satrs::spacepackets::ecss::tc::PusTcReader; -use satrs::spacepackets::ecss::PusPacket; -use satrs::tmtc::tm_helper::SharedTmPool; -use satrs::{ChannelId, TargetId}; -use satrs_example::config::{tmtc_err, TcReceiverId, TmSenderId, PUS_APID}; -use std::sync::mpsc::{self}; +use satrs::spacepackets::ecss::{EcssEnumU16, PusPacket}; +use satrs_example::config::components::PUS_ACTION_SERVICE; +use satrs_example::config::tmtc_err; +use std::sync::mpsc; +use std::time::Duration; use crate::requests::GenericRequestRouter; -use super::GenericRoutingErrorHandler; +use super::{ + create_verification_reporter, generic_pus_request_timeout_handler, HandlingStatus, + PusTargetedRequestService, TargetedPusService, +}; + +pub struct ActionReplyHandler { + fail_data_buf: [u8; 128], +} + +impl Default for ActionReplyHandler { + fn default() -> Self { + Self { + fail_data_buf: [0; 128], + } + } +} + +impl PusReplyHandler for ActionReplyHandler { + type Error = EcssTmtcError; + + fn handle_unrequested_reply( + &mut self, + reply: &GenericMessage, + _tm_sender: &impl EcssTmSenderCore, + ) -> Result<(), Self::Error> { + warn!("received unexpected reply for service 8: {reply:?}"); + Ok(()) + } + + fn handle_reply( + &mut self, + reply: &GenericMessage, + active_request: &ActivePusActionRequestStd, + tm_sender: &(impl EcssTmSenderCore + ?Sized), + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result { + let verif_token: VerificationToken = active_request + .token() + .try_into() + .expect("invalid token state"); + let remove_entry = match &reply.message.variant { + ActionReplyVariant::CompletionFailed { error_code, params } => { + let mut fail_data_len = 0; + if let Some(params) = params { + fail_data_len = params.write_to_be_bytes(&mut self.fail_data_buf)?; + } + verification_handler.completion_failure( + tm_sender, + verif_token, + FailParams::new(time_stamp, error_code, &self.fail_data_buf[..fail_data_len]), + )?; + true + } + ActionReplyVariant::StepFailed { + error_code, + step, + params, + } => { + let mut fail_data_len = 0; + if let Some(params) = params { + fail_data_len = params.write_to_be_bytes(&mut self.fail_data_buf)?; + } + verification_handler.step_failure( + tm_sender, + verif_token, + FailParamsWithStep::new( + time_stamp, + &EcssEnumU16::new(*step), + error_code, + &self.fail_data_buf[..fail_data_len], + ), + )?; + true + } + ActionReplyVariant::Completed => { + verification_handler.completion_success(tm_sender, verif_token, time_stamp)?; + true + } + ActionReplyVariant::StepSuccess { step } => { + verification_handler.step_success( + tm_sender, + &verif_token, + time_stamp, + EcssEnumU16::new(*step), + )?; + false + } + _ => false, + }; + Ok(remove_entry) + } + + fn handle_request_timeout( + &mut self, + active_request: &ActivePusActionRequestStd, + tm_sender: &impl EcssTmSenderCore, + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result<(), Self::Error> { + generic_pus_request_timeout_handler( + tm_sender, + active_request, + verification_handler, + time_stamp, + "action", + ) + } +} #[derive(Default)] -pub struct ExampleActionRequestConverter {} +pub struct ActionRequestConverter {} -impl PusActionToRequestConverter for ExampleActionRequestConverter { - type Error = PusPacketHandlingError; +impl 
PusTcToRequestConverter for ActionRequestConverter { + type Error = GenericConversionError; fn convert( &mut self, token: VerificationToken, tc: &PusTcReader, - time_stamp: &[u8], + tm_sender: &(impl EcssTmSenderCore + ?Sized), verif_reporter: &impl VerificationReportingProvider, - ) -> Result<(TargetId, ActionRequest), Self::Error> { + time_stamp: &[u8], + ) -> Result<(ActivePusActionRequestStd, ActionRequest), Self::Error> { let subservice = tc.subservice(); let user_data = tc.user_data(); if user_data.len() < 8 { verif_reporter .start_failure( + tm_sender, token, FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA), ) .expect("Sending start failure failed"); - return Err(PusPacketHandlingError::NotEnoughAppData { + return Err(GenericConversionError::NotEnoughAppData { expected: 8, found: user_data.len(), }); } - let target_id = TargetAndApidId::from_pus_tc(tc).unwrap(); + let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).unwrap(); let action_id = u32::from_be_bytes(user_data[4..8].try_into().unwrap()); if subservice == 128 { + let req_variant = if user_data.len() == 8 { + ActionRequestVariant::NoData + } else { + ActionRequestVariant::VecData(user_data[8..].to_vec()) + }; Ok(( - target_id.raw(), - ActionRequest::UnsignedIdAndVecData { + ActivePusActionRequestStd::new( action_id, - data: user_data[8..].to_vec(), - }, + target_id_and_apid.into(), + token.into(), + Duration::from_secs(30), + ), + ActionRequest::new(action_id, req_variant), )) } else { verif_reporter .start_failure( + tm_sender, token, FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE), ) .expect("Sending start failure failed"); - Err(PusPacketHandlingError::InvalidSubservice(subservice)) + Err(GenericConversionError::InvalidSubservice(subservice)) } } } pub fn create_action_service_static( - shared_tm_store: SharedTmPool, - tm_funnel_tx: mpsc::SyncSender, - verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender, + tm_sender: TmInSharedPoolSender>, tc_pool: SharedStaticMemoryPool, pus_action_rx: mpsc::Receiver, action_router: GenericRequestRouter, -) -> Pus8Wrapper< - MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, - EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, -> { - let action_srv_tm_sender = TmInSharedPoolSenderWithId::new( - TmSenderId::PusAction as ChannelId, - "PUS_8_TM_SENDER", - shared_tm_store.clone(), - tm_funnel_tx.clone(), - ); - let action_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusAction as ChannelId, - "PUS_8_TC_RECV", - pus_action_rx, - ); - let pus_8_handler = PusService8ActionHandler::new( + reply_receiver: mpsc::Receiver>, +) -> ActionServiceWrapper { + let action_request_handler = PusTargetedRequestService::new( PusServiceHelper::new( - action_srv_receiver, - action_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_ACTION_SERVICE.id(), + pus_action_rx, + tm_sender, + create_verification_reporter(PUS_ACTION_SERVICE.id(), PUS_ACTION_SERVICE.apid), EcssTcInSharedStoreConverter::new(tc_pool.clone(), 2048), ), - ExampleActionRequestConverter::default(), + ActionRequestConverter::default(), + // TODO: Implementation which does not use run-time allocation? Maybe something like + // a bounded wrapper which pre-allocates using [HashMap::with_capacity].. 
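As a side note to the TODO above: one simple way to avoid run-time allocation in the hot path is to reserve the active-request map up front via `HashMap::with_capacity`; the bounded wrapper type mentioned there does not exist in satrs yet, so this is only an illustration of the idea:

```rust
use std::collections::HashMap;

fn main() {
    const MAX_ACTIVE_ACTION_REQUESTS: usize = 16;
    // Reserving the buckets once keeps request bookkeeping allocation-free afterwards,
    // as long as the number of simultaneously active requests stays below the capacity.
    let mut active_requests: HashMap<u32, &'static str> =
        HashMap::with_capacity(MAX_ACTIVE_ACTION_REQUESTS);
    active_requests.insert(1, "action request for target MGM_0");
    assert!(active_requests.capacity() >= MAX_ACTIVE_ACTION_REQUESTS);
}
```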
+ DefaultActiveActionRequestMap::default(), + ActionReplyHandler::default(), action_router, - GenericRoutingErrorHandler::<8>::default(), + reply_receiver, ); - Pus8Wrapper { pus_8_handler } + ActionServiceWrapper { + service: action_request_handler, + } } pub fn create_action_service_dynamic( - tm_funnel_tx: mpsc::Sender>, - verif_reporter: VerificationReporterWithVecMpscSender, + tm_funnel_tx: mpsc::Sender, pus_action_rx: mpsc::Receiver, action_router: GenericRequestRouter, -) -> Pus8Wrapper< - MpscTcReceiver, - TmAsVecSenderWithMpsc, - EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, -> { - let action_srv_tm_sender = TmAsVecSenderWithId::new( - TmSenderId::PusAction as ChannelId, - "PUS_8_TM_SENDER", - tm_funnel_tx.clone(), - ); - let action_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusAction as ChannelId, - "PUS_8_TC_RECV", - pus_action_rx, - ); - let pus_8_handler = PusService8ActionHandler::new( + reply_receiver: mpsc::Receiver>, +) -> ActionServiceWrapper { + let action_request_handler = PusTargetedRequestService::new( PusServiceHelper::new( - action_srv_receiver, - action_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_ACTION_SERVICE.id(), + pus_action_rx, + tm_funnel_tx, + create_verification_reporter(PUS_ACTION_SERVICE.id(), PUS_ACTION_SERVICE.apid), EcssTcInVecConverter::default(), ), - ExampleActionRequestConverter::default(), + ActionRequestConverter::default(), + DefaultActiveActionRequestMap::default(), + ActionReplyHandler::default(), action_router, - GenericRoutingErrorHandler::<8>::default(), + reply_receiver, ); - Pus8Wrapper { pus_8_handler } + ActionServiceWrapper { + service: action_request_handler, + } } -pub struct Pus8Wrapper< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, -> { - pub(crate) pus_8_handler: PusService8ActionHandler< - TcReceiver, +pub struct ActionServiceWrapper +{ + pub(crate) service: PusTargetedRequestService< + MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter, - ExampleActionRequestConverter, - GenericRequestRouter, - GenericRoutingErrorHandler<8>, + ActionRequestConverter, + ActionReplyHandler, + DefaultActiveActionRequestMap, + ActivePusActionRequestStd, + ActionRequest, + PusActionReply, >, } -impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - > Pus8Wrapper +impl TargetedPusService + for ActionServiceWrapper { - pub fn handle_next_packet(&mut self) -> bool { - match self.pus_8_handler.handle_one_tc() { + /// Returns [true] if the packet handling is finished. + fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool { + match self.service.poll_and_handle_next_tc(time_stamp) { Ok(result) => match result { PusPacketHandlerResult::RequestHandled => {} PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { @@ -197,4 +290,463 @@ impl< } false } + + fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus { + // This only fails if all senders disconnected. Treat it like an empty queue. 
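The comment above refers to the fallback directly below it: a polling error (for instance, because every reply sender hung up) is mapped to `HandlingStatus::Empty` instead of being propagated, since there is nothing left to process in that case. A standalone sketch of the pattern (`HandlingStatus` is mirrored from this patch, the polling function is a stand-in):

```rust
#[derive(Debug, PartialEq, Eq)]
enum HandlingStatus {
    Empty,
    HandledOne,
}

// Stand-in for poll_and_check_next_reply, simulating a disconnected reply channel.
fn poll_next_reply() -> Result<HandlingStatus, &'static str> {
    Err("all senders disconnected")
}

fn main() {
    let status = poll_next_reply().unwrap_or_else(|e| {
        eprintln!("handling reply failed with error {e:?}");
        HandlingStatus::Empty
    });
    assert_eq!(status, HandlingStatus::Empty);
}
```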
+ self.service + .poll_and_check_next_reply(time_stamp) + .unwrap_or_else(|e| { + warn!("PUS 8: Handling reply failed with error {e:?}"); + HandlingStatus::Empty + }) + } + + fn check_for_request_timeouts(&mut self) { + self.service.check_for_request_timeouts(); + } +} + +#[cfg(test)] +mod tests { + use satrs::pus::test_util::{ + TEST_APID, TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1, + }; + use satrs::pus::verification; + use satrs::pus::verification::test_util::TestVerificationReporter; + use satrs::request::MessageMetadata; + use satrs::ComponentId; + use satrs::{ + res_code::ResultU16, + spacepackets::{ + ecss::{ + tc::{PusTcCreator, PusTcSecondaryHeader}, + tm::PusTmReader, + WritablePusPacket, + }, + SpHeader, + }, + }; + + use crate::{ + pus::tests::{PusConverterTestbench, ReplyHandlerTestbench, TargetedPusRequestTestbench}, + requests::CompositeRequest, + }; + + use super::*; + + impl + TargetedPusRequestTestbench< + ActionRequestConverter, + ActionReplyHandler, + DefaultActiveActionRequestMap, + ActivePusActionRequestStd, + ActionRequest, + PusActionReply, + > + { + pub fn new_for_action(owner_id: ComponentId, target_id: ComponentId) -> Self { + let _ = env_logger::builder().is_test(true).try_init(); + let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel(); + let (pus_action_tx, pus_action_rx) = mpsc::channel(); + let (action_reply_tx, action_reply_rx) = mpsc::channel(); + let (action_req_tx, action_req_rx) = mpsc::channel(); + let verif_reporter = TestVerificationReporter::new(owner_id); + let mut generic_req_router = GenericRequestRouter::default(); + generic_req_router + .composite_router_map + .insert(target_id, action_req_tx); + Self { + service: PusTargetedRequestService::new( + PusServiceHelper::new( + owner_id, + pus_action_rx, + tm_funnel_tx.clone(), + verif_reporter, + EcssTcInVecConverter::default(), + ), + ActionRequestConverter::default(), + DefaultActiveActionRequestMap::default(), + ActionReplyHandler::default(), + generic_req_router, + action_reply_rx, + ), + request_id: None, + pus_packet_tx: pus_action_tx, + tm_funnel_rx, + reply_tx: action_reply_tx, + request_rx: action_req_rx, + } + } + + pub fn verify_packet_started(&self) { + self.service + .service_helper + .common + .verif_reporter + .check_next_is_started_success( + self.service.service_helper.id(), + self.request_id.expect("request ID not set").into(), + ); + } + + pub fn verify_packet_completed(&self) { + self.service + .service_helper + .common + .verif_reporter + .check_next_is_completion_success( + self.service.service_helper.id(), + self.request_id.expect("request ID not set").into(), + ); + } + + pub fn verify_tm_empty(&self) { + let packet = self.tm_funnel_rx.try_recv(); + if let Err(mpsc::TryRecvError::Empty) = packet { + } else { + let tm = packet.unwrap(); + let unexpected_tm = PusTmReader::new(&tm.packet, 7).unwrap().0; + panic!("unexpected TM packet {unexpected_tm:?}"); + } + } + + pub fn verify_next_tc_is_handled_properly(&mut self, time_stamp: &[u8]) { + let result = self.service.poll_and_handle_next_tc(time_stamp); + if let Err(e) = result { + panic!("unexpected error {:?}", e); + } + let result = result.unwrap(); + match result { + PusPacketHandlerResult::RequestHandled => (), + _ => panic!("unexpected result {result:?}"), + } + } + + pub fn verify_all_tcs_handled(&mut self, time_stamp: &[u8]) { + let result = self.service.poll_and_handle_next_tc(time_stamp); + if let Err(e) = result { + panic!("unexpected error {:?}", e); + } + let result = result.unwrap(); + 
match result { + PusPacketHandlerResult::Empty => (), + _ => panic!("unexpected result {result:?}"), + } + } + + pub fn verify_next_reply_is_handled_properly(&mut self, time_stamp: &[u8]) { + let result = self.service.poll_and_check_next_reply(time_stamp); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), HandlingStatus::HandledOne); + } + + pub fn verify_all_replies_handled(&mut self, time_stamp: &[u8]) { + let result = self.service.poll_and_check_next_reply(time_stamp); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), HandlingStatus::Empty); + } + + pub fn add_tc(&mut self, tc: &PusTcCreator) { + self.request_id = Some(verification::RequestId::new(tc).into()); + let token = self.service.service_helper.verif_reporter_mut().add_tc(tc); + let accepted_token = self + .service + .service_helper + .verif_reporter() + .acceptance_success(self.service.service_helper.tm_sender(), token, &[0; 7]) + .expect("TC acceptance failed"); + self.service + .service_helper + .verif_reporter() + .check_next_was_added(accepted_token.request_id()); + let id = self.service.service_helper.id(); + self.service + .service_helper + .verif_reporter() + .check_next_is_acceptance_success(id, accepted_token.request_id()); + self.pus_packet_tx + .send(EcssTcAndToken::new(tc.to_vec().unwrap(), accepted_token)) + .unwrap(); + } + } + + #[test] + fn basic_request() { + let mut testbench = TargetedPusRequestTestbench::new_for_action( + TEST_COMPONENT_ID_0.id(), + TEST_COMPONENT_ID_1.id(), + ); + // Create a basic action request and verify forwarding. + let sp_header = SpHeader::new_from_apid(TEST_APID); + let sec_header = PusTcSecondaryHeader::new_simple(8, 128); + let action_id = 5_u32; + let mut app_data: [u8; 8] = [0; 8]; + app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_1.to_be_bytes()); + app_data[4..8].copy_from_slice(&action_id.to_be_bytes()); + let pus8_packet = PusTcCreator::new(sp_header, sec_header, &app_data, true); + testbench.add_tc(&pus8_packet); + let time_stamp: [u8; 7] = [0; 7]; + testbench.verify_next_tc_is_handled_properly(&time_stamp); + testbench.verify_all_tcs_handled(&time_stamp); + + testbench.verify_packet_started(); + + let possible_req = testbench.request_rx.try_recv(); + assert!(possible_req.is_ok()); + let req = possible_req.unwrap(); + if let CompositeRequest::Action(action_req) = req.message { + assert_eq!(action_req.action_id, action_id); + assert_eq!(action_req.variant, ActionRequestVariant::NoData); + let action_reply = PusActionReply::new(action_id, ActionReplyVariant::Completed); + testbench + .reply_tx + .send(GenericMessage::new(req.requestor_info, action_reply)) + .unwrap(); + } else { + panic!("unexpected request type"); + } + testbench.verify_next_reply_is_handled_properly(&time_stamp); + testbench.verify_all_replies_handled(&time_stamp); + + testbench.verify_packet_completed(); + testbench.verify_tm_empty(); + } + + #[test] + fn basic_request_routing_error() { + let mut testbench = TargetedPusRequestTestbench::new_for_action( + TEST_COMPONENT_ID_0.id(), + TEST_COMPONENT_ID_1.id(), + ); + // Create a basic action request and verify forwarding. + let sec_header = PusTcSecondaryHeader::new_simple(8, 128); + let action_id = 5_u32; + let mut app_data: [u8; 8] = [0; 8]; + // Invalid ID, routing should fail. 
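The tests around this point all build the same service 8 application data layout that the converter above expects: four bytes of target unique ID, four bytes of action ID, then optional parameter bytes. A small packing sketch (the helper function is only for illustration):

```rust
// Packs the PUS service 8 application data used throughout these tests:
// [0..4] target unique ID, [4..8] action ID, [8..] optional parameter data.
fn pack_action_app_data(target_unique_id: u32, action_id: u32, params: &[u8]) -> Vec<u8> {
    let mut app_data = Vec::with_capacity(8 + params.len());
    app_data.extend_from_slice(&target_unique_id.to_be_bytes());
    app_data.extend_from_slice(&action_id.to_be_bytes());
    app_data.extend_from_slice(params);
    app_data
}

fn main() {
    let app_data = pack_action_app_data(5, 10, &[1, 2, 3]);
    assert_eq!(app_data.len(), 11);
    assert_eq!(u32::from_be_bytes(app_data[0..4].try_into().unwrap()), 5);
    assert_eq!(u32::from_be_bytes(app_data[4..8].try_into().unwrap()), 10);
}
```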
+ app_data[0..4].copy_from_slice(&0_u32.to_be_bytes()); + app_data[4..8].copy_from_slice(&action_id.to_be_bytes()); + let pus8_packet = PusTcCreator::new( + SpHeader::new_from_apid(TEST_APID), + sec_header, + &app_data, + true, + ); + testbench.add_tc(&pus8_packet); + let time_stamp: [u8; 7] = [0; 7]; + + let result = testbench.service.poll_and_handle_next_tc(&time_stamp); + assert!(result.is_err()); + // Verify the correct result and completion failure. + } + + #[test] + fn converter_action_req_no_data() { + let mut testbench = PusConverterTestbench::new( + TEST_COMPONENT_ID_0.raw(), + ActionRequestConverter::default(), + ); + let sec_header = PusTcSecondaryHeader::new_simple(8, 128); + let action_id = 5_u32; + let mut app_data: [u8; 8] = [0; 8]; + // Invalid ID, routing should fail. + app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes()); + app_data[4..8].copy_from_slice(&action_id.to_be_bytes()); + let pus8_packet = PusTcCreator::new( + SpHeader::new_from_apid(TEST_APID), + sec_header, + &app_data, + true, + ); + let token = testbench.add_tc(&pus8_packet); + let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0); + assert!(result.is_ok()); + let (active_req, request) = result.unwrap(); + if let ActionRequestVariant::NoData = request.variant { + assert_eq!(request.action_id, action_id); + assert_eq!(active_req.action_id, action_id); + assert_eq!( + active_req.target_id(), + UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID_0).raw() + ); + assert_eq!( + active_req.token().request_id(), + testbench.request_id().unwrap() + ); + } else { + panic!("unexpected action request variant"); + } + } + + #[test] + fn converter_action_req_with_data() { + let mut testbench = + PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ActionRequestConverter::default()); + let sec_header = PusTcSecondaryHeader::new_simple(8, 128); + let action_id = 5_u32; + let mut app_data: [u8; 16] = [0; 16]; + // Invalid ID, routing should fail. 
+ app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes()); + app_data[4..8].copy_from_slice(&action_id.to_be_bytes()); + for i in 0..8 { + app_data[i + 8] = i as u8; + } + let pus8_packet = PusTcCreator::new( + SpHeader::new_from_apid(TEST_APID), + sec_header, + &app_data, + true, + ); + let token = testbench.add_tc(&pus8_packet); + let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0); + assert!(result.is_ok()); + let (active_req, request) = result.unwrap(); + if let ActionRequestVariant::VecData(vec) = request.variant { + assert_eq!(request.action_id, action_id); + assert_eq!(active_req.action_id, action_id); + assert_eq!(vec, app_data[8..].to_vec()); + } else { + panic!("unexpected action request variant"); + } + } + + #[test] + fn reply_handling_completion_success() { + let mut testbench = + ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default()); + let action_id = 5_u32; + let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]); + let active_action_req = + ActivePusActionRequestStd::new_from_common_req(action_id, active_req); + let reply = PusActionReply::new(action_id, ActionReplyVariant::Completed); + let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply); + let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]); + assert!(result.is_ok()); + assert!(result.unwrap()); + testbench.verif_reporter.assert_full_completion_success( + TEST_COMPONENT_ID_0.id(), + req_id, + None, + ); + } + + #[test] + fn reply_handling_completion_failure() { + let mut testbench = + ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default()); + let action_id = 5_u32; + let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]); + let active_action_req = + ActivePusActionRequestStd::new_from_common_req(action_id, active_req); + let error_code = ResultU16::new(2, 3); + let reply = PusActionReply::new( + action_id, + ActionReplyVariant::CompletionFailed { + error_code, + params: None, + }, + ); + let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply); + let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]); + assert!(result.is_ok()); + assert!(result.unwrap()); + testbench.verif_reporter.assert_completion_failure( + TEST_COMPONENT_ID_0.into(), + req_id, + None, + error_code.raw() as u64, + ); + } + + #[test] + fn reply_handling_step_success() { + let mut testbench = + ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default()); + let action_id = 5_u32; + let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]); + let active_action_req = + ActivePusActionRequestStd::new_from_common_req(action_id, active_req); + let reply = PusActionReply::new(action_id, ActionReplyVariant::StepSuccess { step: 1 }); + let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply); + let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]); + assert!(result.is_ok()); + // Entry should not be removed, completion not done yet. 
+ assert!(!result.unwrap()); + testbench.verif_reporter.check_next_was_added(req_id); + testbench + .verif_reporter + .check_next_is_acceptance_success(TEST_COMPONENT_ID_0.raw(), req_id); + testbench + .verif_reporter + .check_next_is_started_success(TEST_COMPONENT_ID_0.raw(), req_id); + testbench + .verif_reporter + .check_next_is_step_success(TEST_COMPONENT_ID_0.raw(), req_id, 1); + } + + #[test] + fn reply_handling_step_failure() { + let mut testbench = + ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default()); + let action_id = 5_u32; + let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]); + let active_action_req = + ActivePusActionRequestStd::new_from_common_req(action_id, active_req); + let error_code = ResultU16::new(2, 3); + let reply = PusActionReply::new( + action_id, + ActionReplyVariant::StepFailed { + error_code, + step: 1, + params: None, + }, + ); + let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply); + let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]); + assert!(result.is_ok()); + assert!(result.unwrap()); + testbench.verif_reporter.check_next_was_added(req_id); + testbench + .verif_reporter + .check_next_is_acceptance_success(TEST_COMPONENT_ID_0.id(), req_id); + testbench + .verif_reporter + .check_next_is_started_success(TEST_COMPONENT_ID_0.id(), req_id); + testbench.verif_reporter.check_next_is_step_failure( + TEST_COMPONENT_ID_0.id(), + req_id, + error_code.raw().into(), + ); + } + + #[test] + fn reply_handling_unrequested_reply() { + let mut testbench = + ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default()); + let action_reply = PusActionReply::new(5_u32, ActionReplyVariant::Completed); + let unrequested_reply = + GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply); + // Right now this function does not do a lot. We simply check that it does not panic or do + // weird stuff. 
+ let result = testbench.handle_unrequested_reply(&unrequested_reply); + assert!(result.is_ok()); + } + + #[test] + fn reply_handling_reply_timeout() { + let mut testbench = + ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default()); + let action_id = 5_u32; + let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]); + let result = testbench.handle_request_timeout( + &ActivePusActionRequestStd::new_from_common_req(action_id, active_request), + &[], + ); + assert!(result.is_ok()); + testbench.verif_reporter.assert_completion_failure( + TEST_COMPONENT_ID_0.raw(), + req_id, + None, + tmtc_err::REQUEST_TIMEOUT.raw() as u64, + ); + } } diff --git a/satrs-example/src/pus/event.rs b/satrs-example/src/pus/event.rs index 1d16f5c..865b1f1 100644 --- a/satrs-example/src/pus/event.rs +++ b/satrs-example/src/pus/event.rs @@ -1,113 +1,69 @@ use std::sync::mpsc; +use crate::pus::create_verification_reporter; use log::{error, warn}; -use satrs::pool::{SharedStaticMemoryPool, StoreAddr}; +use satrs::pool::SharedStaticMemoryPool; use satrs::pus::event_man::EventRequestWithToken; -use satrs::pus::event_srv::PusService5EventHandler; -use satrs::pus::verification::std_mod::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, -}; -use satrs::pus::verification::VerificationReportingProvider; +use satrs::pus::event_srv::PusEventServiceHandler; +use satrs::pus::verification::VerificationReporter; use satrs::pus::{ EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, - EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, PusServiceHelper, - TmAsVecSenderWithId, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc, - TmInSharedPoolSenderWithId, + EcssTmSenderCore, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, + PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender, }; -use satrs::tmtc::tm_helper::SharedTmPool; -use satrs::ChannelId; -use satrs_example::config::{TcReceiverId, TmSenderId, PUS_APID}; +use satrs_example::config::components::PUS_EVENT_MANAGEMENT; pub fn create_event_service_static( - shared_tm_store: SharedTmPool, - tm_funnel_tx: mpsc::SyncSender, - verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender, + tm_sender: TmInSharedPoolSender>, tc_pool: SharedStaticMemoryPool, pus_event_rx: mpsc::Receiver, event_request_tx: mpsc::Sender, -) -> Pus5Wrapper< - MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, - EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, -> { - let event_srv_tm_sender = TmInSharedPoolSenderWithId::new( - TmSenderId::PusEvent as ChannelId, - "PUS_5_TM_SENDER", - shared_tm_store.clone(), - tm_funnel_tx.clone(), - ); - let event_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusEvent as ChannelId, - "PUS_5_TC_RECV", - pus_event_rx, - ); - let pus_5_handler = PusService5EventHandler::new( +) -> EventServiceWrapper { + let pus_5_handler = PusEventServiceHandler::new( PusServiceHelper::new( - event_srv_receiver, - event_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_EVENT_MANAGEMENT.id(), + pus_event_rx, + tm_sender, + create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid), EcssTcInSharedStoreConverter::new(tc_pool.clone(), 2048), ), event_request_tx, ); - Pus5Wrapper { pus_5_handler } + EventServiceWrapper { + handler: pus_5_handler, + } } pub fn create_event_service_dynamic( - 
tm_funnel_tx: mpsc::Sender>, - verif_reporter: VerificationReporterWithVecMpscSender, + tm_funnel_tx: mpsc::Sender, pus_event_rx: mpsc::Receiver, event_request_tx: mpsc::Sender, -) -> Pus5Wrapper< - MpscTcReceiver, - TmAsVecSenderWithMpsc, - EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, -> { - let event_srv_tm_sender = TmAsVecSenderWithId::new( - TmSenderId::PusEvent as ChannelId, - "PUS_5_TM_SENDER", - tm_funnel_tx, - ); - let event_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusEvent as ChannelId, - "PUS_5_TC_RECV", - pus_event_rx, - ); - let pus_5_handler = PusService5EventHandler::new( +) -> EventServiceWrapper { + let pus_5_handler = PusEventServiceHandler::new( PusServiceHelper::new( - event_srv_receiver, - event_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_EVENT_MANAGEMENT.id(), + pus_event_rx, + tm_funnel_tx, + create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid), EcssTcInVecConverter::default(), ), event_request_tx, ); - Pus5Wrapper { pus_5_handler } + EventServiceWrapper { + handler: pus_5_handler, + } } -pub struct Pus5Wrapper< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, -> { - pub pus_5_handler: - PusService5EventHandler, +pub struct EventServiceWrapper { + pub handler: + PusEventServiceHandler, } -impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - > Pus5Wrapper +impl + EventServiceWrapper { - pub fn handle_next_packet(&mut self) -> bool { - match self.pus_5_handler.handle_one_tc() { + pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool { + match self.handler.poll_and_handle_next_tc(time_stamp) { Ok(result) => match result { PusPacketHandlerResult::RequestHandled => {} PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { diff --git a/satrs-example/src/pus/hk.rs b/satrs-example/src/pus/hk.rs index 48a54be..cb3ebb9 100644 --- a/satrs-example/src/pus/hk.rs +++ b/satrs-example/src/pus/hk.rs @@ -1,50 +1,127 @@ +use derive_new::new; use log::{error, warn}; -use satrs::hk::{CollectionIntervalFactor, HkRequest}; -use satrs::pool::{SharedStaticMemoryPool, StoreAddr}; -use satrs::pus::hk::{PusHkToRequestConverter, PusService3HkHandler}; -use satrs::pus::verification::std_mod::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, -}; +use satrs::hk::{CollectionIntervalFactor, HkRequest, HkRequestVariant, UniqueId}; +use satrs::pool::SharedStaticMemoryPool; use satrs::pus::verification::{ - FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken, + FailParams, TcStateAccepted, TcStateStarted, VerificationReporter, + VerificationReportingProvider, VerificationToken, }; use satrs::pus::{ - EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, - EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, - PusPacketHandlingError, PusServiceHelper, TmAsVecSenderWithId, TmAsVecSenderWithMpsc, - TmInSharedPoolSenderWithBoundedMpsc, TmInSharedPoolSenderWithId, + ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap, EcssTcAndToken, + EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSenderCore, + EcssTmtcError, GenericConversionError, MpscTcReceiver, MpscTmAsVecSender, + MpscTmInSharedPoolSenderBounded, 
PusPacketHandlerResult, PusReplyHandler, PusServiceHelper, + PusTcToRequestConverter, PusTmAsVec, PusTmInPool, TmInSharedPoolSender, }; -use satrs::request::TargetAndApidId; +use satrs::request::{GenericMessage, UniqueApidTargetId}; use satrs::spacepackets::ecss::tc::PusTcReader; use satrs::spacepackets::ecss::{hk, PusPacket}; -use satrs::tmtc::tm_helper::SharedTmPool; -use satrs::{ChannelId, TargetId}; -use satrs_example::config::{hk_err, tmtc_err, TcReceiverId, TmSenderId, PUS_APID}; -use std::sync::mpsc::{self}; +use satrs_example::config::components::PUS_HK_SERVICE; +use satrs_example::config::{hk_err, tmtc_err}; +use std::sync::mpsc; +use std::time::Duration; +use crate::pus::{create_verification_reporter, generic_pus_request_timeout_handler}; use crate::requests::GenericRequestRouter; -use super::GenericRoutingErrorHandler; +use super::{HandlingStatus, PusTargetedRequestService}; + +#[derive(Clone, PartialEq, Debug, new)] +pub struct HkReply { + pub unique_id: UniqueId, + pub variant: HkReplyVariant, +} + +#[derive(Clone, PartialEq, Debug)] +pub enum HkReplyVariant { + Ack, +} #[derive(Default)] -pub struct ExampleHkRequestConverter {} +pub struct HkReplyHandler {} -impl PusHkToRequestConverter for ExampleHkRequestConverter { - type Error = PusPacketHandlingError; +impl PusReplyHandler for HkReplyHandler { + type Error = EcssTmtcError; + + fn handle_unrequested_reply( + &mut self, + reply: &GenericMessage, + _tm_sender: &impl EcssTmSenderCore, + ) -> Result<(), Self::Error> { + log::warn!("received unexpected reply for service 3: {reply:?}"); + Ok(()) + } + + fn handle_reply( + &mut self, + reply: &GenericMessage, + active_request: &ActivePusRequestStd, + tm_sender: &impl EcssTmSenderCore, + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result { + let started_token: VerificationToken = active_request + .token() + .try_into() + .expect("invalid token state"); + match reply.message.variant { + HkReplyVariant::Ack => { + verification_handler + .completion_success(tm_sender, started_token, time_stamp) + .expect("sending completion success verification failed"); + } + }; + Ok(true) + } + + fn handle_request_timeout( + &mut self, + active_request: &ActivePusRequestStd, + tm_sender: &impl EcssTmSenderCore, + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result<(), Self::Error> { + generic_pus_request_timeout_handler( + tm_sender, + active_request, + verification_handler, + time_stamp, + "HK", + )?; + Ok(()) + } +} + +pub struct HkRequestConverter { + timeout: Duration, +} + +impl Default for HkRequestConverter { + fn default() -> Self { + Self { + timeout: Duration::from_secs(60), + } + } +} + +impl PusTcToRequestConverter for HkRequestConverter { + type Error = GenericConversionError; fn convert( &mut self, token: VerificationToken, tc: &PusTcReader, - time_stamp: &[u8], + tm_sender: &(impl EcssTmSenderCore + ?Sized), verif_reporter: &impl VerificationReportingProvider, - ) -> Result<(TargetId, HkRequest), Self::Error> { + time_stamp: &[u8], + ) -> Result<(ActivePusRequestStd, HkRequest), Self::Error> { let user_data = tc.user_data(); if user_data.is_empty() { let user_data_len = user_data.len() as u32; let user_data_len_raw = user_data_len.to_be_bytes(); verif_reporter .start_failure( + tm_sender, token, FailParams::new( time_stamp, @@ -53,7 +130,7 @@ impl PusHkToRequestConverter for ExampleHkRequestConverter { ), ) .expect("Sending start failure TM failed"); - return 
Err(PusPacketHandlingError::NotEnoughAppData { + return Err(GenericConversionError::NotEnoughAppData { expected: 4, found: 0, }); @@ -67,178 +144,164 @@ impl PusHkToRequestConverter for ExampleHkRequestConverter { let user_data_len = user_data.len() as u32; let user_data_len_raw = user_data_len.to_be_bytes(); verif_reporter - .start_failure(token, FailParams::new(time_stamp, err, &user_data_len_raw)) + .start_failure( + tm_sender, + token, + FailParams::new(time_stamp, err, &user_data_len_raw), + ) .expect("Sending start failure TM failed"); - return Err(PusPacketHandlingError::NotEnoughAppData { + return Err(GenericConversionError::NotEnoughAppData { expected: 8, found: 4, }); } let subservice = tc.subservice(); - let target_id = TargetAndApidId::from_pus_tc(tc).expect("invalid tc format"); + let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).expect("invalid tc format"); let unique_id = u32::from_be_bytes(tc.user_data()[4..8].try_into().unwrap()); let standard_subservice = hk::Subservice::try_from(subservice); if standard_subservice.is_err() { verif_reporter .start_failure( + tm_sender, token, FailParams::new(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE, &[subservice]), ) .expect("Sending start failure TM failed"); - return Err(PusPacketHandlingError::InvalidSubservice(subservice)); + return Err(GenericConversionError::InvalidSubservice(subservice)); } - Ok(( - target_id.into(), - match standard_subservice.unwrap() { - hk::Subservice::TcEnableHkGeneration | hk::Subservice::TcEnableDiagGeneration => { - HkRequest::Enable(unique_id) - } - hk::Subservice::TcDisableHkGeneration | hk::Subservice::TcDisableDiagGeneration => { - HkRequest::Disable(unique_id) - } - hk::Subservice::TcReportHkReportStructures => todo!(), - hk::Subservice::TmHkPacket => todo!(), - hk::Subservice::TcGenerateOneShotHk | hk::Subservice::TcGenerateOneShotDiag => { - HkRequest::OneShot(unique_id) - } - hk::Subservice::TcModifyDiagCollectionInterval - | hk::Subservice::TcModifyHkCollectionInterval => { - if user_data.len() < 12 { - verif_reporter - .start_failure( - token, - FailParams::new_no_fail_data( - time_stamp, - &tmtc_err::NOT_ENOUGH_APP_DATA, - ), - ) - .expect("Sending start failure TM failed"); - return Err(PusPacketHandlingError::NotEnoughAppData { - expected: 12, - found: user_data.len(), - }); - } - HkRequest::ModifyCollectionInterval( - unique_id, - CollectionIntervalFactor::from_be_bytes( - user_data[8..12].try_into().unwrap(), - ), - ) - } - _ => { + let request = match standard_subservice.unwrap() { + hk::Subservice::TcEnableHkGeneration | hk::Subservice::TcEnableDiagGeneration => { + HkRequest::new(unique_id, HkRequestVariant::EnablePeriodic) + } + hk::Subservice::TcDisableHkGeneration | hk::Subservice::TcDisableDiagGeneration => { + HkRequest::new(unique_id, HkRequestVariant::DisablePeriodic) + } + hk::Subservice::TcReportHkReportStructures => todo!(), + hk::Subservice::TmHkPacket => todo!(), + hk::Subservice::TcGenerateOneShotHk | hk::Subservice::TcGenerateOneShotDiag => { + HkRequest::new(unique_id, HkRequestVariant::OneShot) + } + hk::Subservice::TcModifyDiagCollectionInterval + | hk::Subservice::TcModifyHkCollectionInterval => { + if user_data.len() < 12 { verif_reporter .start_failure( + tm_sender, token, - FailParams::new( + FailParams::new_no_fail_data( time_stamp, - &tmtc_err::PUS_SUBSERVICE_NOT_IMPLEMENTED, - &[subservice], + &tmtc_err::NOT_ENOUGH_APP_DATA, ), ) .expect("Sending start failure TM failed"); - return Err(PusPacketHandlingError::InvalidSubservice(subservice)); + return 
Err(GenericConversionError::NotEnoughAppData { + expected: 12, + found: user_data.len(), + }); } - }, + HkRequest::new( + unique_id, + HkRequestVariant::ModifyCollectionInterval( + CollectionIntervalFactor::from_be_bytes( + user_data[8..12].try_into().unwrap(), + ), + ), + ) + } + _ => { + verif_reporter + .start_failure( + tm_sender, + token, + FailParams::new( + time_stamp, + &tmtc_err::PUS_SUBSERVICE_NOT_IMPLEMENTED, + &[subservice], + ), + ) + .expect("Sending start failure TM failed"); + return Err(GenericConversionError::InvalidSubservice(subservice)); + } + }; + Ok(( + ActivePusRequestStd::new(target_id_and_apid.into(), token, self.timeout), + request, )) } } pub fn create_hk_service_static( - shared_tm_store: SharedTmPool, - tm_funnel_tx: mpsc::SyncSender, - verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender, + tm_sender: TmInSharedPoolSender>, tc_pool: SharedStaticMemoryPool, pus_hk_rx: mpsc::Receiver, request_router: GenericRequestRouter, -) -> Pus3Wrapper< - MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, - EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, -> { - let hk_srv_tm_sender = TmInSharedPoolSenderWithId::new( - TmSenderId::PusHk as ChannelId, - "PUS_3_TM_SENDER", - shared_tm_store.clone(), - tm_funnel_tx.clone(), - ); - let hk_srv_receiver = - MpscTcReceiver::new(TcReceiverId::PusHk as ChannelId, "PUS_8_TC_RECV", pus_hk_rx); - let pus_3_handler = PusService3HkHandler::new( + reply_receiver: mpsc::Receiver>, +) -> HkServiceWrapper { + let pus_3_handler = PusTargetedRequestService::new( PusServiceHelper::new( - hk_srv_receiver, - hk_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_HK_SERVICE.id(), + pus_hk_rx, + tm_sender, + create_verification_reporter(PUS_HK_SERVICE.id(), PUS_HK_SERVICE.apid), EcssTcInSharedStoreConverter::new(tc_pool, 2048), ), - ExampleHkRequestConverter::default(), + HkRequestConverter::default(), + DefaultActiveRequestMap::default(), + HkReplyHandler::default(), request_router, - GenericRoutingErrorHandler::default(), + reply_receiver, ); - Pus3Wrapper { pus_3_handler } + HkServiceWrapper { + service: pus_3_handler, + } } pub fn create_hk_service_dynamic( - tm_funnel_tx: mpsc::Sender>, - verif_reporter: VerificationReporterWithVecMpscSender, + tm_funnel_tx: mpsc::Sender, pus_hk_rx: mpsc::Receiver, request_router: GenericRequestRouter, -) -> Pus3Wrapper< - MpscTcReceiver, - TmAsVecSenderWithMpsc, - EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, -> { - let hk_srv_tm_sender = TmAsVecSenderWithId::new( - TmSenderId::PusHk as ChannelId, - "PUS_3_TM_SENDER", - tm_funnel_tx.clone(), - ); - let hk_srv_receiver = - MpscTcReceiver::new(TcReceiverId::PusHk as ChannelId, "PUS_8_TC_RECV", pus_hk_rx); - let pus_3_handler = PusService3HkHandler::new( + reply_receiver: mpsc::Receiver>, +) -> HkServiceWrapper { + let pus_3_handler = PusTargetedRequestService::new( PusServiceHelper::new( - hk_srv_receiver, - hk_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_HK_SERVICE.id(), + pus_hk_rx, + tm_funnel_tx, + create_verification_reporter(PUS_HK_SERVICE.id(), PUS_HK_SERVICE.apid), EcssTcInVecConverter::default(), ), - ExampleHkRequestConverter::default(), + HkRequestConverter::default(), + DefaultActiveRequestMap::default(), + HkReplyHandler::default(), request_router, - GenericRoutingErrorHandler::default(), + reply_receiver, ); - Pus3Wrapper { pus_3_handler } + HkServiceWrapper { + service: pus_3_handler, + } } -pub struct Pus3Wrapper< - TcReceiver: EcssTcReceiverCore, - 
TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, -> { - pub(crate) pus_3_handler: PusService3HkHandler< - TcReceiver, +pub struct HkServiceWrapper { + pub(crate) service: PusTargetedRequestService< + MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter, - ExampleHkRequestConverter, - GenericRequestRouter, - GenericRoutingErrorHandler<3>, + HkRequestConverter, + HkReplyHandler, + DefaultActiveRequestMap, + ActivePusRequestStd, + HkRequest, + HkReply, >, } -impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - > Pus3Wrapper +impl + HkServiceWrapper { - pub fn handle_next_packet(&mut self) -> bool { - match self.pus_3_handler.handle_one_tc() { + pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool { + match self.service.poll_and_handle_next_tc(time_stamp) { Ok(result) => match result { PusPacketHandlerResult::RequestHandled => {} PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { @@ -260,4 +323,242 @@ impl< } false } + + pub fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus { + // This only fails if all senders disconnected. Treat it like an empty queue. + self.service + .poll_and_check_next_reply(time_stamp) + .unwrap_or_else(|e| { + warn!("PUS 3: Handling reply failed with error {e:?}"); + HandlingStatus::Empty + }) + } + + pub fn check_for_request_timeouts(&mut self) { + self.service.check_for_request_timeouts(); + } +} + +#[cfg(test)] +mod tests { + use satrs::pus::test_util::{ + TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1, + }; + use satrs::request::MessageMetadata; + use satrs::{ + hk::HkRequestVariant, + pus::test_util::TEST_APID, + request::GenericMessage, + spacepackets::{ + ecss::{hk::Subservice, tc::PusTcCreator}, + SpHeader, + }, + }; + use satrs_example::config::tmtc_err; + + use crate::pus::{ + hk::HkReplyVariant, + tests::{PusConverterTestbench, ReplyHandlerTestbench}, + }; + + use super::{HkReply, HkReplyHandler, HkRequestConverter}; + + #[test] + fn hk_converter_one_shot_req() { + let mut hk_bench = + PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default()); + let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); + let target_id = TEST_UNIQUE_ID_0; + let unique_id = 5_u32; + let mut app_data: [u8; 8] = [0; 8]; + app_data[0..4].copy_from_slice(&target_id.to_be_bytes()); + app_data[4..8].copy_from_slice(&unique_id.to_be_bytes()); + + let hk_req = PusTcCreator::new_simple( + sp_header, + 3, + Subservice::TcGenerateOneShotHk as u8, + &app_data, + true, + ); + let accepted_token = hk_bench.add_tc(&hk_req); + let (_active_req, req) = hk_bench + .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0) + .expect("conversion failed"); + + assert_eq!(req.unique_id, unique_id); + if let HkRequestVariant::OneShot = req.variant { + } else { + panic!("unexpected HK request") + } + } + + #[test] + fn hk_converter_enable_periodic_generation() { + let mut hk_bench = + PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default()); + let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); + let target_id = TEST_UNIQUE_ID_0; + let unique_id = 5_u32; + let mut app_data: [u8; 8] = [0; 8]; + app_data[0..4].copy_from_slice(&target_id.to_be_bytes()); + app_data[4..8].copy_from_slice(&unique_id.to_be_bytes()); + let mut generic_check = |tc: &PusTcCreator| 
{ + let accepted_token = hk_bench.add_tc(tc); + let (_active_req, req) = hk_bench + .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0) + .expect("conversion failed"); + assert_eq!(req.unique_id, unique_id); + if let HkRequestVariant::EnablePeriodic = req.variant { + } else { + panic!("unexpected HK request") + } + }; + let tc0 = PusTcCreator::new_simple( + sp_header, + 3, + Subservice::TcEnableHkGeneration as u8, + &app_data, + true, + ); + generic_check(&tc0); + let tc1 = PusTcCreator::new_simple( + sp_header, + 3, + Subservice::TcEnableDiagGeneration as u8, + &app_data, + true, + ); + generic_check(&tc1); + } + + #[test] + fn hk_conversion_disable_periodic_generation() { + let mut hk_bench = + PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default()); + let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); + let target_id = TEST_UNIQUE_ID_0; + let unique_id = 5_u32; + let mut app_data: [u8; 8] = [0; 8]; + app_data[0..4].copy_from_slice(&target_id.to_be_bytes()); + app_data[4..8].copy_from_slice(&unique_id.to_be_bytes()); + let mut generic_check = |tc: &PusTcCreator| { + let accepted_token = hk_bench.add_tc(tc); + let (_active_req, req) = hk_bench + .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0) + .expect("conversion failed"); + assert_eq!(req.unique_id, unique_id); + if let HkRequestVariant::DisablePeriodic = req.variant { + } else { + panic!("unexpected HK request") + } + }; + let tc0 = PusTcCreator::new_simple( + sp_header, + 3, + Subservice::TcDisableHkGeneration as u8, + &app_data, + true, + ); + generic_check(&tc0); + let tc1 = PusTcCreator::new_simple( + sp_header, + 3, + Subservice::TcDisableDiagGeneration as u8, + &app_data, + true, + ); + generic_check(&tc1); + } + + #[test] + fn hk_conversion_modify_interval() { + let mut hk_bench = + PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default()); + let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); + let target_id = TEST_UNIQUE_ID_0; + let unique_id = 5_u32; + let mut app_data: [u8; 12] = [0; 12]; + let collection_interval_factor = 5_u32; + app_data[0..4].copy_from_slice(&target_id.to_be_bytes()); + app_data[4..8].copy_from_slice(&unique_id.to_be_bytes()); + app_data[8..12].copy_from_slice(&collection_interval_factor.to_be_bytes()); + + let mut generic_check = |tc: &PusTcCreator| { + let accepted_token = hk_bench.add_tc(tc); + let (_active_req, req) = hk_bench + .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0) + .expect("conversion failed"); + assert_eq!(req.unique_id, unique_id); + if let HkRequestVariant::ModifyCollectionInterval(interval_factor) = req.variant { + assert_eq!(interval_factor, collection_interval_factor); + } else { + panic!("unexpected HK request") + } + }; + let tc0 = PusTcCreator::new_simple( + sp_header, + 3, + Subservice::TcModifyHkCollectionInterval as u8, + &app_data, + true, + ); + generic_check(&tc0); + let tc1 = PusTcCreator::new_simple( + sp_header, + 3, + Subservice::TcModifyDiagCollectionInterval as u8, + &app_data, + true, + ); + generic_check(&tc1); + } + + #[test] + fn hk_reply_handler() { + let mut reply_testbench = + ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), HkReplyHandler::default()); + let sender_id = 2_u64; + let apid_target_id = 3_u32; + let unique_id = 5_u32; + let (req_id, active_req) = reply_testbench.add_tc(TEST_APID, apid_target_id, &[]); + let reply = GenericMessage::new( + MessageMetadata::new(req_id.into(), sender_id), + HkReply::new(unique_id, HkReplyVariant::Ack), + ); + let 
result = reply_testbench.handle_reply(&reply, &active_req, &[]); + assert!(result.is_ok()); + assert!(result.unwrap()); + reply_testbench + .verif_reporter + .assert_full_completion_success(TEST_COMPONENT_ID_0.raw(), req_id, None); + } + + #[test] + fn reply_handling_unrequested_reply() { + let mut testbench = + ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default()); + let action_reply = HkReply::new(5_u32, HkReplyVariant::Ack); + let unrequested_reply = + GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply); + // Right now this function does not do a lot. We simply check that it does not panic or do + // weird stuff. + let result = testbench.handle_unrequested_reply(&unrequested_reply); + assert!(result.is_ok()); + } + + #[test] + fn reply_handling_reply_timeout() { + let mut testbench = + ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default()); + let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_1, &[]); + let result = testbench.handle_request_timeout(&active_request, &[]); + assert!(result.is_ok()); + testbench.verif_reporter.assert_completion_failure( + TEST_COMPONENT_ID_1.raw(), + req_id, + None, + tmtc_err::REQUEST_TIMEOUT.raw() as u64, + ); + } } diff --git a/satrs-example/src/pus/mod.rs b/satrs-example/src/pus/mod.rs index 70102b4..83bd34a 100644 --- a/satrs-example/src/pus/mod.rs +++ b/satrs-example/src/pus/mod.rs @@ -1,75 +1,80 @@ +use crate::requests::GenericRequestRouter; use crate::tmtc::MpscStoreAndSendError; use log::warn; -use satrs::pus::verification::{FailParams, VerificationReportingProvider}; -use satrs::pus::{ - EcssTcAndToken, GenericRoutingError, PusPacketHandlerResult, PusRoutingErrorHandler, TcInMemory, +use satrs::pus::verification::{ + self, FailParams, TcStateAccepted, TcStateStarted, VerificationReporter, + VerificationReporterCfg, VerificationReportingProvider, VerificationToken, }; +use satrs::pus::{ + ActiveRequestMapProvider, ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter, + EcssTcReceiverCore, EcssTmSenderCore, EcssTmtcError, GenericConversionError, + GenericRoutingError, PusPacketHandlerResult, PusPacketHandlingError, PusReplyHandler, + PusRequestRouter, PusServiceHelper, PusTcToRequestConverter, TcInMemory, +}; +use satrs::queue::GenericReceiveError; +use satrs::request::{Apid, GenericMessage, MessageMetadata}; use satrs::spacepackets::ecss::tc::PusTcReader; use satrs::spacepackets::ecss::PusServiceId; -use satrs::spacepackets::time::cds::CdsTime; -use satrs::spacepackets::time::TimeWriter; +use satrs::ComponentId; +use satrs_example::config::components::PUS_ROUTING_SERVICE; use satrs_example::config::{tmtc_err, CustomPusServiceId}; -use std::sync::mpsc::Sender; +use satrs_example::TimeStampHelper; +use std::fmt::Debug; +use std::sync::mpsc::{self, Sender}; pub mod action; pub mod event; pub mod hk; +pub mod mode; pub mod scheduler; pub mod stack; pub mod test; -pub struct PusTcMpscRouter { - pub test_service_receiver: Sender, - pub event_service_receiver: Sender, - pub sched_service_receiver: Sender, - pub hk_service_receiver: Sender, - pub action_service_receiver: Sender, +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub enum HandlingStatus { + Empty, + HandledOne, } -pub struct PusReceiver { +pub fn create_verification_reporter(owner_id: ComponentId, apid: Apid) -> VerificationReporter { + let verif_cfg = VerificationReporterCfg::new(apid, 1, 2, 8).unwrap(); + // Every software component which needs to generate verification telemetry, gets a 
cloned + // verification reporter. + VerificationReporter::new(owner_id, &verif_cfg) +} + +/// Simple router structure which forwards PUS telecommands to dedicated handlers. +pub struct PusTcMpscRouter { + pub test_tc_sender: Sender, + pub event_tc_sender: Sender, + pub sched_tc_sender: Sender, + pub hk_tc_sender: Sender, + pub action_tc_sender: Sender, + pub mode_tc_sender: Sender, +} + +pub struct PusReceiver { + pub id: ComponentId, + pub tm_sender: TmSender, pub verif_reporter: VerificationReporter, pub pus_router: PusTcMpscRouter, stamp_helper: TimeStampHelper, } -struct TimeStampHelper { - stamper: CdsTime, - time_stamp: [u8; 7], -} - -impl TimeStampHelper { - pub fn new() -> Self { +impl PusReceiver { + pub fn new(tm_sender: TmSender, pus_router: PusTcMpscRouter) -> Self { Self { - stamper: CdsTime::new_with_u16_days(0, 0), - time_stamp: [0; 7], - } - } - - pub fn stamp(&self) -> &[u8] { - &self.time_stamp - } - - pub fn update_from_now(&mut self) { - self.stamper - .update_from_now() - .expect("Updating timestamp failed"); - self.stamper - .write_to_bytes(&mut self.time_stamp) - .expect("Writing timestamp failed"); - } -} - -impl PusReceiver { - pub fn new(verif_reporter: VerificationReporter, pus_router: PusTcMpscRouter) -> Self { - Self { - verif_reporter, + id: PUS_ROUTING_SERVICE.raw(), + tm_sender, + verif_reporter: create_verification_reporter( + PUS_ROUTING_SERVICE.id(), + PUS_ROUTING_SERVICE.apid, + ), pus_router, - stamp_helper: TimeStampHelper::new(), + stamp_helper: TimeStampHelper::default(), } } -} -impl PusReceiver { pub fn handle_tc_packet( &mut self, tc_in_memory: TcInMemory, @@ -80,41 +85,34 @@ impl PusReceiver match standard_service { - PusServiceId::Test => { - self.pus_router.test_service_receiver.send(EcssTcAndToken { - tc_in_memory, - token: Some(accepted_token.into()), - })? - } + PusServiceId::Test => self.pus_router.test_tc_sender.send(EcssTcAndToken { + tc_in_memory, + token: Some(accepted_token.into()), + })?, PusServiceId::Housekeeping => { - self.pus_router.hk_service_receiver.send(EcssTcAndToken { + self.pus_router.hk_tc_sender.send(EcssTcAndToken { tc_in_memory, token: Some(accepted_token.into()), })? } - PusServiceId::Event => { - self.pus_router - .event_service_receiver - .send(EcssTcAndToken { - tc_in_memory, - token: Some(accepted_token.into()), - })? - } + PusServiceId::Event => self.pus_router.event_tc_sender.send(EcssTcAndToken { + tc_in_memory, + token: Some(accepted_token.into()), + })?, PusServiceId::Scheduling => { - self.pus_router - .sched_service_receiver - .send(EcssTcAndToken { - tc_in_memory, - token: Some(accepted_token.into()), - })? + self.pus_router.sched_tc_sender.send(EcssTcAndToken { + tc_in_memory, + token: Some(accepted_token.into()), + })? } _ => { let result = self.verif_reporter.start_failure( + &self.tm_sender, accepted_token, FailParams::new( self.stamp_helper.stamp(), @@ -131,14 +129,17 @@ impl PusReceiver { - // TODO: Fix mode service. - //self.handle_mode_service(pus_tc, accepted_token) + self.pus_router.mode_tc_sender.send(EcssTcAndToken { + tc_in_memory, + token: Some(accepted_token.into()), + })? } CustomPusServiceId::Health => {} } } else { self.verif_reporter .start_failure( + &self.tm_sender, accepted_token, FailParams::new( self.stamp_helper.stamp(), @@ -154,55 +155,550 @@ impl PusReceiver {} +pub trait TargetedPusService { + /// Returns [true] if the packet handling is finished. 
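+    /// For the services in this example, this is the case once the TC queue has been emptied.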
+ fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool; + fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus; + fn check_for_request_timeouts(&mut self); +} -impl PusRoutingErrorHandler for GenericRoutingErrorHandler { - type Error = satrs::pus::GenericRoutingError; +/// This is a generic handler class for all PUS services where a PUS telecommand is converted +/// to a targeted request. +/// +/// The generic steps for this process are the following +/// +/// 1. Poll for TC packets +/// 2. Convert the raw packets to a [PusTcReader]. +/// 3. Convert the PUS TC to a typed request using the [PusTcToRequestConverter]. +/// 4. Route the requests using the [GenericRequestRouter]. +/// 5. Add the request to the active request map using the [ActiveRequestMapProvider] abstraction. +/// 6. Check for replies which complete the forwarded request. The handler takes care of +/// the verification process. +/// 7. Check for timeouts of active requests. Generally, the timeout on the service level should +/// be highest expected timeout for the given target. +/// +/// The handler exposes the following API: +/// +/// 1. [Self::handle_one_tc] which tries to poll and handle one TC packet, covering steps 1-5. +/// 2. [Self::check_one_reply] which tries to poll and handle one reply, covering step 6. +/// 3. [Self::check_for_request_timeouts] which checks for request timeouts, covering step 7. +pub struct PusTargetedRequestService< + TcReceiver: EcssTcReceiverCore, + TmSender: EcssTmSenderCore, + TcInMemConverter: EcssTcInMemConverter, + VerificationReporter: VerificationReportingProvider, + RequestConverter: PusTcToRequestConverter, + ReplyHandler: PusReplyHandler, + ActiveRequestMap: ActiveRequestMapProvider, + ActiveRequestInfo: ActiveRequestProvider, + RequestType, + ReplyType, +> { + pub service_helper: + PusServiceHelper, + pub request_router: GenericRequestRouter, + pub request_converter: RequestConverter, + pub active_request_map: ActiveRequestMap, + pub reply_handler: ReplyHandler, + pub reply_receiver: mpsc::Receiver>, + phantom: std::marker::PhantomData<(RequestType, ActiveRequestInfo, ReplyType)>, +} - fn handle_error( - &self, - target_id: satrs::TargetId, - token: satrs::pus::verification::VerificationToken< - satrs::pus::verification::TcStateAccepted, +impl< + TcReceiver: EcssTcReceiverCore, + TmSender: EcssTmSenderCore, + TcInMemConverter: EcssTcInMemConverter, + VerificationReporter: VerificationReportingProvider, + RequestConverter: PusTcToRequestConverter, + ReplyHandler: PusReplyHandler, + ActiveRequestMap: ActiveRequestMapProvider, + ActiveRequestInfo: ActiveRequestProvider, + RequestType, + ReplyType, + > + PusTargetedRequestService< + TcReceiver, + TmSender, + TcInMemConverter, + VerificationReporter, + RequestConverter, + ReplyHandler, + ActiveRequestMap, + ActiveRequestInfo, + RequestType, + ReplyType, + > +where + GenericRequestRouter: PusRequestRouter, +{ + pub fn new( + service_helper: PusServiceHelper< + TcReceiver, + TmSender, + TcInMemConverter, + VerificationReporter, >, - _tc: &PusTcReader, - error: Self::Error, + request_converter: RequestConverter, + active_request_map: ActiveRequestMap, + reply_hook: ReplyHandler, + request_router: GenericRequestRouter, + reply_receiver: mpsc::Receiver>, + ) -> Self { + Self { + service_helper, + request_converter, + active_request_map, + reply_handler: reply_hook, + request_router, + reply_receiver, + phantom: std::marker::PhantomData, + } + } + + pub fn poll_and_handle_next_tc( + &mut self, + time_stamp: 
&[u8], + ) -> Result { + let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?; + if possible_packet.is_none() { + return Ok(PusPacketHandlerResult::Empty); + } + let ecss_tc_and_token = possible_packet.unwrap(); + self.service_helper + .tc_in_mem_converter_mut() + .cache(&ecss_tc_and_token.tc_in_memory)?; + let tc = self.service_helper.tc_in_mem_converter().convert()?; + let (mut request_info, request) = match self.request_converter.convert( + ecss_tc_and_token.token, + &tc, + self.service_helper.tm_sender(), + &self.service_helper.common.verif_reporter, + time_stamp, + ) { + Ok((info, req)) => (info, req), + Err(e) => { + self.handle_conversion_to_request_error(&e, ecss_tc_and_token.token, time_stamp); + return Err(e.into()); + } + }; + let accepted_token: VerificationToken = request_info + .token() + .try_into() + .expect("token not in expected accepted state"); + let verif_request_id = verification::RequestId::new(&tc).raw(); + match self.request_router.route( + MessageMetadata::new(verif_request_id, self.service_helper.id()), + request_info.target_id(), + request, + ) { + Ok(()) => { + let started_token = self + .service_helper + .verif_reporter() + .start_success( + &self.service_helper.common.tm_sender, + accepted_token, + time_stamp, + ) + .expect("Start success failure"); + request_info.set_token(started_token.into()); + self.active_request_map + .insert(&verif_request_id, request_info); + } + Err(e) => { + self.request_router.handle_error_generic( + &request_info, + &tc, + e.clone(), + self.service_helper.tm_sender(), + self.service_helper.verif_reporter(), + time_stamp, + ); + return Err(e.into()); + } + } + Ok(PusPacketHandlerResult::RequestHandled) + } + + fn handle_conversion_to_request_error( + &mut self, + error: &GenericConversionError, + token: VerificationToken, time_stamp: &[u8], - verif_reporter: &impl VerificationReportingProvider, ) { - warn!("Routing request for service {SERVICE_ID} failed: {error:?}"); match error { - GenericRoutingError::UnknownTargetId(id) => { - let mut fail_data: [u8; 8] = [0; 8]; - fail_data.copy_from_slice(&id.to_be_bytes()); - verif_reporter - .start_failure( + GenericConversionError::WrongService(service) => { + let service_slice: [u8; 1] = [*service]; + self.service_helper + .verif_reporter() + .completion_failure( + self.service_helper.tm_sender(), token, - FailParams::new(time_stamp, &tmtc_err::UNKNOWN_TARGET_ID, &fail_data), + FailParams::new(time_stamp, &tmtc_err::INVALID_PUS_SERVICE, &service_slice), ) - .expect("Sending start failure failed"); + .expect("Sending completion failure failed"); } - GenericRoutingError::SendError(_) => { - let mut fail_data: [u8; 8] = [0; 8]; - fail_data.copy_from_slice(&target_id.to_be_bytes()); - verif_reporter - .start_failure( + GenericConversionError::InvalidSubservice(subservice) => { + let subservice_slice: [u8; 1] = [*subservice]; + self.service_helper + .verif_reporter() + .completion_failure( + self.service_helper.tm_sender(), token, - FailParams::new(time_stamp, &tmtc_err::ROUTING_ERROR, &fail_data), + FailParams::new( + time_stamp, + &tmtc_err::INVALID_PUS_SUBSERVICE, + &subservice_slice, + ), ) - .expect("Sending start failure failed"); + .expect("Sending completion failure failed"); } - GenericRoutingError::NotEnoughAppData { expected, found } => { - let mut context_info = (found as u32).to_be_bytes().to_vec(); - context_info.extend_from_slice(&(expected as u32).to_be_bytes()); - verif_reporter - .start_failure( + GenericConversionError::NotEnoughAppData { expected, 
found } => { + let mut context_info = (*found as u32).to_be_bytes().to_vec(); + context_info.extend_from_slice(&(*expected as u32).to_be_bytes()); + self.service_helper + .verif_reporter() + .completion_failure( + self.service_helper.tm_sender(), token, FailParams::new(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA, &context_info), ) - .expect("Sending start failure failed"); + .expect("Sending completion failure failed"); + } + // Do nothing.. this is service-level and can not be handled generically here. + GenericConversionError::InvalidAppData(_) => (), + } + } + + pub fn poll_and_check_next_reply( + &mut self, + time_stamp: &[u8], + ) -> Result { + match self.reply_receiver.try_recv() { + Ok(reply) => { + self.handle_reply(&reply, time_stamp)?; + Ok(HandlingStatus::HandledOne) + } + Err(e) => match e { + mpsc::TryRecvError::Empty => Ok(HandlingStatus::Empty), + mpsc::TryRecvError::Disconnected => Err(EcssTmtcError::Receive( + GenericReceiveError::TxDisconnected(None), + )), + }, + } + } + + pub fn handle_reply( + &mut self, + reply: &GenericMessage, + time_stamp: &[u8], + ) -> Result<(), EcssTmtcError> { + let active_req_opt = self.active_request_map.get(reply.request_id()); + if active_req_opt.is_none() { + self.reply_handler + .handle_unrequested_reply(reply, &self.service_helper.common.tm_sender)?; + return Ok(()); + } + let active_request = active_req_opt.unwrap(); + let request_finished = self + .reply_handler + .handle_reply( + reply, + active_request, + &self.service_helper.common.tm_sender, + &self.service_helper.common.verif_reporter, + time_stamp, + ) + .unwrap_or(false); + if request_finished { + self.active_request_map.remove(reply.request_id()); + } + Ok(()) + } + + pub fn check_for_request_timeouts(&mut self) { + let mut requests_to_delete = Vec::new(); + self.active_request_map + .for_each(|request_id, request_info| { + if request_info.has_timed_out() { + requests_to_delete.push(*request_id); + } + }); + if !requests_to_delete.is_empty() { + for request_id in requests_to_delete { + self.active_request_map.remove(request_id); } } } } + +/// Generic timeout handling: Handle the verification failure with a dedicated return code +/// and also log the error. 
+pub fn generic_pus_request_timeout_handler( + sender: &(impl EcssTmSenderCore + ?Sized), + active_request: &(impl ActiveRequestProvider + Debug), + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + service_str: &'static str, +) -> Result<(), EcssTmtcError> { + log::warn!("timeout for active request {active_request:?} on {service_str} service"); + let started_token: VerificationToken = active_request + .token() + .try_into() + .expect("token not in expected started state"); + verification_handler.completion_failure( + sender, + started_token, + FailParams::new(time_stamp, &tmtc_err::REQUEST_TIMEOUT, &[]), + )?; + Ok(()) +} + +#[cfg(test)] +pub(crate) mod tests { + use std::time::Duration; + + use satrs::pus::test_util::TEST_COMPONENT_ID_0; + use satrs::pus::{MpscTmAsVecSender, PusTmAsVec, PusTmVariant}; + use satrs::request::RequestId; + use satrs::{ + pus::{ + verification::test_util::TestVerificationReporter, ActivePusRequestStd, + ActiveRequestMapProvider, EcssTcInVecConverter, MpscTcReceiver, + }, + request::UniqueApidTargetId, + spacepackets::{ + ecss::{ + tc::{PusTcCreator, PusTcSecondaryHeader}, + WritablePusPacket, + }, + SpHeader, + }, + }; + + use crate::requests::CompositeRequest; + + use super::*; + + // Testbench dedicated to the testing of [PusReplyHandler]s + pub struct ReplyHandlerTestbench< + ReplyHandler: PusReplyHandler, + ActiveRequestInfo: ActiveRequestProvider, + Reply, + > { + pub id: ComponentId, + pub verif_reporter: TestVerificationReporter, + pub reply_handler: ReplyHandler, + pub tm_receiver: mpsc::Receiver, + pub default_timeout: Duration, + tm_sender: MpscTmAsVecSender, + phantom: std::marker::PhantomData<(ActiveRequestInfo, Reply)>, + } + + impl< + ReplyHandler: PusReplyHandler, + ActiveRequestInfo: ActiveRequestProvider, + Reply, + > ReplyHandlerTestbench + { + pub fn new(owner_id: ComponentId, reply_handler: ReplyHandler) -> Self { + let test_verif_reporter = TestVerificationReporter::new(owner_id); + let (tm_sender, tm_receiver) = mpsc::channel(); + Self { + id: TEST_COMPONENT_ID_0.raw(), + verif_reporter: test_verif_reporter, + reply_handler, + default_timeout: Duration::from_secs(30), + tm_sender, + tm_receiver, + phantom: std::marker::PhantomData, + } + } + + pub fn add_tc( + &mut self, + apid: u16, + apid_target: u32, + time_stamp: &[u8], + ) -> (verification::RequestId, ActivePusRequestStd) { + let sp_header = SpHeader::new_from_apid(apid); + let sec_header_dummy = PusTcSecondaryHeader::new_simple(0, 0); + let init = self.verif_reporter.add_tc(&PusTcCreator::new( + sp_header, + sec_header_dummy, + &[], + true, + )); + let accepted = self + .verif_reporter + .acceptance_success(&self.tm_sender, init, time_stamp) + .expect("acceptance failed"); + let started = self + .verif_reporter + .start_success(&self.tm_sender, accepted, time_stamp) + .expect("start failed"); + ( + started.request_id(), + ActivePusRequestStd::new( + UniqueApidTargetId::new(apid, apid_target).raw(), + started, + self.default_timeout, + ), + ) + } + + pub fn handle_reply( + &mut self, + reply: &GenericMessage, + active_request: &ActiveRequestInfo, + time_stamp: &[u8], + ) -> Result { + self.reply_handler.handle_reply( + reply, + active_request, + &self.tm_sender, + &self.verif_reporter, + time_stamp, + ) + } + + pub fn handle_unrequested_reply( + &mut self, + reply: &GenericMessage, + ) -> Result<(), ReplyHandler::Error> { + self.reply_handler + .handle_unrequested_reply(reply, &self.tm_sender) + } + pub fn handle_request_timeout( + &mut self, + 
active_request_info: &ActiveRequestInfo, + time_stamp: &[u8], + ) -> Result<(), ReplyHandler::Error> { + self.reply_handler.handle_request_timeout( + active_request_info, + &self.tm_sender, + &self.verif_reporter, + time_stamp, + ) + } + } + + #[derive(Default)] + pub struct DummySender {} + + /// Dummy sender component which does nothing on the [Self::send_tm] call. + /// + /// Useful for unit tests. + impl EcssTmSenderCore for DummySender { + fn send_tm(&self, _source_id: ComponentId, _tm: PusTmVariant) -> Result<(), EcssTmtcError> { + Ok(()) + } + } + + // Testbench dedicated to the testing of [PusTcToRequestConverter]s + pub struct PusConverterTestbench< + Converter: PusTcToRequestConverter, + ActiveRequestInfo: ActiveRequestProvider, + Request, + > { + pub id: ComponentId, + pub verif_reporter: TestVerificationReporter, + pub converter: Converter, + dummy_sender: DummySender, + current_request_id: Option, + current_packet: Option>, + phantom: std::marker::PhantomData<(ActiveRequestInfo, Request)>, + } + + impl< + Converter: PusTcToRequestConverter, + ActiveRequestInfo: ActiveRequestProvider, + Request, + > PusConverterTestbench + { + pub fn new(owner_id: ComponentId, converter: Converter) -> Self { + let test_verif_reporter = TestVerificationReporter::new(owner_id); + Self { + id: owner_id, + verif_reporter: test_verif_reporter, + converter, + dummy_sender: DummySender::default(), + current_request_id: None, + current_packet: None, + phantom: std::marker::PhantomData, + } + } + + pub fn add_tc(&mut self, tc: &PusTcCreator) -> VerificationToken { + let token = self.verif_reporter.add_tc(tc); + self.current_request_id = Some(verification::RequestId::new(tc)); + self.current_packet = Some(tc.to_vec().unwrap()); + self.verif_reporter + .acceptance_success(&self.dummy_sender, token, &[]) + .expect("acceptance failed") + } + + pub fn request_id(&self) -> Option { + self.current_request_id + } + + pub fn convert( + &mut self, + token: VerificationToken, + time_stamp: &[u8], + expected_apid: u16, + expected_apid_target: u32, + ) -> Result<(ActiveRequestInfo, Request), Converter::Error> { + if self.current_packet.is_none() { + return Err(GenericConversionError::InvalidAppData( + "call add_tc first".to_string(), + )); + } + let current_packet = self.current_packet.take().unwrap(); + let tc_reader = PusTcReader::new(¤t_packet).unwrap(); + let (active_info, request) = self.converter.convert( + token, + &tc_reader.0, + &self.dummy_sender, + &self.verif_reporter, + time_stamp, + )?; + assert_eq!( + active_info.token().request_id(), + self.request_id().expect("no request id is set") + ); + assert_eq!( + active_info.target_id(), + UniqueApidTargetId::new(expected_apid, expected_apid_target).raw() + ); + Ok((active_info, request)) + } + } + + pub struct TargetedPusRequestTestbench< + RequestConverter: PusTcToRequestConverter, + ReplyHandler: PusReplyHandler, + ActiveRequestMap: ActiveRequestMapProvider, + ActiveRequestInfo: ActiveRequestProvider, + RequestType, + ReplyType, + > { + pub service: PusTargetedRequestService< + MpscTcReceiver, + MpscTmAsVecSender, + EcssTcInVecConverter, + TestVerificationReporter, + RequestConverter, + ReplyHandler, + ActiveRequestMap, + ActiveRequestInfo, + RequestType, + ReplyType, + >, + pub request_id: Option, + pub tm_funnel_rx: mpsc::Receiver, + pub pus_packet_tx: mpsc::Sender, + pub reply_tx: mpsc::Sender>, + pub request_rx: mpsc::Receiver>, + } +} diff --git a/satrs-example/src/pus/mode.rs b/satrs-example/src/pus/mode.rs new file mode 100644 index 0000000..4f2ff13 
--- /dev/null +++ b/satrs-example/src/pus/mode.rs @@ -0,0 +1,434 @@ +use derive_new::new; +use log::{error, warn}; +use std::sync::mpsc; +use std::time::Duration; + +use crate::requests::GenericRequestRouter; +use satrs::pool::SharedStaticMemoryPool; +use satrs::pus::verification::VerificationReporter; +use satrs::pus::{ + DefaultActiveRequestMap, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, + EcssTcInVecConverter, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, + PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender, +}; +use satrs::request::GenericMessage; +use satrs::{ + mode::{ModeAndSubmode, ModeReply, ModeRequest}, + pus::{ + mode::Subservice, + verification::{ + self, FailParams, TcStateAccepted, TcStateStarted, VerificationReportingProvider, + VerificationToken, + }, + ActivePusRequestStd, ActiveRequestProvider, EcssTmSenderCore, EcssTmtcError, + GenericConversionError, PusReplyHandler, PusTcToRequestConverter, PusTmVariant, + }, + request::UniqueApidTargetId, + spacepackets::{ + ecss::{ + tc::PusTcReader, + tm::{PusTmCreator, PusTmSecondaryHeader}, + PusPacket, + }, + SpHeader, + }, + ComponentId, +}; +use satrs_example::config::components::PUS_MODE_SERVICE; +use satrs_example::config::{mode_err, tmtc_err}; + +use super::{ + create_verification_reporter, generic_pus_request_timeout_handler, HandlingStatus, + PusTargetedRequestService, TargetedPusService, +}; + +#[derive(new)] +pub struct ModeReplyHandler { + owner_id: ComponentId, +} + +impl PusReplyHandler for ModeReplyHandler { + type Error = EcssTmtcError; + + fn handle_unrequested_reply( + &mut self, + reply: &GenericMessage, + _tm_sender: &impl EcssTmSenderCore, + ) -> Result<(), Self::Error> { + log::warn!("received unexpected reply for mode service 5: {reply:?}"); + Ok(()) + } + + fn handle_reply( + &mut self, + reply: &GenericMessage, + active_request: &ActivePusRequestStd, + tm_sender: &impl EcssTmSenderCore, + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result { + let started_token: VerificationToken = active_request + .token() + .try_into() + .expect("invalid token state"); + match reply.message { + ModeReply::ModeReply(mode_reply) => { + let mut source_data: [u8; 12] = [0; 12]; + mode_reply + .write_to_be_bytes(&mut source_data) + .expect("writing mode reply failed"); + let req_id = verification::RequestId::from(reply.request_id()); + let sp_header = SpHeader::new_for_unseg_tm(req_id.packet_id().apid(), 0, 0); + let sec_header = + PusTmSecondaryHeader::new(200, Subservice::TmModeReply as u8, 0, 0, time_stamp); + let pus_tm = PusTmCreator::new(sp_header, sec_header, &source_data, true); + tm_sender.send_tm(self.owner_id, PusTmVariant::Direct(pus_tm))?; + verification_handler.completion_success(tm_sender, started_token, time_stamp)?; + } + ModeReply::CantReachMode(error_code) => { + verification_handler.completion_failure( + tm_sender, + started_token, + FailParams::new(time_stamp, &error_code, &[]), + )?; + } + ModeReply::WrongMode { expected, reached } => { + let mut error_info: [u8; 24] = [0; 24]; + let mut written_len = expected + .write_to_be_bytes(&mut error_info[0..ModeAndSubmode::RAW_LEN]) + .expect("writing expected mode failed"); + written_len += reached + .write_to_be_bytes(&mut error_info[ModeAndSubmode::RAW_LEN..]) + .expect("writing reached mode failed"); + verification_handler.completion_failure( + tm_sender, + started_token, + FailParams::new( + time_stamp, + &mode_err::WRONG_MODE, + 
&error_info[..written_len], + ), + )?; + } + }; + Ok(true) + } + + fn handle_request_timeout( + &mut self, + active_request: &ActivePusRequestStd, + tm_sender: &impl EcssTmSenderCore, + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result<(), Self::Error> { + generic_pus_request_timeout_handler( + tm_sender, + active_request, + verification_handler, + time_stamp, + "HK", + )?; + Ok(()) + } +} + +#[derive(Default)] +pub struct ModeRequestConverter {} + +impl PusTcToRequestConverter for ModeRequestConverter { + type Error = GenericConversionError; + + fn convert( + &mut self, + token: VerificationToken, + tc: &PusTcReader, + tm_sender: &(impl EcssTmSenderCore + ?Sized), + verif_reporter: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result<(ActivePusRequestStd, ModeRequest), Self::Error> { + let subservice = tc.subservice(); + let user_data = tc.user_data(); + let not_enough_app_data = |expected: usize| { + verif_reporter + .start_failure( + tm_sender, + token, + FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA), + ) + .expect("Sending start failure failed"); + Err(GenericConversionError::NotEnoughAppData { + expected, + found: user_data.len(), + }) + }; + if user_data.len() < core::mem::size_of::() { + return not_enough_app_data(4); + } + let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).unwrap(); + let active_request = + ActivePusRequestStd::new(target_id_and_apid.into(), token, Duration::from_secs(30)); + let subservice_typed = Subservice::try_from(subservice); + let invalid_subservice = || { + // Invalid subservice + verif_reporter + .start_failure( + tm_sender, + token, + FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE), + ) + .expect("Sending start failure failed"); + Err(GenericConversionError::InvalidSubservice(subservice)) + }; + if subservice_typed.is_err() { + return invalid_subservice(); + } + let subservice_typed = subservice_typed.unwrap(); + match subservice_typed { + Subservice::TcSetMode => { + if user_data.len() < core::mem::size_of::() + ModeAndSubmode::RAW_LEN { + return not_enough_app_data(4 + ModeAndSubmode::RAW_LEN); + } + let mode_and_submode = ModeAndSubmode::from_be_bytes(&tc.user_data()[4..]) + .expect("mode and submode extraction failed"); + Ok((active_request, ModeRequest::SetMode(mode_and_submode))) + } + Subservice::TcReadMode => Ok((active_request, ModeRequest::ReadMode)), + Subservice::TcAnnounceMode => Ok((active_request, ModeRequest::AnnounceMode)), + Subservice::TcAnnounceModeRecursive => { + Ok((active_request, ModeRequest::AnnounceModeRecursive)) + } + _ => invalid_subservice(), + } + } +} + +pub fn create_mode_service_static( + tm_sender: TmInSharedPoolSender>, + tc_pool: SharedStaticMemoryPool, + pus_action_rx: mpsc::Receiver, + mode_router: GenericRequestRouter, + reply_receiver: mpsc::Receiver>, +) -> ModeServiceWrapper { + let mode_request_handler = PusTargetedRequestService::new( + PusServiceHelper::new( + PUS_MODE_SERVICE.id(), + pus_action_rx, + tm_sender, + create_verification_reporter(PUS_MODE_SERVICE.id(), PUS_MODE_SERVICE.apid), + EcssTcInSharedStoreConverter::new(tc_pool, 2048), + ), + ModeRequestConverter::default(), + DefaultActiveRequestMap::default(), + ModeReplyHandler::new(PUS_MODE_SERVICE.id()), + mode_router, + reply_receiver, + ); + ModeServiceWrapper { + service: mode_request_handler, + } +} + +pub fn create_mode_service_dynamic( + tm_funnel_tx: mpsc::Sender, + pus_action_rx: mpsc::Receiver, + mode_router: 
GenericRequestRouter, + reply_receiver: mpsc::Receiver>, +) -> ModeServiceWrapper { + let mode_request_handler = PusTargetedRequestService::new( + PusServiceHelper::new( + PUS_MODE_SERVICE.id(), + pus_action_rx, + tm_funnel_tx, + create_verification_reporter(PUS_MODE_SERVICE.id(), PUS_MODE_SERVICE.apid), + EcssTcInVecConverter::default(), + ), + ModeRequestConverter::default(), + DefaultActiveRequestMap::default(), + ModeReplyHandler::new(PUS_MODE_SERVICE.id()), + mode_router, + reply_receiver, + ); + ModeServiceWrapper { + service: mode_request_handler, + } +} + +pub struct ModeServiceWrapper { + pub(crate) service: PusTargetedRequestService< + MpscTcReceiver, + TmSender, + TcInMemConverter, + VerificationReporter, + ModeRequestConverter, + ModeReplyHandler, + DefaultActiveRequestMap, + ActivePusRequestStd, + ModeRequest, + ModeReply, + >, +} + +impl TargetedPusService + for ModeServiceWrapper +{ + /// Returns [true] if the packet handling is finished. + fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool { + match self.service.poll_and_handle_next_tc(time_stamp) { + Ok(result) => match result { + PusPacketHandlerResult::RequestHandled => {} + PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { + warn!("PUS mode service: partial packet handling success: {e:?}") + } + PusPacketHandlerResult::CustomSubservice(invalid, _) => { + warn!("PUS mode service: invalid subservice {invalid}"); + } + PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => { + warn!("PUS mode service: {subservice} not implemented"); + } + PusPacketHandlerResult::Empty => { + return true; + } + }, + Err(error) => { + error!("PUS mode service: packet handling error: {error:?}") + } + } + false + } + + fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus { + self.service + .poll_and_check_next_reply(time_stamp) + .unwrap_or_else(|e| { + warn!("PUS action service: Handling reply failed with error {e:?}"); + HandlingStatus::HandledOne + }) + } + + fn check_for_request_timeouts(&mut self) { + self.service.check_for_request_timeouts(); + } +} +#[cfg(test)] +mod tests { + use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0, TEST_UNIQUE_ID_0}; + use satrs::request::MessageMetadata; + use satrs::{ + mode::{ModeAndSubmode, ModeReply, ModeRequest}, + pus::mode::Subservice, + request::GenericMessage, + spacepackets::{ + ecss::tc::{PusTcCreator, PusTcSecondaryHeader}, + SpHeader, + }, + }; + use satrs_example::config::tmtc_err; + + use crate::pus::{ + mode::ModeReplyHandler, + tests::{PusConverterTestbench, ReplyHandlerTestbench}, + }; + + use super::ModeRequestConverter; + + #[test] + fn mode_converter_read_mode_request() { + let mut testbench = + PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default()); + let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); + let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcReadMode as u8); + let mut app_data: [u8; 4] = [0; 4]; + app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes()); + let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true); + let token = testbench.add_tc(&tc); + let (_active_req, req) = testbench + .convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0) + .expect("conversion has failed"); + assert_eq!(req, ModeRequest::ReadMode); + } + + #[test] + fn mode_converter_set_mode_request() { + let mut testbench = + PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default()); + let sp_header = 
SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); + let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcSetMode as u8); + let mut app_data: [u8; 4 + ModeAndSubmode::RAW_LEN] = [0; 4 + ModeAndSubmode::RAW_LEN]; + let mode_and_submode = ModeAndSubmode::new(2, 1); + app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes()); + mode_and_submode + .write_to_be_bytes(&mut app_data[4..]) + .unwrap(); + let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true); + let token = testbench.add_tc(&tc); + let (_active_req, req) = testbench + .convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0) + .expect("conversion has failed"); + assert_eq!(req, ModeRequest::SetMode(mode_and_submode)); + } + + #[test] + fn mode_converter_announce_mode() { + let mut testbench = + PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default()); + let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); + let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceMode as u8); + let mut app_data: [u8; 4] = [0; 4]; + app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes()); + let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true); + let token = testbench.add_tc(&tc); + let (_active_req, req) = testbench + .convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0) + .expect("conversion has failed"); + assert_eq!(req, ModeRequest::AnnounceMode); + } + + #[test] + fn mode_converter_announce_mode_recursively() { + let mut testbench = + PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default()); + let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); + let sec_header = + PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceModeRecursive as u8); + let mut app_data: [u8; 4] = [0; 4]; + app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes()); + let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true); + let token = testbench.add_tc(&tc); + let (_active_req, req) = testbench + .convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0) + .expect("conversion has failed"); + assert_eq!(req, ModeRequest::AnnounceModeRecursive); + } + + #[test] + fn reply_handling_unrequested_reply() { + let mut testbench = ReplyHandlerTestbench::new( + TEST_COMPONENT_ID_0.id(), + ModeReplyHandler::new(TEST_COMPONENT_ID_0.id()), + ); + let mode_reply = ModeReply::ModeReply(ModeAndSubmode::new(5, 1)); + let unrequested_reply = + GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), mode_reply); + // Right now this function does not do a lot. We simply check that it does not panic or do + // weird stuff. 
+ let result = testbench.handle_unrequested_reply(&unrequested_reply); + assert!(result.is_ok()); + } + + #[test] + fn reply_handling_reply_timeout() { + let mut testbench = ReplyHandlerTestbench::new( + TEST_COMPONENT_ID_0.id(), + ModeReplyHandler::new(TEST_COMPONENT_ID_0.id()), + ); + let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]); + let result = testbench.handle_request_timeout(&active_request, &[]); + assert!(result.is_ok()); + testbench.verif_reporter.assert_completion_failure( + TEST_COMPONENT_ID_0.raw(), + req_id, + None, + tmtc_err::REQUEST_TIMEOUT.raw() as u64, + ); + } +} diff --git a/satrs-example/src/pus/scheduler.rs b/satrs-example/src/pus/scheduler.rs index c5d2c06..d75c666 100644 --- a/satrs-example/src/pus/scheduler.rs +++ b/satrs-example/src/pus/scheduler.rs @@ -1,23 +1,18 @@ use std::sync::mpsc; use std::time::Duration; +use crate::pus::create_verification_reporter; use log::{error, info, warn}; -use satrs::pool::{PoolProvider, StaticMemoryPool, StoreAddr}; +use satrs::pool::{PoolProvider, StaticMemoryPool}; use satrs::pus::scheduler::{PusScheduler, TcInfo}; -use satrs::pus::scheduler_srv::PusService11SchedHandler; -use satrs::pus::verification::std_mod::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, -}; -use satrs::pus::verification::VerificationReportingProvider; +use satrs::pus::scheduler_srv::PusSchedServiceHandler; +use satrs::pus::verification::VerificationReporter; use satrs::pus::{ EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, - EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, PusServiceHelper, - TmAsVecSenderWithId, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc, - TmInSharedPoolSenderWithId, + EcssTmSenderCore, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, + PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender, }; -use satrs::tmtc::tm_helper::SharedTmPool; -use satrs::ChannelId; -use satrs_example::config::{TcReceiverId, TmSenderId, PUS_APID}; +use satrs_example::config::components::PUS_SCHED_SERVICE; use crate::tmtc::PusTcSourceProviderSharedPool; @@ -55,14 +50,12 @@ impl TcReleaser for mpsc::Sender> { } } -pub struct Pus11Wrapper< - TcReceiver: EcssTcReceiverCore, +pub struct SchedulingServiceWrapper< TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, > { - pub pus_11_handler: PusService11SchedHandler< - TcReceiver, + pub pus_11_handler: PusSchedServiceHandler< + MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter, @@ -73,12 +66,8 @@ pub struct Pus11Wrapper< pub tc_releaser: Box, } -impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - > Pus11Wrapper +impl + SchedulingServiceWrapper { pub fn release_tcs(&mut self) { let releaser = |enabled: bool, info: &TcInfo, tc: &[u8]| -> bool { @@ -103,8 +92,11 @@ impl< } } - pub fn handle_next_packet(&mut self) -> bool { - match self.pus_11_handler.handle_one_tc(&mut self.sched_tc_pool) { + pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool { + match self + .pus_11_handler + .poll_and_handle_next_tc(time_stamp, &mut self.sched_tc_pool) + { Ok(result) => match result { PusPacketHandlerResult::RequestHandled => {} PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { @@ -129,42 
+121,24 @@ impl< } pub fn create_scheduler_service_static( - shared_tm_store: SharedTmPool, - tm_funnel_tx: mpsc::SyncSender, - verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender, + tm_sender: TmInSharedPoolSender>, tc_releaser: PusTcSourceProviderSharedPool, pus_sched_rx: mpsc::Receiver, sched_tc_pool: StaticMemoryPool, -) -> Pus11Wrapper< - MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, - EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, -> { - let sched_srv_tm_sender = TmInSharedPoolSenderWithId::new( - TmSenderId::PusSched as ChannelId, - "PUS_11_TM_SENDER", - shared_tm_store.clone(), - tm_funnel_tx.clone(), - ); - let sched_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusSched as ChannelId, - "PUS_11_TC_RECV", - pus_sched_rx, - ); +) -> SchedulingServiceWrapper { let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5)) .expect("Creating PUS Scheduler failed"); - let pus_11_handler = PusService11SchedHandler::new( + let pus_11_handler = PusSchedServiceHandler::new( PusServiceHelper::new( - sched_srv_receiver, - sched_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_SCHED_SERVICE.id(), + pus_sched_rx, + tm_sender, + create_verification_reporter(PUS_SCHED_SERVICE.id(), PUS_SCHED_SERVICE.apid), EcssTcInSharedStoreConverter::new(tc_releaser.clone_backing_pool(), 2048), ), scheduler, ); - Pus11Wrapper { + SchedulingServiceWrapper { pus_11_handler, sched_tc_pool, releaser_buf: [0; 4096], @@ -173,40 +147,26 @@ pub fn create_scheduler_service_static( } pub fn create_scheduler_service_dynamic( - tm_funnel_tx: mpsc::Sender>, - verif_reporter: VerificationReporterWithVecMpscSender, + tm_funnel_tx: mpsc::Sender, tc_source_sender: mpsc::Sender>, pus_sched_rx: mpsc::Receiver, sched_tc_pool: StaticMemoryPool, -) -> Pus11Wrapper< - MpscTcReceiver, - TmAsVecSenderWithMpsc, - EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, -> { - let sched_srv_tm_sender = TmAsVecSenderWithId::new( - TmSenderId::PusSched as ChannelId, - "PUS_11_TM_SENDER", - tm_funnel_tx, - ); - let sched_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusSched as ChannelId, - "PUS_11_TC_RECV", - pus_sched_rx, - ); +) -> SchedulingServiceWrapper { + //let sched_srv_receiver = + //MpscTcReceiver::new(PUS_SCHED_SERVICE.raw(), "PUS_11_TC_RECV", pus_sched_rx); let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5)) .expect("Creating PUS Scheduler failed"); - let pus_11_handler = PusService11SchedHandler::new( + let pus_11_handler = PusSchedServiceHandler::new( PusServiceHelper::new( - sched_srv_receiver, - sched_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_SCHED_SERVICE.id(), + pus_sched_rx, + tm_funnel_tx, + create_verification_reporter(PUS_SCHED_SERVICE.id(), PUS_SCHED_SERVICE.apid), EcssTcInVecConverter::default(), ), scheduler, ); - Pus11Wrapper { + SchedulingServiceWrapper { pus_11_handler, sched_tc_pool, releaser_buf: [0; 4096], diff --git a/satrs-example/src/pus/stack.rs b/satrs-example/src/pus/stack.rs index ed06e08..a11463c 100644 --- a/satrs-example/src/pus/stack.rs +++ b/satrs-example/src/pus/stack.rs @@ -1,69 +1,71 @@ -use satrs::pus::{ - verification::VerificationReportingProvider, EcssTcInMemConverter, EcssTcReceiverCore, - EcssTmSenderCore, +use crate::pus::mode::ModeServiceWrapper; +use derive_new::new; +use satrs::{ + pus::{EcssTcInMemConverter, EcssTmSenderCore}, + spacepackets::time::{cds, TimeWriter}, }; use super::{ - action::Pus8Wrapper, event::Pus5Wrapper, 
hk::Pus3Wrapper, scheduler::Pus11Wrapper, - test::Service17CustomWrapper, + action::ActionServiceWrapper, event::EventServiceWrapper, hk::HkServiceWrapper, + scheduler::SchedulingServiceWrapper, test::TestCustomServiceWrapper, HandlingStatus, + TargetedPusService, }; -pub struct PusStack< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, -> { - event_srv: Pus5Wrapper, - hk_srv: Pus3Wrapper, - action_srv: Pus8Wrapper, - schedule_srv: Pus11Wrapper, - test_srv: Service17CustomWrapper, +#[derive(new)] +pub struct PusStack { + test_srv: TestCustomServiceWrapper, + hk_srv_wrapper: HkServiceWrapper, + event_srv: EventServiceWrapper, + action_srv_wrapper: ActionServiceWrapper, + schedule_srv: SchedulingServiceWrapper, + mode_srv: ModeServiceWrapper, } -impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - > PusStack +impl + PusStack { - pub fn new( - hk_srv: Pus3Wrapper, - event_srv: Pus5Wrapper, - action_srv: Pus8Wrapper, - schedule_srv: Pus11Wrapper, - test_srv: Service17CustomWrapper< - TcReceiver, - TmSender, - TcInMemConverter, - VerificationReporter, - >, - ) -> Self { - Self { - event_srv, - action_srv, - schedule_srv, - test_srv, - hk_srv, - } - } - pub fn periodic_operation(&mut self) { + // Release all telecommands which reached their release time before calling the service + // handlers. self.schedule_srv.release_tcs(); + let time_stamp = cds::CdsTime::now_with_u16_days() + .expect("time stamp generation error") + .to_vec() + .unwrap(); loop { - let mut all_queues_empty = true; - let mut is_srv_finished = |srv_handler_finished: bool| { - if !srv_handler_finished { - all_queues_empty = false; - } - }; - is_srv_finished(self.test_srv.handle_next_packet()); - is_srv_finished(self.schedule_srv.handle_next_packet()); - is_srv_finished(self.event_srv.handle_next_packet()); - is_srv_finished(self.action_srv.handle_next_packet()); - is_srv_finished(self.hk_srv.handle_next_packet()); - if all_queues_empty { + let mut nothing_to_do = true; + let mut is_srv_finished = + |tc_handling_done: bool, reply_handling_done: Option| { + if !tc_handling_done + || (reply_handling_done.is_some() + && reply_handling_done.unwrap() == HandlingStatus::Empty) + { + nothing_to_do = false; + } + }; + is_srv_finished(self.test_srv.poll_and_handle_next_packet(&time_stamp), None); + is_srv_finished(self.schedule_srv.poll_and_handle_next_tc(&time_stamp), None); + is_srv_finished(self.event_srv.poll_and_handle_next_tc(&time_stamp), None); + is_srv_finished( + self.action_srv_wrapper.poll_and_handle_next_tc(&time_stamp), + Some( + self.action_srv_wrapper + .poll_and_handle_next_reply(&time_stamp), + ), + ); + is_srv_finished( + self.hk_srv_wrapper.poll_and_handle_next_tc(&time_stamp), + Some(self.hk_srv_wrapper.poll_and_handle_next_reply(&time_stamp)), + ); + is_srv_finished( + self.mode_srv.poll_and_handle_next_tc(&time_stamp), + Some(self.mode_srv.poll_and_handle_next_reply(&time_stamp)), + ); + if nothing_to_do { + // Timeout checking is only done once. 
+ self.action_srv_wrapper.check_for_request_timeouts(); + self.hk_srv_wrapper.check_for_request_timeouts(); + self.mode_srv.check_for_request_timeouts(); break; } } diff --git a/satrs-example/src/pus/test.rs b/satrs-example/src/pus/test.rs index 4b0164b..0111026 100644 --- a/satrs-example/src/pus/test.rs +++ b/satrs-example/src/pus/test.rs @@ -1,118 +1,74 @@ +use crate::pus::create_verification_reporter; use log::{info, warn}; -use satrs::params::Params; -use satrs::pool::{SharedStaticMemoryPool, StoreAddr}; +use satrs::event_man::{EventMessage, EventMessageU32}; +use satrs::pool::SharedStaticMemoryPool; use satrs::pus::test::PusService17TestHandler; -use satrs::pus::verification::{FailParams, VerificationReportingProvider}; -use satrs::pus::verification::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, -}; +use satrs::pus::verification::{FailParams, VerificationReporter, VerificationReportingProvider}; +use satrs::pus::EcssTcInSharedStoreConverter; use satrs::pus::{ - EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter, EcssTcReceiverCore, - EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, PusServiceHelper, - TmAsVecSenderWithId, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc, - TmInSharedPoolSenderWithId, + EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter, EcssTmSenderCore, MpscTcReceiver, + MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, PusServiceHelper, + PusTmAsVec, PusTmInPool, TmInSharedPoolSender, }; use satrs::spacepackets::ecss::tc::PusTcReader; use satrs::spacepackets::ecss::PusPacket; use satrs::spacepackets::time::cds::CdsTime; use satrs::spacepackets::time::TimeWriter; -use satrs::tmtc::tm_helper::SharedTmPool; -use satrs::ChannelId; -use satrs::{events::EventU32, pus::EcssTcInSharedStoreConverter}; -use satrs_example::config::{tmtc_err, TcReceiverId, TmSenderId, PUS_APID, TEST_EVENT}; -use std::sync::mpsc::{self, Sender}; +use satrs_example::config::components::PUS_TEST_SERVICE; +use satrs_example::config::{tmtc_err, TEST_EVENT}; +use std::sync::mpsc; pub fn create_test_service_static( - shared_tm_store: SharedTmPool, - tm_funnel_tx: mpsc::SyncSender, - verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender, + tm_sender: TmInSharedPoolSender>, tc_pool: SharedStaticMemoryPool, - event_sender: mpsc::Sender<(EventU32, Option)>, + event_sender: mpsc::Sender, pus_test_rx: mpsc::Receiver, -) -> Service17CustomWrapper< - MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, - EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, -> { - let test_srv_tm_sender = TmInSharedPoolSenderWithId::new( - TmSenderId::PusTest as ChannelId, - "PUS_17_TM_SENDER", - shared_tm_store.clone(), - tm_funnel_tx.clone(), - ); - let test_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusTest as ChannelId, - "PUS_17_TC_RECV", - pus_test_rx, - ); +) -> TestCustomServiceWrapper { let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new( - test_srv_receiver, - test_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_TEST_SERVICE.id(), + pus_test_rx, + tm_sender, + create_verification_reporter(PUS_TEST_SERVICE.id(), PUS_TEST_SERVICE.apid), EcssTcInSharedStoreConverter::new(tc_pool, 2048), )); - Service17CustomWrapper { - pus17_handler, + TestCustomServiceWrapper { + handler: pus17_handler, test_srv_event_sender: event_sender, } } pub fn create_test_service_dynamic( - tm_funnel_tx: mpsc::Sender>, - verif_reporter: 
VerificationReporterWithVecMpscSender, - event_sender: mpsc::Sender<(EventU32, Option)>, + tm_funnel_tx: mpsc::Sender, + event_sender: mpsc::Sender, pus_test_rx: mpsc::Receiver, -) -> Service17CustomWrapper< - MpscTcReceiver, - TmAsVecSenderWithMpsc, - EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, -> { - let test_srv_tm_sender = TmAsVecSenderWithId::new( - TmSenderId::PusTest as ChannelId, - "PUS_17_TM_SENDER", - tm_funnel_tx.clone(), - ); - let test_srv_receiver = MpscTcReceiver::new( - TcReceiverId::PusTest as ChannelId, - "PUS_17_TC_RECV", - pus_test_rx, - ); +) -> TestCustomServiceWrapper { let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new( - test_srv_receiver, - test_srv_tm_sender, - PUS_APID, - verif_reporter.clone(), + PUS_TEST_SERVICE.id(), + pus_test_rx, + tm_funnel_tx, + create_verification_reporter(PUS_TEST_SERVICE.id(), PUS_TEST_SERVICE.apid), EcssTcInVecConverter::default(), )); - Service17CustomWrapper { - pus17_handler, + TestCustomServiceWrapper { + handler: pus17_handler, test_srv_event_sender: event_sender, } } -pub struct Service17CustomWrapper< - TcReceiver: EcssTcReceiverCore, +pub struct TestCustomServiceWrapper< TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, > { - pub pus17_handler: - PusService17TestHandler, - pub test_srv_event_sender: Sender<(EventU32, Option)>, + pub handler: + PusService17TestHandler, + pub test_srv_event_sender: mpsc::Sender, } -impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - > Service17CustomWrapper +impl + TestCustomServiceWrapper { - pub fn handle_next_packet(&mut self) -> bool { - let res = self.pus17_handler.handle_one_tc(); + pub fn poll_and_handle_next_packet(&mut self, time_stamp: &[u8]) -> bool { + let res = self.handler.poll_and_handle_next_tc(time_stamp); if res.is_err() { warn!("PUS17 handler failed with error {:?}", res.unwrap_err()); return true; @@ -133,7 +89,7 @@ impl< } PusPacketHandlerResult::CustomSubservice(subservice, token) => { let (tc, _) = PusTcReader::new( - self.pus17_handler + self.handler .service_helper .tc_in_mem_converter .tc_slice_raw(), @@ -145,28 +101,30 @@ impl< if subservice == 128 { info!("Generating test event"); self.test_srv_event_sender - .send((TEST_EVENT.into(), None)) + .send(EventMessage::new(PUS_TEST_SERVICE.id(), TEST_EVENT.into())) .expect("Sending test event failed"); let start_token = self - .pus17_handler + .handler .service_helper - .common - .verification_handler - .start_success(token, &stamp_buf) + .verif_reporter() + .start_success(self.handler.service_helper.tm_sender(), token, &stamp_buf) .expect("Error sending start success"); - self.pus17_handler + self.handler .service_helper - .common - .verification_handler - .completion_success(start_token, &stamp_buf) + .verif_reporter() + .completion_success( + self.handler.service_helper.tm_sender(), + start_token, + &stamp_buf, + ) .expect("Error sending completion success"); } else { let fail_data = [tc.subservice()]; - self.pus17_handler + self.handler .service_helper - .common - .verification_handler + .verif_reporter() .start_failure( + self.handler.service_helper.tm_sender(), token, FailParams::new( &stamp_buf, diff --git a/satrs-example/src/requests.rs b/satrs-example/src/requests.rs index 6703d93..498be3f 100644 --- a/satrs-example/src/requests.rs +++ b/satrs-example/src/requests.rs @@ -1,94 +1,152 @@ use 
std::collections::HashMap; use std::sync::mpsc; -use derive_new::new; +use log::warn; use satrs::action::ActionRequest; use satrs::hk::HkRequest; use satrs::mode::ModeRequest; -use satrs::pus::action::PusActionRequestRouter; -use satrs::pus::hk::PusHkRequestRouter; -use satrs::pus::verification::{TcStateAccepted, VerificationToken}; -use satrs::pus::GenericRoutingError; +use satrs::pus::verification::{ + FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken, +}; +use satrs::pus::{ActiveRequestProvider, EcssTmSenderCore, GenericRoutingError, PusRequestRouter}; use satrs::queue::GenericSendError; -use satrs::TargetId; +use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId}; +use satrs::spacepackets::ecss::tc::PusTcReader; +use satrs::spacepackets::ecss::PusPacket; +use satrs::ComponentId; +use satrs_example::config::components::PUS_ROUTING_SERVICE; +use satrs_example::config::tmtc_err; -#[allow(dead_code)] -#[derive(Clone, Eq, PartialEq, Debug)] +#[derive(Clone, Debug)] #[non_exhaustive] -pub enum Request { +pub enum CompositeRequest { Hk(HkRequest), - Mode(ModeRequest), Action(ActionRequest), } -#[derive(Clone, Eq, PartialEq, Debug, new)] -pub struct TargetedRequest { - pub(crate) target_id: TargetId, - pub(crate) request: Request, +#[derive(Clone)] +pub struct GenericRequestRouter { + pub id: ComponentId, + // All messages which do not have a dedicated queue. + pub composite_router_map: HashMap>>, + pub mode_router_map: HashMap>>, } -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct RequestWithToken { - pub(crate) targeted_request: TargetedRequest, - pub(crate) token: VerificationToken, -} - -impl RequestWithToken { - pub fn new( - target_id: TargetId, - request: Request, - token: VerificationToken, - ) -> Self { +impl Default for GenericRequestRouter { + fn default() -> Self { Self { - targeted_request: TargetedRequest::new(target_id, request), - token, + id: PUS_ROUTING_SERVICE.raw(), + composite_router_map: Default::default(), + mode_router_map: Default::default(), } } } - -#[derive(Default, Clone)] -pub struct GenericRequestRouter(pub HashMap>); - -impl PusHkRequestRouter for GenericRequestRouter { +impl GenericRequestRouter { + pub(crate) fn handle_error_generic( + &self, + active_request: &impl ActiveRequestProvider, + tc: &PusTcReader, + error: GenericRoutingError, + tm_sender: &(impl EcssTmSenderCore + ?Sized), + verif_reporter: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) { + warn!( + "Routing request for service {} failed: {error:?}", + tc.service() + ); + let accepted_token: VerificationToken = active_request + .token() + .try_into() + .expect("token is not in accepted state"); + match error { + GenericRoutingError::UnknownTargetId(id) => { + let apid_target_id = UniqueApidTargetId::from(id); + warn!("Target APID for request: {}", apid_target_id.apid); + warn!("Target Unique ID for request: {}", apid_target_id.unique_id); + let mut fail_data: [u8; 8] = [0; 8]; + fail_data.copy_from_slice(&id.to_be_bytes()); + verif_reporter + .completion_failure( + tm_sender, + accepted_token, + FailParams::new(time_stamp, &tmtc_err::UNKNOWN_TARGET_ID, &fail_data), + ) + .expect("Sending start failure failed"); + } + GenericRoutingError::Send(_) => { + let mut fail_data: [u8; 8] = [0; 8]; + fail_data.copy_from_slice(&active_request.target_id().to_be_bytes()); + verif_reporter + .completion_failure( + tm_sender, + accepted_token, + FailParams::new(time_stamp, &tmtc_err::ROUTING_ERROR, &fail_data), + ) + .expect("Sending start failure 
failed"); + } + } + } +} +impl PusRequestRouter for GenericRequestRouter { type Error = GenericRoutingError; fn route( &self, - target_id: TargetId, + requestor_info: MessageMetadata, + target_id: ComponentId, hk_request: HkRequest, - token: VerificationToken, ) -> Result<(), Self::Error> { - if let Some(sender) = self.0.get(&target_id) { + if let Some(sender) = self.composite_router_map.get(&target_id) { sender - .send(RequestWithToken::new( - target_id, - Request::Hk(hk_request), - token, + .send(GenericMessage::new( + requestor_info, + CompositeRequest::Hk(hk_request), )) - .map_err(|_| GenericRoutingError::SendError(GenericSendError::RxDisconnected))?; + .map_err(|_| GenericRoutingError::Send(GenericSendError::RxDisconnected))?; + return Ok(()); } - Ok(()) + Err(GenericRoutingError::UnknownTargetId(target_id)) } } -impl PusActionRequestRouter for GenericRequestRouter { +impl PusRequestRouter for GenericRequestRouter { type Error = GenericRoutingError; fn route( &self, - target_id: TargetId, + requestor_info: MessageMetadata, + target_id: ComponentId, action_request: ActionRequest, - token: VerificationToken, ) -> Result<(), Self::Error> { - if let Some(sender) = self.0.get(&target_id) { + if let Some(sender) = self.composite_router_map.get(&target_id) { sender - .send(RequestWithToken::new( - target_id, - Request::Action(action_request), - token, + .send(GenericMessage::new( + requestor_info, + CompositeRequest::Action(action_request), )) - .map_err(|_| GenericRoutingError::SendError(GenericSendError::RxDisconnected))?; + .map_err(|_| GenericRoutingError::Send(GenericSendError::RxDisconnected))?; + return Ok(()); } - Ok(()) + Err(GenericRoutingError::UnknownTargetId(target_id)) + } +} + +impl PusRequestRouter for GenericRequestRouter { + type Error = GenericRoutingError; + + fn route( + &self, + requestor_info: MessageMetadata, + target_id: ComponentId, + request: ModeRequest, + ) -> Result<(), Self::Error> { + if let Some(sender) = self.mode_router_map.get(&target_id) { + sender + .send(GenericMessage::new(requestor_info, request)) + .map_err(|_| GenericRoutingError::Send(GenericSendError::RxDisconnected))?; + return Ok(()); + } + Err(GenericRoutingError::UnknownTargetId(target_id)) } } diff --git a/satrs-example/src/tcp.rs b/satrs-example/src/tcp.rs index 014f300..04bb136 100644 --- a/satrs-example/src/tcp.rs +++ b/satrs-example/src/tcp.rs @@ -1,5 +1,5 @@ use std::{ - collections::VecDeque, + collections::{HashSet, VecDeque}, sync::{Arc, Mutex}, }; @@ -10,12 +10,9 @@ use satrs::{ spacepackets::PacketId, tmtc::{CcsdsDistributor, CcsdsError, ReceivesCcsdsTc, TmPacketSourceCore}, }; -use satrs_example::config::PUS_APID; use crate::ccsds::CcsdsReceiver; -pub const PACKET_ID_LOOKUP: &[PacketId] = &[PacketId::const_tc(true, PUS_APID)]; - #[derive(Default, Clone)] pub struct SyncTcpTmSource { tm_queue: Arc>>>, @@ -77,6 +74,7 @@ pub type TcpServerType = TcpSpacepacketsServer< CcsdsError, SyncTcpTmSource, CcsdsDistributor, MpscErrorType>, + HashSet, >; pub struct TcpTask< @@ -103,14 +101,10 @@ impl< cfg: ServerConfig, tm_source: SyncTcpTmSource, tc_receiver: CcsdsDistributor, MpscErrorType>, + packet_id_lookup: HashSet, ) -> Result { Ok(Self { - server: TcpSpacepacketsServer::new( - cfg, - tm_source, - tc_receiver, - Box::new(PACKET_ID_LOOKUP), - )?, + server: TcpSpacepacketsServer::new(cfg, tm_source, tc_receiver, packet_id_lookup)?, }) } diff --git a/satrs-example/src/tm_funnel.rs b/satrs-example/src/tm_funnel.rs index 8b6285f..61cddd1 100644 --- a/satrs-example/src/tm_funnel.rs +++ 
b/satrs-example/src/tm_funnel.rs @@ -4,8 +4,9 @@ use std::{ }; use log::info; +use satrs::pus::{PusTmAsVec, PusTmInPool}; use satrs::{ - pool::{PoolProvider, StoreAddr}, + pool::PoolProvider, seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore}, spacepackets::{ ecss::{tm::PusTmZeroCopyWriter, PusPacket}, @@ -77,16 +78,16 @@ impl TmFunnelCommon { pub struct TmFunnelStatic { common: TmFunnelCommon, shared_tm_store: SharedTmPool, - tm_funnel_rx: mpsc::Receiver, - tm_server_tx: mpsc::SyncSender, + tm_funnel_rx: mpsc::Receiver, + tm_server_tx: mpsc::SyncSender, } impl TmFunnelStatic { pub fn new( shared_tm_store: SharedTmPool, sync_tm_tcp_source: SyncTcpTmSource, - tm_funnel_rx: mpsc::Receiver, - tm_server_tx: mpsc::SyncSender, + tm_funnel_rx: mpsc::Receiver, + tm_server_tx: mpsc::SyncSender, ) -> Self { Self { common: TmFunnelCommon::new(sync_tm_tcp_source), @@ -97,14 +98,14 @@ impl TmFunnelStatic { } pub fn operation(&mut self) { - if let Ok(addr) = self.tm_funnel_rx.recv() { + if let Ok(pus_tm_in_pool) = self.tm_funnel_rx.recv() { // Read the TM, set sequence counter and message counter, and finally update // the CRC. let shared_pool = self.shared_tm_store.clone_backing_pool(); let mut pool_guard = shared_pool.write().expect("Locking TM pool failed"); let mut tm_copy = Vec::new(); pool_guard - .modify(&addr, |buf| { + .modify(&pus_tm_in_pool.store_addr, |buf| { let zero_copy_writer = PusTmZeroCopyWriter::new(buf, MIN_CDS_FIELD_LEN) .expect("Creating TM zero copy writer failed"); self.common.apply_packet_processing(zero_copy_writer); @@ -112,7 +113,7 @@ impl TmFunnelStatic { }) .expect("Reading TM from pool failed"); self.tm_server_tx - .send(addr) + .send(pus_tm_in_pool) .expect("Sending TM to server failed"); // We could also do this step in the update closure, but I'd rather avoid this, could // lead to nested locking. @@ -123,15 +124,15 @@ impl TmFunnelStatic { pub struct TmFunnelDynamic { common: TmFunnelCommon, - tm_funnel_rx: mpsc::Receiver>, - tm_server_tx: mpsc::Sender>, + tm_funnel_rx: mpsc::Receiver, + tm_server_tx: mpsc::Sender, } impl TmFunnelDynamic { pub fn new( sync_tm_tcp_source: SyncTcpTmSource, - tm_funnel_rx: mpsc::Receiver>, - tm_server_tx: mpsc::Sender>, + tm_funnel_rx: mpsc::Receiver, + tm_server_tx: mpsc::Sender, ) -> Self { Self { common: TmFunnelCommon::new(sync_tm_tcp_source), @@ -144,13 +145,13 @@ impl TmFunnelDynamic { if let Ok(mut tm) = self.tm_funnel_rx.recv() { // Read the TM, set sequence counter and message counter, and finally update // the CRC. 
- let zero_copy_writer = PusTmZeroCopyWriter::new(&mut tm, MIN_CDS_FIELD_LEN) + let zero_copy_writer = PusTmZeroCopyWriter::new(&mut tm.packet, MIN_CDS_FIELD_LEN) .expect("Creating TM zero copy writer failed"); self.common.apply_packet_processing(zero_copy_writer); + self.common.sync_tm_tcp_source.add_tm(&tm.packet); self.tm_server_tx - .send(tm.clone()) + .send(tm) .expect("Sending TM to server failed"); - self.common.sync_tm_tcp_source.add_tm(&tm); } } } diff --git a/satrs-example/src/tmtc.rs b/satrs-example/src/tmtc.rs index 0a43504..43d5889 100644 --- a/satrs-example/src/tmtc.rs +++ b/satrs-example/src/tmtc.rs @@ -1,8 +1,7 @@ use log::warn; -use satrs::pus::verification::std_mod::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, +use satrs::pus::{ + EcssTcAndToken, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, ReceivesEcssPusTc, }; -use satrs::pus::{EcssTcAndToken, ReceivesEcssPusTc}; use satrs::spacepackets::SpHeader; use std::sync::mpsc::{self, Receiver, SendError, Sender, SyncSender, TryRecvError}; use thiserror::Error; @@ -100,14 +99,14 @@ pub struct TcSourceTaskStatic { shared_tc_pool: SharedTcPool, tc_receiver: Receiver, tc_buf: [u8; 4096], - pus_receiver: PusReceiver, + pus_receiver: PusReceiver, } impl TcSourceTaskStatic { pub fn new( shared_tc_pool: SharedTcPool, tc_receiver: Receiver, - pus_receiver: PusReceiver, + pus_receiver: PusReceiver, ) -> Self { Self { shared_tc_pool, @@ -164,13 +163,13 @@ impl TcSourceTaskStatic { // TC source components where the heap is the backing memory of the received telecommands. pub struct TcSourceTaskDynamic { pub tc_receiver: Receiver>, - pus_receiver: PusReceiver, + pus_receiver: PusReceiver, } impl TcSourceTaskDynamic { pub fn new( tc_receiver: Receiver>, - pus_receiver: PusReceiver, + pus_receiver: PusReceiver, ) -> Self { Self { tc_receiver, diff --git a/satrs-example/src/udp.rs b/satrs-example/src/udp.rs index b6d1f6b..2cb4823 100644 --- a/satrs-example/src/udp.rs +++ b/satrs-example/src/udp.rs @@ -1,12 +1,11 @@ -use std::{ - net::{SocketAddr, UdpSocket}, - sync::mpsc::Receiver, -}; +use std::net::{SocketAddr, UdpSocket}; +use std::sync::mpsc; use log::{info, warn}; +use satrs::pus::{PusTmAsVec, PusTmInPool}; use satrs::{ hal::std::udp_server::{ReceiveResult, UdpTcServer}, - pool::{PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr}, + pool::{PoolProviderWithGuards, SharedStaticMemoryPool}, tmtc::CcsdsError, }; @@ -15,20 +14,20 @@ pub trait UdpTmHandler { } pub struct StaticUdpTmHandler { - pub tm_rx: Receiver, + pub tm_rx: mpsc::Receiver, pub tm_store: SharedStaticMemoryPool, } impl UdpTmHandler for StaticUdpTmHandler { fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, &recv_addr: &SocketAddr) { - while let Ok(addr) = self.tm_rx.try_recv() { + while let Ok(pus_tm_in_pool) = self.tm_rx.try_recv() { let store_lock = self.tm_store.write(); if store_lock.is_err() { warn!("Locking TM store failed"); continue; } let mut store_lock = store_lock.unwrap(); - let pg = store_lock.read_with_guard(addr); + let pg = store_lock.read_with_guard(pus_tm_in_pool.store_addr); let read_res = pg.read_as_vec(); if read_res.is_err() { warn!("Error reading TM pool data"); @@ -44,20 +43,20 @@ impl UdpTmHandler for StaticUdpTmHandler { } pub struct DynamicUdpTmHandler { - pub tm_rx: Receiver>, + pub tm_rx: mpsc::Receiver, } impl UdpTmHandler for DynamicUdpTmHandler { fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, recv_addr: &SocketAddr) { while let Ok(tm) = self.tm_rx.try_recv() { - if 
tm.len() > 9 { - let service = tm[7]; - let subservice = tm[8]; + if tm.packet.len() > 9 { + let service = tm.packet[7]; + let subservice = tm.packet[8]; info!("Sending PUS TM[{service},{subservice}]") } else { info!("Sending PUS TM"); } - let result = socket.send_to(&tm, recv_addr); + let result = socket.send_to(&tm.packet, recv_addr); if let Err(e) = result { warn!("Sending TM with UDP socket failed: {e}") } @@ -120,7 +119,7 @@ mod tests { }, tmtc::ReceivesTcCore, }; - use satrs_example::config::{OBSW_SERVER_ADDR, PUS_APID}; + use satrs_example::config::{components, OBSW_SERVER_ADDR}; use super::*; @@ -178,8 +177,8 @@ mod tests { udp_tc_server, tm_handler, }; - let mut sph = SpHeader::tc_unseg(PUS_APID, 0, 0).unwrap(); - let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true) + let sph = SpHeader::new_for_unseg_tc(components::Apid::GenericPus as u16, 0, 0); + let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true) .to_vec() .unwrap(); let client = UdpSocket::bind("127.0.0.1:0").expect("Connecting to UDP server failed"); diff --git a/satrs-mib/Cargo.toml b/satrs-mib/Cargo.toml index 9024b86..e97971d 100644 --- a/satrs-mib/Cargo.toml +++ b/satrs-mib/Cargo.toml @@ -24,7 +24,7 @@ optional = true [dependencies.satrs-shared] path = "../satrs-shared" -version = "0.1.2" +version = "0.1.3" features = ["serde"] [dependencies.satrs-mib-codegen] diff --git a/satrs-mib/codegen/Cargo.toml b/satrs-mib/codegen/Cargo.toml index a25358d..43ba785 100644 --- a/satrs-mib/codegen/Cargo.toml +++ b/satrs-mib/codegen/Cargo.toml @@ -28,7 +28,7 @@ features = ["full"] trybuild = { version = "1", features = ["diff"] } [dev-dependencies.satrs-shared] -version = "0.1.2" +version = "0.1.3" path = "../../satrs-shared" [dev-dependencies.satrs-mib] diff --git a/satrs-shared/Cargo.toml b/satrs-shared/Cargo.toml index e706d6a..a6efa36 100644 --- a/satrs-shared/Cargo.toml +++ b/satrs-shared/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "satrs-shared" description = "Components shared by multiple sat-rs crates" -version = "0.1.2" +version = "0.1.3" edition = "2021" authors = ["Robin Mueller "] homepage = "https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/" @@ -19,7 +19,7 @@ optional = true [dependencies.spacepackets] git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git" -version = "0.11.0-rc.0" +version = "0.11.0-rc.2" branch = "main" default-features = false diff --git a/satrs/CHANGELOG.md b/satrs/CHANGELOG.md index b0bc493..7a2e027 100644 --- a/satrs/CHANGELOG.md +++ b/satrs/CHANGELOG.md @@ -8,8 +8,28 @@ and this project adheres to [Semantic Versioning](http://semver.org/). # [unreleased] +## Added + +- Added `params::WritableToBeBytes::to_vec`. +- New `ComponentId` (`u64` typedef for now) which replaces the former `TargetId` as a generic + way to identify components. +- Various abstractions and objects for targeted requests. This includes mode request/reply + types for actions, HK and modes. +- `VerificationReportingProvider::owner_id` method. +- Introduced generic `EventMessage` which is generic over the event type and the additional + parameter type. This message also contains the sender ID which can be useful for debugging + or application layer / FDIR logic. + ## Changed +- `encoding::ccsds::PacketIdValidator` renamed to `ValidatorU16Id`, which lives in the crate root. + It can be used for both CCSDS packet ID and CCSDS APID validation. +- `EventManager::try_event_handling` now expects a mutable error handling closure instead of + returning the occurred errors.
+- Renamed `EventManagerBase` to `EventReportCreator`. +- Renamed `VerificationReporterCore` to `VerificationReportCreator`. +- Removed `VerificationReporterCore`. The high-level API exposed by `VerificationReporter` and + the low-level API exposed by `VerificationReportCreator` should be sufficient for all use-cases. - Refactored `EventManager` to heavily use generics instead of trait objects. - `SendEventProvider` -> `EventSendProvider`. `id` trait method renamed to `channel_id`. - `ListenerTable` -> `ListenerMapProvider` @@ -18,16 +38,37 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Refactored ECSS TM sender abstractions to be generic over different message queue backends. - Refactored Verification Reporter abstractions and implementation to be generic over the sender instead of using trait objects. +- Renamed `WritableToBeBytes::raw_len` to `WritableToBeBytes::written_len` for consistency. - `PusServiceProvider` renamed to `PusServiceDistributor` to make the purpose of the object clearer. - `PusServiceProvider::handle_pus_tc_packet` renamed to `PusServiceDistributor::distribute_packet`. - `PusServiceDistributor` and `CcsdsDistributor` now use generics instead of trait objects. This makes accessing the concrete trait implementations easier as well. +- Major overhaul of the PUS handling module. +- Replace `TargetId` by `ComponentId`. +- Replace most usages of `ChannelId` by `ComponentId`. A dedicated channel ID has limited usage + due to the nature of typed channels in Rust. +- `CheckTimer` renamed to `CountdownProvider`. +- Renamed `TargetId` to `ComponentId`. +- Replaced most `ChannelId` occurrences with `ComponentId`. For typed channels, there is generally + no need for dedicated channel IDs. +- Changed `params::WritableToBeBytes::raw_len` to `written_len` for consistency. +- `EventReporter` caches component ID. +- Renamed `PusService11SchedHandler` to `PusSchedServiceHandler`. +- Fixed general naming of PUS handlers from `handle_one_tc` to `poll_and_handle_next_tc`. +- Reworked verification module: The sender (`impl EcssTmSenderCore`) + now needs to be passed explicitly to the `VerificationReportingProvider` abstraction. This + allows easier sharing of the TM sender component. ## Fixed - Update deprecated API for `PusScheduler::insert_wrapped_tc_cds_short` and `PusScheduler::insert_wrapped_tc_cds_long`. +- `EventReporter` uses the interior mutability pattern to allow a non-mutable API. + +## Removed + +- Removed the `objects` module. # [v0.2.0-rc.0] 2024-02-21 diff --git a/satrs/Cargo.toml b/satrs/Cargo.toml index ad11272..976421d 100644 --- a/satrs/Cargo.toml +++ b/satrs/Cargo.toml @@ -19,7 +19,7 @@ smallvec = "1" crc = "3" [dependencies.satrs-shared] -version = "0.1.2" +version = "0.1.3" path = "../satrs-shared" [dependencies.num_enum] @@ -72,7 +72,7 @@ optional = true [dependencies.spacepackets] git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git" -version = "0.11.0-rc.0" +version = "0.11.0-rc.2" branch = "main" default-features = false @@ -117,6 +117,7 @@ alloc = [ serde = ["dep:serde", "spacepackets/serde", "satrs-shared/serde"] crossbeam = ["crossbeam-channel"] heapless = ["dep:heapless"] +test_util = [] doc-images = [] [package.metadata.docs.rs] diff --git a/satrs/release-checklist.md b/satrs/release-checklist.md index 349a903..e76959e 100644 --- a/satrs/release-checklist.md +++ b/satrs/release-checklist.md @@ -4,11 +4,11 @@ Checklist for new releases # Pre-Release 1.
Make sure any new modules are documented sufficiently enough and check docs with - `cargo +nightly doc --all-features --config 'rustdocflags=["--cfg", "doc_cfg"]' --open`. + `cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docs_rs"]' --open`. 2. Bump version specifier in `Cargo.toml`. 3. Update `CHANGELOG.md`: Convert `unreleased` section into version section with date and add new `unreleased` section. -4. Run `cargo test --all-features`. +4. Run `cargo test --all-features` or `cargo nextest r --all-features` and `cargo test --doc`. 5. Run `cargo fmt` and `cargo clippy`. Check `cargo msrv` against MSRV in `Cargo.toml`. 6. Wait for CI/CD results for EGit and Github. These also check cross-compilation for bare-metal targets. diff --git a/satrs/src/action.rs b/satrs/src/action.rs index 7caeaa6..4aea9f1 100644 --- a/satrs/src/action.rs +++ b/satrs/src/action.rs @@ -1,63 +1,68 @@ -use crate::{pool::StoreAddr, TargetId}; +use crate::{params::Params, pool::StoreAddr}; + +#[cfg(feature = "alloc")] +pub use alloc_mod::*; pub type ActionId = u32; +#[derive(Debug, Eq, PartialEq, Clone)] +pub struct ActionRequest { + pub action_id: ActionId, + pub variant: ActionRequestVariant, +} + +impl ActionRequest { + pub fn new(action_id: ActionId, variant: ActionRequestVariant) -> Self { + Self { action_id, variant } + } +} + #[non_exhaustive] #[derive(Clone, Eq, PartialEq, Debug)] -pub enum ActionRequest { - UnsignedIdAndStoreData { - action_id: ActionId, - data_addr: StoreAddr, - }, +pub enum ActionRequestVariant { + NoData, + StoreData(StoreAddr), #[cfg(feature = "alloc")] - UnsignedIdAndVecData { - action_id: ActionId, - data: alloc::vec::Vec, - }, - #[cfg(feature = "alloc")] - StringIdAndVecData { - action_id: alloc::string::String, - data: alloc::vec::Vec, - }, - #[cfg(feature = "alloc")] - StringIdAndStoreData { - action_id: alloc::string::String, - data: StoreAddr, - }, + VecData(alloc::vec::Vec), } -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct TargetedActionRequest { - target: TargetId, - action_request: ActionRequest, -} - -impl TargetedActionRequest { - pub fn new(target: TargetId, action_request: ActionRequest) -> Self { - Self { - target, - action_request, - } - } +#[derive(Debug, PartialEq, Clone)] +pub struct ActionReply { + pub action_id: ActionId, + pub variant: ActionReplyVariant, } /// A reply to an action request. 
#[non_exhaustive] -#[derive(Clone, Eq, PartialEq, Debug)] -pub enum ActionReply { - CompletionFailed(ActionId), - StepFailed { - id: ActionId, - step: u32, - }, - Completed(ActionId), - #[cfg(feature = "alloc")] - CompletedStringId(alloc::string::String), - #[cfg(feature = "alloc")] - CompletionFailedStringId(alloc::string::String), - #[cfg(feature = "alloc")] - StepFailedStringId { - id: alloc::string::String, - step: u32, - }, +#[derive(Clone, Debug, PartialEq)] +pub enum ActionReplyVariant { + CompletionFailed(Params), + StepFailed { step: u32, reason: Params }, + Completed, } + +#[cfg(feature = "alloc")] +pub mod alloc_mod { + use super::*; + + #[derive(Debug, Eq, PartialEq, Clone)] + pub struct ActionRequestStringId { + pub action_id: alloc::string::String, + pub variant: ActionRequestVariant, + } + + impl ActionRequestStringId { + pub fn new(action_id: alloc::string::String, variant: ActionRequestVariant) -> Self { + Self { action_id, variant } + } + } + + #[derive(Debug, PartialEq, Clone)] + pub struct ActionReplyStringId { + pub action_id: alloc::string::String, + pub variant: ActionReplyVariant, + } +} + +#[cfg(test)] +mod tests {} diff --git a/satrs/src/cfdp/dest.rs b/satrs/src/cfdp/dest.rs index b42df3a..4a87ce6 100644 --- a/satrs/src/cfdp/dest.rs +++ b/satrs/src/cfdp/dest.rs @@ -5,7 +5,7 @@ use std::path::{Path, PathBuf}; use super::{ filestore::{FilestoreError, VirtualFilestore}, user::{CfdpUser, FileSegmentRecvdParams, MetadataReceivedParams}, - CheckTimer, CheckTimerCreator, EntityType, LocalEntityConfig, PacketInfo, PacketTarget, + CheckTimerCreator, CountdownProvider, EntityType, LocalEntityConfig, PacketInfo, PacketTarget, RemoteEntityConfig, RemoteEntityConfigProvider, State, TimerContext, TransactionId, TransactionStep, }; @@ -54,7 +54,7 @@ struct TransferState { completion_disposition: CompletionDisposition, checksum: u32, current_check_count: u32, - current_check_timer: Option>, + current_check_timer: Option>, } impl Default for TransferState { @@ -799,9 +799,9 @@ mod tests { }; use crate::cfdp::{ - filestore::NativeFilestore, user::OwnedMetadataRecvdParams, CheckTimer, CheckTimerCreator, - DefaultFaultHandler, IndicationConfig, RemoteEntityConfig, StdRemoteEntityConfigProvider, - UserFaultHandler, CRC_32, + filestore::NativeFilestore, user::OwnedMetadataRecvdParams, CheckTimerCreator, + CountdownProvider, DefaultFaultHandler, IndicationConfig, RemoteEntityConfig, + StdRemoteEntityConfigProvider, UserFaultHandler, CRC_32, }; use super::*; @@ -1057,7 +1057,7 @@ mod tests { expired: Arc, } - impl CheckTimer for TestCheckTimer { + impl CountdownProvider for TestCheckTimer { fn has_expired(&self) -> bool { self.expired.load(core::sync::atomic::Ordering::Relaxed) } @@ -1088,7 +1088,10 @@ mod tests { } impl CheckTimerCreator for TestCheckTimerCreator { - fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box { + fn get_check_timer_provider( + &self, + timer_context: TimerContext, + ) -> Box { match timer_context { TimerContext::CheckLimit { .. 
} => { Box::new(TestCheckTimer::new(self.check_limit_expired_flag.clone())) diff --git a/satrs/src/cfdp/mod.rs b/satrs/src/cfdp/mod.rs index 8c88fda..c2f6d01 100644 --- a/satrs/src/cfdp/mod.rs +++ b/satrs/src/cfdp/mod.rs @@ -17,6 +17,8 @@ use alloc::boxed::Box; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +use crate::time::CountdownProvider; + #[cfg(feature = "std")] pub mod dest; #[cfg(feature = "alloc")] @@ -45,7 +47,15 @@ pub enum TimerContext { }, } -/// Generic abstraction for a check timer which is used by 3 mechanisms of the CFDP protocol. +/// A generic trait which allows CFDP entities to create check timers which are required to +/// implement special procedures in unacknowledged transmission mode, as specified in 4.6.3.2 +/// and 4.6.3.3. +/// +/// This trait also allows the creation of different check timers depending on context and purpose +/// of the timer, the runtime environment (e.g. standard clock timer vs. timer using an RTC) or +/// other factors. +/// +/// The countdown timer is used by 3 mechanisms of the CFDP protocol. /// /// ## 1. Check limit handling /// @@ -74,22 +84,9 @@ pub enum TimerContext { /// The timer will be used to perform the Positive Acknowledgement Procedures as specified in /// 4.7.1 of the CFDP standard. The expiration period will be provided by the Positive ACK timer /// interval of the remote entity configuration. -pub trait CheckTimer: Debug { - fn has_expired(&self) -> bool; - fn reset(&mut self); -} - -/// A generic trait which allows CFDP entities to create check timers which are required to -/// implement special procedures in unacknowledged transmission mode, as specified in 4.6.3.2 -/// and 4.6.3.3. The [CheckTimer] documentation provides more information about the purpose of the -/// check timer in the context of CFDP. -/// -/// This trait also allows the creation of different check timers depending on context and purpose -/// of the timer, the runtime environment (e.g. standard clock timer vs. timer using a RTC) or -/// other factors. #[cfg(feature = "alloc")] pub trait CheckTimerCreator { - fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box; + fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box; } /// Simple implementation of the [CheckTimerCreator] trait assuming a standard runtime.
@@ -112,7 +109,7 @@ impl StdCheckTimer { } #[cfg(feature = "std")] -impl CheckTimer for StdCheckTimer { +impl CountdownProvider for StdCheckTimer { fn has_expired(&self) -> bool { let elapsed_time = self.start_time.elapsed(); if elapsed_time.as_secs() > self.expiry_time_seconds { diff --git a/satrs/src/encoding/ccsds.rs b/satrs/src/encoding/ccsds.rs index 37694d7..30adccf 100644 --- a/satrs/src/encoding/ccsds.rs +++ b/satrs/src/encoding/ccsds.rs @@ -1,65 +1,4 @@ -#[cfg(feature = "alloc")] -use alloc::vec::Vec; -#[cfg(feature = "alloc")] -use hashbrown::HashSet; -use spacepackets::PacketId; - -use crate::tmtc::ReceivesTcCore; - -pub trait PacketIdLookup { - fn validate(&self, packet_id: u16) -> bool; -} - -#[cfg(feature = "alloc")] -impl PacketIdLookup for Vec { - fn validate(&self, packet_id: u16) -> bool { - self.contains(&packet_id) - } -} - -#[cfg(feature = "alloc")] -impl PacketIdLookup for HashSet { - fn validate(&self, packet_id: u16) -> bool { - self.contains(&packet_id) - } -} - -impl PacketIdLookup for [u16] { - fn validate(&self, packet_id: u16) -> bool { - self.binary_search(&packet_id).is_ok() - } -} - -impl PacketIdLookup for &[u16] { - fn validate(&self, packet_id: u16) -> bool { - self.binary_search(&packet_id).is_ok() - } -} - -#[cfg(feature = "alloc")] -impl PacketIdLookup for Vec { - fn validate(&self, packet_id: u16) -> bool { - self.contains(&PacketId::from(packet_id)) - } -} -#[cfg(feature = "alloc")] -impl PacketIdLookup for HashSet { - fn validate(&self, packet_id: u16) -> bool { - self.contains(&PacketId::from(packet_id)) - } -} - -impl PacketIdLookup for [PacketId] { - fn validate(&self, packet_id: u16) -> bool { - self.binary_search(&PacketId::from(packet_id)).is_ok() - } -} - -impl PacketIdLookup for &[PacketId] { - fn validate(&self, packet_id: u16) -> bool { - self.binary_search(&PacketId::from(packet_id)).is_ok() - } -} +use crate::{tmtc::ReceivesTcCore, ValidatorU16Id}; /// This function parses a given buffer for tightly packed CCSDS space packets. It uses the /// [PacketId] field of the CCSDS packets to detect the start of a CCSDS space packet and then @@ -75,7 +14,7 @@ impl PacketIdLookup for &[PacketId] { /// error will be returned. 
pub fn parse_buffer_for_ccsds_space_packets( buf: &mut [u8], - packet_id_lookup: &(impl PacketIdLookup + ?Sized), + packet_id_validator: &(impl ValidatorU16Id + ?Sized), tc_receiver: &mut (impl ReceivesTcCore + ?Sized), next_write_idx: &mut usize, ) -> Result { @@ -88,7 +27,7 @@ pub fn parse_buffer_for_ccsds_space_packets( break; } let packet_id = u16::from_be_bytes(buf[current_idx..current_idx + 2].try_into().unwrap()); - if packet_id_lookup.validate(packet_id) { + if packet_id_validator.validate(packet_id) { let length_field = u16::from_be_bytes(buf[current_idx + 4..current_idx + 6].try_into().unwrap()); let packet_size = length_field + 7; @@ -123,13 +62,13 @@ mod tests { const TEST_APID_0: u16 = 0x02; const TEST_APID_1: u16 = 0x10; - const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0); - const TEST_PACKET_ID_1: PacketId = PacketId::const_tc(true, TEST_APID_1); + const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0); + const TEST_PACKET_ID_1: PacketId = PacketId::new_for_tc(true, TEST_APID_1); #[test] fn test_basic() { - let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); - let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); + let sph = SpHeader::new_from_apid(TEST_APID_0); + let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true); let mut buffer: [u8; 32] = [0; 32]; let packet_len = ping_tc .write_to_bytes(&mut buffer) @@ -155,9 +94,9 @@ mod tests { #[test] fn test_multi_packet() { - let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); - let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); - let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true); + let sph = SpHeader::new_from_apid(TEST_APID_0); + let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true); + let action_tc = PusTcCreator::new_simple(sph, 8, 0, &[], true); let mut buffer: [u8; 32] = [0; 32]; let packet_len_ping = ping_tc .write_to_bytes(&mut buffer) @@ -190,10 +129,10 @@ mod tests { #[test] fn test_multi_apid() { - let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); - let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); - sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap(); - let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true); + let sph = SpHeader::new_from_apid(TEST_APID_0); + let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true); + let sph = SpHeader::new_from_apid(TEST_APID_1); + let action_tc = PusTcCreator::new_simple(sph, 8, 0, &[], true); let mut buffer: [u8; 32] = [0; 32]; let packet_len_ping = ping_tc .write_to_bytes(&mut buffer) @@ -226,10 +165,10 @@ mod tests { #[test] fn test_split_packet_multi() { - let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); - let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); - sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap(); - let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true); + let ping_tc = + PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true); + let action_tc = + PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 8, 0, &[], true); let mut buffer: [u8; 32] = [0; 32]; let packet_len_ping = ping_tc .write_to_bytes(&mut buffer) @@ -257,8 +196,8 @@ mod tests { #[test] fn test_one_split_packet() { - let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); - let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); + let ping_tc = + PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], 
true); let mut buffer: [u8; 32] = [0; 32]; let packet_len_ping = ping_tc .write_to_bytes(&mut buffer) diff --git a/satrs/src/event_man.rs b/satrs/src/event_man.rs index 304f9a1..38752eb 100644 --- a/satrs/src/event_man.rs +++ b/satrs/src/event_man.rs @@ -11,7 +11,7 @@ //! about events first: //! //! The event manager has a listener table abstracted by the [ListenerMapProvider], which maps -//! listener groups identified by [ListenerKey]s to a [sender ID][ChannelId]. +//! listener groups identified by [ListenerKey]s to a [listener ID][ComponentId]. //! It also contains a sender table abstracted by the [SenderMapProvider] which maps these sender //! IDs to concrete [EventSendProvider]s. A simple approach would be to use one send event provider //! for each OBSW thread and then subscribe for all interesting events for a particular thread @@ -28,8 +28,8 @@ //! manager. //! 3. The event manager receives the receiver component as part of a [EventReceiveProvider] //! implementation so all events are routed to the manager. -//! 4. Create the [send event providers][EventSendProvider]s which allow routing events to -//! subscribers. You can now use their [sender IDs][EventSendProvider::channel_id] to subscribe +//! 4. Create the [event sender map][SenderMapProvider]s which allow routing events to +//! subscribers. You can now use the subscriber component IDs to subscribe //! for event groups, for example by using the [EventManager::subscribe_single] method. //! 5. Add the send provider as well using the [EventManager::add_sender] call so the event //! manager can route listener groups to a the send provider. @@ -45,12 +45,13 @@ //! for a concrete example using multi-threading where events are routed to //! different threads. use crate::events::{EventU16, EventU32, GenericEvent, LargestEventRaw, LargestGroupIdRaw}; -use crate::params::{Params, ParamsHeapless}; +use crate::params::Params; use crate::queue::GenericSendError; +use core::fmt::Debug; use core::marker::PhantomData; use core::slice::Iter; -use crate::ChannelId; +use crate::ComponentId; #[cfg(feature = "alloc")] pub use alloc_mod::*; @@ -65,87 +66,122 @@ pub enum ListenerKey { All, } -pub type EventWithHeaplessAuxData = (Event, Option); -pub type EventU32WithHeaplessAuxData = EventWithHeaplessAuxData; -pub type EventU16WithHeaplessAuxData = EventWithHeaplessAuxData; +#[derive(Debug)] +pub struct EventMessage { + sender_id: ComponentId, + event: Event, + params: Option, +} -pub type EventWithAuxData = (Event, Option); -pub type EventU32WithAuxData = EventWithAuxData; -pub type EventU16WithAuxData = EventWithAuxData; - -pub trait EventSendProvider { - fn channel_id(&self) -> ChannelId; - - fn send_no_data(&self, event: EV) -> Result<(), GenericSendError> { - self.send(event, None) +impl EventMessage { + pub fn new_generic( + sender_id: ComponentId, + event: Event, + params: Option<&ParamProvider>, + ) -> Self { + Self { + sender_id, + event, + params: params.cloned(), + } } - fn send(&self, event: EV, aux_data: Option) -> Result<(), GenericSendError>; + pub fn sender_id(&self) -> ComponentId { + self.sender_id + } + + pub fn event(&self) -> Event { + self.event + } + + pub fn params(&self) -> Option<&ParamProvider> { + self.params.as_ref() + } + + pub fn new(sender_id: ComponentId, event: Event) -> Self { + Self::new_generic(sender_id, event, None) + } + + pub fn new_with_params(sender_id: ComponentId, event: Event, params: &ParamProvider) -> Self { + Self::new_generic(sender_id, event, Some(params)) + } +} + +pub type EventMessageU32 = 
EventMessage; +pub type EventMessageU16 = EventMessage; + +/// Generic abstraction +pub trait EventSendProvider { + type Error; + + fn target_id(&self) -> ComponentId; + + fn send(&self, message: EventMessage) -> Result<(), Self::Error>; } /// Generic abstraction for an event receiver. -pub trait EventReceiveProvider { +pub trait EventReceiveProvider { + type Error; + /// This function has to be provided by any event receiver. A call may or may not return /// an event and optional auxiliary data. - fn try_recv_event(&self) -> Option<(Event, Option)>; + fn try_recv_event(&self) -> Result>, Self::Error>; } pub trait ListenerMapProvider { #[cfg(feature = "alloc")] - #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] fn get_listeners(&self) -> alloc::vec::Vec; fn contains_listener(&self, key: &ListenerKey) -> bool; - fn get_listener_ids(&self, key: &ListenerKey) -> Option>; - fn add_listener(&mut self, key: ListenerKey, sender_id: ChannelId) -> bool; + fn get_listener_ids(&self, key: &ListenerKey) -> Option>; + fn add_listener(&mut self, key: ListenerKey, listener_id: ComponentId) -> bool; fn remove_duplicates(&mut self, key: &ListenerKey); } pub trait SenderMapProvider< - SP: EventSendProvider, - EV: GenericEvent = EventU32, - AUX = Params, + EventSender: EventSendProvider, + Event: GenericEvent = EventU32, + ParamProvider: Debug = Params, > { - fn contains_send_event_provider(&self, id: &ChannelId) -> bool; + fn contains_send_event_provider(&self, target_id: &ComponentId) -> bool; - fn get_send_event_provider(&self, id: &ChannelId) -> Option<&SP>; - fn add_send_event_provider(&mut self, send_provider: SP) -> bool; + fn get_send_event_provider(&self, target_id: &ComponentId) -> Option<&EventSender>; + fn add_send_event_provider(&mut self, send_provider: EventSender) -> bool; } /// Generic event manager implementation. /// /// # Generics /// -/// * `ERP`: [EventReceiveProvider] used to receive all events. -/// * `SMP`: [SenderMapProvider] which maps channel IDs to send providers. -/// * `LTR`: [ListenerMapProvider] which maps listener keys to channel IDs. -/// * `SP`: [EventSendProvider] contained within the sender map which sends the events. -/// * `EV`: The event type. This type must implement the [GenericEvent]. Currently only [EventU32] +/// * `EventReceiver`: [EventReceiveProvider] used to receive all events. +/// * `SenderMap`: [SenderMapProvider] which maps channel IDs to send providers. +/// * `ListenerMap`: [ListenerMapProvider] which maps listener keys to channel IDs. +/// * `EventSender`: [EventSendProvider] contained within the sender map which sends the events. +/// * `Ev`: The event type. This type must implement the [GenericEvent]. Currently only [EventU32] /// and [EventU16] are supported. 
-/// * `AUX`: Auxiliary data which is sent with the event to provide optional context information +/// * `Data`: Auxiliary data which is sent with the event to provide optional context information pub struct EventManager< - ERP: EventReceiveProvider, - SMP: SenderMapProvider, - LTR: ListenerMapProvider, - SP: EventSendProvider, - EV: GenericEvent = EventU32, - AUX = Params, + EventReceiver: EventReceiveProvider, + SenderMap: SenderMapProvider, + ListenerMap: ListenerMapProvider, + EventSender: EventSendProvider, + Event: GenericEvent = EventU32, + ParamProvider: Debug = Params, > { - event_receiver: ERP, - sender_map: SMP, - listener_map: LTR, - phantom: core::marker::PhantomData<(SP, EV, AUX)>, + event_receiver: EventReceiver, + sender_map: SenderMap, + listener_map: ListenerMap, + phantom: core::marker::PhantomData<(EventSender, Event, ParamProvider)>, } #[derive(Debug)] -pub enum EventRoutingResult { +pub enum EventRoutingResult { /// No event was received Empty, /// An event was received and routed to listeners. Handled { num_recipients: u32, - event: EV, - aux_data: Option, + event_msg: EventMessage, }, } @@ -153,35 +189,29 @@ pub enum EventRoutingResult { pub enum EventRoutingError { Send(GenericSendError), NoSendersForKey(ListenerKey), - NoSenderForId(ChannelId), -} - -#[derive(Debug)] -pub struct EventRoutingErrorsWithResult { - pub result: EventRoutingResult, - pub errors: [Option; 3], + NoSenderForId(ComponentId), } impl< - ER: EventReceiveProvider, - S: SenderMapProvider, - L: ListenerMapProvider, - SP: EventSendProvider, - EV: GenericEvent + Copy, - AUX: Clone, - > EventManager + EventReceiver: EventReceiveProvider, + SenderMap: SenderMapProvider, + ListenerMap: ListenerMapProvider, + EventSender: EventSendProvider, + Event: GenericEvent + Copy, + ParamProvider: Debug, + > EventManager { pub fn remove_duplicates(&mut self, key: &ListenerKey) { self.listener_map.remove_duplicates(key) } /// Subscribe for a unique event. - pub fn subscribe_single(&mut self, event: &EV, sender_id: ChannelId) { + pub fn subscribe_single(&mut self, event: &Event, sender_id: ComponentId) { self.update_listeners(ListenerKey::Single(event.raw_as_largest_type()), sender_id); } /// Subscribe for an event group. - pub fn subscribe_group(&mut self, group_id: LargestGroupIdRaw, sender_id: ChannelId) { + pub fn subscribe_group(&mut self, group_id: LargestGroupIdRaw, sender_id: ComponentId) { self.update_listeners(ListenerKey::Group(group_id), sender_id); } @@ -189,21 +219,24 @@ impl< /// /// For example, this can be useful for a handler component which sends every event as /// a telemetry packet. 
- pub fn subscribe_all(&mut self, sender_id: ChannelId) { + pub fn subscribe_all(&mut self, sender_id: ComponentId) { self.update_listeners(ListenerKey::All, sender_id); } } - impl< - ERP: EventReceiveProvider, - SMP: SenderMapProvider, - LTR: ListenerMapProvider, - SP: EventSendProvider, - EV: GenericEvent + Copy, - AUX: Clone, - > EventManager + EventReceiver: EventReceiveProvider, + SenderMap: SenderMapProvider, + ListenerMap: ListenerMapProvider, + EventSenderMap: EventSendProvider, + Event: GenericEvent + Copy, + ParamProvider: Debug, + > EventManager { - pub fn new_with_custom_maps(event_receiver: ERP, sender_map: SMP, listener_map: LTR) -> Self { + pub fn new_with_custom_maps( + event_receiver: EventReceiver, + sender_map: SenderMap, + listener_map: ListenerMap, + ) -> Self { EventManager { listener_map, sender_map, @@ -213,81 +246,79 @@ impl< } /// Add a new sender component which can be used to send events to subscribers. - pub fn add_sender(&mut self, send_provider: SP) { + pub fn add_sender(&mut self, send_provider: EventSenderMap) { if !self .sender_map - .contains_send_event_provider(&send_provider.channel_id()) + .contains_send_event_provider(&send_provider.target_id()) { self.sender_map.add_send_event_provider(send_provider); } } /// Generic function to update the event subscribers. - fn update_listeners(&mut self, key: ListenerKey, sender_id: ChannelId) { + fn update_listeners(&mut self, key: ListenerKey, sender_id: ComponentId) { self.listener_map.add_listener(key, sender_id); } +} +impl< + EventReceiver: EventReceiveProvider, + SenderMap: SenderMapProvider, + ListenerMap: ListenerMapProvider, + EventSenderMap: EventSendProvider, + Event: GenericEvent + Copy, + ParamProvider: Clone + Debug, + > EventManager +{ /// This function will use the cached event receiver and try to receive one event. /// If an event was received, it will try to route that event to all subscribed event listeners. /// If this works without any issues, the [EventRoutingResult] will contain context information /// about the routed event. /// - /// This function will track up to 3 errors returned as part of the - /// [EventRoutingErrorsWithResult] error struct. - pub fn try_event_handling( + /// If an error occurs during the routing, the error handler will be called. The error handler + /// should take a reference to the event message as the first argument, and the routing error + /// as the second argument. 
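As the unit tests further below demonstrate, the error handler can simply be a closure. An illustrative sketch, assuming event_man is an already constructed event manager:

let error_handler = |event_msg: &EventMessageU32, error: EventRoutingError| {
    // A flight implementation might raise an error event or log the failed routing
    // attempt here instead of panicking.
    panic!("event routing error for {:?}: {:?}", event_msg, error);
};
let result = event_man.try_event_handling(&error_handler);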
+ pub fn try_event_handling, EventRoutingError)>( &self, - ) -> Result, EventRoutingErrorsWithResult> { - let mut err_idx = 0; - let mut err_slice = [None, None, None]; + mut error_handler: E, + ) -> EventRoutingResult { let mut num_recipients = 0; - let mut add_error = |error: EventRoutingError| { - if err_idx < 3 { - err_slice[err_idx] = Some(error); - err_idx += 1; - } - }; - let mut send_handler = |key: &ListenerKey, event: EV, aux_data: &Option| { - if self.listener_map.contains_listener(key) { - if let Some(ids) = self.listener_map.get_listener_ids(key) { - for id in ids { - if let Some(sender) = self.sender_map.get_send_event_provider(id) { - if let Err(e) = sender.send(event, aux_data.clone()) { - add_error(EventRoutingError::Send(e)); + let mut send_handler = + |key: &ListenerKey, event_msg: &EventMessage| { + if self.listener_map.contains_listener(key) { + if let Some(ids) = self.listener_map.get_listener_ids(key) { + for id in ids { + if let Some(sender) = self.sender_map.get_send_event_provider(id) { + if let Err(e) = sender.send(EventMessage::new_generic( + *id, + event_msg.event, + event_msg.params.as_ref(), + )) { + error_handler(event_msg, EventRoutingError::Send(e)); + } else { + num_recipients += 1; + } } else { - num_recipients += 1; + error_handler(event_msg, EventRoutingError::NoSenderForId(*id)); } - } else { - add_error(EventRoutingError::NoSenderForId(*id)); } + } else { + error_handler(event_msg, EventRoutingError::NoSendersForKey(*key)); } - } else { - add_error(EventRoutingError::NoSendersForKey(*key)); } - } - }; - if let Some((event, aux_data)) = self.event_receiver.try_recv_event() { - let single_key = ListenerKey::Single(event.raw_as_largest_type()); - send_handler(&single_key, event, &aux_data); - let group_key = ListenerKey::Group(event.group_id_as_largest_type()); - send_handler(&group_key, event, &aux_data); - send_handler(&ListenerKey::All, event, &aux_data); - if err_idx > 0 { - return Err(EventRoutingErrorsWithResult { - result: EventRoutingResult::Handled { - num_recipients, - event, - aux_data, - }, - errors: err_slice, - }); - } - return Ok(EventRoutingResult::Handled { + }; + if let Ok(Some(event_msg)) = self.event_receiver.try_recv_event() { + let single_key = ListenerKey::Single(event_msg.event.raw_as_largest_type()); + send_handler(&single_key, &event_msg); + let group_key = ListenerKey::Group(event_msg.event.group_id_as_largest_type()); + send_handler(&group_key, &event_msg); + send_handler(&ListenerKey::All, &event_msg); + return EventRoutingResult::Handled { num_recipients, - event, - aux_data, - }); + event_msg, + }; } - Ok(EventRoutingResult::Empty) + EventRoutingResult::Empty } } @@ -311,23 +342,31 @@ pub mod alloc_mod { /// and the [DefaultListenerMap]. It uses /// [bounded mpsc senders](https://doc.rust-lang.org/std/sync/mpsc/struct.SyncSender.html) as the /// message queue backend. 
- pub type EventManagerWithBoundedMpsc = EventManager< + pub type EventManagerWithBoundedMpsc = EventManager< MpscEventReceiver, - DefaultSenderMap, EV, AUX>, + DefaultSenderMap, Event, ParamProvider>, DefaultListenerMap, - EventSenderMpscBounded, + EventSenderMpscBounded, >; impl< - ER: EventReceiveProvider, - SP: EventSendProvider, - EV: GenericEvent + Copy, - AUX: 'static, - > EventManager, DefaultListenerMap, SP, EV, AUX> + EventReceiver: EventReceiveProvider, + EventSender: EventSendProvider, + Event: GenericEvent + Copy, + ParamProvider: 'static + Debug, + > + EventManager< + EventReceiver, + DefaultSenderMap, + DefaultListenerMap, + EventSender, + Event, + ParamProvider, + > { /// Create an event manager where the sender table will be the [DefaultSenderMap] /// and the listener table will be the [DefaultListenerMap]. - pub fn new(event_receiver: ER) -> Self { + pub fn new(event_receiver: EventReceiver) -> Self { Self { listener_map: DefaultListenerMap::default(), sender_map: DefaultSenderMap::default(), @@ -342,7 +381,7 @@ pub mod alloc_mod { /// Simple implementation which uses a [HashMap] and a [Vec] internally. #[derive(Default)] pub struct DefaultListenerMap { - listeners: HashMap>, + listeners: HashMap>, } impl ListenerMapProvider for DefaultListenerMap { @@ -358,11 +397,11 @@ pub mod alloc_mod { self.listeners.contains_key(key) } - fn get_listener_ids(&self, key: &ListenerKey) -> Option> { + fn get_listener_ids(&self, key: &ListenerKey) -> Option> { self.listeners.get(key).map(|vec| vec.iter()) } - fn add_listener(&mut self, key: ListenerKey, sender_id: ChannelId) -> bool { + fn add_listener(&mut self, key: ListenerKey, sender_id: ComponentId) -> bool { if let Some(existing_list) = self.listeners.get_mut(&key) { existing_list.push(sender_id); } else { @@ -384,16 +423,19 @@ pub mod alloc_mod { /// /// Simple implementation which uses a [HashMap] internally. 
pub struct DefaultSenderMap< - SP: EventSendProvider, - EV: GenericEvent = EventU32, - AUX = Params, + EventSender: EventSendProvider, + Event: GenericEvent = EventU32, + ParamProvider: Debug = Params, > { - senders: HashMap, - phantom: PhantomData<(EV, AUX)>, + senders: HashMap, + phantom: PhantomData<(Event, ParamProvider)>, } - impl, EV: GenericEvent, AUX> Default - for DefaultSenderMap + impl< + EventSender: EventSendProvider, + Event: GenericEvent, + ParamProvider: Debug, + > Default for DefaultSenderMap { fn default() -> Self { Self { @@ -403,21 +445,25 @@ pub mod alloc_mod { } } - impl, EV: GenericEvent, AUX> SenderMapProvider - for DefaultSenderMap + impl< + EventSender: EventSendProvider, + Event: GenericEvent, + ParamProvider: Debug, + > SenderMapProvider + for DefaultSenderMap { - fn contains_send_event_provider(&self, id: &ChannelId) -> bool { + fn contains_send_event_provider(&self, id: &ComponentId) -> bool { self.senders.contains_key(id) } - fn get_send_event_provider(&self, id: &ChannelId) -> Option<&SP> { + fn get_send_event_provider(&self, id: &ComponentId) -> Option<&EventSender> { self.senders .get(id) - .filter(|sender| sender.channel_id() == *id) + .filter(|sender| sender.target_id() == *id) } - fn add_send_event_provider(&mut self, send_provider: SP) -> bool { - let id = send_provider.channel_id(); + fn add_send_event_provider(&mut self, send_provider: EventSender) -> bool { + let id = send_provider.target_id(); if self.senders.contains_key(&id) { return false; } @@ -428,26 +474,33 @@ pub mod alloc_mod { #[cfg(feature = "std")] pub mod std_mod { + use crate::queue::GenericReceiveError; + use super::*; use std::sync::mpsc; pub struct MpscEventReceiver { - mpsc_receiver: mpsc::Receiver<(Event, Option)>, + receiver: mpsc::Receiver>, } impl MpscEventReceiver { - pub fn new(receiver: mpsc::Receiver<(Event, Option)>) -> Self { - Self { - mpsc_receiver: receiver, - } + pub fn new(receiver: mpsc::Receiver>) -> Self { + Self { receiver } } } impl EventReceiveProvider for MpscEventReceiver { - fn try_recv_event(&self) -> Option> { - if let Ok(event_and_data) = self.mpsc_receiver.try_recv() { - return Some(event_and_data); + type Error = GenericReceiveError; + + fn try_recv_event(&self) -> Result>, Self::Error> { + match self.receiver.try_recv() { + Ok(msg) => Ok(Some(msg)), + Err(e) => match e { + mpsc::TryRecvError::Empty => Ok(None), + mpsc::TryRecvError::Disconnected => { + Err(GenericReceiveError::TxDisconnected(None)) + } + }, } - None } } @@ -458,23 +511,26 @@ pub mod std_mod { /// send events. #[derive(Clone)] pub struct EventSenderMpsc { - id: u32, - sender: mpsc::Sender<(Event, Option)>, + target_id: ComponentId, + sender: mpsc::Sender>, } impl EventSenderMpsc { - pub fn new(id: u32, sender: mpsc::Sender<(Event, Option)>) -> Self { - Self { id, sender } + pub fn new(target_id: ComponentId, sender: mpsc::Sender>) -> Self { + Self { target_id, sender } } } impl EventSendProvider for EventSenderMpsc { - fn channel_id(&self) -> u32 { - self.id + type Error = GenericSendError; + + fn target_id(&self) -> ComponentId { + self.target_id } - fn send(&self, event: Event, aux_data: Option) -> Result<(), GenericSendError> { + + fn send(&self, event_msg: EventMessage) -> Result<(), GenericSendError> { self.sender - .send((event, aux_data)) + .send(event_msg) .map_err(|_| GenericSendError::RxDisconnected) } } @@ -483,19 +539,19 @@ pub mod std_mod { /// events. This has the advantage that the channel is bounded and thus more deterministic. 
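Mirroring the unit tests below, a bounded sender is constructed from a standard sync channel, using the EventU32SenderMpscBounded alias. The target ID and capacity used here are purely illustrative:

use std::sync::mpsc;

let (event_tx, _event_rx) = mpsc::sync_channel(3);
// The capacity is passed explicitly so that a full queue can be reported as a
// GenericSendError::QueueFull error with a meaningful limit.
let bounded_sender = EventU32SenderMpscBounded::new(1, event_tx, 3);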
#[derive(Clone)] pub struct EventSenderMpscBounded { - channel_id: u32, - sender: mpsc::SyncSender<(Event, Option)>, + target_id: ComponentId, + sender: mpsc::SyncSender>, capacity: usize, } impl EventSenderMpscBounded { pub fn new( - channel_id: u32, - sender: mpsc::SyncSender<(Event, Option)>, + target_id: ComponentId, + sender: mpsc::SyncSender>, capacity: usize, ) -> Self { Self { - channel_id, + target_id, sender, capacity, } @@ -503,11 +559,14 @@ pub mod std_mod { } impl EventSendProvider for EventSenderMpscBounded { - fn channel_id(&self) -> u32 { - self.channel_id + type Error = GenericSendError; + + fn target_id(&self) -> ComponentId { + self.target_id } - fn send(&self, event: Event, aux_data: Option) -> Result<(), GenericSendError> { - if let Err(e) = self.sender.try_send((event, aux_data)) { + + fn send(&self, event_msg: EventMessage) -> Result<(), Self::Error> { + if let Err(e) = self.sender.try_send(event_msg) { return match e { mpsc::TrySendError::Full(_) => { Err(GenericSendError::QueueFull(Some(self.capacity as u32))) @@ -530,19 +589,20 @@ mod tests { use super::*; use crate::event_man::EventManager; use crate::events::{EventU32, GenericEvent, Severity}; - use crate::params::ParamsRaw; + use crate::params::{ParamsHeapless, ParamsRaw}; + use crate::pus::test_util::{TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1}; use std::format; - use std::sync::mpsc::{self, channel, Receiver, Sender}; + use std::sync::mpsc::{self}; const TEST_EVENT: EventU32 = EventU32::const_new(Severity::INFO, 0, 5); fn check_next_event( expected: EventU32, - receiver: &Receiver, + receiver: &mpsc::Receiver, ) -> Option { - if let Ok(event) = receiver.try_recv() { - assert_eq!(event.0, expected); - return event.1; + if let Ok(event_msg) = receiver.try_recv() { + assert_eq!(event_msg.event, expected); + return event_msg.params; } None } @@ -555,17 +615,16 @@ mod tests { assert!(matches!(res, EventRoutingResult::Handled { .. })); if let EventRoutingResult::Handled { num_recipients, - event, - .. 
+ event_msg, } = res { - assert_eq!(event, expected); + assert_eq!(event_msg.event, expected); assert_eq!(num_recipients, expected_num_sent); } } - fn generic_event_man() -> (Sender, EventManagerWithMpsc) { - let (event_sender, manager_queue) = channel(); + fn generic_event_man() -> (mpsc::Sender, EventManagerWithMpsc) { + let (event_sender, manager_queue) = mpsc::channel(); let event_man_receiver = MpscEventReceiver::new(manager_queue); (event_sender, EventManager::new(event_man_receiver)) } @@ -575,48 +634,56 @@ mod tests { let (event_sender, mut event_man) = generic_event_man(); let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap(); let event_grp_1_0 = EventU32::new(Severity::HIGH, 1, 0).unwrap(); - let (single_event_sender, single_event_receiver) = channel(); + let (single_event_sender, single_event_receiver) = mpsc::channel(); let single_event_listener = EventSenderMpsc::new(0, single_event_sender); - event_man.subscribe_single(&event_grp_0, single_event_listener.channel_id()); + event_man.subscribe_single(&event_grp_0, single_event_listener.target_id()); event_man.add_sender(single_event_listener); - let (group_event_sender_0, group_event_receiver_0) = channel(); + let (group_event_sender_0, group_event_receiver_0) = mpsc::channel(); let group_event_listener = EventU32SenderMpsc::new(1, group_event_sender_0); - event_man.subscribe_group(event_grp_1_0.group_id(), group_event_listener.channel_id()); + event_man.subscribe_group(event_grp_1_0.group_id(), group_event_listener.target_id()); event_man.add_sender(group_event_listener); + let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| { + panic!("routing error occurred for event {:?}: {:?}", event_msg, e); + }; // Test event with one listener event_sender - .send((event_grp_0, None)) + .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_grp_0)) .expect("Sending single error failed"); - let res = event_man.try_event_handling(); - assert!(res.is_ok()); - check_handled_event(res.unwrap(), event_grp_0, 1); + let res = event_man.try_event_handling(&error_handler); + // assert!(res.is_ok()); + check_handled_event(res, event_grp_0, 1); check_next_event(event_grp_0, &single_event_receiver); // Test event which is sent to all group listeners event_sender - .send((event_grp_1_0, None)) + .send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_grp_1_0)) .expect("Sending group error failed"); - let res = event_man.try_event_handling(); - assert!(res.is_ok()); - check_handled_event(res.unwrap(), event_grp_1_0, 1); + let res = event_man.try_event_handling(&error_handler); + check_handled_event(res, event_grp_1_0, 1); check_next_event(event_grp_1_0, &group_event_receiver_0); } #[test] - fn test_with_basic_aux_data() { + fn test_with_basic_params() { + let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| { + panic!("routing error occurred for event {:?}: {:?}", event_msg, e); + }; let (event_sender, mut event_man) = generic_event_man(); let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap(); - let (single_event_sender, single_event_receiver) = channel(); + let (single_event_sender, single_event_receiver) = mpsc::channel(); let single_event_listener = EventSenderMpsc::new(0, single_event_sender); - event_man.subscribe_single(&event_grp_0, single_event_listener.channel_id()); + event_man.subscribe_single(&event_grp_0, single_event_listener.target_id()); event_man.add_sender(single_event_listener); event_sender - .send((event_grp_0, Some(Params::Heapless((2_u32, 3_u32).into())))) + 
.send(EventMessage::new_with_params( + TEST_COMPONENT_ID_0.id(), + event_grp_0, + &Params::Heapless((2_u32, 3_u32).into()), + )) .expect("Sending group error failed"); - let res = event_man.try_event_handling(); - assert!(res.is_ok()); - check_handled_event(res.unwrap(), event_grp_0, 1); + let res = event_man.try_event_handling(&error_handler); + check_handled_event(res, event_grp_0, 1); let aux = check_next_event(event_grp_0, &single_event_receiver); assert!(aux.is_some()); let aux = aux.unwrap(); @@ -631,38 +698,37 @@ mod tests { /// Test listening for multiple groups #[test] fn test_multi_group() { + let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| { + panic!("routing error occurred for event {:?}: {:?}", event_msg, e); + }; let (event_sender, mut event_man) = generic_event_man(); - let res = event_man.try_event_handling(); - assert!(res.is_ok()); - let hres = res.unwrap(); - assert!(matches!(hres, EventRoutingResult::Empty)); + let res = event_man.try_event_handling(error_handler); + assert!(matches!(res, EventRoutingResult::Empty)); let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap(); let event_grp_1_0 = EventU32::new(Severity::HIGH, 1, 0).unwrap(); - let (event_grp_0_sender, event_grp_0_receiver) = channel(); + let (event_grp_0_sender, event_grp_0_receiver) = mpsc::channel(); let event_grp_0_and_1_listener = EventU32SenderMpsc::new(0, event_grp_0_sender); event_man.subscribe_group( event_grp_0.group_id(), - event_grp_0_and_1_listener.channel_id(), + event_grp_0_and_1_listener.target_id(), ); event_man.subscribe_group( event_grp_1_0.group_id(), - event_grp_0_and_1_listener.channel_id(), + event_grp_0_and_1_listener.target_id(), ); event_man.add_sender(event_grp_0_and_1_listener); event_sender - .send((event_grp_0, None)) + .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_grp_0)) .expect("Sending Event Group 0 failed"); event_sender - .send((event_grp_1_0, None)) + .send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_grp_1_0)) .expect("Sendign Event Group 1 failed"); - let res = event_man.try_event_handling(); - assert!(res.is_ok()); - check_handled_event(res.unwrap(), event_grp_0, 1); - let res = event_man.try_event_handling(); - assert!(res.is_ok()); - check_handled_event(res.unwrap(), event_grp_1_0, 1); + let res = event_man.try_event_handling(error_handler); + check_handled_event(res, event_grp_0, 1); + let res = event_man.try_event_handling(error_handler); + check_handled_event(res, event_grp_1_0, 1); check_next_event(event_grp_0, &event_grp_0_receiver); check_next_event(event_grp_1_0, &event_grp_0_receiver); @@ -672,42 +738,42 @@ mod tests { /// to both group and single events from one listener #[test] fn test_listening_to_same_event_and_multi_type() { + let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| { + panic!("routing error occurred for event {:?}: {:?}", event_msg, e); + }; let (event_sender, mut event_man) = generic_event_man(); let event_0 = EventU32::new(Severity::INFO, 0, 5).unwrap(); let event_1 = EventU32::new(Severity::HIGH, 1, 0).unwrap(); - let (event_0_tx_0, event_0_rx_0) = channel(); - let (event_0_tx_1, event_0_rx_1) = channel(); + let (event_0_tx_0, event_0_rx_0) = mpsc::channel(); + let (event_0_tx_1, event_0_rx_1) = mpsc::channel(); let event_listener_0 = EventU32SenderMpsc::new(0, event_0_tx_0); let event_listener_1 = EventU32SenderMpsc::new(1, event_0_tx_1); - let event_listener_0_sender_id = event_listener_0.channel_id(); + let event_listener_0_sender_id = event_listener_0.target_id(); 
event_man.subscribe_single(&event_0, event_listener_0_sender_id); event_man.add_sender(event_listener_0); - let event_listener_1_sender_id = event_listener_1.channel_id(); + let event_listener_1_sender_id = event_listener_1.target_id(); event_man.subscribe_single(&event_0, event_listener_1_sender_id); event_man.add_sender(event_listener_1); event_sender - .send((event_0, None)) + .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_0)) .expect("Triggering Event 0 failed"); - let res = event_man.try_event_handling(); - assert!(res.is_ok()); - check_handled_event(res.unwrap(), event_0, 2); + let res = event_man.try_event_handling(error_handler); + check_handled_event(res, event_0, 2); check_next_event(event_0, &event_0_rx_0); check_next_event(event_0, &event_0_rx_1); event_man.subscribe_group(event_1.group_id(), event_listener_0_sender_id); event_sender - .send((event_0, None)) + .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_0)) .expect("Triggering Event 0 failed"); event_sender - .send((event_1, None)) + .send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_1)) .expect("Triggering Event 1 failed"); // 3 Events messages will be sent now - let res = event_man.try_event_handling(); - assert!(res.is_ok()); - check_handled_event(res.unwrap(), event_0, 2); - let res = event_man.try_event_handling(); - assert!(res.is_ok()); - check_handled_event(res.unwrap(), event_1, 1); + let res = event_man.try_event_handling(error_handler); + check_handled_event(res, event_0, 2); + let res = event_man.try_event_handling(error_handler); + check_handled_event(res, event_1, 1); // Both the single event and the group event should arrive now check_next_event(event_0, &event_0_rx_0); check_next_event(event_1, &event_0_rx_0); @@ -716,36 +782,36 @@ mod tests { event_man.subscribe_group(event_1.group_id(), event_listener_0_sender_id); event_man.remove_duplicates(&ListenerKey::Group(event_1.group_id())); event_sender - .send((event_1, None)) + .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_1)) .expect("Triggering Event 1 failed"); - let res = event_man.try_event_handling(); - assert!(res.is_ok()); - check_handled_event(res.unwrap(), event_1, 1); + let res = event_man.try_event_handling(error_handler); + check_handled_event(res, event_1, 1); } #[test] fn test_all_events_listener() { - let (event_sender, manager_queue) = channel(); + let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| { + panic!("routing error occurred for event {:?}: {:?}", event_msg, e); + }; + let (event_sender, manager_queue) = mpsc::channel(); let event_man_receiver = MpscEventReceiver::new(manager_queue); let mut event_man = EventManagerWithMpsc::new(event_man_receiver); let event_0 = EventU32::new(Severity::INFO, 0, 5).unwrap(); let event_1 = EventU32::new(Severity::HIGH, 1, 0).unwrap(); - let (event_0_tx_0, all_events_rx) = channel(); + let (event_0_tx_0, all_events_rx) = mpsc::channel(); let all_events_listener = EventU32SenderMpsc::new(0, event_0_tx_0); - event_man.subscribe_all(all_events_listener.channel_id()); + event_man.subscribe_all(all_events_listener.target_id()); event_man.add_sender(all_events_listener); event_sender - .send((event_0, None)) + .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_0)) .expect("Triggering event 0 failed"); event_sender - .send((event_1, None)) + .send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_1)) .expect("Triggering event 1 failed"); - let res = event_man.try_event_handling(); - assert!(res.is_ok()); - check_handled_event(res.unwrap(), event_0, 1); - 
let res = event_man.try_event_handling(); - assert!(res.is_ok()); - check_handled_event(res.unwrap(), event_1, 1); + let res = event_man.try_event_handling(error_handler); + check_handled_event(res, event_0, 1); + let res = event_man.try_event_handling(error_handler); + check_handled_event(res, event_1, 1); check_next_event(event_0, &all_events_rx); check_next_event(event_1, &all_events_rx); } @@ -755,15 +821,15 @@ mod tests { let (event_sender, _event_receiver) = mpsc::sync_channel(3); let event_sender = EventU32SenderMpscBounded::new(1, event_sender, 3); event_sender - .send_no_data(TEST_EVENT) + .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT)) .expect("sending test event failed"); event_sender - .send_no_data(TEST_EVENT) + .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT)) .expect("sending test event failed"); event_sender - .send_no_data(TEST_EVENT) + .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT)) .expect("sending test event failed"); - let error = event_sender.send_no_data(TEST_EVENT); + let error = event_sender.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT)); if let Err(e) = error { assert!(matches!(e, GenericSendError::QueueFull(Some(3)))); } else { @@ -775,7 +841,7 @@ mod tests { let (event_sender, event_receiver) = mpsc::sync_channel(3); let event_sender = EventU32SenderMpscBounded::new(1, event_sender, 3); drop(event_receiver); - if let Err(e) = event_sender.send_no_data(TEST_EVENT) { + if let Err(e) = event_sender.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT)) { assert!(matches!(e, GenericSendError::RxDisconnected)); } else { panic!("Expected error"); diff --git a/satrs/src/events.rs b/satrs/src/events.rs index 2732726..032322a 100644 --- a/satrs/src/events.rs +++ b/satrs/src/events.rs @@ -80,7 +80,7 @@ impl HasSeverity for SeverityHigh { const SEVERITY: Severity = Severity::HIGH; } -pub trait GenericEvent: EcssEnumeration { +pub trait GenericEvent: EcssEnumeration + Copy + Clone { type Raw; type GroupId; type UniqueId; diff --git a/satrs/src/hal/mod.rs b/satrs/src/hal/mod.rs index b6ab984..c374f9b 100644 --- a/satrs/src/hal/mod.rs +++ b/satrs/src/hal/mod.rs @@ -1,4 +1,3 @@ //! # Hardware Abstraction Layer module #[cfg(feature = "std")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] pub mod std; diff --git a/satrs/src/hal/std/tcp_spacepackets_server.rs b/satrs/src/hal/std/tcp_spacepackets_server.rs index 257f0c1..a33b137 100644 --- a/satrs/src/hal/std/tcp_spacepackets_server.rs +++ b/satrs/src/hal/std/tcp_spacepackets_server.rs @@ -4,11 +4,10 @@ use std::{ net::{SocketAddr, TcpListener, TcpStream}, }; -use alloc::boxed::Box; - use crate::{ - encoding::{ccsds::PacketIdLookup, parse_buffer_for_ccsds_space_packets}, + encoding::parse_buffer_for_ccsds_space_packets, tmtc::{ReceivesTc, TmPacketSource}, + ValidatorU16Id, }; use super::tcp_server::{ @@ -16,17 +15,19 @@ use super::tcp_server::{ }; /// Concrete [TcpTcParser] implementation for the [TcpSpacepacketsServer]. 
-pub struct SpacepacketsTcParser { - packet_id_lookup: Box, +pub struct SpacepacketsTcParser { + packet_id_lookup: PacketIdChecker, } -impl SpacepacketsTcParser { - pub fn new(packet_id_lookup: Box) -> Self { +impl SpacepacketsTcParser { + pub fn new(packet_id_lookup: PacketIdChecker) -> Self { Self { packet_id_lookup } } } -impl TcpTcParser for SpacepacketsTcParser { +impl TcpTcParser + for SpacepacketsTcParser +{ fn handle_tc_parsing( &mut self, tc_buffer: &mut [u8], @@ -38,7 +39,7 @@ impl TcpTcParser for SpacepacketsTc // Reader vec full, need to parse for packets. conn_result.num_received_tcs += parse_buffer_for_ccsds_space_packets( &mut tc_buffer[..current_write_idx], - self.packet_id_lookup.as_ref(), + &self.packet_id_lookup, tc_receiver.upcast_mut(), next_write_idx, ) @@ -95,6 +96,7 @@ pub struct TcpSpacepacketsServer< TcError: 'static, TmSource: TmPacketSource, TcReceiver: ReceivesTc, + PacketIdChecker: ValidatorU16Id, > { generic_server: TcpTmtcGenericServer< TmError, @@ -102,7 +104,7 @@ pub struct TcpSpacepacketsServer< TmSource, TcReceiver, SpacepacketsTmSender, - SpacepacketsTcParser, + SpacepacketsTcParser, >, } @@ -111,7 +113,8 @@ impl< TcError: 'static, TmSource: TmPacketSource, TcReceiver: ReceivesTc, - > TcpSpacepacketsServer + PacketIdChecker: ValidatorU16Id, + > TcpSpacepacketsServer { /// /// ## Parameter @@ -127,12 +130,12 @@ impl< cfg: ServerConfig, tm_source: TmSource, tc_receiver: TcReceiver, - packet_id_lookup: Box, + packet_id_checker: PacketIdChecker, ) -> Result { Ok(Self { generic_server: TcpTmtcGenericServer::new( cfg, - SpacepacketsTcParser::new(packet_id_lookup), + SpacepacketsTcParser::new(packet_id_checker), SpacepacketsTmSender::default(), tm_source, tc_receiver, @@ -170,7 +173,7 @@ mod tests { thread, }; - use alloc::{boxed::Box, sync::Arc}; + use alloc::sync::Arc; use hashbrown::HashSet; use spacepackets::{ ecss::{tc::PusTcCreator, WritablePusPacket}, @@ -185,21 +188,21 @@ mod tests { use super::TcpSpacepacketsServer; const TEST_APID_0: u16 = 0x02; - const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0); + const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0); const TEST_APID_1: u16 = 0x10; - const TEST_PACKET_ID_1: PacketId = PacketId::const_tc(true, TEST_APID_1); + const TEST_PACKET_ID_1: PacketId = PacketId::new_for_tc(true, TEST_APID_1); fn generic_tmtc_server( addr: &SocketAddr, tc_receiver: SyncTcCacher, tm_source: SyncTmSource, packet_id_lookup: HashSet, - ) -> TcpSpacepacketsServer<(), (), SyncTmSource, SyncTcCacher> { + ) -> TcpSpacepacketsServer<(), (), SyncTmSource, SyncTcCacher, HashSet> { TcpSpacepacketsServer::new( ServerConfig::new(*addr, Duration::from_millis(2), 1024, 1024), tm_source, tc_receiver, - Box::new(packet_id_lookup), + packet_id_lookup, ) .expect("TCP server generation failed") } @@ -233,8 +236,8 @@ mod tests { assert_eq!(conn_result.num_sent_tms, 0); set_if_done.store(true, Ordering::Relaxed); }); - let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); - let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); + let ping_tc = + PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true); let tc_0 = ping_tc.to_vec().expect("packet generation failed"); let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed"); stream @@ -265,13 +268,13 @@ mod tests { // Add telemetry let mut total_tm_len = 0; - let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); - let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 1, 
None, true); + let verif_tm = + PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 1, 1, &[], true); let tm_0 = verif_tm.to_vec().expect("writing packet failed"); total_tm_len += tm_0.len(); tm_source.add_tm(&tm_0); - let mut sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap(); - let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 3, None, true); + let verif_tm = + PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 1, 3, &[], true); let tm_1 = verif_tm.to_vec().expect("writing packet failed"); total_tm_len += tm_1.len(); tm_source.add_tm(&tm_1); @@ -312,14 +315,14 @@ mod tests { .expect("setting reas timeout failed"); // Send telecommands - let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); - let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); + let ping_tc = + PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true); let tc_0 = ping_tc.to_vec().expect("ping tc creation failed"); stream .write_all(&tc_0) .expect("writing to TCP server failed"); - let mut sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap(); - let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true); + let action_tc = + PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 8, 0, &[], true); let tc_1 = action_tc.to_vec().expect("action tc creation failed"); stream .write_all(&tc_1) diff --git a/satrs/src/hal/std/udp_server.rs b/satrs/src/hal/std/udp_server.rs index fa117f0..8f77c2b 100644 --- a/satrs/src/hal/std/udp_server.rs +++ b/satrs/src/hal/std/udp_server.rs @@ -40,8 +40,8 @@ use std::vec::Vec; /// let ping_receiver = PingReceiver::default(); /// let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver)) /// .expect("Creating UDP TMTC server failed"); -/// let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap(); -/// let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); +/// let sph = SpHeader::new_from_apid(0x02); +/// let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true); /// let len = pus_tc /// .write_to_bytes(&mut buf) /// .expect("Error writing PUS TC packet"); @@ -178,8 +178,8 @@ mod tests { let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver)) .expect("Creating UDP TMTC server failed"); is_send(&udp_tc_server); - let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap(); - let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); + let sph = SpHeader::new_from_apid(0x02); + let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true); let len = pus_tc .write_to_bytes(&mut buf) .expect("Error writing PUS TC packet"); diff --git a/satrs/src/hk.rs b/satrs/src/hk.rs index 8033e15..50edfda 100644 --- a/satrs/src/hk.rs +++ b/satrs/src/hk.rs @@ -1,40 +1,40 @@ -use crate::{ - pus::verification::{TcStateAccepted, VerificationToken}, - TargetId, -}; +use crate::ComponentId; pub type CollectionIntervalFactor = u32; +/// Unique Identifier for a certain housekeeping dataset. 
pub type UniqueId = u32; #[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum HkRequest { - OneShot(UniqueId), - Enable(UniqueId), - Disable(UniqueId), - ModifyCollectionInterval(UniqueId, CollectionIntervalFactor), +pub struct HkRequest { + pub unique_id: UniqueId, + pub variant: HkRequestVariant, +} + +impl HkRequest { + pub fn new(unique_id: UniqueId, variant: HkRequestVariant) -> Self { + Self { unique_id, variant } + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum HkRequestVariant { + OneShot, + EnablePeriodic, + DisablePeriodic, + ModifyCollectionInterval(CollectionIntervalFactor), } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct TargetedHkRequest { - pub target_id: TargetId, - pub hk_request: HkRequest, + pub target_id: ComponentId, + pub hk_request: HkRequestVariant, } impl TargetedHkRequest { - pub fn new(target_id: TargetId, hk_request: HkRequest) -> Self { + pub fn new(target_id: ComponentId, hk_request: HkRequestVariant) -> Self { Self { target_id, hk_request, } } } - -pub trait PusHkRequestRouter { - type Error; - fn route( - &self, - target_id: TargetId, - hk_request: HkRequest, - token: VerificationToken, - ) -> Result<(), Self::Error>; -} diff --git a/satrs/src/lib.rs b/satrs/src/lib.rs index 5040d58..f6b43e6 100644 --- a/satrs/src/lib.rs +++ b/satrs/src/lib.rs @@ -14,7 +14,7 @@ //! - The [pus] module which provides special support for projects using //! the [ECSS PUS C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/). #![no_std] -#![cfg_attr(doc_cfg, feature(doc_cfg))] +#![cfg_attr(docs_rs, feature(doc_auto_cfg))] #[cfg(feature = "alloc")] extern crate alloc; #[cfg(feature = "alloc")] @@ -23,16 +23,15 @@ extern crate downcast_rs; extern crate std; #[cfg(feature = "alloc")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] pub mod cfdp; pub mod encoding; pub mod event_man; pub mod events; #[cfg(feature = "std")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] pub mod executable; pub mod hal; -pub mod objects; +#[cfg(feature = "std")] +pub mod mode_tree; pub mod pool; pub mod power; pub mod pus; @@ -40,6 +39,7 @@ pub mod queue; pub mod request; pub mod res_code; pub mod seq_count; +pub mod time; pub mod tmtc; pub mod action; @@ -49,8 +49,70 @@ pub mod params; pub use spacepackets; -/// Generic channel ID type. -pub type ChannelId = u32; +use spacepackets::PacketId; -/// Generic target ID type. -pub type TargetId = u64; +/// Generic component ID type. 
+pub type ComponentId = u64; + +pub trait ValidatorU16Id { + fn validate(&self, id: u16) -> bool; +} + +#[cfg(feature = "alloc")] +impl ValidatorU16Id for alloc::vec::Vec { + fn validate(&self, id: u16) -> bool { + self.contains(&id) + } +} + +#[cfg(feature = "alloc")] +impl ValidatorU16Id for hashbrown::HashSet { + fn validate(&self, id: u16) -> bool { + self.contains(&id) + } +} + +impl ValidatorU16Id for [u16] { + fn validate(&self, id: u16) -> bool { + self.binary_search(&id).is_ok() + } +} + +impl ValidatorU16Id for &[u16] { + fn validate(&self, id: u16) -> bool { + self.binary_search(&id).is_ok() + } +} + +#[cfg(feature = "alloc")] +impl ValidatorU16Id for alloc::vec::Vec { + fn validate(&self, packet_id: u16) -> bool { + self.contains(&PacketId::from(packet_id)) + } +} + +#[cfg(feature = "alloc")] +impl ValidatorU16Id for hashbrown::HashSet { + fn validate(&self, packet_id: u16) -> bool { + self.contains(&PacketId::from(packet_id)) + } +} + +#[cfg(feature = "std")] +impl ValidatorU16Id for std::collections::HashSet { + fn validate(&self, packet_id: u16) -> bool { + self.contains(&PacketId::from(packet_id)) + } +} + +impl ValidatorU16Id for [PacketId] { + fn validate(&self, packet_id: u16) -> bool { + self.binary_search(&PacketId::from(packet_id)).is_ok() + } +} + +impl ValidatorU16Id for &[PacketId] { + fn validate(&self, packet_id: u16) -> bool { + self.binary_search(&PacketId::from(packet_id)).is_ok() + } +} diff --git a/satrs/src/mode.rs b/satrs/src/mode.rs index c5968b4..65519a5 100644 --- a/satrs/src/mode.rs +++ b/satrs/src/mode.rs @@ -1,67 +1,95 @@ use core::mem::size_of; +use satrs_shared::res_code::ResultU16; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use spacepackets::ByteConversionError; -use crate::TargetId; +#[cfg(feature = "alloc")] +pub use alloc_mod::*; + +#[cfg(feature = "std")] +pub use std_mod::*; + +use crate::{ + queue::GenericTargetedMessagingError, + request::{GenericMessage, MessageMetadata, MessageReceiver, MessageReceiverWithId, RequestId}, + ComponentId, +}; + +pub type Mode = u32; +pub type Submode = u16; #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct ModeAndSubmode { - mode: u32, - submode: u16, + mode: Mode, + submode: Submode, } impl ModeAndSubmode { - pub const fn new_mode_only(mode: u32) -> Self { + pub const RAW_LEN: usize = size_of::() + size_of::(); + + pub const fn new_mode_only(mode: Mode) -> Self { Self { mode, submode: 0 } } - pub const fn new(mode: u32, submode: u16) -> Self { + pub const fn new(mode: Mode, submode: Submode) -> Self { Self { mode, submode } } - pub fn raw_len() -> usize { - size_of::() + size_of::() - } - pub fn from_be_bytes(buf: &[u8]) -> Result { if buf.len() < 6 { return Err(ByteConversionError::FromSliceTooSmall { - expected: 6, + expected: Self::RAW_LEN, found: buf.len(), }); } Ok(Self { - mode: u32::from_be_bytes(buf[0..4].try_into().unwrap()), - submode: u16::from_be_bytes(buf[4..6].try_into().unwrap()), + mode: Mode::from_be_bytes(buf[0..size_of::()].try_into().unwrap()), + submode: Submode::from_be_bytes( + buf[size_of::()..size_of::() + size_of::()] + .try_into() + .unwrap(), + ), }) } - pub fn mode(&self) -> u32 { + pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result { + if buf.len() < Self::RAW_LEN { + return Err(ByteConversionError::ToSliceTooSmall { + expected: Self::RAW_LEN, + found: buf.len(), + }); + } + buf[0..size_of::()].copy_from_slice(&self.mode.to_be_bytes()); + 
buf[size_of::()..Self::RAW_LEN].copy_from_slice(&self.submode.to_be_bytes()); + Ok(Self::RAW_LEN) + } + + pub fn mode(&self) -> Mode { self.mode } - pub fn submode(&self) -> u16 { + pub fn submode(&self) -> Submode { self.submode } } #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct TargetedModeCommand { - pub address: TargetId, + pub address: ComponentId, pub mode_submode: ModeAndSubmode, } impl TargetedModeCommand { - pub const fn new(address: TargetId, mode_submode: ModeAndSubmode) -> Self { + pub const fn new(address: ComponentId, mode_submode: ModeAndSubmode) -> Self { Self { address, mode_submode, } } - pub fn address(&self) -> TargetId { + pub fn address(&self) -> ComponentId { self.address } @@ -81,6 +109,8 @@ impl TargetedModeCommand { #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum ModeRequest { + /// Mode information. Can be used to notify other components of changed modes. + ModeInfo(ModeAndSubmode), SetMode(ModeAndSubmode), ReadMode, AnnounceMode, @@ -90,6 +120,479 @@ pub enum ModeRequest { #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct TargetedModeRequest { - target_id: TargetId, + target_id: ComponentId, mode_request: ModeRequest, } + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum ModeReply { + /// Reply to a mode request to confirm the commanded mode was reached. + ModeReply(ModeAndSubmode), + // Can not reach the commanded mode. Contains a reason as a [ResultU16]. + CantReachMode(ResultU16), + /// We are in the wrong mode for unknown reasons. Contains the expected and reached mode. 
+ WrongMode { + expected: ModeAndSubmode, + reached: ModeAndSubmode, + }, +} + +pub type GenericModeReply = GenericMessage; + +pub trait ModeRequestSender { + fn local_channel_id(&self) -> ComponentId; + fn send_mode_request( + &self, + request_id: RequestId, + target_id: ComponentId, + request: ModeRequest, + ) -> Result<(), GenericTargetedMessagingError>; +} + +pub trait ModeRequestReceiver { + fn try_recv_mode_request( + &self, + ) -> Result>, GenericTargetedMessagingError>; +} + +impl> ModeRequestReceiver + for MessageReceiverWithId +{ + fn try_recv_mode_request( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.try_recv_message() + } +} + +#[derive(Debug, Clone)] +pub enum ModeError { + Messaging(GenericTargetedMessagingError), +} + +impl From for ModeError { + fn from(value: GenericTargetedMessagingError) -> Self { + Self::Messaging(value) + } +} + +pub trait ModeProvider { + fn mode_and_submode(&self) -> ModeAndSubmode; + + fn mode(&self) -> Mode { + self.mode_and_submode().mode() + } + + fn submode(&self) -> Submode { + self.mode_and_submode().submode() + } +} + +pub trait ModeRequestHandler: ModeProvider { + type Error; + + fn start_transition( + &mut self, + requestor: MessageMetadata, + mode_and_submode: ModeAndSubmode, + ) -> Result<(), Self::Error>; + + fn announce_mode(&self, requestor_info: Option, recursive: bool); + + fn handle_mode_reached( + &mut self, + requestor_info: Option, + ) -> Result<(), Self::Error>; + + fn handle_mode_info( + &mut self, + requestor_info: MessageMetadata, + info: ModeAndSubmode, + ) -> Result<(), Self::Error>; + + fn send_mode_reply( + &self, + requestor_info: MessageMetadata, + reply: ModeReply, + ) -> Result<(), Self::Error>; + + fn handle_mode_request( + &mut self, + request: GenericMessage, + ) -> Result<(), Self::Error> { + match request.message { + ModeRequest::SetMode(mode_and_submode) => { + self.start_transition(request.requestor_info, mode_and_submode) + } + ModeRequest::ReadMode => self.send_mode_reply( + request.requestor_info, + ModeReply::ModeReply(self.mode_and_submode()), + ), + ModeRequest::AnnounceMode => { + self.announce_mode(Some(request.requestor_info), false); + Ok(()) + } + ModeRequest::AnnounceModeRecursive => { + self.announce_mode(Some(request.requestor_info), true); + Ok(()) + } + ModeRequest::ModeInfo(info) => self.handle_mode_info(request.requestor_info, info), + } + } +} + +pub trait ModeReplyReceiver { + fn try_recv_mode_reply( + &self, + ) -> Result>, GenericTargetedMessagingError>; +} + +impl> ModeReplyReceiver for MessageReceiverWithId { + fn try_recv_mode_reply( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.try_recv_message() + } +} + +pub trait ModeReplySender { + fn local_channel_id(&self) -> ComponentId; + + /// The requestor is assumed to be the target of the reply. 
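To illustrate the new mode API, the following sketch implements ModeRequestHandler for a dummy component. It assumes that the elided generic arguments of the trait methods above are MessageMetadata (or an Option of it where the requestor is optional); the handler completes transitions immediately and drops replies, which a real component would route through a ModeReplySender. All type names are made up for the example:

use satrs::mode::{ModeAndSubmode, ModeError, ModeProvider, ModeReply, ModeRequestHandler};
use satrs::request::MessageMetadata;

struct DummyModeHandler {
    mode_and_submode: ModeAndSubmode,
}

impl ModeProvider for DummyModeHandler {
    fn mode_and_submode(&self) -> ModeAndSubmode {
        self.mode_and_submode
    }
}

impl ModeRequestHandler for DummyModeHandler {
    type Error = ModeError;

    fn start_transition(
        &mut self,
        requestor: MessageMetadata,
        mode_and_submode: ModeAndSubmode,
    ) -> Result<(), Self::Error> {
        // Immediate transition: store the new mode and confirm it right away.
        self.mode_and_submode = mode_and_submode;
        self.handle_mode_reached(Some(requestor))
    }

    fn announce_mode(&self, _requestor_info: Option<MessageMetadata>, _recursive: bool) {
        // A real component might trigger an event or log its current mode here.
    }

    fn handle_mode_reached(
        &mut self,
        requestor_info: Option<MessageMetadata>,
    ) -> Result<(), Self::Error> {
        if let Some(requestor) = requestor_info {
            self.send_mode_reply(requestor, ModeReply::ModeReply(self.mode_and_submode))?;
        }
        Ok(())
    }

    fn handle_mode_info(
        &mut self,
        _requestor_info: MessageMetadata,
        _info: ModeAndSubmode,
    ) -> Result<(), Self::Error> {
        // Mode information from child components is ignored in this sketch.
        Ok(())
    }

    fn send_mode_reply(
        &self,
        _requestor_info: MessageMetadata,
        _reply: ModeReply,
    ) -> Result<(), Self::Error> {
        // Drop the reply; a real handler would forward it via a ModeReplySender.
        Ok(())
    }
}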
+ fn send_mode_reply( + &self, + requestor_info: MessageMetadata, + reply: ModeReply, + ) -> Result<(), GenericTargetedMessagingError>; +} + +#[cfg(feature = "alloc")] +pub mod alloc_mod { + use crate::{ + mode::ModeRequest, + queue::GenericTargetedMessagingError, + request::{ + MessageMetadata, MessageSender, MessageSenderAndReceiver, MessageSenderMap, + RequestAndReplySenderAndReceiver, RequestId, + }, + ComponentId, + }; + + use super::*; + + impl> MessageSenderMap { + pub fn send_mode_reply( + &self, + requestor_info: MessageMetadata, + target_id: ComponentId, + request: ModeReply, + ) -> Result<(), GenericTargetedMessagingError> { + self.send_message(requestor_info, target_id, request) + } + + pub fn add_reply_target(&mut self, target_id: ComponentId, request_sender: S) { + self.add_message_target(target_id, request_sender) + } + } + + impl, R: MessageReceiver> ModeReplySender + for MessageSenderAndReceiver + { + fn local_channel_id(&self) -> ComponentId { + self.local_channel_id_generic() + } + + fn send_mode_reply( + &self, + requestor_info: MessageMetadata, + request: ModeReply, + ) -> Result<(), GenericTargetedMessagingError> { + self.message_sender_map.send_mode_reply( + MessageMetadata::new(requestor_info.request_id(), self.local_channel_id()), + requestor_info.sender_id(), + request, + ) + } + } + + impl, R: MessageReceiver> ModeReplyReceiver + for MessageSenderAndReceiver + { + fn try_recv_mode_reply( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.message_receiver.try_recv_message() + } + } + + impl< + REQUEST, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > RequestAndReplySenderAndReceiver + { + pub fn add_reply_target(&mut self, target_id: ComponentId, reply_sender: S1) { + self.reply_sender_map + .add_message_target(target_id, reply_sender) + } + } + + impl< + REQUEST, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > ModeReplySender for RequestAndReplySenderAndReceiver + { + fn local_channel_id(&self) -> ComponentId { + self.local_channel_id_generic() + } + + fn send_mode_reply( + &self, + requestor_info: MessageMetadata, + request: ModeReply, + ) -> Result<(), GenericTargetedMessagingError> { + self.reply_sender_map.send_mode_reply( + MessageMetadata::new(requestor_info.request_id(), self.local_channel_id()), + requestor_info.sender_id(), + request, + ) + } + } + + impl< + REQUEST, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > ModeReplyReceiver + for RequestAndReplySenderAndReceiver + { + fn try_recv_mode_reply( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.reply_receiver.try_recv_message() + } + } + + /// Helper type definition for a mode handler which can handle mode requests. + pub type ModeRequestHandlerInterface = + MessageSenderAndReceiver; + + impl, R: MessageReceiver> + ModeRequestHandlerInterface + { + pub fn try_recv_mode_request( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.try_recv_message() + } + + pub fn send_mode_reply( + &self, + requestor_info: MessageMetadata, + reply: ModeReply, + ) -> Result<(), GenericTargetedMessagingError> { + self.send_message( + requestor_info.request_id(), + requestor_info.sender_id(), + reply, + ) + } + } + + /// Helper type defintion for a mode handler object which can send mode requests and receive + /// mode replies. 
+ pub type ModeRequestorInterface = MessageSenderAndReceiver; + + impl, R: MessageReceiver> ModeRequestorInterface { + pub fn try_recv_mode_reply( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.try_recv_message() + } + + pub fn send_mode_request( + &self, + request_id: RequestId, + target_id: ComponentId, + reply: ModeRequest, + ) -> Result<(), GenericTargetedMessagingError> { + self.send_message(request_id, target_id, reply) + } + } + + /// Helper type defintion for a mode handler object which can both send mode requests and + /// process mode requests. + pub type ModeInterface = + RequestAndReplySenderAndReceiver; + + impl> MessageSenderMap { + pub fn send_mode_request( + &self, + requestor_info: MessageMetadata, + target_id: ComponentId, + request: ModeRequest, + ) -> Result<(), GenericTargetedMessagingError> { + self.send_message(requestor_info, target_id, request) + } + + pub fn add_request_target(&mut self, target_id: ComponentId, request_sender: S) { + self.add_message_target(target_id, request_sender) + } + } + + /* + impl> ModeRequestSender for MessageSenderMapWithId { + fn local_channel_id(&self) -> ComponentId { + self.local_channel_id + } + + fn send_mode_request( + &self, + request_id: RequestId, + target_id: ComponentId, + request: ModeRequest, + ) -> Result<(), GenericTargetedMessagingError> { + self.send_message(request_id, target_id, request) + } + } + */ + + impl, R: MessageReceiver> ModeRequestReceiver + for MessageSenderAndReceiver + { + fn try_recv_mode_request( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.message_receiver.try_recv_message() + } + } + + impl, R: MessageReceiver> ModeRequestSender + for MessageSenderAndReceiver + { + fn local_channel_id(&self) -> ComponentId { + self.local_channel_id_generic() + } + + fn send_mode_request( + &self, + request_id: RequestId, + target_id: ComponentId, + request: ModeRequest, + ) -> Result<(), GenericTargetedMessagingError> { + self.message_sender_map.send_mode_request( + MessageMetadata::new(request_id, self.local_channel_id()), + target_id, + request, + ) + } + } + + impl< + REPLY, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > RequestAndReplySenderAndReceiver + { + pub fn add_request_target(&mut self, target_id: ComponentId, request_sender: S0) { + self.request_sender_map + .add_message_target(target_id, request_sender) + } + } + + impl< + REPLY, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > ModeRequestSender + for RequestAndReplySenderAndReceiver + { + fn local_channel_id(&self) -> ComponentId { + self.local_channel_id_generic() + } + + fn send_mode_request( + &self, + request_id: RequestId, + target_id: ComponentId, + request: ModeRequest, + ) -> Result<(), GenericTargetedMessagingError> { + self.request_sender_map.send_mode_request( + MessageMetadata::new(request_id, self.local_channel_id()), + target_id, + request, + ) + } + } + + impl< + REPLY, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > ModeRequestReceiver + for RequestAndReplySenderAndReceiver + { + fn try_recv_mode_request( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.request_receiver.try_recv_message() + } + } +} + +#[cfg(feature = "std")] +pub mod std_mod { + use std::sync::mpsc; + + use crate::request::GenericMessage; + + use super::*; + + pub type ModeRequestHandlerMpsc = ModeRequestHandlerInterface< + mpsc::Sender>, + mpsc::Receiver>, + >; + pub type 
ModeRequestHandlerMpscBounded = ModeRequestHandlerInterface< + mpsc::SyncSender>, + mpsc::Receiver>, + >; + + pub type ModeRequestorMpsc = ModeRequestorInterface< + mpsc::Sender>, + mpsc::Receiver>, + >; + pub type ModeRequestorBoundedMpsc = ModeRequestorInterface< + mpsc::SyncSender>, + mpsc::Receiver>, + >; + + pub type ModeRequestorAndHandlerMpsc = ModeInterface< + mpsc::Sender>, + mpsc::Receiver>, + mpsc::Sender>, + mpsc::Receiver>, + >; + pub type ModeRequestorAndHandlerMpscBounded = ModeInterface< + mpsc::SyncSender>, + mpsc::Receiver>, + mpsc::SyncSender>, + mpsc::Receiver>, + >; +} + +#[cfg(test)] +mod tests {} diff --git a/satrs/src/mode_tree.rs b/satrs/src/mode_tree.rs new file mode 100644 index 0000000..1cddd32 --- /dev/null +++ b/satrs/src/mode_tree.rs @@ -0,0 +1,37 @@ +use alloc::vec::Vec; +use hashbrown::HashMap; + +use crate::{ + mode::{Mode, ModeAndSubmode, Submode}, + ComponentId, +}; + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum TableEntryType { + /// Target table containing information of the expected children modes for given mode. + Target, + /// Sequence table which contains information about how to reach a target table, including + /// the order of the sequences. + Sequence, +} + +pub struct ModeTableEntry { + /// Name of respective table entry. + pub name: &'static str, + /// Target channel ID. + pub channel_id: ComponentId, + pub mode_submode: ModeAndSubmode, + pub allowed_submode_mask: Option, + pub check_success: bool, +} + +pub struct ModeTableMapValue { + /// Name for a given mode table entry. + pub name: &'static str, + pub entries: Vec, +} + +pub type ModeTable = HashMap; + +#[cfg(test)] +mod tests {} diff --git a/satrs/src/objects.rs b/satrs/src/objects.rs deleted file mode 100644 index a9b6881..0000000 --- a/satrs/src/objects.rs +++ /dev/null @@ -1,308 +0,0 @@ -//! # Module providing addressable object support and a manager for them -//! -//! Each addressable object can be identified using an [object ID][ObjectId]. -//! The [system object][ManagedSystemObject] trait also allows storing these objects into the -//! [object manager][ObjectManager]. They can then be retrieved and casted back to a known type -//! using the object ID. -//! -//! # Examples -//! -//! ```rust -//! use std::any::Any; -//! use std::error::Error; -//! use satrs::objects::{ManagedSystemObject, ObjectId, ObjectManager, SystemObject}; -//! -//! struct ExampleSysObj { -//! id: ObjectId, -//! dummy: u32, -//! was_initialized: bool, -//! } -//! -//! impl ExampleSysObj { -//! fn new(id: ObjectId, dummy: u32) -> ExampleSysObj { -//! ExampleSysObj { -//! id, -//! dummy, -//! was_initialized: false, -//! } -//! } -//! } -//! -//! impl SystemObject for ExampleSysObj { -//! type Error = (); -//! fn get_object_id(&self) -> &ObjectId { -//! &self.id -//! } -//! -//! fn initialize(&mut self) -> Result<(), Self::Error> { -//! self.was_initialized = true; -//! Ok(()) -//! } -//! } -//! -//! impl ManagedSystemObject for ExampleSysObj {} -//! -//! let mut obj_manager = ObjectManager::default(); -//! let obj_id = ObjectId { id: 0, name: "Example 0"}; -//! let example_obj = ExampleSysObj::new(obj_id, 42); -//! obj_manager.insert(Box::new(example_obj)); -//! let obj_back_casted: Option<&ExampleSysObj> = obj_manager.get_ref(&obj_id); -//! let example_obj = obj_back_casted.unwrap(); -//! assert_eq!(example_obj.id, obj_id); -//! assert_eq!(example_obj.dummy, 42); -//! 
``` -#[cfg(feature = "alloc")] -use alloc::boxed::Box; -#[cfg(feature = "alloc")] -pub use alloc_mod::*; -#[cfg(feature = "alloc")] -use downcast_rs::Downcast; -#[cfg(feature = "alloc")] -use hashbrown::HashMap; -#[cfg(feature = "std")] -use std::error::Error; - -use crate::TargetId; - -#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)] -pub struct ObjectId { - pub id: TargetId, - pub name: &'static str, -} - -#[cfg(feature = "alloc")] -pub mod alloc_mod { - use super::*; - - /// Each object which is stored inside the [object manager][ObjectManager] needs to implemented - /// this trait - pub trait SystemObject: Downcast { - type Error; - fn get_object_id(&self) -> &ObjectId; - fn initialize(&mut self) -> Result<(), Self::Error>; - } - downcast_rs::impl_downcast!(SystemObject assoc Error); - - pub trait ManagedSystemObject: SystemObject + Send {} - downcast_rs::impl_downcast!(ManagedSystemObject assoc Error); - - /// Helper module to manage multiple [ManagedSystemObjects][ManagedSystemObject] by mapping them - /// using an [object ID][ObjectId] - #[cfg(feature = "alloc")] - pub struct ObjectManager { - obj_map: HashMap>>, - } - - #[cfg(feature = "alloc")] - impl Default for ObjectManager { - fn default() -> Self { - Self::new() - } - } - - #[cfg(feature = "alloc")] - impl ObjectManager { - pub fn new() -> Self { - ObjectManager { - obj_map: HashMap::new(), - } - } - pub fn insert(&mut self, sys_obj: Box>) -> bool { - let obj_id = sys_obj.get_object_id(); - if self.obj_map.contains_key(obj_id) { - return false; - } - self.obj_map.insert(*obj_id, sys_obj).is_none() - } - - /// Initializes all System Objects in the hash map and returns the number of successful - /// initializations - pub fn initialize(&mut self) -> Result> { - let mut init_success = 0; - for val in self.obj_map.values_mut() { - if val.initialize().is_ok() { - init_success += 1 - } - } - Ok(init_success) - } - - /// Retrieve a reference to an object stored inside the manager. The type to retrieve needs to - /// be explicitly passed as a generic parameter or specified on the left hand side of the - /// expression. - pub fn get_ref>(&self, key: &ObjectId) -> Option<&T> { - self.obj_map.get(key).and_then(|o| o.downcast_ref::()) - } - - /// Retrieve a mutable reference to an object stored inside the manager. The type to retrieve - /// needs to be explicitly passed as a generic parameter or specified on the left hand side - /// of the expression. 
- pub fn get_mut>( - &mut self, - key: &ObjectId, - ) -> Option<&mut T> { - self.obj_map - .get_mut(key) - .and_then(|o| o.downcast_mut::()) - } - } -} - -#[cfg(test)] -mod tests { - use crate::objects::{ManagedSystemObject, ObjectId, ObjectManager, SystemObject}; - use std::boxed::Box; - use std::string::String; - use std::sync::{Arc, Mutex}; - use std::thread; - - struct ExampleSysObj { - id: ObjectId, - dummy: u32, - was_initialized: bool, - } - - impl ExampleSysObj { - fn new(id: ObjectId, dummy: u32) -> ExampleSysObj { - ExampleSysObj { - id, - dummy, - was_initialized: false, - } - } - } - - impl SystemObject for ExampleSysObj { - type Error = (); - fn get_object_id(&self) -> &ObjectId { - &self.id - } - - fn initialize(&mut self) -> Result<(), Self::Error> { - self.was_initialized = true; - Ok(()) - } - } - - impl ManagedSystemObject for ExampleSysObj {} - - struct OtherExampleObject { - id: ObjectId, - string: String, - was_initialized: bool, - } - - impl SystemObject for OtherExampleObject { - type Error = (); - fn get_object_id(&self) -> &ObjectId { - &self.id - } - - fn initialize(&mut self) -> Result<(), Self::Error> { - self.was_initialized = true; - Ok(()) - } - } - - impl ManagedSystemObject for OtherExampleObject {} - - #[test] - fn test_obj_manager_simple() { - let mut obj_manager = ObjectManager::default(); - let expl_obj_id = ObjectId { - id: 0, - name: "Example 0", - }; - let example_obj = ExampleSysObj::new(expl_obj_id, 42); - assert!(obj_manager.insert(Box::new(example_obj))); - let res = obj_manager.initialize(); - assert!(res.is_ok()); - assert_eq!(res.unwrap(), 1); - let obj_back_casted: Option<&ExampleSysObj> = obj_manager.get_ref(&expl_obj_id); - assert!(obj_back_casted.is_some()); - let expl_obj_back_casted = obj_back_casted.unwrap(); - assert_eq!(expl_obj_back_casted.dummy, 42); - assert!(expl_obj_back_casted.was_initialized); - - let second_obj_id = ObjectId { - id: 12, - name: "Example 1", - }; - let second_example_obj = OtherExampleObject { - id: second_obj_id, - string: String::from("Hello Test"), - was_initialized: false, - }; - - assert!(obj_manager.insert(Box::new(second_example_obj))); - let res = obj_manager.initialize(); - assert!(res.is_ok()); - assert_eq!(res.unwrap(), 2); - let obj_back_casted: Option<&OtherExampleObject> = obj_manager.get_ref(&second_obj_id); - assert!(obj_back_casted.is_some()); - let expl_obj_back_casted = obj_back_casted.unwrap(); - assert_eq!(expl_obj_back_casted.string, String::from("Hello Test")); - assert!(expl_obj_back_casted.was_initialized); - - let existing_obj_id = ObjectId { - id: 12, - name: "Example 1", - }; - let invalid_obj = OtherExampleObject { - id: existing_obj_id, - string: String::from("Hello Test"), - was_initialized: false, - }; - - assert!(!obj_manager.insert(Box::new(invalid_obj))); - } - - #[test] - fn object_man_threaded() { - let obj_manager = Arc::new(Mutex::new(ObjectManager::new())); - let expl_obj_id = ObjectId { - id: 0, - name: "Example 0", - }; - let example_obj = ExampleSysObj::new(expl_obj_id, 42); - let second_obj_id = ObjectId { - id: 12, - name: "Example 1", - }; - let second_example_obj = OtherExampleObject { - id: second_obj_id, - string: String::from("Hello Test"), - was_initialized: false, - }; - - let mut obj_man_handle = obj_manager.lock().expect("Mutex lock failed"); - assert!(obj_man_handle.insert(Box::new(example_obj))); - assert!(obj_man_handle.insert(Box::new(second_example_obj))); - let res = obj_man_handle.initialize(); - std::mem::drop(obj_man_handle); - assert!(res.is_ok()); 
- assert_eq!(res.unwrap(), 2); - let obj_man_0 = obj_manager.clone(); - let jh0 = thread::spawn(move || { - let locked_man = obj_man_0.lock().expect("Mutex lock failed"); - let obj_back_casted: Option<&ExampleSysObj> = locked_man.get_ref(&expl_obj_id); - assert!(obj_back_casted.is_some()); - let expl_obj_back_casted = obj_back_casted.unwrap(); - assert_eq!(expl_obj_back_casted.dummy, 42); - assert!(expl_obj_back_casted.was_initialized); - std::mem::drop(locked_man) - }); - - let jh1 = thread::spawn(move || { - let locked_man = obj_manager.lock().expect("Mutex lock failed"); - let obj_back_casted: Option<&OtherExampleObject> = locked_man.get_ref(&second_obj_id); - assert!(obj_back_casted.is_some()); - let expl_obj_back_casted = obj_back_casted.unwrap(); - assert_eq!(expl_obj_back_casted.string, String::from("Hello Test")); - assert!(expl_obj_back_casted.was_initialized); - std::mem::drop(locked_man) - }); - jh0.join().expect("Joining thread 0 failed"); - jh1.join().expect("Joining thread 1 failed"); - } -} diff --git a/satrs/src/params.rs b/satrs/src/params.rs index 1279015..10fb41c 100644 --- a/satrs/src/params.rs +++ b/satrs/src/params.rs @@ -60,21 +60,28 @@ use alloc::vec::Vec; /// Generic trait which is used for objects which can be converted into a raw network (big) endian /// byte format. pub trait WritableToBeBytes { - fn raw_len(&self) -> usize; + fn written_len(&self) -> usize; /// Writes the object to a raw buffer in network endianness (big) fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result; + + #[cfg(feature = "alloc")] + fn to_vec(&self) -> Result, ByteConversionError> { + let mut vec = alloc::vec![0; self.written_len()]; + self.write_to_be_bytes(&mut vec)?; + Ok(vec) + } } macro_rules! param_to_be_bytes_impl { ($Newtype: ident) => { impl WritableToBeBytes for $Newtype { #[inline] - fn raw_len(&self) -> usize { + fn written_len(&self) -> usize { size_of::<::ByteArray>() } fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result { - let raw_len = self.raw_len(); + let raw_len = WritableToBeBytes::written_len(self); if buf.len() < raw_len { return Err(ByteConversionError::ToSliceTooSmall { found: buf.len(), @@ -382,32 +389,32 @@ pub enum ParamsRaw { } impl WritableToBeBytes for ParamsRaw { - fn raw_len(&self) -> usize { + fn written_len(&self) -> usize { match self { - ParamsRaw::U8(v) => v.raw_len(), - ParamsRaw::U8Pair(v) => v.raw_len(), - ParamsRaw::U8Triplet(v) => v.raw_len(), - ParamsRaw::I8(v) => v.raw_len(), - ParamsRaw::I8Pair(v) => v.raw_len(), - ParamsRaw::I8Triplet(v) => v.raw_len(), - ParamsRaw::U16(v) => v.raw_len(), - ParamsRaw::U16Pair(v) => v.raw_len(), - ParamsRaw::U16Triplet(v) => v.raw_len(), - ParamsRaw::I16(v) => v.raw_len(), - ParamsRaw::I16Pair(v) => v.raw_len(), - ParamsRaw::I16Triplet(v) => v.raw_len(), - ParamsRaw::U32(v) => v.raw_len(), - ParamsRaw::U32Pair(v) => v.raw_len(), - ParamsRaw::U32Triplet(v) => v.raw_len(), - ParamsRaw::I32(v) => v.raw_len(), - ParamsRaw::I32Pair(v) => v.raw_len(), - ParamsRaw::I32Triplet(v) => v.raw_len(), - ParamsRaw::F32(v) => v.raw_len(), - ParamsRaw::F32Pair(v) => v.raw_len(), - ParamsRaw::F32Triplet(v) => v.raw_len(), - ParamsRaw::U64(v) => v.raw_len(), - ParamsRaw::I64(v) => v.raw_len(), - ParamsRaw::F64(v) => v.raw_len(), + ParamsRaw::U8(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U8Pair(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U8Triplet(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I8(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I8Pair(v) => WritableToBeBytes::written_len(v), + 
ParamsRaw::I8Triplet(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U16(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U16Pair(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U16Triplet(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I16(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I16Pair(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I16Triplet(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U32(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U32Pair(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U32Triplet(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I32(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I32Pair(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I32Triplet(v) => WritableToBeBytes::written_len(v), + ParamsRaw::F32(v) => WritableToBeBytes::written_len(v), + ParamsRaw::F32Pair(v) => WritableToBeBytes::written_len(v), + ParamsRaw::F32Triplet(v) => WritableToBeBytes::written_len(v), + ParamsRaw::U64(v) => WritableToBeBytes::written_len(v), + ParamsRaw::I64(v) => WritableToBeBytes::written_len(v), + ParamsRaw::F64(v) => WritableToBeBytes::written_len(v), } } @@ -460,7 +467,7 @@ params_raw_from_newtype!( ); #[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum EcssEnumParams { +pub enum ParamsEcssEnum { U8(EcssEnumU8), U16(EcssEnumU16), U32(EcssEnumU32), @@ -468,40 +475,46 @@ pub enum EcssEnumParams { } macro_rules! writable_as_be_bytes_ecss_enum_impl { - ($EnumIdent: ident) => { + ($EnumIdent: ident, $Ty: ident) => { + impl From<$EnumIdent> for ParamsEcssEnum { + fn from(e: $EnumIdent) -> Self { + Self::$Ty(e) + } + } + impl WritableToBeBytes for $EnumIdent { - fn raw_len(&self) -> usize { + fn written_len(&self) -> usize { self.size() } fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result { - ::write_to_be_bytes(self, buf).map(|_| self.raw_len()) + ::write_to_be_bytes(self, buf).map(|_| self.written_len()) } } }; } -writable_as_be_bytes_ecss_enum_impl!(EcssEnumU8); -writable_as_be_bytes_ecss_enum_impl!(EcssEnumU16); -writable_as_be_bytes_ecss_enum_impl!(EcssEnumU32); -writable_as_be_bytes_ecss_enum_impl!(EcssEnumU64); +writable_as_be_bytes_ecss_enum_impl!(EcssEnumU8, U8); +writable_as_be_bytes_ecss_enum_impl!(EcssEnumU16, U16); +writable_as_be_bytes_ecss_enum_impl!(EcssEnumU32, U32); +writable_as_be_bytes_ecss_enum_impl!(EcssEnumU64, U64); -impl WritableToBeBytes for EcssEnumParams { - fn raw_len(&self) -> usize { +impl WritableToBeBytes for ParamsEcssEnum { + fn written_len(&self) -> usize { match self { - EcssEnumParams::U8(e) => e.raw_len(), - EcssEnumParams::U16(e) => e.raw_len(), - EcssEnumParams::U32(e) => e.raw_len(), - EcssEnumParams::U64(e) => e.raw_len(), + ParamsEcssEnum::U8(e) => e.written_len(), + ParamsEcssEnum::U16(e) => e.written_len(), + ParamsEcssEnum::U32(e) => e.written_len(), + ParamsEcssEnum::U64(e) => e.written_len(), } } fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result { match self { - EcssEnumParams::U8(e) => WritableToBeBytes::write_to_be_bytes(e, buf), - EcssEnumParams::U16(e) => WritableToBeBytes::write_to_be_bytes(e, buf), - EcssEnumParams::U32(e) => WritableToBeBytes::write_to_be_bytes(e, buf), - EcssEnumParams::U64(e) => WritableToBeBytes::write_to_be_bytes(e, buf), + ParamsEcssEnum::U8(e) => WritableToBeBytes::write_to_be_bytes(e, buf), + ParamsEcssEnum::U16(e) => WritableToBeBytes::write_to_be_bytes(e, buf), + ParamsEcssEnum::U32(e) => WritableToBeBytes::write_to_be_bytes(e, buf), + ParamsEcssEnum::U64(e) => WritableToBeBytes::write_to_be_bytes(e, buf), } } } 
@@ -510,7 +523,19 @@ impl WritableToBeBytes for EcssEnumParams { #[derive(Debug, Copy, Clone, PartialEq)] pub enum ParamsHeapless { Raw(ParamsRaw), - EcssEnum(EcssEnumParams), + EcssEnum(ParamsEcssEnum), +} + +impl From for ParamsHeapless { + fn from(v: ParamsRaw) -> Self { + Self::Raw(v) + } +} + +impl From for ParamsHeapless { + fn from(v: ParamsEcssEnum) -> Self { + Self::EcssEnum(v) + } } macro_rules! from_conversions_for_raw { @@ -559,16 +584,14 @@ from_conversions_for_raw!( /// Generic enumeration for additional parameters, including parameters which rely on heap /// allocations. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] #[non_exhaustive] pub enum Params { Heapless(ParamsHeapless), Store(StoreAddr), #[cfg(feature = "alloc")] - #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] Vec(Vec), #[cfg(feature = "alloc")] - #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] String(String), } @@ -584,8 +607,13 @@ impl From for Params { } } +impl From for Params { + fn from(x: ParamsRaw) -> Self { + Self::Heapless(ParamsHeapless::Raw(x)) + } +} + #[cfg(feature = "alloc")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] impl From> for Params { fn from(val: Vec) -> Self { Self::Vec(val) @@ -594,7 +622,6 @@ impl From> for Params { /// Converts a byte slice into the [Params::Vec] variant #[cfg(feature = "alloc")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] impl From<&[u8]> for Params { fn from(val: &[u8]) -> Self { Self::Vec(val.to_vec()) @@ -602,7 +629,6 @@ impl From<&[u8]> for Params { } #[cfg(feature = "alloc")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] impl From for Params { fn from(val: String) -> Self { Self::String(val) @@ -610,7 +636,6 @@ impl From for Params { } #[cfg(feature = "alloc")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] /// Converts a string slice into the [Params::String] variant impl From<&str> for Params { fn from(val: &str) -> Self { @@ -618,10 +643,90 @@ impl From<&str> for Params { } } +/// Please note while [WritableToBeBytes] is implemented for [Params], the default implementation +/// will not be able to process the [Params::Store] parameter variant. 
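+///
+/// [Params::Store] only carries a pool address, so this implementation reports a written length
+/// of 0 for it and writes no bytes. A minimal sketch for the self-contained variants, assuming
+/// the `alloc` feature is enabled:
+///
+/// ```rust
+/// use satrs::params::{Params, ParamsRaw, U32Pair, WritableToBeBytes};
+///
+/// let param: Params = Params::Heapless(ParamsRaw::from(U32Pair(1, 2)).into());
+/// assert_eq!(param.written_len(), 8);
+/// let raw = param.to_vec().unwrap();
+/// assert_eq!(u32::from_be_bytes(raw[0..4].try_into().unwrap()), 1);
+/// assert_eq!(u32::from_be_bytes(raw[4..8].try_into().unwrap()), 2);
+/// ```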
+impl WritableToBeBytes for Params { + fn written_len(&self) -> usize { + match self { + Params::Heapless(p) => match p { + ParamsHeapless::Raw(raw) => raw.written_len(), + ParamsHeapless::EcssEnum(enumeration) => enumeration.written_len(), + }, + Params::Store(_) => 0, + #[cfg(feature = "alloc")] + Params::Vec(vec) => vec.len(), + #[cfg(feature = "alloc")] + Params::String(string) => string.len(), + } + } + + fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result { + match self { + Params::Heapless(p) => match p { + ParamsHeapless::Raw(raw) => raw.write_to_be_bytes(buf), + ParamsHeapless::EcssEnum(enumeration) => enumeration.write_to_be_bytes(buf), + }, + Params::Store(_) => Ok(0), + #[cfg(feature = "alloc")] + Params::Vec(vec) => { + if buf.len() < vec.len() { + return Err(ByteConversionError::ToSliceTooSmall { + found: buf.len(), + expected: vec.len(), + }); + } + buf[0..vec.len()].copy_from_slice(vec); + Ok(vec.len()) + } + #[cfg(feature = "alloc")] + Params::String(string) => { + if buf.len() < string.len() { + return Err(ByteConversionError::ToSliceTooSmall { + found: buf.len(), + expected: string.len(), + }); + } + buf[0..string.len()].copy_from_slice(string.as_bytes()); + Ok(string.len()) + } + } + } +} + #[cfg(test)] mod tests { use super::*; + fn test_cloning_works(param_raw: &impl WritableToBeBytes) { + let _new_param = param_raw; + } + + fn test_writing_fails(param_raw: &(impl WritableToBeBytes + ToBeBytes)) { + let pair_size = WritableToBeBytes::written_len(param_raw); + assert_eq!(pair_size, ToBeBytes::written_len(param_raw)); + let mut vec = alloc::vec![0; pair_size - 1]; + let result = param_raw.write_to_be_bytes(&mut vec); + if let Err(ByteConversionError::ToSliceTooSmall { found, expected }) = result { + assert_eq!(found, pair_size - 1); + assert_eq!(expected, pair_size); + } else { + panic!("Expected ByteConversionError::ToSliceTooSmall"); + } + } + + fn test_writing(params_raw: &ParamsRaw, writeable: &impl WritableToBeBytes) { + assert_eq!(params_raw.written_len(), writeable.written_len()); + let mut vec = alloc::vec![0; writeable.written_len()]; + writeable + .write_to_be_bytes(&mut vec) + .expect("writing parameter to buffer failed"); + let mut other_vec = alloc::vec![0; writeable.written_len()]; + params_raw + .write_to_be_bytes(&mut other_vec) + .expect("writing parameter to buffer failed"); + assert_eq!(vec, other_vec); + } + #[test] fn test_basic_u32_pair() { let u32_pair = U32Pair(4, 8); @@ -632,10 +737,32 @@ mod tests { assert_eq!(u32_conv_back, 4); u32_conv_back = u32::from_be_bytes(raw[4..8].try_into().unwrap()); assert_eq!(u32_conv_back, 8); + test_writing_fails(&u32_pair); + test_cloning_works(&u32_pair); + let u32_praw = ParamsRaw::from(u32_pair); + test_writing(&u32_praw, &u32_pair); } #[test] - fn basic_signed_test_pair() { + fn test_u16_pair_writing_fails() { + let u16_pair = U16Pair(4, 8); + test_writing_fails(&u16_pair); + test_cloning_works(&u16_pair); + let u16_praw = ParamsRaw::from(u16_pair); + test_writing(&u16_praw, &u16_pair); + } + + #[test] + fn test_u8_pair_writing_fails() { + let u8_pair = U8Pair(4, 8); + test_writing_fails(&u8_pair); + test_cloning_works(&u8_pair); + let u8_praw = ParamsRaw::from(u8_pair); + test_writing(&u8_praw, &u8_pair); + } + + #[test] + fn basic_i8_test() { let i8_pair = I8Pair(-3, -16); assert_eq!(i8_pair.0, -3); assert_eq!(i8_pair.1, -16); @@ -644,10 +771,31 @@ mod tests { assert_eq!(i8_conv_back, -3); i8_conv_back = i8::from_be_bytes(raw[1..2].try_into().unwrap()); assert_eq!(i8_conv_back, -16); + 
test_writing_fails(&i8_pair); + test_cloning_works(&i8_pair); + let i8_praw = ParamsRaw::from(i8_pair); + test_writing(&i8_praw, &i8_pair); } #[test] - fn basic_signed_test_triplet() { + fn test_from_u32_triplet() { + let raw_params = U32Triplet::from((1, 2, 3)); + assert_eq!(raw_params.0, 1); + assert_eq!(raw_params.1, 2); + assert_eq!(raw_params.2, 3); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 12); + assert_eq!( + raw_params.to_be_bytes(), + [0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3] + ); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let u32_triplet = ParamsRaw::from(raw_params); + test_writing(&u32_triplet, &raw_params); + } + + #[test] + fn test_i8_triplet() { let i8_triplet = I8Triplet(-3, -16, -126); assert_eq!(i8_triplet.0, -3); assert_eq!(i8_triplet.1, -16); @@ -659,6 +807,10 @@ mod tests { assert_eq!(i8_conv_back, -16); i8_conv_back = i8::from_be_bytes(raw[2..3].try_into().unwrap()); assert_eq!(i8_conv_back, -126); + test_writing_fails(&i8_triplet); + test_cloning_works(&i8_triplet); + let i8_praw = ParamsRaw::from(i8_triplet); + test_writing(&i8_praw, &i8_triplet); } #[test] @@ -681,4 +833,352 @@ mod tests { panic!("Params type is not a vector") } } + + #[test] + fn test_params_written_len_raw() { + let param_raw = ParamsRaw::from((500_u32, 1000_u32)); + let param: Params = Params::Heapless(param_raw.into()); + assert_eq!(param.written_len(), 8); + let mut buf: [u8; 8] = [0; 8]; + param + .write_to_be_bytes(&mut buf) + .expect("writing to buffer failed"); + assert_eq!(u32::from_be_bytes(buf[0..4].try_into().unwrap()), 500); + assert_eq!(u32::from_be_bytes(buf[4..8].try_into().unwrap()), 1000); + } + + #[test] + fn test_params_written_string() { + let string = "Test String".to_string(); + let param = Params::String(string.clone()); + assert_eq!(param.written_len(), string.len()); + let vec = param.to_vec().unwrap(); + let string_conv_back = String::from_utf8(vec).expect("conversion to string failed"); + assert_eq!(string_conv_back, string); + } + + #[test] + fn test_params_written_vec() { + let vec: Vec = alloc::vec![1, 2, 3, 4, 5]; + let param = Params::Vec(vec.clone()); + assert_eq!(param.written_len(), vec.len()); + assert_eq!(param.to_vec().expect("writing vec params failed"), vec); + } + + #[test] + fn test_u32_single() { + let raw_params = U32::from(20); + assert_eq!(raw_params.0, 20); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 4); + assert_eq!(raw_params.to_be_bytes(), [0, 0, 0, 20]); + let other = U32::from(20); + assert_eq!(raw_params, other); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let u32_praw = ParamsRaw::from(raw_params); + test_writing(&u32_praw, &raw_params); + } + + #[test] + fn test_i8_single() { + let neg_number: i8 = -5_i8; + let raw_params = I8::from(neg_number); + assert_eq!(raw_params.0, neg_number); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 1); + assert_eq!(raw_params.to_be_bytes(), neg_number.to_be_bytes()); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let u8_praw = ParamsRaw::from(raw_params); + test_writing(&u8_praw, &raw_params); + } + + #[test] + fn test_u8_single() { + let raw_params = U8::from(20); + assert_eq!(raw_params.0, 20); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 1); + assert_eq!(raw_params.to_be_bytes(), [20]); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let u32_praw = ParamsRaw::from(raw_params); + test_writing(&u32_praw, &raw_params); + } + + #[test] + fn test_u16_single() 
{ + let raw_params = U16::from(0x123); + assert_eq!(raw_params.0, 0x123); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 2); + assert_eq!(raw_params.to_be_bytes(), [0x01, 0x23]); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let u16_praw = ParamsRaw::from(raw_params); + test_writing(&u16_praw, &raw_params); + } + + #[test] + fn test_u16_triplet() { + let raw_params = U16Triplet::from((1, 2, 3)); + assert_eq!(raw_params.0, 1); + assert_eq!(raw_params.1, 2); + assert_eq!(raw_params.2, 3); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 6); + assert_eq!(raw_params.to_be_bytes(), [0, 1, 0, 2, 0, 3]); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let u16_praw = ParamsRaw::from(raw_params); + test_writing(&u16_praw, &raw_params); + } + + #[test] + fn test_u8_triplet() { + let raw_params = U8Triplet::from((1, 2, 3)); + assert_eq!(raw_params.0, 1); + assert_eq!(raw_params.1, 2); + assert_eq!(raw_params.2, 3); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 3); + assert_eq!(raw_params.to_be_bytes(), [1, 2, 3]); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let u8_praw = ParamsRaw::from(raw_params); + test_writing(&u8_praw, &raw_params); + } + + #[test] + fn test_i16_single() { + let value = -300_i16; + let raw_params = I16::from(value); + assert_eq!(raw_params.0, value); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 2); + assert_eq!(raw_params.to_be_bytes(), value.to_be_bytes()); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let i16_praw = ParamsRaw::from(raw_params); + test_writing(&i16_praw, &raw_params); + } + + #[test] + fn test_i16_pair() { + let raw_params = I16Pair::from((-300, -400)); + assert_eq!(raw_params.0, -300); + assert_eq!(raw_params.1, -400); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 4); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let i16_praw = ParamsRaw::from(raw_params); + test_writing(&i16_praw, &raw_params); + } + + #[test] + fn test_i16_triplet() { + let raw_params = I16Triplet::from((-300, -400, -350)); + assert_eq!(raw_params.0, -300); + assert_eq!(raw_params.1, -400); + assert_eq!(raw_params.2, -350); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 6); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let i16_praw = ParamsRaw::from(raw_params); + test_writing(&i16_praw, &raw_params); + } + + #[test] + fn test_i32_single() { + let raw_params = I32::from(-80000); + assert_eq!(raw_params.0, -80000); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 4); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let i32_praw = ParamsRaw::from(raw_params); + test_writing(&i32_praw, &raw_params); + } + + #[test] + fn test_i32_pair() { + let raw_params = I32Pair::from((-80000, -200)); + assert_eq!(raw_params.0, -80000); + assert_eq!(raw_params.1, -200); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 8); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let i32_praw = ParamsRaw::from(raw_params); + test_writing(&i32_praw, &raw_params); + } + + #[test] + fn test_i32_triplet() { + let raw_params = I32Triplet::from((-80000, -5, -200)); + assert_eq!(raw_params.0, -80000); + assert_eq!(raw_params.1, -5); + assert_eq!(raw_params.2, -200); + assert_eq!(WritableToBeBytes::written_len(&raw_params), 12); + test_writing_fails(&raw_params); + test_cloning_works(&raw_params); + let i32_praw = ParamsRaw::from(raw_params); + 
test_writing(&i32_praw, &raw_params); + } + + #[test] + fn test_f32_single() { + let param = F32::from(0.1); + assert_eq!(param.0, 0.1); + assert_eq!(WritableToBeBytes::written_len(¶m), 4); + let f32_pair_raw = param.to_be_bytes(); + let f32_0 = f32::from_be_bytes(f32_pair_raw[0..4].try_into().unwrap()); + assert_eq!(f32_0, 0.1); + test_writing_fails(¶m); + test_cloning_works(¶m); + let praw = ParamsRaw::from(param); + test_writing(&praw, ¶m); + let p_try_from = F32::try_from(param.to_be_bytes().as_ref()).expect("try_from failed"); + assert_eq!(p_try_from, param); + } + + #[test] + fn test_f32_pair() { + let param = F32Pair::from((0.1, 0.2)); + assert_eq!(param.0, 0.1); + assert_eq!(param.1, 0.2); + assert_eq!(WritableToBeBytes::written_len(¶m), 8); + let f32_pair_raw = param.to_be_bytes(); + let f32_0 = f32::from_be_bytes(f32_pair_raw[0..4].try_into().unwrap()); + assert_eq!(f32_0, 0.1); + let f32_1 = f32::from_be_bytes(f32_pair_raw[4..8].try_into().unwrap()); + assert_eq!(f32_1, 0.2); + let other_pair = F32Pair::from((0.1, 0.2)); + assert_eq!(param, other_pair); + test_writing_fails(¶m); + test_cloning_works(¶m); + let praw = ParamsRaw::from(param); + test_writing(&praw, ¶m); + let p_try_from = F32Pair::try_from(param.to_be_bytes().as_ref()).expect("try_from failed"); + assert_eq!(p_try_from, param); + } + + #[test] + fn test_f32_triplet() { + let f32 = F32Triplet::from((0.1, -0.1, -5.2)); + assert_eq!(f32.0, 0.1); + assert_eq!(f32.1, -0.1); + assert_eq!(f32.2, -5.2); + assert_eq!(WritableToBeBytes::written_len(&f32), 12); + let f32_pair_raw = f32.to_be_bytes(); + let f32_0 = f32::from_be_bytes(f32_pair_raw[0..4].try_into().unwrap()); + assert_eq!(f32_0, 0.1); + let f32_1 = f32::from_be_bytes(f32_pair_raw[4..8].try_into().unwrap()); + assert_eq!(f32_1, -0.1); + let f32_2 = f32::from_be_bytes(f32_pair_raw[8..12].try_into().unwrap()); + assert_eq!(f32_2, -5.2); + test_writing_fails(&f32); + test_cloning_works(&f32); + let f32_praw = ParamsRaw::from(f32); + test_writing(&f32_praw, &f32); + let f32_try_from = + F32Triplet::try_from(f32.to_be_bytes().as_ref()).expect("try_from failed"); + assert_eq!(f32_try_from, f32); + } + + #[test] + fn test_u64_single() { + let u64 = U64::from(0x1010101010); + assert_eq!(u64.0, 0x1010101010); + assert_eq!(WritableToBeBytes::written_len(&u64), 8); + test_writing_fails(&u64); + test_cloning_works(&u64); + let praw = ParamsRaw::from(u64); + test_writing(&praw, &u64); + } + + #[test] + fn test_i64_single() { + let i64 = I64::from(-0xfffffffff); + assert_eq!(i64.0, -0xfffffffff); + assert_eq!(WritableToBeBytes::written_len(&i64), 8); + test_writing_fails(&i64); + test_cloning_works(&i64); + let praw = ParamsRaw::from(i64); + test_writing(&praw, &i64); + } + + #[test] + fn test_f64_single() { + let value = 823_823_812_832.232_3; + let f64 = F64::from(value); + assert_eq!(f64.0, value); + assert_eq!(WritableToBeBytes::written_len(&f64), 8); + test_writing_fails(&f64); + test_cloning_works(&f64); + let praw = ParamsRaw::from(f64); + test_writing(&praw, &f64); + } + + #[test] + fn test_f64_triplet() { + let f64_triplet = F64Triplet::from((0.1, 0.2, 0.3)); + assert_eq!(f64_triplet.0, 0.1); + assert_eq!(f64_triplet.1, 0.2); + assert_eq!(f64_triplet.2, 0.3); + assert_eq!(WritableToBeBytes::written_len(&f64_triplet), 24); + let f64_triplet_raw = f64_triplet.to_be_bytes(); + let f64_0 = f64::from_be_bytes(f64_triplet_raw[0..8].try_into().unwrap()); + assert_eq!(f64_0, 0.1); + let f64_1 = f64::from_be_bytes(f64_triplet_raw[8..16].try_into().unwrap()); + 
assert_eq!(f64_1, 0.2); + let f64_2 = f64::from_be_bytes(f64_triplet_raw[16..24].try_into().unwrap()); + assert_eq!(f64_2, 0.3); + test_writing_fails(&f64_triplet); + test_cloning_works(&f64_triplet); + } + + #[test] + fn test_u8_ecss_enum() { + let value = 200; + let u8p = EcssEnumU8::new(value); + test_cloning_works(&u8p); + let praw = ParamsEcssEnum::from(u8p); + assert_eq!(praw.written_len(), 1); + let mut buf = [0; 1]; + praw.write_to_be_bytes(&mut buf) + .expect("writing to buffer failed"); + buf[0] = 200; + } + + #[test] + fn test_u16_ecss_enum() { + let value = 60000; + let u16p = EcssEnumU16::new(value); + test_cloning_works(&u16p); + let praw = ParamsEcssEnum::from(u16p); + assert_eq!(praw.written_len(), 2); + let mut buf = [0; 2]; + praw.write_to_be_bytes(&mut buf) + .expect("writing to buffer failed"); + assert_eq!(u16::from_be_bytes(buf), value); + } + + #[test] + fn test_u32_ecss_enum() { + let value = 70000; + let u32p = EcssEnumU32::new(value); + test_cloning_works(&u32p); + let praw = ParamsEcssEnum::from(u32p); + assert_eq!(praw.written_len(), 4); + let mut buf = [0; 4]; + praw.write_to_be_bytes(&mut buf) + .expect("writing to buffer failed"); + assert_eq!(u32::from_be_bytes(buf), value); + } + + #[test] + fn test_u64_ecss_enum() { + let value = 0xffffffffff; + let u64p = EcssEnumU64::new(value); + test_cloning_works(&u64p); + let praw = ParamsEcssEnum::from(u64p); + assert_eq!(praw.written_len(), 8); + let mut buf = [0; 8]; + praw.write_to_be_bytes(&mut buf) + .expect("writing to buffer failed"); + assert_eq!(u64::from_be_bytes(buf), value); + } } diff --git a/satrs/src/pool.rs b/satrs/src/pool.rs index d17f565..1c3b8a4 100644 --- a/satrs/src/pool.rs +++ b/satrs/src/pool.rs @@ -72,7 +72,6 @@ //! } //! ``` #[cfg(feature = "alloc")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] pub use alloc_mod::*; use core::fmt::{Display, Formatter}; use delegate::delegate; diff --git a/satrs/src/pus/action.rs b/satrs/src/pus/action.rs index 2ee4815..875621f 100644 --- a/satrs/src/pus/action.rs +++ b/satrs/src/pus/action.rs @@ -1,6 +1,10 @@ -use crate::{action::ActionRequest, TargetId}; +use crate::{ + action::{ActionId, ActionRequest}, + params::Params, + request::{GenericMessage, MessageMetadata, RequestId}, +}; -use super::verification::{TcStateAccepted, VerificationToken}; +use satrs_shared::res_code::ResultU16; #[cfg(feature = "std")] #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] @@ -8,219 +12,278 @@ pub use std_mod::*; #[cfg(feature = "alloc")] #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +#[allow(unused_imports)] pub use alloc_mod::*; -/// This trait is an abstraction for the routing of PUS service 8 action requests to a dedicated -/// recipient using the generic [TargetId]. -pub trait PusActionRequestRouter { - type Error; - fn route( - &self, - target_id: TargetId, - hk_request: ActionRequest, - token: VerificationToken, - ) -> Result<(), Self::Error>; +#[derive(Clone, Debug)] +pub struct ActionRequestWithId { + pub request_id: RequestId, + pub request: ActionRequest, +} + +/// A reply to an action request, but tailored to the PUS standard verification process. 
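+///
+/// The variants are intended to map onto PUS service 1 verification reports: the step variants
+/// correspond to step success/failure reports, while the completion variants correspond to the
+/// final completion success/failure reports for the action request.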
+#[non_exhaustive] +#[derive(Clone, PartialEq, Debug)] +pub enum ActionReplyVariant { + Completed, + StepSuccess { + step: u16, + }, + CompletionFailed { + error_code: ResultU16, + params: Option, + }, + StepFailed { + error_code: ResultU16, + step: u16, + params: Option, + }, +} + +#[derive(Debug, PartialEq, Clone)] +pub struct PusActionReply { + pub action_id: ActionId, + pub variant: ActionReplyVariant, +} + +impl PusActionReply { + pub fn new(action_id: ActionId, variant: ActionReplyVariant) -> Self { + Self { action_id, variant } + } +} + +pub type GenericActionReplyPus = GenericMessage; + +impl GenericActionReplyPus { + pub fn new_action_reply( + requestor_info: MessageMetadata, + action_id: ActionId, + reply: ActionReplyVariant, + ) -> Self { + Self::new(requestor_info, PusActionReply::new(action_id, reply)) + } } #[cfg(feature = "alloc")] #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] pub mod alloc_mod { - use spacepackets::ecss::tc::PusTcReader; + use crate::{ + action::ActionRequest, + queue::GenericTargetedMessagingError, + request::{ + GenericMessage, MessageReceiver, MessageSender, MessageSenderAndReceiver, RequestId, + }, + ComponentId, + }; - use crate::pus::verification::VerificationReportingProvider; + use super::PusActionReply; - use super::*; + /// Helper type definition for a mode handler which can handle mode requests. + pub type ActionRequestHandlerInterface = + MessageSenderAndReceiver; - /// This trait is an abstraction for the conversion of a PUS service 8 action telecommand into - /// an [ActionRequest]. - /// - /// Having a dedicated trait for this allows maximum flexiblity and tailoring of the standard. - /// The only requirement is that a valid [TargetId] and an [ActionRequest] are returned by the - /// core conversion function. - /// - /// The user should take care of performing the error handling as well. Some of the following - /// aspects might be relevant: - /// - /// - Checking the validity of the APID, service ID, subservice ID. - /// - Checking the validity of the user data. - /// - /// A [VerificationReportingProvider] instance is passed to the user to also allow handling - /// of the verification process as part of the PUS standard requirements. - pub trait PusActionToRequestConverter { - type Error; - fn convert( - &mut self, - token: VerificationToken, - tc: &PusTcReader, - time_stamp: &[u8], - verif_reporter: &impl VerificationReportingProvider, - ) -> Result<(TargetId, ActionRequest), Self::Error>; + impl, R: MessageReceiver> + ActionRequestHandlerInterface + { + pub fn try_recv_action_request( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.try_recv_message() + } + + pub fn send_action_reply( + &self, + request_id: RequestId, + target_id: ComponentId, + reply: PusActionReply, + ) -> Result<(), GenericTargetedMessagingError> { + self.send_message(request_id, target_id, reply) + } + } + + /// Helper type defintion for a mode handler object which can send mode requests and receive + /// mode replies. 
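+    /// For the action service this means sending [ActionRequest] messages to a target component
+    /// and receiving the corresponding [PusActionReply] messages, as sketched below with
+    /// placeholder IDs and request data.
+    ///
+    /// ```ignore
+    /// // Dispatch an action request, then poll for replies from the target component.
+    /// requestor.send_action_request(request_id, target_id, action_request)?;
+    /// if let Some(reply) = requestor.try_recv_action_reply()? {
+    ///     // reply.message is a PusActionReply carrying the action ID and the reply variant.
+    /// }
+    /// ```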
+ pub type ActionRequestorInterface = + MessageSenderAndReceiver; + + impl, R: MessageReceiver> + ActionRequestorInterface + { + pub fn try_recv_action_reply( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.try_recv_message() + } + + pub fn send_action_request( + &self, + request_id: RequestId, + target_id: ComponentId, + request: ActionRequest, + ) -> Result<(), GenericTargetedMessagingError> { + self.send_message(request_id, target_id, request) + } } } #[cfg(feature = "std")] #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] pub mod std_mod { - use crate::pus::{ - get_current_cds_short_timestamp, verification::VerificationReportingProvider, - EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore, GenericRoutingError, - PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler, PusServiceHelper, + use std::sync::mpsc; + + use crate::{ + pus::{ + verification::{self, TcStateToken}, + ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap, + }, + ComponentId, }; use super::*; - /// This is a high-level handler for the PUS service 8 action service. - /// - /// It performs the following handling steps: - /// - /// 1. Retrieve the next TC packet from the [PusServiceHelper]. The [EcssTcInMemConverter] - /// allows to configure the used telecommand memory backend. - /// 2. Convert the TC to a targeted action request using the provided - /// [PusActionToRequestConverter]. The generic error type is constrained to the - /// [PusPacketHandlingError] for the concrete implementation which offers a packet handler. - /// 3. Route the action request using the provided [PusActionRequestRouter]. - /// 4. Handle all routing errors using the provided [PusRoutingErrorHandler]. - pub struct PusService8ActionHandler< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - RequestConverter: PusActionToRequestConverter, - RequestRouter: PusActionRequestRouter, - RoutingErrorHandler: PusRoutingErrorHandler, - RoutingError = GenericRoutingError, - > { - service_helper: - PusServiceHelper, - pub request_converter: RequestConverter, - pub request_router: RequestRouter, - pub routing_error_handler: RoutingErrorHandler, + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct ActivePusActionRequestStd { + pub action_id: ActionId, + common: ActivePusRequestStd, } - impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - RequestConverter: PusActionToRequestConverter, - RequestRouter: PusActionRequestRouter, - RoutingErrorHandler: PusRoutingErrorHandler, - RoutingError: Clone, - > - PusService8ActionHandler< - TcReceiver, - TmSender, - TcInMemConverter, - VerificationReporter, - RequestConverter, - RequestRouter, - RoutingErrorHandler, - RoutingError, - > - where - PusPacketHandlingError: From, - { + impl ActiveRequestProvider for ActivePusActionRequestStd { + delegate::delegate! 
{ + to self.common { + fn target_id(&self) -> ComponentId; + fn token(&self) -> verification::TcStateToken; + fn set_token(&mut self, token: verification::TcStateToken); + fn has_timed_out(&self) -> bool; + fn timeout(&self) -> core::time::Duration; + } + } + } + + impl ActivePusActionRequestStd { + pub fn new_from_common_req(action_id: ActionId, common: ActivePusRequestStd) -> Self { + Self { action_id, common } + } + pub fn new( - service_helper: PusServiceHelper< - TcReceiver, - TmSender, - TcInMemConverter, - VerificationReporter, - >, - request_converter: RequestConverter, - request_router: RequestRouter, - routing_error_handler: RoutingErrorHandler, + action_id: ActionId, + target_id: ComponentId, + token: TcStateToken, + timeout: core::time::Duration, ) -> Self { Self { - service_helper, - request_converter, - request_router, - routing_error_handler, + action_id, + common: ActivePusRequestStd::new(target_id, token, timeout), } } - - /// Core function to poll the next TC packet and try to handle it. - pub fn handle_one_tc(&mut self) -> Result { - let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?; - if possible_packet.is_none() { - return Ok(PusPacketHandlerResult::Empty); - } - let ecss_tc_and_token = possible_packet.unwrap(); - let tc = self - .service_helper - .tc_in_mem_converter - .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?; - let mut partial_error = None; - let time_stamp = get_current_cds_short_timestamp(&mut partial_error); - let (target_id, action_request) = self.request_converter.convert( - ecss_tc_and_token.token, - &tc, - &time_stamp, - &self.service_helper.common.verification_handler, - )?; - if let Err(e) = - self.request_router - .route(target_id, action_request, ecss_tc_and_token.token) - { - self.routing_error_handler.handle_error( - target_id, - ecss_tc_and_token.token, - &tc, - e.clone(), - &time_stamp, - &self.service_helper.common.verification_handler, - ); - return Err(e.into()); - } - Ok(PusPacketHandlerResult::RequestHandled) - } } + pub type DefaultActiveActionRequestMap = DefaultActiveRequestMap; + + pub type ActionRequestHandlerMpsc = ActionRequestHandlerInterface< + mpsc::Sender>, + mpsc::Receiver>, + >; + pub type ActionRequestHandlerMpscBounded = ActionRequestHandlerInterface< + mpsc::SyncSender>, + mpsc::Receiver>, + >; + + pub type ActionRequestorMpsc = ActionRequestorInterface< + mpsc::Sender>, + mpsc::Receiver>, + >; + pub type ActionRequestorBoundedMpsc = ActionRequestorInterface< + mpsc::SyncSender>, + mpsc::Receiver>, + >; + + /* + pub type ModeRequestorAndHandlerMpsc = ModeInterface< + mpsc::Sender>, + mpsc::Receiver>, + mpsc::Sender>, + mpsc::Receiver>, + >; + pub type ModeRequestorAndHandlerMpscBounded = ModeInterface< + mpsc::SyncSender>, + mpsc::Receiver>, + mpsc::SyncSender>, + mpsc::Receiver>, + >; + */ } #[cfg(test)] mod tests { + /* + use core::{cell::RefCell, time::Duration}; + use std::{sync::mpsc, time::SystemTimeError}; + + use alloc::{collections::VecDeque, vec::Vec}; use delegate::delegate; use spacepackets::{ ecss::{ - tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader}, + tc::{PusTcCreator, PusTcReader}, tm::PusTmReader, PusPacket, }, - CcsdsPacket, SequenceFlags, SpHeader, + time::{cds, TimeWriter}, + CcsdsPacket, }; - use crate::pus::{ - tests::{ - PusServiceHandlerWithVecCommon, PusTestHarness, SimplePusPacketHandler, TestConverter, - TestRouter, TestRoutingErrorHandler, APP_DATA_TOO_SHORT, TEST_APID, + use crate::{ + action::ActionRequestVariant, + params::{self, 
ParamsRaw, WritableToBeBytes}, + pus::{ + tests::{ + PusServiceHandlerWithVecCommon, PusTestHarness, SimplePusPacketHandler, + TestConverter, TestRouter, APP_DATA_TOO_SHORT, + }, + verification::{ + self, + tests::{SharedVerificationMap, TestVerificationReporter, VerificationStatus}, + FailParams, TcStateAccepted, TcStateNone, TcStateStarted, + VerificationReportingProvider, + }, + EcssTcInMemConverter, EcssTcInVecConverter, EcssTmtcError, GenericRoutingError, + MpscTcReceiver, PusPacketHandlerResult, PusPacketHandlingError, PusRequestRouter, + PusServiceHelper, PusTcToRequestConverter, TmAsVecSenderWithMpsc, }, - verification::{ - tests::TestVerificationReporter, FailParams, RequestId, VerificationReportingProvider, - }, - EcssTcInVecConverter, GenericRoutingError, MpscTcReceiver, PusPacketHandlerResult, - PusPacketHandlingError, TmAsVecSenderWithMpsc, }; use super::*; - impl PusActionRequestRouter for TestRouter { + impl PusRequestRouter for TestRouter { type Error = GenericRoutingError; fn route( &self, target_id: TargetId, - hk_request: ActionRequest, + request: Request, _token: VerificationToken, ) -> Result<(), Self::Error> { self.routing_requests .borrow_mut() - .push_back((target_id, hk_request)); + .push_back((target_id, request)); self.check_for_injected_error() } + + fn handle_error( + &self, + target_id: TargetId, + token: VerificationToken, + tc: &PusTcReader, + error: Self::Error, + time_stamp: &[u8], + verif_reporter: &impl VerificationReportingProvider, + ) { + self.routing_errors + .borrow_mut() + .push_back((target_id, error)); + } } - impl PusActionToRequestConverter for TestConverter<8> { + impl PusTcToRequestConverter for TestConverter<8> { type Error = PusPacketHandlingError; fn convert( &mut self, @@ -254,9 +317,9 @@ mod tests { .expect("start success failure"); return Ok(( target_id.into(), - ActionRequest::UnsignedIdAndVecData { + ActionRequest { action_id: u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap()), - data: tc.user_data()[4..].to_vec(), + variant: ActionRequestVariant::VecData(tc.user_data()[4..].to_vec()), }, )); } @@ -266,31 +329,32 @@ mod tests { } } - struct Pus8HandlerWithVecTester { - common: PusServiceHandlerWithVecCommon, - handler: PusService8ActionHandler< + pub struct PusDynRequestHandler { + srv_helper: PusServiceHelper< MpscTcReceiver, TmAsVecSenderWithMpsc, EcssTcInVecConverter, TestVerificationReporter, - TestConverter<8>, - TestRouter, - TestRoutingErrorHandler, >, + request_converter: TestConverter, + request_router: TestRouter, } - impl Pus8HandlerWithVecTester { + struct Pus8RequestTestbenchWithVec { + common: PusServiceHandlerWithVecCommon, + handler: PusDynRequestHandler<8, ActionRequest>, + } + + impl Pus8RequestTestbenchWithVec { pub fn new() -> Self { - let (common, srv_handler) = - PusServiceHandlerWithVecCommon::new_with_test_verif_sender(); + let (common, srv_helper) = PusServiceHandlerWithVecCommon::new_with_test_verif_sender(); Self { common, - handler: PusService8ActionHandler::new( - srv_handler, - TestConverter::default(), - TestRouter::default(), - TestRoutingErrorHandler::default(), - ), + handler: PusDynRequestHandler { + srv_helper, + request_converter: TestConverter::default(), + request_router: TestRouter::default(), + }, } } @@ -305,13 +369,13 @@ mod tests { } } delegate! 
{ - to self.handler.routing_error_handler { - pub fn retrieve_next_error(&mut self) -> (TargetId, GenericRoutingError); + to self.handler.request_router { + pub fn retrieve_next_routing_error(&mut self) -> (TargetId, GenericRoutingError); } } } - impl PusTestHarness for Pus8HandlerWithVecTester { + impl PusTestHarness for Pus8RequestTestbenchWithVec { delegate! { to self.common { fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken; @@ -320,78 +384,421 @@ mod tests { fn check_next_verification_tm( &self, subservice: u8, - expected_request_id: RequestId, + expected_request_id: verification::RequestId, ); } } } - impl SimplePusPacketHandler for Pus8HandlerWithVecTester { + impl SimplePusPacketHandler for Pus8RequestTestbenchWithVec { + fn handle_one_tc(&mut self) -> Result { + let possible_packet = self.handler.srv_helper.retrieve_and_accept_next_packet()?; + if possible_packet.is_none() { + return Ok(PusPacketHandlerResult::Empty); + } + let ecss_tc_and_token = possible_packet.unwrap(); + let tc = self + .handler + .srv_helper + .tc_in_mem_converter + .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?; + let time_stamp = cds::TimeProvider::from_now_with_u16_days() + .expect("timestamp generation failed") + .to_vec() + .unwrap(); + let (target_id, action_request) = self.handler.request_converter.convert( + ecss_tc_and_token.token, + &tc, + &time_stamp, + &self.handler.srv_helper.common.verification_handler, + )?; + if let Err(e) = self.handler.request_router.route( + target_id, + action_request, + ecss_tc_and_token.token, + ) { + self.handler.request_router.handle_error( + target_id, + ecss_tc_and_token.token, + &tc, + e.clone(), + &time_stamp, + &self.handler.srv_helper.common.verification_handler, + ); + return Err(e.into()); + } + Ok(PusPacketHandlerResult::RequestHandled) + } + } + + const TIMEOUT_ERROR_CODE: ResultU16 = ResultU16::new(1, 2); + const COMPLETION_ERROR_CODE: ResultU16 = ResultU16::new(2, 0); + const COMPLETION_ERROR_CODE_STEP: ResultU16 = ResultU16::new(2, 1); + + #[derive(Default)] + pub struct TestReplyHandlerHook { + pub unexpected_replies: VecDeque, + pub timeouts: RefCell>, + } + + impl ReplyHandlerHook for TestReplyHandlerHook { + fn handle_unexpected_reply(&mut self, reply: &GenericActionReplyPus) { + self.unexpected_replies.push_back(reply.clone()); + } + + fn timeout_callback(&self, active_request: &ActivePusActionRequest) { + self.timeouts.borrow_mut().push_back(active_request.clone()); + } + + fn timeout_error_code(&self) -> ResultU16 { + TIMEOUT_ERROR_CODE + } + } + + pub struct Pus8ReplyTestbench { + verif_reporter: TestVerificationReporter, + #[allow(dead_code)] + ecss_tm_receiver: mpsc::Receiver>, + handler: PusService8ReplyHandler< + TestVerificationReporter, + DefaultActiveActionRequestMap, + TestReplyHandlerHook, + mpsc::Sender>, + >, + } + + impl Pus8ReplyTestbench { + pub fn new(normal_ctor: bool) -> Self { + let reply_handler_hook = TestReplyHandlerHook::default(); + let shared_verif_map = SharedVerificationMap::default(); + let test_verif_reporter = TestVerificationReporter::new(shared_verif_map.clone()); + let (ecss_tm_sender, ecss_tm_receiver) = mpsc::channel(); + let reply_handler = if normal_ctor { + PusService8ReplyHandler::new_from_now_with_default_map( + test_verif_reporter.clone(), + 128, + reply_handler_hook, + ecss_tm_sender, + ) + .expect("creating reply handler failed") + } else { + PusService8ReplyHandler::new_from_now( + test_verif_reporter.clone(), + DefaultActiveActionRequestMap::default(), + 128, + 
reply_handler_hook, + ecss_tm_sender, + ) + .expect("creating reply handler failed") + }; + Self { + verif_reporter: test_verif_reporter, + ecss_tm_receiver, + handler: reply_handler, + } + } + + pub fn init_handling_for_request( + &mut self, + request_id: RequestId, + _action_id: ActionId, + ) -> VerificationToken { + assert!(!self.handler.request_active(request_id)); + // let action_req = ActionRequest::new(action_id, ActionRequestVariant::NoData); + let token = self.add_tc_with_req_id(request_id.into()); + let token = self + .verif_reporter + .acceptance_success(token, &[]) + .expect("acceptance success failure"); + let token = self + .verif_reporter + .start_success(token, &[]) + .expect("start success failure"); + let verif_info = self + .verif_reporter + .verification_info(&verification::RequestId::from(request_id)) + .expect("no verification info found"); + assert!(verif_info.started.expect("request was not started")); + assert!(verif_info.accepted.expect("request was not accepted")); + token + } + + pub fn next_unrequested_reply(&self) -> Option { + self.handler.user_hook.unexpected_replies.front().cloned() + } + + pub fn assert_request_completion_success(&self, step: Option, request_id: RequestId) { + let verif_info = self + .verif_reporter + .verification_info(&verification::RequestId::from(request_id)) + .expect("no verification info found"); + self.assert_request_completion_common(request_id, &verif_info, step, true); + } + + pub fn assert_request_completion_failure( + &self, + step: Option, + request_id: RequestId, + fail_enum: ResultU16, + fail_data: &[u8], + ) { + let verif_info = self + .verif_reporter + .verification_info(&verification::RequestId::from(request_id)) + .expect("no verification info found"); + self.assert_request_completion_common(request_id, &verif_info, step, false); + assert_eq!(verif_info.fail_enum.unwrap(), fail_enum.raw() as u64); + assert_eq!(verif_info.failure_data.unwrap(), fail_data); + } + + pub fn assert_request_completion_common( + &self, + request_id: RequestId, + verif_info: &VerificationStatus, + step: Option, + completion_success: bool, + ) { + if let Some(step) = step { + assert!(verif_info.step_status.is_some()); + assert!(verif_info.step_status.unwrap()); + assert_eq!(step, verif_info.step); + } + assert_eq!( + verif_info.completed.expect("request is not completed"), + completion_success + ); + assert!(!self.handler.request_active(request_id)); + } + + pub fn assert_request_step_failure(&self, step: u16, request_id: RequestId) { + let verif_info = self + .verif_reporter + .verification_info(&verification::RequestId::from(request_id)) + .expect("no verification info found"); + assert!(verif_info.step_status.is_some()); + assert!(!verif_info.step_status.unwrap()); + assert_eq!(step, verif_info.step); + } + pub fn add_routed_request( + &mut self, + request_id: verification::RequestId, + target_id: TargetId, + action_id: ActionId, + token: VerificationToken, + timeout: Duration, + ) { + if self.handler.request_active(request_id.into()) { + panic!("request already present"); + } + self.handler + .add_routed_action_request(request_id, target_id, action_id, token, timeout); + if !self.handler.request_active(request_id.into()) { + panic!("request should be active now"); + } + } + delegate! 
{ to self.handler { - fn handle_one_tc(&mut self) -> Result; + pub fn request_active(&self, request_id: RequestId) -> bool; + + pub fn handle_action_reply( + &mut self, + action_reply_with_ids: GenericMessage, + time_stamp: &[u8] + ) -> Result<(), EcssTmtcError>; + + pub fn update_time_from_now(&mut self) -> Result<(), SystemTimeError>; + + pub fn check_for_timeouts(&mut self, time_stamp: &[u8]) -> Result<(), EcssTmtcError>; + } + to self.verif_reporter { + fn add_tc_with_req_id(&mut self, req_id: verification::RequestId) -> VerificationToken; } } } #[test] - fn basic_test() { - let mut action_handler = Pus8HandlerWithVecTester::new(); - let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); - let sec_header = PusTcSecondaryHeader::new_simple(8, 1); - let action_id: u32 = 1; - let action_id_raw = action_id.to_be_bytes(); - let tc = PusTcCreator::new(&mut sp_header, sec_header, action_id_raw.as_ref(), true); - action_handler.send_tc(&tc); - let result = action_handler.handle_one_tc(); - assert!(result.is_ok()); - action_handler.check_next_conversion(&tc); - let (target_id, action_req) = action_handler.retrieve_next_request(); - assert_eq!(target_id, TEST_APID.into()); - if let ActionRequest::UnsignedIdAndVecData { action_id, data } = action_req { - assert_eq!(action_id, 1); - assert_eq!(data, &[]); - } + fn test_reply_handler_completion_success() { + let mut reply_testbench = Pus8ReplyTestbench::new(true); + let sender_id = 0x06; + let request_id = 0x02; + let target_id = 0x05; + let action_id = 0x03; + let token = reply_testbench.init_handling_for_request(request_id, action_id); + reply_testbench.add_routed_request( + request_id.into(), + target_id, + action_id, + token, + Duration::from_millis(1), + ); + assert!(reply_testbench.request_active(request_id)); + let action_reply = GenericMessage::new( + request_id, + sender_id, + ActionReplyPusWithActionId { + action_id, + variant: ActionReplyPus::Completed, + }, + ); + reply_testbench + .handle_action_reply(action_reply, &[]) + .expect("reply handling failure"); + reply_testbench.assert_request_completion_success(None, request_id); } #[test] - fn test_routing_error() { - let mut action_handler = Pus8HandlerWithVecTester::new(); - let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); - let sec_header = PusTcSecondaryHeader::new_simple(8, 1); - let action_id: u32 = 1; - let action_id_raw = action_id.to_be_bytes(); - let tc = PusTcCreator::new(&mut sp_header, sec_header, action_id_raw.as_ref(), true); - let error = GenericRoutingError::UnknownTargetId(25); - action_handler - .handler - .request_router - .inject_routing_error(error); - action_handler.send_tc(&tc); - let result = action_handler.handle_one_tc(); - assert!(result.is_err()); - let check_error = |routing_error: GenericRoutingError| { - if let GenericRoutingError::UnknownTargetId(id) = routing_error { - assert_eq!(id, 25); - } else { - panic!("unexpected error type"); - } - }; - if let PusPacketHandlingError::RequestRoutingError(routing_error) = result.unwrap_err() { - check_error(routing_error); - } else { - panic!("unexpected error type"); - } - - action_handler.check_next_conversion(&tc); - let (target_id, action_req) = action_handler.retrieve_next_request(); - assert_eq!(target_id, TEST_APID.into()); - if let ActionRequest::UnsignedIdAndVecData { action_id, data } = action_req { - assert_eq!(action_id, 1); - assert_eq!(data, &[]); - } - - let (target_id, found_error) = action_handler.retrieve_next_error(); - 
assert_eq!(target_id, TEST_APID.into()); - check_error(found_error); + fn test_reply_handler_step_success() { + let mut reply_testbench = Pus8ReplyTestbench::new(false); + let request_id = 0x02; + let target_id = 0x05; + let action_id = 0x03; + let token = reply_testbench.init_handling_for_request(request_id, action_id); + reply_testbench.add_routed_request( + request_id.into(), + target_id, + action_id, + token, + Duration::from_millis(1), + ); + let action_reply = GenericActionReplyPus::new_action_reply( + request_id, + action_id, + action_id, + ActionReplyPus::StepSuccess { step: 1 }, + ); + reply_testbench + .handle_action_reply(action_reply, &[]) + .expect("reply handling failure"); + let action_reply = GenericActionReplyPus::new_action_reply( + request_id, + action_id, + action_id, + ActionReplyPus::Completed, + ); + reply_testbench + .handle_action_reply(action_reply, &[]) + .expect("reply handling failure"); + reply_testbench.assert_request_completion_success(Some(1), request_id); } + + #[test] + fn test_reply_handler_completion_failure() { + let mut reply_testbench = Pus8ReplyTestbench::new(true); + let sender_id = 0x01; + let request_id = 0x02; + let target_id = 0x05; + let action_id = 0x03; + let token = reply_testbench.init_handling_for_request(request_id, action_id); + reply_testbench.add_routed_request( + request_id.into(), + target_id, + action_id, + token, + Duration::from_millis(1), + ); + let params_raw = ParamsRaw::U32(params::U32(5)); + let action_reply = GenericActionReplyPus::new_action_reply( + request_id, + sender_id, + action_id, + ActionReplyPus::CompletionFailed { + error_code: COMPLETION_ERROR_CODE, + params: params_raw.into(), + }, + ); + reply_testbench + .handle_action_reply(action_reply, &[]) + .expect("reply handling failure"); + reply_testbench.assert_request_completion_failure( + None, + request_id, + COMPLETION_ERROR_CODE, + ¶ms_raw.to_vec().unwrap(), + ); + } + + #[test] + fn test_reply_handler_step_failure() { + let mut reply_testbench = Pus8ReplyTestbench::new(false); + let sender_id = 0x01; + let request_id = 0x02; + let target_id = 0x05; + let action_id = 0x03; + let token = reply_testbench.init_handling_for_request(request_id, action_id); + reply_testbench.add_routed_request( + request_id.into(), + target_id, + action_id, + token, + Duration::from_millis(1), + ); + let action_reply = GenericActionReplyPus::new_action_reply( + request_id, + sender_id, + action_id, + ActionReplyPus::StepFailed { + error_code: COMPLETION_ERROR_CODE_STEP, + step: 2, + params: ParamsRaw::U32(crate::params::U32(5)).into(), + }, + ); + reply_testbench + .handle_action_reply(action_reply, &[]) + .expect("reply handling failure"); + reply_testbench.assert_request_step_failure(2, request_id); + } + + #[test] + fn test_reply_handler_timeout_handling() { + let mut reply_testbench = Pus8ReplyTestbench::new(true); + let request_id = 0x02; + let target_id = 0x06; + let action_id = 0x03; + let token = reply_testbench.init_handling_for_request(request_id, action_id); + reply_testbench.add_routed_request( + request_id.into(), + target_id, + action_id, + token, + Duration::from_millis(1), + ); + let timeout_param = Duration::from_millis(1).as_millis() as u64; + let timeout_param_raw = timeout_param.to_be_bytes(); + std::thread::sleep(Duration::from_millis(2)); + reply_testbench + .update_time_from_now() + .expect("time update failure"); + reply_testbench.check_for_timeouts(&[]).unwrap(); + reply_testbench.assert_request_completion_failure( + None, + request_id, + TIMEOUT_ERROR_CODE, 
+ &timeout_param_raw, + ); + } + + #[test] + fn test_unrequested_reply() { + let mut reply_testbench = Pus8ReplyTestbench::new(true); + let sender_id = 0x01; + let request_id = 0x02; + let action_id = 0x03; + + let action_reply = GenericActionReplyPus::new_action_reply( + request_id, + sender_id, + action_id, + ActionReplyPus::Completed, + ); + reply_testbench + .handle_action_reply(action_reply, &[]) + .expect("reply handling failure"); + let reply = reply_testbench.next_unrequested_reply(); + assert!(reply.is_some()); + let reply = reply.unwrap(); + assert_eq!(reply.message.action_id, action_id); + assert_eq!(reply.request_id, request_id); + assert_eq!(reply.message.variant, ActionReplyPus::Completed); + } + */ } diff --git a/satrs/src/pus/event.rs b/satrs/src/pus/event.rs index 4165601..233b609 100644 --- a/satrs/src/pus/event.rs +++ b/satrs/src/pus/event.rs @@ -6,8 +6,10 @@ use spacepackets::ByteConversionError; use spacepackets::{SpHeader, MAX_APID}; use crate::pus::EcssTmSenderCore; + #[cfg(feature = "alloc")] -pub use alloc_mod::EventReporter; +pub use alloc_mod::*; + pub use spacepackets::ecss::event::*; pub struct EventReportCreator { @@ -16,117 +18,112 @@ pub struct EventReportCreator { } impl EventReportCreator { - pub fn new(apid: u16) -> Option { + pub fn new(apid: u16, dest_id: u16) -> Option { if apid > MAX_APID { return None; } - Some(Self { - // msg_count: 0, - dest_id: 0, - apid, - }) + Some(Self { dest_id, apid }) } pub fn event_info<'time, 'src_data>( - &mut self, - src_data_buf: &'src_data mut [u8], + &self, time_stamp: &'time [u8], event_id: impl EcssEnumeration, - aux_data: Option<&'src_data [u8]>, + params: Option<&'src_data [u8]>, + src_data_buf: &'src_data mut [u8], ) -> Result, ByteConversionError> { self.generate_and_send_generic_tm( - src_data_buf, Subservice::TmInfoReport, time_stamp, event_id, - aux_data, + params, + src_data_buf, ) } pub fn event_low_severity<'time, 'src_data>( - &mut self, - src_data_buf: &'src_data mut [u8], + &self, time_stamp: &'time [u8], event_id: impl EcssEnumeration, - aux_data: Option<&'src_data [u8]>, + params: Option<&'src_data [u8]>, + src_data_buf: &'src_data mut [u8], ) -> Result, ByteConversionError> { self.generate_and_send_generic_tm( - src_data_buf, Subservice::TmLowSeverityReport, time_stamp, event_id, - aux_data, + params, + src_data_buf, ) } pub fn event_medium_severity<'time, 'src_data>( - &mut self, - buf: &'src_data mut [u8], + &self, time_stamp: &'time [u8], event_id: impl EcssEnumeration, - aux_data: Option<&'src_data [u8]>, + params: Option<&'src_data [u8]>, + buf: &'src_data mut [u8], ) -> Result, ByteConversionError> { self.generate_and_send_generic_tm( - buf, Subservice::TmMediumSeverityReport, time_stamp, event_id, - aux_data, + params, + buf, ) } pub fn event_high_severity<'time, 'src_data>( - &mut self, - src_data_buf: &'src_data mut [u8], + &self, time_stamp: &'time [u8], event_id: impl EcssEnumeration, - aux_data: Option<&'src_data [u8]>, + params: Option<&'src_data [u8]>, + src_data_buf: &'src_data mut [u8], ) -> Result, ByteConversionError> { self.generate_and_send_generic_tm( - src_data_buf, Subservice::TmHighSeverityReport, time_stamp, event_id, - aux_data, + params, + src_data_buf, ) } fn generate_and_send_generic_tm<'time, 'src_data>( - &mut self, - src_data_buf: &'src_data mut [u8], + &self, subservice: Subservice, time_stamp: &'time [u8], event_id: impl EcssEnumeration, - aux_data: Option<&'src_data [u8]>, + params: Option<&'src_data [u8]>, + src_data_buf: &'src_data mut [u8], ) -> Result, 
ByteConversionError> { - self.generate_generic_event_tm(src_data_buf, subservice, time_stamp, event_id, aux_data) + self.generate_generic_event_tm(subservice, time_stamp, event_id, params, src_data_buf) } fn generate_generic_event_tm<'time, 'src_data>( &self, - src_data_buf: &'src_data mut [u8], subservice: Subservice, time_stamp: &'time [u8], event_id: impl EcssEnumeration, - aux_data: Option<&'src_data [u8]>, + params: Option<&'src_data [u8]>, + src_data_buf: &'src_data mut [u8], ) -> Result, ByteConversionError> { let mut src_data_len = event_id.size(); - if let Some(aux_data) = aux_data { + if let Some(aux_data) = params { src_data_len += aux_data.len(); } source_buffer_large_enough(src_data_buf.len(), src_data_len)?; - let mut sp_header = SpHeader::tm_unseg(self.apid, 0, 0).unwrap(); let sec_header = - PusTmSecondaryHeader::new(5, subservice.into(), 0, self.dest_id, Some(time_stamp)); + PusTmSecondaryHeader::new(5, subservice.into(), 0, self.dest_id, time_stamp); let mut current_idx = 0; event_id.write_to_be_bytes(&mut src_data_buf[0..event_id.size()])?; current_idx += event_id.size(); - if let Some(aux_data) = aux_data { + if let Some(aux_data) = params { src_data_buf[current_idx..current_idx + aux_data.len()].copy_from_slice(aux_data); current_idx += aux_data.len(); } Ok(PusTmCreator::new( - &mut sp_header, + SpHeader::new_from_apid(self.apid), sec_header, &src_data_buf[0..current_idx], true, @@ -137,99 +134,129 @@ impl EventReportCreator { #[cfg(feature = "alloc")] mod alloc_mod { use super::*; + use crate::ComponentId; use alloc::vec; use alloc::vec::Vec; + use core::cell::RefCell; - pub struct EventReporter { - source_data_buf: Vec, - pub reporter: EventReportCreator, + pub trait EventTmHookProvider { + fn modify_tm(&self, tm: &mut PusTmCreator); } - impl EventReporter { - pub fn new(apid: u16, max_event_id_and_aux_data_size: usize) -> Option { - let reporter = EventReportCreator::new(apid)?; + #[derive(Default)] + pub struct DummyEventHook {} + + impl EventTmHookProvider for DummyEventHook { + fn modify_tm(&self, _tm: &mut PusTmCreator) {} + } + + pub struct EventReporter { + id: ComponentId, + // Use interior mutability pattern here. This is just an intermediate buffer to the PUS event packet + // generation. 
+ source_data_buf: RefCell>, + pub report_creator: EventReportCreator, + pub tm_hook: EventTmHook, + } + + impl EventReporter { + pub fn new( + id: ComponentId, + default_apid: u16, + default_dest_id: u16, + max_event_id_and_aux_data_size: usize, + ) -> Option { + let reporter = EventReportCreator::new(default_apid, default_dest_id)?; Some(Self { - source_data_buf: vec![0; max_event_id_and_aux_data_size], - reporter, + id, + source_data_buf: RefCell::new(vec![0; max_event_id_and_aux_data_size]), + report_creator: reporter, + tm_hook: DummyEventHook::default(), }) } + } + impl EventReporter { + pub fn new_with_hook( + id: ComponentId, + default_apid: u16, + default_dest_id: u16, + max_event_id_and_aux_data_size: usize, + tm_hook: EventTmHook, + ) -> Option { + let reporter = EventReportCreator::new(default_apid, default_dest_id)?; + Some(Self { + id, + source_data_buf: RefCell::new(vec![0; max_event_id_and_aux_data_size]), + report_creator: reporter, + tm_hook, + }) + } + pub fn event_info( - &mut self, - sender: &mut (impl EcssTmSenderCore + ?Sized), + &self, + sender: &(impl EcssTmSenderCore + ?Sized), time_stamp: &[u8], event_id: impl EcssEnumeration, - aux_data: Option<&[u8]>, + params: Option<&[u8]>, ) -> Result<(), EcssTmtcError> { - let tm_creator = self - .reporter - .event_info( - self.source_data_buf.as_mut_slice(), - time_stamp, - event_id, - aux_data, - ) + let mut mut_buf = self.source_data_buf.borrow_mut(); + let mut tm_creator = self + .report_creator + .event_info(time_stamp, event_id, params, mut_buf.as_mut_slice()) .map_err(PusError::ByteConversion)?; - sender.send_tm(tm_creator.into())?; + self.tm_hook.modify_tm(&mut tm_creator); + sender.send_tm(self.id, tm_creator.into())?; Ok(()) } pub fn event_low_severity( - &mut self, - sender: &mut (impl EcssTmSenderCore + ?Sized), + &self, + sender: &(impl EcssTmSenderCore + ?Sized), time_stamp: &[u8], event_id: impl EcssEnumeration, - aux_data: Option<&[u8]>, + params: Option<&[u8]>, ) -> Result<(), EcssTmtcError> { - let tm_creator = self - .reporter - .event_low_severity( - self.source_data_buf.as_mut_slice(), - time_stamp, - event_id, - aux_data, - ) + let mut mut_buf = self.source_data_buf.borrow_mut(); + let mut tm_creator = self + .report_creator + .event_low_severity(time_stamp, event_id, params, mut_buf.as_mut_slice()) .map_err(PusError::ByteConversion)?; - sender.send_tm(tm_creator.into())?; + self.tm_hook.modify_tm(&mut tm_creator); + sender.send_tm(self.id, tm_creator.into())?; Ok(()) } pub fn event_medium_severity( - &mut self, - sender: &mut (impl EcssTmSenderCore + ?Sized), + &self, + sender: &(impl EcssTmSenderCore + ?Sized), time_stamp: &[u8], event_id: impl EcssEnumeration, - aux_data: Option<&[u8]>, + params: Option<&[u8]>, ) -> Result<(), EcssTmtcError> { - let tm_creator = self - .reporter - .event_medium_severity( - self.source_data_buf.as_mut_slice(), - time_stamp, - event_id, - aux_data, - ) + let mut mut_buf = self.source_data_buf.borrow_mut(); + let mut tm_creator = self + .report_creator + .event_medium_severity(time_stamp, event_id, params, mut_buf.as_mut_slice()) .map_err(PusError::ByteConversion)?; - sender.send_tm(tm_creator.into())?; + self.tm_hook.modify_tm(&mut tm_creator); + sender.send_tm(self.id, tm_creator.into())?; Ok(()) } pub fn event_high_severity( - &mut self, - sender: &mut (impl EcssTmSenderCore + ?Sized), + &self, + sender: &(impl EcssTmSenderCore + ?Sized), time_stamp: &[u8], event_id: impl EcssEnumeration, - aux_data: Option<&[u8]>, + params: Option<&[u8]>, ) -> Result<(), 
EcssTmtcError> { - let tm_creator = self - .reporter - .event_high_severity( - self.source_data_buf.as_mut_slice(), - time_stamp, - event_id, - aux_data, - ) + let mut mut_buf = self.source_data_buf.borrow_mut(); + let mut tm_creator = self + .report_creator + .event_high_severity(time_stamp, event_id, params, mut_buf.as_mut_slice()) .map_err(PusError::ByteConversion)?; - sender.send_tm(tm_creator.into())?; + self.tm_hook.modify_tm(&mut tm_creator); + sender.send_tm(self.id, tm_creator.into())?; Ok(()) } } @@ -239,9 +266,10 @@ mod alloc_mod { mod tests { use super::*; use crate::events::{EventU32, Severity}; + use crate::pus::test_util::TEST_COMPONENT_ID_0; use crate::pus::tests::CommonTmInfo; - use crate::pus::{EcssChannel, PusTmWrapper}; - use crate::ChannelId; + use crate::pus::{ChannelWithId, PusTmVariant}; + use crate::ComponentId; use spacepackets::ByteConversionError; use std::cell::RefCell; use std::collections::VecDeque; @@ -255,6 +283,7 @@ mod tests { #[derive(Debug, Eq, PartialEq, Clone)] struct TmInfo { + pub sender_id: ComponentId, pub common: CommonTmInfo, pub event: EventU32, pub aux_data: Vec, @@ -265,19 +294,19 @@ mod tests { pub service_queue: RefCell>, } - impl EcssChannel for TestSender { - fn channel_id(&self) -> ChannelId { + impl ChannelWithId for TestSender { + fn id(&self) -> ComponentId { 0 } } impl EcssTmSenderCore for TestSender { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + fn send_tm(&self, sender_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(_) => { + PusTmVariant::InStore(_) => { panic!("TestSender: unexpected call with address"); } - PusTmWrapper::Direct(tm) => { + PusTmVariant::Direct(tm) => { assert!(!tm.source_data().is_empty()); let src_data = tm.source_data(); assert!(src_data.len() >= 4); @@ -288,6 +317,7 @@ mod tests { aux_data.extend_from_slice(&src_data[4..]); } self.service_queue.borrow_mut().push_back(TmInfo { + sender_id, common: CommonTmInfo::new_from_tm(&tm), event, aux_data, @@ -345,7 +375,12 @@ mod tests { error_data: Option<&[u8]>, ) { let mut sender = TestSender::default(); - let reporter = EventReporter::new(EXAMPLE_APID, max_event_aux_data_buf); + let reporter = EventReporter::new( + TEST_COMPONENT_ID_0.id(), + EXAMPLE_APID, + 0, + max_event_aux_data_buf, + ); assert!(reporter.is_some()); let mut reporter = reporter.unwrap(); let time_stamp_empty: [u8; 7] = [0; 7]; @@ -375,6 +410,7 @@ mod tests { assert_eq!(tm_info.common.msg_counter, 0); assert_eq!(tm_info.common.apid, EXAMPLE_APID); assert_eq!(tm_info.event, event); + assert_eq!(tm_info.sender_id, TEST_COMPONENT_ID_0.id()); assert_eq!(tm_info.aux_data, error_copy); } @@ -437,7 +473,7 @@ mod tests { fn insufficient_buffer() { let mut sender = TestSender::default(); for i in 0..3 { - let reporter = EventReporter::new(EXAMPLE_APID, i); + let reporter = EventReporter::new(0, EXAMPLE_APID, 0, i); assert!(reporter.is_some()); let mut reporter = reporter.unwrap(); check_buf_too_small(&mut reporter, &mut sender, i); diff --git a/satrs/src/pus/event_man.rs b/satrs/src/pus/event_man.rs index e6e18c9..eecb375 100644 --- a/satrs/src/pus/event_man.rs +++ b/satrs/src/pus/event_man.rs @@ -157,8 +157,8 @@ pub mod alloc_mod { phantom: PhantomData<(E, EV)>, } - impl, EV: GenericEvent, E> - PusEventDispatcher + impl, Event: GenericEvent, E> + PusEventDispatcher { pub fn new(reporter: EventReporter, backend: B) -> Self { Self { @@ -168,20 +168,20 @@ pub mod alloc_mod { } } - pub fn enable_tm_for_event(&mut self, event: &EV) 
-> Result { + pub fn enable_tm_for_event(&mut self, event: &Event) -> Result { self.backend.enable_event_reporting(event) } - pub fn disable_tm_for_event(&mut self, event: &EV) -> Result { + pub fn disable_tm_for_event(&mut self, event: &Event) -> Result { self.backend.disable_event_reporting(event) } pub fn generate_pus_event_tm_generic( - &mut self, - sender: &mut (impl EcssTmSenderCore + ?Sized), + &self, + sender: &(impl EcssTmSenderCore + ?Sized), time_stamp: &[u8], - event: EV, - aux_data: Option<&[u8]>, + event: Event, + params: Option<&[u8]>, ) -> Result { if !self.backend.event_enabled(&event) { return Ok(false); @@ -189,22 +189,22 @@ pub mod alloc_mod { match event.severity() { Severity::INFO => self .reporter - .event_info(sender, time_stamp, event, aux_data) + .event_info(sender, time_stamp, event, params) .map(|_| true) .map_err(|e| e.into()), Severity::LOW => self .reporter - .event_low_severity(sender, time_stamp, event, aux_data) + .event_low_severity(sender, time_stamp, event, params) .map(|_| true) .map_err(|e| e.into()), Severity::MEDIUM => self .reporter - .event_medium_severity(sender, time_stamp, event, aux_data) + .event_medium_severity(sender, time_stamp, event, params) .map(|_| true) .map_err(|e| e.into()), Severity::HIGH => self .reporter - .event_high_severity(sender, time_stamp, event, aux_data) + .event_high_severity(sender, time_stamp, event, params) .map(|_| true) .map_err(|e| e.into()), } @@ -239,8 +239,8 @@ pub mod alloc_mod { } pub fn generate_pus_event_tm( - &mut self, - sender: &mut (impl EcssTmSenderCore + ?Sized), + &self, + sender: &(impl EcssTmSenderCore + ?Sized), time_stamp: &[u8], event: EventU32TypedSev, aux_data: Option<&[u8]>, @@ -257,31 +257,36 @@ pub mod alloc_mod { #[cfg(test)] mod tests { use super::*; - use crate::{events::SeverityInfo, pus::TmAsVecSenderWithMpsc}; + use crate::events::SeverityInfo; + use crate::pus::PusTmAsVec; + use crate::request::UniqueApidTargetId; use std::sync::mpsc::{self, TryRecvError}; const INFO_EVENT: EventU32TypedSev = EventU32TypedSev::::const_new(1, 0); const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5); const EMPTY_STAMP: [u8; 7] = [0; 7]; + const TEST_APID: u16 = 0x02; + const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, 0x05); fn create_basic_man_1() -> DefaultPusEventU32Dispatcher<()> { - let reporter = EventReporter::new(0x02, 128).expect("Creating event repoter failed"); + let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128) + .expect("Creating event repoter failed"); PusEventDispatcher::new_with_default_backend(reporter) } fn create_basic_man_2() -> DefaultPusEventU32Dispatcher<()> { - let reporter = EventReporter::new(0x02, 128).expect("Creating event repoter failed"); + let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128) + .expect("Creating event repoter failed"); let backend = DefaultPusEventMgmtBackend::default(); PusEventDispatcher::new(reporter, backend) } #[test] fn test_basic() { - let mut event_man = create_basic_man_1(); - let (event_tx, event_rx) = mpsc::channel(); - let mut sender = TmAsVecSenderWithMpsc::new(0, "test_sender", event_tx); + let event_man = create_basic_man_1(); + let (event_tx, event_rx) = mpsc::channel::(); let event_sent = event_man - .generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None) + .generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None) .expect("Sending info event failed"); assert!(event_sent); @@ -292,13 +297,13 @@ mod tests { #[test] fn test_disable_event() { let mut 
event_man = create_basic_man_2(); - let (event_tx, event_rx) = mpsc::channel(); - let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx); + let (event_tx, event_rx) = mpsc::channel::(); + // let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx); let res = event_man.disable_tm_for_event(&LOW_SEV_EVENT); assert!(res.is_ok()); assert!(res.unwrap()); let mut event_sent = event_man - .generate_pus_event_tm_generic(&mut sender, &EMPTY_STAMP, LOW_SEV_EVENT, None) + .generate_pus_event_tm_generic(&event_tx, &EMPTY_STAMP, LOW_SEV_EVENT, None) .expect("Sending low severity event failed"); assert!(!event_sent); let res = event_rx.try_recv(); @@ -306,7 +311,7 @@ mod tests { assert!(matches!(res.unwrap_err(), TryRecvError::Empty)); // Check that only the low severity event was disabled event_sent = event_man - .generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None) + .generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None) .expect("Sending info event failed"); assert!(event_sent); event_rx.try_recv().expect("No info event received"); @@ -315,8 +320,7 @@ mod tests { #[test] fn test_reenable_event() { let mut event_man = create_basic_man_1(); - let (event_tx, event_rx) = mpsc::channel(); - let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx); + let (event_tx, event_rx) = mpsc::channel::(); let mut res = event_man.disable_tm_for_event_with_sev(&INFO_EVENT); assert!(res.is_ok()); assert!(res.unwrap()); @@ -324,7 +328,7 @@ mod tests { assert!(res.is_ok()); assert!(res.unwrap()); let event_sent = event_man - .generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None) + .generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None) .expect("Sending info event failed"); assert!(event_sent); event_rx.try_recv().expect("No info event received"); diff --git a/satrs/src/pus/event_srv.rs b/satrs/src/pus/event_srv.rs index 64c1ba0..bb08f58 100644 --- a/satrs/src/pus/event_srv.rs +++ b/satrs/src/pus/event_srv.rs @@ -2,17 +2,18 @@ use crate::events::EventU32; use crate::pus::event_man::{EventRequest, EventRequestWithToken}; use crate::pus::verification::TcStateToken; use crate::pus::{PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError}; +use crate::queue::GenericSendError; use spacepackets::ecss::event::Subservice; use spacepackets::ecss::PusPacket; use std::sync::mpsc::Sender; use super::verification::VerificationReportingProvider; use super::{ - get_current_cds_short_timestamp, EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore, - PusServiceHelper, + EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore, GenericConversionError, + GenericRoutingError, PusServiceHelper, }; -pub struct PusService5EventHandler< +pub struct PusEventServiceHandler< TcReceiver: EcssTcReceiverCore, TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter, @@ -28,7 +29,7 @@ impl< TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter, VerificationReporter: VerificationReportingProvider, - > PusService5EventHandler + > PusEventServiceHandler { pub fn new( service_helper: PusServiceHelper< @@ -45,16 +46,19 @@ impl< } } - pub fn handle_one_tc(&mut self) -> Result { + pub fn poll_and_handle_next_tc( + &mut self, + time_stamp: &[u8], + ) -> Result { let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?; if possible_packet.is_none() { return Ok(PusPacketHandlerResult::Empty); } let ecss_tc_and_token = possible_packet.unwrap(); - let tc = self - .service_helper - .tc_in_mem_converter - 
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?; + self.service_helper + .tc_in_mem_converter_mut() + .cache(&ecss_tc_and_token.tc_in_memory)?; + let tc = self.service_helper.tc_in_mem_converter().convert()?; let subservice = tc.subservice(); let srv = Subservice::try_from(subservice); if srv.is_err() { @@ -63,63 +67,73 @@ impl< ecss_tc_and_token.token, )); } - let handle_enable_disable_request = |enable: bool, stamp: [u8; 7]| { - if tc.user_data().len() < 4 { - return Err(PusPacketHandlingError::NotEnoughAppData { - expected: 4, - found: tc.user_data().len(), - }); - } - let user_data = tc.user_data(); - let event_u32 = EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap())); - let start_token = self - .service_helper - .common - .verification_handler - .start_success(ecss_tc_and_token.token, &stamp) - .map_err(|_| PartialPusHandlingError::Verification); - let partial_error = start_token.clone().err(); - let mut token: TcStateToken = ecss_tc_and_token.token.into(); - if let Ok(start_token) = start_token { - token = start_token.into(); - } - let event_req_with_token = if enable { - EventRequestWithToken { - request: EventRequest::Enable(event_u32), - token, + let handle_enable_disable_request = + |enable: bool| -> Result { + if tc.user_data().len() < 4 { + return Err(GenericConversionError::NotEnoughAppData { + expected: 4, + found: tc.user_data().len(), + } + .into()); } - } else { - EventRequestWithToken { - request: EventRequest::Disable(event_u32), - token, + let user_data = tc.user_data(); + let event_u32 = + EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap())); + let start_token = self + .service_helper + .common + .verif_reporter + .start_success( + &self.service_helper.common.tm_sender, + ecss_tc_and_token.token, + time_stamp, + ) + .map_err(|_| PartialPusHandlingError::Verification); + let partial_error = start_token.clone().err(); + let mut token: TcStateToken = ecss_tc_and_token.token.into(); + if let Ok(start_token) = start_token { + token = start_token.into(); } + let event_req_with_token = if enable { + EventRequestWithToken { + request: EventRequest::Enable(event_u32), + token, + } + } else { + EventRequestWithToken { + request: EventRequest::Disable(event_u32), + token, + } + }; + self.event_request_tx + .send(event_req_with_token) + .map_err(|_| { + PusPacketHandlingError::RequestRouting(GenericRoutingError::Send( + GenericSendError::RxDisconnected, + )) + })?; + if let Some(partial_error) = partial_error { + return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess( + partial_error, + )); + } + Ok(PusPacketHandlerResult::RequestHandled) }; - self.event_request_tx - .send(event_req_with_token) - .map_err(|_| { - PusPacketHandlingError::Other("Forwarding event request failed".into()) - })?; - if let Some(partial_error) = partial_error { - return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess( - partial_error, - )); - } - Ok(PusPacketHandlerResult::RequestHandled) - }; - let mut partial_error = None; - let time_stamp = get_current_cds_short_timestamp(&mut partial_error); + match srv.unwrap() { Subservice::TmInfoReport | Subservice::TmLowSeverityReport | Subservice::TmMediumSeverityReport | Subservice::TmHighSeverityReport => { - return Err(PusPacketHandlingError::InvalidSubservice(tc.subservice())) + return Err(PusPacketHandlingError::RequestConversion( + GenericConversionError::WrongService(tc.subservice()), + )) } Subservice::TcEnableEventGeneration => { - handle_enable_disable_request(true, 
time_stamp)?; + handle_enable_disable_request(true)?; } Subservice::TcDisableEventGeneration => { - handle_enable_disable_request(false, time_stamp)?; + handle_enable_disable_request(false)?; } Subservice::TcReportDisabledList | Subservice::TmDisabledEventsReport => { return Ok(PusPacketHandlerResult::SubserviceNotImplemented( @@ -137,60 +151,70 @@ impl< mod tests { use delegate::delegate; use spacepackets::ecss::event::Subservice; + use spacepackets::time::{cds, TimeWriter}; use spacepackets::util::UnsignedEnum; use spacepackets::{ ecss::{ tc::{PusTcCreator, PusTcSecondaryHeader}, tm::PusTmReader, }, - SequenceFlags, SpHeader, + SpHeader, }; use std::sync::mpsc::{self, Sender}; use crate::pus::event_man::EventRequest; - use crate::pus::tests::SimplePusPacketHandler; + use crate::pus::test_util::{PusTestHarness, SimplePusPacketHandler, TEST_APID}; use crate::pus::verification::{ - RequestId, VerificationReporterWithSharedPoolMpscBoundedSender, + RequestId, VerificationReporter, VerificationReportingProvider, }; - use crate::pus::{MpscTcReceiver, TmInSharedPoolSenderWithBoundedMpsc}; + use crate::pus::{GenericConversionError, MpscTcReceiver, MpscTmInSharedPoolSenderBounded}; use crate::{ events::EventU32, pus::{ event_man::EventRequestWithToken, - tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness, TEST_APID}, + tests::PusServiceHandlerWithSharedStoreCommon, verification::{TcStateAccepted, VerificationToken}, EcssTcInSharedStoreConverter, PusPacketHandlerResult, PusPacketHandlingError, }, }; - use super::PusService5EventHandler; + use super::PusEventServiceHandler; const TEST_EVENT_0: EventU32 = EventU32::const_new(crate::events::Severity::INFO, 5, 25); struct Pus5HandlerWithStoreTester { common: PusServiceHandlerWithSharedStoreCommon, - handler: PusService5EventHandler< + handler: PusEventServiceHandler< MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, + MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, + VerificationReporter, >, } impl Pus5HandlerWithStoreTester { pub fn new(event_request_tx: Sender) -> Self { - let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(); + let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(0); Self { common, - handler: PusService5EventHandler::new(srv_handler, event_request_tx), + handler: PusEventServiceHandler::new(srv_handler, event_request_tx), } } } impl PusTestHarness for Pus5HandlerWithStoreTester { + fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken { + let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc); + self.handler + .service_helper + .verif_reporter() + .acceptance_success(self.handler.service_helper.tm_sender(), init_token, &[0; 7]) + .expect("acceptance success failure") + } + delegate! { to self.common { - fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken; + fn send_tc(&self, token: &VerificationToken, tc: &PusTcCreator); fn read_next_tm(&mut self) -> PusTmReader<'_>; fn check_no_tm_available(&self) -> bool; fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId); @@ -200,10 +224,9 @@ mod tests { } impl SimplePusPacketHandler for Pus5HandlerWithStoreTester { - delegate! 
{ - to self.handler { - fn handle_one_tc(&mut self) -> Result; - } + fn handle_one_tc(&mut self) -> Result { + let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap(); + self.handler.poll_and_handle_next_tc(&time_stamp) } } @@ -213,15 +236,16 @@ mod tests { expected_event_req: EventRequest, event_req_receiver: mpsc::Receiver, ) { - let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); + let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); let sec_header = PusTcSecondaryHeader::new_simple(5, subservice as u8); let mut app_data = [0; 4]; TEST_EVENT_0 .write_to_be_bytes(&mut app_data) .expect("writing test event failed"); - let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true); - let token = test_harness.send_tc(&ping_tc); - let request_id = token.req_id(); + let ping_tc = PusTcCreator::new(sp_header, sec_header, &app_data, true); + let token = test_harness.init_verification(&ping_tc); + test_harness.send_tc(&token, &ping_tc); + let request_id = token.request_id(); test_harness.handle_one_tc().unwrap(); test_harness.check_next_verification_tm(1, request_id); test_harness.check_next_verification_tm(3, request_id); @@ -274,10 +298,11 @@ mod tests { fn test_sending_custom_subservice() { let (event_request_tx, _) = mpsc::channel(); let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx); - let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); + let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); let sec_header = PusTcSecondaryHeader::new_simple(5, 200); - let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true); - test_harness.send_tc(&ping_tc); + let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true); + let token = test_harness.init_verification(&ping_tc); + test_harness.send_tc(&token, &ping_tc); let result = test_harness.handle_one_tc(); assert!(result.is_ok()); let result = result.unwrap(); @@ -292,15 +317,19 @@ mod tests { fn test_sending_invalid_app_data() { let (event_request_tx, _) = mpsc::channel(); let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx); - let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); + let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); let sec_header = PusTcSecondaryHeader::new_simple(5, Subservice::TcEnableEventGeneration as u8); - let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &[0, 1, 2], true); - test_harness.send_tc(&ping_tc); + let ping_tc = PusTcCreator::new(sp_header, sec_header, &[0, 1, 2], true); + let token = test_harness.init_verification(&ping_tc); + test_harness.send_tc(&token, &ping_tc); let result = test_harness.handle_one_tc(); assert!(result.is_err()); let result = result.unwrap_err(); - if let PusPacketHandlingError::NotEnoughAppData { expected, found } = result { + if let PusPacketHandlingError::RequestConversion( + GenericConversionError::NotEnoughAppData { expected, found }, + ) = result + { assert_eq!(expected, 4); assert_eq!(found, 3); } else { diff --git a/satrs/src/pus/hk.rs b/satrs/src/pus/hk.rs deleted file mode 100644 index 852e8f7..0000000 --- a/satrs/src/pus/hk.rs +++ /dev/null @@ -1,406 +0,0 @@ -pub use spacepackets::ecss::hk::*; - -#[cfg(feature = "std")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] -pub use std_mod::*; - -#[cfg(feature = "alloc")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] -pub use alloc_mod::*; - -use crate::{hk::HkRequest, TargetId}; - -use 
super::verification::{TcStateAccepted, VerificationToken}; - -/// This trait is an abstraction for the routing of PUS service 3 housekeeping requests to a -/// dedicated recipient using the generic [TargetId]. -pub trait PusHkRequestRouter { - type Error; - fn route( - &self, - target_id: TargetId, - hk_request: HkRequest, - token: VerificationToken, - ) -> Result<(), Self::Error>; -} - -#[cfg(feature = "alloc")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] -pub mod alloc_mod { - use spacepackets::ecss::tc::PusTcReader; - - use crate::pus::verification::VerificationReportingProvider; - - use super::*; - - /// This trait is an abstraction for the conversion of a PUS service 8 action telecommand into - /// a [HkRequest]. - /// - /// Having a dedicated trait for this allows maximum flexiblity and tailoring of the standard. - /// The only requirement is that a valid [TargetId] and a [HkRequest] are returned by the - /// core conversion function. - /// - /// The user should take care of performing the error handling as well. Some of the following - /// aspects might be relevant: - /// - /// - Checking the validity of the APID, service ID, subservice ID. - /// - Checking the validity of the user data. - /// - /// A [VerificationReportingProvider] is passed to the user to also allow handling - /// of the verification process as part of the PUS standard requirements. - pub trait PusHkToRequestConverter { - type Error; - fn convert( - &mut self, - token: VerificationToken, - tc: &PusTcReader, - time_stamp: &[u8], - verif_reporter: &impl VerificationReportingProvider, - ) -> Result<(TargetId, HkRequest), Self::Error>; - } -} - -#[cfg(feature = "std")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] -pub mod std_mod { - use crate::pus::{ - get_current_cds_short_timestamp, verification::VerificationReportingProvider, - EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore, GenericRoutingError, - PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler, PusServiceHelper, - }; - - use super::*; - - /// This is a generic high-level handler for the PUS service 3 housekeeping service. - /// - /// It performs the following handling steps: - /// - /// 1. Retrieve the next TC packet from the [PusServiceHelper]. The [EcssTcInMemConverter] - /// allows to configure the used telecommand memory backend. - /// 2. Convert the TC to a targeted action request using the provided - /// [PusHkToRequestConverter]. The generic error type is constrained to the - /// [PusPacketHandlerResult] for the concrete implementation which offers a packet handler. - /// 3. Route the action request using the provided [PusHkRequestRouter]. The generic error - /// type is constrained to the [GenericRoutingError] for the concrete implementation. - /// 4. Handle all routing errors using the provided [PusRoutingErrorHandler]. The generic error - /// type is constrained to the [GenericRoutingError] for the concrete implementation. 
- pub struct PusService3HkHandler< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - RequestConverter: PusHkToRequestConverter, - RequestRouter: PusHkRequestRouter, - RoutingErrorHandler: PusRoutingErrorHandler, - RoutingError = GenericRoutingError, - > { - service_helper: - PusServiceHelper, - pub request_converter: RequestConverter, - pub request_router: RequestRouter, - pub routing_error_handler: RoutingErrorHandler, - } - - impl< - TcReceiver: EcssTcReceiverCore, - TmSender: EcssTmSenderCore, - TcInMemConverter: EcssTcInMemConverter, - VerificationReporter: VerificationReportingProvider, - RequestConverter: PusHkToRequestConverter, - RequestRouter: PusHkRequestRouter, - RoutingErrorHandler: PusRoutingErrorHandler, - RoutingError: Clone, - > - PusService3HkHandler< - TcReceiver, - TmSender, - TcInMemConverter, - VerificationReporter, - RequestConverter, - RequestRouter, - RoutingErrorHandler, - RoutingError, - > - where - PusPacketHandlingError: From, - { - pub fn new( - service_helper: PusServiceHelper< - TcReceiver, - TmSender, - TcInMemConverter, - VerificationReporter, - >, - request_converter: RequestConverter, - request_router: RequestRouter, - routing_error_handler: RoutingErrorHandler, - ) -> Self { - Self { - service_helper, - request_converter, - request_router, - routing_error_handler, - } - } - - pub fn handle_one_tc(&mut self) -> Result { - let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?; - if possible_packet.is_none() { - return Ok(PusPacketHandlerResult::Empty); - } - let ecss_tc_and_token = possible_packet.unwrap(); - let tc = self - .service_helper - .tc_in_mem_converter - .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?; - let mut partial_error = None; - let time_stamp = get_current_cds_short_timestamp(&mut partial_error); - let (target_id, hk_request) = self.request_converter.convert( - ecss_tc_and_token.token, - &tc, - &time_stamp, - &self.service_helper.common.verification_handler, - )?; - if let Err(e) = - self.request_router - .route(target_id, hk_request, ecss_tc_and_token.token) - { - self.routing_error_handler.handle_error( - target_id, - ecss_tc_and_token.token, - &tc, - e.clone(), - &time_stamp, - &self.service_helper.common.verification_handler, - ); - return Err(e.into()); - } - Ok(PusPacketHandlerResult::RequestHandled) - } - } -} - -#[cfg(test)] -mod tests { - use delegate::delegate; - use spacepackets::ecss::hk::Subservice; - - use spacepackets::{ - ecss::{ - tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader}, - tm::PusTmReader, - PusPacket, - }, - CcsdsPacket, SequenceFlags, SpHeader, - }; - - use crate::pus::{MpscTcReceiver, TmAsVecSenderWithMpsc}; - use crate::{ - hk::HkRequest, - pus::{ - tests::{ - PusServiceHandlerWithVecCommon, PusTestHarness, SimplePusPacketHandler, - TestConverter, TestRouter, TestRoutingErrorHandler, APP_DATA_TOO_SHORT, TEST_APID, - }, - verification::{ - tests::TestVerificationReporter, FailParams, RequestId, TcStateAccepted, - VerificationReportingProvider, VerificationToken, - }, - EcssTcInVecConverter, GenericRoutingError, PusPacketHandlerResult, - PusPacketHandlingError, - }, - TargetId, - }; - - use super::{PusHkRequestRouter, PusHkToRequestConverter, PusService3HkHandler}; - - impl PusHkRequestRouter for TestRouter { - type Error = GenericRoutingError; - - fn route( - &self, - target_id: TargetId, - hk_request: HkRequest, - _token: VerificationToken, - ) -> 
Result<(), Self::Error> { - self.routing_requests - .borrow_mut() - .push_back((target_id, hk_request)); - self.check_for_injected_error() - } - } - - impl PusHkToRequestConverter for TestConverter<3> { - type Error = PusPacketHandlingError; - fn convert( - &mut self, - token: VerificationToken, - tc: &PusTcReader, - time_stamp: &[u8], - verif_reporter: &impl VerificationReportingProvider, - ) -> Result<(TargetId, HkRequest), Self::Error> { - self.conversion_request.push_back(tc.raw_data().to_vec()); - self.check_service(tc)?; - let target_id = tc.apid(); - if tc.user_data().len() < 4 { - verif_reporter - .start_failure( - token, - FailParams::new( - time_stamp, - &APP_DATA_TOO_SHORT, - (tc.user_data().len() as u32).to_be_bytes().as_ref(), - ), - ) - .expect("start success failure"); - return Err(PusPacketHandlingError::NotEnoughAppData { - expected: 4, - found: tc.user_data().len(), - }); - } - if tc.subservice() == Subservice::TcGenerateOneShotHk as u8 { - verif_reporter - .start_success(token, time_stamp) - .expect("start success failure"); - return Ok(( - target_id.into(), - HkRequest::OneShot(u32::from_be_bytes( - tc.user_data()[0..4].try_into().unwrap(), - )), - )); - } - Err(PusPacketHandlingError::InvalidAppData( - "unexpected app data".into(), - )) - } - } - - struct Pus3HandlerWithVecTester { - common: PusServiceHandlerWithVecCommon, - handler: PusService3HkHandler< - MpscTcReceiver, - TmAsVecSenderWithMpsc, - EcssTcInVecConverter, - TestVerificationReporter, - TestConverter<3>, - TestRouter, - TestRoutingErrorHandler, - >, - } - - impl Pus3HandlerWithVecTester { - pub fn new() -> Self { - let (common, srv_handler) = - PusServiceHandlerWithVecCommon::new_with_test_verif_sender(); - Self { - common, - handler: PusService3HkHandler::new( - srv_handler, - TestConverter::default(), - TestRouter::default(), - TestRoutingErrorHandler::default(), - ), - } - } - - delegate! { - to self.handler.request_converter { - pub fn check_next_conversion(&mut self, tc: &PusTcCreator); - } - } - delegate! { - to self.handler.request_router { - pub fn retrieve_next_request(&mut self) -> (TargetId, HkRequest); - } - } - delegate! { - to self.handler.routing_error_handler { - pub fn retrieve_next_error(&mut self) -> (TargetId, GenericRoutingError); - } - } - } - - impl PusTestHarness for Pus3HandlerWithVecTester { - delegate! { - to self.common { - fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken; - fn read_next_tm(&mut self) -> PusTmReader<'_>; - fn check_no_tm_available(&self) -> bool; - fn check_next_verification_tm( - &self, - subservice: u8, - expected_request_id: RequestId, - ); - } - } - } - impl SimplePusPacketHandler for Pus3HandlerWithVecTester { - delegate! 
{ - to self.handler { - fn handle_one_tc(&mut self) -> Result; - } - } - } - - #[test] - fn basic_test() { - let mut hk_handler = Pus3HandlerWithVecTester::new(); - let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); - let sec_header = PusTcSecondaryHeader::new_simple(3, Subservice::TcGenerateOneShotHk as u8); - let unique_id: u32 = 1; - let unique_id_raw = unique_id.to_be_bytes(); - let tc = PusTcCreator::new(&mut sp_header, sec_header, unique_id_raw.as_ref(), true); - hk_handler.send_tc(&tc); - let result = hk_handler.handle_one_tc(); - assert!(result.is_ok()); - hk_handler.check_next_conversion(&tc); - let (target_id, hk_request) = hk_handler.retrieve_next_request(); - assert_eq!(target_id, TEST_APID.into()); - if let HkRequest::OneShot(id) = hk_request { - assert_eq!(id, unique_id); - } else { - panic!("unexpected request"); - } - } - - #[test] - fn test_routing_error() { - let mut hk_handler = Pus3HandlerWithVecTester::new(); - let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); - let sec_header = PusTcSecondaryHeader::new_simple(3, Subservice::TcGenerateOneShotHk as u8); - let unique_id: u32 = 1; - let unique_id_raw = unique_id.to_be_bytes(); - let tc = PusTcCreator::new(&mut sp_header, sec_header, unique_id_raw.as_ref(), true); - let error = GenericRoutingError::UnknownTargetId(25); - hk_handler - .handler - .request_router - .inject_routing_error(error); - hk_handler.send_tc(&tc); - let result = hk_handler.handle_one_tc(); - assert!(result.is_err()); - let check_error = |routing_error: GenericRoutingError| { - if let GenericRoutingError::UnknownTargetId(id) = routing_error { - assert_eq!(id, 25); - } else { - panic!("unexpected error type"); - } - }; - if let PusPacketHandlingError::RequestRoutingError(routing_error) = result.unwrap_err() { - check_error(routing_error); - } else { - panic!("unexpected error type"); - } - - hk_handler.check_next_conversion(&tc); - let (target_id, hk_req) = hk_handler.retrieve_next_request(); - assert_eq!(target_id, TEST_APID.into()); - if let HkRequest::OneShot(unique_id) = hk_req { - assert_eq!(unique_id, 1); - } - - let (target_id, found_error) = hk_handler.retrieve_next_error(); - assert_eq!(target_id, TEST_APID.into()); - check_error(found_error); - } -} diff --git a/satrs/src/pus/mod.rs b/satrs/src/pus/mod.rs index 37d69ef..64187e4 100644 --- a/satrs/src/pus/mod.rs +++ b/satrs/src/pus/mod.rs @@ -4,9 +4,11 @@ //! The satrs-example application contains various usage examples of these components. 
use crate::pool::{StoreAddr, StoreError}; use crate::pus::verification::{TcStateAccepted, TcStateToken, VerificationToken}; -use crate::queue::{GenericRecvError, GenericSendError}; -use crate::ChannelId; +use crate::queue::{GenericReceiveError, GenericSendError}; +use crate::request::{GenericMessage, MessageMetadata, RequestId}; +use crate::ComponentId; use core::fmt::{Display, Formatter}; +use core::time::Duration; #[cfg(feature = "alloc")] use downcast_rs::{impl_downcast, Downcast}; #[cfg(feature = "alloc")] @@ -24,7 +26,6 @@ pub mod event; pub mod event_man; #[cfg(feature = "std")] pub mod event_srv; -pub mod hk; pub mod mode; pub mod scheduler; #[cfg(feature = "std")] @@ -39,46 +40,48 @@ pub use alloc_mod::*; #[cfg(feature = "std")] pub use std_mod::*; +use self::verification::VerificationReportingProvider; + #[derive(Debug, PartialEq, Eq, Clone)] -pub enum PusTmWrapper<'time, 'src_data> { +pub enum PusTmVariant<'time, 'src_data> { InStore(StoreAddr), Direct(PusTmCreator<'time, 'src_data>), } -impl From for PusTmWrapper<'_, '_> { +impl From for PusTmVariant<'_, '_> { fn from(value: StoreAddr) -> Self { Self::InStore(value) } } -impl<'time, 'src_data> From> for PusTmWrapper<'time, 'src_data> { +impl<'time, 'src_data> From> for PusTmVariant<'time, 'src_data> { fn from(value: PusTmCreator<'time, 'src_data>) -> Self { Self::Direct(value) } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum EcssTmtcError { - StoreLock, Store(StoreError), + ByteConversion(ByteConversionError), Pus(PusError), CantSendAddr(StoreAddr), CantSendDirectTm, Send(GenericSendError), - Recv(GenericRecvError), + Receive(GenericReceiveError), } impl Display for EcssTmtcError { fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { match self { - EcssTmtcError::StoreLock => { - write!(f, "store lock error") - } EcssTmtcError::Store(store) => { - write!(f, "store error: {store}") + write!(f, "ecss tmtc error: {store}") } - EcssTmtcError::Pus(pus_e) => { - write!(f, "PUS error: {pus_e}") + EcssTmtcError::ByteConversion(e) => { + write!(f, "ecss tmtc error: {e}") + } + EcssTmtcError::Pus(e) => { + write!(f, "ecss tmtc error: {e}") } EcssTmtcError::CantSendAddr(addr) => { write!(f, "can not send address {addr}") @@ -86,11 +89,11 @@ impl Display for EcssTmtcError { EcssTmtcError::CantSendDirectTm => { write!(f, "can not send TM directly") } - EcssTmtcError::Send(send_e) => { - write!(f, "send error {send_e}") + EcssTmtcError::Send(e) => { + write!(f, "ecss tmtc error: {e}") } - EcssTmtcError::Recv(recv_e) => { - write!(f, "recv error {recv_e}") + EcssTmtcError::Receive(e) => { + write!(f, "ecss tmtc error {e}") } } } @@ -114,9 +117,15 @@ impl From for EcssTmtcError { } } -impl From for EcssTmtcError { - fn from(value: GenericRecvError) -> Self { - Self::Recv(value) +impl From for EcssTmtcError { + fn from(value: ByteConversionError) -> Self { + Self::ByteConversion(value) + } +} + +impl From for EcssTmtcError { + fn from(value: GenericReceiveError) -> Self { + Self::Receive(value) } } @@ -125,16 +134,17 @@ impl Error for EcssTmtcError { fn source(&self) -> Option<&(dyn Error + 'static)> { match self { EcssTmtcError::Store(e) => Some(e), + EcssTmtcError::ByteConversion(e) => Some(e), EcssTmtcError::Pus(e) => Some(e), EcssTmtcError::Send(e) => Some(e), - EcssTmtcError::Recv(e) => Some(e), + EcssTmtcError::Receive(e) => Some(e), _ => None, } } } -pub trait EcssChannel: Send { +pub trait ChannelWithId: Send { /// Each sender can have an ID associated with it - fn channel_id(&self) -> ChannelId; + fn 
id(&self) -> ComponentId; fn name(&self) -> &'static str { "unset" } @@ -144,7 +154,7 @@ pub trait EcssChannel: Send { /// /// This sender object is responsible for sending PUS telemetry to a TM sink. pub trait EcssTmSenderCore: Send { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError>; + fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError>; } /// Generic trait for a user supplied sender object. @@ -155,6 +165,16 @@ pub trait EcssTcSenderCore { fn send_tc(&self, tc: PusTcCreator, token: Option) -> Result<(), EcssTmtcError>; } +/// Dummy object which can be useful for tests. +#[derive(Default)] +pub struct EcssTmDummySender {} + +impl EcssTmSenderCore for EcssTmDummySender { + fn send_tm(&self, _source_id: ComponentId, _tm: PusTmVariant) -> Result<(), EcssTmtcError> { + Ok(()) + } +} + /// A PUS telecommand packet can be stored in memory using different methods. Right now, /// storage inside a pool structure like [crate::pool::StaticMemoryPool], and storage inside a /// `Vec` are supported. @@ -249,7 +269,7 @@ impl From for TryRecvTmtcError { } /// Generic trait for a user supplied receiver object. -pub trait EcssTcReceiverCore: EcssChannel { +pub trait EcssTcReceiverCore { fn recv_tc(&self) -> Result; } @@ -263,9 +283,73 @@ pub trait ReceivesEcssPusTc { fn pass_pus_tc(&mut self, header: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error>; } +pub trait ActiveRequestMapProvider: Sized { + fn insert(&mut self, request_id: &RequestId, request_info: V); + fn get(&self, request_id: RequestId) -> Option<&V>; + fn get_mut(&mut self, request_id: RequestId) -> Option<&mut V>; + fn remove(&mut self, request_id: RequestId) -> bool; + + /// Call a user-supplied closure for each active request. + fn for_each(&self, f: F); + + /// Call a user-supplied closure for each active request. Mutable variant. + fn for_each_mut(&mut self, f: F); +} + +pub trait ActiveRequestProvider { + fn target_id(&self) -> ComponentId; + fn token(&self) -> TcStateToken; + fn set_token(&mut self, token: TcStateToken); + fn has_timed_out(&self) -> bool; + fn timeout(&self) -> Duration; +} + +/// This trait is an abstraction for the routing of PUS request to a dedicated +/// recipient using the generic [ComponentId]. +pub trait PusRequestRouter { + type Error; + + fn route( + &self, + requestor_info: MessageMetadata, + target_id: ComponentId, + request: Request, + ) -> Result<(), Self::Error>; +} + +pub trait PusReplyHandler { + type Error; + + /// This function handles a reply for a given PUS request and returns whether that request + /// is finished. A finished PUS request will be removed from the active request map. + fn handle_reply( + &mut self, + reply: &GenericMessage, + active_request: &ActiveRequestInfo, + tm_sender: &impl EcssTmSenderCore, + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result; + + fn handle_unrequested_reply( + &mut self, + reply: &GenericMessage, + tm_sender: &impl EcssTmSenderCore, + ) -> Result<(), Self::Error>; + + /// Handle the timeout of an active request. 
+ fn handle_request_timeout( + &mut self, + active_request: &ActiveRequestInfo, + tm_sender: &impl EcssTmSenderCore, + verification_handler: &impl VerificationReportingProvider, + time_stamp: &[u8], + ) -> Result<(), Self::Error>; +} + #[cfg(feature = "alloc")] -mod alloc_mod { - use crate::TargetId; +pub mod alloc_mod { + use hashbrown::HashMap; use super::*; @@ -351,38 +435,241 @@ mod alloc_mod { impl_downcast!(EcssTcReceiver); - pub trait PusRoutingErrorHandler { + /// This trait is an abstraction for the conversion of a PUS telecommand into a generic request + /// type. + /// + /// Having a dedicated trait for this allows maximum flexiblity and tailoring of the standard. + /// The only requirement is that a valid active request information instance and a request + /// are returned by the core conversion function. The active request type needs to fulfill + /// the [ActiveRequestProvider] trait bound. + /// + /// The user should take care of performing the error handling as well. Some of the following + /// aspects might be relevant: + /// + /// - Checking the validity of the APID, service ID, subservice ID. + /// - Checking the validity of the user data. + /// + /// A [VerificationReportingProvider] instance is passed to the user to also allow handling + /// of the verification process as part of the PUS standard requirements. + pub trait PusTcToRequestConverter { type Error; - fn handle_error( - &self, - target_id: TargetId, + fn convert( + &mut self, token: VerificationToken, tc: &PusTcReader, - error: Self::Error, - time_stamp: &[u8], + tm_sender: &(impl EcssTmSenderCore + ?Sized), verif_reporter: &impl VerificationReportingProvider, - ); + time_stamp: &[u8], + ) -> Result<(ActiveRequestInfo, Request), Self::Error>; } + + #[derive(Clone, Debug)] + pub struct DefaultActiveRequestMap(pub HashMap); + + impl Default for DefaultActiveRequestMap { + fn default() -> Self { + Self(HashMap::new()) + } + } + + impl ActiveRequestMapProvider for DefaultActiveRequestMap { + fn insert(&mut self, request_id: &RequestId, request: V) { + self.0.insert(*request_id, request); + } + + fn get(&self, request_id: RequestId) -> Option<&V> { + self.0.get(&request_id) + } + + fn get_mut(&mut self, request_id: RequestId) -> Option<&mut V> { + self.0.get_mut(&request_id) + } + + fn remove(&mut self, request_id: RequestId) -> bool { + self.0.remove(&request_id).is_some() + } + + fn for_each(&self, mut f: F) { + for (req_id, active_req) in &self.0 { + f(req_id, active_req); + } + } + + fn for_each_mut(&mut self, mut f: F) { + for (req_id, active_req) in &mut self.0 { + f(req_id, active_req); + } + } + } + + /* + /// Generic reply handler structure which can be used to handle replies for a specific PUS + /// service. + /// + /// This is done by keeping track of active requests using an internal map structure. An API + /// to register new active requests is exposed as well. + /// The reply handler performs boilerplate tasks like performing the verification handling and + /// timeout handling. + /// + /// This object is not useful by itself but serves as a common building block for high-level + /// PUS reply handlers. Concrete PUS handlers should constrain the [ActiveRequestProvider] and + /// the `ReplyType` generics to specific types tailored towards PUS services in addition to + /// providing an API which can process received replies and convert them into verification + /// completions or other operation like user hook calls. 
The framework also provides some + /// concrete PUS handlers for common PUS services like the mode, action and housekeeping + /// service. + /// + /// This object does not automatically update its internal time information used to check for + /// timeouts. The user should call the [Self::update_time] and [Self::update_time_from_now] + /// methods to do this. + pub struct PusServiceReplyHandler< + ActiveRequestMap: ActiveRequestMapProvider, + ReplyHook: ReplyHandlerHook, + ActiveRequestType: ActiveRequestProvider, + ReplyType, + > { + pub active_request_map: ActiveRequestMap, + pub tm_buf: alloc::vec::Vec, + pub current_time: UnixTimestamp, + pub user_hook: ReplyHook, + phantom: PhantomData<(ActiveRequestType, ReplyType)>, + } + + impl< + ActiveRequestMap: ActiveRequestMapProvider, + ReplyHook: ReplyHandlerHook, + ActiveRequestType: ActiveRequestProvider, + ReplyType, + > + PusServiceReplyHandler< + ActiveRequestMap, + ReplyHook, + ActiveRequestType, + ReplyType, + > + { + #[cfg(feature = "std")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] + pub fn new_from_now( + active_request_map: ActiveRequestMap, + fail_data_buf_size: usize, + user_hook: ReplyHook, + ) -> Result { + let current_time = UnixTimestamp::from_now()?; + Ok(Self::new( + active_request_map, + fail_data_buf_size, + user_hook, + tm_sender, + current_time, + )) + } + + pub fn new( + active_request_map: ActiveRequestMap, + fail_data_buf_size: usize, + user_hook: ReplyHook, + tm_sender: TmSender, + init_time: UnixTimestamp, + ) -> Self { + Self { + active_request_map, + tm_buf: alloc::vec![0; fail_data_buf_size], + current_time: init_time, + user_hook, + tm_sender, + phantom: PhantomData, + } + } + + pub fn add_routed_request( + &mut self, + request_id: verification::RequestId, + active_request_type: ActiveRequestType, + ) { + self.active_request_map + .insert(&request_id.into(), active_request_type); + } + + pub fn request_active(&self, request_id: RequestId) -> bool { + self.active_request_map.get(request_id).is_some() + } + + /// Check for timeouts across all active requests. + /// + /// It will call [Self::handle_timeout] for all active requests which have timed out. + pub fn check_for_timeouts(&mut self, time_stamp: &[u8]) -> Result<(), EcssTmtcError> { + let mut timed_out_commands = alloc::vec::Vec::new(); + self.active_request_map.for_each(|request_id, active_req| { + let diff = self.current_time - active_req.start_time(); + if diff.duration_absolute > active_req.timeout() { + self.handle_timeout(active_req, time_stamp); + } + timed_out_commands.push(*request_id); + }); + for timed_out_command in timed_out_commands { + self.active_request_map.remove(timed_out_command); + } + Ok(()) + } + + /// Handle the timeout for a given active request. + /// + /// This implementation will report a verification completion failure with a user-provided + /// error code. It supplies the configured request timeout in milliseconds as a [u64] + /// serialized in big-endian format as the failure data. + pub fn handle_timeout(&self, active_request: &ActiveRequestType, time_stamp: &[u8]) { + let timeout = active_request.timeout().as_millis() as u64; + let timeout_raw = timeout.to_be_bytes(); + self.verification_reporter + .completion_failure( + active_request.token(), + FailParams::new( + time_stamp, + &self.user_hook.timeout_error_code(), + &timeout_raw, + ), + ) + .unwrap(); + self.user_hook.timeout_callback(active_request); + } + + /// Update the current time used for timeout checks based on the current OS time. 
+ #[cfg(feature = "std")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] + pub fn update_time_from_now(&mut self) -> Result<(), std::time::SystemTimeError> { + self.current_time = UnixTimestamp::from_now()?; + Ok(()) + } + + /// Update the current time used for timeout checks. + pub fn update_time(&mut self, time: UnixTimestamp) { + self.current_time = time; + } + } + */ } #[cfg(feature = "std")] #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] pub mod std_mod { - use crate::pool::{PoolProvider, PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr}; + use crate::pool::{ + PoolProvider, PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr, StoreError, + }; use crate::pus::verification::{TcStateAccepted, VerificationToken}; use crate::pus::{ - EcssChannel, EcssTcAndToken, EcssTcReceiverCore, EcssTmSenderCore, EcssTmtcError, - GenericRecvError, GenericSendError, PusTmWrapper, TryRecvTmtcError, + EcssTcAndToken, EcssTcReceiverCore, EcssTmSenderCore, EcssTmtcError, GenericReceiveError, + GenericSendError, PusTmVariant, TryRecvTmtcError, }; use crate::tmtc::tm_helper::SharedTmPool; - use crate::{ChannelId, TargetId}; + use crate::ComponentId; use alloc::vec::Vec; + use core::time::Duration; use spacepackets::ecss::tc::PusTcReader; use spacepackets::ecss::tm::PusTmCreator; - use spacepackets::ecss::{PusError, WritablePusPacket}; - use spacepackets::time::cds::CdsTime; + use spacepackets::ecss::WritablePusPacket; use spacepackets::time::StdTimestampError; - use spacepackets::time::TimeWriter; + use spacepackets::ByteConversionError; use std::string::String; use std::sync::mpsc; use std::sync::mpsc::TryRecvError; @@ -391,8 +678,14 @@ pub mod std_mod { #[cfg(feature = "crossbeam")] pub use cb_mod::*; - use super::verification::VerificationReportingProvider; - use super::{AcceptedEcssTcAndToken, TcInMemory}; + use super::verification::{TcStateToken, VerificationReportingProvider}; + use super::{AcceptedEcssTcAndToken, ActiveRequestProvider, TcInMemory}; + + #[derive(Debug)] + pub struct PusTmInPool { + pub source_id: ComponentId, + pub store_addr: StoreAddr, + } impl From> for EcssTmtcError { fn from(_: mpsc::SendError) -> Self { @@ -400,48 +693,70 @@ pub mod std_mod { } } - impl EcssTmSenderCore for mpsc::Sender { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + impl EcssTmSenderCore for mpsc::Sender { + fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(addr) => self - .send(addr) + PusTmVariant::InStore(store_addr) => self + .send(PusTmInPool { + source_id, + store_addr, + }) .map_err(|_| GenericSendError::RxDisconnected)?, - PusTmWrapper::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm), + PusTmVariant::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm), }; Ok(()) } } - impl EcssTmSenderCore for mpsc::SyncSender { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + impl EcssTmSenderCore for mpsc::SyncSender { + fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(addr) => self - .try_send(addr) + PusTmVariant::InStore(store_addr) => self + .try_send(PusTmInPool { + source_id, + store_addr, + }) .map_err(|e| EcssTmtcError::Send(e.into()))?, - PusTmWrapper::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm), + PusTmVariant::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm), }; Ok(()) } } - impl EcssTmSenderCore for mpsc::Sender> { - fn send_tm(&self, tm: PusTmWrapper) -> 
Result<(), EcssTmtcError> { + #[derive(Debug)] + pub struct PusTmAsVec { + pub source_id: ComponentId, + pub packet: Vec, + } + + pub type MpscTmAsVecSender = mpsc::Sender; + + impl EcssTmSenderCore for MpscTmAsVecSender { + fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), - PusTmWrapper::Direct(tm) => self - .send(tm.to_vec()?) + PusTmVariant::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), + PusTmVariant::Direct(tm) => self + .send(PusTmAsVec { + source_id, + packet: tm.to_vec()?, + }) .map_err(|e| EcssTmtcError::Send(e.into()))?, }; Ok(()) } } - impl EcssTmSenderCore for mpsc::SyncSender> { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + pub type MpscTmAsVecSenderBounded = mpsc::SyncSender; + + impl EcssTmSenderCore for MpscTmAsVecSenderBounded { + fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), - PusTmWrapper::Direct(tm) => self - .send(tm.to_vec()?) + PusTmVariant::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), + PusTmVariant::Direct(tm) => self + .send(PusTmAsVec { + source_id, + packet: tm.to_vec()?, + }) .map_err(|e| EcssTmtcError::Send(e.into()))?, }; Ok(()) @@ -449,144 +764,62 @@ pub mod std_mod { } #[derive(Clone)] - pub struct TmInSharedPoolSenderWithId { - channel_id: ChannelId, - name: &'static str, + pub struct TmInSharedPoolSender { shared_tm_store: SharedTmPool, sender: Sender, } - impl EcssChannel for TmInSharedPoolSenderWithId { - fn channel_id(&self) -> ChannelId { - self.channel_id - } - - fn name(&self) -> &'static str { - self.name - } - } - - impl TmInSharedPoolSenderWithId { - pub fn send_direct_tm(&self, tm: PusTmCreator) -> Result<(), EcssTmtcError> { + impl TmInSharedPoolSender { + pub fn send_direct_tm( + &self, + source_id: ComponentId, + tm: PusTmCreator, + ) -> Result<(), EcssTmtcError> { let addr = self.shared_tm_store.add_pus_tm(&tm)?; - self.sender.send_tm(PusTmWrapper::InStore(addr)) + self.sender.send_tm(source_id, PusTmVariant::InStore(addr)) } } - impl EcssTmSenderCore for TmInSharedPoolSenderWithId { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { - if let PusTmWrapper::Direct(tm) = tm { - return self.send_direct_tm(tm); + impl EcssTmSenderCore for TmInSharedPoolSender { + fn send_tm(&self, source_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { + if let PusTmVariant::Direct(tm) = tm { + return self.send_direct_tm(source_id, tm); } - self.sender.send_tm(tm) + self.sender.send_tm(source_id, tm) } } - impl TmInSharedPoolSenderWithId { - pub fn new( - id: ChannelId, - name: &'static str, - shared_tm_store: SharedTmPool, - sender: Sender, - ) -> Self { + impl TmInSharedPoolSender { + pub fn new(shared_tm_store: SharedTmPool, sender: Sender) -> Self { Self { - channel_id: id, - name, shared_tm_store, sender, } } } - pub type TmInSharedPoolSenderWithMpsc = TmInSharedPoolSenderWithId>; - pub type TmInSharedPoolSenderWithBoundedMpsc = - TmInSharedPoolSenderWithId>; + pub type MpscTmInSharedPoolSender = TmInSharedPoolSender>; + pub type MpscTmInSharedPoolSenderBounded = TmInSharedPoolSender>; - /// This class can be used if frequent heap allocations during run-time are not an issue. - /// PUS TM packets will be sent around as [Vec]s. 
Please note that the current implementation - /// of this class can not deal with store addresses, so it is assumed that is is always - /// going to be called with direct packets. - #[derive(Clone)] - pub struct TmAsVecSenderWithId { - id: ChannelId, - name: &'static str, - sender: Sender, - } - - impl From>> for EcssTmtcError { - fn from(_: mpsc::SendError>) -> Self { - Self::Send(GenericSendError::RxDisconnected) - } - } - - impl TmAsVecSenderWithId { - pub fn new(id: u32, name: &'static str, sender: Sender) -> Self { - Self { id, sender, name } - } - } - - impl EcssChannel for TmAsVecSenderWithId { - fn channel_id(&self) -> ChannelId { - self.id - } - fn name(&self) -> &'static str { - self.name - } - } - - impl EcssTmSenderCore for TmAsVecSenderWithId { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { - self.sender.send_tm(tm) - } - } - - pub type TmAsVecSenderWithMpsc = TmAsVecSenderWithId>>; - pub type TmAsVecSenderWithBoundedMpsc = TmAsVecSenderWithId>>; - - pub struct MpscTcReceiver { - id: ChannelId, - name: &'static str, - receiver: mpsc::Receiver, - } - - impl EcssChannel for MpscTcReceiver { - fn channel_id(&self) -> ChannelId { - self.id - } - - fn name(&self) -> &'static str { - self.name - } - } + pub type MpscTcReceiver = mpsc::Receiver; impl EcssTcReceiverCore for MpscTcReceiver { fn recv_tc(&self) -> Result { - self.receiver.try_recv().map_err(|e| match e { + self.try_recv().map_err(|e| match e { TryRecvError::Empty => TryRecvTmtcError::Empty, - TryRecvError::Disconnected => { - TryRecvTmtcError::Tmtc(EcssTmtcError::from(GenericRecvError::TxDisconnected)) - } + TryRecvError::Disconnected => TryRecvTmtcError::Tmtc(EcssTmtcError::from( + GenericReceiveError::TxDisconnected(None), + )), }) } } - impl MpscTcReceiver { - pub fn new( - id: ChannelId, - name: &'static str, - receiver: mpsc::Receiver, - ) -> Self { - Self { id, name, receiver } - } - } - #[cfg(feature = "crossbeam")] pub mod cb_mod { use super::*; use crossbeam_channel as cb; - pub type TmInSharedPoolSenderWithCrossbeam = - TmInSharedPoolSenderWithId>; + pub type TmInSharedPoolSenderWithCrossbeam = TmInSharedPoolSender>; impl From> for EcssTmtcError { fn from(_: cb::SendError) -> Self { @@ -605,64 +838,87 @@ pub mod std_mod { } } - impl EcssTmSenderCore for cb::Sender { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + impl EcssTmSenderCore for cb::Sender { + fn send_tm( + &self, + source_id: ComponentId, + tm: PusTmVariant, + ) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(addr) => self - .try_send(addr) + PusTmVariant::InStore(addr) => self + .try_send(PusTmInPool { + source_id, + store_addr: addr, + }) .map_err(|e| EcssTmtcError::Send(e.into()))?, - PusTmWrapper::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm), + PusTmVariant::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm), }; Ok(()) } } - impl EcssTmSenderCore for cb::Sender> { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + impl EcssTmSenderCore for cb::Sender { + fn send_tm( + &self, + source_id: ComponentId, + tm: PusTmVariant, + ) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), - PusTmWrapper::Direct(tm) => self - .send(tm.to_vec()?) 
+ PusTmVariant::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)), + PusTmVariant::Direct(tm) => self + .send(PusTmAsVec { + source_id, + packet: tm.to_vec()?, + }) .map_err(|e| EcssTmtcError::Send(e.into()))?, }; Ok(()) } } - pub struct CrossbeamTcReceiver { - id: ChannelId, - name: &'static str, - receiver: cb::Receiver, - } + pub type CrossbeamTcReceiver = cb::Receiver; + } - impl CrossbeamTcReceiver { - pub fn new( - id: ChannelId, - name: &'static str, - receiver: cb::Receiver, - ) -> Self { - Self { id, name, receiver } + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct ActivePusRequestStd { + target_id: ComponentId, + token: TcStateToken, + start_time: std::time::Instant, + timeout: Duration, + } + + impl ActivePusRequestStd { + pub fn new( + target_id: ComponentId, + token: impl Into, + timeout: Duration, + ) -> Self { + Self { + target_id, + token: token.into(), + start_time: std::time::Instant::now(), + timeout, } } + } - impl EcssChannel for CrossbeamTcReceiver { - fn channel_id(&self) -> ChannelId { - self.id - } - - fn name(&self) -> &'static str { - self.name - } + impl ActiveRequestProvider for ActivePusRequestStd { + fn target_id(&self) -> ComponentId { + self.target_id } - impl EcssTcReceiverCore for CrossbeamTcReceiver { - fn recv_tc(&self) -> Result { - self.receiver.try_recv().map_err(|e| match e { - cb::TryRecvError::Empty => TryRecvTmtcError::Empty, - cb::TryRecvError::Disconnected => TryRecvTmtcError::Tmtc(EcssTmtcError::from( - GenericRecvError::TxDisconnected, - )), - }) - } + fn token(&self) -> TcStateToken { + self.token + } + + fn timeout(&self) -> Duration { + self.timeout + } + fn set_token(&mut self, token: TcStateToken) { + self.token = token; + } + + fn has_timed_out(&self) -> bool { + std::time::Instant::now() - self.start_time > self.timeout } } @@ -671,37 +927,52 @@ pub mod std_mod { // will be no_std soon, see https://github.com/rust-lang/rust/issues/103765 . #[derive(Debug, Clone, Error)] - pub enum GenericRoutingError { - #[error("not enough application data, expected at least {expected}, found {found}")] - NotEnoughAppData { expected: usize, found: usize }, - #[error("Unknown target ID {0}")] - UnknownTargetId(TargetId), - #[error("Sending action request failed: {0}")] - SendError(GenericSendError), + pub enum PusTcFromMemError { + #[error("generic PUS error: {0}")] + EcssTmtc(#[from] EcssTmtcError), + #[error("invalid format of TC in memory: {0:?}")] + InvalidFormat(TcInMemory), } #[derive(Debug, Clone, Error)] - pub enum PusPacketHandlingError { - #[error("generic PUS error: {0}")] - Pus(#[from] PusError), + pub enum GenericRoutingError { + // #[error("not enough application data, expected at least {expected}, found {found}")] + // NotEnoughAppData { expected: usize, found: usize }, + #[error("Unknown target ID {0}")] + UnknownTargetId(ComponentId), + #[error("Sending action request failed: {0}")] + Send(GenericSendError), + } + + /// This error can be used for generic conversions from PUS Telecommands to request types. + /// + /// Please note that this error can also be used if no request is generated and the PUS + /// service, subservice and application data is used directly to perform some request. 
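+ ///
+ /// The following sketch is not part of this patch and only illustrates how a custom converter
+ /// implementation might map malformed telecommands to this error; the concrete service number
+ /// and length check are assumptions made for the example:
+ ///
+ /// ```ignore
+ /// if tc.service() != 8 {
+ ///     return Err(GenericConversionError::WrongService(tc.service()));
+ /// }
+ /// if tc.user_data().len() < 4 {
+ ///     return Err(GenericConversionError::NotEnoughAppData {
+ ///         expected: 4,
+ ///         found: tc.user_data().len(),
+ ///     });
+ /// }
+ /// ```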
+ #[derive(Debug, Clone, Error)] + pub enum GenericConversionError { #[error("wrong service number {0} for packet handler")] WrongService(u8), #[error("invalid subservice {0}")] InvalidSubservice(u8), #[error("not enough application data, expected at least {expected}, found {found}")] NotEnoughAppData { expected: usize, found: usize }, - #[error("PUS packet too large, does not fit in buffer: {0}")] - PusPacketTooLarge(usize), #[error("invalid application data")] InvalidAppData(String), - #[error("invalid format of TC in memory: {0:?}")] - InvalidTcInMemoryFormat(TcInMemory), - #[error("generic ECSS tmtc error: {0}")] - EcssTmtc(#[from] EcssTmtcError), + } + + /// Wrapper type which tries to encapsulate all possible errors when handling PUS packets. + #[derive(Debug, Clone, Error)] + pub enum PusPacketHandlingError { + #[error("error polling PUS TC packet: {0}")] + TcPolling(#[from] EcssTmtcError), + #[error("error generating PUS reader from memory: {0}")] + TcFromMem(#[from] PusTcFromMemError), + #[error("generic request conversion error: {0}")] + RequestConversion(#[from] GenericConversionError), + #[error("request routing error: {0}")] + RequestRouting(#[from] GenericRoutingError), #[error("invalid verification token")] InvalidVerificationToken, - #[error("request routing error: {0}")] - RequestRoutingError(#[from] GenericRoutingError), #[error("other error {0}")] Other(String), } @@ -735,19 +1006,24 @@ pub mod std_mod { } pub trait EcssTcInMemConverter { - fn cache_ecss_tc_in_memory( - &mut self, - possible_packet: &TcInMemory, - ) -> Result<(), PusPacketHandlingError>; + fn cache(&mut self, possible_packet: &TcInMemory) -> Result<(), PusTcFromMemError>; fn tc_slice_raw(&self) -> &[u8]; - fn convert_ecss_tc_in_memory_to_reader( + fn cache_and_convert( &mut self, possible_packet: &TcInMemory, - ) -> Result, PusPacketHandlingError> { - self.cache_ecss_tc_in_memory(possible_packet)?; - Ok(PusTcReader::new(self.tc_slice_raw())?.0) + ) -> Result, PusTcFromMemError> { + self.cache(possible_packet)?; + Ok(PusTcReader::new(self.tc_slice_raw()) + .map_err(EcssTmtcError::Pus)? + .0) + } + + fn convert(&self) -> Result, PusTcFromMemError> { + Ok(PusTcReader::new(self.tc_slice_raw()) + .map_err(EcssTmtcError::Pus)? + .0) } } @@ -760,16 +1036,11 @@ pub mod std_mod { } impl EcssTcInMemConverter for EcssTcInVecConverter { - fn cache_ecss_tc_in_memory( - &mut self, - tc_in_memory: &TcInMemory, - ) -> Result<(), PusPacketHandlingError> { + fn cache(&mut self, tc_in_memory: &TcInMemory) -> Result<(), PusTcFromMemError> { self.pus_tc_raw = None; match tc_in_memory { super::TcInMemory::StoreAddr(_) => { - return Err(PusPacketHandlingError::InvalidTcInMemoryFormat( - tc_in_memory.clone(), - )); + return Err(PusTcFromMemError::InvalidFormat(tc_in_memory.clone())); } super::TcInMemory::Vec(vec) => { self.pus_tc_raw = Some(vec.clone()); @@ -803,17 +1074,20 @@ pub mod std_mod { } } - pub fn copy_tc_to_buf(&mut self, addr: StoreAddr) -> Result<(), PusPacketHandlingError> { + pub fn copy_tc_to_buf(&mut self, addr: StoreAddr) -> Result<(), PusTcFromMemError> { // Keep locked section as short as possible. 
- let mut tc_pool = self - .shared_tc_store - .write() - .map_err(|_| PusPacketHandlingError::EcssTmtc(EcssTmtcError::StoreLock))?; - let tc_size = tc_pool - .len_of_data(&addr) - .map_err(|e| PusPacketHandlingError::EcssTmtc(EcssTmtcError::Store(e)))?; + let mut tc_pool = self.shared_tc_store.write().map_err(|_| { + PusTcFromMemError::EcssTmtc(EcssTmtcError::Store(StoreError::LockError)) + })?; + let tc_size = tc_pool.len_of_data(&addr).map_err(EcssTmtcError::Store)?; if tc_size > self.pus_buf.len() { - return Err(PusPacketHandlingError::PusPacketTooLarge(tc_size)); + return Err( + EcssTmtcError::ByteConversion(ByteConversionError::ToSliceTooSmall { + found: self.pus_buf.len(), + expected: tc_size, + }) + .into(), + ); } let tc_guard = tc_pool.read_with_guard(addr); // TODO: Proper error handling. @@ -823,18 +1097,13 @@ pub mod std_mod { } impl EcssTcInMemConverter for EcssTcInSharedStoreConverter { - fn cache_ecss_tc_in_memory( - &mut self, - tc_in_memory: &TcInMemory, - ) -> Result<(), PusPacketHandlingError> { + fn cache(&mut self, tc_in_memory: &TcInMemory) -> Result<(), PusTcFromMemError> { match tc_in_memory { super::TcInMemory::StoreAddr(addr) => { self.copy_tc_to_buf(*addr)?; } super::TcInMemory::Vec(_) => { - return Err(PusPacketHandlingError::InvalidTcInMemoryFormat( - tc_in_memory.clone(), - )); + return Err(PusTcFromMemError::InvalidFormat(tc_in_memory.clone())); } }; Ok(()) @@ -850,29 +1119,10 @@ pub mod std_mod { TmSender: EcssTmSenderCore, VerificationReporter: VerificationReportingProvider, > { + pub id: ComponentId, pub tc_receiver: TcReceiver, pub tm_sender: TmSender, - pub tm_apid: u16, - pub verification_handler: VerificationReporter, - } - #[cfg(feature = "std")] - pub fn get_current_cds_short_timestamp( - partial_error: &mut Option, - ) -> [u8; 7] { - let mut time_stamp: [u8; 7] = [0; 7]; - let time_provider = CdsTime::now_with_u16_days().map_err(PartialPusHandlingError::Time); - if let Ok(time_provider) = time_provider { - // Can't fail, we have a buffer with the exact required size. - time_provider.write_to_bytes(&mut time_stamp).unwrap(); - } else { - *partial_error = Some(time_provider.unwrap_err()); - } - time_stamp - } - #[cfg(feature = "std")] - pub fn get_current_timestamp_ignore_error() -> [u8; 7] { - let mut dummy = None; - get_current_cds_short_timestamp(&mut dummy) + pub verif_reporter: VerificationReporter, } /// This is a high-level PUS packet handler helper. @@ -902,23 +1152,31 @@ pub mod std_mod { > PusServiceHelper { pub fn new( + id: ComponentId, tc_receiver: TcReceiver, tm_sender: TmSender, - tm_apid: u16, verification_handler: VerificationReporter, tc_in_mem_converter: TcInMemConverter, ) -> Self { Self { common: PusServiceBase { + id, tc_receiver, tm_sender, - tm_apid, - verification_handler, + verif_reporter: verification_handler, }, tc_in_mem_converter, } } + pub fn id(&self) -> ComponentId { + self.common.id + } + + pub fn tm_sender(&self) -> &TmSender { + &self.common.tm_sender + } + /// This function can be used to poll the internal [EcssTcReceiverCore] object for the next /// telecommand packet. It will return `Ok(None)` if there are not packets available. 
/// In any other case, it will perform the acceptance of the ECSS TC packet using the @@ -944,37 +1202,48 @@ pub mod std_mod { })) } Err(e) => match e { - TryRecvTmtcError::Tmtc(e) => Err(PusPacketHandlingError::EcssTmtc(e)), + TryRecvTmtcError::Tmtc(e) => Err(PusPacketHandlingError::TcPolling(e)), TryRecvTmtcError::Empty => Ok(None), }, } } + + pub fn verif_reporter(&self) -> &VerificationReporter { + &self.common.verif_reporter + } + pub fn verif_reporter_mut(&mut self) -> &mut VerificationReporter { + &mut self.common.verif_reporter + } + + pub fn tc_in_mem_converter(&self) -> &TcInMemConverter { + &self.tc_in_mem_converter + } + + pub fn tc_in_mem_converter_mut(&mut self) -> &mut TcInMemConverter { + &mut self.tc_in_mem_converter + } } - pub type PusServiceHelperDynWithMpsc = PusServiceHelper< - MpscTcReceiver, - TmAsVecSenderWithMpsc, - TcInMemConverter, - VerificationReporter, - >; + pub type PusServiceHelperDynWithMpsc = + PusServiceHelper; pub type PusServiceHelperDynWithBoundedMpsc = PusServiceHelper< MpscTcReceiver, - TmAsVecSenderWithBoundedMpsc, + MpscTmAsVecSenderBounded, TcInMemConverter, VerificationReporter, >; pub type PusServiceHelperStaticWithMpsc = PusServiceHelper< MpscTcReceiver, - TmInSharedPoolSenderWithMpsc, + MpscTmInSharedPoolSender, TcInMemConverter, VerificationReporter, >; pub type PusServiceHelperStaticWithBoundedMpsc = PusServiceHelper< MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, + MpscTmInSharedPoolSenderBounded, TcInMemConverter, VerificationReporter, >; @@ -988,12 +1257,46 @@ pub(crate) fn source_buffer_large_enough( return Err(ByteConversionError::ToSliceTooSmall { found: cap, expected: len, - } - .into()); + }); } Ok(()) } +#[cfg(any(feature = "test_util", test))] +pub mod test_util { + use crate::request::UniqueApidTargetId; + use spacepackets::ecss::{tc::PusTcCreator, tm::PusTmReader}; + + use super::{ + verification::{self, TcStateAccepted, VerificationToken}, + PusPacketHandlerResult, PusPacketHandlingError, + }; + + pub const TEST_APID: u16 = 0x101; + pub const TEST_UNIQUE_ID_0: u32 = 0x05; + pub const TEST_UNIQUE_ID_1: u32 = 0x06; + pub const TEST_COMPONENT_ID_0: UniqueApidTargetId = + UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID_0); + pub const TEST_COMPONENT_ID_1: UniqueApidTargetId = + UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID_1); + + pub trait PusTestHarness { + fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken; + fn send_tc(&self, token: &VerificationToken, tc: &PusTcCreator); + fn read_next_tm(&mut self) -> PusTmReader<'_>; + fn check_no_tm_available(&self) -> bool; + fn check_next_verification_tm( + &self, + subservice: u8, + expected_request_id: verification::RequestId, + ); + } + + pub trait SimplePusPacketHandler { + fn handle_one_tc(&mut self) -> Result; + } +} + #[cfg(test)] pub mod tests { use core::cell::RefCell; @@ -1008,57 +1311,53 @@ pub mod tests { use spacepackets::ecss::{PusPacket, WritablePusPacket}; use spacepackets::CcsdsPacket; - use crate::pool::{ - PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig, StoreAddr, - }; - use crate::pus::verification::RequestId; + use crate::pool::{PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig}; + use crate::pus::verification::{RequestId, VerificationReporter}; use crate::tmtc::tm_helper::SharedTmPool; - use crate::TargetId; + use crate::ComponentId; - use super::verification::std_mod::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, - }; - use 
super::verification::tests::{SharedVerificationMap, TestVerificationReporter}; + use super::test_util::{TEST_APID, TEST_COMPONENT_ID_0}; + + use super::verification::test_util::TestVerificationReporter; use super::verification::{ - TcStateAccepted, VerificationReporterCfg, VerificationReporterWithSender, - VerificationReportingProvider, VerificationToken, + TcStateAccepted, VerificationReporterCfg, VerificationReportingProvider, VerificationToken, }; - use super::{ - EcssTcAndToken, EcssTcInSharedStoreConverter, EcssTcInVecConverter, GenericRoutingError, - MpscTcReceiver, PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler, - PusServiceHelper, TcInMemory, TmAsVecSenderWithId, TmAsVecSenderWithMpsc, - TmInSharedPoolSenderWithBoundedMpsc, TmInSharedPoolSenderWithId, - }; - - pub const TEST_APID: u16 = 0x101; + use super::*; #[derive(Debug, Eq, PartialEq, Clone)] pub(crate) struct CommonTmInfo { pub subservice: u8, pub apid: u16, + pub seq_count: u16, pub msg_counter: u16, pub dest_id: u16, pub time_stamp: [u8; 7], } - pub trait PusTestHarness { - fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken; - fn read_next_tm(&mut self) -> PusTmReader<'_>; - fn check_no_tm_available(&self) -> bool; - fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId); - } - - pub trait SimplePusPacketHandler { - fn handle_one_tc(&mut self) -> Result; - } - impl CommonTmInfo { + pub fn new_zero_seq_count( + subservice: u8, + apid: u16, + dest_id: u16, + time_stamp: [u8; 7], + ) -> Self { + Self { + subservice, + apid, + seq_count: 0, + msg_counter: 0, + dest_id, + time_stamp, + } + } + pub fn new_from_tm(tm: &PusTmCreator) -> Self { let mut time_stamp = [0; 7]; time_stamp.clone_from_slice(&tm.timestamp()[0..7]); Self { subservice: PusPacket::subservice(tm), apid: tm.apid(), + seq_count: tm.seq_count(), msg_counter: tm.msg_counter(), dest_id: tm.dest_id(), time_stamp, @@ -1068,20 +1367,19 @@ pub mod tests { /// Common fields for a PUS service test harness. pub struct PusServiceHandlerWithSharedStoreCommon { - pus_buf: [u8; 2048], + pus_buf: RefCell<[u8; 2048]>, tm_buf: [u8; 2048], tc_pool: SharedStaticMemoryPool, tm_pool: SharedTmPool, tc_sender: mpsc::SyncSender, - tm_receiver: mpsc::Receiver, - verification_handler: VerificationReporterWithSharedPoolMpscBoundedSender, + tm_receiver: mpsc::Receiver, } pub type PusServiceHelperStatic = PusServiceHelper< MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, + MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, + VerificationReporter, >; impl PusServiceHandlerWithSharedStoreCommon { @@ -1089,7 +1387,7 @@ pub mod tests { /// [PusServiceHandler] which might be required for a specific PUS service handler. /// /// The PUS service handler is instantiated with a [EcssTcInStoreConverter]. 
- pub fn new() -> (Self, PusServiceHelperStatic) { + pub fn new(id: ComponentId) -> (Self, PusServiceHelperStatic) { let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)], false); let tc_pool = StaticMemoryPool::new(pool_cfg.clone()); let tm_pool = StaticMemoryPool::new(pool_cfg); @@ -1098,62 +1396,48 @@ pub mod tests { let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::sync_channel(10); let (tm_tx, tm_rx) = mpsc::sync_channel(10); - let verif_sender = TmInSharedPoolSenderWithBoundedMpsc::new( - 0, - "verif_sender", - shared_tm_pool.clone(), - tm_tx.clone(), - ); let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); let verification_handler = - VerificationReporterWithSharedPoolMpscBoundedSender::new(&verif_cfg, verif_sender); - let test_srv_tm_sender = - TmInSharedPoolSenderWithId::new(0, "TEST_SENDER", shared_tm_pool.clone(), tm_tx); - let test_srv_tc_receiver = MpscTcReceiver::new(0, "TEST_RECEIVER", test_srv_tc_rx); + VerificationReporter::new(TEST_COMPONENT_ID_0.id(), &verif_cfg); + let test_srv_tm_sender = TmInSharedPoolSender::new(shared_tm_pool.clone(), tm_tx); let in_store_converter = EcssTcInSharedStoreConverter::new(shared_tc_pool.clone(), 2048); ( Self { - pus_buf: [0; 2048], + pus_buf: RefCell::new([0; 2048]), tm_buf: [0; 2048], tc_pool: shared_tc_pool, tm_pool: shared_tm_pool, tc_sender: test_srv_tc_tx, tm_receiver: tm_rx, - verification_handler: verification_handler.clone(), }, PusServiceHelper::new( - test_srv_tc_receiver, + id, + test_srv_tc_rx, test_srv_tm_sender, - TEST_APID, verification_handler, in_store_converter, ), ) } - pub fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken { - let token = self.verification_handler.add_tc(tc); - let token = self - .verification_handler - .acceptance_success(token, &[0; 7]) - .unwrap(); - let tc_size = tc.write_to_bytes(&mut self.pus_buf).unwrap(); + pub fn send_tc(&self, token: &VerificationToken, tc: &PusTcCreator) { + let mut mut_buf = self.pus_buf.borrow_mut(); + let tc_size = tc.write_to_bytes(mut_buf.as_mut_slice()).unwrap(); let mut tc_pool = self.tc_pool.write().unwrap(); - let addr = tc_pool.add(&self.pus_buf[..tc_size]).unwrap(); + let addr = tc_pool.add(&mut_buf[..tc_size]).unwrap(); drop(tc_pool); // Send accepted TC to test service handler. 
self.tc_sender - .send(EcssTcAndToken::new(addr, token)) + .send(EcssTcAndToken::new(addr, *token)) .expect("sending tc failed"); - token } pub fn read_next_tm(&mut self) -> PusTmReader<'_> { let next_msg = self.tm_receiver.try_recv(); assert!(next_msg.is_ok()); - let tm_addr = next_msg.unwrap(); + let tm_in_pool = next_msg.unwrap(); let tm_pool = self.tm_pool.0.read().unwrap(); - let tm_raw = tm_pool.read_as_vec(&tm_addr).unwrap(); + let tm_raw = tm_pool.read_as_vec(&tm_in_pool.store_addr).unwrap(); self.tm_buf[0..tm_raw.len()].copy_from_slice(&tm_raw); PusTmReader::new(&self.tm_buf, 7).unwrap().0 } @@ -1169,9 +1453,9 @@ pub mod tests { pub fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId) { let next_msg = self.tm_receiver.try_recv(); assert!(next_msg.is_ok()); - let tm_addr = next_msg.unwrap(); + let tm_in_pool = next_msg.unwrap(); let tm_pool = self.tm_pool.0.read().unwrap(); - let tm_raw = tm_pool.read_as_vec(&tm_addr).unwrap(); + let tm_raw = tm_pool.read_as_vec(&tm_in_pool.store_addr).unwrap(); let tm = PusTmReader::new(&tm_raw, 7).unwrap().0; assert_eq!(PusPacket::service(&tm), 1); assert_eq!(PusPacket::subservice(&tm), subservice); @@ -1182,43 +1466,39 @@ pub mod tests { } } - pub struct PusServiceHandlerWithVecCommon { - current_tm: Option>, + pub struct PusServiceHandlerWithVecCommon { + current_tm: Option>, tc_sender: mpsc::Sender, - tm_receiver: mpsc::Receiver>, - pub verification_handler: VerificationReporter, + tm_receiver: mpsc::Receiver, } pub type PusServiceHelperDynamic = PusServiceHelper< MpscTcReceiver, - TmAsVecSenderWithMpsc, + MpscTmAsVecSender, EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, + VerificationReporter, >; - impl PusServiceHandlerWithVecCommon { - pub fn new_with_standard_verif_reporter() -> (Self, PusServiceHelperDynamic) { + impl PusServiceHandlerWithVecCommon { + pub fn new_with_standard_verif_reporter( + id: ComponentId, + ) -> (Self, PusServiceHelperDynamic) { let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::channel(); let (tm_tx, tm_rx) = mpsc::channel(); - let verif_sender = TmAsVecSenderWithId::new(0, "verififcatio-sender", tm_tx.clone()); let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); let verification_handler = - VerificationReporterWithSender::new(&verif_cfg, verif_sender); - - let test_srv_tm_sender = TmAsVecSenderWithId::new(0, "test-sender", tm_tx); - let test_srv_tc_receiver = MpscTcReceiver::new(0, "test-receiver", test_srv_tc_rx); + VerificationReporter::new(TEST_COMPONENT_ID_0.id(), &verif_cfg); let in_store_converter = EcssTcInVecConverter::default(); ( Self { current_tm: None, tc_sender: test_srv_tc_tx, tm_receiver: tm_rx, - verification_handler: verification_handler.clone(), }, PusServiceHelper::new( - test_srv_tc_receiver, - test_srv_tm_sender, - TEST_APID, + id, + test_srv_tc_rx, + tm_tx, verification_handler, in_store_converter, ), @@ -1226,12 +1506,14 @@ pub mod tests { } } - impl PusServiceHandlerWithVecCommon { - pub fn new_with_test_verif_sender() -> ( + impl PusServiceHandlerWithVecCommon { + pub fn new_with_test_verif_sender( + id: ComponentId, + ) -> ( Self, PusServiceHelper< MpscTcReceiver, - TmAsVecSenderWithMpsc, + MpscTmAsVecSender, EcssTcInVecConverter, TestVerificationReporter, >, @@ -1239,22 +1521,19 @@ pub mod tests { let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::channel(); let (tm_tx, tm_rx) = mpsc::channel(); - let test_srv_tm_sender = TmAsVecSenderWithId::new(0, "test-sender", tm_tx); - let test_srv_tc_receiver = MpscTcReceiver::new(0, 
"test-receiver", test_srv_tc_rx); let in_store_converter = EcssTcInVecConverter::default(); - let shared_verif_map = SharedVerificationMap::default(); - let verification_handler = TestVerificationReporter::new(shared_verif_map); + let verification_handler = TestVerificationReporter::new(id); ( Self { current_tm: None, tc_sender: test_srv_tc_tx, tm_receiver: tm_rx, - verification_handler: verification_handler.clone(), + //verification_handler: verification_handler.clone(), }, PusServiceHelper::new( - test_srv_tc_receiver, - test_srv_tm_sender, - TEST_APID, + id, + test_srv_tc_rx, + tm_tx, verification_handler, in_store_converter, ), @@ -1262,29 +1541,21 @@ pub mod tests { } } - impl - PusServiceHandlerWithVecCommon - { - pub fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken { - let token = self.verification_handler.add_tc(tc); - let token = self - .verification_handler - .acceptance_success(token, &[0; 7]) - .unwrap(); + impl PusServiceHandlerWithVecCommon { + pub fn send_tc(&self, token: &VerificationToken, tc: &PusTcCreator) { // Send accepted TC to test service handler. self.tc_sender .send(EcssTcAndToken::new( TcInMemory::Vec(tc.to_vec().expect("pus tc conversion to vec failed")), - token, + *token, )) .expect("sending tc failed"); - token } pub fn read_next_tm(&mut self) -> PusTmReader<'_> { let next_msg = self.tm_receiver.try_recv(); assert!(next_msg.is_ok()); - self.current_tm = Some(next_msg.unwrap()); + self.current_tm = Some(next_msg.unwrap().packet); PusTmReader::new(self.current_tm.as_ref().unwrap(), 7) .unwrap() .0 @@ -1302,7 +1573,7 @@ pub mod tests { let next_msg = self.tm_receiver.try_recv(); assert!(next_msg.is_ok()); let next_msg = next_msg.unwrap(); - let tm = PusTmReader::new(next_msg.as_slice(), 7).unwrap().0; + let tm = PusTmReader::new(next_msg.packet.as_slice(), 7).unwrap().0; assert_eq!(PusPacket::service(&tm), 1); assert_eq!(PusPacket::subservice(&tm), subservice); assert_eq!(tm.apid(), TEST_APID); @@ -1322,7 +1593,9 @@ pub mod tests { impl TestConverter { pub fn check_service(&self, tc: &PusTcReader) -> Result<(), PusPacketHandlingError> { if tc.service() != SERVICE { - return Err(PusPacketHandlingError::WrongService(tc.service())); + return Err(PusPacketHandlingError::RequestConversion( + GenericConversionError::WrongService(tc.service()), + )); } Ok(()) } @@ -1340,44 +1613,9 @@ pub mod tests { } } - #[derive(Default)] - pub struct TestRoutingErrorHandler { - pub routing_errors: RefCell>, - } - - impl PusRoutingErrorHandler for TestRoutingErrorHandler { - type Error = GenericRoutingError; - - fn handle_error( - &self, - target_id: TargetId, - _token: VerificationToken, - _tc: &PusTcReader, - error: Self::Error, - _time_stamp: &[u8], - _verif_reporter: &impl VerificationReportingProvider, - ) { - self.routing_errors - .borrow_mut() - .push_back((target_id, error)); - } - } - - impl TestRoutingErrorHandler { - pub fn is_empty(&self) -> bool { - self.routing_errors.borrow().is_empty() - } - - pub fn retrieve_next_error(&mut self) -> (TargetId, GenericRoutingError) { - if self.routing_errors.borrow().is_empty() { - panic!("no routing request available"); - } - self.routing_errors.borrow_mut().pop_front().unwrap() - } - } - pub struct TestRouter { - pub routing_requests: RefCell>, + pub routing_requests: RefCell>, + pub routing_errors: RefCell>, pub injected_routing_failure: RefCell>, } @@ -1385,6 +1623,7 @@ pub mod tests { fn default() -> Self { Self { routing_requests: Default::default(), + routing_errors: Default::default(), injected_routing_failure: 
Default::default(), } } @@ -1398,6 +1637,31 @@ pub mod tests { Ok(()) } + pub fn handle_error( + &self, + target_id: ComponentId, + _token: VerificationToken, + _tc: &PusTcReader, + error: GenericRoutingError, + _time_stamp: &[u8], + _verif_reporter: &impl VerificationReportingProvider, + ) { + self.routing_errors + .borrow_mut() + .push_back((target_id, error)); + } + + pub fn no_routing_errors(&self) -> bool { + self.routing_errors.borrow().is_empty() + } + + pub fn retrieve_next_routing_error(&mut self) -> (ComponentId, GenericRoutingError) { + if self.routing_errors.borrow().is_empty() { + panic!("no routing request available"); + } + self.routing_errors.borrow_mut().pop_front().unwrap() + } + pub fn inject_routing_error(&mut self, error: GenericRoutingError) { *self.injected_routing_failure.borrow_mut() = Some(error); } @@ -1406,7 +1670,7 @@ pub mod tests { self.routing_requests.borrow().is_empty() } - pub fn retrieve_next_request(&mut self) -> (TargetId, REQUEST) { + pub fn retrieve_next_request(&mut self) -> (ComponentId, REQUEST) { if self.routing_requests.borrow().is_empty() { panic!("no routing request available"); } diff --git a/satrs/src/pus/mode.rs b/satrs/src/pus/mode.rs index 1ab46ef..abb6b99 100644 --- a/satrs/src/pus/mode.rs +++ b/satrs/src/pus/mode.rs @@ -2,6 +2,16 @@ use num_enum::{IntoPrimitive, TryFromPrimitive}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +#[cfg(feature = "alloc")] +#[allow(unused_imports)] +pub use alloc_mod::*; + +#[cfg(feature = "std")] +#[allow(unused_imports)] +pub use std_mod::*; + +pub const MODE_SERVICE_ID: u8 = 200; + #[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[repr(u8)] @@ -14,3 +24,134 @@ pub enum Subservice { TmCantReachMode = 7, TmWrongModeReply = 8, } + +#[cfg(feature = "alloc")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +pub mod alloc_mod {} + +#[cfg(feature = "alloc")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +pub mod std_mod {} + +#[cfg(test)] +mod tests { + + use std::sync::mpsc; + + use crate::{ + mode::{ + ModeAndSubmode, ModeReply, ModeReplySender, ModeRequest, ModeRequestSender, + ModeRequestorAndHandlerMpsc, ModeRequestorMpsc, + }, + request::{GenericMessage, MessageMetadata}, + }; + + const TEST_COMPONENT_ID_0: u64 = 5; + const TEST_COMPONENT_ID_1: u64 = 6; + const TEST_COMPONENT_ID_2: u64 = 7; + + #[test] + fn test_simple_mode_requestor() { + let (reply_sender, reply_receiver) = mpsc::channel(); + let (request_sender, request_receiver) = mpsc::channel(); + let mut mode_requestor = ModeRequestorMpsc::new(TEST_COMPONENT_ID_0, reply_receiver); + mode_requestor.add_message_target(TEST_COMPONENT_ID_1, request_sender); + + // Send a request and verify it arrives at the receiver. + let request_id = 2; + let sent_request = ModeRequest::ReadMode; + mode_requestor + .send_mode_request(request_id, TEST_COMPONENT_ID_1, sent_request) + .expect("send failed"); + let request = request_receiver.recv().expect("recv failed"); + assert_eq!(request.request_id(), 2); + assert_eq!(request.sender_id(), TEST_COMPONENT_ID_0); + assert_eq!(request.message, sent_request); + + // Send a reply and verify it arrives at the requestor. 
+ let mode_reply = ModeReply::ModeReply(ModeAndSubmode::new(1, 5)); + reply_sender + .send(GenericMessage::new( + MessageMetadata::new(request_id, TEST_COMPONENT_ID_1), + mode_reply, + )) + .expect("send failed"); + let reply = mode_requestor.try_recv_mode_reply().expect("recv failed"); + assert!(reply.is_some()); + let reply = reply.unwrap(); + assert_eq!(reply.sender_id(), TEST_COMPONENT_ID_1); + assert_eq!(reply.request_id(), 2); + assert_eq!(reply.message, mode_reply); + } + + #[test] + fn test_mode_requestor_and_request_handler_request_sending() { + let (_reply_sender_to_connector, reply_receiver_of_connector) = mpsc::channel(); + let (_request_sender_to_connector, request_receiver_of_connector) = mpsc::channel(); + + let (request_sender_to_channel_1, request_receiver_channel_1) = mpsc::channel(); + //let (reply_sender_to_channel_2, reply_receiver_channel_2) = mpsc::channel(); + let mut mode_connector = ModeRequestorAndHandlerMpsc::new( + TEST_COMPONENT_ID_0, + request_receiver_of_connector, + reply_receiver_of_connector, + ); + assert_eq!( + ModeRequestSender::local_channel_id(&mode_connector), + TEST_COMPONENT_ID_0 + ); + assert_eq!( + ModeReplySender::local_channel_id(&mode_connector), + TEST_COMPONENT_ID_0 + ); + assert_eq!( + mode_connector.local_channel_id_generic(), + TEST_COMPONENT_ID_0 + ); + + mode_connector.add_request_target(TEST_COMPONENT_ID_1, request_sender_to_channel_1); + + // Send a request and verify it arrives at the receiver. + let request_id = 2; + let sent_request = ModeRequest::ReadMode; + mode_connector + .send_mode_request(request_id, TEST_COMPONENT_ID_1, sent_request) + .expect("send failed"); + + let request = request_receiver_channel_1.recv().expect("recv failed"); + assert_eq!(request.request_id(), 2); + assert_eq!(request.sender_id(), TEST_COMPONENT_ID_0); + assert_eq!(request.message, ModeRequest::ReadMode); + } + + #[test] + fn test_mode_requestor_and_request_handler_reply_sending() { + let (_reply_sender_to_connector, reply_receiver_of_connector) = mpsc::channel(); + let (_request_sender_to_connector, request_receiver_of_connector) = mpsc::channel(); + + let (reply_sender_to_channel_2, reply_receiver_channel_2) = mpsc::channel(); + let mut mode_connector = ModeRequestorAndHandlerMpsc::new( + TEST_COMPONENT_ID_0, + request_receiver_of_connector, + reply_receiver_of_connector, + ); + mode_connector.add_reply_target(TEST_COMPONENT_ID_2, reply_sender_to_channel_2); + + // Send a reply and verify it arrives at the receiver. + let request_id = 2; + let sent_reply = ModeReply::ModeReply(ModeAndSubmode::new(3, 5)); + mode_connector + .send_mode_reply( + MessageMetadata::new(request_id, TEST_COMPONENT_ID_2), + sent_reply, + ) + .expect("send failed"); + let reply = reply_receiver_channel_2.recv().expect("recv failed"); + assert_eq!(reply.request_id(), 2); + assert_eq!(reply.sender_id(), TEST_COMPONENT_ID_0); + assert_eq!(reply.message, sent_reply); + } + + #[test] + fn test_mode_reply_handler() {} +} diff --git a/satrs/src/pus/scheduler.rs b/satrs/src/pus/scheduler.rs index 0c2f4db..b3b9ef2 100644 --- a/satrs/src/pus/scheduler.rs +++ b/satrs/src/pus/scheduler.rs @@ -381,7 +381,7 @@ pub mod alloc_mod { /// a [crate::pool::PoolProvider] API. This data structure just tracks the store /// addresses and their release times and offers a convenient API to insert and release /// telecommands and perform other functionality specified by the ECSS standard in section 6.11. 
- /// The time is tracked as a [spacepackets::time::UnixTimestamp] but the only requirement to + /// The time is tracked as a [spacepackets::time::UnixTime] but the only requirement to /// the timekeeping of the user is that it is convertible to that timestamp. /// /// The standard also specifies that the PUS scheduler can be enabled and disabled. @@ -871,28 +871,28 @@ mod tests { cds::CdsTime::from_unix_time_with_u16_days(×tamp, cds::SubmillisPrecision::Absent) .unwrap(); let len_time_stamp = cds_time.write_to_bytes(buf).unwrap(); - let len_packet = base_ping_tc_simple_ctor(0, None) + let len_packet = base_ping_tc_simple_ctor(0, &[]) .write_to_bytes(&mut buf[len_time_stamp..]) .unwrap(); ( - SpHeader::tc_unseg(0x02, 0x34, len_packet as u16).unwrap(), + SpHeader::new_for_unseg_tc(0x02, 0x34, len_packet as u16), len_packet + len_time_stamp, ) } fn scheduled_tc(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator { - let (mut sph, len_app_data) = pus_tc_base(timestamp, buf); - PusTcCreator::new_simple(&mut sph, 11, 4, Some(&buf[..len_app_data]), true) + let (sph, len_app_data) = pus_tc_base(timestamp, buf); + PusTcCreator::new_simple(sph, 11, 4, &buf[..len_app_data], true) } fn wrong_tc_service(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator { - let (mut sph, len_app_data) = pus_tc_base(timestamp, buf); - PusTcCreator::new_simple(&mut sph, 12, 4, Some(&buf[..len_app_data]), true) + let (sph, len_app_data) = pus_tc_base(timestamp, buf); + PusTcCreator::new_simple(sph, 12, 4, &buf[..len_app_data], true) } fn wrong_tc_subservice(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator { - let (mut sph, len_app_data) = pus_tc_base(timestamp, buf); - PusTcCreator::new_simple(&mut sph, 11, 5, Some(&buf[..len_app_data]), true) + let (sph, len_app_data) = pus_tc_base(timestamp, buf); + PusTcCreator::new_simple(sph, 11, 5, &buf[..len_app_data], true) } fn double_wrapped_time_tagged_tc(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator { @@ -900,40 +900,31 @@ mod tests { cds::CdsTime::from_unix_time_with_u16_days(×tamp, cds::SubmillisPrecision::Absent) .unwrap(); let len_time_stamp = cds_time.write_to_bytes(buf).unwrap(); - let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap(); + let sph = SpHeader::new_for_unseg_tc(0x02, 0x34, 0); // app data should not matter, double wrapped time-tagged commands should be rejected right // away - let inner_time_tagged_tc = PusTcCreator::new_simple(&mut sph, 11, 4, None, true); + let inner_time_tagged_tc = PusTcCreator::new_simple(sph, 11, 4, &[], true); let packet_len = inner_time_tagged_tc .write_to_bytes(&mut buf[len_time_stamp..]) .expect("writing inner time tagged tc failed"); - PusTcCreator::new_simple( - &mut sph, - 11, - 4, - Some(&buf[..len_time_stamp + packet_len]), - true, - ) + PusTcCreator::new_simple(sph, 11, 4, &buf[..len_time_stamp + packet_len], true) } fn invalid_time_tagged_cmd() -> PusTcCreator<'static> { - let mut sph = SpHeader::tc_unseg(0x02, 0x34, 1).unwrap(); - PusTcCreator::new_simple(&mut sph, 11, 4, None, true) + let sph = SpHeader::new_for_unseg_tc(0x02, 0x34, 1); + PusTcCreator::new_simple(sph, 11, 4, &[], true) } - fn base_ping_tc_simple_ctor( - seq_count: u16, - app_data: Option<&'static [u8]>, - ) -> PusTcCreator<'static> { - let mut sph = SpHeader::tc_unseg(0x02, seq_count, 0).unwrap(); - PusTcCreator::new_simple(&mut sph, 17, 1, app_data, true) + fn base_ping_tc_simple_ctor(seq_count: u16, app_data: &'static [u8]) -> PusTcCreator<'static> { + let sph = SpHeader::new_for_unseg_tc(0x02, seq_count, 0); + 
PusTcCreator::new_simple(sph, 17, 1, app_data, true) } fn ping_tc_to_store( pool: &mut StaticMemoryPool, buf: &mut [u8], seq_count: u16, - app_data: Option<&'static [u8]>, + app_data: &'static [u8], ) -> TcInfo { let ping_tc = base_ping_tc_simple_ctor(seq_count, app_data); let ping_size = ping_tc.write_to_bytes(buf).expect("writing ping TC failed"); @@ -957,7 +948,7 @@ mod tests { let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5)); let mut buf: [u8; 32] = [0; 32]; - let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None); + let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]); scheduler .insert_unwrapped_and_stored_tc( @@ -967,7 +958,7 @@ mod tests { .unwrap(); let app_data = &[0, 1, 2]; - let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, Some(app_data)); + let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, app_data); scheduler .insert_unwrapped_and_stored_tc( UnixTime::new_only_secs(200), @@ -976,7 +967,7 @@ mod tests { .unwrap(); let app_data = &[0, 1, 2]; - let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, Some(app_data)); + let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, app_data); scheduler .insert_unwrapped_and_stored_tc( UnixTime::new_only_secs(300), @@ -1087,10 +1078,10 @@ mod tests { let src_id_to_set = 12; let apid_to_set = 0x22; let seq_count = 105; - let mut sp_header = SpHeader::tc_unseg(apid_to_set, 105, 0).unwrap(); + let sp_header = SpHeader::new_for_unseg_tc(apid_to_set, 105, 0); let mut sec_header = PusTcSecondaryHeader::new_simple(17, 1); sec_header.source_id = src_id_to_set; - let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true); + let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true); let req_id = RequestId::from_tc(&ping_tc); assert_eq!(req_id.source_id(), src_id_to_set); assert_eq!(req_id.apid(), apid_to_set); @@ -1106,13 +1097,13 @@ mod tests { let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5)); let mut buf: [u8; 32] = [0; 32]; - let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None); + let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0) .expect("insertion failed"); - let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, None); + let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1) .expect("insertion failed"); @@ -1171,13 +1162,13 @@ mod tests { let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5)); let mut buf: [u8; 32] = [0; 32]; - let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None); + let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0) .expect("insertion failed"); - let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, None); + let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_1) .expect("insertion failed"); @@ -1230,13 +1221,13 @@ mod tests { scheduler.disable(); let mut buf: [u8; 32] = [0; 32]; - let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None); + let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0) .expect("insertion failed"); - let tc_info_1 = 
ping_tc_to_store(&mut pool, &mut buf, 1, None); + let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1) .expect("insertion failed"); @@ -1294,7 +1285,7 @@ mod tests { let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false)); let mut buf: [u8; 32] = [0; 32]; - let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None); + let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]); let info = scheduler .insert_unwrapped_tc( @@ -1309,7 +1300,7 @@ mod tests { let mut read_buf: [u8; 64] = [0; 64]; pool.read(&tc_info_0.addr(), &mut read_buf).unwrap(); let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data"); - assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None)); + assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[])); assert_eq!(scheduler.num_scheduled_telecommands(), 1); @@ -1332,7 +1323,7 @@ mod tests { let read_len = pool.read(&addr_vec[0], &mut read_buf).unwrap(); let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data"); assert_eq!(read_len, check_tc.1); - assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None)); + assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[])); } #[test] @@ -1356,7 +1347,7 @@ mod tests { let read_len = pool.read(&info.addr, &mut buf).unwrap(); let check_tc = PusTcReader::new(&buf).expect("incorrect Pus tc raw data"); assert_eq!(read_len, check_tc.1); - assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None)); + assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[])); assert_eq!(scheduler.num_scheduled_telecommands(), 1); @@ -1381,7 +1372,7 @@ mod tests { let read_len = pool.read(&addr_vec[0], &mut buf).unwrap(); let check_tc = PusTcReader::new(&buf).expect("incorrect PUS tc raw data"); assert_eq!(read_len, check_tc.1); - assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None)); + assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[])); } #[test] @@ -1506,7 +1497,7 @@ mod tests { let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false)); let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5)); let mut buf: [u8; 32] = [0; 32]; - let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None); + let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0) .expect("insertion failed"); @@ -1540,7 +1531,7 @@ mod tests { let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false)); let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5)); let mut buf: [u8; 32] = [0; 32]; - let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None); + let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0) .expect("insertion failed"); @@ -1563,7 +1554,7 @@ mod tests { let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false)); let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5)); let mut buf: [u8; 32] = [0; 32]; - let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None); + let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0) .expect("inserting tc failed"); @@ -1581,7 +1572,7 @@ mod tests { let mut pool = 
StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false)); let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5)); let mut buf: [u8; 32] = [0; 32]; - let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None); + let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0) .expect("inserting tc failed"); @@ -1599,15 +1590,15 @@ mod tests { let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false)); let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5)); let mut buf: [u8; 32] = [0; 32]; - let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None); + let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0) .expect("inserting tc failed"); - let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, None); + let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_1) .expect("inserting tc failed"); - let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, None); + let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_2) .expect("inserting tc failed"); @@ -1667,7 +1658,7 @@ mod tests { release_secs: u64, ) -> TcInfo { let mut buf: [u8; 32] = [0; 32]; - let tc_info = ping_tc_to_store(pool, &mut buf, seq_count, None); + let tc_info = ping_tc_to_store(pool, &mut buf, seq_count, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(release_secs as i64), tc_info) @@ -1915,13 +1906,13 @@ mod tests { let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5)); let mut buf: [u8; 32] = [0; 32]; - let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None); + let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0) .expect("insertion failed"); - let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, None); + let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]); scheduler .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1) .expect("insertion failed"); @@ -1949,13 +1940,13 @@ mod tests { #[test] fn test_generic_insert_app_data_test() { let time_writer = cds::CdsTime::new_with_u16_days(1, 1); - let mut sph = SpHeader::new( - PacketId::const_new(PacketType::Tc, true, 0x002), - PacketSequenceCtrl::const_new(SequenceFlags::Unsegmented, 5), + let sph = SpHeader::new( + PacketId::new(PacketType::Tc, true, 0x002), + PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5), 0, ); let sec_header = PusTcSecondaryHeader::new_simple(17, 1); - let ping_tc = PusTcCreator::new_no_app_data(&mut sph, sec_header, true); + let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true); let mut buf: [u8; 64] = [0; 64]; let result = generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc); assert!(result.is_ok()); @@ -1971,13 +1962,13 @@ mod tests { #[test] fn test_generic_insert_app_data_test_byte_conv_error() { let time_writer = cds::CdsTime::new_with_u16_days(1, 1); - let mut sph = SpHeader::new( - PacketId::const_new(PacketType::Tc, true, 0x002), - PacketSequenceCtrl::const_new(SequenceFlags::Unsegmented, 5), + let sph = SpHeader::new( + PacketId::new(PacketType::Tc, 
true, 0x002), + PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5), 0, ); let sec_header = PusTcSecondaryHeader::new_simple(17, 1); - let ping_tc = PusTcCreator::new_no_app_data(&mut sph, sec_header, true); + let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true); let mut buf: [u8; 16] = [0; 16]; let result = generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc); assert!(result.is_err()); @@ -2000,13 +1991,13 @@ mod tests { #[test] fn test_generic_insert_app_data_test_as_vec() { let time_writer = cds::CdsTime::new_with_u16_days(1, 1); - let mut sph = SpHeader::new( - PacketId::const_new(PacketType::Tc, true, 0x002), - PacketSequenceCtrl::const_new(SequenceFlags::Unsegmented, 5), + let sph = SpHeader::new( + PacketId::new(PacketType::Tc, true, 0x002), + PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5), 0, ); let sec_header = PusTcSecondaryHeader::new_simple(17, 1); - let ping_tc = PusTcCreator::new_no_app_data(&mut sph, sec_header, true); + let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true); let mut buf: [u8; 64] = [0; 64]; generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc).unwrap(); let vec = generate_insert_telecommand_app_data_as_vec(&time_writer, &ping_tc) diff --git a/satrs/src/pus/scheduler_srv.rs b/satrs/src/pus/scheduler_srv.rs index cc75fe0..6812770 100644 --- a/satrs/src/pus/scheduler_srv.rs +++ b/satrs/src/pus/scheduler_srv.rs @@ -1,20 +1,16 @@ use super::scheduler::PusSchedulerProvider; -use super::verification::{ - VerificationReporterWithSharedPoolMpscBoundedSender, - VerificationReporterWithSharedPoolMpscSender, VerificationReporterWithVecMpscBoundedSender, - VerificationReporterWithVecMpscSender, VerificationReportingProvider, -}; +use super::verification::{VerificationReporter, VerificationReportingProvider}; use super::{ - get_current_cds_short_timestamp, EcssTcInMemConverter, EcssTcInSharedStoreConverter, - EcssTcInVecConverter, EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusServiceHelper, - TmAsVecSenderWithBoundedMpsc, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc, - TmInSharedPoolSenderWithMpsc, + EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTcReceiverCore, + EcssTmSenderCore, MpscTcReceiver, MpscTmInSharedPoolSender, MpscTmInSharedPoolSenderBounded, + PusServiceHelper, PusTmAsVec, }; use crate::pool::PoolProvider; use crate::pus::{PusPacketHandlerResult, PusPacketHandlingError}; use alloc::string::ToString; use spacepackets::ecss::{scheduling, PusPacket}; use spacepackets::time::cds::CdsTime; +use std::sync::mpsc; /// This is a helper class for [std] environments to handle generic PUS 11 (scheduling service) /// packets. This handler is able to handle the most important PUS requests for a scheduling @@ -24,7 +20,7 @@ use spacepackets::time::cds::CdsTime; /// telecommands inside the scheduler. The user can retrieve the wrapped scheduler via the /// [Self::scheduler] and [Self::scheduler_mut] function and then use the scheduler API to release /// telecommands when applicable. 
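// A minimal polling sketch for the handler documented above (illustrative only; the
// `...StaticWithBoundedMpsc` alias used here is defined further down in this file, the module
// path and the surrounding objects such as `sched_tc_pool` are assumptions based on this patch).
// It shows the new calling convention: the caller now provides the timestamp and the dedicated
// scheduler TC pool instead of the handler generating the timestamp internally.
use satrs::pool::StaticMemoryPool;
use satrs::pus::scheduler_srv::PusService11SchedHandlerStaticWithBoundedMpsc;
use satrs::pus::PusPacketHandlerResult;
use spacepackets::time::{cds, TimeWriter};

fn poll_pus11_until_empty(
    handler: &mut PusService11SchedHandlerStaticWithBoundedMpsc,
    sched_tc_pool: &mut StaticMemoryPool,
) {
    let time_stamp = cds::CdsTime::now_with_u16_days()
        .expect("CDS time generation failed")
        .to_vec()
        .expect("timestamp serialization failed");
    loop {
        match handler.poll_and_handle_next_tc(&time_stamp, sched_tc_pool) {
            // No more telecommands queued for this service.
            Ok(PusPacketHandlerResult::Empty) => break,
            Ok(_) => (),
            Err(e) => {
                println!("PUS 11 handling error: {e}");
                break;
            }
        }
    }
    // Due telecommands are then released through the wrapped scheduler, for example via the
    // scheduler_mut() accessor mentioned in the doc comment above.
}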
-pub struct PusService11SchedHandler< +pub struct PusSchedServiceHandler< TcReceiver: EcssTcReceiverCore, TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter, @@ -43,13 +39,7 @@ impl< VerificationReporter: VerificationReportingProvider, Scheduler: PusSchedulerProvider, > - PusService11SchedHandler< - TcReceiver, - TmSender, - TcInMemConverter, - VerificationReporter, - Scheduler, - > + PusSchedServiceHandler { pub fn new( service_helper: PusServiceHelper< @@ -74,8 +64,9 @@ impl< &self.scheduler } - pub fn handle_one_tc( + pub fn poll_and_handle_next_tc( &mut self, + time_stamp: &[u8], sched_tc_pool: &mut (impl PoolProvider + ?Sized), ) -> Result { let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?; @@ -83,10 +74,10 @@ impl< return Ok(PusPacketHandlerResult::Empty); } let ecss_tc_and_token = possible_packet.unwrap(); - let tc = self - .service_helper - .tc_in_mem_converter - .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?; + self.service_helper + .tc_in_mem_converter_mut() + .cache(&ecss_tc_and_token.tc_in_memory)?; + let tc = self.service_helper.tc_in_mem_converter().convert()?; let subservice = PusPacket::subservice(&tc); let standard_subservice = scheduling::Subservice::try_from(subservice); if standard_subservice.is_err() { @@ -95,23 +86,28 @@ impl< ecss_tc_and_token.token, )); } - let mut partial_error = None; - let time_stamp = get_current_cds_short_timestamp(&mut partial_error); + let partial_error = None; match standard_subservice.unwrap() { scheduling::Subservice::TcEnableScheduling => { let start_token = self .service_helper - .common - .verification_handler - .start_success(ecss_tc_and_token.token, &time_stamp) + .verif_reporter() + .start_success( + &self.service_helper.common.tm_sender, + ecss_tc_and_token.token, + time_stamp, + ) .expect("Error sending start success"); self.scheduler.enable(); if self.scheduler.is_enabled() { self.service_helper - .common - .verification_handler - .completion_success(start_token, &time_stamp) + .verif_reporter() + .completion_success( + &self.service_helper.common.tm_sender, + start_token, + time_stamp, + ) .expect("Error sending completion success"); } else { return Err(PusPacketHandlingError::Other( @@ -122,17 +118,23 @@ impl< scheduling::Subservice::TcDisableScheduling => { let start_token = self .service_helper - .common - .verification_handler - .start_success(ecss_tc_and_token.token, &time_stamp) + .verif_reporter() + .start_success( + &self.service_helper.common.tm_sender, + ecss_tc_and_token.token, + time_stamp, + ) .expect("Error sending start success"); self.scheduler.disable(); if !self.scheduler.is_enabled() { self.service_helper - .common - .verification_handler - .completion_success(start_token, &time_stamp) + .verif_reporter() + .completion_success( + &self.service_helper.common.tm_sender, + start_token, + time_stamp, + ) .expect("Error sending completion success"); } else { return Err(PusPacketHandlingError::Other( @@ -143,9 +145,12 @@ impl< scheduling::Subservice::TcResetScheduling => { let start_token = self .service_helper - .common - .verification_handler - .start_success(ecss_tc_and_token.token, &time_stamp) + .verif_reporter() + .start_success( + &self.service_helper.common.tm_sender, + ecss_tc_and_token.token, + time_stamp, + ) .expect("Error sending start success"); self.scheduler @@ -153,17 +158,24 @@ impl< .expect("Error resetting TC Pool"); self.service_helper - .common - .verification_handler - .completion_success(start_token, &time_stamp) + 
.verif_reporter() + .completion_success( + &self.service_helper.common.tm_sender, + start_token, + time_stamp, + ) .expect("Error sending completion success"); } scheduling::Subservice::TcInsertActivity => { let start_token = self .service_helper .common - .verification_handler - .start_success(ecss_tc_and_token.token, &time_stamp) + .verif_reporter + .start_success( + &self.service_helper.common.tm_sender, + ecss_tc_and_token.token, + time_stamp, + ) .expect("error sending start success"); // let mut pool = self.sched_tc_pool.write().expect("locking pool failed"); @@ -172,9 +184,12 @@ impl< .expect("insertion of activity into pool failed"); self.service_helper - .common - .verification_handler - .completion_success(start_token, &time_stamp) + .verif_reporter() + .completion_success( + &self.service_helper.common.tm_sender, + start_token, + time_stamp, + ) .expect("sending completion success failed"); } _ => { @@ -195,53 +210,57 @@ impl< } /// Helper type definition for a PUS 11 handler with a dynamic TMTC memory backend and regular /// mpsc queues. -pub type PusService11SchedHandlerDynWithMpsc = PusService11SchedHandler< +pub type PusService11SchedHandlerDynWithMpsc = PusSchedServiceHandler< MpscTcReceiver, - TmAsVecSenderWithMpsc, + mpsc::Sender, EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, + VerificationReporter, PusScheduler, >; /// Helper type definition for a PUS 11 handler with a dynamic TMTC memory backend and bounded MPSC /// queues. -pub type PusService11SchedHandlerDynWithBoundedMpsc = PusService11SchedHandler< +pub type PusService11SchedHandlerDynWithBoundedMpsc = PusSchedServiceHandler< MpscTcReceiver, - TmAsVecSenderWithBoundedMpsc, + mpsc::SyncSender, EcssTcInVecConverter, - VerificationReporterWithVecMpscBoundedSender, + VerificationReporter, PusScheduler, >; /// Helper type definition for a PUS 11 handler with a shared store TMTC memory backend and regular /// mpsc queues. -pub type PusService11SchedHandlerStaticWithMpsc = PusService11SchedHandler< +pub type PusService11SchedHandlerStaticWithMpsc = PusSchedServiceHandler< MpscTcReceiver, - TmInSharedPoolSenderWithMpsc, + MpscTmInSharedPoolSender, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscSender, + VerificationReporter, PusScheduler, >; /// Helper type definition for a PUS 11 handler with a shared store TMTC memory backend and bounded /// mpsc queues. 
-pub type PusService11SchedHandlerStaticWithBoundedMpsc = PusService11SchedHandler< +pub type PusService11SchedHandlerStaticWithBoundedMpsc = PusSchedServiceHandler< MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, + MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, + VerificationReporter, PusScheduler, >; #[cfg(test)] mod tests { use crate::pool::{StaticMemoryPool, StaticPoolConfig}; - use crate::pus::tests::TEST_APID; - use crate::pus::verification::VerificationReporterWithSharedPoolMpscBoundedSender; + use crate::pus::test_util::{PusTestHarness, TEST_APID}; + use crate::pus::verification::{VerificationReporter, VerificationReportingProvider}; + use crate::pus::{ scheduler::{self, PusSchedulerProvider, TcInfo}, - tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness}, + tests::PusServiceHandlerWithSharedStoreCommon, verification::{RequestId, TcStateAccepted, VerificationToken}, EcssTcInSharedStoreConverter, }; - use crate::pus::{MpscTcReceiver, TmInSharedPoolSenderWithBoundedMpsc}; + use crate::pus::{ + MpscTcReceiver, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, + PusPacketHandlingError, + }; use alloc::collections::VecDeque; use delegate::delegate; use spacepackets::ecss::scheduling::Subservice; @@ -254,15 +273,15 @@ mod tests { time::cds, }; - use super::PusService11SchedHandler; + use super::PusSchedServiceHandler; struct Pus11HandlerWithStoreTester { common: PusServiceHandlerWithSharedStoreCommon, - handler: PusService11SchedHandler< + handler: PusSchedServiceHandler< MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, + MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, + VerificationReporter, TestScheduler, >, sched_tc_pool: StaticMemoryPool, @@ -273,19 +292,34 @@ mod tests { let test_scheduler = TestScheduler::default(); let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)], false); let sched_tc_pool = StaticMemoryPool::new(pool_cfg.clone()); - let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(); + let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(0); Self { common, - handler: PusService11SchedHandler::new(srv_handler, test_scheduler), + handler: PusSchedServiceHandler::new(srv_handler, test_scheduler), sched_tc_pool, } } + + pub fn handle_one_tc(&mut self) -> Result { + let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap(); + self.handler + .poll_and_handle_next_tc(&time_stamp, &mut self.sched_tc_pool) + } } impl PusTestHarness for Pus11HandlerWithStoreTester { + fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken { + let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc); + self.handler + .service_helper + .verif_reporter() + .acceptance_success(self.handler.service_helper.tm_sender(), init_token, &[0; 7]) + .expect("acceptance success failure") + } + delegate! 
{ to self.common { - fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken; + fn send_tc(&self, token: &VerificationToken, tc: &PusTcCreator); fn read_next_tm(&mut self) -> PusTmReader<'_>; fn check_no_tm_available(&self) -> bool; fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId); @@ -341,15 +375,17 @@ mod tests { test_harness: &mut Pus11HandlerWithStoreTester, subservice: Subservice, ) { - let mut reply_header = SpHeader::tm_unseg(TEST_APID, 0, 0).unwrap(); + let reply_header = SpHeader::new_for_unseg_tm(TEST_APID, 0, 0); let tc_header = PusTcSecondaryHeader::new_simple(11, subservice as u8); - let enable_scheduling = PusTcCreator::new(&mut reply_header, tc_header, &[0; 7], true); - let token = test_harness.send_tc(&enable_scheduling); + let enable_scheduling = PusTcCreator::new(reply_header, tc_header, &[0; 7], true); + let token = test_harness.init_verification(&enable_scheduling); + test_harness.send_tc(&token, &enable_scheduling); - let request_id = token.req_id(); + let request_id = token.request_id(); + let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap(); test_harness .handler - .handle_one_tc(&mut test_harness.sched_tc_pool) + .poll_and_handle_next_tc(&time_stamp, &mut test_harness.sched_tc_pool) .unwrap(); test_harness.check_next_verification_tm(1, request_id); test_harness.check_next_verification_tm(3, request_id); @@ -386,9 +422,9 @@ mod tests { #[test] fn test_insert_activity_tc() { let mut test_harness = Pus11HandlerWithStoreTester::new(); - let mut reply_header = SpHeader::tm_unseg(TEST_APID, 0, 0).unwrap(); + let mut reply_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); let mut sec_header = PusTcSecondaryHeader::new_simple(17, 1); - let ping_tc = PusTcCreator::new(&mut reply_header, sec_header, &[], true); + let ping_tc = PusTcCreator::new(reply_header, sec_header, &[], true); let req_id_ping_tc = scheduler::RequestId::from_tc(&ping_tc); let stamper = cds::CdsTime::now_with_u16_days().expect("time provider failed"); let mut sched_app_data: [u8; 64] = [0; 64]; @@ -396,21 +432,19 @@ mod tests { let ping_raw = ping_tc.to_vec().expect("generating raw tc failed"); sched_app_data[written_len..written_len + ping_raw.len()].copy_from_slice(&ping_raw); written_len += ping_raw.len(); - reply_header = SpHeader::tm_unseg(TEST_APID, 1, 0).unwrap(); + reply_header = SpHeader::new_for_unseg_tc(TEST_APID, 1, 0); sec_header = PusTcSecondaryHeader::new_simple(11, Subservice::TcInsertActivity as u8); let enable_scheduling = PusTcCreator::new( - &mut reply_header, + reply_header, sec_header, &sched_app_data[..written_len], true, ); - let token = test_harness.send_tc(&enable_scheduling); + let token = test_harness.init_verification(&enable_scheduling); + test_harness.send_tc(&token, &enable_scheduling); - let request_id = token.req_id(); - test_harness - .handler - .handle_one_tc(&mut test_harness.sched_tc_pool) - .unwrap(); + let request_id = token.request_id(); + test_harness.handle_one_tc().unwrap(); test_harness.check_next_verification_tm(1, request_id); test_harness.check_next_verification_tm(3, request_id); test_harness.check_next_verification_tm(7, request_id); diff --git a/satrs/src/pus/test.rs b/satrs/src/pus/test.rs index ea5a720..58abb0f 100644 --- a/satrs/src/pus/test.rs +++ b/satrs/src/pus/test.rs @@ -1,20 +1,17 @@ use crate::pus::{ - PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, PusTmWrapper, + PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, PusTmAsVec, + 
PusTmInPool, PusTmVariant, }; use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader}; use spacepackets::ecss::PusPacket; use spacepackets::SpHeader; +use std::sync::mpsc; -use super::verification::{ - VerificationReporterWithSharedPoolMpscBoundedSender, - VerificationReporterWithSharedPoolMpscSender, VerificationReporterWithVecMpscBoundedSender, - VerificationReporterWithVecMpscSender, VerificationReportingProvider, -}; +use super::verification::{VerificationReporter, VerificationReportingProvider}; use super::{ - get_current_cds_short_timestamp, EcssTcInMemConverter, EcssTcInSharedStoreConverter, - EcssTcInVecConverter, EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusServiceHelper, - TmAsVecSenderWithBoundedMpsc, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc, - TmInSharedPoolSenderWithMpsc, + EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTcReceiverCore, + EcssTmSenderCore, GenericConversionError, MpscTcReceiver, MpscTmInSharedPoolSender, + MpscTmInSharedPoolSenderBounded, PusServiceHelper, }; /// This is a helper class for [std] environments to handle generic PUS 17 (test service) packets. @@ -47,27 +44,32 @@ impl< Self { service_helper } } - pub fn handle_one_tc(&mut self) -> Result { + pub fn poll_and_handle_next_tc( + &mut self, + time_stamp: &[u8], + ) -> Result { let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?; if possible_packet.is_none() { return Ok(PusPacketHandlerResult::Empty); } let ecss_tc_and_token = possible_packet.unwrap(); - let tc = self - .service_helper - .tc_in_mem_converter - .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?; + self.service_helper + .tc_in_mem_converter_mut() + .cache(&ecss_tc_and_token.tc_in_memory)?; + let tc = self.service_helper.tc_in_mem_converter().convert()?; if tc.service() != 17 { - return Err(PusPacketHandlingError::WrongService(tc.service())); + return Err(GenericConversionError::WrongService(tc.service()).into()); } if tc.subservice() == 1 { let mut partial_error = None; - let time_stamp = get_current_cds_short_timestamp(&mut partial_error); let result = self .service_helper - .common - .verification_handler - .start_success(ecss_tc_and_token.token, &time_stamp) + .verif_reporter() + .start_success( + &self.service_helper.common.tm_sender, + ecss_tc_and_token.token, + time_stamp, + ) .map_err(|_| PartialPusHandlingError::Verification); let start_token = if let Ok(result) = result { Some(result) @@ -76,15 +78,17 @@ impl< None }; // Sequence count will be handled centrally in TM funnel. - let mut reply_header = - SpHeader::tm_unseg(self.service_helper.common.tm_apid, 0, 0).unwrap(); - let tc_header = PusTmSecondaryHeader::new_simple(17, 2, &time_stamp); - let ping_reply = PusTmCreator::new(&mut reply_header, tc_header, &[], true); + // It is assumed that the verification reporter was built with a valid APID, so we use + // the unchecked API here. 
+ let reply_header = + SpHeader::new_for_unseg_tm(self.service_helper.verif_reporter().apid(), 0, 0); + let tc_header = PusTmSecondaryHeader::new_simple(17, 2, time_stamp); + let ping_reply = PusTmCreator::new(reply_header, tc_header, &[], true); let result = self .service_helper .common .tm_sender - .send_tm(PusTmWrapper::Direct(ping_reply)) + .send_tm(self.service_helper.id(), PusTmVariant::Direct(ping_reply)) .map_err(PartialPusHandlingError::TmSend); if let Err(err) = result { partial_error = Some(err); @@ -93,9 +97,12 @@ impl< if let Some(start_token) = start_token { if self .service_helper - .common - .verification_handler - .completion_success(start_token, &time_stamp) + .verif_reporter() + .completion_success( + &self.service_helper.common.tm_sender, + start_token, + time_stamp, + ) .is_err() { partial_error = Some(PartialPusHandlingError::Verification) @@ -120,55 +127,57 @@ impl< /// mpsc queues. pub type PusService17TestHandlerDynWithMpsc = PusService17TestHandler< MpscTcReceiver, - TmAsVecSenderWithMpsc, + mpsc::Sender, EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, + VerificationReporter, >; /// Helper type definition for a PUS 17 handler with a dynamic TMTC memory backend and bounded MPSC /// queues. pub type PusService17TestHandlerDynWithBoundedMpsc = PusService17TestHandler< MpscTcReceiver, - TmAsVecSenderWithBoundedMpsc, + mpsc::SyncSender, EcssTcInVecConverter, - VerificationReporterWithVecMpscBoundedSender, + VerificationReporter, >; /// Helper type definition for a PUS 17 handler with a shared store TMTC memory backend and regular /// mpsc queues. pub type PusService17TestHandlerStaticWithMpsc = PusService17TestHandler< MpscTcReceiver, - TmInSharedPoolSenderWithMpsc, + MpscTmInSharedPoolSender, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscSender, + VerificationReporter, >; /// Helper type definition for a PUS 17 handler with a shared store TMTC memory backend and bounded /// mpsc queues. 
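// Analogous sketch for the PUS 17 variants aliased below (names and module paths are assumed
// from this patch): the test handler needs no TC pool argument, only the externally provided
// timestamp.
use satrs::pus::test::PusService17TestHandlerStaticWithBoundedMpsc;
use satrs::pus::PusPacketHandlerResult;
use spacepackets::time::{cds, TimeWriter};

// Returns true if a packet was handled, false if the TC queue was empty.
fn poll_pus17_once(handler: &mut PusService17TestHandlerStaticWithBoundedMpsc) -> bool {
    let time_stamp = cds::CdsTime::now_with_u16_days()
        .expect("CDS time generation failed")
        .to_vec()
        .expect("timestamp serialization failed");
    !matches!(
        handler
            .poll_and_handle_next_tc(&time_stamp)
            .expect("PUS 17 handling failed"),
        PusPacketHandlerResult::Empty
    )
}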
pub type PusService17TestHandlerStaticWithBoundedMpsc = PusService17TestHandler< MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, + MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, + VerificationReporter, >; #[cfg(test)] mod tests { + use crate::pus::test_util::{PusTestHarness, SimplePusPacketHandler, TEST_APID}; use crate::pus::tests::{ - PusServiceHandlerWithSharedStoreCommon, PusServiceHandlerWithVecCommon, PusTestHarness, - SimplePusPacketHandler, TEST_APID, + PusServiceHandlerWithSharedStoreCommon, PusServiceHandlerWithVecCommon, }; - use crate::pus::verification::std_mod::{ - VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender, + use crate::pus::verification::{ + RequestId, VerificationReporter, VerificationReportingProvider, }; - use crate::pus::verification::RequestId; use crate::pus::verification::{TcStateAccepted, VerificationToken}; use crate::pus::{ - EcssTcInSharedStoreConverter, EcssTcInVecConverter, MpscTcReceiver, PusPacketHandlerResult, - PusPacketHandlingError, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc, + EcssTcInSharedStoreConverter, EcssTcInVecConverter, GenericConversionError, MpscTcReceiver, + MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, + PusPacketHandlingError, }; + use crate::ComponentId; use delegate::delegate; use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader}; use spacepackets::ecss::tm::PusTmReader; use spacepackets::ecss::PusPacket; - use spacepackets::{SequenceFlags, SpHeader}; + use spacepackets::time::{cds, TimeWriter}; + use spacepackets::SpHeader; use super::PusService17TestHandler; @@ -176,15 +185,15 @@ mod tests { common: PusServiceHandlerWithSharedStoreCommon, handler: PusService17TestHandler< MpscTcReceiver, - TmInSharedPoolSenderWithBoundedMpsc, + MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter, - VerificationReporterWithSharedPoolMpscBoundedSender, + VerificationReporter, >, } impl Pus17HandlerWithStoreTester { - pub fn new() -> Self { - let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(); + pub fn new(id: ComponentId) -> Self { + let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(id); let pus_17_handler = PusService17TestHandler::new(srv_handler); Self { common, @@ -194,10 +203,19 @@ mod tests { } impl PusTestHarness for Pus17HandlerWithStoreTester { + fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken { + let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc); + self.handler + .service_helper + .verif_reporter() + .acceptance_success(self.handler.service_helper.tm_sender(), init_token, &[0; 7]) + .expect("acceptance success failure") + } + delegate! { to self.common { - fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken; fn read_next_tm(&mut self) -> PusTmReader<'_>; + fn send_tc(&self, token: &VerificationToken, tc: &PusTcCreator); fn check_no_tm_available(&self) -> bool; fn check_next_verification_tm( &self, @@ -208,27 +226,26 @@ mod tests { } } impl SimplePusPacketHandler for Pus17HandlerWithStoreTester { - delegate! 
{ - to self.handler { - fn handle_one_tc(&mut self) -> Result; - } + fn handle_one_tc(&mut self) -> Result { + let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap(); + self.handler.poll_and_handle_next_tc(&time_stamp) } } struct Pus17HandlerWithVecTester { - common: PusServiceHandlerWithVecCommon, + common: PusServiceHandlerWithVecCommon, handler: PusService17TestHandler< MpscTcReceiver, - TmAsVecSenderWithMpsc, + MpscTmAsVecSender, EcssTcInVecConverter, - VerificationReporterWithVecMpscSender, + VerificationReporter, >, } impl Pus17HandlerWithVecTester { - pub fn new() -> Self { + pub fn new(id: ComponentId) -> Self { let (common, srv_handler) = - PusServiceHandlerWithVecCommon::new_with_standard_verif_reporter(); + PusServiceHandlerWithVecCommon::new_with_standard_verif_reporter(id); Self { common, handler: PusService17TestHandler::new(srv_handler), @@ -237,9 +254,18 @@ mod tests { } impl PusTestHarness for Pus17HandlerWithVecTester { + fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken { + let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc); + self.handler + .service_helper + .verif_reporter() + .acceptance_success(self.handler.service_helper.tm_sender(), init_token, &[0; 7]) + .expect("acceptance success failure") + } + delegate! { to self.common { - fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken; + fn send_tc(&self, token: &VerificationToken, tc: &PusTcCreator); fn read_next_tm(&mut self) -> PusTmReader<'_>; fn check_no_tm_available(&self) -> bool; fn check_next_verification_tm( @@ -251,20 +277,20 @@ mod tests { } } impl SimplePusPacketHandler for Pus17HandlerWithVecTester { - delegate! { - to self.handler { - fn handle_one_tc(&mut self) -> Result; - } + fn handle_one_tc(&mut self) -> Result { + let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap(); + self.handler.poll_and_handle_next_tc(&time_stamp) } } fn ping_test(test_harness: &mut (impl PusTestHarness + SimplePusPacketHandler)) { // Create a ping TC, verify acceptance. 
- let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); + let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); let sec_header = PusTcSecondaryHeader::new_simple(17, 1); - let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true); - let token = test_harness.send_tc(&ping_tc); - let request_id = token.req_id(); + let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true); + let token = test_harness.init_verification(&ping_tc); + test_harness.send_tc(&token, &ping_tc); + let request_id = token.request_id(); let result = test_harness.handle_one_tc(); assert!(result.is_ok()); // We should see 4 replies in the TM queue now: Acceptance TM, Start TM, ping reply and @@ -288,19 +314,19 @@ mod tests { #[test] fn test_basic_ping_processing_using_store() { - let mut test_harness = Pus17HandlerWithStoreTester::new(); + let mut test_harness = Pus17HandlerWithStoreTester::new(0); ping_test(&mut test_harness); } #[test] fn test_basic_ping_processing_using_vec() { - let mut test_harness = Pus17HandlerWithVecTester::new(); + let mut test_harness = Pus17HandlerWithVecTester::new(0); ping_test(&mut test_harness); } #[test] fn test_empty_tc_queue() { - let mut test_harness = Pus17HandlerWithStoreTester::new(); + let mut test_harness = Pus17HandlerWithStoreTester::new(0); let result = test_harness.handle_one_tc(); assert!(result.is_ok()); let result = result.unwrap(); @@ -312,15 +338,19 @@ mod tests { #[test] fn test_sending_unsupported_service() { - let mut test_harness = Pus17HandlerWithStoreTester::new(); - let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); + let mut test_harness = Pus17HandlerWithStoreTester::new(0); + let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); let sec_header = PusTcSecondaryHeader::new_simple(3, 1); - let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true); - test_harness.send_tc(&ping_tc); + let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true); + let token = test_harness.init_verification(&ping_tc); + test_harness.send_tc(&token, &ping_tc); let result = test_harness.handle_one_tc(); assert!(result.is_err()); let error = result.unwrap_err(); - if let PusPacketHandlingError::WrongService(num) = error { + if let PusPacketHandlingError::RequestConversion(GenericConversionError::WrongService( + num, + )) = error + { assert_eq!(num, 3); } else { panic!("unexpected error type {error}") @@ -329,11 +359,12 @@ mod tests { #[test] fn test_sending_custom_subservice() { - let mut test_harness = Pus17HandlerWithStoreTester::new(); - let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); + let mut test_harness = Pus17HandlerWithStoreTester::new(0); + let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); let sec_header = PusTcSecondaryHeader::new_simple(17, 200); - let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true); - test_harness.send_tc(&ping_tc); + let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true); + let token = test_harness.init_verification(&ping_tc); + test_harness.send_tc(&token, &ping_tc); let result = test_harness.handle_one_tc(); assert!(result.is_ok()); let result = result.unwrap(); diff --git a/satrs/src/pus/verification.rs b/satrs/src/pus/verification.rs index e44bc73..35dc972 100644 --- a/satrs/src/pus/verification.rs +++ b/satrs/src/pus/verification.rs @@ -17,10 +17,11 @@ //! use std::time::Duration; //! 
use satrs::pool::{PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig}; //! use satrs::pus::verification::{ -//! VerificationReportingProvider, VerificationReporterCfg, VerificationReporterWithSender +//! VerificationReportingProvider, VerificationReporterCfg, VerificationReporter //! }; //! use satrs::seq_count::SeqCountProviderSimple; -//! use satrs::pus::TmInSharedPoolSenderWithMpsc; +//! use satrs::request::UniqueApidTargetId; +//! use satrs::pus::MpscTmInSharedPoolSender; //! use satrs::tmtc::tm_helper::SharedTmPool; //! use spacepackets::ecss::PusPacket; //! use spacepackets::SpHeader; @@ -29,35 +30,39 @@ //! //! const EMPTY_STAMP: [u8; 7] = [0; 7]; //! const TEST_APID: u16 = 0x02; +//! const TEST_COMPONENT_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, 0x05); //! //! let pool_cfg = StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false); //! let tm_pool = StaticMemoryPool::new(pool_cfg.clone()); //! let shared_tm_store = SharedTmPool::new(tm_pool); //! let tm_store = shared_tm_store.clone_backing_pool(); //! let (verif_tx, verif_rx) = mpsc::channel(); -//! let sender = TmInSharedPoolSenderWithMpsc::new(0, "Test Sender", shared_tm_store, verif_tx); +//! let sender = MpscTmInSharedPoolSender::new(shared_tm_store, verif_tx); //! let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); -//! let mut reporter = VerificationReporterWithSender::new(&cfg , sender); +//! let mut reporter = VerificationReporter::new(TEST_COMPONENT_ID.id(), &cfg); //! -//! let mut sph = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); //! let tc_header = PusTcSecondaryHeader::new_simple(17, 1); -//! let pus_tc_0 = PusTcCreator::new_no_app_data(&mut sph, tc_header, true); +//! let pus_tc_0 = PusTcCreator::new_no_app_data( +//! SpHeader::new_from_apid(TEST_APID), +//! tc_header, +//! true +//! ); //! let init_token = reporter.add_tc(&pus_tc_0); //! //! // Complete success sequence for a telecommand -//! let accepted_token = reporter.acceptance_success(init_token, &EMPTY_STAMP).unwrap(); -//! let started_token = reporter.start_success(accepted_token, &EMPTY_STAMP).unwrap(); -//! reporter.completion_success(started_token, &EMPTY_STAMP).unwrap(); +//! let accepted_token = reporter.acceptance_success(&sender, init_token, &EMPTY_STAMP).unwrap(); +//! let started_token = reporter.start_success(&sender, accepted_token, &EMPTY_STAMP).unwrap(); +//! reporter.completion_success(&sender, started_token, &EMPTY_STAMP).unwrap(); //! //! // Verify it arrives correctly on receiver end //! let mut tm_buf: [u8; 1024] = [0; 1024]; //! let mut packet_idx = 0; //! while packet_idx < 3 { -//! let addr = verif_rx.recv_timeout(Duration::from_millis(10)).unwrap(); +//! let tm_in_store = verif_rx.recv_timeout(Duration::from_millis(10)).unwrap(); //! let tm_len; //! { //! let mut rg = tm_store.write().expect("Error locking shared pool"); -//! let store_guard = rg.read_with_guard(addr); +//! let store_guard = rg.read_with_guard(tm_in_store.store_addr); //! tm_len = store_guard.read(&mut tm_buf).expect("Error reading TM slice"); //! } //! let (pus_tm, _) = PusTmReader::new(&tm_buf[0..tm_len], 7) @@ -98,9 +103,14 @@ pub use spacepackets::ecss::verification::*; #[cfg_attr(feature = "doc_cfg", doc(cfg(feature = "alloc")))] pub use alloc_mod::*; +use crate::request::Apid; +use crate::ComponentId; + +/* #[cfg(feature = "std")] #[cfg_attr(feature = "doc_cfg", doc(cfg(feature = "std")))] pub use std_mod::*; + */ /// This is a request identifier as specified in 5.4.11.2 c. of the PUS standard. 
/// @@ -139,12 +149,34 @@ impl PartialEq for RequestId { impl RequestId { pub const SIZE_AS_BYTES: usize = size_of::(); + /// This allows extracting the request ID from a given PUS telecommand. + pub fn new(tc: &(impl CcsdsPacket + IsPusTelecommand)) -> Self { + Self::new_from_ccsds_tc(tc) + } + + /// Extract the request ID from a CCSDS TC packet. + pub fn new_from_ccsds_tc(tc: &impl CcsdsPacket) -> Self { + RequestId { + version_number: tc.ccsds_version(), + packet_id: tc.packet_id(), + psc: tc.psc(), + } + } + pub fn raw(&self) -> u32 { ((self.version_number as u32) << 29) | ((self.packet_id.raw() as u32) << 16) | self.psc.raw() as u32 } + pub fn packet_id(&self) -> PacketId { + self.packet_id + } + + pub fn packet_seq_ctrl(&self) -> PacketSequenceCtrl { + self.psc + } + pub fn to_bytes(&self, buf: &mut [u8]) { let raw = self.raw(); buf.copy_from_slice(raw.to_be_bytes().as_slice()); @@ -162,17 +194,23 @@ impl RequestId { }) } } -impl RequestId { - /// This allows extracting the request ID from a given PUS telecommand. - pub fn new(tc: &(impl CcsdsPacket + IsPusTelecommand)) -> Self { - RequestId { - version_number: tc.ccsds_version(), - packet_id: tc.packet_id(), - psc: tc.psc(), + +impl From for RequestId { + fn from(value: u32) -> Self { + Self { + version_number: ((value >> 29) & 0b111) as u8, + packet_id: PacketId::from(((value >> 16) & 0xffff) as u16), + psc: PacketSequenceCtrl::from((value & 0xffff) as u16), } } } +impl From for u32 { + fn from(value: RequestId) -> Self { + value.raw() + } +} + /// If a verification operation fails, the passed token will be returned as well. This allows /// re-trying the operation at a later point. #[derive(Debug, Clone)] @@ -186,12 +224,48 @@ impl From> for VerificationOrSendErrorWithToken VerificationOrSendErrorWithToken(value.0, value.1) } } + /// Support token to allow type-state programming. This prevents calling the verification /// steps in an invalid order. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct VerificationToken { state: PhantomData, - req_id: RequestId, + request_id: RequestId, +} + +impl VerificationToken { + fn new(req_id: RequestId) -> VerificationToken { + VerificationToken { + state: PhantomData, + request_id: req_id, + } + } + + pub fn request_id(&self) -> RequestId { + self.request_id + } +} + +impl VerificationToken { + /// Create a verification token with the accepted state. This can be useful for test purposes. + /// For general purposes, it is recommended to use the API exposed by verification handlers. + pub fn new_accepted_state(req_id: RequestId) -> VerificationToken { + VerificationToken { + state: PhantomData, + request_id: req_id, + } + } +} + +impl VerificationToken { + /// Create a verification token with the started state. This can be useful for test purposes. + /// For general purposes, it is recommended to use the API exposed by verification handlers. 
+ pub fn new_started_state(req_id: RequestId) -> VerificationToken { + VerificationToken { + state: PhantomData, + request_id: req_id, + } + } } pub trait WasAtLeastAccepted {} @@ -219,6 +293,17 @@ pub enum TcStateToken { Completed(VerificationToken), } +impl TcStateToken { + pub fn request_id(&self) -> RequestId { + match self { + TcStateToken::None(token) => token.request_id(), + TcStateToken::Accepted(token) => token.request_id(), + TcStateToken::Started(token) => token.request_id(), + TcStateToken::Completed(token) => token.request_id(), + } + } +} + impl From> for TcStateToken { fn from(t: VerificationToken) -> Self { TcStateToken::None(t) @@ -267,24 +352,11 @@ impl From> for TcStateToken { } } -impl VerificationToken { - fn new(req_id: RequestId) -> VerificationToken { - VerificationToken { - state: PhantomData, - req_id, - } - } - - pub fn req_id(&self) -> RequestId { - self.req_id - } -} - /// Composite helper struct to pass failure parameters to the [VerificationReporter] pub struct FailParams<'stamp, 'fargs> { - time_stamp: &'stamp [u8], - failure_code: &'fargs dyn EcssEnumeration, - failure_data: &'fargs [u8], + pub time_stamp: &'stamp [u8], + pub failure_code: &'fargs dyn EcssEnumeration, + pub failure_data: &'fargs [u8], } impl<'stamp, 'fargs> FailParams<'stamp, 'fargs> { @@ -310,8 +382,8 @@ impl<'stamp, 'fargs> FailParams<'stamp, 'fargs> { /// Composite helper struct to pass step failure parameters to the [VerificationReporter] pub struct FailParamsWithStep<'stamp, 'fargs> { - bp: FailParams<'stamp, 'fargs>, - step: &'fargs dyn EcssEnumeration, + pub common: FailParams<'stamp, 'fargs>, + pub step: &'fargs dyn EcssEnumeration, } impl<'stamp, 'fargs> FailParamsWithStep<'stamp, 'fargs> { @@ -322,13 +394,26 @@ impl<'stamp, 'fargs> FailParamsWithStep<'stamp, 'fargs> { failure_data: &'fargs [u8], ) -> Self { Self { - bp: FailParams::new(time_stamp, failure_code, failure_data), + common: FailParams::new(time_stamp, failure_code, failure_data), step, } } } +/// This is a generic trait implemented by an object which can perform the ECSS PUS 1 verification +/// process according to PUS standard ECSS-E-ST-70-41C. +/// +/// This trait allows using different message queue backends for the verification reporting process +/// or to swap the actual reporter with a test reporter for unit tests. +/// For general purposes, the [VerificationReporter] should be sufficient. pub trait VerificationReportingProvider { + /// It is generally assumed that the reporting provider is owned by some PUS service with + /// a unique ID. 
+ fn owner_id(&self) -> ComponentId; + + fn set_apid(&mut self, apid: Apid); + fn apid(&self) -> Apid; + fn add_tc( &mut self, pus_tc: &(impl CcsdsPacket + IsPusTelecommand), @@ -340,30 +425,35 @@ pub trait VerificationReportingProvider { fn acceptance_success( &self, + sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, time_stamp: &[u8], ) -> Result, EcssTmtcError>; fn acceptance_failure( &self, + sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError>; fn start_success( &self, + sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, time_stamp: &[u8], ) -> Result, EcssTmtcError>; fn start_failure( &self, + sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError>; fn step_success( &self, + sender: &(impl EcssTmSenderCore + ?Sized), token: &VerificationToken, time_stamp: &[u8], step: impl EcssEnumeration, @@ -371,25 +461,28 @@ pub trait VerificationReportingProvider { fn step_failure( &self, + sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, params: FailParamsWithStep, ) -> Result<(), EcssTmtcError>; fn completion_success( &self, + sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, time_stamp: &[u8], ) -> Result<(), EcssTmtcError>; fn completion_failure( &self, + sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError>; } -/// Primary verification handler. It provides an API to generate PUS 1 verification telemetry -/// packets and verify the various steps of telecommand handling as specified in the PUS standard. +/// Low level object which generates ECSS PUS 1 verification packets to verify the various steps +/// of telecommand handling as specified in the PUS standard. /// /// This is the core component which can be used without [`alloc`] support. Please note that /// the buffer passed to the API exposes by this struct will be used to serialize the source data. 
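// Condensed success-path sketch for the trait above. Every report call now takes the TM sender
// explicitly instead of the reporter owning a sender; construction of the reporter and sender
// follows the module-level example at the top of this file, and the remaining names here are
// placeholders.
use satrs::pus::verification::{VerificationReporter, VerificationReportingProvider};
use satrs::pus::EcssTmSenderCore;
use spacepackets::ecss::tc::PusTcCreator;

fn report_success_path(
    reporter: &mut VerificationReporter,
    sender: &(impl EcssTmSenderCore + ?Sized),
    pus_tc: &PusTcCreator,
    time_stamp: &[u8],
) {
    let init_token = reporter.add_tc(pus_tc);
    let accepted_token = reporter
        .acceptance_success(sender, init_token, time_stamp)
        .expect("sending acceptance report failed");
    let started_token = reporter
        .start_success(sender, accepted_token, time_stamp)
        .expect("sending start report failed");
    reporter
        .completion_success(sender, started_token, time_stamp)
        .expect("sending completion report failed");
}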
@@ -458,7 +551,7 @@ impl VerificationReportCreator { subservice, seq_count, msg_count, - &token.req_id, + &token.request_id(), time_stamp, None::<&dyn EcssEnumeration>, )?; @@ -482,7 +575,7 @@ impl VerificationReportCreator { subservice, seq_count, msg_count, - &token.req_id, + &token.request_id(), step, params, )?; @@ -516,7 +609,7 @@ impl VerificationReportCreator { tm_creator, VerificationToken { state: PhantomData, - req_id: token.req_id, + request_id: token.request_id(), }, )) } @@ -570,7 +663,7 @@ impl VerificationReportCreator { tm_creator, VerificationToken { state: PhantomData, - req_id: token.req_id, + request_id: token.request_id(), }, )) } @@ -615,7 +708,7 @@ impl VerificationReportCreator { Subservice::TmStepSuccess.into(), seq_count, msg_count, - &token.req_id, + &token.request_id(), time_stamp, Some(&step), ) @@ -638,9 +731,9 @@ impl VerificationReportCreator { Subservice::TmStepFailure.into(), seq_count, msg_count, - &token.req_id, + &token.request_id(), Some(params.step), - ¶ms.bp, + ¶ms.common, ) } @@ -714,12 +807,12 @@ impl VerificationReportCreator { step.write_to_be_bytes(&mut src_data_buf[idx..idx + step.size()]) .unwrap(); } - let mut sp_header = SpHeader::tm_unseg(self.apid(), seq_count, 0).unwrap(); + let sp_header = SpHeader::new_for_unseg_tm(self.apid(), seq_count, 0); Ok(self.create_pus_verif_tm_base( src_data_buf, subservice, msg_counter, - &mut sp_header, + sp_header, time_stamp, source_data_len, )) @@ -757,12 +850,12 @@ impl VerificationReportCreator { .write_to_be_bytes(&mut src_data_buf[idx..idx + params.failure_code.size()])?; idx += params.failure_code.size(); src_data_buf[idx..idx + params.failure_data.len()].copy_from_slice(params.failure_data); - let mut sp_header = SpHeader::tm_unseg(self.apid(), seq_count, 0).unwrap(); + let sp_header = SpHeader::new_for_unseg_tm(self.apid(), seq_count, 0); Ok(self.create_pus_verif_tm_base( src_data_buf, subservice, msg_counter, - &mut sp_header, + sp_header, params.time_stamp, source_data_len, )) @@ -773,12 +866,12 @@ impl VerificationReportCreator { src_data_buf: &'src_data mut [u8], subservice: u8, msg_counter: u16, - sp_header: &mut SpHeader, + sp_header: SpHeader, time_stamp: &'time [u8], source_data_len: usize, ) -> PusTmCreator<'time, 'src_data> { let tm_sec_header = - PusTmSecondaryHeader::new(1, subservice, msg_counter, self.dest_id, Some(time_stamp)); + PusTmSecondaryHeader::new(1, subservice, msg_counter, self.dest_id, time_stamp); PusTmCreator::new( sp_header, tm_sec_header, @@ -791,10 +884,7 @@ impl VerificationReportCreator { #[cfg(feature = "alloc")] pub mod alloc_mod { use super::*; - use crate::{ - pus::{PusTmWrapper, TmAsVecSenderWithId, TmInSharedPoolSenderWithId}, - seq_count::SequenceCountProvider, - }; + use crate::{pus::PusTmVariant, ComponentId}; use core::cell::RefCell; #[derive(Clone)] @@ -824,22 +914,46 @@ pub mod alloc_mod { } } - /// Primary verification handler. It provides an API to send PUS 1 verification telemetry packets - /// and verify the various steps of telecommand handling as specified in the PUS standard. - /// It is assumed that the sequence counter and message counters are updated in a central - /// TM funnel. This helper will always set those fields to 0. - #[derive(Clone)] - pub struct VerificationReporter { - source_data_buf: RefCell>, - pub seq_count_provider: Option + Send>>, - pub msg_count_provider: Option + Send>>, - pub reporter: VerificationReportCreator, + /// This trait allows hooking into the TM generation process of the [VerificationReporter]. 
+ /// + /// The [Self::modify_tm] function is called before the TM is sent. This allows users to change + /// fields like the message count or sequence counter before the TM is sent. + pub trait VerificationHookProvider { + fn modify_tm(&self, tm: &mut PusTmCreator); } - impl VerificationReporter { - pub fn new(cfg: &VerificationReporterCfg) -> Self { + /// [VerificationHookProvider] which does nothing. This is the default hook variant for + /// the [VerificationReporter], assuming that any necessary packet manipulation is performed by + /// a centralized TM funnel or inlet. + #[derive(Default, Copy, Clone)] + pub struct DummyVerificationHook {} + + impl VerificationHookProvider for DummyVerificationHook { + fn modify_tm(&self, _tm: &mut PusTmCreator) {} + } + + /// Primary verification reportewr object. It provides an API to send PUS 1 verification + /// telemetry packets and verify the various steps of telecommand handling as specified in the + /// PUS standard. + /// + /// It is assumed that the sequence counter and message counters are updated in a central + /// TM funnel or TM inlet. This helper will always set those fields to 0. The APID and + /// destination fields are assumed to be constant for a given repoter instance. + #[derive(Clone)] + pub struct VerificationReporter< + VerificationHook: VerificationHookProvider = DummyVerificationHook, + > { + owner_id: ComponentId, + source_data_buf: RefCell>, + pub reporter_creator: VerificationReportCreator, + pub tm_hook: VerificationHook, + } + + impl VerificationReporter { + pub fn new(owner_id: ComponentId, cfg: &VerificationReporterCfg) -> Self { let reporter = VerificationReportCreator::new(cfg.apid).unwrap(); Self { + owner_id, source_data_buf: RefCell::new(alloc::vec![ 0; RequestId::SIZE_AS_BYTES @@ -847,14 +961,37 @@ pub mod alloc_mod { + cfg.fail_code_field_width + cfg.max_fail_data_len ]), - seq_count_provider: None, - msg_count_provider: None, - reporter, + reporter_creator: reporter, + tm_hook: DummyVerificationHook::default(), + } + } + } + + impl VerificationReporter { + /// The provided [VerificationHookProvider] can be used to modify a verification packet + /// before it is sent. 
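// Sketch of a user-provided hook for the new_with_hook() constructor shown below. This variant
// only counts generated packets (interior mutability because modify_tm() takes &self); a real
// hook would typically stamp sequence or message counters at this point, as described above.
// The component ID and reporter configuration values are placeholders.
use core::cell::Cell;
use satrs::pus::verification::{
    VerificationHookProvider, VerificationReporter, VerificationReporterCfg,
};
use spacepackets::ecss::tm::PusTmCreator;

#[derive(Default)]
pub struct TmCountingHook {
    pub generated_tms: Cell<u32>,
}

impl VerificationHookProvider for TmCountingHook {
    fn modify_tm(&self, _tm: &mut PusTmCreator) {
        self.generated_tms.set(self.generated_tms.get() + 1);
    }
}

fn reporter_with_hook() -> VerificationReporter<TmCountingHook> {
    let cfg = VerificationReporterCfg::new(0x02, 1, 2, 8).unwrap();
    VerificationReporter::new_with_hook(0, &cfg, TmCountingHook::default())
}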
+ pub fn new_with_hook( + owner_id: ComponentId, + cfg: &VerificationReporterCfg, + tm_hook: VerificationHook, + ) -> Self { + let reporter = VerificationReportCreator::new(cfg.apid).unwrap(); + Self { + owner_id, + source_data_buf: RefCell::new(alloc::vec![ + 0; + RequestId::SIZE_AS_BYTES + + cfg.step_field_width + + cfg.fail_code_field_width + + cfg.max_fail_data_len + ]), + reporter_creator: reporter, + tm_hook, } } delegate!( - to self.reporter { + to self.reporter_creator { pub fn set_apid(&mut self, apid: u16) -> bool; pub fn apid(&self) -> u16; pub fn add_tc(&mut self, pus_tc: &(impl CcsdsPacket + IsPusTelecommand)) -> VerificationToken; @@ -867,146 +1004,114 @@ pub mod alloc_mod { pub fn allowed_source_data_len(&self) -> usize { self.source_data_buf.borrow().capacity() } + } + + impl VerificationReportingProvider + for VerificationReporter + { + delegate!( + to self.reporter_creator { + fn set_apid(&mut self, apid: Apid); + fn apid(&self) -> Apid; + fn add_tc(&mut self, pus_tc: &(impl CcsdsPacket + IsPusTelecommand)) -> VerificationToken; + fn add_tc_with_req_id(&mut self, req_id: RequestId) -> VerificationToken; + } + ); + + fn owner_id(&self) -> ComponentId { + self.owner_id + } /// Package and send a PUS TM\[1, 1\] packet, see 8.1.2.1 of the PUS standard - pub fn acceptance_success( + fn acceptance_success( &self, - token: VerificationToken, sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, time_stamp: &[u8], ) -> Result, EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut source_data_buf = self.source_data_buf.borrow_mut(); - let (tm_creator, token) = self - .reporter - .acceptance_success( - source_data_buf.as_mut_slice(), - token, - seq_count, - msg_count, - time_stamp, - ) + let (mut tm_creator, token) = self + .reporter_creator + .acceptance_success(source_data_buf.as_mut_slice(), token, 0, 0, time_stamp) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(tm_creator))?; + self.tm_hook.modify_tm(&mut tm_creator); + sender.send_tm(self.owner_id(), PusTmVariant::Direct(tm_creator))?; Ok(token) } /// Package and send a PUS TM\[1, 2\] packet, see 8.1.2.2 of the PUS standard - pub fn acceptance_failure( + fn acceptance_failure( &self, - token: VerificationToken, sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut buf = self.source_data_buf.borrow_mut(); - let sendable = self - .reporter - .acceptance_failure(buf.as_mut_slice(), token, seq_count, msg_count, params) + let mut tm_creator = self + .reporter_creator + .acceptance_failure(buf.as_mut_slice(), token, 0, 0, params) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(sendable))?; + self.tm_hook.modify_tm(&mut tm_creator); + sender.send_tm(self.owner_id(), PusTmVariant::Direct(tm_creator))?; Ok(()) } /// Package and send a PUS TM\[1, 3\] packet, see 8.1.2.3 of the PUS standard. /// /// Requires a token previously acquired by calling [Self::acceptance_success]. 
- pub fn start_success( + fn start_success( &self, - token: VerificationToken, sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, time_stamp: &[u8], ) -> Result, EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut buf = self.source_data_buf.borrow_mut(); - let (tm_creator, started_token) = self - .reporter - .start_success(buf.as_mut_slice(), token, seq_count, msg_count, time_stamp) + let (mut tm_creator, started_token) = self + .reporter_creator + .start_success(buf.as_mut_slice(), token, 0, 0, time_stamp) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(tm_creator))?; + self.tm_hook.modify_tm(&mut tm_creator); + sender.send_tm(self.owner_id(), PusTmVariant::Direct(tm_creator))?; Ok(started_token) - //self.reporter.send_start_success(sendable, sender) } /// Package and send a PUS TM\[1, 4\] packet, see 8.1.2.4 of the PUS standard. /// /// Requires a token previously acquired by calling [Self::acceptance_success]. It consumes /// the token because verification handling is done. - pub fn start_failure( + fn start_failure( &self, - token: VerificationToken, sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut buf = self.source_data_buf.borrow_mut(); - let sendable = self - .reporter - .start_failure(buf.as_mut_slice(), token, seq_count, msg_count, params) + let mut tm_creator = self + .reporter_creator + .start_failure(buf.as_mut_slice(), token, 0, 0, params) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(sendable))?; + self.tm_hook.modify_tm(&mut tm_creator); + sender.send_tm(self.owner_id(), PusTmVariant::Direct(tm_creator))?; Ok(()) } /// Package and send a PUS TM\[1, 5\] packet, see 8.1.2.5 of the PUS standard. /// /// Requires a token previously acquired by calling [Self::start_success]. - pub fn step_success( + fn step_success( &self, - token: &VerificationToken, sender: &(impl EcssTmSenderCore + ?Sized), + token: &VerificationToken, time_stamp: &[u8], step: impl EcssEnumeration, ) -> Result<(), EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut buf = self.source_data_buf.borrow_mut(); - let sendable = self - .reporter - .step_success( - buf.as_mut_slice(), - token, - seq_count, - msg_count, - time_stamp, - step, - ) + let mut tm_creator = self + .reporter_creator + .step_success(buf.as_mut_slice(), token, 0, 0, time_stamp, step) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(sendable))?; + self.tm_hook.modify_tm(&mut tm_creator); + sender.send_tm(self.owner_id(), PusTmVariant::Direct(tm_creator))?; Ok(()) } @@ -1014,26 +1119,19 @@ pub mod alloc_mod { /// /// Requires a token previously acquired by calling [Self::start_success]. It consumes the /// token because verification handling is done. 
- pub fn step_failure( + fn step_failure( &self, - token: VerificationToken, sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, params: FailParamsWithStep, ) -> Result<(), EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut buf = self.source_data_buf.borrow_mut(); - let sendable = self - .reporter - .step_failure(buf.as_mut_slice(), token, seq_count, msg_count, params) + let mut tm_creator = self + .reporter_creator + .step_failure(buf.as_mut_slice(), token, 0, 0, params) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(sendable))?; + self.tm_hook.modify_tm(&mut tm_creator); + sender.send_tm(self.owner_id(), PusTmVariant::Direct(tm_creator))?; Ok(()) } @@ -1041,26 +1139,20 @@ pub mod alloc_mod { /// /// Requires a token previously acquired by calling [Self::start_success]. It consumes the /// token because verification handling is done. - pub fn completion_success( + fn completion_success( &self, - token: VerificationToken, + // sender_id: ComponentId, sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, time_stamp: &[u8], ) -> Result<(), EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut buf = self.source_data_buf.borrow_mut(); - let sendable = self - .reporter - .completion_success(buf.as_mut_slice(), token, seq_count, msg_count, time_stamp) + let mut tm_creator = self + .reporter_creator + .completion_success(buf.as_mut_slice(), token, 0, 0, time_stamp) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(sendable))?; + self.tm_hook.modify_tm(&mut tm_creator); + sender.send_tm(self.owner_id, PusTmVariant::Direct(tm_creator))?; Ok(()) } @@ -1068,395 +1160,521 @@ pub mod alloc_mod { /// /// Requires a token previously acquired by calling [Self::start_success]. It consumes the /// token because verification handling is done. - pub fn completion_failure( + fn completion_failure( &self, - token: VerificationToken, sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError> { - let seq_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); - let msg_count = self - .seq_count_provider - .as_ref() - .map_or(0, |v| v.get_and_increment()); let mut buf = self.source_data_buf.borrow_mut(); - let sendable = self - .reporter - .completion_failure(buf.as_mut_slice(), token, seq_count, msg_count, params) + let mut tm_creator = self + .reporter_creator + .completion_failure(buf.as_mut_slice(), token, 0, 00, params) .map_err(PusError::ByteConversion)?; - sender.send_tm(PusTmWrapper::Direct(sendable))?; + self.tm_hook.modify_tm(&mut tm_creator); + sender.send_tm(self.owner_id(), PusTmVariant::Direct(tm_creator))?; Ok(()) } } - - /// Helper object which caches the sender passed as a trait object. Provides the same - /// API as [VerificationReporter] but without the explicit sender arguments. 
- #[derive(Clone)] - pub struct VerificationReporterWithSender { - pub reporter: VerificationReporter, - pub sender: Sender, - } - - impl VerificationReporterWithSender { - pub fn new(cfg: &VerificationReporterCfg, sender: Sender) -> Self { - let reporter = VerificationReporter::new(cfg); - Self::new_from_reporter(reporter, sender) - } - - pub fn new_from_reporter(reporter: VerificationReporter, sender: Sender) -> Self { - Self { reporter, sender } - } - - delegate! { - to self.reporter { - pub fn set_apid(&mut self, apid: u16) -> bool; - pub fn apid(&self) -> u16; - pub fn dest_id(&self) -> u16; - pub fn set_dest_id(&mut self, dest_id: u16); - } - } - } - - impl VerificationReportingProvider - for VerificationReporterWithSender - { - delegate! { - to self.reporter { - fn add_tc( - &mut self, - pus_tc: &(impl CcsdsPacket + IsPusTelecommand), - ) -> VerificationToken; - fn add_tc_with_req_id(&mut self, req_id: RequestId) -> VerificationToken; - } - } - - fn acceptance_success( - &self, - token: VerificationToken, - time_stamp: &[u8], - ) -> Result, EcssTmtcError> { - self.reporter - .acceptance_success(token, &self.sender, time_stamp) - } - - fn acceptance_failure( - &self, - token: VerificationToken, - params: FailParams, - ) -> Result<(), EcssTmtcError> { - self.reporter - .acceptance_failure(token, &self.sender, params) - } - - fn start_success( - &self, - token: VerificationToken, - time_stamp: &[u8], - ) -> Result, EcssTmtcError> { - self.reporter.start_success(token, &self.sender, time_stamp) - } - - fn start_failure( - &self, - token: VerificationToken, - params: FailParams, - ) -> Result<(), EcssTmtcError> { - self.reporter.start_failure(token, &self.sender, params) - } - - fn step_success( - &self, - token: &VerificationToken, - time_stamp: &[u8], - step: impl EcssEnumeration, - ) -> Result<(), EcssTmtcError> { - self.reporter - .step_success(token, &self.sender, time_stamp, step) - } - - fn step_failure( - &self, - token: VerificationToken, - params: FailParamsWithStep, - ) -> Result<(), EcssTmtcError> { - self.reporter.step_failure(token, &self.sender, params) - } - - fn completion_success( - &self, - token: VerificationToken, - time_stamp: &[u8], - ) -> Result<(), EcssTmtcError> { - self.reporter - .completion_success(token, &self.sender, time_stamp) - } - - fn completion_failure( - &self, - token: VerificationToken, - params: FailParams, - ) -> Result<(), EcssTmtcError> { - self.reporter - .completion_failure(token, &self.sender, params) - } - } - - pub type VerificationReporterWithSharedPoolSender = - VerificationReporterWithSender>; - pub type VerificationReporterWithVecSender = - VerificationReporterWithSender>; } +/* #[cfg(feature = "std")] pub mod std_mod { use std::sync::mpsc; use crate::pool::StoreAddr; + use crate::pus::verification::VerificationReporterWithSender; - use super::alloc_mod::{ - VerificationReporterWithSharedPoolSender, VerificationReporterWithVecSender, - }; + use super::alloc_mod::VerificationReporterWithSharedPoolSender; pub type VerificationReporterWithSharedPoolMpscSender = VerificationReporterWithSharedPoolSender>; pub type VerificationReporterWithSharedPoolMpscBoundedSender = VerificationReporterWithSharedPoolSender>; pub type VerificationReporterWithVecMpscSender = - VerificationReporterWithVecSender>>; + VerificationReporterWithSender>>; pub type VerificationReporterWithVecMpscBoundedSender = - VerificationReporterWithVecSender>>; + VerificationReporterWithSender>>; } + */ -#[cfg(test)] -pub mod tests { - use crate::pool::{PoolProviderWithGuards, 
StaticMemoryPool, StaticPoolConfig}; - use crate::pus::tests::CommonTmInfo; - use crate::pus::verification::{ - EcssTmSenderCore, EcssTmtcError, FailParams, FailParamsWithStep, RequestId, TcStateNone, - VerificationReporter, VerificationReporterCfg, VerificationReporterWithSender, - VerificationToken, - }; - use crate::pus::{ - EcssChannel, PusTmWrapper, TmInSharedPoolSenderWithId, TmInSharedPoolSenderWithMpsc, - }; - use crate::tmtc::tm_helper::SharedTmPool; - use crate::ChannelId; - use alloc::format; - use alloc::sync::Arc; - use hashbrown::HashMap; - use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader}; - use spacepackets::ecss::tm::PusTmReader; - use spacepackets::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU8, PusError, PusPacket}; - use spacepackets::util::UnsignedEnum; - use spacepackets::{ByteConversionError, CcsdsPacket, SpHeader}; - use std::cell::RefCell; +#[cfg(any(feature = "test_util", test))] +pub mod test_util { + use alloc::vec::Vec; + use core::cell::RefCell; use std::collections::VecDeque; - use std::sync::{mpsc, Mutex}; - use std::time::Duration; - use std::vec; - use std::vec::Vec; - use super::VerificationReportingProvider; + use super::*; - fn is_send(_: &T) {} - #[allow(dead_code)] - fn is_sync(_: &T) {} - - pub struct VerificationStatus { - pub accepted: Option, - pub started: Option, - pub step: u64, - pub step_status: Option, - pub completed: Option, - pub failure_data: Option>, - pub fail_enum: Option, + #[derive(Debug, PartialEq)] + pub struct SuccessData { + pub sender: ComponentId, + pub time_stamp: Vec, } - pub type SharedVerificationMap = Arc>>>; + #[derive(Debug, PartialEq)] + pub struct FailureData { + pub sender: ComponentId, + pub error_enum: u64, + pub fail_data: Vec, + pub time_stamp: Vec, + } + + #[derive(Debug, PartialEq)] + pub enum VerificationReportInfo { + Added, + AcceptanceSuccess(SuccessData), + AcceptanceFailure(FailureData), + StartedSuccess(SuccessData), + StartedFailure(FailureData), + StepSuccess { data: SuccessData, step: u16 }, + StepFailure(FailureData), + CompletionSuccess(SuccessData), + CompletionFailure(FailureData), + } - #[derive(Clone)] pub struct TestVerificationReporter { - pub verification_map: SharedVerificationMap, + pub id: ComponentId, + pub report_queue: RefCell>, } impl TestVerificationReporter { - pub fn new(verification_map: SharedVerificationMap) -> Self { - Self { verification_map } + pub fn new(id: ComponentId) -> Self { + Self { + id, + report_queue: Default::default(), + } } } impl VerificationReportingProvider for TestVerificationReporter { + fn set_apid(&mut self, _apid: Apid) {} + + fn apid(&self) -> Apid { + 0 + } + fn add_tc_with_req_id(&mut self, req_id: RequestId) -> VerificationToken { - let verif_map = self.verification_map.lock().unwrap(); - verif_map.borrow_mut().insert( - req_id, - VerificationStatus { - accepted: None, - started: None, - step: 0, - step_status: None, - completed: None, - failure_data: None, - fail_enum: None, - }, - ); + self.report_queue + .borrow_mut() + .push_back((req_id, VerificationReportInfo::Added)); VerificationToken { - state: core::marker::PhantomData, - req_id, + state: PhantomData, + request_id: req_id, } } fn acceptance_success( &self, + _sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, - _time_stamp: &[u8], - ) -> Result, EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => entry.accepted = Some(true), - None => panic!( - "unexpected acceptance 
success for request ID {}", - token.req_id() - ), - }; + time_stamp: &[u8], + ) -> Result, EcssTmtcError> { + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::AcceptanceSuccess(SuccessData { + sender: self.owner_id(), + time_stamp: time_stamp.to_vec(), + }), + )); Ok(VerificationToken { - state: core::marker::PhantomData, - req_id: token.req_id, + state: PhantomData, + request_id: token.request_id, }) } fn acceptance_failure( &self, + _sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => { - entry.accepted = Some(false); - entry.failure_data = Some(params.failure_data.to_vec()); - entry.fail_enum = Some(params.failure_code.value()); - } - None => panic!( - "unexpected acceptance failure for request ID {}", - token.req_id() - ), - }; + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::AcceptanceFailure(FailureData { + sender: self.owner_id(), + error_enum: params.failure_code.value(), + fail_data: params.failure_data.to_vec(), + time_stamp: params.time_stamp.to_vec(), + }), + )); Ok(()) } fn start_success( &self, - token: VerificationToken, - _time_stamp: &[u8], - ) -> Result, EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => entry.started = Some(true), - None => panic!("unexpected start success for request ID {}", token.req_id()), - }; + _sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, + time_stamp: &[u8], + ) -> Result, EcssTmtcError> { + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::StartedSuccess(SuccessData { + sender: self.owner_id(), + time_stamp: time_stamp.to_vec(), + }), + )); Ok(VerificationToken { - state: core::marker::PhantomData, - req_id: token.req_id, + state: PhantomData, + request_id: token.request_id, }) } fn start_failure( &self, + _sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => { - entry.started = Some(false); - entry.failure_data = Some(params.failure_data.to_vec()); - entry.fail_enum = Some(params.failure_code.value()); - } - None => panic!("unexpected start failure for request ID {}", token.req_id()), - }; + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::StartedFailure(FailureData { + sender: self.owner_id(), + error_enum: params.failure_code.value(), + fail_data: params.failure_data.to_vec(), + time_stamp: params.time_stamp.to_vec(), + }), + )); Ok(()) } fn step_success( &self, - token: &VerificationToken, - _time_stamp: &[u8], - step: impl spacepackets::ecss::EcssEnumeration, + _sender: &(impl EcssTmSenderCore + ?Sized), + token: &VerificationToken, + time_stamp: &[u8], + step: impl EcssEnumeration, ) -> Result<(), EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => { - entry.step = step.value(); - entry.step_status = Some(true); - } - None => panic!("unexpected start success for request ID {}", token.req_id()), - }; + self.report_queue.borrow_mut().push_back(( + token.request_id(), + 
VerificationReportInfo::StepSuccess { + data: SuccessData { + sender: self.owner_id(), + time_stamp: time_stamp.to_vec(), + }, + step: step.value() as u16, + }, + )); Ok(()) } fn step_failure( &self, - token: VerificationToken, - _params: FailParamsWithStep, + _sender: &(impl EcssTmSenderCore + ?Sized), + token: VerificationToken, + params: FailParamsWithStep, ) -> Result<(), EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => { - entry.step_status = Some(false); - } - None => panic!("unexpected start success for request ID {}", token.req_id()), - }; + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::StepFailure(FailureData { + sender: self.owner_id(), + error_enum: params.common.failure_code.value(), + fail_data: params.common.failure_data.to_vec(), + time_stamp: params.common.time_stamp.to_vec(), + }), + )); Ok(()) } fn completion_success( &self, + _sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, - _time_stamp: &[u8], + time_stamp: &[u8], ) -> Result<(), EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => entry.completed = Some(true), - None => panic!( - "unexpected acceptance success for request ID {}", - token.req_id() - ), - }; + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::CompletionSuccess(SuccessData { + sender: self.owner_id(), + time_stamp: time_stamp.to_vec(), + }), + )); Ok(()) } - fn completion_failure( + fn completion_failure( &self, + _sender: &(impl EcssTmSenderCore + ?Sized), token: VerificationToken, params: FailParams, ) -> Result<(), EcssTmtcError> { - let verif_map = self.verification_map.lock().unwrap(); - match verif_map.borrow_mut().get_mut(&token.req_id) { - Some(entry) => { - entry.completed = Some(false); - entry.failure_data = Some(params.failure_data.to_vec()); - entry.fail_enum = Some(params.failure_code.value()); - } - None => panic!( - "unexpected acceptance success for request ID {}", - token.req_id() - ), - }; + self.report_queue.borrow_mut().push_back(( + token.request_id(), + VerificationReportInfo::CompletionFailure(FailureData { + sender: self.owner_id(), + error_enum: params.failure_code.value(), + fail_data: params.failure_data.to_vec(), + time_stamp: params.time_stamp.to_vec(), + }), + )); Ok(()) } + + fn owner_id(&self) -> ComponentId { + self.id + } } - const TEST_APID: u16 = 0x02; + impl TestVerificationReporter { + pub fn check_next_was_added(&self, request_id: RequestId) { + let (last_report_req_id, info) = self + .report_queue + .borrow_mut() + .pop_front() + .expect("report queue is empty"); + assert_eq!(request_id, last_report_req_id); + assert_eq!(info, VerificationReportInfo::Added); + } + pub fn check_next_is_acceptance_success(&self, sender_id: ComponentId, req_id: RequestId) { + let (last_report_req_id, info) = self + .report_queue + .borrow_mut() + .pop_front() + .expect("report queue is empty"); + assert_eq!(req_id, last_report_req_id); + if let VerificationReportInfo::AcceptanceSuccess(data) = info { + assert_eq!(data.sender, sender_id); + return; + } + panic!("next message is not acceptance success message") + } + + pub fn check_next_is_started_success(&self, sender_id: ComponentId, req_id: RequestId) { + let (last_report_req_id, info) = self + .report_queue + .borrow_mut() + .pop_front() + .expect("report queue is empty"); + 
assert_eq!(req_id, last_report_req_id); + if let VerificationReportInfo::StartedSuccess(data) = info { + assert_eq!(data.sender, sender_id); + return; + } + panic!("next message is not start success message") + } + + pub fn check_next_is_step_success( + &self, + sender_id: ComponentId, + request_id: RequestId, + expected_step: u16, + ) { + let (last_report_req_id, info) = self + .report_queue + .borrow_mut() + .pop_front() + .expect("report queue is empty"); + assert_eq!(request_id, last_report_req_id); + if let VerificationReportInfo::StepSuccess { data, step } = info { + assert_eq!(data.sender, sender_id); + assert_eq!(expected_step, step); + return; + } + panic!("next message is not step success message: {info:?}") + } + + pub fn check_next_is_step_failure( + &self, + sender_id: ComponentId, + request_id: RequestId, + error_code: u64, + ) { + let (last_report_req_id, info) = self + .report_queue + .borrow_mut() + .pop_front() + .expect("report queue is empty"); + assert_eq!(request_id, last_report_req_id); + if let VerificationReportInfo::StepFailure(data) = info { + assert_eq!(data.sender, sender_id); + assert_eq!(data.error_enum, error_code); + return; + } + panic!("next message is not step failure message") + } + + pub fn check_next_is_completion_success( + &self, + sender_id: ComponentId, + request_id: RequestId, + ) { + let (last_report_req_id, info) = self + .report_queue + .borrow_mut() + .pop_front() + .expect("report queue is empty"); + assert_eq!(request_id, last_report_req_id); + if let VerificationReportInfo::CompletionSuccess(data) = info { + assert_eq!(data.sender, sender_id); + return; + } + panic!("next message is not completion success message: {info:?}") + } + + pub fn check_next_is_completion_failure( + &mut self, + sender_id: ComponentId, + request_id: RequestId, + error_code: u64, + ) { + let (last_report_req_id, info) = self + .report_queue + .get_mut() + .pop_front() + .expect("report queue is empty"); + assert_eq!(request_id, last_report_req_id); + if let VerificationReportInfo::CompletionFailure(data) = info { + assert_eq!(data.sender, sender_id); + assert_eq!(data.error_enum, error_code); + return; + } + panic!("next message is not completion failure message: {info:?}") + } + + pub fn assert_full_completion_success( + &mut self, + sender_id: ComponentId, + request_id: RequestId, + expected_steps: Option, + ) { + self.check_next_was_added(request_id); + self.check_next_is_acceptance_success(sender_id, request_id); + self.check_next_is_started_success(sender_id, request_id); + if let Some(highest_num) = expected_steps { + for i in 0..highest_num { + self.check_next_is_step_success(sender_id, request_id, i); + } + } + self.check_next_is_completion_success(sender_id, request_id); + } + + pub fn assert_completion_failure( + &mut self, + sender_id: ComponentId, + request_id: RequestId, + expected_steps: Option, + error_code: u64, + ) { + self.check_next_was_added(request_id); + self.check_next_is_acceptance_success(sender_id, request_id); + self.check_next_is_started_success(sender_id, request_id); + if let Some(highest_num) = expected_steps { + for i in 0..highest_num { + self.check_next_is_step_success(sender_id, request_id, i); + } + } + self.check_next_is_completion_failure(sender_id, request_id, error_code); + } + + pub fn get_next_verification_message(&mut self) -> (RequestId, VerificationReportInfo) { + self.report_queue + .get_mut() + .pop_front() + .expect("report queue is empty") + } + /* + pub fn verification_info(&self, req_id: &RequestId) -> Option { + 
let verif_map = self.verification_map.lock().unwrap(); + let value = verif_map.borrow().get(req_id).cloned(); + value + } + + + pub fn check_started(&self, req_id: &RequestId) -> bool { + let verif_map = self.verification_map.lock().unwrap(); + if let Some(entry) = verif_map.borrow().get(req_id) { + return entry.started.unwrap_or(false); + } + false + } + + fn generic_completion_checks( + entry: &VerificationStatus, + step: Option, + completion_success: bool, + ) { + assert!(entry.accepted.unwrap()); + assert!(entry.started.unwrap()); + if let Some(step) = step { + assert!(entry.step_status.unwrap()); + assert_eq!(entry.step, step); + } else { + assert!(entry.step_status.is_none()); + } + assert_eq!(entry.completed.unwrap(), completion_success); + } + + + pub fn assert_completion_failure( + &self, + req_id: &RequestId, + step: Option, + error_code: u64, + ) { + let verif_map = self.verification_map.lock().unwrap(); + if let Some(entry) = verif_map.borrow().get(req_id) { + Self::generic_completion_checks(entry, step, false); + assert_eq!(entry.fail_enum.unwrap(), error_code); + return; + } + panic!("request not in verification map"); + } + + pub fn completion_status(&self, req_id: &RequestId) -> Option { + let verif_map = self.verification_map.lock().unwrap(); + if let Some(entry) = verif_map.borrow().get(req_id) { + return entry.completed; + } + panic!("request not in verification map"); + } + */ + } +} + +#[cfg(test)] +pub mod tests { + use crate::pool::{StaticMemoryPool, StaticPoolConfig}; + use crate::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0}; + use crate::pus::tests::CommonTmInfo; + use crate::pus::verification::{ + EcssTmSenderCore, EcssTmtcError, FailParams, FailParamsWithStep, RequestId, TcStateNone, + VerificationReporter, VerificationReporterCfg, VerificationToken, + }; + use crate::pus::{ChannelWithId, MpscTmInSharedPoolSender, PusTmVariant}; + use crate::request::MessageMetadata; + use crate::seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore}; + use crate::tmtc::tm_helper::SharedTmPool; + use crate::ComponentId; + use alloc::format; + use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader}; + use spacepackets::ecss::{ + EcssEnumU16, EcssEnumU32, EcssEnumU8, EcssEnumeration, PusError, PusPacket, + WritablePusPacket, + }; + use spacepackets::util::UnsignedEnum; + use spacepackets::{ByteConversionError, SpHeader}; + use std::cell::RefCell; + use std::collections::VecDeque; + use std::sync::mpsc; + use std::vec; + use std::vec::Vec; + + use super::{ + DummyVerificationHook, SeqCountProviderSimple, TcStateAccepted, TcStateStarted, + VerificationHookProvider, VerificationReportingProvider, WasAtLeastAccepted, + }; + + fn is_send(_: &T) {} + #[allow(dead_code)] + fn is_sync(_: &T) {} + const EMPTY_STAMP: [u8; 7] = [0; 7]; #[derive(Debug, Eq, PartialEq, Clone)] struct TmInfo { + pub requestor: MessageMetadata, pub common: CommonTmInfo, - pub req_id: RequestId, pub additional_data: Option>, } @@ -1465,8 +1683,8 @@ pub mod tests { pub service_queue: RefCell>, } - impl EcssChannel for TestSender { - fn channel_id(&self) -> ChannelId { + impl ChannelWithId for TestSender { + fn id(&self) -> ComponentId { 0 } fn name(&self) -> &'static str { @@ -1475,12 +1693,12 @@ pub mod tests { } impl EcssTmSenderCore for TestSender { - fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { + fn send_tm(&self, sender_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> { match tm { - PusTmWrapper::InStore(_) => { + 
PusTmVariant::InStore(_) => { panic!("TestSender: Can not deal with addresses"); } - PusTmWrapper::Direct(tm) => { + PusTmVariant::Direct(tm) => { assert_eq!(PusPacket::service(&tm), 1); assert!(!tm.source_data().is_empty()); let mut time_stamp = [0; 7]; @@ -1496,8 +1714,8 @@ pub mod tests { vec = Some(new_vec); } self.service_queue.borrow_mut().push_back(TmInfo { + requestor: MessageMetadata::new(req_id.into(), sender_id), common: CommonTmInfo::new_from_tm(&tm), - req_id, additional_data: vec, }); Ok(()) @@ -1506,463 +1724,575 @@ pub mod tests { } } - struct TestBase<'a> { - vr: VerificationReporter, - #[allow(dead_code)] - tc: PusTcCreator<'a>, + #[derive(Default)] + pub struct SequenceCounterHook { + pub seq_counter: CcsdsSimpleSeqCountProvider, + pub msg_counter: SeqCountProviderSimple, } - impl<'a> TestBase<'a> { - fn rep(&mut self) -> &mut VerificationReporter { - &mut self.vr - } - } - struct TestBaseWithHelper<'a, Sender: EcssTmSenderCore + Clone + 'static> { - helper: VerificationReporterWithSender, - #[allow(dead_code)] - tc: PusTcCreator<'a>, - } - - impl<'a, Sender: EcssTmSenderCore + Clone + 'static> TestBaseWithHelper<'a, Sender> { - fn rep(&mut self) -> &mut VerificationReporter { - &mut self.helper.reporter + impl VerificationHookProvider for SequenceCounterHook { + fn modify_tm(&self, tm: &mut spacepackets::ecss::tm::PusTmCreator) { + tm.set_seq_count(self.seq_counter.get_and_increment()); + tm.set_msg_counter(self.msg_counter.get_and_increment()); } } - fn base_reporter() -> VerificationReporter { + struct VerificationReporterTestbench< + VerificationHook: VerificationHookProvider = DummyVerificationHook, + > { + pub id: ComponentId, + sender: TestSender, + reporter: VerificationReporter, + pub request_id: RequestId, + tc: Vec, + } + + fn base_reporter(id: ComponentId) -> VerificationReporter { let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); - VerificationReporter::new(&cfg) + VerificationReporter::new(id, &cfg) } - fn base_tc_init(app_data: Option<&[u8]>) -> (PusTcCreator, RequestId) { - let mut sph = SpHeader::tc_unseg(TEST_APID, 0x34, 0).unwrap(); + fn reporter_with_hook( + id: ComponentId, + hook: VerificationHook, + ) -> VerificationReporter { + let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); + VerificationReporter::new_with_hook(id, &cfg, hook) + } + + impl VerificationReporterTestbench { + fn new_with_hook(id: ComponentId, tc: PusTcCreator, tm_hook: VerificiationHook) -> Self { + let reporter = reporter_with_hook(id, tm_hook); + Self { + id, + sender: TestSender::default(), + reporter, + request_id: RequestId::new(&tc), + tc: tc.to_vec().unwrap(), + } + } + + #[allow(dead_code)] + fn set_dest_id(&mut self, dest_id: u16) { + self.reporter.set_dest_id(dest_id); + } + + fn init(&mut self) -> VerificationToken { + self.reporter.add_tc(&PusTcReader::new(&self.tc).unwrap().0) + } + + fn acceptance_success( + &self, + token: VerificationToken, + time_stamp: &[u8], + ) -> Result, EcssTmtcError> { + self.reporter + .acceptance_success(&self.sender, token, time_stamp) + } + + fn acceptance_failure( + &self, + token: VerificationToken, + params: FailParams, + ) -> Result<(), EcssTmtcError> { + self.reporter + .acceptance_failure(&self.sender, token, params) + } + + fn start_success( + &self, + token: VerificationToken, + time_stamp: &[u8], + ) -> Result, EcssTmtcError> { + self.reporter.start_success(&self.sender, token, time_stamp) + } + + fn start_failure( + &self, + token: VerificationToken, + params: FailParams, + ) -> Result<(), 
EcssTmtcError> { + self.reporter.start_failure(&self.sender, token, params) + } + + fn step_success( + &self, + token: &VerificationToken, + time_stamp: &[u8], + step: impl EcssEnumeration, + ) -> Result<(), EcssTmtcError> { + self.reporter + .step_success(&self.sender, token, time_stamp, step) + } + + fn step_failure( + &self, + token: VerificationToken, + params: FailParamsWithStep, + ) -> Result<(), EcssTmtcError> { + self.reporter.step_failure(&self.sender, token, params) + } + + fn completion_success( + &self, + token: VerificationToken, + time_stamp: &[u8], + ) -> Result<(), EcssTmtcError> { + self.reporter + .completion_success(&self.sender, token, time_stamp) + } + + fn completion_failure( + &self, + token: VerificationToken, + params: FailParams, + ) -> Result<(), EcssTmtcError> { + self.reporter + .completion_failure(&self.sender, token, params) + } + + fn completion_success_check(&mut self, incrementing_couters: bool) { + assert_eq!(self.sender.service_queue.borrow().len(), 3); + let mut current_seq_count = 0; + let cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 1, + apid: TEST_APID, + seq_count: current_seq_count, + msg_counter: current_seq_count, + dest_id: self.reporter.dest_id(), + time_stamp: EMPTY_STAMP, + }, + additional_data: None, + }; + let mut info = self.sender.service_queue.borrow_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + + if incrementing_couters { + current_seq_count += 1; + } + + let cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 3, + apid: TEST_APID, + msg_counter: current_seq_count, + seq_count: current_seq_count, + dest_id: self.reporter.dest_id(), + time_stamp: [0, 1, 0, 1, 0, 1, 0], + }, + additional_data: None, + }; + info = self.sender.service_queue.borrow_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + + if incrementing_couters { + current_seq_count += 1; + } + let cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 7, + apid: TEST_APID, + msg_counter: current_seq_count, + seq_count: current_seq_count, + dest_id: self.reporter.dest_id(), + time_stamp: EMPTY_STAMP, + }, + additional_data: None, + }; + info = self.sender.service_queue.borrow_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + } + } + + impl VerificationReporterTestbench { + fn new(id: ComponentId, tc: PusTcCreator) -> Self { + let reporter = base_reporter(id); + Self { + id, + sender: TestSender::default(), + reporter, + request_id: RequestId::new(&tc), + tc: tc.to_vec().unwrap(), + } + } + + fn acceptance_check(&self, time_stamp: &[u8; 7]) { + let cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 1, + apid: TEST_APID, + seq_count: 0, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: *time_stamp, + }, + additional_data: None, + }; + let mut service_queue = self.sender.service_queue.borrow_mut(); + assert_eq!(service_queue.len(), 1); + let info = service_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + } + + fn acceptance_fail_check(&mut self, stamp_buf: [u8; 7]) { + let cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo { + subservice: 2, + seq_count: 0, + apid: TEST_APID, + msg_counter: 0, + dest_id: self.reporter.dest_id(), + time_stamp: stamp_buf, + }, + 
additional_data: Some([0, 2].to_vec()), + }; + let service_queue = self.sender.service_queue.get_mut(); + assert_eq!(service_queue.len(), 1); + let info = service_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + } + + fn start_fail_check(&mut self, fail_data_raw: [u8; 4]) { + let mut srv_queue = self.sender.service_queue.borrow_mut(); + assert_eq!(srv_queue.len(), 2); + let mut cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo::new_zero_seq_count(1, TEST_APID, 0, EMPTY_STAMP), + additional_data: None, + }; + let mut info = srv_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo::new_zero_seq_count(4, TEST_APID, 0, EMPTY_STAMP), + additional_data: Some([&[22], fail_data_raw.as_slice()].concat().to_vec()), + }; + info = srv_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + } + + fn step_success_check(&mut self, time_stamp: &[u8; 7]) { + let mut cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo::new_zero_seq_count(1, TEST_APID, 0, *time_stamp), + additional_data: None, + }; + let mut srv_queue = self.sender.service_queue.borrow_mut(); + let mut info = srv_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo::new_zero_seq_count(3, TEST_APID, 0, *time_stamp), + additional_data: None, + }; + info = srv_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo::new_zero_seq_count(5, TEST_APID, 0, *time_stamp), + additional_data: Some([0].to_vec()), + }; + info = srv_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo::new_zero_seq_count(5, TEST_APID, 0, *time_stamp), + additional_data: Some([1].to_vec()), + }; + info = srv_queue.pop_front().unwrap(); + assert_eq!(info, cmp_info); + } + + fn check_step_failure(&mut self, fail_data_raw: [u8; 4]) { + assert_eq!(self.sender.service_queue.borrow().len(), 4); + let mut cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo::new_zero_seq_count( + 1, + TEST_APID, + self.reporter.dest_id(), + EMPTY_STAMP, + ), + additional_data: None, + }; + let mut info = self.sender.service_queue.borrow_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo::new_zero_seq_count( + 3, + TEST_APID, + self.reporter.dest_id(), + [0, 1, 0, 1, 0, 1, 0], + ), + additional_data: None, + }; + info = self.sender.service_queue.borrow_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo::new_zero_seq_count( + 5, + TEST_APID, + self.reporter.dest_id(), + EMPTY_STAMP, + ), + additional_data: Some([0].to_vec()), + }; + info = self.sender.service_queue.get_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo::new_zero_seq_count( + 6, + TEST_APID, + self.reporter.dest_id(), + 
EMPTY_STAMP, + ), + additional_data: Some( + [ + [1].as_slice(), + &[0, 0, 0x10, 0x20], + fail_data_raw.as_slice(), + ] + .concat() + .to_vec(), + ), + }; + info = self.sender.service_queue.get_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + } + + fn completion_fail_check(&mut self) { + assert_eq!(self.sender.service_queue.borrow().len(), 3); + + let mut cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo::new_zero_seq_count( + 1, + TEST_APID, + self.reporter.dest_id(), + EMPTY_STAMP, + ), + additional_data: None, + }; + let mut info = self.sender.service_queue.get_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo::new_zero_seq_count( + 3, + TEST_APID, + self.reporter.dest_id(), + [0, 1, 0, 1, 0, 1, 0], + ), + additional_data: None, + }; + info = self.sender.service_queue.get_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + + cmp_info = TmInfo { + requestor: MessageMetadata::new(self.request_id.into(), self.id), + common: CommonTmInfo::new_zero_seq_count( + 8, + TEST_APID, + self.reporter.dest_id(), + EMPTY_STAMP, + ), + additional_data: Some([0, 0, 0x10, 0x20].to_vec()), + }; + info = self.sender.service_queue.get_mut().pop_front().unwrap(); + assert_eq!(info, cmp_info); + } + } + + fn create_generic_ping() -> PusTcCreator<'static> { + let sph = SpHeader::new_for_unseg_tc(TEST_APID, 0x34, 0); let tc_header = PusTcSecondaryHeader::new_simple(17, 1); - let app_data = app_data.unwrap_or(&[]); - let pus_tc = PusTcCreator::new(&mut sph, tc_header, app_data, true); - let req_id = RequestId::new(&pus_tc); - (pus_tc, req_id) - } - - fn base_init(api_sel: bool) -> (TestBase<'static>, VerificationToken) { - let mut reporter = base_reporter(); - let (tc, req_id) = base_tc_init(None); - let init_tok = if api_sel { - reporter.add_tc_with_req_id(req_id) - } else { - reporter.add_tc(&tc) - }; - (TestBase { vr: reporter, tc }, init_tok) - } - - fn base_with_helper_init() -> ( - TestBaseWithHelper<'static, TestSender>, - VerificationToken, - ) { - let mut reporter = base_reporter(); - let (tc, _) = base_tc_init(None); - let init_tok = reporter.add_tc(&tc); - let sender = TestSender::default(); - let helper = VerificationReporterWithSender::new_from_reporter(reporter, sender); - (TestBaseWithHelper { helper, tc }, init_tok) - } - - fn acceptance_check(sender: &mut TestSender, req_id: &RequestId) { - let cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 1, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: None, - req_id: *req_id, - }; - let mut service_queue = sender.service_queue.borrow_mut(); - assert_eq!(service_queue.len(), 1); - let info = service_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); + PusTcCreator::new(sph, tc_header, &[], true) } #[test] - fn test_mpsc_verif_send_sync() { + fn test_mpsc_verif_send() { let pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(8, 8)], false)); let shared_tm_store = SharedTmPool::new(pool); let (tx, _) = mpsc::channel(); - let mpsc_verif_sender = - TmInSharedPoolSenderWithMpsc::new(0, "verif_sender", shared_tm_store, tx); + let mpsc_verif_sender = MpscTmInSharedPoolSender::new(shared_tm_store, tx); is_send(&mpsc_verif_sender); } #[test] fn test_state() { - let (mut b, _) = base_init(false); - assert_eq!(b.vr.apid(), TEST_APID); - b.vr.set_apid(TEST_APID + 1); - assert_eq!(b.vr.apid(), 
TEST_APID + 1); + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + assert_eq!(testbench.reporter.apid(), TEST_APID); + testbench.reporter.set_apid(TEST_APID + 1); + assert_eq!(testbench.reporter.apid(), TEST_APID + 1); } #[test] fn test_basic_acceptance_success() { - let (b, tok) = base_init(false); - let mut sender = TestSender::default(); - b.vr.acceptance_success(tok, &sender, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - acceptance_check(&mut sender, &tok.req_id); - } - - #[test] - fn test_basic_acceptance_success_with_helper() { - let (mut b, tok) = base_with_helper_init(); - b.helper - .acceptance_success(tok, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - acceptance_check(&mut b.helper.sender, &tok.req_id); - } - - fn acceptance_fail_check(sender: &mut TestSender, req_id: RequestId, stamp_buf: [u8; 7]) { - let cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 2, - apid: TEST_APID, - msg_counter: 0, - dest_id: 5, - time_stamp: stamp_buf, - }, - additional_data: Some([0, 2].to_vec()), - req_id, - }; - let mut service_queue = sender.service_queue.borrow_mut(); - assert_eq!(service_queue.len(), 1); - let info = service_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let token = testbench.init(); + testbench + .acceptance_success(token, &EMPTY_STAMP) + .expect("sending acceptance success failed"); + testbench.acceptance_check(&EMPTY_STAMP); } #[test] fn test_basic_acceptance_failure() { - let (mut b, tok) = base_init(true); - b.rep().reporter.dest_id = 5; + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let init_token = testbench.init(); let stamp_buf = [1, 2, 3, 4, 5, 6, 7]; - let mut sender = TestSender::default(); let fail_code = EcssEnumU16::new(2); let fail_params = FailParams::new_no_fail_data(stamp_buf.as_slice(), &fail_code); - b.vr.acceptance_failure(tok, &sender, fail_params) - .expect("Sending acceptance success failed"); - acceptance_fail_check(&mut sender, tok.req_id, stamp_buf); + testbench + .acceptance_failure(init_token, fail_params) + .expect("sending acceptance failure failed"); + testbench.acceptance_fail_check(stamp_buf); } #[test] fn test_basic_acceptance_failure_with_helper() { - let (mut b, tok) = base_with_helper_init(); - b.rep().reporter.dest_id = 5; + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let init_token = testbench.init(); let stamp_buf = [1, 2, 3, 4, 5, 6, 7]; let fail_code = EcssEnumU16::new(2); let fail_params = FailParams::new_no_fail_data(stamp_buf.as_slice(), &fail_code); - b.helper - .acceptance_failure(tok, fail_params) - .expect("Sending acceptance success failed"); - acceptance_fail_check(&mut b.helper.sender, tok.req_id, stamp_buf); + testbench + .acceptance_failure(init_token, fail_params) + .expect("sending acceptance failure failed"); + testbench.acceptance_fail_check(stamp_buf); } #[test] fn test_acceptance_fail_data_too_large() { - let (mut b, tok) = base_with_helper_init(); - b.rep().reporter.dest_id = 5; + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let init_token = testbench.init(); let stamp_buf = [1, 2, 3, 4, 5, 6, 7]; let fail_code = EcssEnumU16::new(2); let fail_data: [u8; 16] = [0; 16]; // 4 req ID + 1 byte step + 2 byte error code + 8 byte fail data - assert_eq!(b.rep().allowed_source_data_len(), 15); + 
assert_eq!(testbench.reporter.allowed_source_data_len(), 15); let fail_params = FailParams::new(stamp_buf.as_slice(), &fail_code, fail_data.as_slice()); - let res = b.helper.acceptance_failure(tok, fail_params); - assert!(res.is_err()); - let err_with_token = res.unwrap_err(); - match err_with_token { + let result = testbench.acceptance_failure(init_token, fail_params); + assert!(result.is_err()); + let error = result.unwrap_err(); + match error { EcssTmtcError::Pus(PusError::ByteConversion(e)) => match e { ByteConversionError::ToSliceTooSmall { found, expected } => { assert_eq!( expected, fail_data.len() + RequestId::SIZE_AS_BYTES + fail_code.size() ); - assert_eq!(found, b.rep().allowed_source_data_len()); + assert_eq!(found, testbench.reporter.allowed_source_data_len()); } _ => { panic!("{}", format!("Unexpected error {:?}", e)) } }, _ => { - panic!("{}", format!("Unexpected error {:?}", err_with_token)) + panic!("{}", format!("Unexpected error {:?}", error)) } } } #[test] fn test_basic_acceptance_failure_with_fail_data() { - let (b, tok) = base_init(false); - let sender = TestSender::default(); + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); let fail_code = EcssEnumU8::new(10); let fail_data = EcssEnumU32::new(12); let mut fail_data_raw = [0; 4]; fail_data.write_to_be_bytes(&mut fail_data_raw).unwrap(); let fail_params = FailParams::new(&EMPTY_STAMP, &fail_code, fail_data_raw.as_slice()); - b.vr.acceptance_failure(tok, &sender, fail_params) - .expect("Sending acceptance success failed"); + let init_token = testbench.init(); + testbench + .acceptance_failure(init_token, fail_params) + .expect("sending acceptance failure failed"); let cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 2, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, + requestor: MessageMetadata::new(testbench.request_id.into(), testbench.id), + common: CommonTmInfo::new_zero_seq_count(2, TEST_APID, 0, EMPTY_STAMP), additional_data: Some([10, 0, 0, 0, 12].to_vec()), - req_id: tok.req_id, }; - let mut service_queue = sender.service_queue.borrow_mut(); + let mut service_queue = testbench.sender.service_queue.borrow_mut(); assert_eq!(service_queue.len(), 1); let info = service_queue.pop_front().unwrap(); assert_eq!(info, cmp_info); } - fn start_fail_check(sender: &mut TestSender, req_id: RequestId, fail_data_raw: [u8; 4]) { - let mut srv_queue = sender.service_queue.borrow_mut(); - assert_eq!(srv_queue.len(), 2); - let mut cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 1, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: None, - req_id, - }; - let mut info = srv_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); - - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 4, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: Some([&[22], fail_data_raw.as_slice()].concat().to_vec()), - req_id, - }; - info = srv_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); - } - #[test] fn test_start_failure() { - let (b, tok) = base_init(false); - let mut sender = TestSender::default(); + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let init_token = testbench.init(); let fail_code = EcssEnumU8::new(22); let fail_data: i32 = -12; let mut fail_data_raw = [0; 4]; fail_data_raw.copy_from_slice(fail_data.to_be_bytes().as_slice()); let fail_params = FailParams::new(&EMPTY_STAMP, &fail_code, 
fail_data_raw.as_slice()); - let accepted_token = - b.vr.acceptance_success(tok, &sender, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - b.vr.start_failure(accepted_token, &sender, fail_params) + let accepted_token = testbench + .acceptance_success(init_token, &EMPTY_STAMP) + .expect("Sending acceptance success failed"); + testbench + .start_failure(accepted_token, fail_params) .expect("Start failure failure"); - start_fail_check(&mut sender, tok.req_id, fail_data_raw); + testbench.start_fail_check(fail_data_raw); } #[test] fn test_start_failure_with_helper() { - let (mut b, tok) = base_with_helper_init(); + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let token = testbench.init(); let fail_code = EcssEnumU8::new(22); let fail_data: i32 = -12; let mut fail_data_raw = [0; 4]; fail_data_raw.copy_from_slice(fail_data.to_be_bytes().as_slice()); let fail_params = FailParams::new(&EMPTY_STAMP, &fail_code, fail_data_raw.as_slice()); - let accepted_token = b - .helper - .acceptance_success(tok, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - b.helper + let accepted_token = testbench + .acceptance_success(token, &EMPTY_STAMP) + .expect("acceptance failed"); + testbench .start_failure(accepted_token, fail_params) - .expect("Start failure failure"); - start_fail_check(&mut b.helper.sender, tok.req_id, fail_data_raw); - } - - fn step_success_check(sender: &mut TestSender, req_id: RequestId) { - let mut cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 1, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: None, - req_id, - }; - let mut srv_queue = sender.service_queue.borrow_mut(); - let mut info = srv_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 3, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: [0, 1, 0, 1, 0, 1, 0], - }, - additional_data: None, - req_id, - }; - info = srv_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 5, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: Some([0].to_vec()), - req_id, - }; - info = srv_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 5, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: Some([1].to_vec()), - req_id, - }; - info = srv_queue.pop_front().unwrap(); - assert_eq!(info, cmp_info); + .expect("start failure failed"); + testbench.start_fail_check(fail_data_raw); } #[test] fn test_steps_success() { - let (mut b, tok) = base_init(false); - let mut sender = TestSender::default(); - let accepted_token = b - .rep() - .acceptance_success(tok, &sender, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - let started_token = b - .rep() - .start_success(accepted_token, &sender, &[0, 1, 0, 1, 0, 1, 0]) - .expect("Sending start success failed"); - b.rep() - .step_success(&started_token, &sender, &EMPTY_STAMP, EcssEnumU8::new(0)) - .expect("Sending step 0 success failed"); - b.vr.step_success(&started_token, &sender, &EMPTY_STAMP, EcssEnumU8::new(1)) - .expect("Sending step 1 success failed"); - assert_eq!(sender.service_queue.borrow().len(), 4); - step_success_check(&mut sender, tok.req_id); - } - - #[test] - fn test_steps_success_with_helper() { - let (mut b, tok) = base_with_helper_init(); - let 
accepted_token = b - .helper - .acceptance_success(tok, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - let started_token = b - .helper - .start_success(accepted_token, &[0, 1, 0, 1, 0, 1, 0]) - .expect("Sending start success failed"); - b.helper + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let token = testbench.init(); + let accepted_token = testbench + .acceptance_success(token, &EMPTY_STAMP) + .expect("acceptance failed"); + let started_token = testbench + .start_success(accepted_token, &EMPTY_STAMP) + .expect("acceptance failed"); + testbench .step_success(&started_token, &EMPTY_STAMP, EcssEnumU8::new(0)) - .expect("Sending step 0 success failed"); - b.helper + .expect("step 0 failed"); + testbench .step_success(&started_token, &EMPTY_STAMP, EcssEnumU8::new(1)) - .expect("Sending step 1 success failed"); - assert_eq!(b.helper.sender.service_queue.borrow().len(), 4); - step_success_check(&mut b.helper.sender, tok.req_id); - } - - fn check_step_failure(sender: &mut TestSender, req_id: RequestId, fail_data_raw: [u8; 4]) { - assert_eq!(sender.service_queue.borrow().len(), 4); - let mut cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 1, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: None, - req_id, - }; - let mut info = sender.service_queue.borrow_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); - - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 3, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: [0, 1, 0, 1, 0, 1, 0], - }, - additional_data: None, - req_id, - }; - info = sender.service_queue.borrow_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); - - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 5, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: Some([0].to_vec()), - req_id, - }; - info = sender.service_queue.get_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); - - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 6, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: Some( - [ - [1].as_slice(), - &[0, 0, 0x10, 0x20], - fail_data_raw.as_slice(), - ] - .concat() - .to_vec(), - ), - req_id, - }; - info = sender.service_queue.get_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); + .expect("step 1 failed"); + assert_eq!(testbench.sender.service_queue.borrow().len(), 4); + testbench.step_success_check(&EMPTY_STAMP); } #[test] fn test_step_failure() { - let (b, tok) = base_init(false); - let mut sender = TestSender::default(); - let req_id = tok.req_id; + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let token = testbench.init(); let fail_code = EcssEnumU32::new(0x1020); let fail_data: f32 = -22.3232; let mut fail_data_raw = [0; 4]; @@ -1975,269 +2305,74 @@ pub mod tests { fail_data_raw.as_slice(), ); - let accepted_token = - b.vr.acceptance_success(tok, &sender, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - let started_token = - b.vr.start_success(accepted_token, &sender, &[0, 1, 0, 1, 0, 1, 0]) - .expect("Sending start success failed"); - b.vr.step_success(&started_token, &sender, &EMPTY_STAMP, EcssEnumU8::new(0)) - .expect("Sending completion success failed"); - b.vr.step_failure(started_token, &sender, fail_params) - .expect("Step failure failed"); - check_step_failure(&mut sender, req_id, fail_data_raw); - } - - #[test] - fn 
test_steps_failure_with_helper() { - let (mut b, tok) = base_with_helper_init(); - let req_id = tok.req_id; - let fail_code = EcssEnumU32::new(0x1020); - let fail_data: f32 = -22.3232; - let mut fail_data_raw = [0; 4]; - fail_data_raw.copy_from_slice(fail_data.to_be_bytes().as_slice()); - let fail_step = EcssEnumU8::new(1); - let fail_params = FailParamsWithStep::new( - &EMPTY_STAMP, - &fail_step, - &fail_code, - fail_data_raw.as_slice(), - ); - - let accepted_token = b - .helper - .acceptance_success(tok, &EMPTY_STAMP) + let accepted_token = testbench + .acceptance_success(token, &EMPTY_STAMP) .expect("Sending acceptance success failed"); - let started_token = b - .helper + let started_token = testbench .start_success(accepted_token, &[0, 1, 0, 1, 0, 1, 0]) .expect("Sending start success failed"); - b.helper + testbench .step_success(&started_token, &EMPTY_STAMP, EcssEnumU8::new(0)) .expect("Sending completion success failed"); - b.helper + testbench .step_failure(started_token, fail_params) .expect("Step failure failed"); - check_step_failure(&mut b.helper.sender, req_id, fail_data_raw); - } - - fn completion_fail_check(sender: &mut TestSender, req_id: RequestId) { - assert_eq!(sender.service_queue.borrow().len(), 3); - - let mut cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 1, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: None, - req_id, - }; - let mut info = sender.service_queue.get_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); - - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 3, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: [0, 1, 0, 1, 0, 1, 0], - }, - additional_data: None, - req_id, - }; - info = sender.service_queue.get_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); - - cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 8, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: Some([0, 0, 0x10, 0x20].to_vec()), - req_id, - }; - info = sender.service_queue.get_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); + testbench.check_step_failure(fail_data_raw); } #[test] fn test_completion_failure() { - let (b, tok) = base_init(false); - let mut sender = TestSender::default(); - let req_id = tok.req_id; + let mut testbench = VerificationReporterTestbench::new(0, create_generic_ping()); + let token = testbench.init(); let fail_code = EcssEnumU32::new(0x1020); let fail_params = FailParams::new_no_fail_data(&EMPTY_STAMP, &fail_code); - let accepted_token = - b.vr.acceptance_success(tok, &sender, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - let started_token = - b.vr.start_success(accepted_token, &sender, &[0, 1, 0, 1, 0, 1, 0]) - .expect("Sending start success failed"); - b.vr.completion_failure(started_token, &sender, fail_params) - .expect("Completion failure"); - completion_fail_check(&mut sender, req_id); - } - - #[test] - fn test_completion_failure_with_helper() { - let (mut b, tok) = base_with_helper_init(); - let req_id = tok.req_id; - let fail_code = EcssEnumU32::new(0x1020); - let fail_params = FailParams::new_no_fail_data(&EMPTY_STAMP, &fail_code); - - let accepted_token = b - .helper - .acceptance_success(tok, &EMPTY_STAMP) + let accepted_token = testbench + .acceptance_success(token, &EMPTY_STAMP) .expect("Sending acceptance success failed"); - let started_token = b - .helper + let started_token = testbench .start_success(accepted_token, &[0, 1, 0, 1, 0, 1, 0]) 
.expect("Sending start success failed"); - b.helper + testbench .completion_failure(started_token, fail_params) .expect("Completion failure"); - completion_fail_check(&mut b.helper.sender, req_id); - } - - fn completion_success_check(sender: &mut TestSender, req_id: RequestId) { - assert_eq!(sender.service_queue.borrow().len(), 3); - let cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 1, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: None, - req_id, - }; - let mut info = sender.service_queue.borrow_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); - - let cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 3, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: [0, 1, 0, 1, 0, 1, 0], - }, - additional_data: None, - req_id, - }; - info = sender.service_queue.borrow_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); - let cmp_info = TmInfo { - common: CommonTmInfo { - subservice: 7, - apid: TEST_APID, - msg_counter: 0, - dest_id: 0, - time_stamp: EMPTY_STAMP, - }, - additional_data: None, - req_id, - }; - info = sender.service_queue.borrow_mut().pop_front().unwrap(); - assert_eq!(info, cmp_info); + testbench.completion_fail_check(); } #[test] fn test_complete_success_sequence() { - let (b, tok) = base_init(false); - let mut sender = TestSender::default(); - let accepted_token = - b.vr.acceptance_success(tok, &sender, &EMPTY_STAMP) - .expect("Sending acceptance success failed"); - let started_token = - b.vr.start_success(accepted_token, &sender, &[0, 1, 0, 1, 0, 1, 0]) - .expect("Sending start success failed"); - b.vr.completion_success(started_token, &sender, &EMPTY_STAMP) - .expect("Sending completion success failed"); - completion_success_check(&mut sender, tok.req_id); - } - - #[test] - fn test_complete_success_sequence_with_helper() { - let (mut b, tok) = base_with_helper_init(); - let accepted_token = b - .helper - .acceptance_success(tok, &EMPTY_STAMP) + let mut testbench = + VerificationReporterTestbench::new(TEST_COMPONENT_ID_0.id(), create_generic_ping()); + let token = testbench.init(); + let accepted_token = testbench + .acceptance_success(token, &EMPTY_STAMP) .expect("Sending acceptance success failed"); - let started_token = b - .helper + let started_token = testbench .start_success(accepted_token, &[0, 1, 0, 1, 0, 1, 0]) .expect("Sending start success failed"); - b.helper + testbench .completion_success(started_token, &EMPTY_STAMP) .expect("Sending completion success failed"); - completion_success_check(&mut b.helper.sender, tok.req_id); + testbench.completion_success_check(false); } #[test] - fn test_seq_count_increment() { - let pool_cfg = - StaticPoolConfig::new(vec![(10, 32), (10, 64), (10, 128), (10, 1024)], false); - let tm_pool = StaticMemoryPool::new(pool_cfg.clone()); - let shared_tm_store = SharedTmPool::new(tm_pool); - let shared_tm_pool = shared_tm_store.clone_backing_pool(); - let (verif_tx, verif_rx) = mpsc::channel(); - let sender = - TmInSharedPoolSenderWithId::new(0, "Verification Sender", shared_tm_store, verif_tx); - let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap(); - let mut reporter = VerificationReporterWithSender::new(&cfg, sender); - - let mut sph = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); - let tc_header = PusTcSecondaryHeader::new_simple(17, 1); - let pus_tc_0 = PusTcCreator::new_no_app_data(&mut sph, tc_header, true); - let init_token = reporter.add_tc(&pus_tc_0); - - // Complete success sequence for a telecommand - let 
accepted_token = reporter - .acceptance_success(init_token, &EMPTY_STAMP) - .unwrap(); - let started_token = reporter - .start_success(accepted_token, &EMPTY_STAMP) - .unwrap(); - reporter + fn test_packet_manipulation() { + let mut testbench = VerificationReporterTestbench::new_with_hook( + TEST_COMPONENT_ID_0.id(), + create_generic_ping(), + SequenceCounterHook::default(), + ); + let token = testbench.init(); + let accepted_token = testbench + .acceptance_success(token, &EMPTY_STAMP) + .expect("Sending acceptance success failed"); + let started_token = testbench + .start_success(accepted_token, &[0, 1, 0, 1, 0, 1, 0]) + .expect("Sending start success failed"); + testbench .completion_success(started_token, &EMPTY_STAMP) - .unwrap(); - - // Verify it arrives correctly on receiver end - let mut tm_buf: [u8; 1024] = [0; 1024]; - let mut packet_idx = 0; - while packet_idx < 3 { - let addr = verif_rx.recv_timeout(Duration::from_millis(10)).unwrap(); - let tm_len; - { - let mut rg = shared_tm_pool.write().expect("Error locking shared pool"); - let store_guard = rg.read_with_guard(addr); - tm_len = store_guard - .read(&mut tm_buf) - .expect("Error reading TM slice"); - } - let (pus_tm, _) = - PusTmReader::new(&tm_buf[0..tm_len], 7).expect("Error reading verification TM"); - if packet_idx == 0 { - assert_eq!(pus_tm.subservice(), 1); - assert_eq!(pus_tm.sp_header.seq_count(), 0); - } else if packet_idx == 1 { - assert_eq!(pus_tm.subservice(), 3); - assert_eq!(pus_tm.sp_header.seq_count(), 0); - } else if packet_idx == 2 { - assert_eq!(pus_tm.subservice(), 7); - assert_eq!(pus_tm.sp_header.seq_count(), 0); - } - packet_idx += 1; - } + .expect("Sending completion success failed"); + testbench.completion_success_check(true); } } diff --git a/satrs/src/queue.rs b/satrs/src/queue.rs index 5ba4bdc..93c8ec8 100644 --- a/satrs/src/queue.rs +++ b/satrs/src/queue.rs @@ -4,11 +4,17 @@ use std::error::Error; #[cfg(feature = "std")] use std::sync::mpsc; +use crate::ComponentId; + +/// Generic channel ID type. +pub type ChannelId = u32; + /// Generic error type for sending something via a message queue. -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum GenericSendError { RxDisconnected, QueueFull(Option), + TargetDoesNotExist(ComponentId), } impl Display for GenericSendError { @@ -20,6 +26,9 @@ impl Display for GenericSendError { GenericSendError::QueueFull(max_cap) => { write!(f, "queue with max capacity of {max_cap:?} is full") } + GenericSendError::TargetDoesNotExist(target) => { + write!(f, "target queue with ID {target} does not exist") + } } } } @@ -28,17 +37,17 @@ impl Display for GenericSendError { impl Error for GenericSendError {} /// Generic error type for sending something via a message queue. 
-#[derive(Debug, Copy, Clone)] -pub enum GenericRecvError { +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum GenericReceiveError { Empty, - TxDisconnected, + TxDisconnected(Option), } -impl Display for GenericRecvError { +impl Display for GenericReceiveError { fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { match self { - Self::TxDisconnected => { - write!(f, "tx side has disconnected") + Self::TxDisconnected(channel_id) => { + write!(f, "tx side with id {channel_id:?} has disconnected") } Self::Empty => { write!(f, "nothing to receive") @@ -48,7 +57,43 @@ impl Display for GenericRecvError { } #[cfg(feature = "std")] -impl Error for GenericRecvError {} +impl Error for GenericReceiveError {} + +#[derive(Debug, Clone)] +pub enum GenericTargetedMessagingError { + Send(GenericSendError), + Receive(GenericReceiveError), +} +impl From for GenericTargetedMessagingError { + fn from(value: GenericSendError) -> Self { + Self::Send(value) + } +} + +impl From for GenericTargetedMessagingError { + fn from(value: GenericReceiveError) -> Self { + Self::Receive(value) + } +} + +impl Display for GenericTargetedMessagingError { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + match self { + Self::Send(err) => write!(f, "generic targeted messaging error: {}", err), + Self::Receive(err) => write!(f, "generic targeted messaging error: {}", err), + } + } +} + +#[cfg(feature = "std")] +impl Error for GenericTargetedMessagingError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + match self { + GenericTargetedMessagingError::Send(send) => Some(send), + GenericTargetedMessagingError::Receive(receive) => Some(receive), + } + } +} #[cfg(feature = "std")] impl From> for GenericSendError { diff --git a/satrs/src/request.rs b/satrs/src/request.rs index 24ca497..f2104ed 100644 --- a/satrs/src/request.rs +++ b/satrs/src/request.rs @@ -1,110 +1,586 @@ -use core::fmt; +use core::{fmt, marker::PhantomData}; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "alloc")] +pub use alloc_mod::*; + #[cfg(feature = "std")] -use std::error::Error; +pub use std_mod::*; use spacepackets::{ ecss::{tc::IsPusTelecommand, PusPacket}, ByteConversionError, CcsdsPacket, }; -use crate::TargetId; +use crate::{queue::GenericTargetedMessagingError, ComponentId}; +/// Generic request ID type. Requests can be associated with an ID to have a unique identifier +/// for them. This can be useful for tasks like tracking their progress. +pub type RequestId = u32; + +/// CCSDS APID type definition. Please note that the APID is a 14 bit value. pub type Apid = u16; -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum TargetIdCreationError { - ByteConversion(ByteConversionError), - NotEnoughAppData(usize), +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub struct UniqueApidTargetId { + pub apid: Apid, + pub unique_id: u32, } -impl From for TargetIdCreationError { - fn from(e: ByteConversionError) -> Self { - Self::ByteConversion(e) +impl UniqueApidTargetId { + pub const fn new(apid: Apid, target: u32) -> Self { + Self { + apid, + unique_id: target, + } + } + + pub fn raw(&self) -> ComponentId { + ((self.apid as u64) << 32) | (self.unique_id as u64) + } + + pub fn id(&self) -> ComponentId { + self.raw() + } + + /// This function attempts to build the ID from a PUS telecommand by extracting the APID + /// and the first four bytes of the application data field as the target field. 
+ pub fn from_pus_tc( + tc: &(impl CcsdsPacket + PusPacket + IsPusTelecommand), + ) -> Result { + if tc.user_data().len() < 4 { + return Err(ByteConversionError::FromSliceTooSmall { + found: tc.user_data().len(), + expected: 4, + }); + } + Ok(Self::new( + tc.apid(), + u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap()), + )) } } -impl fmt::Display for TargetIdCreationError { +impl From for UniqueApidTargetId { + fn from(raw: u64) -> Self { + Self { + apid: (raw >> 32) as u16, + unique_id: raw as u32, + } + } +} + +impl From for u64 { + fn from(target_and_apid_id: UniqueApidTargetId) -> Self { + target_and_apid_id.raw() + } +} + +impl fmt::Display for UniqueApidTargetId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::ByteConversion(e) => write!(f, "target ID creation: {}", e), - Self::NotEnoughAppData(len) => { - write!(f, "not enough app data to generate target ID: {}", len) + write!( + f, + "Target and APID ID with APID {:#03x} and target {}", + self.apid, self.unique_id + ) + } +} + +/// This contains metadata information which might be useful when used together with a +/// generic message tpye. +/// +/// This could for example be used to build request/reply patterns or state tracking for request. +#[derive(Debug, Copy, PartialEq, Eq, Clone)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct MessageMetadata { + request_id: RequestId, + sender_id: ComponentId, +} + +impl MessageMetadata { + pub const fn new(request_id: RequestId, sender_id: ComponentId) -> Self { + Self { + request_id, + sender_id, + } + } + + pub fn request_id(&self) -> RequestId { + self.request_id + } + + pub fn sender_id(&self) -> ComponentId { + self.sender_id + } +} + +/// Generic message type which adds [metadata][MessageMetadata] to a generic message typ. +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct GenericMessage { + pub requestor_info: MessageMetadata, + pub message: Message, +} + +impl GenericMessage { + pub fn new(requestor_info: MessageMetadata, message: Message) -> Self { + Self { + requestor_info, + message, + } + } + + delegate::delegate! { + to self.requestor_info { + pub fn request_id(&self) -> RequestId; + pub fn sender_id(&self) -> ComponentId; + } + } +} + +/// Generic trait for objects which can send targeted messages. +pub trait MessageSender: Send { + fn send(&self, message: GenericMessage) -> Result<(), GenericTargetedMessagingError>; +} + +// Generic trait for objects which can receive targeted messages. 
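// Editor's note: illustration only, not part of the patch. A minimal sketch of wrapping a
// payload into a generic message; the request ID and sender ID values are made up.
#[allow(dead_code)]
fn generic_message_sketch() {
    let metadata = MessageMetadata::new(1, 42);
    let message = GenericMessage::new(metadata, 5_u32);
    // The delegated accessors expose the metadata fields directly.
    assert_eq!(message.request_id(), 1);
    assert_eq!(message.sender_id(), 42);
    assert_eq!(message.message, 5);
}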
+pub trait MessageReceiver { + fn try_recv(&self) -> Result>, GenericTargetedMessagingError>; +} + +pub struct MessageWithSenderIdReceiver>(pub R, PhantomData); + +impl> From for MessageWithSenderIdReceiver { + fn from(receiver: R) -> Self { + MessageWithSenderIdReceiver(receiver, PhantomData) + } +} + +impl> MessageWithSenderIdReceiver { + pub fn try_recv_message( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.0.try_recv() + } +} + +pub struct MessageReceiverWithId> { + local_channel_id: ComponentId, + reply_receiver: MessageWithSenderIdReceiver, +} + +impl> MessageReceiverWithId { + pub fn new(local_channel_id: ComponentId, reply_receiver: R) -> Self { + Self { + local_channel_id, + reply_receiver: MessageWithSenderIdReceiver::from(reply_receiver), + } + } + + pub fn local_channel_id(&self) -> ComponentId { + self.local_channel_id + } +} + +impl> MessageReceiverWithId { + pub fn try_recv_message( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.reply_receiver.0.try_recv() + } +} + +#[cfg(feature = "alloc")] +pub mod alloc_mod { + use core::marker::PhantomData; + + use crate::queue::GenericSendError; + + use super::*; + use hashbrown::HashMap; + + pub struct MessageSenderMap>( + pub HashMap, + pub(crate) PhantomData, + ); + + impl> Default for MessageSenderMap { + fn default() -> Self { + Self(Default::default(), PhantomData) + } + } + + impl> MessageSenderMap { + pub fn add_message_target(&mut self, target_id: ComponentId, message_sender: S) { + self.0.insert(target_id, message_sender); + } + + pub fn send_message( + &self, + requestor_info: MessageMetadata, + target_channel_id: ComponentId, + message: MSG, + ) -> Result<(), GenericTargetedMessagingError> { + if self.0.contains_key(&target_channel_id) { + return self + .0 + .get(&target_channel_id) + .unwrap() + .send(GenericMessage::new(requestor_info, message)); } + Err(GenericSendError::TargetDoesNotExist(target_channel_id).into()) + } + } + + pub struct MessageSenderAndReceiver, R: MessageReceiver> { + pub local_channel_id: ComponentId, + pub message_sender_map: MessageSenderMap, + pub message_receiver: MessageWithSenderIdReceiver, + } + + impl, R: MessageReceiver> + MessageSenderAndReceiver + { + pub fn new(local_channel_id: ComponentId, message_receiver: R) -> Self { + Self { + local_channel_id, + message_sender_map: Default::default(), + message_receiver: MessageWithSenderIdReceiver::from(message_receiver), + } + } + + pub fn add_message_target(&mut self, target_id: ComponentId, message_sender: S) { + self.message_sender_map + .add_message_target(target_id, message_sender) + } + + pub fn local_channel_id_generic(&self) -> ComponentId { + self.local_channel_id + } + + /// Try to send a message, which can be a reply or a request, depending on the generics. + pub fn send_message( + &self, + request_id: RequestId, + target_id: ComponentId, + message: TO, + ) -> Result<(), GenericTargetedMessagingError> { + self.message_sender_map.send_message( + MessageMetadata::new(request_id, self.local_channel_id_generic()), + target_id, + message, + ) + } + + /// Try to receive a message, which can be a reply or a request, depending on the generics. 
+ pub fn try_recv_message( + &self, + ) -> Result>, GenericTargetedMessagingError> { + self.message_receiver.try_recv_message() + } + } + + pub struct RequestAndReplySenderAndReceiver< + REQUEST, + REPLY, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > { + pub local_channel_id: ComponentId, + // These 2 are a functional group. + pub request_sender_map: MessageSenderMap, + pub reply_receiver: MessageWithSenderIdReceiver, + // These 2 are a functional group. + pub request_receiver: MessageWithSenderIdReceiver, + pub reply_sender_map: MessageSenderMap, + } + + impl< + REQUEST, + REPLY, + S0: MessageSender, + R0: MessageReceiver, + S1: MessageSender, + R1: MessageReceiver, + > RequestAndReplySenderAndReceiver + { + pub fn new( + local_channel_id: ComponentId, + request_receiver: R1, + reply_receiver: R0, + ) -> Self { + Self { + local_channel_id, + request_receiver: request_receiver.into(), + reply_receiver: reply_receiver.into(), + request_sender_map: Default::default(), + reply_sender_map: Default::default(), + } + } + + pub fn local_channel_id_generic(&self) -> ComponentId { + self.local_channel_id } } } #[cfg(feature = "std")] -impl Error for TargetIdCreationError { - fn source(&self) -> Option<&(dyn Error + 'static)> { - if let Self::ByteConversion(e) = self { - return Some(e); +pub mod std_mod { + + use super::*; + use std::sync::mpsc; + + use crate::queue::{GenericReceiveError, GenericSendError, GenericTargetedMessagingError}; + + impl MessageSender for mpsc::Sender> { + fn send(&self, message: GenericMessage) -> Result<(), GenericTargetedMessagingError> { + self.send(message) + .map_err(|_| GenericSendError::RxDisconnected)?; + Ok(()) } - None } -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub struct TargetAndApidId { - pub apid: Apid, - pub target: u32, -} - -impl TargetAndApidId { - pub fn new(apid: Apid, target: u32) -> Self { - Self { apid, target } - } - - pub fn apid(&self) -> Apid { - self.apid - } - - pub fn target(&self) -> u32 { - self.target - } - - pub fn raw(&self) -> TargetId { - ((self.apid as u64) << 32) | (self.target as u64) - } - - pub fn target_id(&self) -> TargetId { - self.raw() - } - - pub fn from_pus_tc( - tc: &(impl CcsdsPacket + PusPacket + IsPusTelecommand), - ) -> Result { - if tc.user_data().len() < 4 { - return Err(ByteConversionError::FromSliceTooSmall { - found: tc.user_data().len(), - expected: 8, + impl MessageSender for mpsc::SyncSender> { + fn send(&self, message: GenericMessage) -> Result<(), GenericTargetedMessagingError> { + if let Err(e) = self.try_send(message) { + return match e { + mpsc::TrySendError::Full(_) => Err(GenericSendError::QueueFull(None).into()), + mpsc::TrySendError::Disconnected(_) => { + Err(GenericSendError::RxDisconnected.into()) + } + }; } - .into()); + Ok(()) } - Ok(Self { - apid: tc.apid(), - target: u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap()), - }) } + + pub type MessageSenderMapMpsc = MessageReceiverWithId>; + pub type MessageSenderMapBoundedMpsc = MessageReceiverWithId>; + + impl MessageReceiver for mpsc::Receiver> { + fn try_recv(&self) -> Result>, GenericTargetedMessagingError> { + match self.try_recv() { + Ok(msg) => Ok(Some(msg)), + Err(e) => match e { + mpsc::TryRecvError::Empty => Ok(None), + mpsc::TryRecvError::Disconnected => { + Err(GenericReceiveError::TxDisconnected(None).into()) + } + }, + } + } + } + + pub type MessageReceiverWithIdMpsc = MessageReceiverWithId>; } -impl From for TargetAndApidId { - fn from(raw: u64) -> Self { - Self { 
- apid: (raw >> 32) as u16, - target: raw as u32, +#[cfg(test)] +mod tests { + use std::sync::mpsc; + + use alloc::string::ToString; + use spacepackets::{ + ecss::tc::{PusTcCreator, PusTcSecondaryHeader}, + ByteConversionError, SpHeader, + }; + + use crate::{ + queue::{GenericReceiveError, GenericSendError, GenericTargetedMessagingError}, + request::{MessageMetadata, MessageSenderMap}, + }; + + use super::{GenericMessage, MessageReceiverWithId, UniqueApidTargetId}; + + const TEST_CHANNEL_ID_0: u64 = 1; + const TEST_CHANNEL_ID_1: u64 = 2; + const TEST_CHANNEL_ID_2: u64 = 3; + + #[test] + fn test_basic_target_id_with_apid() { + let id = UniqueApidTargetId::new(0x111, 0x01); + assert_eq!(id.apid, 0x111); + assert_eq!(id.unique_id, 0x01); + assert_eq!(id.id(), id.raw()); + assert_eq!(u64::from(id), id.raw()); + let id_raw = id.raw(); + let id_from_raw = UniqueApidTargetId::from(id_raw); + assert_eq!(id_from_raw, id); + assert_eq!(id.id(), (0x111 << 32) | 0x01); + let string = id.to_string(); + assert_eq!( + string, + "Target and APID ID with APID 0x111 and target 1".to_string() + ); + } + + #[test] + fn test_basic_target_id_with_apid_from_pus_tc() { + let sp_header = SpHeader::new_for_unseg_tc(0x111, 5, 0); + let app_data = 1_u32.to_be_bytes(); + let pus_tc = PusTcCreator::new_simple(sp_header, 17, 1, &app_data, true); + let id = UniqueApidTargetId::from_pus_tc(&pus_tc).unwrap(); + assert_eq!(id.apid, 0x111); + assert_eq!(id.unique_id, 1); + } + + #[test] + fn test_basic_target_id_with_apid_from_pus_tc_invalid_app_data() { + let sp_header = SpHeader::new_for_unseg_tc(0x111, 5, 0); + let sec_header = PusTcSecondaryHeader::new_simple(17, 1); + let pus_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true); + let error = UniqueApidTargetId::from_pus_tc(&pus_tc); + assert!(error.is_err()); + let error = error.unwrap_err(); + if let ByteConversionError::FromSliceTooSmall { found, expected } = error { + assert_eq!(found, 0); + assert_eq!(expected, 4); + } else { + panic!("Unexpected error type"); + } + } + + #[test] + fn test_receiver_only() { + let (sender, receiver) = mpsc::channel(); + // Test structure with only a receiver which has a channel ID. + let receiver = MessageReceiverWithId::new(TEST_CHANNEL_ID_0, receiver); + let request_id = 5; + sender + .send(GenericMessage::new( + MessageMetadata::new(request_id, TEST_CHANNEL_ID_1), + 5, + )) + .unwrap(); + let reply = receiver.try_recv_message().unwrap(); + assert!(reply.is_some()); + assert_eq!(receiver.local_channel_id(), TEST_CHANNEL_ID_0); + let reply = reply.unwrap(); + assert_eq!(reply.requestor_info.request_id, request_id); + assert_eq!(reply.requestor_info.sender_id, TEST_CHANNEL_ID_1); + assert_eq!(reply.message, 5); + } + + #[test] + fn test_receiver_empty() { + let (_sender, receiver) = mpsc::sync_channel::>(2); + // Test structure with only a receiver which has a channel ID. + let receiver = MessageReceiverWithId::new(TEST_CHANNEL_ID_0, receiver); + let reply = receiver.try_recv_message().unwrap(); + assert!(reply.is_none()); + } + + #[test] + fn test_all_tx_disconnected() { + let (sender, receiver) = mpsc::sync_channel::>(2); + // Test structure with only a receiver which has a channel ID. 
+ let receiver = MessageReceiverWithId::new(TEST_CHANNEL_ID_0, receiver); + drop(sender); + let reply = receiver.try_recv_message(); + assert!(reply.is_err()); + let error = reply.unwrap_err(); + if let GenericTargetedMessagingError::Receive(GenericReceiveError::TxDisconnected(None)) = + error + { + } else { + panic!("unexpected error type"); + } + } + + #[test] + fn test_sender_map() { + let (sender0, receiver0) = mpsc::channel(); + let (sender1, receiver1) = mpsc::channel(); + let mut sender_map = MessageSenderMap::default(); + sender_map.add_message_target(TEST_CHANNEL_ID_1, sender0); + sender_map.add_message_target(TEST_CHANNEL_ID_2, sender1); + sender_map + .send_message( + MessageMetadata::new(1, TEST_CHANNEL_ID_0), + TEST_CHANNEL_ID_1, + 5, + ) + .expect("sending message failed"); + let mut reply = receiver0.recv().expect("receiving message failed"); + assert_eq!(reply.request_id(), 1); + assert_eq!(reply.sender_id(), TEST_CHANNEL_ID_0); + assert_eq!(reply.message, 5); + sender_map + .send_message( + MessageMetadata::new(2, TEST_CHANNEL_ID_0), + TEST_CHANNEL_ID_2, + 10, + ) + .expect("sending message failed"); + reply = receiver1.recv().expect("receiving message failed"); + assert_eq!(reply.request_id(), 2); + assert_eq!(reply.sender_id(), TEST_CHANNEL_ID_0); + assert_eq!(reply.message, 10); + } + + #[test] + fn test_sender_map_target_does_not_exist() { + let (sender0, _) = mpsc::channel(); + let mut sender_map_with_id = MessageSenderMap::default(); + sender_map_with_id.add_message_target(TEST_CHANNEL_ID_1, sender0); + let result = sender_map_with_id.send_message( + MessageMetadata::new(1, TEST_CHANNEL_ID_0), + TEST_CHANNEL_ID_2, + 5, + ); + assert!(result.is_err()); + let error = result.unwrap_err(); + if let GenericTargetedMessagingError::Send(GenericSendError::TargetDoesNotExist(target)) = + error + { + assert_eq!(target, TEST_CHANNEL_ID_2); + } else { + panic!("Unexpected error type"); + } + } + #[test] + fn test_sender_map_queue_full() { + let (sender0, _receiver0) = mpsc::sync_channel(1); + let mut sender_map_with_id = MessageSenderMap::default(); + sender_map_with_id.add_message_target(TEST_CHANNEL_ID_1, sender0); + sender_map_with_id + .send_message( + MessageMetadata::new(1, TEST_CHANNEL_ID_0), + TEST_CHANNEL_ID_1, + 5, + ) + .expect("sending message failed"); + let result = sender_map_with_id.send_message( + MessageMetadata::new(1, TEST_CHANNEL_ID_0), + TEST_CHANNEL_ID_1, + 5, + ); + assert!(result.is_err()); + let error = result.unwrap_err(); + if let GenericTargetedMessagingError::Send(GenericSendError::QueueFull(capacity)) = error { + assert!(capacity.is_none()); + } else { + panic!("Unexpected error type {}", error); + } + } + + #[test] + fn test_sender_map_queue_receiver_disconnected() { + let (sender0, receiver0) = mpsc::sync_channel(1); + let mut sender_map_with_id = MessageSenderMap::default(); + sender_map_with_id.add_message_target(TEST_CHANNEL_ID_1, sender0); + drop(receiver0); + let result = sender_map_with_id.send_message( + MessageMetadata::new(1, TEST_CHANNEL_ID_0), + TEST_CHANNEL_ID_1, + 5, + ); + assert!(result.is_err()); + let error = result.unwrap_err(); + if let GenericTargetedMessagingError::Send(GenericSendError::RxDisconnected) = error { + } else { + panic!("Unexpected error type {}", error); } } } - -impl From for u64 { - fn from(target_and_apid_id: TargetAndApidId) -> Self { - target_and_apid_id.raw() - } -} - -impl fmt::Display for TargetAndApidId { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}, {}", self.apid, 
self.target) - } -} diff --git a/satrs/src/seq_count.rs b/satrs/src/seq_count.rs index c60c8f7..b4539b0 100644 --- a/satrs/src/seq_count.rs +++ b/satrs/src/seq_count.rs @@ -32,7 +32,7 @@ dyn_clone::clone_trait_object!(SequenceCountProvider); #[cfg(feature = "alloc")] impl SequenceCountProvider for T where T: SequenceCountProviderCore + Clone {} -#[derive(Default, Clone)] +#[derive(Clone)] pub struct SeqCountProviderSimple { seq_count: Cell, max_val: T, @@ -43,13 +43,12 @@ macro_rules! impl_for_primitives { $( paste! { impl SeqCountProviderSimple<$ty> { - pub fn [](max_val: $ty) -> Self { + pub fn [](max_val: $ty) -> Self { Self { seq_count: Cell::new(0), max_val, } } - pub fn []() -> Self { Self { seq_count: Cell::new(0), @@ -58,6 +57,12 @@ macro_rules! impl_for_primitives { } } + impl Default for SeqCountProviderSimple<$ty> { + fn default() -> Self { + Self::[]() + } + } + impl SequenceCountProviderCore<$ty> for SeqCountProviderSimple<$ty> { fn get(&self) -> $ty { self.seq_count.get() @@ -86,21 +91,16 @@ macro_rules! impl_for_primitives { impl_for_primitives!(u8, u16, u32, u64,); /// This is a sequence count provider which wraps around at [MAX_SEQ_COUNT]. +#[derive(Clone)] pub struct CcsdsSimpleSeqCountProvider { provider: SeqCountProviderSimple, } -impl CcsdsSimpleSeqCountProvider { - pub fn new() -> Self { - Self { - provider: SeqCountProviderSimple::new_u16_max_val(MAX_SEQ_COUNT), - } - } -} - impl Default for CcsdsSimpleSeqCountProvider { fn default() -> Self { - Self::new() + Self { + provider: SeqCountProviderSimple::new_custom_max_val_u16(MAX_SEQ_COUNT), + } } } @@ -187,7 +187,7 @@ mod tests { #[test] fn test_u8_counter() { - let u8_counter = SeqCountProviderSimple::new_u8(); + let u8_counter = SeqCountProviderSimple::::default(); assert_eq!(u8_counter.get(), 0); assert_eq!(u8_counter.get_and_increment(), 0); assert_eq!(u8_counter.get_and_increment(), 1); diff --git a/satrs/src/time.rs b/satrs/src/time.rs new file mode 100644 index 0000000..abd3fac --- /dev/null +++ b/satrs/src/time.rs @@ -0,0 +1,7 @@ +use core::fmt::Debug; + +/// Generic abstraction for a check/countdown timer. +pub trait CountdownProvider: Debug { + fn has_expired(&self) -> bool; + fn reset(&mut self); +} diff --git a/satrs/src/tmtc/ccsds_distrib.rs b/satrs/src/tmtc/ccsds_distrib.rs index 10ee80e..607b461 100644 --- a/satrs/src/tmtc/ccsds_distrib.rs +++ b/satrs/src/tmtc/ccsds_distrib.rs @@ -18,6 +18,7 @@ //! # Example //! //! ```rust +//! use satrs::ValidatorU16Id; //! use satrs::tmtc::ccsds_distrib::{CcsdsPacketHandler, CcsdsDistributor}; //! use satrs::tmtc::{ReceivesTc, ReceivesTcCore}; //! use spacepackets::{CcsdsPacket, SpHeader}; @@ -34,16 +35,19 @@ //! fn mutable_foo(&mut self) {} //! } //! +//! impl ValidatorU16Id for ConcreteApidHandler { +//! fn validate(&self, apid: u16) -> bool { apid == 0x0002 } +//! } +//! //! impl CcsdsPacketHandler for ConcreteApidHandler { //! type Error = (); -//! fn valid_apids(&self) -> &'static [u16] { &[0x002] } -//! fn handle_known_apid(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> { +//! fn handle_packet_with_valid_apid(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> { //! assert_eq!(sp_header.apid(), 0x002); //! assert_eq!(tc_raw.len(), 13); //! self.known_call_count += 1; //! Ok(()) //! } -//! fn handle_unknown_apid(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> { +//! fn handle_packet_with_unknown_apid(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> { //! 
assert_eq!(sp_header.apid(), 0x003); //! assert_eq!(tc_raw.len(), 13); //! self.unknown_call_count += 1; @@ -55,8 +59,8 @@ //! let mut ccsds_distributor = CcsdsDistributor::new(apid_handler); //! //! // Create and pass PUS telecommand with a valid APID -//! let mut space_packet_header = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap(); -//! let mut pus_tc = PusTcCreator::new_simple(&mut space_packet_header, 17, 1, None, true); +//! let sp_header = SpHeader::new_for_unseg_tc(0x002, 0x34, 0); +//! let mut pus_tc = PusTcCreator::new_simple(sp_header, 17, 1, &[], true); //! let mut test_buf: [u8; 32] = [0; 32]; //! let mut size = pus_tc //! .write_to_bytes(test_buf.as_mut_slice()) @@ -81,7 +85,10 @@ //! let mutable_handler_ref = ccsds_distributor.packet_handler_mut(); //! mutable_handler_ref.mutable_foo(); //! ``` -use crate::tmtc::{ReceivesCcsdsTc, ReceivesTcCore}; +use crate::{ + tmtc::{ReceivesCcsdsTc, ReceivesTcCore}, + ValidatorU16Id, +}; use core::fmt::{Display, Formatter}; use spacepackets::{ByteConversionError, CcsdsPacket, SpHeader}; #[cfg(feature = "std")] @@ -92,14 +99,18 @@ use std::error::Error; /// Users should implement this trait on their custom CCSDS packet handler and then pass a boxed /// instance of this handler to the [CcsdsDistributor]. The distributor will use the trait /// interface to dispatch received packets to the user based on the Application Process Identifier -/// (APID) field of the CCSDS packet. -pub trait CcsdsPacketHandler { +/// (APID) field of the CCSDS packet. The APID will be checked using the generic [ValidatorU16Id] +/// trait. +pub trait CcsdsPacketHandler: ValidatorU16Id { type Error; - fn valid_apids(&self) -> &'static [u16]; - fn handle_known_apid(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) - -> Result<(), Self::Error>; - fn handle_unknown_apid( + fn handle_packet_with_valid_apid( + &mut self, + sp_header: &SpHeader, + tc_raw: &[u8], + ) -> Result<(), Self::Error>; + + fn handle_packet_with_unknown_apid( &mut self, sp_header: &SpHeader, tc_raw: &[u8], @@ -183,18 +194,15 @@ impl, E: 'static> CcsdsDistributor< } fn dispatch_ccsds(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), CcsdsError> { - let apid = sp_header.apid(); - let valid_apids = self.packet_handler.valid_apids(); - for &valid_apid in valid_apids { - if valid_apid == apid { - return self - .packet_handler - .handle_known_apid(sp_header, tc_raw) - .map_err(|e| CcsdsError::CustomError(e)); - } + let valid_apid = self.packet_handler().validate(sp_header.apid()); + if valid_apid { + self.packet_handler + .handle_packet_with_valid_apid(sp_header, tc_raw) + .map_err(|e| CcsdsError::CustomError(e))?; + return Ok(()); } self.packet_handler - .handle_unknown_apid(sp_header, tc_raw) + .handle_packet_with_unknown_apid(sp_header, tc_raw) .map_err(|e| CcsdsError::CustomError(e)) } } @@ -213,8 +221,8 @@ pub(crate) mod tests { fn is_send(_: &T) {} pub fn generate_ping_tc(buf: &mut [u8]) -> &[u8] { - let mut sph = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap(); - let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); + let sph = SpHeader::new_for_unseg_tc(0x002, 0x34, 0); + let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true); let size = pus_tc .write_to_bytes(buf) .expect("Error writing TC to buffer"); @@ -223,8 +231,8 @@ pub(crate) mod tests { } pub fn generate_ping_tc_as_vec() -> Vec { - let mut sph = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap(); - PusTcCreator::new_simple(&mut sph, 17, 1, None, true) + let sph = SpHeader::new_for_unseg_tc(0x002, 0x34, 0); + 
PusTcCreator::new_simple(sph, 17, 1, &[], true) .to_vec() .unwrap() } @@ -241,13 +249,16 @@ pub(crate) mod tests { pub unknown_packet_queue: VecDeque<(u16, Vec)>, } + impl ValidatorU16Id for BasicApidHandlerSharedQueue { + fn validate(&self, packet_id: u16) -> bool { + [0x000, 0x002].contains(&packet_id) + } + } + impl CcsdsPacketHandler for BasicApidHandlerSharedQueue { type Error = (); - fn valid_apids(&self) -> &'static [u16] { - &[0x000, 0x002] - } - fn handle_known_apid( + fn handle_packet_with_valid_apid( &mut self, sp_header: &SpHeader, tc_raw: &[u8], @@ -261,7 +272,7 @@ pub(crate) mod tests { Ok(()) } - fn handle_unknown_apid( + fn handle_packet_with_unknown_apid( &mut self, sp_header: &SpHeader, tc_raw: &[u8], @@ -276,14 +287,16 @@ pub(crate) mod tests { } } + impl ValidatorU16Id for BasicApidHandlerOwnedQueue { + fn validate(&self, packet_id: u16) -> bool { + [0x000, 0x002].contains(&packet_id) + } + } + impl CcsdsPacketHandler for BasicApidHandlerOwnedQueue { type Error = (); - fn valid_apids(&self) -> &'static [u16] { - &[0x000, 0x002] - } - - fn handle_known_apid( + fn handle_packet_with_valid_apid( &mut self, sp_header: &SpHeader, tc_raw: &[u8], @@ -294,7 +307,7 @@ pub(crate) mod tests { Ok(()) } - fn handle_unknown_apid( + fn handle_packet_with_unknown_apid( &mut self, sp_header: &SpHeader, tc_raw: &[u8], @@ -332,8 +345,8 @@ pub(crate) mod tests { fn test_unknown_apid_handling() { let apid_handler = BasicApidHandlerOwnedQueue::default(); let mut ccsds_distrib = CcsdsDistributor::new(apid_handler); - let mut sph = SpHeader::tc_unseg(0x004, 0x34, 0).unwrap(); - let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); + let sph = SpHeader::new_for_unseg_tc(0x004, 0x34, 0); + let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true); let mut test_buf: [u8; 32] = [0; 32]; pus_tc .write_to_bytes(test_buf.as_mut_slice()) @@ -351,8 +364,8 @@ pub(crate) mod tests { #[test] fn test_ccsds_distribution() { let mut ccsds_distrib = CcsdsDistributor::new(BasicApidHandlerOwnedQueue::default()); - let mut sph = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap(); - let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); + let sph = SpHeader::new_for_unseg_tc(0x002, 0x34, 0); + let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true); let tc_vec = pus_tc.to_vec().unwrap(); ccsds_distrib .pass_ccsds(&sph, &tc_vec) @@ -370,8 +383,8 @@ pub(crate) mod tests { #[test] fn test_distribution_short_packet_fails() { let mut ccsds_distrib = CcsdsDistributor::new(BasicApidHandlerOwnedQueue::default()); - let mut sph = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap(); - let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); + let sph = SpHeader::new_for_unseg_tc(0x002, 0x34, 0); + let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true); let tc_vec = pus_tc.to_vec().unwrap(); let result = ccsds_distrib.pass_tc(&tc_vec[0..6]); assert!(result.is_err()); diff --git a/satrs/src/tmtc/pus_distrib.rs b/satrs/src/tmtc/pus_distrib.rs index f5d6c8d..53056bc 100644 --- a/satrs/src/tmtc/pus_distrib.rs +++ b/satrs/src/tmtc/pus_distrib.rs @@ -46,8 +46,8 @@ //! let mut pus_distributor = PusDistributor::new(service_handler); //! //! // Create and pass PUS ping telecommand with a valid APID -//! let mut space_packet_header = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap(); -//! let mut pus_tc = PusTcCreator::new_simple(&mut space_packet_header, 17, 1, None, true); +//! let sp_header = SpHeader::new_for_unseg_tc(0x002, 0x34, 0); +//! 
let mut pus_tc = PusTcCreator::new_simple(sp_header, 17, 1, &[], true); //! let mut test_buf: [u8; 32] = [0; 32]; //! let mut size = pus_tc //! .write_to_bytes(test_buf.as_mut_slice()) @@ -176,6 +176,7 @@ mod tests { BasicApidHandlerSharedQueue, }; use crate::tmtc::ccsds_distrib::{CcsdsDistributor, CcsdsPacketHandler}; + use crate::ValidatorU16Id; use alloc::format; use alloc::vec::Vec; use spacepackets::ecss::PusError; @@ -253,17 +254,13 @@ mod tests { () => { type Error = PusError; - fn valid_apids(&self) -> &'static [u16] { - &[0x000, 0x002] - } - - fn handle_known_apid( + fn handle_packet_with_valid_apid( &mut self, sp_header: &SpHeader, tc_raw: &[u8], ) -> Result<(), Self::Error> { self.handler_base - .handle_known_apid(&sp_header, tc_raw) + .handle_packet_with_valid_apid(&sp_header, tc_raw) .ok() .expect("Unexpected error"); match self.pus_distrib.pass_ccsds(&sp_header, tc_raw) { @@ -275,13 +272,13 @@ mod tests { } } - fn handle_unknown_apid( + fn handle_packet_with_unknown_apid( &mut self, sp_header: &SpHeader, tc_raw: &[u8], ) -> Result<(), Self::Error> { self.handler_base - .handle_unknown_apid(&sp_header, tc_raw) + .handle_packet_with_unknown_apid(&sp_header, tc_raw) .ok() .expect("Unexpected error"); Ok(()) @@ -289,6 +286,18 @@ mod tests { }; } + impl ValidatorU16Id for ApidHandlerOwned { + fn validate(&self, packet_id: u16) -> bool { + [0x000, 0x002].contains(&packet_id) + } + } + + impl ValidatorU16Id for ApidHandlerShared { + fn validate(&self, packet_id: u16) -> bool { + [0x000, 0x002].contains(&packet_id) + } + } + impl CcsdsPacketHandler for ApidHandlerOwned { apid_handler_impl!(); } diff --git a/satrs/src/tmtc/tm_helper.rs b/satrs/src/tmtc/tm_helper.rs index 005625a..a305472 100644 --- a/satrs/src/tmtc/tm_helper.rs +++ b/satrs/src/tmtc/tm_helper.rs @@ -8,7 +8,9 @@ pub use std_mod::*; #[cfg(feature = "std")] pub mod std_mod { - use crate::pool::{PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StoreAddr}; + use crate::pool::{ + PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StoreAddr, StoreError, + }; use crate::pus::EcssTmtcError; use spacepackets::ecss::tm::PusTmCreator; use spacepackets::ecss::WritablePusPacket; @@ -34,7 +36,7 @@ pub mod std_mod { } pub fn add_pus_tm(&self, pus_tm: &PusTmCreator) -> Result { - let mut pg = self.0.write().map_err(|_| EcssTmtcError::StoreLock)?; + let mut pg = self.0.write().map_err(|_| StoreError::LockError)?; let addr = pg.free_element(pus_tm.len_written(), |buf| { pus_tm .write_to_bytes(buf) @@ -90,9 +92,9 @@ impl PusTmWithCdsShortHelper { source_data: &'a [u8], seq_count: u16, ) -> PusTmCreator { - let mut reply_header = SpHeader::tm_unseg(self.apid, seq_count, 0).unwrap(); + let reply_header = SpHeader::new_for_unseg_tm(self.apid, seq_count, 0); let tc_header = PusTmSecondaryHeader::new_simple(service, subservice, &self.cds_short_buf); - PusTmCreator::new(&mut reply_header, tc_header, source_data, true) + PusTmCreator::new(reply_header, tc_header, source_data, true) } } diff --git a/satrs/tests/mode_tree.rs b/satrs/tests/mode_tree.rs new file mode 100644 index 0000000..17f9836 --- /dev/null +++ b/satrs/tests/mode_tree.rs @@ -0,0 +1,358 @@ +use core::cell::Cell; +use std::{println, sync::mpsc}; + +use satrs::mode::{ + ModeError, ModeProvider, ModeReplyReceiver, ModeReplySender, ModeRequestHandler, + ModeRequestHandlerMpscBounded, ModeRequestReceiver, ModeRequestorAndHandlerMpscBounded, + ModeRequestorBoundedMpsc, +}; +use satrs::request::MessageMetadata; +use satrs::{ + mode::{ModeAndSubmode, ModeReply, ModeRequest}, + 
queue::GenericTargetedMessagingError, + request::GenericMessage, + ComponentId, +}; +use std::string::{String, ToString}; + +pub enum TestComponentId { + Device1 = 1, + Device2 = 2, + Assembly = 3, + PusModeService = 4, +} + +struct PusModeService { + pub request_id_counter: Cell, + pub mode_node: ModeRequestorBoundedMpsc, +} + +impl PusModeService { + pub fn send_announce_mode_cmd_to_assy(&self) { + self.mode_node + .send_mode_request( + self.request_id_counter.get(), + TestComponentId::Assembly as ComponentId, + ModeRequest::AnnounceModeRecursive, + ) + .unwrap(); + self.request_id_counter + .replace(self.request_id_counter.get() + 1); + } +} + +struct TestDevice { + pub name: String, + pub mode_node: ModeRequestHandlerMpscBounded, + pub mode_and_submode: ModeAndSubmode, +} + +impl TestDevice { + pub fn run(&mut self) { + self.check_mode_requests().expect("mode messaging error"); + } + + pub fn check_mode_requests(&mut self) -> Result<(), ModeError> { + if let Some(request) = self.mode_node.try_recv_mode_request()? { + self.handle_mode_request(request)? + } + Ok(()) + } +} + +impl ModeProvider for TestDevice { + fn mode_and_submode(&self) -> ModeAndSubmode { + self.mode_and_submode + } +} + +impl ModeRequestHandler for TestDevice { + type Error = ModeError; + + fn start_transition( + &mut self, + requestor: MessageMetadata, + mode_and_submode: ModeAndSubmode, + ) -> Result<(), ModeError> { + self.mode_and_submode = mode_and_submode; + self.handle_mode_reached(Some(requestor))?; + Ok(()) + } + + fn announce_mode(&self, _requestor_info: Option, _recursive: bool) { + println!( + "{}: announcing mode: {:?}", + self.name, self.mode_and_submode + ); + } + + fn handle_mode_reached(&mut self, requestor: Option) -> Result<(), ModeError> { + if let Some(requestor) = requestor { + self.send_mode_reply(requestor, ModeReply::ModeReply(self.mode_and_submode))?; + } + Ok(()) + } + fn send_mode_reply( + &self, + requestor_info: MessageMetadata, + reply: ModeReply, + ) -> Result<(), ModeError> { + self.mode_node.send_mode_reply(requestor_info, reply)?; + Ok(()) + } + + fn handle_mode_info( + &mut self, + requestor_info: MessageMetadata, + info: ModeAndSubmode, + ) -> Result<(), ModeError> { + // A device is a leaf in the tree.. so this really should not happen + println!( + "{}: unexpected mode info from {:?} with mode: {:?}", + self.name, + requestor_info.sender_id(), + info + ); + Ok(()) + } +} + +struct TestAssembly { + pub mode_node: ModeRequestorAndHandlerMpscBounded, + pub mode_requestor_info: Option, + pub mode_and_submode: ModeAndSubmode, + pub target_mode_and_submode: Option, +} + +impl ModeProvider for TestAssembly { + fn mode_and_submode(&self) -> ModeAndSubmode { + self.mode_and_submode + } +} + +impl TestAssembly { + pub fn run(&mut self) { + self.check_mode_requests().expect("mode messaging error"); + self.check_mode_replies().expect("mode messaging error"); + } + + pub fn check_mode_requests(&mut self) -> Result<(), GenericTargetedMessagingError> { + if let Some(request) = self.mode_node.try_recv_mode_request()? 
{ + match request.message { + ModeRequest::SetMode(mode_and_submode) => { + self.start_transition(request.requestor_info, mode_and_submode) + .unwrap(); + } + ModeRequest::ReadMode => self + .mode_node + .send_mode_reply( + request.requestor_info, + ModeReply::ModeReply(self.mode_and_submode), + ) + .unwrap(), + ModeRequest::AnnounceMode => { + self.announce_mode(Some(request.requestor_info), false) + } + ModeRequest::AnnounceModeRecursive => { + self.announce_mode(Some(request.requestor_info), true) + } + ModeRequest::ModeInfo(_) => todo!(), + } + } + Ok(()) + } + + pub fn check_mode_replies(&mut self) -> Result<(), GenericTargetedMessagingError> { + if let Some(reply_and_id) = self.mode_node.try_recv_mode_reply()? { + match reply_and_id.message { + ModeReply::ModeReply(reply) => { + println!( + "TestAssembly: Received mode reply from {:?}, reached: {:?}", + reply_and_id.sender_id(), + reply + ); + } + ModeReply::CantReachMode(_) => todo!(), + ModeReply::WrongMode { expected, reached } => { + println!( + "TestAssembly: Wrong mode reply from {:?}, reached {:?}, expected {:?}", + reply_and_id.sender_id(), + reached, + expected + ); + } + } + } + Ok(()) + } +} + +impl ModeRequestHandler for TestAssembly { + type Error = ModeError; + fn start_transition( + &mut self, + requestor: MessageMetadata, + mode_and_submode: ModeAndSubmode, + ) -> Result<(), Self::Error> { + self.mode_requestor_info = Some(requestor); + self.target_mode_and_submode = Some(mode_and_submode); + Ok(()) + } + + fn announce_mode(&self, requestor_info: Option, recursive: bool) { + println!( + "TestAssembly: Announcing mode (recursively: {}): {:?}", + recursive, self.mode_and_submode + ); + // self.mode_requestor_info = Some((request_id, sender_id)); + let mut mode_request = ModeRequest::AnnounceMode; + if recursive { + mode_request = ModeRequest::AnnounceModeRecursive; + } + let request_id = requestor_info.map_or(0, |info| info.request_id()); + self.mode_node + .request_sender_map + .0 + .iter() + .for_each(|(_, sender)| { + sender + .send(GenericMessage::new( + MessageMetadata::new(request_id, self.mode_node.local_channel_id_generic()), + mode_request, + )) + .expect("sending mode request failed"); + }); + } + + fn handle_mode_reached( + &mut self, + mode_requestor: Option, + ) -> Result<(), Self::Error> { + if let Some(requestor) = mode_requestor { + self.send_mode_reply(requestor, ModeReply::ModeReply(self.mode_and_submode))?; + } + Ok(()) + } + + fn send_mode_reply( + &self, + requestor: MessageMetadata, + reply: ModeReply, + ) -> Result<(), Self::Error> { + self.mode_node.send_mode_reply(requestor, reply)?; + Ok(()) + } + + fn handle_mode_info( + &mut self, + _requestor_info: MessageMetadata, + _info: ModeAndSubmode, + ) -> Result<(), Self::Error> { + // TODO: A proper assembly must reach to mode changes of its children.. + Ok(()) + } +} + +fn main() { + // All request channel handles. + let (request_sender_to_dev1, request_receiver_dev1) = mpsc::sync_channel(10); + let (request_sender_to_dev2, request_receiver_dev2) = mpsc::sync_channel(10); + let (request_sender_to_assy, request_receiver_assy) = mpsc::sync_channel(10); + + // All reply channel handles. + let (reply_sender_to_assy, reply_receiver_assy) = mpsc::sync_channel(10); + let (reply_sender_to_pus, reply_receiver_pus) = mpsc::sync_channel(10); + + // Mode requestors and handlers. 
+ let mut mode_node_assy = ModeRequestorAndHandlerMpscBounded::new( + TestComponentId::Assembly as ComponentId, + request_receiver_assy, + reply_receiver_assy, + ); + // Mode requestors only. + let mut mode_node_pus = ModeRequestorBoundedMpsc::new( + TestComponentId::PusModeService as ComponentId, + reply_receiver_pus, + ); + + // Request handlers only. + let mut mode_node_dev1 = ModeRequestHandlerMpscBounded::new( + TestComponentId::Device1 as ComponentId, + request_receiver_dev1, + ); + let mut mode_node_dev2 = ModeRequestHandlerMpscBounded::new( + TestComponentId::Device2 as ComponentId, + request_receiver_dev2, + ); + + // Set up mode request senders first. + mode_node_pus.add_message_target( + TestComponentId::Assembly as ComponentId, + request_sender_to_assy, + ); + mode_node_pus.add_message_target( + TestComponentId::Device1 as ComponentId, + request_sender_to_dev1.clone(), + ); + mode_node_pus.add_message_target( + TestComponentId::Device2 as ComponentId, + request_sender_to_dev2.clone(), + ); + mode_node_assy.add_request_target( + TestComponentId::Device1 as ComponentId, + request_sender_to_dev1, + ); + mode_node_assy.add_request_target( + TestComponentId::Device2 as ComponentId, + request_sender_to_dev2, + ); + + // Set up mode reply senders. + mode_node_dev1.add_message_target( + TestComponentId::Assembly as ComponentId, + reply_sender_to_assy.clone(), + ); + mode_node_dev1.add_message_target( + TestComponentId::PusModeService as ComponentId, + reply_sender_to_pus.clone(), + ); + mode_node_dev2.add_message_target( + TestComponentId::Assembly as ComponentId, + reply_sender_to_assy, + ); + mode_node_dev2.add_message_target( + TestComponentId::PusModeService as ComponentId, + reply_sender_to_pus.clone(), + ); + mode_node_assy.add_reply_target( + TestComponentId::PusModeService as ComponentId, + reply_sender_to_pus, + ); + + let mut device1 = TestDevice { + name: "Test Device 1".to_string(), + mode_node: mode_node_dev1, + mode_and_submode: ModeAndSubmode::new(0, 0), + }; + let mut device2 = TestDevice { + name: "Test Device 2".to_string(), + mode_node: mode_node_dev2, + mode_and_submode: ModeAndSubmode::new(0, 0), + }; + let mut assy = TestAssembly { + mode_node: mode_node_assy, + mode_requestor_info: None, + mode_and_submode: ModeAndSubmode::new(0, 0), + target_mode_and_submode: None, + }; + let pus_service = PusModeService { + request_id_counter: Cell::new(0), + mode_node: mode_node_pus, + }; + + pus_service.send_announce_mode_cmd_to_assy(); + assy.run(); + device1.run(); + device2.run(); + assy.run(); +} diff --git a/satrs/tests/pus_events.rs b/satrs/tests/pus_events.rs index ca6d71e..6fc518f 100644 --- a/satrs/tests/pus_events.rs +++ b/satrs/tests/pus_events.rs @@ -1,11 +1,14 @@ use satrs::event_man::{ - EventManagerWithMpsc, EventSendProvider, EventU32SenderMpsc, MpscEventU32Receiver, + EventManagerWithMpsc, EventMessage, EventMessageU32, EventRoutingError, EventSendProvider, + EventU32SenderMpsc, MpscEventU32Receiver, }; use satrs::events::{EventU32, EventU32TypedSev, Severity, SeverityInfo}; use satrs::params::U32Pair; use satrs::params::{Params, ParamsHeapless, WritableToBeBytes}; use satrs::pus::event_man::{DefaultPusEventMgmtBackend, EventReporter, PusEventDispatcher}; -use satrs::pus::TmAsVecSenderWithMpsc; +use satrs::pus::test_util::TEST_COMPONENT_ID_0; +use satrs::pus::PusTmAsVec; +use satrs::request::UniqueApidTargetId; use spacepackets::ecss::tm::PusTmReader; use spacepackets::ecss::{PusError, PusPacket}; use std::sync::mpsc::{self, SendError, TryRecvError}; @@ -15,6 
+18,8 @@ const INFO_EVENT: EventU32TypedSev = EventU32TypedSev::::const_new(1, 0); const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5); const EMPTY_STAMP: [u8; 7] = [0; 7]; +const TEST_APID: u16 = 0x02; +const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, 0x05); #[derive(Debug, Clone)] pub enum CustomTmSenderError { @@ -30,42 +35,43 @@ fn test_threaded_usage() { let (pus_event_man_tx, pus_event_man_rx) = mpsc::channel(); let pus_event_man_send_provider = EventU32SenderMpsc::new(1, pus_event_man_tx); - event_man.subscribe_all(pus_event_man_send_provider.channel_id()); + event_man.subscribe_all(pus_event_man_send_provider.target_id()); event_man.add_sender(pus_event_man_send_provider); - let (event_tx, event_rx) = mpsc::channel(); - let reporter = EventReporter::new(0x02, 128).expect("Creating event reporter failed"); - let mut pus_event_man = - PusEventDispatcher::new(reporter, DefaultPusEventMgmtBackend::default()); + let (event_tx, event_rx) = mpsc::channel::(); + let reporter = + EventReporter::new(TEST_ID.raw(), 0x02, 0, 128).expect("Creating event reporter failed"); + let pus_event_man = PusEventDispatcher::new(reporter, DefaultPusEventMgmtBackend::default()); + let error_handler = |event_msg: &EventMessageU32, error: EventRoutingError| { + panic!("received routing error for event {event_msg:?}: {error:?}"); + }; // PUS + Generic event manager thread let jh0 = thread::spawn(move || { - let mut sender = TmAsVecSenderWithMpsc::new(0, "event_sender", event_tx); let mut event_cnt = 0; let mut params_array: [u8; 128] = [0; 128]; loop { - let res = event_man.try_event_handling(); - assert!(res.is_ok()); + event_man.try_event_handling(error_handler); match pus_event_man_rx.try_recv() { - Ok((event, aux_data)) => { - let mut gen_event = |aux_data| { + Ok(event_msg) => { + let gen_event = |aux_data| { pus_event_man.generate_pus_event_tm_generic( - &mut sender, + &event_tx, &EMPTY_STAMP, - event, + event_msg.event(), aux_data, ) }; - let res = if let Some(aux_data) = aux_data { + let res = if let Some(aux_data) = event_msg.params() { match aux_data { Params::Heapless(heapless) => match heapless { ParamsHeapless::Raw(raw) => { raw.write_to_be_bytes(&mut params_array) .expect("Writing raw parameter failed"); - gen_event(Some(¶ms_array[0..raw.raw_len()])) + gen_event(Some(¶ms_array[0..raw.written_len()])) } ParamsHeapless::EcssEnum(e) => { e.write_to_be_bytes(&mut params_array) .expect("Writing ECSS enum failed"); - gen_event(Some(¶ms_array[0..e.raw_len()])) + gen_event(Some(¶ms_array[0..e.written_len()])) } }, Params::Vec(vec) => gen_event(Some(vec.as_slice())), @@ -95,14 +101,17 @@ fn test_threaded_usage() { // Event sender and TM checker thread let jh1 = thread::spawn(move || { event_sender - .send((INFO_EVENT.into(), None)) + .send(EventMessage::new( + TEST_COMPONENT_ID_0.id(), + INFO_EVENT.into(), + )) .expect("Sending info event failed"); loop { match event_rx.try_recv() { // Event TM received successfully Ok(event_tm) => { - let tm = - PusTmReader::new(event_tm.as_slice(), 7).expect("Deserializing TM failed"); + let tm = PusTmReader::new(event_tm.packet.as_slice(), 7) + .expect("Deserializing TM failed"); assert_eq!(tm.0.service(), 5); assert_eq!(tm.0.subservice(), 1); let src_data = tm.0.source_data(); @@ -121,14 +130,18 @@ fn test_threaded_usage() { } } event_sender - .send((LOW_SEV_EVENT, Some(Params::Heapless((2_u32, 3_u32).into())))) + .send(EventMessage::new_with_params( + TEST_COMPONENT_ID_0.id(), + LOW_SEV_EVENT, + &Params::Heapless((2_u32, 
3_u32).into()), + )) .expect("Sending low severity event failed"); loop { match event_rx.try_recv() { // Event TM received successfully Ok(event_tm) => { - let tm = - PusTmReader::new(event_tm.as_slice(), 7).expect("Deserializing TM failed"); + let tm = PusTmReader::new(event_tm.packet.as_slice(), 7) + .expect("Deserializing TM failed"); assert_eq!(tm.0.service(), 5); assert_eq!(tm.0.subservice(), 2); let src_data = tm.0.source_data(); diff --git a/satrs/tests/pus_verification.rs b/satrs/tests/pus_verification.rs index 386fea6..743535f 100644 --- a/satrs/tests/pus_verification.rs +++ b/satrs/tests/pus_verification.rs @@ -1,9 +1,10 @@ -#[cfg(feature = "crossbeam")] +// #[cfg(feature = "crossbeam")] pub mod crossbeam_test { use hashbrown::HashMap; use satrs::pool::{PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig}; + use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0}; use satrs::pus::verification::{ - FailParams, RequestId, VerificationReporterCfg, VerificationReporterWithSender, + FailParams, RequestId, VerificationReporter, VerificationReporterCfg, VerificationReportingProvider, }; use satrs::pus::TmInSharedPoolSenderWithCrossbeam; @@ -16,7 +17,6 @@ pub mod crossbeam_test { use std::thread; use std::time::Duration; - const TEST_APID: u16 = 0x03; const FIXED_STAMP: [u8; 7] = [0; 7]; const PACKETS_SENT: u8 = 8; @@ -40,13 +40,9 @@ pub mod crossbeam_test { let shared_tc_pool_0 = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg))); let shared_tc_pool_1 = shared_tc_pool_0.clone(); let (tx, rx) = crossbeam_channel::bounded(10); - let sender = TmInSharedPoolSenderWithCrossbeam::new( - 0, - "verif_sender", - shared_tm_pool.clone(), - tx.clone(), - ); - let mut reporter_with_sender_0 = VerificationReporterWithSender::new(&cfg, sender); + let sender_0 = TmInSharedPoolSenderWithCrossbeam::new(shared_tm_pool.clone(), tx.clone()); + let sender_1 = sender_0.clone(); + let mut reporter_with_sender_0 = VerificationReporter::new(TEST_COMPONENT_ID_0.id(), &cfg); let mut reporter_with_sender_1 = reporter_with_sender_0.clone(); // For test purposes, we retrieve the request ID from the TCs and pass them to the receiver // tread. 
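// Editor's note: illustration only, not part of the patch. With this refactoring the
// verification reporter no longer owns a TM sender; the sender is passed explicitly to
// every reporting call, as the hunks below show. A commented sketch of the success path,
// where `reporter`, `sender` and `pus_tc` stand for the objects created in this test:
//
// let token = reporter.add_tc(&pus_tc);
// let token = reporter.acceptance_success(&sender, token, &FIXED_STAMP).expect("acceptance failed");
// let token = reporter.start_success(&sender, token, &FIXED_STAMP).expect("start failed");
// reporter.completion_success(&sender, token, &FIXED_STAMP).expect("completion failed");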
@@ -57,9 +53,9 @@ pub mod crossbeam_test { let (tx_tc_1, rx_tc_1) = crossbeam_channel::bounded(3); { let mut tc_guard = shared_tc_pool_0.write().unwrap(); - let mut sph = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); + let sph = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0); let tc_header = PusTcSecondaryHeader::new_simple(17, 1); - let pus_tc_0 = PusTcCreator::new_no_app_data(&mut sph, tc_header, true); + let pus_tc_0 = PusTcCreator::new_no_app_data(sph, tc_header, true); req_id_0 = RequestId::new(&pus_tc_0); let addr = tc_guard .free_element(pus_tc_0.len_written(), |buf| { @@ -67,9 +63,9 @@ pub mod crossbeam_test { }) .unwrap(); tx_tc_0.send(addr).unwrap(); - let mut sph = SpHeader::tc_unseg(TEST_APID, 1, 0).unwrap(); + let sph = SpHeader::new_for_unseg_tc(TEST_APID, 1, 0); let tc_header = PusTcSecondaryHeader::new_simple(5, 1); - let pus_tc_1 = PusTcCreator::new_no_app_data(&mut sph, tc_header, true); + let pus_tc_1 = PusTcCreator::new_no_app_data(sph, tc_header, true); req_id_1 = RequestId::new(&pus_tc_1); let addr = tc_guard .free_element(pus_tc_0.len_written(), |buf| { @@ -93,24 +89,24 @@ pub mod crossbeam_test { let token = reporter_with_sender_0.add_tc_with_req_id(req_id_0); let accepted_token = reporter_with_sender_0 - .acceptance_success(token, &FIXED_STAMP) + .acceptance_success(&sender_0, token, &FIXED_STAMP) .expect("Acceptance success failed"); // Do some start handling here let started_token = reporter_with_sender_0 - .start_success(accepted_token, &FIXED_STAMP) + .start_success(&sender_0, accepted_token, &FIXED_STAMP) .expect("Start success failed"); // Do some step handling here reporter_with_sender_0 - .step_success(&started_token, &FIXED_STAMP, EcssEnumU8::new(0)) + .step_success(&sender_0, &started_token, &FIXED_STAMP, EcssEnumU8::new(0)) .expect("Start success failed"); // Finish up reporter_with_sender_0 - .step_success(&started_token, &FIXED_STAMP, EcssEnumU8::new(1)) + .step_success(&sender_0, &started_token, &FIXED_STAMP, EcssEnumU8::new(1)) .expect("Start success failed"); reporter_with_sender_0 - .completion_success(started_token, &FIXED_STAMP) + .completion_success(&sender_0, started_token, &FIXED_STAMP) .expect("Completion success failed"); }); @@ -128,15 +124,15 @@ pub mod crossbeam_test { let (tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap(); let token = reporter_with_sender_1.add_tc(&tc); let accepted_token = reporter_with_sender_1 - .acceptance_success(token, &FIXED_STAMP) + .acceptance_success(&sender_1, token, &FIXED_STAMP) .expect("Acceptance success failed"); let started_token = reporter_with_sender_1 - .start_success(accepted_token, &FIXED_STAMP) + .start_success(&sender_1, accepted_token, &FIXED_STAMP) .expect("Start success failed"); let fail_code = EcssEnumU16::new(2); let params = FailParams::new_no_fail_data(&FIXED_STAMP, &fail_code); reporter_with_sender_1 - .completion_failure(started_token, params) + .completion_failure(&sender_1, started_token, params) .expect("Completion success failed"); }); @@ -145,14 +141,14 @@ pub mod crossbeam_test { let mut tm_buf: [u8; 1024] = [0; 1024]; let mut verif_map = HashMap::new(); while packet_counter < PACKETS_SENT { - let verif_addr = rx + let tm_in_pool = rx .recv_timeout(Duration::from_millis(50)) .expect("Packet reception timeout"); let tm_len; let shared_tm_store = shared_tm_pool.clone_backing_pool(); { let mut rg = shared_tm_store.write().expect("Error locking shared pool"); - let store_guard = rg.read_with_guard(verif_addr); + let store_guard = rg.read_with_guard(tm_in_pool.store_addr); tm_len = 
store_guard .read(&mut tm_buf) .expect("Error reading TM slice"); diff --git a/satrs/tests/tcp_servers.rs b/satrs/tests/tcp_servers.rs index b960df7..ff3fe78 100644 --- a/satrs/tests/tcp_servers.rs +++ b/satrs/tests/tcp_servers.rs @@ -31,7 +31,7 @@ use spacepackets::{ ecss::{tc::PusTcCreator, WritablePusPacket}, PacketId, SpHeader, }; -use std::{boxed::Box, collections::VecDeque, sync::Arc, vec::Vec}; +use std::{collections::VecDeque, sync::Arc, vec::Vec}; #[derive(Default, Clone)] struct SyncTcCacher { @@ -162,14 +162,14 @@ fn test_cobs_server() { } const TEST_APID_0: u16 = 0x02; -const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0); +const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0); #[test] fn test_ccsds_server() { let tc_receiver = SyncTcCacher::default(); let mut tm_source = SyncTmSource::default(); - let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); - let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 1, None, true); + let sph = SpHeader::new_for_unseg_tc(TEST_APID_0, 0, 0); + let verif_tm = PusTcCreator::new_simple(sph, 1, 1, &[], true); let tm_0 = verif_tm.to_vec().expect("tm generation failed"); tm_source.add_tm(&tm_0); let mut packet_id_lookup = HashSet::new(); @@ -178,7 +178,7 @@ fn test_ccsds_server() { ServerConfig::new(AUTO_PORT_ADDR, Duration::from_millis(2), 1024, 1024), tm_source, tc_receiver.clone(), - Box::new(packet_id_lookup), + packet_id_lookup, ) .expect("TCP server generation failed"); let dest_addr = tcp_server @@ -203,8 +203,8 @@ fn test_ccsds_server() { .expect("setting reas timeout failed"); // Send ping telecommand. - let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); - let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); + let sph = SpHeader::new_for_unseg_tc(TEST_APID_0, 0, 0); + let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true); let tc_0 = ping_tc.to_vec().expect("packet creation failed"); stream .write_all(&tc_0)
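// Editor's note: illustration only, not part of the patch. A minimal std-based sketch of
// implementing the new `CountdownProvider` trait added in satrs/src/time.rs above. The
// type name is made up, and the trait path assumes the module is exported as
// `satrs::time`.
use std::time::{Duration, Instant};

#[derive(Debug)]
struct InstantCountdown {
    start: Instant,
    duration: Duration,
}

impl satrs::time::CountdownProvider for InstantCountdown {
    fn has_expired(&self) -> bool {
        self.start.elapsed() >= self.duration
    }

    fn reset(&mut self) {
        self.start = Instant::now();
    }
}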