15 Commits

SHA1 Message Date
31844e4fe2 fix tests 2025-01-31 11:17:39 +01:00
738872f421 Merge pull request 'update mini simulator' (#214) from update-minisim into main
Reviewed-on: #214
2025-01-30 18:57:27 +01:00
309e39999f small tweak 2025-01-30 18:56:44 +01:00
1c43c3adf9 update mini simulator 2025-01-30 18:55:47 +01:00
d9e0abffa7 Merge pull request 'clippy and test fix' (#213) from clippy-fixes into main
Reviewed-on: #213
2025-01-23 17:22:46 +01:00
abec9dd448 clippy and test fix 2025-01-23 17:19:26 +01:00
c18fbb59ad Merge pull request 're-run formatter' (#212) from run-afmt into main
Reviewed-on: #212
2025-01-23 17:04:58 +01:00
c91ddcd658 re-run formatter 2025-01-23 17:02:16 +01:00
c5fa1955d7 Merge pull request 'Scheduling Table for std runtimes' (#210) from scheduling-table into main
Reviewed-on: #210
2024-11-20 17:22:08 +01:00
c9708810e6 changelog 2024-11-20 17:21:54 +01:00
79d0c2e222 Scheduling Table for std runtimes 2024-11-20 17:20:42 +01:00
8e87875c0e Merge pull request 'use released satrs-shared' (#209) from use-released-satrs-shared into main
Reviewed-on: #209
2024-11-15 12:15:06 +01:00
1ac6c02c06 use released satrs-shared 2024-11-15 12:14:33 +01:00
6e7907522e Merge pull request 'changelog satrs-shared' (#208) from satrs-shared-changelog into main
Reviewed-on: #208
2024-11-15 12:08:27 +01:00
f747a5efdc changelog satrs-shared 2024-11-15 12:07:32 +01:00
26 changed files with 624 additions and 139 deletions

View File

@ -1,5 +1,6 @@
#!/usr/bin/env python3
"""Example client for the sat-rs example application"""
import logging
import sys
import time

View File

@ -12,7 +12,7 @@ authors = [
{name = "Robin Mueller", email = "robin.mueller.m@gmail.com"},
]
dependencies = [
"tmtccmd~=8.0",
"tmtccmd~=8.1",
"pydantic~=2.7"
]

View File

@ -0,0 +1,11 @@
from tmtccmd.config import CmdTreeNode
def create_acs_node(mode_node: CmdTreeNode, hk_node: CmdTreeNode) -> CmdTreeNode:
acs_node = CmdTreeNode("acs", "ACS Subsystem Node")
mgm_node = CmdTreeNode("mgms", "MGM devices node")
mgm_node.add_child(mode_node)
mgm_node.add_child(hk_node)
acs_node.add_child(mgm_node)
return acs_node

View File

@ -12,7 +12,7 @@ from pytmtc.pus_tc import create_cmd_definition_tree
class SatrsConfigHook(HookBase):
def __init__(self, json_cfg_path: str):
super().__init__(json_cfg_path=json_cfg_path)
super().__init__(json_cfg_path)
def get_communication_interface(self, com_if_key: str) -> Optional[ComInterface]:
from tmtccmd.config.com import (

View File

View File

@ -4,7 +4,7 @@ from spacepackets.ecss.pus_3_hk import Subservice
from spacepackets.ecss import PusTm
from pytmtc.common import AcsId, Apid
from pytmtc.mgms import handle_mgm_hk_report
from pytmtc.acs.mgms import handle_mgm_hk_report
_LOGGER = logging.getLogger(__name__)

View File

@ -17,8 +17,9 @@ from tmtccmd.tmtc import (
)
from tmtccmd.pus.s11_tc_sched import create_time_tagged_cmd
from pytmtc.acs import create_acs_node
from pytmtc.common import Apid
from pytmtc.mgms import create_mgm_cmds
from pytmtc.acs.mgms import create_mgm_cmds
_LOGGER = logging.getLogger(__name__)
@ -67,7 +68,6 @@ class TcHandler(TcHandlerBase):
def create_cmd_definition_tree() -> CmdTreeNode:
root_node = CmdTreeNode.root_node()
hk_node = CmdTreeNode("hk", "Housekeeping Node", hide_children_for_print=True)
@ -101,15 +101,7 @@ def create_cmd_definition_tree() -> CmdTreeNode:
)
)
root_node.add_child(scheduler_node)
acs_node = CmdTreeNode("acs", "ACS Subsystem Node")
mgm_node = CmdTreeNode("mgms", "MGM devices node")
mgm_node.add_child(mode_node)
mgm_node.add_child(hk_node)
acs_node.add_child(mgm_node)
root_node.add_child(acs_node)
root_node.add_child(create_acs_node(mode_node, hk_node))
return root_node

View File

@ -25,7 +25,6 @@ optional = true
[dependencies.satrs-shared]
version = ">=0.1.3, <=0.2"
features = ["serde"]
path = "../satrs-shared"
[dependencies.satrs-mib-codegen]
path = "codegen"

View File

@ -29,7 +29,6 @@ trybuild = { version = "1", features = ["diff"] }
[dev-dependencies.satrs-shared]
version = ">=0.1.3, <=0.2"
path = "../../satrs-shared"
[dev-dependencies.satrs-mib]
path = ".."

View File

@ -14,11 +14,12 @@ fern = "0.7"
strum = { version = "0.26", features = ["derive"] }
num_enum = "0.7"
humantime = "2"
tai-time = { version = "0.3", features = ["serde"] }
[dependencies.asynchronix]
version = "0.2.2"
# git = "https://github.com/asynchronics/asynchronix.git"
# branch = "main"
[dependencies.nexosim]
version = "0.3.1"
git = "https://github.com/us-irs/nexosim.git"
branch = "explicit-serde-feature"
features = ["serde"]
[dependencies.satrs]
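
Note on the dependency switch above: the simulator moves from asynchronix 0.2 to the nexosim fork, which renames the crate and reshapes the model API used throughout this PR. Model input methods now receive `&mut Context<Self>` instead of `&Scheduler<Self>`, `Output` moves to `nexosim::ports`, and the controller-side `send_event` calls become fallible `process_event` calls. Below is a minimal sketch of the new model-side signatures; `HeaterModel` and its methods are hypothetical, and only the trait and context usage mirror the changes visible in `acs.rs` and `eps.rs`.

```rust
use std::time::Duration;

use nexosim::model::{Context, Model};

// Hypothetical model used only for illustration.
#[derive(Default)]
pub struct HeaterModel {
    pub on: bool,
    pub report_count: u32,
}

// As with asynchronix, the Model impl can stay empty for simple models
// (assumption: the default trait methods of the fork used here suffice).
impl Model for HeaterModel {}

impl HeaterModel {
    // Input method: switches the heater on and schedules a delayed self-event
    // through the context, analogous to `request_switch_info` in `eps.rs`.
    pub async fn switch_on(&mut self, _: (), cx: &mut Context<Self>) {
        self.on = true;
        cx.schedule_event(Duration::from_millis(10), Self::report, ())
            .expect("scheduling heater report failed");
    }

    // Scheduled target method, mirroring the argument-less handlers in the PR.
    pub fn report(&mut self) {
        self.report_count += 1;
    }
}
```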

View File

@ -1,8 +1,8 @@
use std::{f32::consts::PI, sync::mpsc, time::Duration};
use asynchronix::{
model::{Model, Output},
time::Scheduler,
use nexosim::{
model::{Context, Model},
ports::Output,
};
use satrs::power::SwitchStateBinary;
use satrs_minisim::{
@ -55,7 +55,7 @@ impl<ReplyProvider: MgmReplyProvider> MagnetometerModel<ReplyProvider> {
self.switch_state = switch_state;
}
pub async fn send_sensor_values(&mut self, _: (), scheduler: &Scheduler<Self>) {
pub async fn send_sensor_values(&mut self, _: (), scheduler: &mut Context<Self>) {
self.reply_sender
.send(ReplyProvider::create_mgm_reply(MgmReplyCommon {
switch_state: self.switch_state,
@ -114,11 +114,11 @@ impl MagnetorquerModel {
pub async fn apply_torque(
&mut self,
duration_and_dipole: (Duration, MgtDipole),
scheduler: &Scheduler<Self>,
cx: &mut Context<Self>,
) {
self.torque_dipole = duration_and_dipole.1;
self.torquing = true;
if scheduler
if cx
.schedule_event(duration_and_dipole.0, Self::clear_torque, ())
.is_err()
{
@ -138,12 +138,11 @@ impl MagnetorquerModel {
self.generate_magnetic_field(()).await;
}
pub async fn request_housekeeping_data(&mut self, _: (), scheduler: &Scheduler<Self>) {
pub async fn request_housekeeping_data(&mut self, _: (), cx: &mut Context<Self>) {
if self.switch_state != SwitchStateBinary::On {
return;
}
scheduler
.schedule_event(Duration::from_millis(15), Self::send_housekeeping_data, ())
cx.schedule_event(Duration::from_millis(15), Self::send_housekeeping_data, ())
.expect("requesting housekeeping data failed")
}
@ -200,7 +199,7 @@ pub mod tests {
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests_time_agnostic();
sim_testbench.step();
sim_testbench.step().unwrap();
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_some());
let sim_reply = sim_reply.unwrap();
@ -223,21 +222,21 @@ pub mod tests {
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests_time_agnostic();
sim_testbench.step();
sim_testbench.step().unwrap();
let mut sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_some());
let mut sim_reply = sim_reply_res.unwrap();
assert_eq!(sim_reply.component(), SimComponent::MgmLis3Mdl);
let first_reply = MgmLis3MdlReply::from_sim_message(&sim_reply)
.expect("failed to deserialize MGM sensor values");
sim_testbench.step_by(Duration::from_millis(50));
sim_testbench.step_until(Duration::from_millis(50)).unwrap();
request = SimRequest::new_with_epoch_time(MgmRequestLis3Mdl::RequestSensorData);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests_time_agnostic();
sim_testbench.step();
sim_testbench.step().unwrap();
sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_some());
sim_reply = sim_reply_res.unwrap();
@ -272,7 +271,7 @@ pub mod tests {
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests_time_agnostic();
sim_testbench.step();
sim_testbench.step().unwrap();
let sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_none());
}
@ -287,7 +286,7 @@ pub mod tests {
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests_time_agnostic();
sim_testbench.step();
sim_testbench.step().unwrap();
let sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_some());
let sim_reply = sim_reply_res.unwrap();
@ -308,7 +307,7 @@ pub mod tests {
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests_time_agnostic();
sim_testbench.step();
sim_testbench.step().unwrap();
let sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_some());
let sim_reply = sim_reply_res.unwrap();
@ -339,7 +338,7 @@ pub mod tests {
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests_time_agnostic();
sim_testbench.step_by(Duration::from_millis(5));
sim_testbench.step_until(Duration::from_millis(5)).unwrap();
check_mgt_hk(
&mut sim_testbench,
@ -348,7 +347,9 @@ pub mod tests {
torquing: true,
},
);
sim_testbench.step_by(Duration::from_millis(100));
sim_testbench
.step_until(Duration::from_millis(100))
.unwrap();
check_mgt_hk(
&mut sim_testbench,
MgtHkSet {

View File

@ -1,7 +1,7 @@
use std::{sync::mpsc, time::Duration};
use asynchronix::{
simulation::{Address, Simulation},
use nexosim::{
simulation::{Address, Scheduler, Simulation},
time::{Clock, MonotonicTime, SystemClock},
};
use satrs_minisim::{
@ -23,35 +23,52 @@ const MGM_REQ_WIRETAPPING: bool = false;
const PCDU_REQ_WIRETAPPING: bool = false;
const MGT_REQ_WIRETAPPING: bool = false;
pub struct ModelAddrWrapper {
mgm_addr: Address<MagnetometerModel<MgmLis3MdlReply>>,
pcdu_addr: Address<PcduModel>,
mgt_addr: Address<MagnetorquerModel>,
}
// The simulation controller processes requests and drives the simulation.
#[allow(dead_code)]
pub struct SimController {
pub sys_clock: SystemClock,
pub request_receiver: mpsc::Receiver<SimRequest>,
pub reply_sender: mpsc::Sender<SimReply>,
pub simulation: Simulation,
pub mgm_addr: Address<MagnetometerModel<MgmLis3MdlReply>>,
pub pcdu_addr: Address<PcduModel>,
pub mgt_addr: Address<MagnetorquerModel>,
pub scheduler: Scheduler,
pub addr_wrapper: ModelAddrWrapper,
}
impl ModelAddrWrapper {
pub fn new(
mgm_addr: Address<MagnetometerModel<MgmLis3MdlReply>>,
pcdu_addr: Address<PcduModel>,
mgt_addr: Address<MagnetorquerModel>,
) -> Self {
Self {
mgm_addr,
pcdu_addr,
mgt_addr,
}
}
}
impl SimController {
pub fn new(
sys_clock: SystemClock,
request_receiver: mpsc::Receiver<SimRequest>,
reply_sender: mpsc::Sender<SimReply>,
simulation: Simulation,
mgm_addr: Address<MagnetometerModel<MgmLis3MdlReply>>,
pcdu_addr: Address<PcduModel>,
mgt_addr: Address<MagnetorquerModel>,
scheduler: Scheduler,
addr_wrapper: ModelAddrWrapper,
) -> Self {
Self {
sys_clock,
request_receiver,
reply_sender,
simulation,
mgm_addr,
pcdu_addr,
mgt_addr,
scheduler,
addr_wrapper,
}
}
@ -62,7 +79,7 @@ impl SimController {
// Check for UDP requests every millisecond. Shift the simulator ahead here to prevent
// replies lying in the past.
t += Duration::from_millis(udp_polling_interval_ms);
self.sys_clock.synchronize(t);
let _synch_status = self.sys_clock.synchronize(t);
self.handle_sim_requests(t_old);
self.simulation
.step_until(t)
@ -118,11 +135,13 @@ impl SimController {
}
match mgm_request {
MgmRequestLis3Mdl::RequestSensorData => {
self.simulation.send_event(
MagnetometerModel::send_sensor_values,
(),
&self.mgm_addr,
);
self.simulation
.process_event(
MagnetometerModel::send_sensor_values,
(),
&self.addr_wrapper.mgm_addr,
)
.expect("event execution error for mgm");
}
}
Ok(())
@ -136,14 +155,21 @@ impl SimController {
match pcdu_request {
PcduRequest::RequestSwitchInfo => {
self.simulation
.send_event(PcduModel::request_switch_info, (), &self.pcdu_addr);
.process_event(
PcduModel::request_switch_info,
(),
&self.addr_wrapper.pcdu_addr,
)
.unwrap();
}
PcduRequest::SwitchDevice { switch, state } => {
self.simulation.send_event(
PcduModel::switch_device,
(switch, state),
&self.pcdu_addr,
);
self.simulation
.process_event(
PcduModel::switch_device,
(switch, state),
&self.addr_wrapper.pcdu_addr,
)
.unwrap();
}
}
Ok(())
@ -155,17 +181,23 @@ impl SimController {
log::info!("received MGT request: {:?}", mgt_request);
}
match mgt_request {
MgtRequest::ApplyTorque { duration, dipole } => self.simulation.send_event(
MagnetorquerModel::apply_torque,
(duration, dipole),
&self.mgt_addr,
),
MgtRequest::RequestHk => self.simulation.send_event(
MagnetorquerModel::request_housekeeping_data,
(),
&self.mgt_addr,
),
}
MgtRequest::ApplyTorque { duration, dipole } => self
.simulation
.process_event(
MagnetorquerModel::apply_torque,
(duration, dipole),
&self.addr_wrapper.mgt_addr,
)
.unwrap(),
MgtRequest::RequestHk => self
.simulation
.process_event(
MagnetorquerModel::request_housekeeping_data,
(),
&self.addr_wrapper.mgt_addr,
)
.unwrap(),
};
Ok(())
}
@ -199,7 +231,7 @@ mod tests {
.send_request(request)
.expect("sending sim ctrl request failed");
sim_testbench.handle_sim_requests_time_agnostic();
sim_testbench.step();
sim_testbench.step().unwrap();
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_some());
let sim_reply = sim_reply.unwrap();

View File

@ -1,8 +1,8 @@
use std::{sync::mpsc, time::Duration};
use asynchronix::{
model::{Model, Output},
time::Scheduler,
use nexosim::{
model::{Context, Model},
ports::Output,
};
use satrs::power::SwitchStateBinary;
use satrs_minisim::{
@ -29,14 +29,13 @@ impl PcduModel {
}
}
pub async fn request_switch_info(&mut self, _: (), scheduler: &Scheduler<Self>) {
scheduler
.schedule_event(
Duration::from_millis(SWITCH_INFO_DELAY_MS),
Self::send_switch_info,
(),
)
.expect("requesting switch info failed");
pub async fn request_switch_info(&mut self, _: (), cx: &mut Context<Self>) {
cx.schedule_event(
Duration::from_millis(SWITCH_INFO_DELAY_MS),
Self::send_switch_info,
(),
)
.expect("requesting switch info failed");
}
pub fn send_switch_info(&mut self) {
@ -92,7 +91,7 @@ pub(crate) mod tests {
.send_request(request)
.expect("sending MGM switch request failed");
sim_testbench.handle_sim_requests_time_agnostic();
sim_testbench.step();
sim_testbench.step().unwrap();
}
#[allow(dead_code)]
@ -113,7 +112,7 @@ pub(crate) mod tests {
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests_time_agnostic();
sim_testbench.step();
sim_testbench.step().unwrap();
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_some());
let sim_reply = sim_reply.unwrap();
@ -143,12 +142,12 @@ pub(crate) mod tests {
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests_time_agnostic();
sim_testbench.step_by(Duration::from_millis(1));
sim_testbench.step_until(Duration::from_millis(1)).unwrap();
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_none());
// Reply takes 20ms
sim_testbench.step_by(Duration::from_millis(25));
sim_testbench.step_until(Duration::from_millis(25)).unwrap();
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_some());
let sim_reply = sim_reply.unwrap();

View File

@ -1,4 +1,4 @@
use asynchronix::time::MonotonicTime;
use nexosim::time::MonotonicTime;
use num_enum::{IntoPrimitive, TryFromPrimitive};
use serde::{de::DeserializeOwned, Deserialize, Serialize};

View File

@ -1,8 +1,8 @@
use acs::{MagnetometerModel, MagnetorquerModel};
use asynchronix::simulation::{Mailbox, SimInit};
use asynchronix::time::{MonotonicTime, SystemClock};
use controller::SimController;
use controller::{ModelAddrWrapper, SimController};
use eps::PcduModel;
use nexosim::simulation::{Mailbox, SimInit};
use nexosim::time::{MonotonicTime, SystemClock};
use satrs_minisim::udp::SIM_CTRL_PORT;
use satrs_minisim::{SimReply, SimRequest};
use std::sync::mpsc;
@ -63,19 +63,20 @@ fn create_sim_controller(
} else {
SimInit::new()
};
let simulation = sim_init
.add_model(mgm_model, mgm_mailbox)
.add_model(pcdu_model, pcdu_mailbox)
.add_model(mgt_model, mgt_mailbox)
.init(start_time);
let addrs = ModelAddrWrapper::new(mgm_addr, pcdu_addr, mgt_addr);
let (simulation, scheduler) = sim_init
.add_model(mgm_model, mgm_mailbox, "MGM model")
.add_model(pcdu_model, pcdu_mailbox, "PCDU model")
.add_model(mgt_model, mgt_mailbox, "MGT model")
.init(start_time)
.unwrap();
SimController::new(
sys_clock,
request_receiver,
reply_sender,
simulation,
mgm_addr,
pcdu_addr,
mgt_addr,
scheduler,
addrs,
)
}

View File

@ -1,7 +1,10 @@
use delegate::delegate;
use std::{sync::mpsc, time::Duration};
use std::sync::mpsc;
use asynchronix::time::MonotonicTime;
use nexosim::{
simulation::ExecutionError,
time::{Deadline, MonotonicTime},
};
use satrs_minisim::{SimReply, SimRequest};
use crate::{controller::SimController, create_sim_controller, ThreadingModel};
@ -35,8 +38,8 @@ impl SimTestbench {
pub fn handle_sim_requests(&mut self, old_timestamp: MonotonicTime);
}
to self.sim_controller.simulation {
pub fn step(&mut self);
pub fn step_by(&mut self, duration: Duration);
pub fn step(&mut self) -> Result<(), ExecutionError>;
pub fn step_until(&mut self, duration: impl Deadline) -> Result<(), ExecutionError>;
}
}

View File

@ -1,4 +1,4 @@
use asynchronix::time::MonotonicTime;
use nexosim::time::MonotonicTime;
pub fn current_millis(time: MonotonicTime) -> u64 {
(time.as_secs() as u64 * 1000) + (time.subsec_nanos() as u64 / 1_000_000)

View File

@ -8,6 +8,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.2.1] 2024-11-15
Increased allowed spacepackets to v0.13
# [v0.2.0] 2024-11-04
Semver bump, due to added features in v0.1.4

View File

@ -19,6 +19,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
## Added
- `StaticHeaplessMemoryPool` which can be grown with user-provided static buffers.
- Scheduling table for systems with a standard runtime
# [v0.2.1] 2024-05-19

View File

@ -21,7 +21,6 @@ crc = "3"
[dependencies.satrs-shared]
version = ">=0.1.3, <=0.2"
path = "../satrs-shared"
[dependencies.num_enum]
version = ">0.5, <=0.7"
@ -32,9 +31,9 @@ version = "0.13"
default-features = false
[dependencies.cobs]
git = "https://github.com/robamu/cobs.rs.git"
git = "https://github.com/jamesmunns/cobs.rs.git"
version = "0.2.3"
branch = "all_features"
branch = "main"
default-features = false
[dependencies.num-traits]
@ -132,3 +131,8 @@ test_util = []
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--generate-link-to-definition"]
[[test]]
name = "event_test"
path = "tests/pus_events.rs"
required-features = ["test_util"]

View File

@ -15,20 +15,24 @@ pub enum OpResult {
TerminationRequested,
}
#[derive(Debug)]
pub enum ExecutionType {
Infinite,
Cycles(u32),
OneShot,
}
pub trait Executable: Send {
pub trait Executable {
type Error;
fn exec_type(&self) -> ExecutionType;
fn task_name(&self) -> &'static str;
fn periodic_op(&mut self, op_code: i32) -> Result<OpResult, Self::Error>;
}
pub trait ExecutableWithType: Executable {
fn exec_type(&self) -> ExecutionType;
}
/// This function allows executing one task which implements the [Executable] trait
///
/// # Arguments
@ -39,7 +43,10 @@ pub trait Executable: Send {
/// * `op_code`: Operation code which is passed to the executable task
/// [operation call][Executable::periodic_op]
/// * `termination`: Optional termination handler which can cancel threads with a broadcast
pub fn exec_sched_single<T: Executable<Error = E> + Send + 'static + ?Sized, E: Send + 'static>(
pub fn exec_sched_single<
T: ExecutableWithType<Error = E> + Send + 'static + ?Sized,
E: Send + 'static,
>(
mut executable: Box<T>,
task_freq: Option<Duration>,
op_code: i32,
@ -88,7 +95,10 @@ pub fn exec_sched_single<T: Executable<Error = E> + Send + 'static + ?Sized, E:
/// * `task_freq`: Optional frequency of task. Required for periodic and fixed cycle tasks
/// * `op_code`: Operation code which is passed to the executable task [operation call][Executable::periodic_op]
/// * `termination`: Optional termination handler which can cancel threads with a broadcast
pub fn exec_sched_multi<T: Executable<Error = E> + Send + 'static + ?Sized, E: Send + 'static>(
pub fn exec_sched_multi<
T: ExecutableWithType<Error = E> + Send + 'static + ?Sized,
E: Send + 'static,
>(
task_name: &'static str,
mut executable_vec: Vec<Box<T>>,
task_freq: Option<Duration>,
@ -142,7 +152,10 @@ pub fn exec_sched_multi<T: Executable<Error = E> + Send + 'static + ?Sized, E: S
#[cfg(test)]
mod tests {
use super::{exec_sched_multi, exec_sched_single, Executable, ExecutionType, OpResult};
use super::{
exec_sched_multi, exec_sched_single, Executable, ExecutableWithType, ExecutionType,
OpResult,
};
use bus::Bus;
use std::boxed::Box;
use std::error::Error;
@ -208,10 +221,6 @@ mod tests {
impl Executable for OneShotTask {
type Error = ExampleError;
fn exec_type(&self) -> ExecutionType {
ExecutionType::OneShot
}
fn task_name(&self) -> &'static str {
ONE_SHOT_TASK_NAME
}
@ -229,15 +238,17 @@ mod tests {
}
}
impl ExecutableWithType for OneShotTask {
fn exec_type(&self) -> ExecutionType {
ExecutionType::OneShot
}
}
const CYCLE_TASK_NAME: &str = "Fixed Cycles Task";
impl Executable for FixedCyclesTask {
type Error = ExampleError;
fn exec_type(&self) -> ExecutionType {
ExecutionType::Cycles(self.cycles)
}
fn task_name(&self) -> &'static str {
CYCLE_TASK_NAME
}
@ -255,15 +266,17 @@ mod tests {
}
}
impl ExecutableWithType for FixedCyclesTask {
fn exec_type(&self) -> ExecutionType {
ExecutionType::Cycles(self.cycles)
}
}
const PERIODIC_TASK_NAME: &str = "Periodic Task";
impl Executable for PeriodicTask {
type Error = ExampleError;
fn exec_type(&self) -> ExecutionType {
ExecutionType::Infinite
}
fn task_name(&self) -> &'static str {
PERIODIC_TASK_NAME
}
@ -281,6 +294,12 @@ mod tests {
}
}
impl ExecutableWithType for PeriodicTask {
fn exec_type(&self) -> ExecutionType {
ExecutionType::Infinite
}
}
#[test]
fn test_simple_one_shot() {
let expected_op_code = 42;
@ -423,7 +442,7 @@ mod tests {
});
assert_eq!(cycled_task_0.task_name(), CYCLE_TASK_NAME);
assert_eq!(one_shot_task.task_name(), ONE_SHOT_TASK_NAME);
let task_vec: Vec<Box<dyn Executable<Error = ExampleError>>> =
let task_vec: Vec<Box<dyn ExecutableWithType<Error = ExampleError> + Send>> =
vec![one_shot_task, cycled_task_0, cycled_task_1];
let jh = exec_sched_multi(
"multi-task-name",
@ -493,7 +512,7 @@ mod tests {
});
assert_eq!(periodic_task_0.task_name(), PERIODIC_TASK_NAME);
assert_eq!(periodic_task_1.task_name(), PERIODIC_TASK_NAME);
let task_vec: Vec<Box<dyn Executable<Error = ExampleError>>> =
let task_vec: Vec<Box<dyn ExecutableWithType<Error = ExampleError> + Send>> =
vec![cycled_task, periodic_task_0, periodic_task_1];
let jh = exec_sched_multi(
"multi-task-name",

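
The `executable.rs` changes above split the old `Executable` trait: the `Send` bound is dropped and `exec_type` moves into a new `ExecutableWithType` trait used by the thread-based schedulers. A minimal sketch of a task after this split, assuming the crate's default (std) features; `BlinkTask` is made up for illustration:

```rust
use core::convert::Infallible;

use satrs::executable::{Executable, ExecutableWithType, ExecutionType, OpResult};

// Hypothetical periodic task used only for illustration.
pub struct BlinkTask {
    pub toggles: u32,
}

impl Executable for BlinkTask {
    type Error = Infallible;

    fn task_name(&self) -> &'static str {
        "BLINK_TASK"
    }

    fn periodic_op(&mut self, _op_code: i32) -> Result<OpResult, Self::Error> {
        self.toggles += 1;
        Ok(OpResult::Ok)
    }
}

// The execution type now lives in its own trait, so consumers that do not
// need it (like the new scheduling table) can work with plain `Executable`
// objects, while `exec_sched_single`/`exec_sched_multi` take boxed
// `dyn ExecutableWithType<...> + Send` objects as shown in the tests above.
impl ExecutableWithType for BlinkTask {
    fn exec_type(&self) -> ExecutionType {
        ExecutionType::Infinite
    }
}

fn main() {
    let mut task = BlinkTask { toggles: 0 };
    assert_eq!(task.task_name(), "BLINK_TASK");
    task.periodic_op(0).unwrap();
    assert!(matches!(task.exec_type(), ExecutionType::Infinite));
}
```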
View File

@ -36,6 +36,8 @@ pub mod pus;
pub mod queue;
pub mod request;
pub mod res_code;
#[cfg(feature = "alloc")]
pub mod scheduling;
pub mod time;
pub mod tmtc;

satrs/src/scheduling.rs (new file, 416 lines)
View File

@ -0,0 +1,416 @@
use core::{convert::Infallible, fmt::Debug, time::Duration};
use std::time::Instant;
use thiserror::Error;
use crate::executable::Executable;
#[cfg(feature = "std")]
pub use std_mod::*;
#[derive(Debug, Default)]
pub struct SchedulingTable {
execution_frequency: Duration,
pub table: alloc::vec::Vec<u32>,
}
#[derive(Debug, Error)]
pub enum InvalidSlotError {
#[error("slot time is larger than the execution frequency")]
SlotTimeLargerThanFrequency,
#[error("slot time is smaller than previous slot")]
SmallerThanPreviousSlot {
slot_time_ms: u32,
prev_slot_time_ms: u32,
},
}
impl SchedulingTable {
pub fn new(execution_frequency: Duration) -> Self {
Self {
execution_frequency,
table: Default::default(),
}
}
pub fn add_slot(&mut self, relative_execution_time_ms: u32) -> Result<(), InvalidSlotError> {
if relative_execution_time_ms > self.execution_frequency.as_millis() as u32 {
return Err(InvalidSlotError::SlotTimeLargerThanFrequency);
}
if !self.table.is_empty() {
let prev_slot_ms = *self.table.last().unwrap();
if relative_execution_time_ms < prev_slot_ms {
return Err(InvalidSlotError::SmallerThanPreviousSlot {
slot_time_ms: relative_execution_time_ms,
prev_slot_time_ms: *self.table.last().unwrap(),
});
}
}
self.table.push(relative_execution_time_ms);
Ok(())
}
}
#[derive(Debug, Error)]
pub enum TaskWithSchedulingTableError {
#[error("scheudlig table error: {0}")]
InvalidSlot(#[from] InvalidSlotError),
#[error("task lock error")]
LockError,
#[error("task borrow error")]
BorrowError,
}
pub trait DeadlineMissedHandler {
fn deadline_missed_callback(&mut self, task_name: &'static str, op_code: i32);
}
pub trait TaskExecutor {
fn with_task<F: FnOnce(&mut dyn Executable<Error = Infallible>)>(&self, f: F);
}
#[cfg(feature = "std")]
pub mod std_mod {
use core::cell::RefCell;
use std::{
rc::Rc,
sync::{Arc, Mutex},
vec::Vec,
};
use super::*;
impl TaskExecutor for Arc<Mutex<dyn Executable<Error = Infallible> + Send>> {
fn with_task<F: FnOnce(&mut dyn Executable<Error = Infallible>)>(&self, f: F) {
let mut task = self.lock().unwrap();
f(&mut *task);
}
}
impl TaskExecutor for Rc<RefCell<dyn Executable<Error = Infallible>>> {
fn with_task<F: FnOnce(&mut dyn Executable<Error = Infallible>)>(&self, f: F) {
let mut task = self.borrow_mut();
f(&mut *task);
}
}
pub struct TaskWithOpCode<T: TaskExecutor> {
task: T,
op_code: i32,
}
pub struct TaskWithSchedulingTable<T: TaskExecutor> {
start_of_slot: Instant,
end_of_slot: Instant,
deadline_missed_ms_count: u32,
table: SchedulingTable,
tasks: Vec<TaskWithOpCode<T>>,
}
impl TaskWithSchedulingTable<Rc<RefCell<dyn Executable<Error = Infallible>>>> {
/// Add a new task to the scheduling table
///
/// The task needs to be wrapped inside [Rc] and [RefCell]. The task is not sendable and
/// needs to be created inside the target thread.
pub fn add_task(
&mut self,
relative_execution_time_ms: u32,
task: Rc<RefCell<dyn Executable<Error = Infallible>>>,
op_code: i32,
) -> Result<(), TaskWithSchedulingTableError> {
self.table.add_slot(relative_execution_time_ms)?;
self.tasks.push(TaskWithOpCode { task, op_code });
Ok(())
}
}
impl TaskWithSchedulingTable<Arc<Mutex<dyn Executable<Error = Infallible> + Send>>> {
/// Add a new task to the scheduling table
///
/// The task needs to be wrapped inside [Arc] and [Mutex], but the task can be sent to
/// a different thread.
pub fn add_task_sendable(
&mut self,
relative_execution_time_ms: u32,
task: Arc<Mutex<dyn Executable<Error = Infallible> + Send>>,
op_code: i32,
) -> Result<(), TaskWithSchedulingTableError> {
self.table.add_slot(relative_execution_time_ms)?;
self.tasks.push(TaskWithOpCode { task, op_code });
Ok(())
}
}
impl<T: TaskExecutor> TaskWithSchedulingTable<T> {
pub fn new(execution_frequency: Duration) -> Self {
Self {
start_of_slot: Instant::now(),
end_of_slot: Instant::now(),
deadline_missed_ms_count: 10,
table: SchedulingTable::new(execution_frequency),
tasks: Default::default(),
}
}
/// Can be used to set the start of the slot to the current time. This is useful if a custom
/// runner implementation is used instead of the [Self::start] method.
pub fn init_start_of_slot(&mut self) {
self.start_of_slot = Instant::now();
}
pub fn run_one_task_cycle(
&mut self,
deadline_missed_cb: &mut impl DeadlineMissedHandler,
) -> Result<(), TaskWithSchedulingTableError> {
self.end_of_slot = self.start_of_slot + self.table.execution_frequency;
for (&relative_execution_time_ms, task_with_op_code) in
self.table.table.iter().zip(self.tasks.iter_mut())
{
let scheduled_execution_time = self.start_of_slot
+ core::time::Duration::from_millis(relative_execution_time_ms as u64);
let now = Instant::now();
if now < scheduled_execution_time {
std::thread::sleep(scheduled_execution_time - now);
} else if (now - scheduled_execution_time).as_millis()
> self.deadline_missed_ms_count.into()
{
task_with_op_code.task.with_task(|task| {
deadline_missed_cb
.deadline_missed_callback(task.task_name(), task_with_op_code.op_code);
});
}
task_with_op_code.task.with_task(|task| {
// Unwrapping is okay here because we constrain the tasks to be infallible.
task.periodic_op(task_with_op_code.op_code).unwrap();
});
}
let now = Instant::now();
if now <= self.end_of_slot {
let diff = self.end_of_slot - now;
std::thread::sleep(diff);
self.start_of_slot = self.end_of_slot;
} else if now > self.end_of_slot + self.table.execution_frequency {
// We are drifting badly out of sync. Set the new start time to now.
self.start_of_slot = now;
}
Ok(())
}
}
}
#[cfg(test)]
mod tests {
use core::{cell::RefCell, convert::Infallible, time::Duration};
use std::{
println,
rc::Rc,
sync::{
mpsc::{self, TryRecvError},
Arc, Mutex,
},
time::Instant,
};
use crate::executable::{Executable, OpResult};
use super::{DeadlineMissedHandler, TaskWithSchedulingTable};
#[derive(Debug)]
pub struct CallInfo {
time: std::time::Instant,
op_code: i32,
}
pub struct Task1 {
called_queue: mpsc::Sender<CallInfo>,
}
impl Executable for Task1 {
type Error = Infallible;
fn task_name(&self) -> &'static str {
"Task1"
}
fn periodic_op(&mut self, op_code: i32) -> Result<OpResult, Self::Error> {
self.called_queue
.send(CallInfo {
time: Instant::now(),
op_code,
})
.unwrap();
Ok(OpResult::Ok)
}
}
pub struct Task2 {
called_queue: mpsc::Sender<CallInfo>,
}
impl Executable for Task2 {
type Error = Infallible;
fn task_name(&self) -> &'static str {
"Task2"
}
fn periodic_op(&mut self, op_code: i32) -> Result<OpResult, Self::Error> {
self.called_queue
.send(CallInfo {
time: Instant::now(),
op_code,
})
.unwrap();
Ok(OpResult::Ok)
}
}
#[derive(Default)]
pub struct DeadlineMissed {
call_count: u32,
}
impl DeadlineMissedHandler for DeadlineMissed {
fn deadline_missed_callback(&mut self, task_name: &'static str, _op_code: i32) {
println!("task name {task_name} missed the deadline");
self.call_count += 1;
}
}
#[test]
pub fn basic_test() {
let (tx_t1, rx_t1) = mpsc::channel();
let (tx_t2, rx_t2) = mpsc::channel();
let t1 = Task1 {
called_queue: tx_t1,
};
let t2 = Task2 {
called_queue: tx_t2,
};
let mut deadline_missed_cb = DeadlineMissed::default();
let mut exec_task = TaskWithSchedulingTable::new(Duration::from_millis(200));
let t1_first_slot = Rc::new(RefCell::new(t1));
let t1_second_slot = t1_first_slot.clone();
let t2_first_slot = Rc::new(RefCell::new(t2));
let t2_second_slot = t2_first_slot.clone();
exec_task.add_task(0, t1_first_slot, 0).unwrap();
exec_task.add_task(50, t1_second_slot, -1).unwrap();
exec_task.add_task(100, t2_first_slot, 1).unwrap();
exec_task.add_task(150, t2_second_slot, 2).unwrap();
let now = Instant::now();
exec_task.init_start_of_slot();
exec_task
.run_one_task_cycle(&mut deadline_missed_cb)
.unwrap();
let mut call_info = rx_t1.try_recv().unwrap();
assert_eq!(call_info.op_code, 0);
let diff_call_to_start = call_info.time - now;
assert!(diff_call_to_start.as_millis() < 30);
call_info = rx_t1.try_recv().unwrap();
assert_eq!(call_info.op_code, -1);
let diff_call_to_start = call_info.time - now;
assert!(diff_call_to_start.as_millis() < 80);
assert!(diff_call_to_start.as_millis() >= 50);
assert!(matches!(rx_t1.try_recv().unwrap_err(), TryRecvError::Empty));
call_info = rx_t2.try_recv().unwrap();
assert_eq!(call_info.op_code, 1);
let diff_call_to_start = call_info.time - now;
assert!(diff_call_to_start.as_millis() < 120);
assert!(diff_call_to_start.as_millis() >= 100);
call_info = rx_t2.try_recv().unwrap();
assert_eq!(call_info.op_code, 2);
let diff_call_to_start = call_info.time - now;
assert!(diff_call_to_start.as_millis() < 180);
assert!(diff_call_to_start.as_millis() >= 150);
assert!(matches!(rx_t2.try_recv().unwrap_err(), TryRecvError::Empty));
assert_eq!(deadline_missed_cb.call_count, 0);
}
#[test]
pub fn basic_test_with_arc_mutex() {
let (tx_t1, rx_t1) = mpsc::channel();
let (tx_t2, rx_t2) = mpsc::channel();
let t1 = Task1 {
called_queue: tx_t1,
};
let t2 = Task2 {
called_queue: tx_t2,
};
let mut deadline_missed_cb = DeadlineMissed::default();
let mut exec_task = TaskWithSchedulingTable::new(Duration::from_millis(200));
let t1_first_slot = Arc::new(Mutex::new(t1));
let t1_second_slot = t1_first_slot.clone();
let t2_first_slot = Arc::new(Mutex::new(t2));
let t2_second_slot = t2_first_slot.clone();
exec_task.add_task_sendable(0, t1_first_slot, 0).unwrap();
exec_task.add_task_sendable(50, t1_second_slot, -1).unwrap();
exec_task.add_task_sendable(100, t2_first_slot, 1).unwrap();
exec_task.add_task_sendable(150, t2_second_slot, 2).unwrap();
let now = Instant::now();
exec_task.init_start_of_slot();
exec_task
.run_one_task_cycle(&mut deadline_missed_cb)
.unwrap();
let mut call_info = rx_t1.try_recv().unwrap();
assert_eq!(call_info.op_code, 0);
let diff_call_to_start = call_info.time - now;
assert!(diff_call_to_start.as_millis() < 30);
call_info = rx_t1.try_recv().unwrap();
assert_eq!(call_info.op_code, -1);
let diff_call_to_start = call_info.time - now;
assert!(diff_call_to_start.as_millis() < 80);
assert!(diff_call_to_start.as_millis() >= 50);
assert!(matches!(rx_t1.try_recv().unwrap_err(), TryRecvError::Empty));
call_info = rx_t2.try_recv().unwrap();
assert_eq!(call_info.op_code, 1);
let diff_call_to_start = call_info.time - now;
assert!(diff_call_to_start.as_millis() < 120);
assert!(diff_call_to_start.as_millis() >= 100);
call_info = rx_t2.try_recv().unwrap();
assert_eq!(call_info.op_code, 2);
let diff_call_to_start = call_info.time - now;
assert!(diff_call_to_start.as_millis() < 180);
assert!(diff_call_to_start.as_millis() >= 150);
assert!(matches!(rx_t2.try_recv().unwrap_err(), TryRecvError::Empty));
assert_eq!(deadline_missed_cb.call_count, 0);
}
#[test]
pub fn basic_test_in_thread() {
let mut deadline_missed_cb = DeadlineMissed::default();
std::thread::spawn(move || {
let (tx_t1, _rx_t1) = mpsc::channel();
let t1 = Task1 {
called_queue: tx_t1,
};
// Need to construct this inside the thread, the task table is not [Send]
let mut exec_task = TaskWithSchedulingTable::new(Duration::from_millis(200));
let t1_wrapper = Rc::new(RefCell::new(t1));
exec_task.add_task(0, t1_wrapper, 0).unwrap();
exec_task
.run_one_task_cycle(&mut deadline_missed_cb)
.unwrap();
});
let mut deadline_missed_cb = DeadlineMissed::default();
let (tx_t1, _rx_t1) = mpsc::channel();
let t1 = Task1 {
called_queue: tx_t1,
};
let mut exec_task_sendable = TaskWithSchedulingTable::new(Duration::from_millis(200));
exec_task_sendable
.add_task_sendable(0, Arc::new(Mutex::new(t1)), 1)
.unwrap();
std::thread::spawn(move || {
exec_task_sendable
.run_one_task_cycle(&mut deadline_missed_cb)
.unwrap();
});
}
}
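
The new `scheduling` module above validates slot times while the table is built: each slot is a millisecond offset relative to the start of one execution cycle, offsets must not decrease, and they must not exceed the cycle duration. A small sketch of that validation, assuming the crate is used with its default (std/alloc) features; the `build_table` helper is made up for illustration:

```rust
use core::time::Duration;

use satrs::scheduling::{InvalidSlotError, SchedulingTable};

// Build a 200 ms scheduling table with three slots and show the validation
// performed by `add_slot`.
fn build_table() -> Result<SchedulingTable, InvalidSlotError> {
    let mut table = SchedulingTable::new(Duration::from_millis(200));
    table.add_slot(0)?;
    table.add_slot(50)?;
    table.add_slot(150)?;
    // 250 ms lies beyond the 200 ms execution cycle, so this returns
    // `InvalidSlotError::SlotTimeLargerThanFrequency`.
    assert!(table.add_slot(250).is_err());
    Ok(table)
}

fn main() {
    build_table().expect("building the slot table failed");
}
```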

View File

@ -17,37 +17,37 @@ impl PusTmWithCdsShortHelper {
}
#[cfg(feature = "std")]
pub fn create_pus_tm_timestamp_now<'a>(
&'a mut self,
pub fn create_pus_tm_timestamp_now<'data>(
&mut self,
service: u8,
subservice: u8,
source_data: &'a [u8],
source_data: &'data [u8],
seq_count: u16,
) -> PusTmCreator {
) -> PusTmCreator<'_, 'data> {
let time_stamp = CdsTime::now_with_u16_days().unwrap();
time_stamp.write_to_bytes(&mut self.cds_short_buf).unwrap();
self.create_pus_tm_common(service, subservice, source_data, seq_count)
}
pub fn create_pus_tm_with_stamper<'a>(
&'a mut self,
pub fn create_pus_tm_with_stamper<'data>(
&mut self,
service: u8,
subservice: u8,
source_data: &'a [u8],
source_data: &'data [u8],
stamper: &CdsTime,
seq_count: u16,
) -> PusTmCreator {
) -> PusTmCreator<'_, 'data> {
stamper.write_to_bytes(&mut self.cds_short_buf).unwrap();
self.create_pus_tm_common(service, subservice, source_data, seq_count)
}
fn create_pus_tm_common<'a>(
&'a self,
fn create_pus_tm_common<'data>(
&self,
service: u8,
subservice: u8,
source_data: &'a [u8],
source_data: &'data [u8],
seq_count: u16,
) -> PusTmCreator {
) -> PusTmCreator<'_, 'data> {
let reply_header = SpHeader::new_for_unseg_tm(self.apid, seq_count, 0);
let tc_header = PusTmSecondaryHeader::new_simple(service, subservice, &self.cds_short_buf);
PusTmCreator::new(reply_header, tc_header, source_data, true)