diff --git a/justfile b/justfile
index 5e00fa6..f61f920 100644
--- a/justfile
+++ b/justfile
@@ -14,12 +14,12 @@ test:
 
 embedded:
     cargo check -p satrs --target=thumbv7em-none-eabihf --no-default-features
 
-fmt:
-    cargo fmt --all
-
 check-fmt:
     cargo fmt --all -- --check
 
+fmt:
+    cargo fmt --all
+
 clippy:
     cargo clippy -- -D warnings
 
diff --git a/satrs/src/ccsds/scheduler.rs b/satrs/src/ccsds/scheduler.rs
index c4fdb18..b8d79e7 100644
--- a/satrs/src/ccsds/scheduler.rs
+++ b/satrs/src/ccsds/scheduler.rs
@@ -3,7 +3,7 @@ use core::{hash::Hash, time::Duration};
 #[cfg(feature = "alloc")]
 pub use alloc_mod::*;
 use spacepackets::{
-    ByteConversionError, PacketId, PacketSequenceControl,
+    ByteConversionError, CcsdsPacketIdAndPsc,
     time::{TimestampError, UnixTime},
 };
 
@@ -24,6 +24,8 @@ pub enum ScheduleError {
     NestedScheduledTc,
     #[error("tc data empty")]
     TcDataEmpty,
+    #[error("scheduler is full")]
+    Full,
     #[error("timestamp error: {0}")]
     TimestampError(#[from] TimestampError),
     #[error("wrong subservice number {0}")]
@@ -36,33 +38,32 @@ pub enum ScheduleError {
 
 #[derive(Debug, PartialEq, Eq, Clone)]
 #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
-pub struct CcsdsPacketId {
-    pub packet_id: PacketId,
-    pub psc: PacketSequenceControl,
-    pub crc16: u16,
+pub struct CcsdsSchedulePacketId {
+    pub base: CcsdsPacketIdAndPsc,
+    pub crc16: Option<u16>,
 }
 
-impl Hash for CcsdsPacketId {
+impl Hash for CcsdsSchedulePacketId {
     fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
-        self.packet_id.hash(state);
-        self.psc.raw().hash(state);
+        self.base.hash(state);
         self.crc16.hash(state);
     }
 }
 
+#[cfg(feature = "alloc")]
 pub mod alloc_mod {
     use core::time::Duration;
     #[cfg(feature = "std")]
    use std::time::SystemTimeError;
 
-    use spacepackets::time::UnixTime;
+    use spacepackets::{CcsdsPacketIdAndPsc, CcsdsPacketReader, time::UnixTime};
 
-    use crate::ccsds::scheduler::CcsdsPacketId;
+    use crate::ccsds::scheduler::CcsdsSchedulePacketId;
 
     pub struct CcsdsScheduler {
         tc_map: alloc::collections::BTreeMap<
             UnixTime,
-            alloc::vec::Vec<(CcsdsPacketId, alloc::vec::Vec<u8>)>,
+            alloc::vec::Vec<(CcsdsSchedulePacketId, alloc::vec::Vec<u8>)>,
         >,
         packet_limit: usize,
         pub(crate) current_time: UnixTime,
@@ -117,13 +118,30 @@ pub mod alloc_mod {
             &self.current_time
         }
 
+        pub fn insert_telecommand_with_reader(
+            &mut self,
+            reader: &CcsdsPacketReader,
+            release_time: UnixTime,
+        ) -> Result<(), super::ScheduleError> {
+            if self.num_of_entries() + 1 >= self.packet_limit {
+                return Err(super::ScheduleError::Full);
+            }
+            let base_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(reader);
+
+            Ok(())
+        }
+
         // TODO: Implementation
         pub fn insert_telecommand(
             &mut self,
-            packet_id: CcsdsPacketId,
-            packet: alloc::vec::Vec<u8>,
+            packet_id: CcsdsSchedulePacketId,
+            raw_packet: &[u8],
             release_time: UnixTime,
-        ) {
+        ) -> Result<(), super::ScheduleError> {
+            if self.num_of_entries() + 1 >= self.packet_limit {
+                return Err(super::ScheduleError::Full);
+            }
+            Ok(())
         }
     }
 }
diff --git a/satrs/src/lib.rs b/satrs/src/lib.rs
index 5930f68..5878bd6 100644
--- a/satrs/src/lib.rs
+++ b/satrs/src/lib.rs
@@ -23,6 +23,7 @@ extern crate downcast_rs;
 extern crate std;
 
 pub mod action;
+pub mod ccsds;
 #[cfg(feature = "alloc")]
 pub mod dev_mgmt;
 pub mod encoding;
@@ -51,7 +52,6 @@ pub mod scheduling;
 pub mod subsystem;
 pub mod time;
 pub mod tmtc;
-pub mod ccsds;
 
 pub use spacepackets;
 
diff --git a/satrs/src/pool.rs b/satrs/src/pool.rs
index c0c3c09..ac86781 100644
--- a/satrs/src/pool.rs
+++ b/satrs/src/pool.rs
@@ -155,73 +155,34 @@ impl Display for StoreIdError {
 #[cfg(feature = "std")]
 impl Error for StoreIdError {}
 
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
 pub enum PoolError {
     /// Requested data block is too large
+    #[error("data to store with size {0} is too large")]
     DataTooLarge(usize),
     /// The store is full. Contains the index of the full subpool
+    #[error("store is too full. index for full subpool: {0}")]
     StoreFull(u16),
     /// The store can not hold any data.
+    #[error("store does not have any capacity")]
     NoCapacity,
     /// Store ID is invalid. This also includes partial errors where only the subpool is invalid
+    #[error("invalid store ID: {0}, address: {1:?}")]
     InvalidStoreId(StoreIdError, Option<PoolAddr>),
     /// Valid subpool and packet index, but no data is stored at the given address
+    #[error("no data exists at address {0:?}")]
     DataDoesNotExist(PoolAddr),
-    ByteConversionError(spacepackets::ByteConversionError),
+    #[error("byte conversion error: {0}")]
+    ByteConversion(#[from] spacepackets::ByteConversionError),
+    #[error("lock error")]
     LockError,
     /// Internal or configuration errors
+    #[error("internal error: {0}")]
     InternalError(u32),
 }
 
-impl Display for PoolError {
-    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
-        match self {
-            PoolError::DataTooLarge(size) => {
-                write!(f, "data to store with size {size} is too large")
-            }
-            PoolError::NoCapacity => {
-                write!(f, "store does not have any capacity")
-            }
-            PoolError::StoreFull(u16) => {
-                write!(f, "store is too full. index for full subpool: {u16}")
-            }
-            PoolError::InvalidStoreId(id_e, addr) => {
-                write!(f, "invalid store ID: {id_e}, address: {addr:?}")
-            }
-            PoolError::DataDoesNotExist(addr) => {
-                write!(f, "no data exists at address {addr:?}")
-            }
-            PoolError::InternalError(e) => {
-                write!(f, "internal error: {e}")
-            }
-            PoolError::ByteConversionError(e) => {
-                write!(f, "store error: {e}")
-            }
-            PoolError::LockError => {
-                write!(f, "lock error")
-            }
-        }
-    }
-}
-
-impl From<ByteConversionError> for PoolError {
-    fn from(value: ByteConversionError) -> Self {
-        Self::ByteConversionError(value)
-    }
-}
-
-#[cfg(feature = "std")]
-impl Error for PoolError {
-    fn source(&self) -> Option<&(dyn Error + 'static)> {
-        if let PoolError::InvalidStoreId(e, _) = self {
-            return Some(e);
-        }
-        None
-    }
-}
-
 /// Generic trait for pool providers which provide memory pools for variable sized packet data.
 ///
 /// It specifies a basic API to [Self::add], [Self::modify], [Self::read] and [Self::delete] data