Add Small Event Support #18

Merged
muellerr merged 5 commits from mueller/continue-event-pus-manager into main 2022-10-23 18:41:14 +02:00
4 changed files with 527 additions and 137 deletions

View File

@ -1,5 +1,5 @@
//! [Event][crate::events::Event] management and forwarding
use crate::events::{Event, EventRaw, GroupId};
use crate::events::{Event, EventProvider, EventSmall};
use alloc::boxed::Box;
use alloc::vec;
use alloc::vec::Vec;
@ -7,63 +7,86 @@ use hashbrown::HashMap;
#[derive(PartialEq, Eq, Hash, Copy, Clone)]
enum ListenerType {
Single(EventRaw),
Group(GroupId),
Single(u32),
Group(u16),
}
pub trait EventListener {
pub trait EventListener<Provider: EventProvider = Event> {
type Error;
fn id(&self) -> u32;
fn send_to(&mut self, event: Event) -> Result<(), Self::Error>;
fn send_to(&mut self, event: Provider) -> Result<(), Self::Error>;
}
struct Listener<E> {
struct Listener<E, Provider: EventProvider = Event> {
ltype: ListenerType,
dest: Box<dyn EventListener<Error = E>>,
dest: Box<dyn EventListener<Provider, Error = E>>,
}
pub trait ReceivesAllEvent {
fn receive(&mut self) -> Option<Event>;
pub trait ReceivesAllEvent<Provider: EventProvider = Event> {
fn receive(&mut self) -> Option<Provider>;
}
pub struct EventManager<E> {
listeners: HashMap<ListenerType, Vec<Listener<E>>>,
event_receiver: Box<dyn ReceivesAllEvent>,
pub struct EventManager<E, Provider: EventProvider = Event> {
listeners: HashMap<ListenerType, Vec<Listener<E, Provider>>>,
event_receiver: Box<dyn ReceivesAllEvent<Provider>>,
}
pub enum HandlerResult {
pub enum HandlerResult<Provider: EventProvider = Event> {
Empty,
Handled(u32, Event),
Handled(u32, Provider),
}
impl<E> EventManager<E> {
pub fn new(event_receiver: Box<dyn ReceivesAllEvent>) -> Self {
impl<E> EventManager<E, Event> {
pub fn new(event_receiver: Box<dyn ReceivesAllEvent<Event>>) -> Self {
EventManager {
listeners: HashMap::new(),
event_receiver,
}
}
}
impl<E> EventManager<E, Event> {
pub fn subscribe_single(
&mut self,
event: Event,
dest: impl EventListener<Error = E> + 'static,
dest: impl EventListener<Event, Error = E> + 'static,
) {
self.update_listeners(ListenerType::Single(event.raw()), dest);
self.update_listeners(ListenerType::Single(event.raw_as_largest_type()), dest);
}
pub fn subscribe_group(
&mut self,
group_id: GroupId,
dest: impl EventListener<Error = E> + 'static,
group_id: <Event as EventProvider>::GroupId,
dest: impl EventListener<Event, Error = E> + 'static,
) {
self.update_listeners(ListenerType::Group(group_id), dest);
}
}
impl<E> EventManager<E, EventSmall> {
pub fn subscribe_single(
&mut self,
event: EventSmall,
dest: impl EventListener<EventSmall, Error = E> + 'static,
) {
self.update_listeners(ListenerType::Single(event.raw_as_largest_type()), dest);
}
pub fn subscribe_group(
&mut self,
group_id: <EventSmall as EventProvider>::GroupId,
dest: impl EventListener<EventSmall, Error = E> + 'static,
) {
self.update_listeners(ListenerType::Group(group_id.into()), dest);
}
}
impl<E, Provider: EventProvider> EventManager<E, Provider> {
fn update_listeners(
&mut self,
key: ListenerType,
dest: impl EventListener<Error = E> + 'static,
dest: impl EventListener<Provider, Error = E> + 'static,
) {
if !self.listeners.contains_key(&key) {
self.listeners.insert(
@ -88,10 +111,10 @@ impl<E> EventManager<E> {
}
}
pub fn try_event_handling(&mut self) -> Result<HandlerResult, E> {
pub fn try_event_handling(&mut self) -> Result<HandlerResult<Provider>, E> {
let mut err_status = None;
let mut num_recipients = 0;
let mut send_handler = |event, llist: &mut Vec<Listener<E>>| {
let mut send_handler = |event: Provider, llist: &mut Vec<Listener<E, Provider>>| {
for listener in llist.iter_mut() {
if let Err(e) = listener.dest.send_to(event) {
err_status = Some(Err(e));
@ -101,11 +124,11 @@ impl<E> EventManager<E> {
}
};
if let Some(event) = self.event_receiver.receive() {
let single_key = ListenerType::Single(event.raw());
let single_key = ListenerType::Single(event.raw_as_largest_type());
if self.listeners.contains_key(&single_key) {
send_handler(event, self.listeners.get_mut(&single_key).unwrap());
}
let group_key = ListenerType::Group(event.group_id());
let group_key = ListenerType::Group(event.group_id_as_largest_type());
if self.listeners.contains_key(&group_key) {
send_handler(event, self.listeners.get_mut(&group_key).unwrap());
}
@ -122,7 +145,7 @@ impl<E> EventManager<E> {
mod tests {
use super::{EventListener, HandlerResult, ReceivesAllEvent};
use crate::event_man::EventManager;
use crate::events::{Event, Severity};
use crate::events::{Event, EventProvider, Severity};
use alloc::boxed::Box;
use std::sync::mpsc::{channel, Receiver, SendError, Sender};
use std::thread;
@ -131,7 +154,7 @@ mod tests {
struct EventReceiver {
mpsc_receiver: Receiver<Event>,
}
impl ReceivesAllEvent for EventReceiver {
impl ReceivesAllEvent<Event> for EventReceiver {
fn receive(&mut self) -> Option<Event> {
self.mpsc_receiver.try_recv().ok()
}
@ -143,7 +166,7 @@ mod tests {
mpsc_sender: Sender<Event>,
}
impl EventListener for MpscEventSenderQueue {
impl EventListener<Event> for MpscEventSenderQueue {
type Error = SendError<Event>;
fn id(&self) -> u32 {
@ -178,7 +201,7 @@ mod tests {
let event_man_receiver = EventReceiver {
mpsc_receiver: manager_queue,
};
let mut event_man: EventManager<SendError<Event>> =
let mut event_man: EventManager<SendError<Event>, Event> =
EventManager::new(Box::new(event_man_receiver));
let event_grp_0 = Event::new(Severity::INFO, 0, 0).unwrap();
let event_grp_1_0 = Event::new(Severity::HIGH, 1, 0).unwrap();
@ -221,7 +244,7 @@ mod tests {
let event_man_receiver = EventReceiver {
mpsc_receiver: manager_queue,
};
let mut event_man: EventManager<SendError<Event>> =
let mut event_man: EventManager<SendError<Event>, Event> =
EventManager::new(Box::new(event_man_receiver));
let res = event_man.try_event_handling();
assert!(res.is_ok());
@ -263,7 +286,7 @@ mod tests {
let event_man_receiver = EventReceiver {
mpsc_receiver: manager_queue,
};
let mut event_man: EventManager<SendError<Event>> =
let mut event_man: EventManager<SendError<Event>, Event> =
EventManager::new(Box::new(event_man_receiver));
let event_0 = Event::new(Severity::INFO, 0, 5).unwrap();
let event_1 = Event::new(Severity::HIGH, 1, 0).unwrap();

View File

@ -1,18 +1,35 @@
//! Event support module
use spacepackets::ecss::EcssEnumeration;
use core::hash::Hash;
use spacepackets::ecss::{EcssEnumeration, ToBeBytes};
use spacepackets::{ByteConversionError, SizeMissmatch};
use std::marker::PhantomData;
pub type GroupId = u16;
pub type UniqueId = u16;
pub type EventRaw = u32;
/// Using a type definition makes it easier to change this to u64 in the future
pub type LargestEventRaw = u32;
/// Using a type definition makes it easier to change this to u32 in the future
pub type LargestGroupIdRaw = u16;
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub enum Severity {
INFO = 1,
LOW = 2,
MEDIUM = 3,
HIGH = 4,
INFO = 0,
LOW = 1,
MEDIUM = 2,
HIGH = 3,
}
pub trait EventProvider: PartialEq + Eq + Copy + Clone + Hash {
type Raw;
type GroupId;
type UniqueId;
fn raw(&self) -> Self::Raw;
fn severity(&self) -> Severity;
fn group_id(&self) -> Self::GroupId;
fn unique_id(&self) -> Self::UniqueId;
fn raw_as_largest_type(&self) -> LargestEventRaw;
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw;
}
impl TryFrom<u8> for Severity {
@ -30,10 +47,127 @@ impl TryFrom<u8> for Severity {
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct Event {
pub struct EventBase<RAW, GID, UID> {
severity: Severity,
group_id: GroupId,
unique_id: UniqueId,
group_id: GID,
unique_id: UID,
phantom: PhantomData<RAW>,
}
impl<RAW: ToBeBytes, GID, UID> EventBase<RAW, GID, UID> {
fn write_to_bytes(
&self,
raw: RAW,
buf: &mut [u8],
width: usize,
) -> Result<(), ByteConversionError> {
if buf.len() < width {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: width,
}));
}
buf.copy_from_slice(raw.to_be_bytes().as_ref());
Ok(())
}
}
impl EventBase<u32, u16, u16> {
#[inline]
fn raw(&self) -> u32 {
(((self.severity as u32) << 30) | ((self.group_id as u32) << 16) | self.unique_id as u32)
as u32
}
}
impl EventBase<u16, u8, u8> {
#[inline]
fn raw(&self) -> u16 {
(((self.severity as u16) << 14) as u16
| ((self.group_id as u16) << 8) as u16
| self.unique_id as u16) as u16
}
}
impl<RAW, GID, UID> EventBase<RAW, GID, UID> {
#[inline]
pub fn severity(&self) -> Severity {
self.severity
}
}
impl<RAW, GID> EventBase<RAW, GID, u16> {
#[inline]
pub fn unique_id(&self) -> u16 {
self.unique_id
}
}
impl<RAW, GID> EventBase<RAW, GID, u8> {
#[inline]
pub fn unique_id(&self) -> u8 {
self.unique_id
}
}
impl<RAW, UID> EventBase<RAW, u16, UID> {
#[inline]
pub fn group_id(&self) -> u16 {
self.group_id
}
}
impl<RAW, UID> EventBase<RAW, u8, UID> {
#[inline]
pub fn group_id(&self) -> u8 {
self.group_id
}
}
macro_rules! event_provider_impl {
() => {
#[inline]
fn raw(&self) -> Self::Raw {
self.base.raw()
}
/// Retrieve the severity of an event. The severity is always valid because it is
/// checked on construction, so this returns a plain [Severity] rather than an Option
#[inline]
fn severity(&self) -> Severity {
self.base.severity()
}
#[inline]
fn group_id(&self) -> Self::GroupId {
self.base.group_id()
}
#[inline]
fn unique_id(&self) -> Self::UniqueId {
self.base.unique_id()
}
};
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct Event {
base: EventBase<u32, u16, u16>,
}
impl EventProvider for Event {
type Raw = u32;
type GroupId = u16;
type UniqueId = u16;
event_provider_impl!();
fn raw_as_largest_type(&self) -> LargestEventRaw {
self.raw()
}
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw {
self.group_id()
}
}
impl Event {
@ -43,54 +177,58 @@ impl Event {
/// # Parameter
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 3 bits of the raw event ID
/// be stored inside the uppermost 2 bits of the raw event ID
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 13 bits after the severity. Therefore, the size is limited by dec 8191 hex 0x1FFF.
/// next 14 bits after the severity. Therefore, the size is limited by dec 16383 hex 0x3FFF.
/// * `unique_id`: Each event has a unique 16 bit ID occupying the last 16 bits of the
/// raw event ID
pub fn new(severity: Severity, group_id: GroupId, unique_id: UniqueId) -> Option<Event> {
if group_id > (2u16.pow(13) - 1) {
pub fn new(
severity: Severity,
group_id: <Self as EventProvider>::GroupId,
unique_id: <Self as EventProvider>::UniqueId,
) -> Option<Self> {
if group_id > (2u16.pow(14) - 1) {
return None;
}
Some(Event {
severity,
group_id,
unique_id,
Some(Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
})
}
/// Retrieve the severity of an event. Returns None if that severity bit field of the raw event
/// ID is invalid
pub fn severity(&self) -> Severity {
self.severity
}
pub fn group_id(&self) -> GroupId {
self.group_id
}
pub fn unique_id(&self) -> UniqueId {
self.unique_id
}
pub fn raw(&self) -> EventRaw {
(((self.severity as u32) << 29) as u32
| ((self.group_id as u32) << 16) as u32
| self.unique_id as u32) as EventRaw
/// Const version of [new], but panics if the passed group ID is invalid (too large)
pub const fn const_new(
severity: Severity,
group_id: <Self as EventProvider>::GroupId,
unique_id: <Self as EventProvider>::UniqueId,
) -> Self {
if group_id > (2u16.pow(14) - 1) {
panic!("Group ID too large");
}
Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
}
}
}
impl TryFrom<EventRaw> for Event {
type Error = ();
fn try_from(raw: u32) -> Result<Self, Self::Error> {
let severity: Option<Severity> = (((raw >> 29) & 0b111) as u8).try_into().ok();
if severity.is_none() {
return Err(());
}
let group_id = ((raw >> 16) & 0x1FFF) as u16;
impl From<u32> for Event {
fn from(raw: u32) -> Self {
// Severity conversion from u8 should never fail
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
let group_id = ((raw >> 16) & 0x3FFF) as u16;
let unique_id = (raw & 0xFFFF) as u16;
Event::new(severity.unwrap(), group_id, unique_id).ok_or(())
// Sanitized input, should never fail
Self::const_new(severity, group_id, unique_id)
}
}
@ -100,53 +238,274 @@ impl EcssEnumeration for Event {
}
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<(), ByteConversionError> {
if buf.len() < self.byte_width() {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: self.byte_width(),
}));
self.base.write_to_bytes(self.raw(), buf, self.byte_width())
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EventSmall {
base: EventBase<u16, u8, u8>,
}
impl EventSmall {
/// Generate a small event. The raw representation of a small event has 16 bits.
/// If the passed group ID is invalid (too large), [None] will be returned
///
/// # Parameter
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 2 bits of the raw event ID
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 6 bits after the severity. Therefore, the size is limited by dec 63 hex 0x3F.
/// * `unique_id`: Each event has a unique 8 bit ID occupying the last 8 bits of the
/// raw event ID
pub fn new(
severity: Severity,
group_id: <Self as EventProvider>::GroupId,
unique_id: <Self as EventProvider>::UniqueId,
) -> Option<Self> {
if group_id > (2u8.pow(6) - 1) {
return None;
}
buf.copy_from_slice(self.raw().to_be_bytes().as_slice());
Ok(())
Some(Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: Default::default(),
},
})
}
pub const fn const_new(
severity: Severity,
group_id: <Self as EventProvider>::GroupId,
unique_id: <Self as EventProvider>::UniqueId,
) -> Self {
if group_id > (2u8.pow(6) - 1) {
panic!("Group ID too large");
}
Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
}
}
}
impl EventProvider for EventSmall {
type Raw = u16;
type GroupId = u8;
type UniqueId = u8;
event_provider_impl!();
fn raw_as_largest_type(&self) -> LargestEventRaw {
self.raw().into()
}
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw {
self.group_id().into()
}
}
impl EcssEnumeration for EventSmall {
fn pfc(&self) -> u8 {
16
}
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<(), ByteConversionError> {
self.base.write_to_bytes(self.raw(), buf, self.byte_width())
}
}
impl From<u16> for EventSmall {
fn from(raw: <Self as EventProvider>::Raw) -> Self {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
let group_id = ((raw >> 8) & 0x3F) as u8;
let unique_id = (raw & 0xFF) as u8;
// Sanitized input, new call should never fail
Self::const_new(severity, group_id, unique_id)
}
}
#[cfg(test)]
mod tests {
use super::Event;
use crate::events::Severity;
use crate::events::{EventProvider, EventSmall, Severity};
use spacepackets::ecss::EcssEnumeration;
use spacepackets::ByteConversionError;
use std::mem::size_of;
fn assert_size<T>(_: T, val: usize) {
assert_eq!(size_of::<T>(), val);
}
const INFO_EVENT: Event = Event::const_new(Severity::INFO, 0, 0);
const INFO_EVENT_SMALL: EventSmall = EventSmall::const_new(Severity::INFO, 0, 0);
const HIGH_SEV_EVENT: Event = Event::const_new(Severity::HIGH, 0x3FFF, 0xFFFF);
const HIGH_SEV_EVENT_SMALL: EventSmall = EventSmall::const_new(Severity::HIGH, 0x3F, 0xff);
#[test]
fn test_events() {
let event = Event::new(Severity::INFO, 0, 0).unwrap();
assert_eq!(event.severity(), Severity::INFO);
assert_eq!(event.unique_id(), 0);
assert_eq!(event.group_id(), 0);
fn test_normal_from_raw_conversion() {
let conv_from_raw = Event::from(INFO_EVENT.raw());
assert_eq!(conv_from_raw, INFO_EVENT);
}
let raw_event = event.raw();
assert_eq!(raw_event, 0x20000000);
let conv_from_raw = Event::try_from(raw_event);
assert!(conv_from_raw.is_ok());
let opt_event = conv_from_raw.ok();
assert!(opt_event.is_some());
let event = opt_event.unwrap();
assert_eq!(event.severity(), Severity::INFO);
assert_eq!(event.unique_id(), 0);
assert_eq!(event.group_id(), 0);
#[test]
fn test_small_from_raw_conversion() {
let conv_from_raw = EventSmall::from(INFO_EVENT_SMALL.raw());
assert_eq!(conv_from_raw, INFO_EVENT_SMALL);
}
let event = Event::new(Severity::HIGH, 0x1FFF, 0xFFFF).unwrap();
assert_eq!(event.severity(), Severity::HIGH);
assert_eq!(event.group_id(), 0x1FFF);
assert_eq!(event.unique_id(), 0xFFFF);
let raw_event = event.raw();
assert_eq!(raw_event, 0x9FFFFFFF);
let conv_from_raw = Event::try_from(raw_event);
assert!(conv_from_raw.is_ok());
let opt_event = conv_from_raw.ok();
assert!(opt_event.is_some());
let event = opt_event.unwrap();
assert_eq!(event.severity(), Severity::HIGH);
assert_eq!(event.group_id(), 0x1FFF);
assert_eq!(event.unique_id(), 0xFFFF);
#[test]
fn verify_normal_size() {
assert_size(INFO_EVENT.raw(), 4)
}
#[test]
fn verify_small_size() {
assert_size(INFO_EVENT_SMALL.raw(), 2)
}
#[test]
fn test_normal_event_getters() {
assert_eq!(INFO_EVENT.severity(), Severity::INFO);
assert_eq!(INFO_EVENT.unique_id(), 0);
assert_eq!(INFO_EVENT.group_id(), 0);
let raw_event = INFO_EVENT.raw();
assert_eq!(raw_event, 0x00000000);
}
#[test]
fn test_small_event_getters() {
assert_eq!(INFO_EVENT_SMALL.severity(), Severity::INFO);
assert_eq!(INFO_EVENT_SMALL.unique_id(), 0);
assert_eq!(INFO_EVENT_SMALL.group_id(), 0);
let raw_event = INFO_EVENT_SMALL.raw();
assert_eq!(raw_event, 0x00000000);
}
#[test]
fn all_ones_event_regular() {
assert_eq!(HIGH_SEV_EVENT.severity(), Severity::HIGH);
assert_eq!(HIGH_SEV_EVENT.group_id(), 0x3FFF);
assert_eq!(HIGH_SEV_EVENT.unique_id(), 0xFFFF);
let raw_event = HIGH_SEV_EVENT.raw();
assert_eq!(raw_event, 0xFFFFFFFF);
}
#[test]
fn all_ones_event_small() {
assert_eq!(HIGH_SEV_EVENT_SMALL.severity(), Severity::HIGH);
assert_eq!(HIGH_SEV_EVENT_SMALL.group_id(), 0x3F);
assert_eq!(HIGH_SEV_EVENT_SMALL.unique_id(), 0xFF);
let raw_event = HIGH_SEV_EVENT_SMALL.raw();
assert_eq!(raw_event, 0xFFFF);
}
#[test]
fn invalid_group_id_normal() {
assert!(Event::new(Severity::MEDIUM, 2_u16.pow(14), 0).is_none());
}
#[test]
fn invalid_group_id_small() {
assert!(EventSmall::new(Severity::MEDIUM, 2_u8.pow(6), 0).is_none());
}
#[test]
fn regular_new() {
assert_eq!(
Event::new(Severity::INFO, 0, 0).expect("Creating regular event failed"),
INFO_EVENT
);
}
#[test]
fn small_new() {
assert_eq!(
EventSmall::new(Severity::INFO, 0, 0).expect("Creating regular event failed"),
INFO_EVENT_SMALL
);
}
#[test]
fn as_largest_type() {
let event_raw = HIGH_SEV_EVENT.raw_as_largest_type();
assert_size(event_raw, 4);
assert_eq!(event_raw, 0xFFFFFFFF);
}
#[test]
fn as_largest_type_for_small_event() {
let event_raw = HIGH_SEV_EVENT_SMALL.raw_as_largest_type();
assert_size(event_raw, 4);
assert_eq!(event_raw, 0xFFFF);
}
#[test]
fn as_largest_group_id() {
let group_id = HIGH_SEV_EVENT.group_id_as_largest_type();
assert_size(group_id, 2);
assert_eq!(group_id, 0x3FFF);
}
#[test]
fn as_largest_group_id_small_event() {
let group_id = HIGH_SEV_EVENT_SMALL.group_id_as_largest_type();
assert_size(group_id, 2);
assert_eq!(group_id, 0x3F);
}
#[test]
fn write_to_buf() {
let mut buf: [u8; 4] = [0; 4];
assert!(HIGH_SEV_EVENT.write_to_bytes(&mut buf).is_ok());
let val_from_raw = u32::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFFFFFF);
}
#[test]
fn write_to_buf_small() {
let mut buf: [u8; 2] = [0; 2];
assert!(HIGH_SEV_EVENT_SMALL.write_to_bytes(&mut buf).is_ok());
let val_from_raw = u16::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFF);
}
#[test]
fn write_to_buf_insufficient_buf() {
let mut buf: [u8; 3] = [0; 3];
let err = HIGH_SEV_EVENT.write_to_bytes(&mut buf);
assert!(err.is_err());
let err = err.unwrap_err();
if let ByteConversionError::ToSliceTooSmall(missmatch) = err {
assert_eq!(missmatch.expected, 4);
assert_eq!(missmatch.found, 3);
}
}
#[test]
fn write_to_buf_small_insufficient_buf() {
let mut buf: [u8; 1] = [0; 1];
let err = HIGH_SEV_EVENT_SMALL.write_to_bytes(&mut buf);
assert!(err.is_err());
let err = err.unwrap_err();
if let ByteConversionError::ToSliceTooSmall(missmatch) = err {
assert_eq!(missmatch.expected, 2);
assert_eq!(missmatch.found, 1);
}
}
#[test]
fn severity_from_invalid_raw_val() {
let invalid = 0xFF;
assert!(Severity::try_from(invalid).is_err());
let invalid = Severity::HIGH as u8 + 1;
assert!(Severity::try_from(invalid).is_err());
}
}

View File

@ -1,4 +1,4 @@
use crate::events::Event;
use crate::events::EventProvider;
use hashbrown::HashSet;
#[cfg(feature = "heapless")]
@ -14,12 +14,12 @@ pub use heapless_mod::*;
/// structure to track disabled events. A more primitive and embedded friendly
/// solution could track this information in a static or pre-allocated list which contains
/// the disabled events.
pub trait PusEventMgmtBackendProvider {
pub trait PusEventMgmtBackendProvider<Provider: EventProvider> {
type Error;
fn event_enabled(&self, event: &Event) -> bool;
fn enable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error>;
fn disable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error>;
fn event_enabled(&self, event: &Provider) -> bool;
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error>;
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error>;
}
/// Default backend provider which uses a hash set as the event reporting status container
@ -28,21 +28,23 @@ pub trait PusEventMgmtBackendProvider {
/// This provider is a good option for host systems or larger embedded systems where
/// the expected occasional memory allocation performed by the [HashSet] is not an issue.
#[derive(Default)]
pub struct DefaultPusMgmtBackendProvider {
disabled: HashSet<Event>,
pub struct DefaultPusMgmtBackendProvider<Provider: EventProvider> {
disabled: HashSet<Provider>,
}
impl PusEventMgmtBackendProvider for DefaultPusMgmtBackendProvider {
impl<Provider: EventProvider> PusEventMgmtBackendProvider<Provider>
for DefaultPusMgmtBackendProvider<Provider>
{
type Error = ();
fn event_enabled(&self, event: &Event) -> bool {
fn event_enabled(&self, event: &Provider) -> bool {
!self.disabled.contains(event)
}
fn enable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
Ok(self.disabled.remove(event))
}
fn disable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
Ok(self.disabled.insert(*event))
}
}
@ -50,28 +52,34 @@ impl PusEventMgmtBackendProvider for DefaultPusMgmtBackendProvider {
#[cfg(feature = "heapless")]
pub mod heapless_mod {
use super::*;
use crate::events::EventRaw;
use crate::events::{EventProvider, LargestEventRaw};
use std::marker::PhantomData;
// TODO: After a new version of heapless is released which uses hash32 version 0.3, try using
// regular Event type again.
#[derive(Default)]
pub struct HeaplessPusMgmtBckendProvider<const N: usize> {
disabled: heapless::FnvIndexSet<EventRaw, N>,
pub struct HeaplessPusMgmtBckendProvider<const N: usize, Provider: EventProvider> {
disabled: heapless::FnvIndexSet<LargestEventRaw, N>,
phantom: PhantomData<Provider>,
}
impl<const N: usize> PusEventMgmtBackendProvider for HeaplessPusMgmtBckendProvider<N> {
impl<const N: usize, Provider: EventProvider> PusEventMgmtBackendProvider<Provider>
for HeaplessPusMgmtBckendProvider<N, Provider>
{
type Error = ();
fn event_enabled(&self, event: &Event) -> bool {
self.disabled.contains(&event.raw())
fn event_enabled(&self, event: &Provider) -> bool {
self.disabled.contains(&event.raw_as_largest_type())
}
fn enable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
self.disabled.insert(event.raw()).map_err(|_| ())
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
self.disabled
.insert(event.raw_as_largest_type())
.map_err(|_| ())
}
fn disable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
Ok(self.disabled.remove(&event.raw()))
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
Ok(self.disabled.remove(&event.raw_as_largest_type()))
}
}
}

@ -1 +1 @@
Subproject commit 603f688ac3e914de13037fd22ac544e125b6305b
Subproject commit a2673c98707ecbbabb9535bef607025c92b54724