Add Small Event Support #18

Merged
muellerr merged 5 commits from mueller/continue-event-pus-manager into main 2022-10-23 18:41:14 +02:00
4 changed files with 414 additions and 151 deletions
Showing only changes of commit dd2929eb98

View File

@@ -52,7 +52,7 @@ impl<E> EventManager<E, Event> {
event: Event,
dest: impl EventListener<Event, Error = E> + 'static,
) {
- self.update_listeners(ListenerType::Single(event.raw_as_u32()), dest);
self.update_listeners(ListenerType::Single(event.raw_as_largest_type()), dest);
}

pub fn subscribe_group(
@@ -68,9 +68,9 @@ impl<E> EventManager<E, EventSmall> {
pub fn subscribe_single(
&mut self,
event: EventSmall,
- dest: impl EventListener<Event, Error = E> + 'static,
dest: impl EventListener<EventSmall, Error = E> + 'static,
) {
- self.update_listeners(ListenerType::Single(event.raw_as_u32()), dest);
self.update_listeners(ListenerType::Single(event.raw_as_largest_type()), dest);
}

pub fn subscribe_group(
@@ -81,6 +81,7 @@ impl<E> EventManager<E, EventSmall> {
self.update_listeners(ListenerType::Group(group_id.into()), dest);
}
}

impl<E, Provider: EventProvider> EventManager<E, Provider> {
fn update_listeners(
&mut self,
@@ -123,11 +124,11 @@ impl<E, Provider: EventProvider> EventManager<E, Provider> {
}
};
if let Some(event) = self.event_receiver.receive() {
- let single_key = ListenerType::Single(event.raw_as_u32());
let single_key = ListenerType::Single(event.raw_as_largest_type());
if self.listeners.contains_key(&single_key) {
send_handler(event, self.listeners.get_mut(&single_key).unwrap());
}
- let group_key = ListenerType::Group(event.group_id_as_u16());
let group_key = ListenerType::Group(event.group_id_as_largest_type());
if self.listeners.contains_key(&group_key) {
send_handler(event, self.listeners.get_mut(&group_key).unwrap());
}
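The single/group listener keys above are derived through the new widening helpers, so ListenerType stays keyed on plain u32/u16 values no matter which event provider the manager is instantiated with. A minimal sketch of what those helpers return (using the const_new constructors added later in this PR; imports assumed to live in crate::events):

use crate::events::{Event, EventProvider, EventSmall, Severity};

let normal = Event::const_new(Severity::INFO, 2, 1);
let small = EventSmall::const_new(Severity::INFO, 2, 1);
// Raw values widen to LargestEventRaw (u32), group IDs to LargestGroupIdRaw (u16).
assert_eq!(normal.raw_as_largest_type(), (2u32 << 16) | 1);
assert_eq!(small.raw_as_largest_type(), (2u32 << 8) | 1);
assert_eq!(normal.group_id_as_largest_type(), 2u16);
assert_eq!(small.group_id_as_largest_type(), 2u16);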

View File

@@ -1,16 +1,14 @@
//! Event support module
- use spacepackets::ecss::EcssEnumeration;
use core::hash::Hash;
use spacepackets::ecss::{EcssEnumeration, ToBeBytes};
use spacepackets::{ByteConversionError, SizeMissmatch};
use std::marker::PhantomData;

- pub type EventRaw = u32;
- pub type SmallEventRaw = u16;
- pub type GroupId = u16;
- pub type UniqueId = u16;
- pub type GroupIdSmall = u8;
- pub type UniqueIdSmall = u8;
/// Using a type definition allows to change this to u64 in the future more easily
pub type LargestEventRaw = u32;
/// Using a type definition allows to change this to u32 in the future more easily
pub type LargestGroupIdRaw = u16;

#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub enum Severity {
@@ -20,7 +18,7 @@ pub enum Severity {
HIGH = 3,
}

- pub trait EventProvider: PartialEq + Eq + Copy + Clone {
pub trait EventProvider: PartialEq + Eq + Copy + Clone + Hash {
type Raw;
type GroupId;
type UniqueId;
@@ -30,8 +28,8 @@ pub trait EventProvider: PartialEq + Eq + Copy + Clone {
fn group_id(&self) -> Self::GroupId;
fn unique_id(&self) -> Self::UniqueId;

- fn raw_as_u32(&self) -> u32;
- fn group_id_as_u16(&self) -> u16;
fn raw_as_largest_type(&self) -> LargestEventRaw;
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw;
}

impl TryFrom<u8> for Severity {
@@ -49,10 +47,111 @@ impl TryFrom<u8> for Severity {
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
- pub struct Event {
- severity: Severity,
- group_id: u16,
- unique_id: u16,
pub struct EventBase<RAW, GID, UID> {
severity: Severity,
group_id: GID,
unique_id: UID,
phantom: PhantomData<RAW>,
}
impl<RAW: ToBeBytes, GID, UID> EventBase<RAW, GID, UID> {
fn write_to_bytes(
&self,
raw: RAW,
buf: &mut [u8],
width: usize,
) -> Result<(), ByteConversionError> {
if buf.len() < width {
return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
found: buf.len(),
expected: width,
}));
}
buf.copy_from_slice(raw.to_be_bytes().as_ref());
Ok(())
}
}
impl EventBase<u32, u16, u16> {
#[inline]
fn raw(&self) -> u32 {
(((self.severity as u32) << 30) | ((self.group_id as u32) << 16) | self.unique_id as u32)
as u32
}
}
impl EventBase<u16, u8, u8> {
#[inline]
fn raw(&self) -> u16 {
(((self.severity as u16) << 14) as u16
| ((self.group_id as u16) << 8) as u16
| self.unique_id as u16) as u16
}
}
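For reference, the two raw() implementations above pack the event as severity | group ID | unique ID: 2 + 14 + 16 bits for the regular event and 2 + 6 + 8 bits for the small one. A short sketch with arbitrary example values (EventProvider must be in scope for raw()):

use crate::events::{Event, EventProvider, EventSmall, Severity};

// Regular event: 2-bit severity | 14-bit group ID | 16-bit unique ID
let event = Event::const_new(Severity::HIGH, 0x123, 0x4567);
assert_eq!(event.raw(), (3u32 << 30) | (0x123 << 16) | 0x4567); // 0xC123_4567

// Small event: 2-bit severity | 6-bit group ID | 8-bit unique ID
let event_small = EventSmall::const_new(Severity::HIGH, 0x12, 0x34);
assert_eq!(event_small.raw(), (3u16 << 14) | (0x12 << 8) | 0x34); // 0xD234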
impl<RAW, GID, UID> EventBase<RAW, GID, UID> {
#[inline]
pub fn severity(&self) -> Severity {
self.severity
}
}
impl<RAW, GID> EventBase<RAW, GID, u16> {
#[inline]
pub fn unique_id(&self) -> u16 {
self.unique_id
}
}
impl<RAW, GID> EventBase<RAW, GID, u8> {
#[inline]
pub fn unique_id(&self) -> u8 {
self.unique_id
}
}
impl<RAW, UID> EventBase<RAW, u16, UID> {
#[inline]
pub fn group_id(&self) -> u16 {
self.group_id
}
}
impl<RAW, UID> EventBase<RAW, u8, UID> {
#[inline]
pub fn group_id(&self) -> u8 {
self.group_id
}
}
macro_rules! event_provider_impl {
() => {
#[inline]
fn raw(&self) -> Self::Raw {
self.base.raw()
}
/// Retrieve the severity of an event.
#[inline]
fn severity(&self) -> Severity {
self.base.severity()
}
#[inline]
fn group_id(&self) -> Self::GroupId {
self.base.group_id()
}
#[inline]
fn unique_id(&self) -> Self::UniqueId {
self.base.unique_id()
}
};
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct Event {
base: EventBase<u32, u16, u16>,
}

impl EventProvider for Event {
@@ -60,31 +159,13 @@ impl EventProvider for Event {
type GroupId = u16;
type UniqueId = u16;

- fn raw(&self) -> Self::Raw {
- (((self.severity as Self::Raw) << 30) as Self::Raw
- | ((self.group_id as Self::Raw) << 16) as Self::Raw
- | self.unique_id as u32) as Self::Raw
- }
-
- /// Retrieve the severity of an event. Returns None if that severity bit field of the raw event
- /// ID is invalid
- fn severity(&self) -> Severity {
- self.severity
- }
-
- fn group_id(&self) -> Self::GroupId {
- self.group_id
- }
-
- fn unique_id(&self) -> Self::UniqueId {
- self.unique_id
- }
-
- fn raw_as_u32(&self) -> u32 {
- self.raw()
- }
-
- fn group_id_as_u16(&self) -> u16 {
- self.group_id()
- }
event_provider_impl!();

fn raw_as_largest_type(&self) -> LargestEventRaw {
self.raw()
}

fn group_id_as_largest_type(&self) -> LargestGroupIdRaw {
self.group_id()
}
}
@@ -110,24 +191,44 @@ impl Event {
return None;
}
Some(Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
})
}

/// Const version of [new], but panics on invalid group ID input values
pub const fn const_new(
severity: Severity,
group_id: <Self as EventProvider>::GroupId,
unique_id: <Self as EventProvider>::UniqueId,
) -> Self {
if group_id > (2u16.pow(14) - 1) {
panic!("Group ID too large");
}
Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
}
}
}

- impl TryFrom<u32> for Event {
- type Error = ();
-
- fn try_from(raw: u32) -> Result<Self, Self::Error> {
- let severity: Option<Severity> = (((raw >> 30) & 0b11) as u8).try_into().ok();
- if severity.is_none() {
- return Err(());
- }
- let group_id = ((raw >> 16) & 0x3FFF) as u16;
- let unique_id = (raw & 0xFFFF) as u16;
- Event::new(severity.unwrap(), group_id, unique_id).ok_or(())
- }
- }
impl From<u32> for Event {
fn from(raw: u32) -> Self {
// Severity conversion from u8 should never fail
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
let group_id = ((raw >> 16) & 0x3FFF) as u16;
let unique_id = (raw & 0xFFFF) as u16;
// Sanitized input, should never fail
Self::const_new(severity, group_id, unique_id)
}
}
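A note on the switch from TryFrom<u32> to From<u32>: the shift-and-mask logic above can only yield a two-bit severity and a group ID below the 14-bit limit, so every u32 maps to a valid Event and the fallible conversion is no longer needed. Worked example (arbitrary value, EventProvider in scope for the getters):

let event = Event::from(0xDEADBEEF_u32);
assert_eq!(event.severity(), Severity::HIGH); // top two bits of 0xDEADBEEF are 0b11
assert_eq!(event.group_id(), 0x1EAD); // (0xDEADBEEF >> 16) & 0x3FFF
assert_eq!(event.unique_id(), 0xBEEF);
assert_eq!(event.raw(), 0xDEADBEEF); // round-trips since all 32 bits are used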
@@ -137,56 +238,13 @@ impl EcssEnumeration for Event {
}

fn write_to_bytes(&self, buf: &mut [u8]) -> Result<(), ByteConversionError> {
- if buf.len() < self.byte_width() {
- return Err(ByteConversionError::ToSliceTooSmall(SizeMissmatch {
- found: buf.len(),
- expected: self.byte_width(),
- }));
- }
- buf.copy_from_slice(self.raw().to_be_bytes().as_slice());
- Ok(())
self.base.write_to_bytes(self.raw(), buf, self.byte_width())
}
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EventSmall {
- severity: Severity,
- group_id: u8,
- unique_id: u8,
base: EventBase<u16, u8, u8>,
}

- impl EventProvider for EventSmall {
- type Raw = u16;
- type GroupId = u8;
- type UniqueId = u8;
-
- fn raw(&self) -> Self::Raw {
- (((self.severity as Self::Raw) << 14) as Self::Raw
- | ((self.group_id as Self::Raw) << 8) as Self::Raw
- | self.unique_id as Self::Raw) as Self::Raw
- }
-
- /// Retrieve the severity of an event. Returns None if that severity bit field of the raw event
- /// ID is invalid
- fn severity(&self) -> Severity {
- self.severity
- }
-
- fn group_id(&self) -> Self::GroupId {
- self.group_id.into()
- }
-
- fn unique_id(&self) -> Self::UniqueId {
- self.unique_id.into()
- }
-
- fn raw_as_u32(&self) -> u32 {
- self.raw().into()
- }
-
- fn group_id_as_u16(&self) -> u16 {
- self.group_id().into()
- }
- }

impl EventSmall {
@@ -210,48 +268,244 @@ impl EventSmall {
return None;
}
Some(Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: Default::default(),
},
})
}
pub const fn const_new(
severity: Severity,
group_id: <Self as EventProvider>::GroupId,
unique_id: <Self as EventProvider>::UniqueId,
) -> Self {
if group_id > (2u8.pow(6) - 1) {
panic!("Group ID too large");
}
Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
}
}
}
impl EventProvider for EventSmall {
type Raw = u16;
type GroupId = u8;
type UniqueId = u8;
event_provider_impl!();
fn raw_as_largest_type(&self) -> LargestEventRaw {
self.raw().into()
}
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw {
self.group_id().into()
}
}
impl EcssEnumeration for EventSmall {
fn pfc(&self) -> u8 {
16
}
fn write_to_bytes(&self, buf: &mut [u8]) -> Result<(), ByteConversionError> {
self.base.write_to_bytes(self.raw(), buf, self.byte_width())
}
}
impl From<u16> for EventSmall {
fn from(raw: <Self as EventProvider>::Raw) -> Self {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
let group_id = ((raw >> 8) & 0x3F) as u8;
let unique_id = (raw & 0xFF) as u8;
// Sanitized input, new call should never fail
Self::const_new(severity, group_id, unique_id)
}
}
#[cfg(test)]
mod tests {
use super::Event;
- use crate::events::{EventProvider, Severity};
use crate::events::{EventProvider, EventSmall, Severity};
use spacepackets::ecss::EcssEnumeration;
use spacepackets::ByteConversionError;
use std::mem::size_of;
fn assert_size<T>(_: T, val: usize) {
assert_eq!(size_of::<T>(), val);
}
const INFO_EVENT: Event = Event::const_new(Severity::INFO, 0, 0);
const INFO_EVENT_SMALL: EventSmall = EventSmall::const_new(Severity::INFO, 0, 0);
const HIGH_SEV_EVENT: Event = Event::const_new(Severity::HIGH, 0x3FFF, 0xFFFF);
const HIGH_SEV_EVENT_SMALL: EventSmall = EventSmall::const_new(Severity::HIGH, 0x3F, 0xff);
#[test]
- fn test_events() {
- let event = Event::new(Severity::INFO, 0, 0).unwrap();
- assert_eq!(event.severity(), Severity::INFO);
- assert_eq!(event.unique_id(), 0);
- assert_eq!(event.group_id(), 0);
- let raw_event = event.raw();
- assert_eq!(raw_event, 0x00000000);
- let conv_from_raw = Event::try_from(raw_event);
- assert!(conv_from_raw.is_ok());
- let opt_event = conv_from_raw.ok();
- assert!(opt_event.is_some());
- let event = opt_event.unwrap();
- assert_eq!(event.severity(), Severity::INFO);
- assert_eq!(event.unique_id(), 0);
- assert_eq!(event.group_id(), 0);
-
- let event = Event::new(Severity::HIGH, 0x3FFF, 0xFFFF).unwrap();
- assert_eq!(event.severity(), Severity::HIGH);
- assert_eq!(event.group_id(), 0x3FFF);
- assert_eq!(event.unique_id(), 0xFFFF);
- let raw_event = event.raw();
- assert_eq!(raw_event, 0xFFFFFFFF);
- let conv_from_raw = Event::try_from(raw_event);
- assert!(conv_from_raw.is_ok());
- let opt_event = conv_from_raw.ok();
- assert!(opt_event.is_some());
- let event = opt_event.unwrap();
- assert_eq!(event.severity(), Severity::HIGH);
- assert_eq!(event.group_id(), 0x3FFF);
- assert_eq!(event.unique_id(), 0xFFFF);
fn test_normal_from_raw_conversion() {
let conv_from_raw = Event::from(INFO_EVENT.raw());
assert_eq!(conv_from_raw, INFO_EVENT);
}

#[test]
fn test_small_from_raw_conversion() {
let conv_from_raw = EventSmall::from(INFO_EVENT_SMALL.raw());
assert_eq!(conv_from_raw, INFO_EVENT_SMALL);
}

#[test]
fn verify_normal_size() {
assert_size(INFO_EVENT.raw(), 4)
}

#[test]
fn verify_small_size() {
assert_size(INFO_EVENT_SMALL.raw(), 2)
}

#[test]
fn test_normal_event_getters() {
assert_eq!(INFO_EVENT.severity(), Severity::INFO);
assert_eq!(INFO_EVENT.unique_id(), 0);
assert_eq!(INFO_EVENT.group_id(), 0);
let raw_event = INFO_EVENT.raw();
assert_eq!(raw_event, 0x00000000);
}

#[test]
fn test_small_event_getters() {
assert_eq!(INFO_EVENT_SMALL.severity(), Severity::INFO);
assert_eq!(INFO_EVENT_SMALL.unique_id(), 0);
assert_eq!(INFO_EVENT_SMALL.group_id(), 0);
let raw_event = INFO_EVENT_SMALL.raw();
assert_eq!(raw_event, 0x00000000);
}

#[test]
fn all_ones_event_regular() {
assert_eq!(HIGH_SEV_EVENT.severity(), Severity::HIGH);
assert_eq!(HIGH_SEV_EVENT.group_id(), 0x3FFF);
assert_eq!(HIGH_SEV_EVENT.unique_id(), 0xFFFF);
let raw_event = HIGH_SEV_EVENT.raw();
assert_eq!(raw_event, 0xFFFFFFFF);
}

#[test]
fn all_ones_event_small() {
assert_eq!(HIGH_SEV_EVENT_SMALL.severity(), Severity::HIGH);
assert_eq!(HIGH_SEV_EVENT_SMALL.group_id(), 0x3F);
assert_eq!(HIGH_SEV_EVENT_SMALL.unique_id(), 0xFF);
let raw_event = HIGH_SEV_EVENT_SMALL.raw();
assert_eq!(raw_event, 0xFFFF);
}
#[test]
fn invalid_group_id_normal() {
assert!(Event::new(Severity::MEDIUM, 2_u16.pow(14), 0).is_none());
}
#[test]
fn invalid_group_id_small() {
assert!(EventSmall::new(Severity::MEDIUM, 2_u8.pow(6), 0).is_none());
}
#[test]
fn regular_new() {
assert_eq!(
Event::new(Severity::INFO, 0, 0).expect("Creating regular event failed"),
INFO_EVENT
);
}
#[test]
fn small_new() {
assert_eq!(
EventSmall::new(Severity::INFO, 0, 0).expect("Creating small event failed"),
INFO_EVENT_SMALL
);
}
#[test]
fn as_largest_type() {
let event_raw = HIGH_SEV_EVENT.raw_as_largest_type();
assert_size(event_raw, 4);
assert_eq!(event_raw, 0xFFFFFFFF);
}
#[test]
fn as_largest_type_for_small_event() {
let event_raw = HIGH_SEV_EVENT_SMALL.raw_as_largest_type();
assert_size(event_raw, 4);
assert_eq!(event_raw, 0xFFFF);
}
#[test]
fn as_largest_group_id() {
let group_id = HIGH_SEV_EVENT.group_id_as_largest_type();
assert_size(group_id, 2);
assert_eq!(group_id, 0x3FFF);
}
#[test]
fn as_largest_group_id_small_event() {
let group_id = HIGH_SEV_EVENT_SMALL.group_id_as_largest_type();
assert_size(group_id, 2);
assert_eq!(group_id, 0x3F);
}
#[test]
fn write_to_buf() {
let mut buf: [u8; 4] = [0; 4];
assert!(HIGH_SEV_EVENT.write_to_bytes(&mut buf).is_ok());
let val_from_raw = u32::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFFFFFF);
}
#[test]
fn write_to_buf_small() {
let mut buf: [u8; 2] = [0; 2];
assert!(HIGH_SEV_EVENT_SMALL.write_to_bytes(&mut buf).is_ok());
let val_from_raw = u16::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFF);
}
#[test]
fn write_to_buf_insufficient_buf() {
let mut buf: [u8; 3] = [0; 3];
let err = HIGH_SEV_EVENT.write_to_bytes(&mut buf);
assert!(err.is_err());
let err = err.unwrap_err();
if let ByteConversionError::ToSliceTooSmall(missmatch) = err {
assert_eq!(missmatch.expected, 4);
assert_eq!(missmatch.found, 3);
}
}
#[test]
fn write_to_buf_small_insufficient_buf() {
let mut buf: [u8; 1] = [0; 1];
let err = HIGH_SEV_EVENT_SMALL.write_to_bytes(&mut buf);
assert!(err.is_err());
let err = err.unwrap_err();
if let ByteConversionError::ToSliceTooSmall(missmatch) = err {
assert_eq!(missmatch.expected, 2);
assert_eq!(missmatch.found, 1);
}
}
#[test]
fn severity_from_invalid_raw_val() {
let invalid = 0xFF;
assert!(Severity::try_from(invalid).is_err());
let invalid = Severity::HIGH as u8 + 1;
assert!(Severity::try_from(invalid).is_err());
}
}
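Both event types also still serialize through the spacepackets EcssEnumeration interface (exercised by the write_to_buf tests above); a compact usage sketch:

use spacepackets::ecss::EcssEnumeration;
use crate::events::{Event, EventProvider, Severity};

let event = Event::const_new(Severity::INFO, 2, 1);
let mut buf = [0u8; 4];
event.write_to_bytes(&mut buf).expect("writing event failed");
assert_eq!(u32::from_be_bytes(buf), event.raw());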

View File

@@ -1,4 +1,4 @@
- use crate::events::Event;
use crate::events::EventProvider;
use hashbrown::HashSet;

#[cfg(feature = "heapless")]
@@ -14,12 +14,12 @@ pub use heapless_mod::*;
/// structure to track disabled events. A more primitive and embedded friendly
/// solution could track this information in a static or pre-allocated list which contains
/// the disabled events.
- pub trait PusEventMgmtBackendProvider {
pub trait PusEventMgmtBackendProvider<Provider: EventProvider> {
type Error;
- fn event_enabled(&self, event: &Event) -> bool;
- fn enable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error>;
- fn disable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error>;
fn event_enabled(&self, event: &Provider) -> bool;
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error>;
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error>;
}

/// Default backend provider which uses a hash set as the event reporting status container
@@ -28,21 +28,23 @@ pub trait PusEventMgmtBackendProvider {
/// This provider is a good option for host systems or larger embedded systems where
/// the expected occasional memory allocation performed by the [HashSet] is not an issue.
#[derive(Default)]
- pub struct DefaultPusMgmtBackendProvider {
- disabled: HashSet<Event>,
pub struct DefaultPusMgmtBackendProvider<Provider: EventProvider> {
disabled: HashSet<Provider>,
}

- impl PusEventMgmtBackendProvider for DefaultPusMgmtBackendProvider {
impl<Provider: EventProvider> PusEventMgmtBackendProvider<Provider>
for DefaultPusMgmtBackendProvider<Provider>
{
type Error = ();
- fn event_enabled(&self, event: &Event) -> bool {
fn event_enabled(&self, event: &Provider) -> bool {
!self.disabled.contains(event)
}

- fn enable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
Ok(self.disabled.remove(event))
}

- fn disable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
Ok(self.disabled.insert(*event))
}
}
@@ -50,28 +52,34 @@ impl PusEventMgmtBackendProvider for DefaultPusMgmtBackendProvider {
#[cfg(feature = "heapless")]
pub mod heapless_mod {
use super::*;
- use crate::events::{EventProvider, EventRaw};
use crate::events::{EventProvider, LargestEventRaw};
use std::marker::PhantomData;

// TODO: After a new version of heapless is released which uses hash32 version 0.3, try using
// regular Event type again.
#[derive(Default)]
- pub struct HeaplessPusMgmtBckendProvider<const N: usize> {
- disabled: heapless::FnvIndexSet<EventRaw, N>,
pub struct HeaplessPusMgmtBckendProvider<const N: usize, Provider: EventProvider> {
disabled: heapless::FnvIndexSet<LargestEventRaw, N>,
phantom: PhantomData<Provider>,
}

- impl<const N: usize> PusEventMgmtBackendProvider for HeaplessPusMgmtBckendProvider<N> {
impl<const N: usize, Provider: EventProvider> PusEventMgmtBackendProvider<Provider>
for HeaplessPusMgmtBckendProvider<N, Provider>
{
type Error = ();

- fn event_enabled(&self, event: &Event) -> bool {
- self.disabled.contains(&event.raw())
fn event_enabled(&self, event: &Provider) -> bool {
self.disabled.contains(&event.raw_as_largest_type())
}

- fn enable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
- self.disabled.insert(event.raw()).map_err(|_| ())
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
self.disabled
.insert(event.raw_as_largest_type())
.map_err(|_| ())
}

- fn disable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
- Ok(self.disabled.remove(&event.raw()))
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
Ok(self.disabled.remove(&event.raw_as_largest_type()))
}
}
}
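Since the backend trait is now generic over the event provider, calling code can be written once and reused for both full-size and small events. A hypothetical helper (not part of this diff) showing how the generic trait is consumed:

use crate::events::EventProvider;

// Hypothetical convenience function: toggles reporting for any event provider type.
fn set_reporting<Provider, Backend>(
    backend: &mut Backend,
    event: &Provider,
    enabled: bool,
) -> Result<bool, Backend::Error>
where
    Provider: EventProvider,
    Backend: PusEventMgmtBackendProvider<Provider>,
{
    if enabled {
        backend.enable_event_reporting(event)
    } else {
        backend.disable_event_reporting(event)
    }
}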

@@ -1 +1 @@
- Subproject commit 603f688ac3e914de13037fd22ac544e125b6305b
Subproject commit a2673c98707ecbbabb9535bef607025c92b54724