tx-adaptions #16

Merged
muellerr merged 2 commits from tx-adaptions into main 2026-03-12 18:26:29 +01:00
5 changed files with 16 additions and 67 deletions
+1
View File
@@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
### Changed
- Added `RxWithInterrupt::steal`.
- Renamed UART `Data` register `value` field to `data`
- Improved type-level support for resource management of SPI, PWM, and UART.
- Renamed the `tx_asynch` and `rx_asynch` module names to `*_async`
+1 -1
View File
@@ -11,7 +11,7 @@ license = "Apache-2.0"
cortex-m = { version = "0.7" }
cfg-if = "1"
derive-mmio = "0.6"
bitbybit = "1.3"
bitbybit = "2"
arbitrary-int = "2"
static_assertions = "1.1"
nb = "1"
+2 -2
View File
@@ -56,12 +56,12 @@ impl Bank {
}
}
#[bitbybit::bitfield(u32, debug, defmt_bitfields(feature = "defmt"))]
#[bitbybit::bitfield(u32, default = 0x0, debug, defmt_bitfields(feature = "defmt"))]
pub struct Data {
#[bit(15, rw)]
dparity: bool,
#[bits(0..=7, rw)]
value: u8,
data: u8,
}
#[bitbybit::bitfield(u32, default = 0x0, debug, defmt_bitfields(feature = "defmt"))]
+4 -4
View File
@@ -147,7 +147,7 @@ pub fn on_interrupt_rx_async_heapless_queue_overwriting(
// If this interrupt bit is set, the trigger level is available at the very least.
// Read everything as fast as possible
for _ in 0..available_bytes {
let byte = uart_regs.read_data().value();
let byte = uart_regs.read_data().data();
if !prod.ready() {
queue_overflow = true;
critical_section::with(|cs| {
@@ -164,7 +164,7 @@ pub fn on_interrupt_rx_async_heapless_queue_overwriting(
if irq_status.rx_timeout() {
while uart_regs.read_rx_status().data_available() {
// While there is data in the FIFO, write it into the reception buffer
let byte = uart_regs.read_data().value();
let byte = uart_regs.read_data().data();
if !prod.ready() {
queue_overflow = true;
critical_section::with(|cs| {
@@ -215,7 +215,7 @@ pub fn on_interrupt_rx_async_heapless_queue(
// If this interrupt bit is set, the trigger level is available at the very least.
// Read everything as fast as possible
for _ in 0..available_bytes {
let byte = uart_regs.read_data().value();
let byte = uart_regs.read_data().data();
if !prod.ready() {
queue_overflow = true;
}
@@ -228,7 +228,7 @@ pub fn on_interrupt_rx_async_heapless_queue(
if irq_status.rx_timeout() {
while uart_regs.read_rx_status().data_available() {
// While there is data in the FIFO, write it into the reception buffer
let byte = uart_regs.read_data().value();
let byte = uart_regs.read_data().data();
if !prod.ready() {
queue_overflow = true;
}
+8 -60
View File
@@ -23,7 +23,6 @@ static TX_CONTEXTS: [Mutex<RefCell<TxContext>>; 2] =
// Completion flag. Kept outside of the context structure as an atomic to avoid
// critical section.
static TX_DONE: [AtomicBool; 2] = [const { AtomicBool::new(false) }; 2];
const EMPTY_SLICE: &[u8] = &[];
#[inline]
fn tx_is_drained(tx: &Tx) -> bool {
@@ -55,8 +54,7 @@ pub fn on_interrupt_tx(bank: Bank) {
// Safety: We documented that the user provided slice must outlive the future, so we convert
// the raw pointer back to the slice here.
let slice = unsafe { context.slice.get().unwrap() };
let tx_fifo_empty = uart.read_state().tx_fifo().value() == 0;
if context.progress >= slice.len() && tx_fifo_empty && !tx_status.tx_busy() {
if context.progress >= slice.len() && !tx_status.tx_busy() {
uart.modify_irq_enabled(|mut value| {
value.set_tx(false);
value.set_tx_empty(false);
@@ -178,61 +176,6 @@ impl Drop for TxFuture {
}
}
pub struct TxFlushFuture {
id: Bank,
}
impl TxFlushFuture {
pub fn new(tx: &mut Tx) -> Self {
let tx_idx = tx.id as usize;
TX_DONE[tx_idx].store(false, core::sync::atomic::Ordering::Relaxed);
tx.disable_interrupts();
critical_section::with(|cs| {
let context_ref = TX_CONTEXTS[tx_idx].borrow(cs);
let mut context = context_ref.borrow_mut();
unsafe { context.slice.set(EMPTY_SLICE) };
context.progress = 0;
});
if tx_is_drained(tx) {
TX_DONE[tx_idx].store(true, core::sync::atomic::Ordering::Relaxed);
return Self { id: tx.id };
}
critical_section::with(|_cs| {
// Ensure those are enabled inside a critical section at the same time. Can lead to
// weird glitches otherwise.
tx.enable_interrupts(
#[cfg(feature = "vor4x")]
true,
);
tx.enable();
});
if tx_is_drained(tx) {
TX_DONE[tx_idx].store(true, core::sync::atomic::Ordering::Relaxed);
}
Self { id: tx.id }
}
}
impl Future for TxFlushFuture {
type Output = Result<(), TxOverrunError>;
fn poll(
self: core::pin::Pin<&mut Self>,
cx: &mut core::task::Context<'_>,
) -> core::task::Poll<Self::Output> {
UART_TX_WAKERS[self.id as usize].register(cx.waker());
if TX_DONE[self.id as usize].swap(false, core::sync::atomic::Ordering::Relaxed) {
return core::task::Poll::Ready(Ok(()));
}
core::task::Poll::Pending
}
}
pub struct TxAsync(Tx);
impl TxAsync {
@@ -240,6 +183,11 @@ impl TxAsync {
Self(tx)
}
#[inline]
pub fn inner(&mut self) -> &mut Tx {
&mut self.0
}
/// Write a buffer asynchronously.
///
/// This implementation is not side effect free, and a started future might have already
@@ -262,8 +210,8 @@ impl TxAsync {
}
pub async fn flush(&mut self) -> Result<(), TxOverrunError> {
let fut = TxFlushFuture::new(&mut self.0);
fut.await
while !tx_is_drained(&self.0) {}
Ok(())
}
pub fn release(self) -> Tx {