start non-blocking SPI impl

This commit is contained in:
Robin Müller 2025-04-02 20:06:12 +02:00
parent 7cfb8adcd0
commit 5ffd23701e
Signed by: muellerr
GPG Key ID: A649FB78196E3849
6 changed files with 493 additions and 118 deletions

View File

@ -129,7 +129,7 @@ async fn main(_spawner: Spawner) -> ! {
assert_eq!(mod_id, spi::MODULE_ID);
assert!(spi.sclk() <= Hertz::from_raw(10_000_000));
let min_delay = (spi.sclk().raw() * 5) / 1_000_000_000;
spi.configure_delays(
spi.inner().configure_delays(
DelayControl::builder()
.with_inter_word_cs_deassert(0)
.with_between_cs_assertion(0)

View File

@ -20,6 +20,7 @@ num_enum = { version = "0.7", default-features = false }
embedded-hal-nb = "1"
embedded-io = "0.6"
embedded-hal = "1"
embedded-hal-async = "1"
delegate = "0.13"
paste = "1"
nb = "1"

View File

@ -0,0 +1,242 @@
use core::{cell::RefCell, convert::Infallible, sync::atomic::AtomicBool};
use critical_section::Mutex;
use embassy_sync::waitqueue::AtomicWaker;
use raw_slice::{RawBufSlice, RawBufSliceMut};
use zynq7000::spi::InterruptStatus;
use super::{Spi, SpiId, SpiLowLevel, FIFO_DEPTH};
static WAKERS: [AtomicWaker; 2] = [const { AtomicWaker::new() }; 2];
static TRANSFER_CONTEXTS: [Mutex<RefCell<TransferContext>>; 2] =
[const { Mutex::new(RefCell::new(TransferContext::new())) }; 2];
// Completion flags. Kept outside of the context structure as atomics to avoid
// a critical section.
static DONE: [AtomicBool; 2] = [const { AtomicBool::new(false) }; 2];
pub fn on_interrupt(peripheral: SpiId) {
let mut spi = unsafe { SpiLowLevel::steal(peripheral) };
let idx = peripheral as usize;
let imr = spi.read_imr();
// The IRQ is not related to this driver.
if !imr.tx_trig() && !imr.tx_full() && !imr.tx_underflow() && !imr.rx_ovr() && !imr.rx_full() && !imr.rx_not_empty() {
return;
}
// Prevent spurious interrupts from messing with our logic here.
spi.disable_interrupts();
let isr = spi.read_isr();
spi.clear_interrupts();
let mut context = critical_section::with(|cs| {
let context_ref = TRANSFER_CONTEXTS[idx].borrow(cs);
*context_ref.borrow()
});
// No transfer active.
if context.transfer_type.is_none() {
return;
}
let transfer_type = context.transfer_type.unwrap();
match transfer_type {
TransferType::Read => {
on_interrupt_read(idx, &mut context, &mut spi, isr);
}
TransferType::Write => todo!(),
TransferType::Transfer => todo!(),
TransferType::TransferInPlace => todo!(),
}
}
fn on_interrupt_read(
idx: usize,
context: &mut TransferContext,
spi: &mut SpiLowLevel,
mut isr: InterruptStatus,
) {
let read_slice = unsafe { context.rx_slice.get_mut().unwrap() };
let read_len = read_slice.len();
// Read data from the RX FIFO first. Only bytes which have already been clocked out on
// the TX side can be in the RX FIFO, so the reads are also bounded by the TX progress.
if isr.rx_full() {
while context.rx_progress < context.tx_progress {
read_slice[context.rx_progress] = spi.read_fifo_unchecked();
context.rx_progress += 1;
}
} else if isr.rx_not_empty() {
// The RX trigger level is the number of bytes guaranteed to be available right now.
let trigger = spi.read_rx_not_empty_threshold();
let read_end = core::cmp::min(context.tx_progress, context.rx_progress + trigger as usize);
while context.rx_progress < read_end {
read_slice[context.rx_progress] = spi.read_fifo_unchecked();
context.rx_progress += 1;
}
}
// The FIFO still needs to be pumped.
if context.tx_progress < read_len {
// Write dummy data to TX FIFO.
while context.tx_progress < read_slice.len() && !isr.tx_full() {
spi.write_fifo_unchecked(0);
context.tx_progress += 1;
isr = spi.read_isr();
}
}
if context.rx_progress == context.tx_progress && context.rx_progress == read_len {
// Write back updated context structure.
critical_section::with(|cs| {
let context_ref = TRANSFER_CONTEXTS[idx].borrow(cs);
*context_ref.borrow_mut() = *context;
});
spi.set_rx_fifo_trigger(1).unwrap();
spi.set_tx_fifo_trigger(1).unwrap();
// Interrupts were already disabled and cleared.
DONE[idx].store(true, core::sync::atomic::Ordering::Relaxed);
WAKERS[idx].wake();
} else {
let new_trig_level = core::cmp::min(FIFO_DEPTH, read_len - context.rx_progress);
spi.set_rx_fifo_trigger(new_trig_level as u32).unwrap();
// Write back the updated context so the next interrupt continues where this one left off.
critical_section::with(|cs| {
let context_ref = TRANSFER_CONTEXTS[idx].borrow(cs);
*context_ref.borrow_mut() = *context;
});
// Re-enable interrupts with the new RX FIFO trigger level.
spi.enable_interrupts();
}
}
#[derive(Debug, Clone, Copy)]
pub enum TransferType {
Read,
Write,
Transfer,
TransferInPlace,
}
#[derive(Default, Debug, Copy, Clone)]
pub struct TransferContext {
transfer_type: Option<TransferType>,
tx_progress: usize,
rx_progress: usize,
tx_slice: RawBufSlice,
rx_slice: RawBufSliceMut,
}
#[allow(clippy::new_without_default)]
impl TransferContext {
pub const fn new() -> Self {
Self {
transfer_type: None,
tx_progress: 0,
rx_progress: 0,
tx_slice: RawBufSlice::new_nulled(),
rx_slice: RawBufSliceMut::new_nulled(),
}
}
}
pub struct SpiFuture {
id: super::SpiId,
}
impl Future for SpiFuture {
type Output = ();
fn poll(
self: core::pin::Pin<&mut Self>,
cx: &mut core::task::Context<'_>,
) -> core::task::Poll<Self::Output> {
WAKERS[self.id as usize].register(cx.waker());
if DONE[self.id as usize].swap(false, core::sync::atomic::Ordering::Relaxed) {
critical_section::with(|cs| {
let mut ctx = TRANSFER_CONTEXTS[self.id as usize].borrow(cs).borrow_mut();
*ctx = TransferContext::default();
});
return core::task::Poll::Ready(());
}
core::task::Poll::Pending
}
}
impl SpiFuture {
// Not async: the setup is synchronous, and the returned future is awaited by the caller.
fn new_for_read(spi: &mut Spi, spi_id: SpiId, words: &mut [u8]) -> Self {
if words.is_empty() {
panic!("words length unexpectedly 0");
}
let idx = spi_id as usize;
DONE[idx].store(false, core::sync::atomic::Ordering::Relaxed);
spi.inner.disable_interrupts();
let write_idx = core::cmp::min(super::FIFO_DEPTH, words.len());
// Send dummy bytes.
(0..write_idx).for_each(|_| {
spi.inner.write_fifo_unchecked(0);
});
// This should never fail because it is never larger than the FIFO depth.
spi.inner.set_rx_fifo_trigger(write_idx as u32).unwrap();
// We want to refill the TX FIFO before it is completely empty. I am not sure whether
// the default value of 1 ensures this because the TRM says that this interrupt is
// triggered when the FIFO has less than threshold entries.
// The TX trigger is only relevant if the transfer does not fit into the FIFO completely.
if words.len() > super::FIFO_DEPTH {
spi.inner.set_tx_fifo_trigger(2).unwrap();
}
// We assume that the slave select configuration was already performed, but we take
// care of issuing a start if necessary.
spi.issue_manual_start_for_manual_cfg();
critical_section::with(|cs| {
let context_ref = TRANSFER_CONTEXTS[idx].borrow(cs);
let mut context = context_ref.borrow_mut();
context.transfer_type = Some(TransferType::Read);
unsafe {
context.rx_slice.set(words);
}
context.tx_slice.set_null();
context.tx_progress = write_idx;
context.rx_progress = 0;
spi.inner.clear_interrupts();
spi.inner.enable_interrupts();
});
Self { id: spi_id }
}
}
pub struct SpiAsync(pub Spi);
impl SpiAsync {
async fn read(&mut self, words: &mut [u8]) {
if words.is_empty() {
return;
}
let id = self.0.inner.id;
let spi_fut = SpiFuture::new_for_read(&mut self.0, id, words);
spi_fut.await;
}
async fn write(&mut self, _words: &[u8]) {
// Not implemented yet, see the todo!() arms in on_interrupt.
todo!()
}
async fn transfer(&mut self, _read: &mut [u8], _write: &[u8]) {
todo!()
}
async fn transfer_in_place(&mut self, _words: &mut [u8]) {
todo!()
}
}
impl embedded_hal_async::spi::ErrorType for SpiAsync {
type Error = Infallible;
}
impl embedded_hal_async::spi::SpiBus for SpiAsync {
async fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> {
self.read(words).await;
Ok(())
}
async fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> {
self.write(words).await;
Ok(())
}
async fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> {
self.transfer(read, write).await;
Ok(())
}
async fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Self::Error> {
self.transfer_in_place(words).await;
Ok(())
}
async fn flush(&mut self) -> Result<(), Self::Error> {
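// The read future only resolves once the full transfer has completed, so there is
// currently nothing left to flush here.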
Ok(())
}
}
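
For context, a minimal usage sketch of the new async API. This assumes the HAL crate is named zynq7000_hal, that the Spi driver was already constructed and configured, and that platform code routes the SPI 0 interrupt vector to the handler shown below; the ISR wiring and task names are illustrative, not part of this commit:

use embedded_hal_async::spi::SpiBus;
use zynq7000_hal::spi::{on_interrupt, SpiAsync, SpiId};

/// Hypothetical function called from the actual SPI 0 interrupt service routine.
fn spi_0_isr() {
    on_interrupt(SpiId::Spi0);
}

/// Hypothetical task reading four bytes from a peripheral.
async fn read_four(spi: &mut SpiAsync) {
    let mut buf = [0u8; 4];
    // Chip select and manual start configuration are assumed to have been performed
    // beforehand, as documented in SpiFuture::new_for_read. The fully qualified call
    // selects the embedded-hal-async trait method over the inherent one.
    SpiBus::read(spi, &mut buf).await.unwrap();
}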

View File

@ -13,18 +13,22 @@ use crate::gpio::{
};
use crate::{clocks::IoClocks, slcr::Slcr, time::Hertz};
use arbitrary_int::{Number, u3, u4, u6};
use arbitrary_int::{u3, u4, u6, Number};
use embedded_hal::delay::DelayNs;
pub use embedded_hal::spi::Mode;
use embedded_hal::spi::{MODE_0, MODE_1, MODE_2, MODE_3, SpiBus as _};
use embedded_hal::spi::{SpiBus as _, MODE_0, MODE_1, MODE_2, MODE_3};
use zynq7000::slcr::reset::DualRefAndClockReset;
use zynq7000::spi::{
BaudDivSelect, DelayControl, FifoWrite, MmioSpi, SPI_0_BASE_ADDR, SPI_1_BASE_ADDR,
BaudDivSelect, DelayControl, FifoWrite, InterruptControl, InterruptMask, InterruptStatus,
MmioSpi, SPI_0_BASE_ADDR, SPI_1_BASE_ADDR,
};
pub const FIFO_DEPTH: usize = 128;
pub const MODULE_ID: u32 = 0x90106;
pub mod asynch;
pub use asynch::*;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SpiId {
Spi0,
@ -403,9 +407,181 @@ impl Config {
}
}
pub struct SpiLowLevel {
id: SpiId,
regs: zynq7000::spi::MmioSpi<'static>,
}
impl SpiLowLevel {
/// Steal the SPI low level helper.
///
/// # Safety
///
/// This API can be used to potentially create a driver to the same peripheral structure
/// from multiple threads. The user must ensure that concurrent accesses are safe and do not
/// interfere with each other.
pub unsafe fn steal(id: SpiId) -> Self {
let regs = unsafe {
match id {
SpiId::Spi0 => zynq7000::spi::Spi::new_mmio_fixed_0(),
SpiId::Spi1 => zynq7000::spi::Spi::new_mmio_fixed_1(),
}
};
Self { id, regs }
}
/// Select the peripheral chip select line.
///
/// This needs to be done before starting a transfer to select the correct peripheral chip
/// select line.
///
/// The decoder bits logic is based
/// [on online documentation](https://www.realdigital.org/doc/3eb4f7a05e5003f2e0e6858a27a679bb?utm_source=chatgpt.com)
/// because the TRM does not specify how decoding really works. This also only works if
/// the external decoding was enabled via the [Config::enable_external_decoding] option.
#[inline]
pub fn select_hw_cs(&mut self, chip_select: ChipSelect) {
self.regs.modify_cr(|mut val| {
val.set_cs_raw(chip_select.raw_reg());
val
});
}
/// Re-configures the mode register.
#[inline]
pub fn configure_mode(&mut self, mode: Mode) {
let (cpol, cpha) = match mode {
MODE_0 => (false, false),
MODE_1 => (false, true),
MODE_2 => (true, false),
MODE_3 => (true, true),
};
self.regs.modify_cr(|mut val| {
val.set_cpha(cpha);
val.set_cpol(cpol);
val
});
}
/// Re-configures the delay control register.
#[inline]
pub fn configure_delays(&mut self, config: DelayControl) {
self.regs.write_delay_control(config)
}
/// No peripheral slave selection.
#[inline]
pub fn no_hw_cs(&mut self) {
self.select_hw_cs(ChipSelect::None);
}
#[inline(always)]
pub fn write_fifo_unchecked(&mut self, data: u8) {
self.regs
.write_txd(FifoWrite::new_with_raw_value(data as u32));
}
#[inline(always)]
pub fn read_fifo_unchecked(&mut self) -> u8 {
self.regs.read_rxd().data()
}
#[inline]
pub fn issue_manual_start(&mut self) {
self.regs.modify_cr(|mut val| {
val.set_manual_start(true);
val
});
}
#[inline]
pub fn read_isr(&self) -> InterruptStatus {
self.regs.read_isr()
}
#[inline]
pub fn read_imr(&self) -> InterruptMask {
self.regs.read_imr()
}
#[inline]
pub fn read_rx_not_empty_threshold(&self) -> u32 {
self.regs.read_rx_trig()
}
#[inline]
pub fn set_rx_fifo_trigger(&mut self, trigger: u32) -> Result<(), InvalidTriggerError> {
if trigger > FIFO_DEPTH as u32 {
return Err(InvalidTriggerError(trigger as usize));
}
self.regs.write_rx_trig(trigger);
Ok(())
}
#[inline]
pub fn set_tx_fifo_trigger(&mut self, trigger: u32) -> Result<(), InvalidTriggerError> {
if trigger > FIFO_DEPTH as u32 {
return Err(InvalidTriggerError(trigger as usize));
}
self.regs.write_tx_trig(trigger);
Ok(())
}
/// This disables all interrupts relevant for non-blocking interrupt driven SPI operation
/// in SPI master mode.
#[inline]
pub fn disable_interrupts(&mut self) {
self.regs.write_idr(
InterruptControl::builder()
.with_tx_underflow(true)
.with_rx_full(true)
.with_rx_not_empty(true)
.with_tx_full(false)
.with_tx_trig(true)
.with_mode_fault(false)
.with_rx_ovr(true)
.build(),
);
}
/// This enables all interrupts relevant for non-blocking interrupt driven SPI operation
/// in SPI master mode.
#[inline]
pub fn enable_interrupts(&mut self) {
self.regs.write_ier(
InterruptControl::builder()
.with_tx_underflow(true)
.with_rx_full(true)
.with_rx_not_empty(true)
.with_tx_full(false)
.with_tx_trig(true)
.with_mode_fault(false)
.with_rx_ovr(true)
.build(),
);
}
/// This clears all interrupts relevant for non-blocking interrupt driven SPI operation
/// in SPI master mode.
#[inline]
pub fn clear_interrupts(&mut self) {
self.regs.write_isr(
InterruptStatus::builder()
.with_tx_underflow(true)
.with_rx_full(true)
.with_rx_not_empty(true)
.with_tx_full(false)
.with_tx_not_full(true)
.with_mode_fault(false)
.with_rx_ovr(true)
.build(),
);
}
}
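
As a quick illustration of the low-level primitives above, a hedged sketch of a polled single-byte exchange (chip select setup omitted; this mirrors in miniature what the blocking SpiBus implementation below does in bulk):

// Safety: assumes no other driver instance accesses SPI 0 concurrently.
let mut ll = unsafe { SpiLowLevel::steal(SpiId::Spi0) };
// Trigger level 1 so the RX not-empty status asserts for every received byte.
ll.set_rx_fifo_trigger(1).unwrap();
ll.write_fifo_unchecked(0xAB);
// Wait until the byte has been shifted out and the response has arrived.
while !ll.read_isr().rx_not_empty() {}
let _response = ll.read_fifo_unchecked();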
/// Blocking Driver for the PS SPI peripheral in master mode.
pub struct Spi {
regs: zynq7000::spi::MmioSpi<'static>,
inner: SpiLowLevel,
sclk: Hertz,
config: Config,
outstanding_rx: bool,
@ -415,6 +591,10 @@ pub struct Spi {
#[error("invalid SPI ID")]
pub struct InvalidPsSpiError;
#[derive(Debug, thiserror::Error)]
#[error("invalid trigger value {0}")]
pub struct InvalidTriggerError(pub usize);
// TODO: Add and handle MUX config check.
#[derive(Debug, thiserror::Error)]
pub enum SpiConstructionError {
@ -640,101 +820,43 @@ impl Spi {
regs.write_enable(1);
let sclk = clocks.spi_clk() / config.baud_div.div_value() as u32;
Self {
regs,
inner: SpiLowLevel { regs, id },
sclk,
config,
outstanding_rx: false,
}
}
#[inline]
pub fn issue_manual_start_for_manual_cfg(&mut self) {
if self.config.ss_config == SlaveSelectConfig::AutoWithManualStart
|| self.config.ss_config == SlaveSelectConfig::ManualWithManualStart
{
self.inner.issue_manual_start();
}
}
/// Retrieve SCLK clock frequency currently configured for this SPI.
#[inline]
pub const fn sclk(&self) -> Hertz {
self.sclk
}
/// Retrieve inner low-level helper.
#[inline]
pub fn regs(&mut self) -> &mut MmioSpi<'static> {
&mut self.regs
}
/// Select the peripheral chip select line.
///
/// This needs to be done before starting a transfer to select the correct peripheral chip
/// select line.
///
/// The decoder bits logic is based
/// [on online documentation](https://www.realdigital.org/doc/3eb4f7a05e5003f2e0e6858a27a679bb?utm_source=chatgpt.com)
/// because the TRM does not specify how decoding really works. This also only works if
/// the external decoding was enabled via the [Config::enable_external_decoding] option.
#[inline]
pub fn select_hw_cs(&mut self, chip_select: ChipSelect) {
self.regs.modify_cr(|mut val| {
val.set_cs_raw(chip_select.raw_reg());
val
});
}
/// Re-configures the mode register.
#[inline]
pub fn configure_mode(&mut self, mode: Mode) {
let (cpol, cpha) = match mode {
MODE_0 => (false, false),
MODE_1 => (false, true),
MODE_2 => (true, false),
MODE_3 => (true, true),
};
self.regs.modify_cr(|mut val| {
val.set_cpha(cpha);
val.set_cpol(cpol);
val
});
}
/// Re-configures the delay control register.
#[inline]
pub fn configure_delays(&mut self, config: DelayControl) {
self.regs.write_delay_control(config)
}
/// No peripheral slave selection.
#[inline]
pub fn no_hw_cs(&mut self) {
self.select_hw_cs(ChipSelect::None);
}
#[inline(always)]
pub fn write_fifo_unchecked(&mut self, data: u8) {
self.regs
.write_txd(FifoWrite::new_with_raw_value(data as u32));
}
#[inline(always)]
pub fn read_fifo_unchecked(&mut self) -> u8 {
self.regs.read_rxd().data()
pub const fn inner(&mut self) -> &mut SpiLowLevel {
&mut self.inner
}
#[inline]
pub fn issue_manual_start(&mut self) {
self.regs.modify_cr(|mut val| {
val.set_manual_start(true);
val
});
}
#[inline]
pub fn issue_manual_start_for_manual_cfg(&mut self) {
if self.config.ss_config == SlaveSelectConfig::AutoWithManualStart
|| self.config.ss_config == SlaveSelectConfig::ManualWithManualStart
{
self.issue_manual_start();
}
pub fn regs(&mut self) -> &mut MmioSpi<'static> {
&mut self.inner.regs
}
fn initial_fifo_fill(&mut self, words: &[u8]) -> usize {
let write_len = core::cmp::min(FIFO_DEPTH, words.len());
(0..write_len).for_each(|idx| {
self.write_fifo_unchecked(words[idx]);
self.inner.write_fifo_unchecked(words[idx]);
});
write_len
}
@ -744,7 +866,7 @@ impl Spi {
// implementation for now.
self.flush().unwrap();
// Write this to 1 in any case to allow polling, defensive programming.
self.regs.write_rx_trig(1);
self.inner.regs.write_rx_trig(1);
// Fill the FIFO with initial data.
let written = self.initial_fifo_fill(words);
@ -769,12 +891,12 @@ impl embedded_hal::spi::SpiBus for Spi {
// implementation for now.
self.flush()?;
// Write this to 1 in any case to allow polling, defensive programming.
self.regs.write_rx_trig(1);
self.regs().write_rx_trig(1);
let mut write_idx = core::cmp::min(FIFO_DEPTH, words.len());
// Send dummy bytes.
(0..write_idx).for_each(|_| {
self.write_fifo_unchecked(0);
self.inner.write_fifo_unchecked(0);
});
// We assume that the slave select configuration was already performed, but we take
@ -783,14 +905,14 @@ impl embedded_hal::spi::SpiBus for Spi {
let mut read_idx = 0;
while read_idx < words.len() {
let status = self.regs.read_sr();
let status = self.regs().read_isr();
if status.rx_not_empty() {
words[read_idx] = self.read_fifo_unchecked();
words[read_idx] = self.inner.read_fifo_unchecked();
read_idx += 1;
}
// Continue pumping the FIFO if necessary and possible.
if write_idx < words.len() && !status.tx_full() {
self.write_fifo_unchecked(0);
self.inner.write_fifo_unchecked(0);
write_idx += 1;
}
}
@ -806,21 +928,21 @@ impl embedded_hal::spi::SpiBus for Spi {
let mut read_idx = 0;
while written < words.len() {
let status = self.regs.read_sr();
let status = self.regs().read_isr();
// We empty the FIFO to prevent it filling up completely, as long as we have to write
// bytes
if status.rx_not_empty() {
self.read_fifo_unchecked();
self.inner.read_fifo_unchecked();
read_idx += 1;
}
if !status.tx_full() {
self.write_fifo_unchecked(words[written]);
self.inner.write_fifo_unchecked(words[written]);
written += 1;
}
}
// We exit once all bytes have been written, so some bytes to read might be outstanding.
// We use the FIFO trigger mechanism to determine when we can read all the remaining bytes.
self.regs.write_rx_trig((words.len() - read_idx) as u32);
self.regs().write_rx_trig((words.len() - read_idx) as u32);
self.outstanding_rx = true;
Ok(())
}
@ -835,14 +957,14 @@ impl embedded_hal::spi::SpiBus for Spi {
let mut writes_finished = write_idx == write.len();
let mut reads_finished = false;
while !writes_finished || !reads_finished {
let status = self.regs.read_sr();
let status = self.regs().read_isr();
if status.rx_not_empty() && !reads_finished {
read[read_idx] = self.read_fifo_unchecked();
read[read_idx] = self.inner.read_fifo_unchecked();
read_idx += 1;
}
if !status.tx_full() && !writes_finished {
self.write_fifo_unchecked(write[write_idx]);
self.inner.write_fifo_unchecked(write[write_idx]);
write_idx += 1;
}
writes_finished = write_idx == write.len();
@ -862,14 +984,14 @@ impl embedded_hal::spi::SpiBus for Spi {
let mut writes_finished = write_idx == words.len();
let mut reads_finished = false;
while !writes_finished || !reads_finished {
let status = self.regs.read_sr();
let status = self.inner.read_isr();
if status.rx_not_empty() && !reads_finished {
words[read_idx] = self.read_fifo_unchecked();
words[read_idx] = self.inner.read_fifo_unchecked();
read_idx += 1;
}
if !status.tx_full() && !writes_finished {
self.write_fifo_unchecked(words[write_idx]);
self.inner.write_fifo_unchecked(words[write_idx]);
write_idx += 1;
}
writes_finished = write_idx == words.len();
@ -879,16 +1001,17 @@ impl embedded_hal::spi::SpiBus for Spi {
Ok(())
}
/// Blocking flush implementation.
fn flush(&mut self) -> Result<(), Self::Error> {
if !self.outstanding_rx {
return Ok(());
}
let rx_trig = self.regs.read_rx_trig();
while !self.regs.read_sr().rx_not_empty() {}
let rx_trig = self.inner.read_rx_not_empty_threshold();
while !self.inner.read_isr().rx_not_empty() {}
(0..rx_trig).for_each(|_| {
self.regs.read_rxd();
self.inner.read_fifo_unchecked();
});
self.regs.write_rx_trig(1);
self.inner.set_rx_fifo_trigger(1).unwrap();
self.outstanding_rx = false;
Ok(())
}
@ -919,7 +1042,7 @@ impl<Delay: DelayNs> embedded_hal::spi::SpiDevice for SpiWithHwCs<Delay> {
&mut self,
operations: &mut [embedded_hal::spi::Operation<'_, u8>],
) -> Result<(), Self::Error> {
self.spi.select_hw_cs(self.cs);
self.spi.inner.select_hw_cs(self.cs);
for op in operations {
match op {
embedded_hal::spi::Operation::Read(items) => {
@ -940,7 +1063,7 @@ impl<Delay: DelayNs> embedded_hal::spi::SpiDevice for SpiWithHwCs<Delay> {
}
}
self.spi.flush()?;
self.spi.no_hw_cs();
self.spi.inner.no_hw_cs();
Ok(())
}
}

View File

@ -6,6 +6,14 @@ use raw_slice::RawBufSlice;
use crate::uart::{FIFO_DEPTH, Tx, UartId};
#[derive(Debug)]
pub enum TransferType {
Read,
Write,
Transfer,
TransferInPlace,
}
static UART_TX_WAKERS: [AtomicWaker; 2] = [const { AtomicWaker::new() }; 2];
static TX_CONTEXTS: [Mutex<RefCell<TxContext>>; 2] =
[const { Mutex::new(RefCell::new(TxContext::new())) }; 2];
@ -49,8 +57,8 @@ pub fn on_interrupt_tx(peripheral: UartId) {
// Transfer is done.
TX_DONE[idx].store(true, core::sync::atomic::Ordering::Relaxed);
tx_with_irq.disable_interrupts();
UART_TX_WAKERS[idx].wake();
tx_with_irq.clear_interrupts();
UART_TX_WAKERS[idx].wake();
return;
}
// Safety: We documented that the user provided slice must outlive the future, so we convert
@ -94,7 +102,7 @@ impl TxContext {
}
pub struct TxFuture {
uart_idx: UartId,
id: UartId,
}
impl TxFuture {
@ -124,7 +132,7 @@ impl TxFuture {
tx_with_irq.enable_interrupts();
Self {
uart_idx: tx_with_irq.uart_idx(),
id: tx_with_irq.uart_idx(),
}
}
}
@ -136,10 +144,10 @@ impl Future for TxFuture {
self: core::pin::Pin<&mut Self>,
cx: &mut core::task::Context<'_>,
) -> core::task::Poll<Self::Output> {
UART_TX_WAKERS[self.uart_idx as usize].register(cx.waker());
if TX_DONE[self.uart_idx as usize].swap(false, core::sync::atomic::Ordering::Relaxed) {
UART_TX_WAKERS[self.id as usize].register(cx.waker());
if TX_DONE[self.id as usize].swap(false, core::sync::atomic::Ordering::Relaxed) {
let progress = critical_section::with(|cs| {
let mut ctx = TX_CONTEXTS[self.uart_idx as usize].borrow(cs).borrow_mut();
let mut ctx = TX_CONTEXTS[self.id as usize].borrow(cs).borrow_mut();
ctx.slice.set_null();
ctx.progress
});
@ -151,7 +159,7 @@ impl Future for TxFuture {
impl Drop for TxFuture {
fn drop(&mut self) {
let mut tx = unsafe { Tx::steal(self.uart_idx) };
let mut tx = unsafe { Tx::steal(self.id) };
tx.disable_interrupts();
}
}

View File

@ -65,9 +65,9 @@ pub struct Config {
master_ern: bool,
}
#[bitbybit::bitfield(u32)]
#[bitbybit::bitfield(u32, default = 0x0)]
#[derive(Debug)]
pub struct Status {
pub struct InterruptStatus {
#[bit(6, rw)]
tx_underflow: bool,
#[bit(5, rw)]
@ -77,7 +77,7 @@ pub struct Status {
#[bit(3, rw)]
tx_full: bool,
#[bit(2, rw)]
tx_trig: bool,
tx_not_full: bool,
#[bit(1, rw)]
mode_fault: bool,
/// Receiver overflow interrupt.
@ -85,9 +85,9 @@ pub struct Status {
rx_ovr: bool,
}
#[bitbybit::bitfield(u32)]
#[bitbybit::bitfield(u32, default = 0x0)]
#[derive(Debug)]
pub struct InterruptRegWriteOnly {
pub struct InterruptControl {
#[bit(6, w)]
tx_underflow: bool,
#[bit(5, w)]
@ -107,7 +107,7 @@ pub struct InterruptRegWriteOnly {
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct InterruptRegReadOnly {
pub struct InterruptMask {
#[bit(6, r)]
tx_underflow: bool,
#[bit(5, r)]
@ -162,16 +162,17 @@ pub struct DelayControl {
#[repr(C)]
pub struct Spi {
cr: Config,
sr: Status,
#[mmio(PureRead, Write)]
isr: InterruptStatus,
/// Interrupt Enable Register.
#[mmio(Write)]
ier: InterruptRegWriteOnly,
ier: InterruptControl,
/// Interrupt Disable Register.
#[mmio(Write)]
idr: InterruptRegWriteOnly,
idr: InterruptControl,
/// Interrupt Mask Register.
#[mmio(PureRead)]
imr: InterruptRegReadOnly,
imr: InterruptMask,
enable: u32,
delay_control: DelayControl,
#[mmio(Write)]