move AXI UART drivers to separate crates

Robin Müller 2025-03-31 19:52:19 +02:00
parent 0f2bda8ca1
commit 198e17c134
Signed by: muellerr
GPG Key ID: A649FB78196E3849
15 changed files with 2 additions and 2101 deletions

View File

@ -5,8 +5,6 @@ members = [
"zynq7000",
"zynq7000-hal",
"zynq7000-embassy",
- "axi-uartlite-rs",
- "axi-uart16550-rs",
"examples/simple",
"examples/embassy",
"examples/zedboard",

View File

@ -1,32 +0,0 @@
[package]
name = "axi-uart16550"
version = "0.1.0"
edition = "2024"
[dependencies]
derive-mmio = { git = "https://github.com/knurling-rs/derive-mmio.git", rev = "0806ce10b132ca15c6d9122a2d15a6e146b01520"}
bitbybit = "1.3"
arbitrary-int = "1.3"
nb = "1"
libm = "0.2"
critical-section = "1"
thiserror = { version = "2", default-features = false }
fugit = "0.3"
embedded-hal-async = "1"
embedded-hal-nb = "1"
embedded-io = "0.6"
embedded-io-async = "0.6"
embassy-sync = "0.6"
raw-slice = { git = "https://egit.irs.uni-stuttgart.de/rust/raw-slice.git" }
[features]
default = ["1-waker"]
1-waker = []
2-wakers = []
4-wakers = []
8-wakers = []
16-wakers = []
32-wakers = []
[dev-dependencies]
approx = "0.5"

View File

@ -1,372 +0,0 @@
#![no_std]
use core::convert::Infallible;
use registers::{Fcr, Ier, Lcr, RxFifoTrigger, StopBits, WordLen};
pub mod registers;
pub mod tx;
pub use tx::*;
pub mod tx_async;
pub use tx_async::*;
pub mod rx;
pub use rx::*;
pub const FIFO_DEPTH: usize = 16;
pub const DEFAULT_RX_TRIGGER_LEVEL: RxFifoTrigger = RxFifoTrigger::EightBytes;
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct ClkConfig {
pub div: u16,
}
#[derive(Debug, thiserror::Error, PartialEq, Eq)]
#[error("divisor is zero")]
pub struct DivisorZeroError;
/// Calculate the error rate of the baudrate with the given clock frequency, baudrate and
/// divisor as a floating point value between 0.0 and 1.0.
#[inline]
pub fn calculate_error_rate_from_div(
clk_in: fugit::HertzU32,
baudrate: u32,
div: u16,
) -> Result<f32, DivisorZeroError> {
if baudrate == 0 || div == 0 {
return Err(DivisorZeroError);
}
let actual = (clk_in.raw() as f32) / (16.0 * div as f32);
Ok(libm::fabsf(actual - baudrate as f32) / baudrate as f32)
}
/// If this error occurs, the calculated baudrate divisor is too large, either because the
/// used clock is too large, or the baudrate is too slow for the used clock frequency.
#[derive(Debug, thiserror::Error, PartialEq, Eq)]
pub enum ClkConfigError {
#[error("divisor too large")]
DivisorTooLargeError(u32),
#[error("divisor is zero")]
DivisorZero(#[from] DivisorZeroError),
}
impl ClkConfig {
pub fn new(div: u16) -> Self {
Self { div }
}
#[inline(always)]
pub fn div_msb(&self) -> u8 {
(self.div >> 8) as u8
}
#[inline(always)]
pub fn div_lsb(&self) -> u8 {
self.div as u8
}
/// This function calculates the required divisor values for a given input clock and baudrate
/// as well as the baud error rate.
#[inline]
pub fn new_autocalc_with_error(
clk_in: fugit::HertzU32,
baudrate: u32,
) -> Result<(Self, f32), ClkConfigError> {
let cfg = Self::new_autocalc(clk_in, baudrate)?;
Ok((cfg, cfg.calculate_error_rate(clk_in, baudrate)?))
}
/// This function calculates the required divisor values for a given input clock and baudrate.
///
/// The function will not calculate the error rate. You can use [Self::calculate_error_rate]
/// to check the error rate, or use the [Self::new_autocalc_with_error] function to get both
/// the clock config and its baud error.
#[inline]
pub fn new_autocalc(clk_in: fugit::HertzU32, baudrate: u32) -> Result<Self, ClkConfigError> {
let div = Self::calc_div_with_integer_div(clk_in, baudrate)?;
if div > u16::MAX as u32 {
return Err(ClkConfigError::DivisorTooLargeError(div));
}
Ok(Self { div: div as u16 })
}
/// Calculate the error rate of the baudrate with the given clock frequency, baudrate and the
/// current clock config as a floating point value between 0.0 and 1.0.
#[inline]
pub fn calculate_error_rate(
&self,
clk_in: fugit::HertzU32,
baudrate: u32,
) -> Result<f32, DivisorZeroError> {
calculate_error_rate_from_div(clk_in, baudrate, self.div)
}
#[inline(always)]
pub const fn calc_div_with_integer_div(
clk_in: fugit::HertzU32,
baudrate: u32,
) -> Result<u32, DivisorZeroError> {
if baudrate == 0 {
return Err(DivisorZeroError);
}
// Rounding integer division, by adding half the divisor to the dividend.
Ok((clk_in.raw() + (8 * baudrate)) / (16 * baudrate))
}
}
#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)]
pub enum Parity {
#[default]
None,
Odd,
Even,
}
pub struct AxiUart16550 {
rx: Rx,
tx: Tx,
config: UartConfig,
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct UartConfig {
clk: ClkConfig,
word_len: WordLen,
parity: Parity,
stop_bits: StopBits,
}
impl UartConfig {
pub const fn new_with_clk_config(clk: ClkConfig) -> Self {
Self {
clk,
word_len: WordLen::Eight,
parity: Parity::None,
stop_bits: StopBits::One,
}
}
pub const fn new(
clk: ClkConfig,
word_len: WordLen,
parity: Parity,
stop_bits: StopBits,
) -> Self {
Self {
clk,
word_len,
parity,
stop_bits,
}
}
}
impl AxiUart16550 {
/// Create a new AXI UART16550 peripheral driver.
///
/// # Safety
///
/// - The `base_addr` must be a valid memory-mapped register address of an AXI UART 16550
/// peripheral.
/// - Dereferencing an invalid or misaligned address results in **undefined behavior**.
/// - The caller must ensure that no other code concurrently modifies the same peripheral registers
/// in an unsynchronized manner to prevent data races.
/// - This function does not enforce uniqueness of driver instances. Creating multiple instances
/// with the same `base_addr` can lead to unintended behavior if not externally synchronized.
/// - The driver performs **volatile** reads and writes to the provided address.
pub unsafe fn new(base_addr: u32, config: UartConfig) -> Self {
let mut regs = unsafe { registers::AxiUart16550::new_mmio_at(base_addr as usize) };
// This unlocks the divisor config registers.
regs.write_lcr(Lcr::new_for_divisor_access());
regs.write_fifo_or_dll(config.clk.div_lsb() as u32);
regs.write_ier_or_dlm(config.clk.div_msb() as u32);
// Configure all other settings and reset the div access latch. This is important
// for accessing IER and the FIFO control register again.
regs.write_lcr(
Lcr::builder()
.with_div_access_latch(false)
.with_set_break(false)
.with_stick_parity(false)
.with_even_parity(config.parity == Parity::Even)
.with_parity_enable(config.parity != Parity::None)
.with_stop_bits(config.stop_bits)
.with_word_len(config.word_len)
.build(),
);
// Disable all interrupts.
regs.write_ier_or_dlm(Ier::new_with_raw_value(0x0).raw_value());
// Enable FIFO, configure 8 bytes FIFO trigger by default.
regs.write_iir_or_fcr(
Fcr::builder()
.with_rx_fifo_trigger(DEFAULT_RX_TRIGGER_LEVEL)
.with_dma_mode_sel(false)
.with_reset_tx_fifo(true)
.with_reset_rx_fifo(true)
.with_fifo_enable(true)
.build()
.raw_value(),
);
Self {
rx: Rx::new(unsafe { regs.clone() }),
tx: Tx::new(regs),
config,
}
}
#[inline(always)]
pub const fn regs(&mut self) -> &mut registers::MmioAxiUart16550<'static> {
&mut self.rx.regs
}
#[inline(always)]
pub const fn config(&mut self) -> &UartConfig {
&self.config
}
/// Write into the UART 16550.
///
/// Returns [nb::Error::WouldBlock] if the TX FIFO is full.
#[inline]
pub fn write_fifo(&mut self, data: u8) -> nb::Result<(), Infallible> {
self.tx.write_fifo(data)
}
// TODO: Make this non-mut as soon as pure reads are available.
#[inline(always)]
pub fn thr_empty(&mut self) -> bool {
self.tx.thr_empty()
}
#[inline(always)]
pub fn tx_empty(&mut self) -> bool {
self.tx.tx_empty()
}
#[inline(always)]
pub fn rx_has_data(&mut self) -> bool {
self.rx.has_data()
}
/// Write into the FIFO without checking the FIFO fill status.
///
/// This can be useful to completely fill the FIFO if it is known to be empty.
#[inline(always)]
pub fn write_fifo_unchecked(&mut self, data: u8) {
self.tx.write_fifo_unchecked(data);
}
#[inline]
pub fn read_fifo(&mut self) -> nb::Result<u8, Infallible> {
self.rx.read_fifo()
}
#[inline(always)]
pub fn read_fifo_unchecked(&mut self) -> u8 {
self.rx.read_fifo_unchecked()
}
#[inline(always)]
pub fn enable_interrupts(&mut self, ier: Ier) {
self.regs().write_ier_or_dlm(ier.raw_value());
}
pub fn split(self) -> (Tx, Rx) {
(self.tx, self.rx)
}
}
impl embedded_hal_nb::serial::ErrorType for AxiUart16550 {
type Error = Infallible;
}
impl embedded_hal_nb::serial::Write for AxiUart16550 {
#[inline]
fn write(&mut self, word: u8) -> nb::Result<(), Self::Error> {
self.tx.write(word)
}
#[inline]
fn flush(&mut self) -> nb::Result<(), Self::Error> {
self.tx.flush()
}
}
impl embedded_hal_nb::serial::Read for AxiUart16550 {
#[inline]
fn read(&mut self) -> nb::Result<u8, Self::Error> {
self.rx.read()
}
}
impl embedded_io::ErrorType for AxiUart16550 {
type Error = Infallible;
}
impl embedded_io::Read for AxiUart16550 {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
self.rx.read(buf)
}
}
impl embedded_io::Write for AxiUart16550 {
fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
self.tx.write(buf)
}
fn flush(&mut self) -> Result<(), Self::Error> {
self.tx.flush()
}
}
#[cfg(test)]
mod tests {
use crate::ClkConfigError;
//extern crate std;
use super::{DivisorZeroError, calculate_error_rate_from_div};
use super::ClkConfig;
use approx::abs_diff_eq;
use fugit::RateExtU32;
#[test]
fn test_clk_calc_example_0() {
let clk_cfg = ClkConfig::new_autocalc(100.MHz(), 56000).unwrap();
// For some reason, the Xilinx example rounds up here..
assert_eq!(clk_cfg.div, 0x0070);
assert_eq!(clk_cfg.div_msb(), 0x00);
assert_eq!(clk_cfg.div_lsb(), 0x70);
let error = clk_cfg.calculate_error_rate(100.MHz(), 56000).unwrap();
assert!(abs_diff_eq!(error, 0.0035, epsilon = 0.001));
let (clk_cfg_checked, error_checked) =
ClkConfig::new_autocalc_with_error(100.MHz(), 56000).unwrap();
assert_eq!(clk_cfg, clk_cfg_checked);
assert!(abs_diff_eq!(error, error_checked, epsilon = 0.001));
let error_calc = calculate_error_rate_from_div(100.MHz(), 56000, clk_cfg.div).unwrap();
assert!(abs_diff_eq!(error, error_calc, epsilon = 0.001));
}
#[test]
fn test_clk_calc_example_1() {
let clk_cfg = ClkConfig::new_autocalc(1843200.Hz(), 56000).unwrap();
assert_eq!(clk_cfg.div, 0x0002);
assert_eq!(clk_cfg.div_msb(), 0x00);
assert_eq!(clk_cfg.div_lsb(), 0x02);
}
#[test]
fn test_invalid_baud() {
let clk_cfg = ClkConfig::new_autocalc_with_error(100.MHz(), 0);
assert_eq!(clk_cfg, Err(ClkConfigError::DivisorZero(DivisorZeroError)));
}
#[test]
fn test_invalid_div() {
let error = calculate_error_rate_from_div(100.MHz(), 115200, 0);
assert_eq!(error.unwrap_err(), DivisorZeroError);
let error = calculate_error_rate_from_div(100.MHz(), 0, 0);
assert_eq!(error.unwrap_err(), DivisorZeroError);
let error = calculate_error_rate_from_div(100.MHz(), 0, 16);
assert_eq!(error.unwrap_err(), DivisorZeroError);
}
}
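
For reference, a minimal blocking usage sketch of the driver removed in this commit. The base address 0x4400_0000 and the 100 MHz reference clock are assumptions for illustration; everything else only uses the constructor and the nb-based FIFO API shown above.

use axi_uart16550::{AxiUart16550, ClkConfig, UartConfig};
use fugit::RateExtU32;

fn uart16550_echo() {
    // Hypothetical base address of the AXI UART 16550 IP core in the FPGA design.
    const UART_BASE_ADDR: u32 = 0x4400_0000;
    // Divisor for 115200 baud, assuming a 100 MHz AXI clock.
    let (clk_cfg, _baud_error) = ClkConfig::new_autocalc_with_error(100.MHz(), 115200).unwrap();
    let config = UartConfig::new_with_clk_config(clk_cfg);
    // Safety: the address points at the UART registers and no other driver instance exists.
    let mut uart = unsafe { AxiUart16550::new(UART_BASE_ADDR, config) };
    // Echo back every received byte, busy-waiting on the nb API.
    loop {
        let byte = nb::block!(uart.read_fifo()).unwrap();
        nb::block!(uart.write_fifo(byte)).unwrap();
    }
}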

View File

@ -1,177 +0,0 @@
use arbitrary_int::u2;
/// Transmitter Holding Register.
#[bitbybit::bitfield(u32)]
pub struct Fifo {
#[bits(0..=7, rw)]
data: u8,
}
#[bitbybit::bitfield(u32)]
pub struct Ier {
/// Enable Modem Status Interrupt
#[bit(3, rw)]
modem_status: bool,
/// Enable Receiver Line Status Interrupt
#[bit(2, rw)]
line_status: bool,
/// Enable Transmitter Holding Register Empty Interrupt
#[bit(1, rw)]
thr_empty: bool,
/// Enable Received Data Available Interrupt
#[bit(0, rw)]
rx_avl: bool,
}
/// Interrupt identification ID
#[bitbybit::bitenum(u3, exhaustive = false)]
#[derive(Debug, PartialEq, Eq)]
pub enum IntId2 {
ReceiverLineStatus = 0b011,
RxDataAvailable = 0b010,
CharTimeout = 0b110,
ThrEmpty = 0b001,
ModemStatus = 0b000,
}
/// Interrupt Identification Register
#[bitbybit::bitfield(u32)]
pub struct Iir {
/// 16550 mode enabled?
#[bits(6..=7, r)]
fifo_enabled: u2,
#[bits(1..=3, r)]
int_id: Option<IntId2>,
/// Interrupt Pending, active low.
#[bit(0, r)]
int_pend_n: bool,
}
#[bitbybit::bitenum(u2, exhaustive = true)]
pub enum RxFifoTrigger {
OneByte = 0b00,
FourBytes = 0b01,
EightBytes = 0b10,
FourteenBytes = 0b11,
}
impl RxFifoTrigger {
pub const fn as_num(self) -> u32 {
match self {
RxFifoTrigger::OneByte => 1,
RxFifoTrigger::FourBytes => 4,
RxFifoTrigger::EightBytes => 8,
RxFifoTrigger::FourteenBytes => 14,
}
}
}
/// FIFO Control Register
#[bitbybit::bitfield(u32, default = 0x0)]
pub struct Fcr {
#[bits(4..=5, rw)]
rx_fifo_trigger: RxFifoTrigger,
#[bit(3, rw)]
dma_mode_sel: bool,
#[bit(2, rw)]
reset_tx_fifo: bool,
#[bit(1, rw)]
reset_rx_fifo: bool,
#[bit(0, rw)]
fifo_enable: bool,
}
#[bitbybit::bitenum(u2, exhaustive = true)]
#[derive(Default, Debug, PartialEq, Eq)]
pub enum WordLen {
Five = 0b00,
Six = 0b01,
Seven = 0b10,
#[default]
Eight = 0b11,
}
#[bitbybit::bitenum(u1, exhaustive = true)]
#[derive(Default, Debug, PartialEq, Eq)]
pub enum StopBits {
#[default]
One = 0b0,
/// 1.5 for 5 bits/char, 2 otherwise.
OnePointFiveOrTwo = 0b1,
}
/// Line control register
#[bitbybit::bitfield(u32, default = 0x00)]
pub struct Lcr {
#[bit(7, rw)]
div_access_latch: bool,
#[bit(6, rw)]
set_break: bool,
#[bit(5, rw)]
stick_parity: bool,
#[bit(4, rw)]
even_parity: bool,
#[bit(3, rw)]
parity_enable: bool,
/// 0: 1 stop bit, 1: 2 stop bits or 1.5 if 5 bits/char selected
#[bit(2, rw)]
stop_bits: StopBits,
#[bits(0..=1, rw)]
word_len: WordLen,
}
impl Lcr {
pub fn new_for_divisor_access() -> Self {
Self::new_with_raw_value(0x80)
}
}
/// Line Status Register
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct Lsr {
#[bit(7, rw)]
error_in_rx_fifo: bool,
/// In the FIFO mode, this is set to 1 when the TX FIFO and shift register are both empty.
#[bit(6, rw)]
tx_empty: bool,
/// In the FIFO mode, this is set to 1 when the TX FIFO is empty. There might still be a byte
/// in the TX shift register.
#[bit(5, rw)]
thr_empty: bool,
#[bit(4, rw)]
break_interrupt: bool,
#[bit(3, rw)]
framing_error: bool,
#[bit(2, rw)]
parity_error: bool,
#[bit(1, rw)]
overrun_error: bool,
#[bit(0, rw)]
data_ready: bool,
}
#[derive(derive_mmio::Mmio)]
#[repr(C)]
pub struct AxiUart16550 {
_reserved: [u32; 0x400],
/// FIFO register for LCR[7] == 0 or Divisor Latch (LSB) register for LCR[7] == 1
fifo_or_dll: u32,
/// Interrupt Enable Register for LCR[7] == 0 or Divisor Latch (MSB) register for LCR[7] == 1
ier_or_dlm: u32,
/// Interrupt Identification Register or FIFO Control Register. FCR is not included in 16450
/// mode. If LCR[7] == 1, this register will be the read-only FIFO control register.
/// If LCR[7] == 0, this register will be the read-only interrupt IIR register or the
/// write-only FIFO control register.
iir_or_fcr: u32,
/// Line Control Register
lcr: Lcr,
/// Modem Control Register
mcr: u32,
/// Line Status Register
lsr: Lsr,
/// Modem Status Register
msr: u32,
/// Scratch Register
scr: u32,
}

View File

@ -1,223 +0,0 @@
use core::convert::Infallible;
use crate::{
DEFAULT_RX_TRIGGER_LEVEL,
registers::{self, Fcr, Ier, Iir, IntId2, Lsr},
};
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
pub struct RxErrors {
parity: bool,
frame: bool,
overrun: bool,
}
impl RxErrors {
pub const fn new() -> Self {
Self {
parity: false,
frame: false,
overrun: false,
}
}
pub const fn parity(&self) -> bool {
self.parity
}
pub const fn frame(&self) -> bool {
self.frame
}
pub const fn overrun(&self) -> bool {
self.overrun
}
pub const fn has_errors(&self) -> bool {
self.parity || self.frame || self.overrun
}
}
pub struct Rx {
/// Internal MMIO register structure.
pub(crate) regs: registers::MmioAxiUart16550<'static>,
pub(crate) errors: Option<RxErrors>,
}
impl Rx {
/// Steal the RX part of the UART 16550.
///
/// You should only use this if you can not use the regular [super::AxiUart16550] constructor
/// and the [super::AxiUart16550::split] method.
///
/// This function assumes that the setup of the UART was already done.
/// It can be used to create an RX handle inside an interrupt handler without having to use
/// a [critical_section::Mutex] if the user can guarantee that the RX handle will only be
/// used by the interrupt handler or only interrupt specific API will be used.
///
/// # Safety
///
/// The same safety rules specified in [super::AxiUart16550::new] apply.
pub const unsafe fn steal(base_addr: usize) -> Self {
Self {
regs: unsafe { registers::AxiUart16550::new_mmio_at(base_addr) },
errors: None,
}
}
pub(crate) fn new(regs: registers::MmioAxiUart16550<'static>) -> Self {
Self { regs, errors: None }
}
#[inline]
pub fn read_fifo(&mut self) -> nb::Result<u8, Infallible> {
let status_reg = self.regs.read_lsr();
if !status_reg.data_ready() {
return Err(nb::Error::WouldBlock);
}
if status_reg.error_in_rx_fifo() {
self.errors = Some(Self::lsr_to_errors(status_reg));
}
Ok(self.read_fifo_unchecked())
}
#[inline(always)]
pub fn read_fifo_unchecked(&mut self) -> u8 {
self.regs.read_fifo_or_dll() as u8
}
/// Start interrupt driven reception.
///
/// This function resets the FIFO with [Self::reset_fifo] and then enables the interrupts
/// with [Self::enable_interrupt].
/// After this, you only need to call [Self::on_interrupt_receiver_line_status] and
/// [Self::on_interrupt_data_available_or_char_timeout] in your interrupt handler, depending
/// on the value of the IIR register, to continuously receive data.
#[inline]
pub fn start_interrupt_driven_reception(&mut self) {
self.reset_fifo();
self.enable_interrupt();
}
#[inline]
pub fn enable_interrupt(&mut self) {
self.regs.modify_ier_or_dlm(|val| {
let mut ier = Ier::new_with_raw_value(val);
ier.set_rx_avl(true);
ier.set_line_status(true);
ier.raw_value()
});
}
#[inline]
pub fn disable_interrupt(&mut self) {
self.regs.modify_ier_or_dlm(|val| {
let mut ier = Ier::new_with_raw_value(val);
ier.set_rx_avl(false);
ier.set_line_status(false);
ier.raw_value()
});
}
#[inline]
pub fn reset_fifo(&mut self) {
self.regs.write_iir_or_fcr(
Fcr::builder()
.with_rx_fifo_trigger(DEFAULT_RX_TRIGGER_LEVEL)
.with_dma_mode_sel(false)
.with_reset_tx_fifo(false)
.with_reset_rx_fifo(true)
.with_fifo_enable(true)
.build()
.raw_value(),
);
}
#[inline(always)]
pub fn has_data(&mut self) -> bool {
self.regs.read_lsr().data_ready()
}
#[inline]
pub fn read_iir(&mut self) -> Iir {
Iir::new_with_raw_value(self.regs.read_iir_or_fcr())
}
#[inline]
pub fn on_interrupt_receiver_line_status(&mut self, _iir: Iir) -> RxErrors {
let lsr = self.regs.read_lsr();
Self::lsr_to_errors(lsr)
}
#[inline]
pub fn on_interrupt_data_available_or_char_timeout(
&mut self,
int_id2: IntId2,
buf: &mut [u8; 16],
) -> usize {
let mut read = 0;
// It is guaranteed that we can read the FIFO trigger level.
if int_id2 == IntId2::RxDataAvailable {
let trigger_level = Fcr::new_with_raw_value(self.regs.read_iir_or_fcr());
(0..trigger_level.rx_fifo_trigger().as_num() as usize).for_each(|i| {
buf[i] = self.read_fifo_unchecked();
read += 1;
});
}
// Read the rest of the FIFO.
while self.has_data() && read < 16 {
buf[read] = self.read_fifo_unchecked();
read += 1;
}
read
}
pub fn lsr_to_errors(status_reg: Lsr) -> RxErrors {
let mut errors = RxErrors::new();
if status_reg.framing_error() {
errors.frame = true;
}
if status_reg.parity_error() {
errors.parity = true;
}
if status_reg.overrun_error() {
errors.overrun = true;
}
errors
}
}
impl embedded_hal_nb::serial::ErrorType for Rx {
type Error = Infallible;
}
impl embedded_hal_nb::serial::Read for Rx {
#[inline]
fn read(&mut self) -> nb::Result<u8, Self::Error> {
self.read_fifo()
}
}
impl embedded_io::ErrorType for Rx {
type Error = Infallible;
}
impl embedded_io::Read for Rx {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
if buf.is_empty() {
return Ok(0);
}
while !self.has_data() {}
let mut read = 0;
for byte in buf.iter_mut() {
match self.read_fifo() {
Ok(data) => {
*byte = data;
read += 1;
}
Err(nb::Error::WouldBlock) => break,
}
}
Ok(read)
}
}
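
A sketch of how the interrupt-driven RX helpers above could be wired into a platform-specific interrupt handler. The base address and the data/error hooks are assumptions for illustration; the sketch relies on Rx::steal, read_iir and the two on_interrupt_* methods shown above.

use axi_uart16550::{Rx, registers::IntId2};

// Hypothetical base address of the AXI UART 16550 IP core.
const UART_BASE_ADDR: usize = 0x4400_0000;

/// Called from the platform specific IRQ handler when the UART interrupt fires.
fn on_uart16550_rx_interrupt() {
    // Safety: only this handler accesses the RX half, as documented for Rx::steal.
    let mut rx = unsafe { Rx::steal(UART_BASE_ADDR) };
    let iir = rx.read_iir();
    let mut buf = [0u8; 16];
    match iir.int_id() {
        Some(IntId2::ReceiverLineStatus) => {
            let errors = rx.on_interrupt_receiver_line_status(iir);
            // Hypothetical error hook: count or report the RX errors here.
            let _ = errors;
        }
        Some(int_id @ IntId2::RxDataAvailable) | Some(int_id @ IntId2::CharTimeout) => {
            let read = rx.on_interrupt_data_available_or_char_timeout(int_id, &mut buf);
            // Hypothetical data hook: hand buf[..read] to the application here.
            let _ = &buf[..read];
        }
        _ => (),
    }
}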

View File

@ -1,152 +0,0 @@
use core::convert::Infallible;
use crate::{
DEFAULT_RX_TRIGGER_LEVEL,
registers::{self, Fcr, Ier},
};
pub struct Tx {
/// Internal MMIO register structure.
pub(crate) regs: registers::MmioAxiUart16550<'static>,
}
impl Tx {
/// Steal the TX part of the UART 16550.
///
/// You should only use this if you can not use the regular [super::AxiUart16550] constructor
/// and the [super::AxiUart16550::split] method.
///
/// This function assumes that the setup of the UART was already done.
/// It can be used to create a TX handle inside an interrupt handler without having to use
/// a [critical_section::Mutex] if the user can guarantee that the TX handle will only be
/// used by the interrupt handler, or only interrupt specific API will be used.
///
/// # Safety
///
/// The same safety rules specified in [super::AxiUart16550::new] apply.
pub const unsafe fn steal(base_addr: usize) -> Self {
Self {
regs: unsafe { registers::AxiUart16550::new_mmio_at(base_addr) },
}
}
pub(crate) fn new(regs: registers::MmioAxiUart16550<'static>) -> Self {
Self { regs }
}
#[inline]
pub fn write_fifo(&mut self, data: u8) -> nb::Result<(), Infallible> {
if !self.thr_empty() {
return Err(nb::Error::WouldBlock);
}
self.write_fifo_unchecked(data);
Ok(())
}
#[inline]
pub fn enable_interrupt(&mut self) {
self.regs.modify_ier_or_dlm(|val| {
let mut ier = Ier::new_with_raw_value(val);
ier.set_thr_empty(true);
ier.raw_value()
});
}
#[inline]
pub fn disable_interrupt(&mut self) {
self.regs.modify_ier_or_dlm(|val| {
let mut ier = Ier::new_with_raw_value(val);
ier.set_thr_empty(false);
ier.raw_value()
});
}
/// Write into the FIFO without checking the FIFO fill status.
///
/// This can be useful to completely fill the FIFO if it is known to be empty.
#[inline(always)]
pub fn write_fifo_unchecked(&mut self, data: u8) {
self.regs.write_fifo_or_dll(data as u32);
}
// TODO: Make this non-mut as soon as pure reads are available.
#[inline(always)]
pub fn thr_empty(&mut self) -> bool {
self.regs.read_lsr().thr_empty()
}
#[inline(always)]
pub fn tx_empty(&mut self) -> bool {
self.regs.read_lsr().tx_empty()
}
#[inline]
pub fn reset_fifo(&mut self) {
self.regs.write_iir_or_fcr(
Fcr::builder()
.with_rx_fifo_trigger(DEFAULT_RX_TRIGGER_LEVEL)
.with_dma_mode_sel(false)
.with_reset_tx_fifo(true)
.with_reset_rx_fifo(false)
.with_fifo_enable(true)
.build()
.raw_value(),
);
}
#[inline]
pub fn on_interrupt_thr_empty(&mut self, next_write_chunk: &[u8]) -> usize {
if next_write_chunk.is_empty() {
return 0;
}
let mut written = 0;
while self.thr_empty() && written < next_write_chunk.len() {
self.write_fifo_unchecked(next_write_chunk[written]);
written += 1;
}
written
}
}
impl embedded_hal_nb::serial::ErrorType for Tx {
type Error = Infallible;
}
impl embedded_hal_nb::serial::Write for Tx {
#[inline]
fn write(&mut self, word: u8) -> nb::Result<(), Self::Error> {
self.write_fifo(word)
}
#[inline]
fn flush(&mut self) -> nb::Result<(), Self::Error> {
while !self.tx_empty() {}
Ok(())
}
}
impl embedded_io::ErrorType for Tx {
type Error = Infallible;
}
impl embedded_io::Write for Tx {
fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
if buf.is_empty() {
return Ok(0);
}
while !self.thr_empty() {}
let mut written = 0;
for &byte in buf.iter() {
match self.write_fifo(byte) {
Ok(_) => written += 1,
Err(nb::Error::WouldBlock) => break,
}
}
Ok(written)
}
fn flush(&mut self) -> Result<(), Self::Error> {
while !self.tx_empty() {}
Ok(())
}
}

View File

@ -1,259 +0,0 @@
//! # Asynchronous TX support.
//!
//! This module provides support for asynchronous non-blocking TX transfers.
//!
//! It provides a static number of async wakers to allow a configurable amount of pollable
//! [TxFuture]s. Each UART 16550 [Tx] instance which performs asynchronous TX operations needs
//! to be explicitly assigned a waker when creating an awaitable [TxAsync] structure
//! as well as when calling the [on_interrupt_tx] handler.
//!
//! The maximum number of available wakers is configured via the waker feature flags:
//!
//! - `1-waker`
//! - `2-wakers`
//! - `4-wakers`
//! - `8-wakers`
//! - `16-wakers`
//! - `32-wakers`
use core::{cell::RefCell, convert::Infallible, sync::atomic::AtomicBool};
use critical_section::Mutex;
use embassy_sync::waitqueue::AtomicWaker;
use embedded_hal_async::delay::DelayNs;
use raw_slice::RawBufSlice;
use crate::{
FIFO_DEPTH, Tx,
registers::{self, Ier},
};
#[cfg(feature = "1-waker")]
pub const NUM_WAKERS: usize = 1;
#[cfg(feature = "2-wakers")]
pub const NUM_WAKERS: usize = 2;
#[cfg(feature = "4-wakers")]
pub const NUM_WAKERS: usize = 4;
#[cfg(feature = "8-wakers")]
pub const NUM_WAKERS: usize = 8;
#[cfg(feature = "16-wakers")]
pub const NUM_WAKERS: usize = 16;
#[cfg(feature = "32-wakers")]
pub const NUM_WAKERS: usize = 32;
static UART_TX_WAKERS: [AtomicWaker; NUM_WAKERS] = [const { AtomicWaker::new() }; NUM_WAKERS];
static TX_CONTEXTS: [Mutex<RefCell<TxContext>>; NUM_WAKERS] =
[const { Mutex::new(RefCell::new(TxContext::new())) }; NUM_WAKERS];
// Completion flag. Kept outside of the context structure as an atomic to avoid
// critical section.
static TX_DONE: [AtomicBool; NUM_WAKERS] = [const { AtomicBool::new(false) }; NUM_WAKERS];
#[derive(Debug, thiserror::Error)]
#[error("invalid waker slot index: {0}")]
pub struct InvalidWakerIndex(pub usize);
/// This is a generic interrupt handler to handle asynchronous UART TX operations for a given
/// UART peripheral.
///
/// The user has to call this once in the responsible interrupt handler if the interrupt was
/// triggered by the UART 16550. The relevant [Tx] handle of the UART and the waker slot used
/// for it must be passed as well. [Tx::steal] can be used to create the required handle.
pub fn on_interrupt_tx(tx: &mut Tx, waker_slot: usize) {
if waker_slot >= NUM_WAKERS {
return;
}
let status = tx.regs.read_lsr();
let ier = Ier::new_with_raw_value(tx.regs.read_ier_or_dlm());
// Interrupts are not even enabled.
if !ier.thr_empty() {
return;
}
let mut context = critical_section::with(|cs| {
let context_ref = TX_CONTEXTS[waker_slot].borrow(cs);
*context_ref.borrow()
});
// No transfer active.
if context.slice.is_null() {
return;
}
let slice_len = context.slice.len().unwrap();
// We have to use the THRE instead of the TEMT status flag here, because the interrupt
// is configured to trigger on the THRE flag and the UART might still be busy shifting the
// last byte out.
if (context.progress >= slice_len && status.thr_empty()) || slice_len == 0 {
// Write back updated context structure.
critical_section::with(|cs| {
let context_ref = TX_CONTEXTS[waker_slot].borrow(cs);
*context_ref.borrow_mut() = context;
});
// Transfer is done.
TX_DONE[waker_slot].store(true, core::sync::atomic::Ordering::Relaxed);
tx.disable_interrupt();
UART_TX_WAKERS[waker_slot].wake();
return;
}
// Safety: We documented that the user provided slice must outlive the future, so we convert
// the raw pointer back to the slice here.
let slice = unsafe { context.slice.get() }.expect("slice is invalid");
while context.progress < slice_len {
match tx.write_fifo(slice[context.progress]) {
Ok(_) => context.progress += 1,
Err(nb::Error::WouldBlock) => break,
}
}
// Write back updated context structure.
critical_section::with(|cs| {
let context_ref = TX_CONTEXTS[waker_slot].borrow(cs);
*context_ref.borrow_mut() = context;
});
}
#[derive(Debug, Copy, Clone)]
pub struct TxContext {
progress: usize,
slice: RawBufSlice,
}
#[allow(clippy::new_without_default)]
impl TxContext {
pub const fn new() -> Self {
Self {
progress: 0,
slice: RawBufSlice::new_nulled(),
}
}
}
pub struct TxFuture {
waker_idx: usize,
reg_block: registers::MmioAxiUart16550<'static>,
}
impl TxFuture {
/// Create a new TX future which can be used for asynchronous TX operations.
///
/// # Safety
///
/// This function stores the raw pointer of the passed data slice. The user MUST ensure
/// that the slice outlives the data structure.
pub unsafe fn new(
tx: &mut Tx,
waker_idx: usize,
data: &[u8],
) -> Result<Self, InvalidWakerIndex> {
TX_DONE[waker_idx].store(false, core::sync::atomic::Ordering::Relaxed);
tx.disable_interrupt();
tx.reset_fifo();
let init_fill_count = core::cmp::min(data.len(), FIFO_DEPTH);
critical_section::with(|cs| {
let context_ref = TX_CONTEXTS[waker_idx].borrow(cs);
let mut context = context_ref.borrow_mut();
unsafe {
context.slice.set(data);
}
context.progress = init_fill_count;
});
// We fill the FIFO with initial data.
for data in data.iter().take(init_fill_count) {
tx.write_fifo_unchecked(*data);
}
tx.enable_interrupt();
Ok(Self {
waker_idx,
reg_block: unsafe { tx.regs.clone() },
})
}
}
impl Future for TxFuture {
type Output = usize;
fn poll(
self: core::pin::Pin<&mut Self>,
cx: &mut core::task::Context<'_>,
) -> core::task::Poll<Self::Output> {
UART_TX_WAKERS[self.waker_idx].register(cx.waker());
if TX_DONE[self.waker_idx].swap(false, core::sync::atomic::Ordering::Relaxed) {
let progress = critical_section::with(|cs| {
let mut ctx = TX_CONTEXTS[self.waker_idx].borrow(cs).borrow_mut();
ctx.slice.set_null();
ctx.progress
});
return core::task::Poll::Ready(progress);
}
core::task::Poll::Pending
}
}
impl Drop for TxFuture {
fn drop(&mut self) {
let mut tx = Tx::new(unsafe { self.reg_block.clone() });
tx.disable_interrupt();
}
}
pub struct TxAsync<D: DelayNs> {
tx: Tx,
waker_idx: usize,
delay: D,
}
impl<D: DelayNs> TxAsync<D> {
/// Create a new asynchronous TX structure.
///
/// The delay function is a [DelayNs] provider which is used to allow flushing the
/// device properly. This is because even when a write has finished, the UART might still
/// be busy shifting the last byte out.
pub fn new(tx: Tx, waker_idx: usize, delay: D) -> Result<Self, InvalidWakerIndex> {
if waker_idx >= NUM_WAKERS {
return Err(InvalidWakerIndex(waker_idx));
}
Ok(Self {
tx,
waker_idx,
delay,
})
}
/// Write a buffer asynchronously.
///
/// This implementation is not side effect free, and a started future might have already
/// written part of the passed buffer.
pub async fn write(&mut self, buf: &[u8]) -> usize {
if buf.is_empty() {
return 0;
}
let fut = unsafe { TxFuture::new(&mut self.tx, self.waker_idx, buf).unwrap() };
fut.await
}
/// Flush this output stream, ensuring that all intermediately buffered contents reach their destination.
pub async fn flush(&mut self) {
while !self.tx.tx_empty() {
self.delay.delay_us(10).await;
}
}
pub fn release(self) -> Tx {
self.tx
}
}
impl<D: DelayNs> embedded_io::ErrorType for TxAsync<D> {
type Error = Infallible;
}
impl<D: DelayNs> embedded_io_async::Write for TxAsync<D> {
/// Write a buffer asynchronously.
///
/// This implementation is not side effect free, and a started future might have already
/// written part of the passed buffer.
async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
Ok(self.write(buf).await)
}
/// Flush this output stream, ensuring that all intermediately buffered contents reach their destination.
async fn flush(&mut self) -> Result<(), Self::Error> {
self.flush().await;
Ok(())
}
}
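
A sketch of how the asynchronous TX pieces above could be tied together, assuming waker slot 0 (the `1-waker` default), the hypothetical base address 0x4400_0000 and some DelayNs provider supplied by the surrounding HAL.

use axi_uart16550::{Tx, TxAsync, on_interrupt_tx};
use embedded_hal_async::delay::DelayNs;

// Hypothetical base address of the AXI UART 16550 IP core.
const UART_BASE_ADDR: usize = 0x4400_0000;
// Waker slot assigned to this UART instance (the default `1-waker` feature provides slot 0).
const UART_WAKER_SLOT: usize = 0;

/// Called from the platform specific IRQ handler when the UART TX interrupt fires.
fn on_uart16550_tx_interrupt() {
    // Safety: the TX half is otherwise only used through TxAsync, as documented for Tx::steal.
    let mut tx = unsafe { Tx::steal(UART_BASE_ADDR) };
    on_interrupt_tx(&mut tx, UART_WAKER_SLOT);
}

/// Asynchronous task writing through the TX half. `tx` comes from AxiUart16550::split and
/// `delay` is some DelayNs implementation provided by the surrounding HAL (an assumption here).
async fn uart16550_tx_task<D: DelayNs>(tx: Tx, delay: D) {
    let mut tx_async = TxAsync::new(tx, UART_WAKER_SLOT, delay).unwrap();
    let _written = tx_async.write(b"hello from the AXI UART 16550\r\n").await;
    tx_async.flush().await;
}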

View File

@ -1 +0,0 @@
/target

View File

@ -1,27 +0,0 @@
[package]
name = "axi-uartlite"
version = "0.1.0"
description = "LogiCORE AXI UART Lite v2.0 driver"
edition = "2024"
[dependencies]
derive-mmio = { git = "https://github.com/knurling-rs/derive-mmio.git", rev = "0806ce10b132ca15c6d9122a2d15a6e146b01520"}
bitbybit = "1.3"
arbitrary-int = "1.3"
nb = "1"
embedded-hal-nb = "1"
embedded-io = "0.6"
embedded-io-async = "0.6"
critical-section = "1"
thiserror = { version = "2", default-features = false }
embassy-sync = "0.6"
raw-slice = { git = "https://egit.irs.uni-stuttgart.de/rust/raw-slice.git" }
[features]
default = ["1-waker"]
1-waker = []
2-wakers = []
4-wakers = []
8-wakers = []
16-wakers = []
32-wakers = []

View File

@ -1,264 +0,0 @@
//! # AXI UART Lite v2.0 driver
//!
//! This is a native Rust driver for the AMD AXI UART Lite v2.0 IP core.
//!
//! # Features
//!
//! If asynchronous TX operations are used, the number of wakers, which defaults to 1, can
//! also be configured. The [tx_async] module provides more details on the meaning of this number.
//!
//! - `1-waker` which is also a `default` feature
//! - `2-wakers`
//! - `4-wakers`
//! - `8-wakers`
//! - `16-wakers`
//! - `32-wakers`
#![no_std]
use core::convert::Infallible;
use registers::Control;
pub mod registers;
pub mod tx;
pub use tx::*;
pub mod rx;
pub use rx::*;
pub mod tx_async;
pub use tx_async::*;
pub const FIFO_DEPTH: usize = 16;
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
pub struct RxErrorsCounted {
parity: u8,
frame: u8,
overrun: u8,
}
impl RxErrorsCounted {
pub const fn new() -> Self {
Self {
parity: 0,
frame: 0,
overrun: 0,
}
}
pub const fn parity(&self) -> u8 {
self.parity
}
pub const fn frame(&self) -> u8 {
self.frame
}
pub const fn overrun(&self) -> u8 {
self.overrun
}
pub fn has_errors(&self) -> bool {
self.parity > 0 || self.frame > 0 || self.overrun > 0
}
}
pub struct AxiUartlite {
rx: Rx,
tx: Tx,
errors: RxErrorsCounted,
}
impl AxiUartlite {
/// Create a new AXI UART Lite peripheral driver.
///
/// # Safety
///
/// - The `base_addr` must be a valid memory-mapped register address of an AXI UART Lite peripheral.
/// - Dereferencing an invalid or misaligned address results in **undefined behavior**.
/// - The caller must ensure that no other code concurrently modifies the same peripheral registers
/// in an unsynchronized manner to prevent data races.
/// - This function does not enforce uniqueness of driver instances. Creating multiple instances
/// with the same `base_addr` can lead to unintended behavior if not externally synchronized.
/// - The driver performs **volatile** reads and writes to the provided address.
pub const unsafe fn new(base_addr: u32) -> Self {
let regs = unsafe { registers::AxiUartlite::new_mmio_at(base_addr as usize) };
Self {
rx: Rx {
regs: unsafe { regs.clone() },
errors: None,
},
tx: Tx { regs, errors: None },
errors: RxErrorsCounted::new(),
}
}
#[inline(always)]
pub const fn regs(&mut self) -> &mut registers::MmioAxiUartlite<'static> {
&mut self.tx.regs
}
/// Write into the UART Lite.
///
/// Returns [nb::Error::WouldBlock] if the TX FIFO is full.
#[inline]
pub fn write_fifo(&mut self, data: u8) -> nb::Result<(), Infallible> {
self.tx.write_fifo(data)?;
if let Some(errors) = self.tx.read_and_clear_last_error() {
self.handle_status_reg_errors(errors);
}
Ok(())
}
/// Write into the FIFO without checking the FIFO fill status.
///
/// This can be useful to completely fill the FIFO if it is known to be empty.
#[inline(always)]
pub fn write_fifo_unchecked(&mut self, data: u8) {
self.tx.write_fifo_unchecked(data);
}
#[inline]
pub fn read_fifo(&mut self) -> nb::Result<u8, Infallible> {
let val = self.rx.read_fifo()?;
if let Some(errors) = self.rx.read_and_clear_last_error() {
self.handle_status_reg_errors(errors);
}
Ok(val)
}
#[inline(always)]
pub fn read_fifo_unchecked(&mut self) -> u8 {
self.rx.read_fifo_unchecked()
}
// TODO: Make this non-mut as soon as pure reads are available
#[inline(always)]
pub fn tx_fifo_empty(&mut self) -> bool {
self.tx.fifo_empty()
}
// TODO: Make this non-mut as soon as pure reads are available
#[inline(always)]
pub fn tx_fifo_full(&mut self) -> bool {
self.tx.fifo_full()
}
// TODO: Make this non-mut as soon as pure reads are available
#[inline(always)]
pub fn rx_has_data(&mut self) -> bool {
self.rx.has_data()
}
/// Read the error counters and also reset them.
pub fn read_and_clear_errors(&mut self) -> RxErrorsCounted {
let errors = self.errors;
self.errors = RxErrorsCounted::new();
errors
}
#[inline(always)]
fn handle_status_reg_errors(&mut self, errors: RxErrors) {
if errors.frame() {
self.errors.frame = self.errors.frame.saturating_add(1);
}
if errors.parity() {
self.errors.parity = self.errors.parity.saturating_add(1);
}
if errors.overrun() {
self.errors.overrun = self.errors.overrun.saturating_add(1);
}
}
#[inline]
pub fn reset_rx_fifo(&mut self) {
self.regs().write_ctrl_reg(
Control::builder()
.with_enable_interrupt(false)
.with_reset_rx_fifo(true)
.with_reset_tx_fifo(false)
.build(),
);
}
#[inline]
pub fn reset_tx_fifo(&mut self) {
self.regs().write_ctrl_reg(
Control::builder()
.with_enable_interrupt(false)
.with_reset_rx_fifo(false)
.with_reset_tx_fifo(true)
.build(),
);
}
#[inline]
pub fn split(self) -> (Tx, Rx) {
(self.tx, self.rx)
}
#[inline]
pub fn enable_interrupt(&mut self) {
self.regs().write_ctrl_reg(
Control::builder()
.with_enable_interrupt(true)
.with_reset_rx_fifo(false)
.with_reset_tx_fifo(false)
.build(),
);
}
#[inline]
pub fn disable_interrupt(&mut self) {
self.regs().write_ctrl_reg(
Control::builder()
.with_enable_interrupt(false)
.with_reset_rx_fifo(false)
.with_reset_tx_fifo(false)
.build(),
);
}
}
impl embedded_hal_nb::serial::ErrorType for AxiUartlite {
type Error = Infallible;
}
impl embedded_hal_nb::serial::Write for AxiUartlite {
#[inline]
fn write(&mut self, word: u8) -> nb::Result<(), Self::Error> {
self.tx.write(word)
}
#[inline]
fn flush(&mut self) -> nb::Result<(), Self::Error> {
self.tx.flush()
}
}
impl embedded_hal_nb::serial::Read for AxiUartlite {
#[inline]
fn read(&mut self) -> nb::Result<u8, Self::Error> {
self.rx.read()
}
}
impl embedded_io::ErrorType for AxiUartlite {
type Error = Infallible;
}
impl embedded_io::Read for AxiUartlite {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
self.rx.read(buf)
}
}
impl embedded_io::Write for AxiUartlite {
fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
self.tx.write(buf)
}
fn flush(&mut self) -> Result<(), Self::Error> {
self.tx.flush()
}
}
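
For reference, a minimal blocking usage sketch of the UART Lite driver removed in this commit. The base address 0x4060_0000 is an assumption for illustration; the baudrate of the UART Lite core is fixed at synthesis time, so no clock configuration is needed.

use axi_uartlite::AxiUartlite;

fn uartlite_echo() {
    // Hypothetical base address of the AXI UART Lite IP core in the FPGA design.
    const UART_BASE_ADDR: u32 = 0x4060_0000;
    // Safety: the address points at the UART Lite registers and no other driver instance exists.
    let mut uart = unsafe { AxiUartlite::new(UART_BASE_ADDR) };
    // read_and_clear_errors() could be polled periodically to inspect the parity, frame and
    // overrun counters accumulated by the calls below.
    // Echo back every received byte, busy-waiting on the nb API.
    loop {
        let byte = nb::block!(uart.read_fifo()).unwrap();
        nb::block!(uart.write_fifo(byte)).unwrap();
    }
}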

View File

@ -1,55 +0,0 @@
#[bitbybit::bitfield(u32)]
pub struct RxFifo {
#[bits(0..=7, r)]
pub data: u8,
}
#[bitbybit::bitfield(u32)]
pub struct TxFifo {
#[bits(0..=7, w)]
pub data: u8,
}
#[bitbybit::bitfield(u32)]
pub struct Status {
#[bit(7, r)]
pub parity_error: bool,
#[bit(6, r)]
pub frame_error: bool,
#[bit(5, r)]
pub overrun_error: bool,
#[bit(4, r)]
pub intr_enabled: bool,
#[bit(3, r)]
pub tx_fifo_full: bool,
#[bit(2, r)]
pub tx_fifo_empty: bool,
#[bit(1, r)]
pub rx_fifo_full: bool,
/// RX FIFO contains valid data.
#[bit(0, r)]
pub rx_fifo_valid_data: bool,
}
#[bitbybit::bitfield(u32, default = 0x0)]
pub struct Control {
#[bit(4, w)]
enable_interrupt: bool,
#[bit(1, w)]
reset_rx_fifo: bool,
#[bit(0, w)]
reset_tx_fifo: bool,
}
#[derive(derive_mmio::Mmio)]
#[repr(C)]
pub struct AxiUartlite {
#[mmio(RO)]
rx_fifo: RxFifo,
tx_fifo: TxFifo,
#[mmio(RO)]
stat_reg: Status,
ctrl_reg: Control,
}
unsafe impl Send for MmioAxiUartlite<'static> {}

View File

@ -1,172 +0,0 @@
use core::convert::Infallible;
use crate::registers::{self, AxiUartlite, Status};
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
pub struct RxErrors {
parity: bool,
frame: bool,
overrun: bool,
}
impl RxErrors {
pub const fn new() -> Self {
Self {
parity: false,
frame: false,
overrun: false,
}
}
pub const fn parity(&self) -> bool {
self.parity
}
pub const fn frame(&self) -> bool {
self.frame
}
pub const fn overrun(&self) -> bool {
self.overrun
}
pub const fn has_errors(&self) -> bool {
self.parity || self.frame || self.overrun
}
}
pub struct Rx {
pub(crate) regs: registers::MmioAxiUartlite<'static>,
pub(crate) errors: Option<RxErrors>,
}
impl Rx {
/// Steal the RX part of the UART Lite.
///
/// You should only use this if you can not use the regular [super::AxiUartlite] constructor
/// and the [super::AxiUartlite::split] method.
///
/// This function assumes that the setup of the UART was already done.
/// It can be used to create an RX handle inside an interrupt handler without having to use
/// a [critical_section::Mutex] if the user can guarantee that the RX handle will only be
/// used by the interrupt handler or only interrupt specific API will be used.
///
/// # Safety
///
/// The same safety rules specified in [super::AxiUartlite] apply.
#[inline]
pub const unsafe fn steal(base_addr: usize) -> Self {
Self {
regs: unsafe { AxiUartlite::new_mmio_at(base_addr) },
errors: None,
}
}
#[inline]
pub fn read_fifo(&mut self) -> nb::Result<u8, Infallible> {
let status_reg = self.regs.read_stat_reg();
if !status_reg.rx_fifo_valid_data() {
return Err(nb::Error::WouldBlock);
}
let val = self.read_fifo_unchecked();
if let Some(errors) = handle_status_reg_errors(&status_reg) {
self.errors = Some(errors);
}
Ok(val)
}
#[inline(always)]
pub fn read_fifo_unchecked(&mut self) -> u8 {
self.regs.read_rx_fifo().data()
}
// TODO: Make this non-mut as soon as pure reads are available
#[inline(always)]
pub fn has_data(&mut self) -> bool {
self.regs.read_stat_reg().rx_fifo_valid_data()
}
/// This simply reads all available bytes in the RX FIFO.
///
/// It returns the number of read bytes.
#[inline]
pub fn read_whole_fifo(&mut self, buf: &mut [u8; 16]) -> usize {
let mut read = 0;
while read < buf.len() {
match self.read_fifo() {
Ok(byte) => {
buf[read] = byte;
read += 1;
}
Err(nb::Error::WouldBlock) => break,
}
}
read
}
/// Can be called in the interrupt handler for the UART Lite to handle RX reception.
///
/// Simply calls [Rx::read_whole_fifo].
#[inline]
pub fn on_interrupt_rx(&mut self, buf: &mut [u8; 16]) -> usize {
self.read_whole_fifo(buf)
}
pub fn read_and_clear_last_error(&mut self) -> Option<RxErrors> {
let errors = self.errors?;
self.errors = None;
Some(errors)
}
}
impl embedded_hal_nb::serial::ErrorType for Rx {
type Error = Infallible;
}
impl embedded_hal_nb::serial::Read for Rx {
#[inline]
fn read(&mut self) -> nb::Result<u8, Self::Error> {
self.read_fifo()
}
}
impl embedded_io::ErrorType for Rx {
type Error = Infallible;
}
impl embedded_io::Read for Rx {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
if buf.is_empty() {
return Ok(0);
}
while !self.has_data() {}
let mut read = 0;
for byte in buf.iter_mut() {
match self.read_fifo() {
Ok(data) => {
*byte = data;
read += 1;
}
Err(nb::Error::WouldBlock) => break,
}
}
Ok(read)
}
}
pub const fn handle_status_reg_errors(status_reg: &Status) -> Option<RxErrors> {
let mut errors = RxErrors::new();
if status_reg.frame_error() {
errors.frame = true;
}
if status_reg.parity_error() {
errors.parity = true;
}
if status_reg.overrun_error() {
errors.overrun = true;
}
if !errors.has_errors() {
return None;
}
Some(errors)
}
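
A sketch of how the RX half above could be serviced from a platform-specific interrupt handler, assuming the hypothetical base address 0x4060_0000 and that the peripheral interrupt was previously enabled via AxiUartlite::enable_interrupt.

use axi_uartlite::Rx;

// Hypothetical base address of the AXI UART Lite IP core.
const UART_BASE_ADDR: usize = 0x4060_0000;

/// Called from the platform specific IRQ handler when the UART Lite interrupt fires.
fn on_uartlite_rx_interrupt() {
    // Safety: only this handler accesses the RX half, as documented for Rx::steal.
    let mut rx = unsafe { Rx::steal(UART_BASE_ADDR) };
    let mut buf = [0u8; 16];
    let read = rx.on_interrupt_rx(&mut buf);
    // Hypothetical data hook: hand buf[..read] to the application here.
    let _ = &buf[..read];
    if let Some(errors) = rx.read_and_clear_last_error() {
        // Hypothetical error hook: count or report the RX errors here.
        let _ = errors;
    }
}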

View File

@ -1,142 +0,0 @@
use core::convert::Infallible;
use crate::{
RxErrors, handle_status_reg_errors,
registers::{self, Control, TxFifo},
};
pub struct Tx {
pub(crate) regs: registers::MmioAxiUartlite<'static>,
pub(crate) errors: Option<RxErrors>,
}
impl Tx {
/// Steal the TX part of the UART Lite.
///
/// You should only use this if you can not use the regular [super::AxiUartlite] constructor
/// and the [super::AxiUartlite::split] method.
///
/// This function assumes that the setup of the UART was already done.
/// It can be used to create a TX handle inside an interrupt handler without having to use
/// a [critical_section::Mutex] if the user can guarantee that the TX handle will only be
/// used by the interrupt handler, or only interrupt specific API will be used.
///
/// # Safety
///
/// The same safety rules specified in [super::AxiUartlite] apply.
pub unsafe fn steal(base_addr: usize) -> Self {
let regs = unsafe { registers::AxiUartlite::new_mmio_at(base_addr) };
Self { regs, errors: None }
}
/// Write into the UART Lite.
///
/// Returns [nb::Error::WouldBlock] if the TX FIFO is full.
#[inline]
pub fn write_fifo(&mut self, data: u8) -> nb::Result<(), Infallible> {
let status_reg = self.regs.read_stat_reg();
if status_reg.tx_fifo_full() {
return Err(nb::Error::WouldBlock);
}
self.write_fifo_unchecked(data);
if let Some(errors) = handle_status_reg_errors(&status_reg) {
self.errors = Some(errors);
}
Ok(())
}
#[inline]
pub fn reset_fifo(&mut self) {
let status = self.regs.read_stat_reg();
self.regs.write_ctrl_reg(
Control::builder()
.with_enable_interrupt(status.intr_enabled())
.with_reset_rx_fifo(false)
.with_reset_tx_fifo(true)
.build(),
);
}
/// Write into the FIFO without checking the FIFO fill status.
///
/// This can be useful to completely fill the FIFO if it is known to be empty.
#[inline(always)]
pub fn write_fifo_unchecked(&mut self, data: u8) {
self.regs
.write_tx_fifo(TxFifo::new_with_raw_value(data as u32));
}
// TODO: Make this non-mut as soon as pure reads are available
#[inline(always)]
pub fn fifo_empty(&mut self) -> bool {
self.regs.read_stat_reg().tx_fifo_empty()
}
// TODO: Make this non-mut as soon as pure reads are available
#[inline(always)]
pub fn fifo_full(&mut self) -> bool {
self.regs.read_stat_reg().tx_fifo_full()
}
/// Fills the FIFO with user provided data until the user data
/// is consumed or the FIFO is full.
///
/// Returns the amount of written data, which might be smaller than the buffer size.
pub fn fill_fifo(&mut self, buf: &[u8]) -> usize {
let mut written = 0;
while written < buf.len() {
match self.write_fifo(buf[written]) {
Ok(_) => written += 1,
Err(nb::Error::WouldBlock) => break,
}
}
written
}
pub fn read_and_clear_last_error(&mut self) -> Option<RxErrors> {
let errors = self.errors?;
self.errors = None;
Some(errors)
}
}
impl embedded_hal_nb::serial::ErrorType for Tx {
type Error = Infallible;
}
impl embedded_hal_nb::serial::Write for Tx {
fn write(&mut self, word: u8) -> nb::Result<(), Self::Error> {
self.write_fifo(word)
}
fn flush(&mut self) -> nb::Result<(), Self::Error> {
while !self.fifo_empty() {}
Ok(())
}
}
impl embedded_io::ErrorType for Tx {
type Error = Infallible;
}
impl embedded_io::Write for Tx {
fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
if buf.is_empty() {
return Ok(0);
}
while self.fifo_full() {}
let mut written = 0;
for &byte in buf.iter() {
match self.write_fifo(byte) {
Ok(_) => written += 1,
Err(nb::Error::WouldBlock) => break,
}
}
Ok(written)
}
fn flush(&mut self) -> Result<(), Self::Error> {
while !self.fifo_empty() {}
Ok(())
}
}
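
A small blocking helper sketch on top of the TX API above, assuming `tx` was obtained from AxiUartlite::split; it only uses fill_fifo and fifo_empty.

use axi_uartlite::Tx;

/// Push a whole buffer through the 16 byte TX FIFO, busy-waiting until everything was sent.
fn write_all_blocking(tx: &mut Tx, buf: &[u8]) {
    let mut written = 0;
    while written < buf.len() {
        // fill_fifo returns how many bytes actually fit into the FIFO.
        written += tx.fill_fifo(&buf[written..]);
    }
    // Wait until the last byte has left the FIFO.
    while !tx.fifo_empty() {}
}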

View File

@ -1,221 +0,0 @@
//! # Asynchronous TX support.
//!
//! This module provides support for asynchronous non-blocking TX transfers.
//!
//! It provides a static number of async wakers to allow a configurable amount of pollable
//! [TxFuture]s. Each UARTLite [Tx] instance which performs asynchronous TX operations needs
//! to be explicitly assigned a waker when creating an awaitable [TxAsync] structure
//! as well as when calling the [on_interrupt_tx] handler.
//!
//! The maximum number of available wakers is configured via the waker feature flags:
//!
//! - `1-waker`
//! - `2-wakers`
//! - `4-wakers`
//! - `8-wakers`
//! - `16-wakers`
//! - `32-wakers`
use core::{cell::RefCell, convert::Infallible, sync::atomic::AtomicBool};
use critical_section::Mutex;
use embassy_sync::waitqueue::AtomicWaker;
use raw_slice::RawBufSlice;
use crate::{FIFO_DEPTH, Tx};
#[cfg(feature = "1-waker")]
pub const NUM_WAKERS: usize = 1;
#[cfg(feature = "2-wakers")]
pub const NUM_WAKERS: usize = 2;
#[cfg(feature = "4-wakers")]
pub const NUM_WAKERS: usize = 4;
#[cfg(feature = "8-wakers")]
pub const NUM_WAKERS: usize = 8;
#[cfg(feature = "16-wakers")]
pub const NUM_WAKERS: usize = 16;
#[cfg(feature = "32-wakers")]
pub const NUM_WAKERS: usize = 32;
static UART_TX_WAKERS: [AtomicWaker; NUM_WAKERS] = [const { AtomicWaker::new() }; NUM_WAKERS];
static TX_CONTEXTS: [Mutex<RefCell<TxContext>>; NUM_WAKERS] =
[const { Mutex::new(RefCell::new(TxContext::new())) }; NUM_WAKERS];
// Completion flag. Kept outside of the context structure as an atomic to avoid
// critical section.
static TX_DONE: [AtomicBool; NUM_WAKERS] = [const { AtomicBool::new(false) }; NUM_WAKERS];
#[derive(Debug, thiserror::Error)]
#[error("invalid waker slot index: {0}")]
pub struct InvalidWakerIndex(pub usize);
/// This is a generic interrupt handler to handle asynchronous UART TX operations for a given
/// UART peripheral.
///
/// The user has to call this once in the responsible interrupt handler if the interrupt was
/// triggered by the UART Lite. The relevant [Tx] handle of the UART Lite and the waker slot used
/// for it must be passed as well. [Tx::steal] can be used to create the required handle.
pub fn on_interrupt_tx(uartlite_tx: &mut Tx, waker_slot: usize) {
if waker_slot >= NUM_WAKERS {
return;
}
let status = uartlite_tx.regs.read_stat_reg();
// Interrupts are not even enabled.
if !status.intr_enabled() {
return;
}
let mut context = critical_section::with(|cs| {
let context_ref = TX_CONTEXTS[waker_slot].borrow(cs);
*context_ref.borrow()
});
// No transfer active.
if context.slice.is_null() {
return;
}
let slice_len = context.slice.len().unwrap();
if (context.progress >= slice_len && status.tx_fifo_empty()) || slice_len == 0 {
// Write back updated context structure.
critical_section::with(|cs| {
let context_ref = TX_CONTEXTS[waker_slot].borrow(cs);
*context_ref.borrow_mut() = context;
});
// Transfer is done.
TX_DONE[waker_slot].store(true, core::sync::atomic::Ordering::Relaxed);
UART_TX_WAKERS[waker_slot].wake();
return;
}
// Safety: We documented that the user provided slice must outlive the future, so we convert
// the raw pointer back to the slice here.
let slice = unsafe { context.slice.get() }.expect("slice is invalid");
while context.progress < slice_len {
if uartlite_tx.regs.read_stat_reg().tx_fifo_full() {
break;
}
// Safety: TX structure is owned by the future which does not write into the data
// register, so we can assume we are the only one writing to the data register.
uartlite_tx.write_fifo_unchecked(slice[context.progress]);
context.progress += 1;
}
// Write back updated context structure.
critical_section::with(|cs| {
let context_ref = TX_CONTEXTS[waker_slot].borrow(cs);
*context_ref.borrow_mut() = context;
});
}
#[derive(Debug, Copy, Clone)]
pub struct TxContext {
progress: usize,
slice: RawBufSlice,
}
#[allow(clippy::new_without_default)]
impl TxContext {
pub const fn new() -> Self {
Self {
progress: 0,
slice: RawBufSlice::new_nulled(),
}
}
}
pub struct TxFuture {
waker_idx: usize,
}
impl TxFuture {
/// Create a new TX future which can be used for asynchronous TX operations.
///
/// # Safety
///
/// This function stores the raw pointer of the passed data slice. The user MUST ensure
/// that the slice outlives the data structure.
pub unsafe fn new(
tx: &mut Tx,
waker_idx: usize,
data: &[u8],
) -> Result<Self, InvalidWakerIndex> {
TX_DONE[waker_idx].store(false, core::sync::atomic::Ordering::Relaxed);
tx.reset_fifo();
let init_fill_count = core::cmp::min(data.len(), FIFO_DEPTH);
// We fill the FIFO with initial data.
for data in data.iter().take(init_fill_count) {
tx.write_fifo_unchecked(*data);
}
critical_section::with(|cs| {
let context_ref = TX_CONTEXTS[waker_idx].borrow(cs);
let mut context = context_ref.borrow_mut();
unsafe {
context.slice.set(data);
}
context.progress = init_fill_count;
});
Ok(Self { waker_idx })
}
}
impl Future for TxFuture {
type Output = usize;
fn poll(
self: core::pin::Pin<&mut Self>,
cx: &mut core::task::Context<'_>,
) -> core::task::Poll<Self::Output> {
UART_TX_WAKERS[self.waker_idx].register(cx.waker());
if TX_DONE[self.waker_idx].swap(false, core::sync::atomic::Ordering::Relaxed) {
let progress = critical_section::with(|cs| {
let mut ctx = TX_CONTEXTS[self.waker_idx].borrow(cs).borrow_mut();
ctx.slice.set_null();
ctx.progress
});
return core::task::Poll::Ready(progress);
}
core::task::Poll::Pending
}
}
impl Drop for TxFuture {
fn drop(&mut self) {}
}
pub struct TxAsync {
tx: Tx,
waker_idx: usize,
}
impl TxAsync {
pub fn new(tx: Tx, waker_idx: usize) -> Result<Self, InvalidWakerIndex> {
if waker_idx >= NUM_WAKERS {
return Err(InvalidWakerIndex(waker_idx));
}
Ok(Self { tx, waker_idx })
}
/// Write a buffer asynchronously.
///
/// This implementation is not side effect free, and a started future might have already
/// written part of the passed buffer.
pub async fn write(&mut self, buf: &[u8]) -> usize {
if buf.is_empty() {
return 0;
}
let fut = unsafe { TxFuture::new(&mut self.tx, self.waker_idx, buf).unwrap() };
fut.await
}
pub fn release(self) -> Tx {
self.tx
}
}
impl embedded_io::ErrorType for TxAsync {
type Error = Infallible;
}
impl embedded_io_async::Write for TxAsync {
/// Write a buffer asynchronously.
///
/// This implementation is not side effect free, and a started future might have already
/// written part of the passed buffer.
async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
Ok(self.write(buf).await)
}
}
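
A sketch of how the asynchronous UART Lite TX support above could be used, assuming waker slot 0 (the `1-waker` default), the hypothetical base address 0x4060_0000, and that the peripheral interrupt was enabled via AxiUartlite::enable_interrupt beforehand.

use axi_uartlite::{Tx, TxAsync, on_interrupt_tx};

// Hypothetical base address of the AXI UART Lite IP core.
const UART_BASE_ADDR: usize = 0x4060_0000;
// Waker slot assigned to this UART Lite instance (the default `1-waker` feature provides slot 0).
const UART_WAKER_SLOT: usize = 0;

/// Called from the platform specific IRQ handler when the UART Lite interrupt fires.
fn on_uartlite_tx_interrupt() {
    // Safety: the TX half is otherwise only used through TxAsync, as documented for Tx::steal.
    let mut tx = unsafe { Tx::steal(UART_BASE_ADDR) };
    on_interrupt_tx(&mut tx, UART_WAKER_SLOT);
}

/// Asynchronous task writing through the TX half; `tx` comes from AxiUartlite::split.
async fn uartlite_tx_task(tx: Tx) {
    let mut tx_async = TxAsync::new(tx, UART_WAKER_SLOT).unwrap();
    let _written = tx_async.write(b"hello from the AXI UART Lite\r\n").await;
}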

View File

@ -34,5 +34,5 @@ embassy-executor = { path = "/home/rmueller/Rust/embassy/embassy-executor", feat
]}
embassy-time = { path = "/home/rmueller/Rust/embassy/embassy-time", version = "0.4" }
heapless = "0.8"
- axi-uartlite = { path = "../../axi-uartlite-rs" }
+ axi-uartlite = { path = "/home/rmueller/Rust/axi-uartlite-rs" }
- axi-uart16550 = { path = "../../axi-uart16550-rs" }
+ axi-uart16550 = { path = "/home/rmueller/Rust/axi-uart16550-rs" }