Compare commits

1 Commits

Author SHA1 Message Date
bf1a171760 Ethernet and smoltcp/embassy-net support
Some checks failed
ci / Check build (pull_request) Has been cancelled
ci / Check formatting (pull_request) Has been cancelled
ci / Check Documentation Build (pull_request) Has been cancelled
ci / Clippy (pull_request) Has been cancelled
ci / Check build (push) Has been cancelled
ci / Check formatting (push) Has been cancelled
ci / Check Documentation Build (push) Has been cancelled
ci / Clippy (push) Has been cancelled
2025-07-22 12:24:32 +02:00
44 changed files with 8532 additions and 4259 deletions

View File

@@ -8,5 +8,16 @@ members = [
"examples/simple",
"examples/embassy",
"examples/zedboard",
"zynq-mmu",
]
exclude = ["experiments"]
# cargo build/run --release
[profile.release]
codegen-units = 1
debug = 2
debug-assertions = false # <-
incremental = false
lto = true
opt-level = 3 # <-
overflow-checks = false # <-

View File

@@ -11,7 +11,7 @@ keywords = ["no-std", "arm", "cortex-a", "amd", "zynq7000"]
categories = ["embedded", "no-std", "hardware-support"]
[dependencies]
cortex-ar = "0.2"
cortex-ar = { version = "0.2", git = "https://github.com/rust-embedded/cortex-ar.git", rev = "79dba7000d2090d13823bfb783d9d64be8b778d2", features = ["critical-section-single-core"] }
zynq7000-rt = { path = "../../zynq7000-rt" }
zynq7000 = { path = "../../zynq7000" }
zynq7000-hal = { path = "../../zynq7000-hal" }
@@ -25,11 +25,11 @@ embedded-hal = "1"
fugit = "0.3"
log = "0.4"
embassy-executor = { git = "https://github.com/embassy-rs/embassy.git", branch = "main", features = [
embassy-executor = { git = "https://github.com/us-irs/embassy.git", branch = "cortex-ar-update", features = [
"arch-cortex-ar",
"executor-thread",
]}
embassy-time = { git = "https://github.com/embassy-rs/embassy.git", branch = "main", version = "0.4", features = ["tick-hz-1_000_000"] }
embassy-time = { version = "0.4", features = ["tick-hz-1_000_000"] }
[profile.release]
codegen-units = 1

View File

@@ -9,7 +9,7 @@ repository = "https://egit.irs.uni-stuttgart.de/rust/zynq7000-rs"
license = "MIT OR Apache-2.0"
[dependencies]
cortex-ar = "0.2"
cortex-ar = { version = "0.2", git = "https://github.com/rust-embedded/cortex-ar.git", rev = "79dba7000d2090d13823bfb783d9d64be8b778d2", features = ["critical-section-single-core"] }
zynq7000-rt = { path = "../../zynq7000-rt" }
zynq7000 = { path = "../../zynq7000" }
zynq7000-hal = { path = "../../zynq7000-hal" }

View File

@@ -57,8 +57,8 @@ pub fn main() -> ! {
}
}
#[unsafe(no_mangle)]
pub extern "C" fn _irq_handler() {}
#[zynq7000_rt::irq]
pub fn irq_handler() {}
#[unsafe(no_mangle)]
pub extern "C" fn _abort_handler() {

View File

@@ -11,13 +11,14 @@ keywords = ["no-std", "arm", "cortex-a", "amd", "zynq7000"]
categories = ["embedded", "no-std", "hardware-support"]
[dependencies]
cortex-ar = "0.2"
cortex-ar = { version = "0.2", git = "https://github.com/rust-embedded/cortex-ar.git", rev = "79dba7000d2090d13823bfb783d9d64be8b778d2", features = ["critical-section-single-core"] }
zynq7000-rt = { path = "../../zynq7000-rt" }
zynq7000 = { path = "../../zynq7000" }
zynq7000-hal = { path = "../../zynq7000-hal" }
zynq7000-embassy = { path = "../../zynq7000-embassy" }
l3gd20 = { git = "https://github.com/us-irs/l3gd20.git", branch = "add-async-if" }
embedded-io = "0.6"
bitbybit = "1.3"
arbitrary-int = "1.3"
embedded-io-async = "0.6"
critical-section = "1"
@@ -27,12 +28,15 @@ embedded-hal = "1"
embedded-hal-async = "1"
fugit = "0.3"
log = "0.4"
rand = { version = "0.9", default-features = false, features = ["small_rng"] }
embassy-executor = { git = "https://github.com/embassy-rs/embassy.git", branch = "main", features = [
embassy-executor = { git = "https://github.com/us-irs/embassy.git", branch = "cortex-ar-update", features = [
"arch-cortex-ar",
"executor-thread",
]}
embassy-time = { git = "https://github.com/embassy-rs/embassy.git", branch = "main", version = "0.4", features = ["tick-hz-1_000_000"] }
embassy-time = { version = "0.4", features = ["tick-hz-1_000_000"] }
embassy-net = { version = "0.7", features = ["dhcpv4", "packet-trace", "medium-ethernet", "icmp", "tcp", "udp"] }
embassy-sync = { version = "0.7" }
heapless = "0.8"
axi-uartlite = { git = "https://egit.irs.uni-stuttgart.de/rust/axi-uartlite.git" }
axi-uart16550 = { git = "https://egit.irs.uni-stuttgart.de/rust/axi-uart16550.git" }

View File

@@ -0,0 +1,521 @@
//! Zedboard ethernet example code.
//!
//! This code uses embassy-net, a smoltcp based networking stack, as the IP stack.
//! It uses DHCP by default to assign the IP address. The assigned address will be displayed on
//! the console.
//!
//! Alternatively, you can also set a static IPv4 configuration via the `STATIC_IPV4_CONFIG`
//! constant and by setting `USE_DHCP` to `false`.
//!
//! It also exposes simple UDP and TCP echo servers. You can use the following sample commands
//! to send UDP or TCP data to the Zedboard using the Unix `netcat` application:
//!
//! ## UDP
//!
//! ```sh
//! echo "Hello Zedboard" | nc -uN <ip-address> 8000
//! ```
//!
//! ## TCP
//!
//! ```sh
//! echo "Hello Zedboard" | nc -N <ip-address> 8000
//! ```
#![no_std]
#![no_main]
use core::{net::Ipv4Addr, panic::PanicInfo};
use cortex_ar::asm::nop;
use embassy_executor::Spawner;
use embassy_net::{Ipv4Cidr, StaticConfigV4, tcp::TcpSocket, udp::UdpSocket};
use embassy_time::{Duration, Timer};
use embedded_io::Write;
use embedded_io_async::Write as _;
use log::{LevelFilter, debug, error, info, warn};
use rand::{RngCore, SeedableRng};
use zedboard::{
PS_CLOCK_FREQUENCY,
phy_marvell::{LatchingLinkStatus, MARVELL_88E1518_OUI},
};
use zynq7000_hal::{
BootMode,
clocks::Clocks,
configure_level_shifter,
eth::{
AlignedBuffer, ClkDivCollection, EthernetConfig, EthernetLowLevel,
embassy_net::InterruptResult,
},
gic::{GicConfigurator, GicInterruptHelper, Interrupt},
gpio::{GpioPins, Output, PinState},
gtc::Gtc,
uart::{ClkConfigRaw, Uart, UartConfig},
};
use zynq7000::{PsPeripherals, slcr::LevelShifterConfig};
use zynq7000_rt::{self as _, mmu::section_attrs::SHAREABLE_DEVICE, mmu_l1_table_mut};
/// If `false`, the static configuration in [STATIC_IPV4_CONFIG] is used instead of DHCP.
const USE_DHCP: bool = true;
/// Port used by both the UDP and the TCP echo server.
const UDP_AND_TCP_PORT: u16 = 8000;
/// If `true`, the main loop periodically reads and prints the ethernet frame counters.
const PRINT_PACKET_STATS: bool = false;
const LOG_LEVEL: LevelFilter = LevelFilter::Info;
/// Number of RX and TX DMA buffer descriptor slots.
const NUM_RX_SLOTS: usize = 16;
const NUM_TX_SLOTS: usize = 16;
/// Fallback static IPv4 configuration, only used when [USE_DHCP] is `false`.
const STATIC_IPV4_CONFIG: StaticConfigV4 = StaticConfigV4 {
    address: Ipv4Cidr::new(Ipv4Addr::new(192, 168, 179, 25), 24),
    gateway: None,
    dns_servers: heapless::Vec::new(),
};
const INIT_STRING: &str = "-- Zynq 7000 Zedboard Ethernet Example --\n\r";
// Unicast address with OUI of the Marvell 88E1518 PHY.
// First three bytes are the OUI (0x00, 0x50, 0x43), last three are a locally chosen NIC id.
const MAC_ADDRESS: [u8; 6] = [
    0x00,
    ((MARVELL_88E1518_OUI >> 8) & 0xff) as u8,
    (MARVELL_88E1518_OUI & 0xff) as u8,
    0x00,
    0x00,
    0x01,
];
/// See memory.x file. 1 MB starting at this address will be configured as uncached memory using the
/// MMU.
const UNCACHED_ADDR: u32 = 0x4000000;
// These descriptors must be placed in uncached memory. The MMU will be used to configure the
// .uncached memory segment as device memory.
#[unsafe(link_section = ".uncached")]
static RX_DESCRIPTORS: zynq7000_hal::eth::rx_descr::DescriptorList<NUM_RX_SLOTS> =
    zynq7000_hal::eth::rx_descr::DescriptorList::new();
#[unsafe(link_section = ".uncached")]
static TX_DESCRIPTORS: zynq7000_hal::eth::tx_descr::DescriptorList<NUM_TX_SLOTS> =
    zynq7000_hal::eth::tx_descr::DescriptorList::new();
// Channel used by the IRQ handler to forward ethernet error events to the main loop.
static ETH_ERR_QUEUE: embassy_sync::channel::Channel<
    embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex,
    InterruptResult,
    8,
> = embassy_sync::channel::Channel::new();
/// Link/IP bring-up state machine driven by the main loop.
#[derive(Debug, PartialEq, Eq)]
pub enum IpMode {
    /// No link; the stack is notified that the link is down.
    LinkDown,
    /// Waiting for the PHY to finish auto-negotiation.
    AutoNegotiating,
    /// Link is up; waiting for DHCP (or static config) to become active.
    AwaitingIpConfig,
    /// Fully operational; link state is supervised periodically.
    StackReady,
}
/// Entry point (not called like a normal main function)
#[unsafe(no_mangle)]
pub extern "C" fn boot_core(cpu_id: u32) -> ! {
if cpu_id != 0 {
panic!("unexpected CPU ID {}", cpu_id);
}
main();
}
/// Background task which drives the embassy-net stack runner.
///
/// This polls the ethernet driver and advances all socket state machines.
#[embassy_executor::task]
async fn embassy_net_task(
    mut runner: embassy_net::Runner<'static, zynq7000_hal::eth::embassy_net::Driver>,
) -> ! {
    runner.run().await
}
/// Simple UDP echo task.
///
/// Binds to [UDP_AND_TCP_PORT] and echoes every received datagram back to its sender.
/// On send or receive errors, the task logs a warning and backs off briefly.
#[embassy_executor::task]
async fn udp_task(mut udp: UdpSocket<'static>) -> ! {
    let mut rx_buf = [0; zynq7000_hal::eth::MTU];
    // Fix: the previous panic message hard-coded "port 8000"; use the port constant so the
    // message stays correct if UDP_AND_TCP_PORT is ever changed.
    udp.bind(UDP_AND_TCP_PORT)
        .unwrap_or_else(|e| panic!("failed to bind UDP socket to port {UDP_AND_TCP_PORT}: {e:?}"));
    loop {
        match udp.recv_from(&mut rx_buf).await {
            Ok((data, meta)) => {
                log::info!("udp rx {data} bytes from {meta:?}");
                // Echo the received datagram back to the sender.
                match udp.send_to(&rx_buf[0..data], meta).await {
                    Ok(_) => (),
                    Err(e) => {
                        log::warn!("udp send error: {e:?}");
                        // Back off so a persistent error does not busy-loop.
                        Timer::after_millis(100).await;
                    }
                }
            }
            Err(e) => {
                log::warn!("udp receive error: {e:?}");
                Timer::after_millis(100).await;
            }
        }
    }
}
/// Simple TCP echo task.
///
/// Accepts one connection at a time on [UDP_AND_TCP_PORT] and writes every received chunk
/// back to the peer. The inner loop only exits once the socket has reached the `Closed`
/// state, after which the task goes back to accepting the next connection.
#[embassy_executor::task]
async fn tcp_task(mut tcp: TcpSocket<'static>) -> ! {
    let mut rx_buf = [0; zynq7000_hal::eth::MTU];
    // Time out stale connections so a dead peer can not block the single-connection server.
    tcp.set_timeout(Some(Duration::from_secs(2)));
    loop {
        match tcp.accept(UDP_AND_TCP_PORT).await {
            Ok(_) => {
                log::info!("tcp connection to {:?} accepted", tcp.remote_endpoint());
                loop {
                    if tcp.may_recv() {
                        match tcp.read(&mut rx_buf).await {
                            // A read of 0 bytes signals EOF from the remote side.
                            Ok(0) => {
                                log::info!("tcp EOF received");
                                tcp.close();
                            }
                            Ok(read_bytes) => {
                                log::info!("tcp rx {read_bytes} bytes");
                                if tcp.may_send() {
                                    match tcp.write_all(&rx_buf[0..read_bytes]).await {
                                        Ok(_) => continue,
                                        Err(e) => {
                                            log::warn!("tcp error when writing: {e:?}");
                                            Timer::after_millis(100).await;
                                        }
                                    }
                                } else {
                                    log::warn!("tcp remote endpoint not writeable");
                                    continue;
                                }
                            }
                            Err(_) => {
                                log::warn!("tcp connection reset by remote endpoint.");
                                tcp.close();
                            }
                        }
                    }
                    // Both directions are done: initiate the close from our side as well.
                    if !tcp.may_send() && !tcp.may_recv() {
                        log::info!("tcp send and receive side closed");
                        tcp.close();
                    }
                    // Only leave the connection loop once the socket is fully closed.
                    if tcp.state() == embassy_net::tcp::State::Closed {
                        log::info!("tcp socket closed, exiting loop");
                        break;
                    }
                    Timer::after_millis(100).await;
                }
            }
            Err(e) => {
                log::warn!("tcp error accepting connection: {e:?}");
                Timer::after_millis(100).await;
                continue;
            }
        }
    }
}
/// Main task: configures the MMU uncached region, clocks, GIC, UART logging, the ethernet
/// peripheral and the Marvell PHY, spawns the network and echo tasks, and then runs the
/// [IpMode] link supervision state machine.
#[embassy_executor::main]
#[unsafe(export_name = "main")]
async fn main(spawner: Spawner) -> ! {
    // Configure the uncached memory region using the MMU.
    mmu_l1_table_mut()
        .update(UNCACHED_ADDR, SHAREABLE_DEVICE)
        .expect("configuring uncached memory section failed");
    // Enable PS-PL level shifters.
    configure_level_shifter(LevelShifterConfig::EnableAll);
    let dp = PsPeripherals::take().unwrap();
    // Clock was already initialized by PS7 Init TCL script or FSBL, we just read it.
    let clocks = Clocks::new_from_regs(PS_CLOCK_FREQUENCY).unwrap();
    // Set up the global interrupt controller.
    let mut gic = GicConfigurator::new_with_init(dp.gicc, dp.gicd);
    gic.enable_all_interrupts();
    gic.set_all_spi_interrupt_targets_cpu0();
    gic.enable();
    unsafe {
        gic.enable_interrupts();
    }
    let gpio_pins = GpioPins::new(dp.gpio);
    // Set up global timer counter and embassy time driver.
    let gtc = Gtc::new(dp.gtc, clocks.arm_clocks());
    zynq7000_embassy::init(clocks.arm_clocks(), gtc);
    // Set up the UART, we are logging with it.
    let uart_clk_config = ClkConfigRaw::new_autocalc_with_error(clocks.io_clocks(), 115200)
        .unwrap()
        .0;
    let mut uart = Uart::new_with_mio(
        dp.uart_1,
        UartConfig::new_with_clk_config(uart_clk_config),
        (gpio_pins.mio.mio48, gpio_pins.mio.mio49),
    )
    .unwrap();
    uart.write_all(INIT_STRING.as_bytes()).unwrap();
    // Safety: We are not multi-threaded yet.
    unsafe { zynq7000_hal::log::uart_blocking::init_unsafe_single_core(uart, LOG_LEVEL, false) };
    let boot_mode = BootMode::new();
    info!("Boot mode: {:?}", boot_mode);
    // Frame buffers for the ethernet DMA. Only the descriptors live in the uncached section;
    // these buffers are regular statics.
    static ETH_RX_BUFS: static_cell::ConstStaticCell<[AlignedBuffer; NUM_RX_SLOTS]> =
        static_cell::ConstStaticCell::new(
            [AlignedBuffer([0; zynq7000_hal::eth::MTU]); NUM_RX_SLOTS],
        );
    static ETH_TX_BUFS: static_cell::ConstStaticCell<[AlignedBuffer; NUM_TX_SLOTS]> =
        static_cell::ConstStaticCell::new(
            [AlignedBuffer([0; zynq7000_hal::eth::MTU]); NUM_TX_SLOTS],
        );
    let rx_bufs = ETH_RX_BUFS.take();
    let tx_bufs = ETH_TX_BUFS.take();
    let rx_descr = RX_DESCRIPTORS.take().unwrap();
    let tx_descr = TX_DESCRIPTORS.take().unwrap();
    // Unwraps okay, list length is not 0
    let mut rx_descr_ref =
        zynq7000_hal::eth::rx_descr::DescriptorListWrapper::new(rx_descr.as_mut_slice());
    let mut tx_descr_ref =
        zynq7000_hal::eth::tx_descr::DescriptorListWrapper::new(tx_descr.as_mut_slice());
    rx_descr_ref.init_with_aligned_bufs(rx_bufs.as_slice());
    tx_descr_ref.init_or_reset();
    // Unwrap okay, this is a valid peripheral.
    let eth_ll = EthernetLowLevel::new(dp.eth_0).unwrap();
    let mod_id = eth_ll.regs.read_module_id();
    info!("Ethernet Module ID: {mod_id:?}");
    assert_eq!(mod_id, 0x20118);
    let (clk_divs, clk_errors) =
        ClkDivCollection::calculate_for_rgmii_and_io_clock(clocks.io_clocks());
    debug!(
        "Calculated RGMII clock configuration: {:?}, errors (missmatch from ideal rate in hertz): {:?}",
        clk_divs, clk_errors
    );
    // Unwrap okay, we use a standard clock config, and the clock config should never fail.
    let eth_cfg = EthernetConfig::new(
        zynq7000_hal::eth::ClkConfig::new(clk_divs.cfg_1000_mbps),
        zynq7000_hal::eth::calculate_mdc_clk_div(clocks.arm_clocks()).unwrap(),
        MAC_ADDRESS,
    );
    // Configures all the physical pins for ethernet operation and sets up the
    // ethernet peripheral.
    let mut eth = zynq7000_hal::eth::Ethernet::new_with_mio(
        eth_ll,
        eth_cfg,
        gpio_pins.mio.mio16,
        gpio_pins.mio.mio21,
        (
            gpio_pins.mio.mio17,
            gpio_pins.mio.mio18,
            gpio_pins.mio.mio19,
            gpio_pins.mio.mio20,
        ),
        gpio_pins.mio.mio22,
        gpio_pins.mio.mio27,
        (
            gpio_pins.mio.mio23,
            gpio_pins.mio.mio24,
            gpio_pins.mio.mio25,
            gpio_pins.mio.mio26,
        ),
        Some((gpio_pins.mio.mio52, gpio_pins.mio.mio53)),
    );
    eth.set_rx_buf_descriptor_base_address(rx_descr_ref.base_addr());
    eth.set_tx_buf_descriptor_base_address(tx_descr_ref.base_addr());
    eth.start();
    let (mut phy, phy_rev) =
        zedboard::phy_marvell::Marvell88E1518Phy::new_autoprobe_addr(eth.mdio_mut())
            .expect("could not auto-detect phy");
    info!(
        "Detected Marvell 88E1518 PHY with revision number: {:?}",
        phy_rev
    );
    phy.reset();
    phy.restart_auto_negotiation();
    let driver = zynq7000_hal::eth::embassy_net::Driver::new(
        &eth,
        MAC_ADDRESS,
        zynq7000_hal::eth::embassy_net::DescriptorsAndBuffers::new(
            rx_descr_ref,
            rx_bufs,
            tx_descr_ref,
            tx_bufs,
        )
        .unwrap(),
    );
    let config = if USE_DHCP {
        embassy_net::Config::dhcpv4(Default::default())
    } else {
        embassy_net::Config::ipv4_static(STATIC_IPV4_CONFIG)
    };
    static RESOURCES: static_cell::StaticCell<embassy_net::StackResources<3>> =
        static_cell::StaticCell::new();
    // NOTE(review): fixed seed, so the stack's "random" identifiers are reproducible across
    // resets — fine for an example, questionable for production use.
    let mut rng = rand::rngs::SmallRng::seed_from_u64(1);
    let (stack, runner) = embassy_net::new(
        driver,
        config,
        RESOURCES.init(embassy_net::StackResources::new()),
        rng.next_u64(),
    );
    static RX_UDP_META: static_cell::ConstStaticCell<[embassy_net::udp::PacketMetadata; 8]> =
        static_cell::ConstStaticCell::new([embassy_net::udp::PacketMetadata::EMPTY; 8]);
    static TX_UDP_META: static_cell::ConstStaticCell<[embassy_net::udp::PacketMetadata; 8]> =
        static_cell::ConstStaticCell::new([embassy_net::udp::PacketMetadata::EMPTY; 8]);
    // Ensure those are in the data section by making them static.
    static TX_UDP_BUFS: static_cell::ConstStaticCell<[u8; zynq7000_hal::eth::MTU]> =
        static_cell::ConstStaticCell::new([0; zynq7000_hal::eth::MTU]);
    static RX_UDP_BUFS: static_cell::ConstStaticCell<[u8; zynq7000_hal::eth::MTU]> =
        static_cell::ConstStaticCell::new([0; zynq7000_hal::eth::MTU]);
    let udp_socket = UdpSocket::new(
        stack,
        RX_UDP_META.take(),
        RX_UDP_BUFS.take(),
        TX_UDP_META.take(),
        TX_UDP_BUFS.take(),
    );
    // Ensure those are in the data section by making them static.
    static TX_TCP_BUFS: static_cell::ConstStaticCell<[u8; zynq7000_hal::eth::MTU]> =
        static_cell::ConstStaticCell::new([0; zynq7000_hal::eth::MTU]);
    static RX_TCP_BUFS: static_cell::ConstStaticCell<[u8; zynq7000_hal::eth::MTU]> =
        static_cell::ConstStaticCell::new([0; zynq7000_hal::eth::MTU]);
    let tcp_socket = TcpSocket::new(stack, RX_TCP_BUFS.take(), TX_TCP_BUFS.take());
    spawner.spawn(embassy_net_task(runner)).unwrap();
    spawner.spawn(udp_task(udp_socket)).unwrap();
    spawner.spawn(tcp_task(tcp_socket)).unwrap();
    let mut mio_led = Output::new_for_mio(gpio_pins.mio.mio7, PinState::Low);
    let mut ip_mode = IpMode::LinkDown;
    let mut transmitted_frames = 0;
    let mut received_frames = 0;
    let receiver = ETH_ERR_QUEUE.receiver();
    loop {
        // Handle error messages from ethernet interrupt.
        while let Ok(msg) = receiver.try_receive() {
            info!("Received interrupt result: {msg:?}");
        }
        if PRINT_PACKET_STATS {
            // NOTE(review): the local accumulation assumes the statistic counters reset on
            // read — confirm against the GEM register documentation.
            let sent_frames_since_last = eth.ll().regs.statistics().read_tx_count();
            if sent_frames_since_last > 0 {
                transmitted_frames += sent_frames_since_last;
                info!("Frame sent count: {transmitted_frames}");
            }
            let received_frames_since_last = eth.ll().regs.statistics().read_rx_count();
            if received_frames_since_last > 0 {
                received_frames += received_frames_since_last;
                info!("Frame received count: {received_frames}");
            }
        }
        // This is basically a link checker task. It also takes care of notifying the
        // embassy stack of link state changes.
        match ip_mode {
            // Assuming that auto-negotiation is performed automatically.
            IpMode::LinkDown => {
                mio_led.set_low();
                zynq7000_hal::eth::embassy_net::update_link_state(
                    embassy_net::driver::LinkState::Down,
                );
                ip_mode = IpMode::AutoNegotiating;
            }
            IpMode::AutoNegotiating => {
                let status = phy.read_copper_status();
                if status.auto_negotiation_complete() {
                    let extended_status = phy.read_copper_specific_status_register_1();
                    info!(
                        "link is up and auto-negotiation complete. Setting speed {:?} and duplex {:?}",
                        extended_status.speed().as_zynq7000_eth_speed().unwrap(),
                        extended_status.duplex().as_zynq7000_eth_duplex()
                    );
                    eth.configure_clock_and_speed_duplex(
                        // If this has the reserved bits, what do we even do? For this example app,
                        // I am going to assume this never happens..
                        extended_status.speed().as_zynq7000_eth_speed().unwrap(),
                        extended_status.duplex().as_zynq7000_eth_duplex(),
                        &clk_divs,
                    );
                    zynq7000_hal::eth::embassy_net::update_link_state(
                        embassy_net::driver::LinkState::Up,
                    );
                    ip_mode = IpMode::AwaitingIpConfig;
                } else {
                    Timer::after_millis(100).await;
                }
            }
            IpMode::AwaitingIpConfig => {
                if stack.is_config_up() {
                    let network_config = stack.config_v4();
                    info!("Network configuration is up. config: {network_config:?}!",);
                    ip_mode = IpMode::StackReady;
                    mio_led.set_high();
                } else {
                    Timer::after_millis(100).await;
                }
            }
            IpMode::StackReady => {
                let status = phy.read_copper_status();
                // Periodically check for link changes.
                if status.copper_link_status() == LatchingLinkStatus::DownSinceLastRead {
                    warn!("ethernet link is down.");
                    ip_mode = IpMode::LinkDown;
                    continue;
                }
                Timer::after_millis(100).await;
            }
        }
    }
}
/// IRQ handler invoked by the runtime for interrupts routed to this core.
///
/// Acknowledges the interrupt at the GIC, dispatches the global timer tick to the embassy
/// time driver and ethernet interrupts to the embassy-net driver glue, then signals end of
/// interrupt at the GIC.
#[unsafe(no_mangle)]
pub extern "C" fn _irq_handler() {
    let mut gic_helper = GicInterruptHelper::new();
    let irq_info = gic_helper.acknowledge_interrupt();
    match irq_info.interrupt() {
        Interrupt::Sgi(_) => (),
        Interrupt::Ppi(ppi_interrupt) => {
            if ppi_interrupt == zynq7000_hal::gic::PpiInterrupt::GlobalTimer {
                // NOTE(review): presumably only safe from this interrupt context — see the
                // zynq7000-embassy time driver contract.
                unsafe {
                    zynq7000_embassy::on_interrupt();
                }
            }
        }
        Interrupt::Spi(spi_interrupt) => {
            if spi_interrupt == zynq7000_hal::gic::SpiInterrupt::Eth0 {
                let result = zynq7000_hal::eth::embassy_net::on_interrupt(
                    zynq7000_hal::eth::EthernetId::Eth0,
                );
                // Forward error conditions to the main loop; drop them if the queue is full.
                if result.has_errors() {
                    ETH_ERR_QUEUE.try_send(result).ok();
                }
            }
        }
        Interrupt::Invalid(_) => (),
        Interrupt::Spurious => (),
    }
    gic_helper.end_of_interrupt(irq_info);
}
/// Data abort handler: park the CPU.
#[unsafe(no_mangle)]
pub extern "C" fn _abort_handler() {
    loop {
        nop();
    }
}
/// Undefined instruction handler: park the CPU.
#[unsafe(no_mangle)]
pub extern "C" fn _undefined_handler() {
    loop {
        nop();
    }
}
/// Prefetch abort handler: park the CPU.
#[unsafe(no_mangle)]
pub extern "C" fn _prefetch_handler() {
    loop {
        nop();
    }
}
/// Panic handler
///
/// Logs the panic info through the blocking UART logger and parks the CPU.
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
    error!("Panic: {info:?}");
    loop {}
}

View File

@@ -131,7 +131,7 @@ async fn main(_spawner: Spawner) -> ! {
Output::new_for_emio(gpio_pins.emio.take(7).unwrap(), PinState::Low),
];
for (idx, led) in emio_leds.iter_mut().enumerate() {
if idx % 2 == 0 {
if idx.is_multiple_of(2) {
led.set_high();
} else {
led.set_low();

View File

@@ -148,7 +148,7 @@ async fn main(spawner: Spawner) -> ! {
Output::new_for_emio(gpio_pins.emio.take(7).unwrap(), PinState::Low),
];
for (idx, led) in emio_leds.iter_mut().enumerate() {
if idx % 2 == 0 {
if idx.is_multiple_of(2) {
led.set_high();
} else {
led.set_low();

View File

@@ -1,5 +1,6 @@
#![no_std]
use zynq7000_hal::time::Hertz;
pub mod phy_marvell;
// Define the clock frequency as a constant
pub const PS_CLOCK_FREQUENCY: Hertz = Hertz::from_raw(33_333_300);
pub const PS_CLOCK_FREQUENCY: Hertz = Hertz::from_raw(33_333_333);

View File

@@ -0,0 +1,246 @@
use arbitrary_int::{u2, u4, u5};
/// Decoded contents of the two MDIO PHY identification registers.
#[derive(Clone, Debug)]
pub struct PhyIdentifier {
    // 24-bit Organizationally Unique Identifier of the vendor.
    pub oui: u32,
    pub model: u8,
    pub rev: u8,
}
// Organizationally Unique Identifier for Marvell 88E1518 PHY
pub const MARVELL_88E1518_OUI: u32 = 0x005043;
pub const MARVELL_88E1518_MODELL_NUMBER: u8 = 0b011101;
/// Register addresses on page 0 of the Marvell PHY register map.
#[bitbybit::bitenum(u5, exhaustive = false)]
pub enum MarvellRegistersPage0 {
    CopperControl = 0,
    CopperStatus = 1,
    IdReg1 = 2,
    IdReg2 = 3,
    CopperSpecificStatus = 17,
    PageSel = 22,
}
/// Copper control register (page 0, register 0).
#[bitbybit::bitfield(u16)]
pub struct CopperControlRegister {
    // NOTE(review): presumably self-clearing after the reset completes — confirm against
    // the 88E1518 datasheet.
    #[bit(15, rw)]
    copper_reset: bool,
    #[bit(14, rw)]
    loopback: bool,
    #[bit(12, rw)]
    auto_negotiation_enable: bool,
    #[bit(11, rw)]
    power_down: bool,
    #[bit(10, rw)]
    isolate: bool,
    #[bit(9, rw)]
    restart_auto_negotiation: bool,
    /// 1: Full-duplex, 0: Half-duplex
    #[bit(8, rw)]
    copper_duplex_mode: bool,
    /// Speed selection field, split across register bits 13 and 6.
    #[bits([13, 6], rw)]
    speed_selection: u2,
}
/// Link status which latches low: once the link dropped, the down value is reported until
/// the register is read.
#[bitbybit::bitenum(u1, exhaustive = true)]
#[derive(Debug, PartialEq, Eq)]
pub enum LatchingLinkStatus {
    Up = 1,
    DownSinceLastRead = 0,
}
/// Copper status register (page 0, register 1).
#[bitbybit::bitfield(u16)]
pub struct CopperStatusRegister {
    /// Always 0, the 100BASE-T4 protocol is not available on Marvell 88E15XX.
    #[bit(15, r)]
    p_100_base_t4: bool,
    /// Always 1 for Marvell 88E15XX
    #[bit(14, r)]
    p_100_base_x_full_duplex: bool,
    /// Always 1 for Marvell 88E15XX
    #[bit(13, r)]
    p_100_base_x_half_duplex: bool,
    /// Always 1 for Marvell 88E15XX
    #[bit(12, r)]
    p_10_base_t_full_duplex: bool,
    /// Always 1 for Marvell 88E15XX
    #[bit(11, r)]
    p_10_base_t_half_duplex: bool,
    /// Always 0 for Marvell 88E15XX
    #[bit(10, r)]
    p_100_base_t2_full_duplex: bool,
    /// Always 0 for Marvell 88E15XX
    #[bit(9, r)]
    p_100_base_t2_half_duplex: bool,
    /// Always 1 for Marvell 88E15XX
    #[bit(8, r)]
    extended_status: bool,
    /// Always 1 for Marvell 88E15XX
    #[bit(6, r)]
    mf_preamble_suppression: bool,
    #[bit(5, r)]
    auto_negotiation_complete: bool,
    // Latching high register bit.
    #[bit(4, r)]
    copper_remote_fault: bool,
    /// Always 1 for Marvell 88E15XX
    #[bit(3, r)]
    auto_negotation_ability: bool,
    // Latching low register bit. For the current link status, this register should be read back
    // to back, or the link real time register (17_0.10) should be read
    #[bit(2, r)]
    copper_link_status: LatchingLinkStatus,
    // Latching high register bit.
    #[bit(1, r)]
    jabber_detect: bool,
    /// Always 1 for Marvell 88E15XX
    #[bit(0, r)]
    extended_capabilities: bool,
}
/// Speed encoding used by the copper specific status register.
#[bitbybit::bitenum(u2, exhaustive = true)]
#[derive(Debug, PartialEq, Eq)]
pub enum PhySpeedBits {
    Reserved = 0b11,
    Mbps1000 = 0b10,
    Mbps100 = 0b01,
    Mbps10 = 0b00,
}
impl PhySpeedBits {
    /// Map the PHY speed field to the HAL speed type.
    ///
    /// Returns [None] for the reserved bit combination, which does not encode a speed.
    #[inline]
    pub fn as_zynq7000_eth_speed(&self) -> Option<zynq7000_hal::eth::Speed> {
        match self {
            Self::Mbps10 => Some(zynq7000_hal::eth::Speed::Mbps10),
            Self::Mbps100 => Some(zynq7000_hal::eth::Speed::Mbps100),
            Self::Mbps1000 => Some(zynq7000_hal::eth::Speed::Mbps1000),
            Self::Reserved => None,
        }
    }
}
/// Duplex encoding used by the copper specific status register.
#[bitbybit::bitenum(u1, exhaustive = true)]
#[derive(Debug, PartialEq, Eq)]
pub enum PhyDuplexBit {
    Full = 1,
    Half = 0,
}
impl PhyDuplexBit {
    /// Map the PHY duplex bit to the HAL duplex type.
    #[inline]
    pub fn as_zynq7000_eth_duplex(&self) -> zynq7000_hal::eth::Duplex {
        if matches!(self, Self::Full) {
            zynq7000_hal::eth::Duplex::Full
        } else {
            zynq7000_hal::eth::Duplex::Half
        }
    }
}
/// Copper specific status register 1 (page 0, register 17).
#[bitbybit::bitfield(u16)]
pub struct CopperSpecificStatusRegister {
    #[bits(14..=15, r)]
    speed: PhySpeedBits,
    #[bit(13, r)]
    duplex: PhyDuplexBit,
    /// Latching high register bit.
    #[bit(12, r)]
    page_received: bool,
    /// This is 1 when auto-negotiation is not enabled.
    #[bit(11, r)]
    speed_and_duplex_resolved: bool,
    /// This is the real-time link status.
    #[bit(10, r)]
    copper_link: bool,
    #[bit(9, r)]
    transmit_pause_enabled: bool,
    #[bit(8, r)]
    received_pause_enabled: bool,
    #[bit(6, r)]
    mdi_crossover_status: bool,
    #[bit(4, r)]
    copper_energy_detect_status: bool,
    #[bit(3, r)]
    global_link_status: bool,
    #[bit(1, r)]
    polarity: bool,
    #[bit(0, r)]
    jabber: bool,
}
/// Driver for the Marvell 88E1518 PHY, accessed through the GEM MDIO interface.
pub struct Marvell88E1518Phy {
    mdio: zynq7000_hal::eth::mdio::Mdio,
    // MDIO address of the PHY on the management bus.
    addr: u5,
}
impl Marvell88E1518Phy {
    /// Scan all 32 possible MDIO addresses for a Marvell 88E1518 PHY.
    ///
    /// The OUI and model number decoded from the two identification registers are compared
    /// against the expected values. On a match, the driver and the 4-bit PHY revision number
    /// are returned; [None] if no matching PHY was found.
    pub fn new_autoprobe_addr(mdio: &mut zynq7000_hal::eth::mdio::Mdio) -> Option<(Self, u4)> {
        for addr in 0..32 {
            let phy_id_1 =
                mdio.read_blocking(u5::new(addr), MarvellRegistersPage0::IdReg1.raw_value());
            let phy_id_2 =
                mdio.read_blocking(u5::new(addr), MarvellRegistersPage0::IdReg2.raw_value());
            // PHY ID 1 contains bits 3 to 18 of the OUI in the goofy IEEE ordering scheme,
            // which corresponds to bit \[21:6\] of the OUI.
            // PHY ID 2 contains bits 19 to 24 which correspond to bits \[5:0\] of the OUI.
            let oui = ((phy_id_1 as u32) << 6) | ((phy_id_2 >> 10) & 0b111111) as u32;
            let model_number = ((phy_id_2 >> 4) & 0b111111) as u8;
            let revision_number = u4::new((phy_id_2 & 0b1111) as u8);
            if oui == MARVELL_88E1518_OUI && model_number == MARVELL_88E1518_MODELL_NUMBER {
                return Some((
                    Self {
                        // NOTE(review): the HAL marks cloning the MDIO handle as unsafe —
                        // confirm the two handles are never used concurrently.
                        mdio: unsafe { mdio.clone() },
                        addr: u5::new(addr),
                    },
                    revision_number,
                ));
            }
        }
        None
    }
    /// Create the driver for an already known PHY address without probing.
    pub fn new(mdio: zynq7000_hal::eth::mdio::Mdio, addr: u5) -> Self {
        Self { mdio, addr }
    }
    /// Trigger a software reset via the copper control register.
    ///
    /// NOTE(review): this does not poll for reset completion — callers accessing the PHY
    /// immediately afterwards may want to wait until the reset bit clears.
    pub fn reset(&mut self) {
        let mut ctrl = CopperControlRegister::new_with_raw_value(
            self.mdio
                .read_blocking(self.addr, MarvellRegistersPage0::CopperControl.raw_value()),
        );
        ctrl.set_copper_reset(true);
        self.mdio.write_blocking(
            self.addr,
            MarvellRegistersPage0::CopperControl.raw_value(),
            ctrl.raw_value(),
        );
    }
    /// Enable auto-negotiation and restart it.
    pub fn restart_auto_negotiation(&mut self) {
        let mut ctrl = CopperControlRegister::new_with_raw_value(
            self.mdio
                .read_blocking(self.addr, MarvellRegistersPage0::CopperControl.raw_value()),
        );
        ctrl.set_auto_negotiation_enable(true);
        ctrl.set_restart_auto_negotiation(true);
        self.mdio.write_blocking(
            self.addr,
            MarvellRegistersPage0::CopperControl.raw_value(),
            ctrl.raw_value(),
        );
    }
    /// Read the copper status register (page 0, register 1).
    pub fn read_copper_status(&mut self) -> CopperStatusRegister {
        let raw_value = self
            .mdio
            .read_blocking(self.addr, MarvellRegistersPage0::CopperStatus.raw_value());
        CopperStatusRegister::new_with_raw_value(raw_value)
    }
    /// Read the copper specific status register 1 (page 0, register 17).
    pub fn read_copper_specific_status_register_1(&mut self) -> CopperSpecificStatusRegister {
        let raw_value = self.mdio.read_blocking(
            self.addr,
            MarvellRegistersPage0::CopperSpecificStatus.raw_value(),
        );
        CopperSpecificStatusRegister::new_with_raw_value(raw_value)
    }
}

View File

@@ -1,8 +1,23 @@
MEMORY
{
/* Zedboard: 512 MB DDR3. Only use 256 MB for now, should be plenty for a bare-metal app. */
CODE(rx) : ORIGIN = 0x00100000, LENGTH = 256M
/* Zedboard: 512 MB DDR3. Only use 63 MB for now, should be plenty for a bare-metal app.
Leave 1 MB of memory which will be configured as uncached device memory by the MMU. This is
recommended for something like DMA descriptors. */
CODE(rx) : ORIGIN = 0x00100000, LENGTH = 63M
UNCACHED(rx): ORIGIN = 0x4000000, LENGTH = 1M
}
REGION_ALIAS("DATA", CODE);
SECTIONS
{
/* Uncached memory */
.uncached (NOLOAD) : ALIGN(4) {
. = ALIGN(4);
_sbss_uncached = .;
*(.uncached .uncached.*);
. = ALIGN(4);
_ebss_uncached = .;
} > UNCACHED
}

12
zynq-mmu/Cargo.toml Normal file
View File

@@ -0,0 +1,12 @@
[package]
name = "zynq-mmu"
description = "Zynq MMU structures"
version = "0.1.0"
edition = "2024"
[dependencies]
thiserror = { version = "2", default-features = false }
cortex-ar = { version = "0.2", git = "https://github.com/rust-embedded/cortex-ar.git", rev = "79dba7000d2090d13823bfb783d9d64be8b778d2" }
[features]
tools = []

100
zynq-mmu/src/lib.rs Normal file
View File

@@ -0,0 +1,100 @@
//! The MMU structures live inside a dedicated shared crate so it can be used by both the Zynq
//! runtime crate and the HAL crate.
#![no_std]
use core::cell::UnsafeCell;
use cortex_ar::mmu::L1Section;
#[cfg(not(feature = "tools"))]
use cortex_ar::{
asm::{dsb, isb},
cache::clean_and_invalidate_l1_data_cache,
mmu::SectionAttributes,
register::{BpIAll, TlbIAll},
};
/// One 1 MB section entry per megabyte of the full 4 GB address space.
pub const NUM_L1_PAGE_TABLE_ENTRIES: usize = 4096;
/// Error returned when a section address is not aligned to the 1 MB section granularity.
#[derive(Debug, PartialEq, Eq, thiserror::Error)]
#[error("address is not aligned to 1MB boundary")]
pub struct AddrNotAlignedToOneMb;
/// Raw L1 table wrapper.
///
/// You can use [L1Table] to create a static global L1 table, which can be shared and updated
/// without requiring a static mutable global.
#[repr(C, align(16384))]
pub struct L1TableRaw(pub [L1Section; NUM_L1_PAGE_TABLE_ENTRIES]);
impl L1TableRaw {
    #[inline(always)]
    pub const fn as_ptr(&self) -> *const u32 {
        self.0.as_ptr() as *const _
    }
    #[inline(always)]
    pub const fn as_mut_ptr(&mut self) -> *mut u32 {
        self.0.as_mut_ptr() as *mut _
    }
    /// Update the attributes of the 1 MB section starting at `addr` and perform the cache,
    /// TLB and branch predictor maintenance to make the new attributes take effect.
    ///
    /// Returns [AddrNotAlignedToOneMb] if `addr` is not aligned to a 1 MB boundary.
    #[cfg(not(feature = "tools"))]
    pub fn update(
        &mut self,
        addr: u32,
        section_attrs: SectionAttributes,
    ) -> Result<(), AddrNotAlignedToOneMb> {
        if addr & 0x000F_FFFF != 0 {
            return Err(AddrNotAlignedToOneMb);
        }
        // One table entry per 1 MB section.
        let index = addr as usize / 0x10_0000;
        self.0[index].set_section_attrs(section_attrs);
        // The Zynq 7000 has a 32 kB 4-way associative cache with a line length of 32 bytes.
        // 4-way associative cache: A == 2
        // 32 bytes line length: N == 5
        // 256 (32kB / (32 * 4)) sets: S == 8
        clean_and_invalidate_l1_data_cache::<2, 5, 8>();
        // Discard stale translations and branch predictor state.
        TlbIAll::write();
        BpIAll::write();
        dsb();
        isb();
        Ok(())
    }
}
/// This is a thin helper structure to allow declaring one static global L1 table
/// while also allowing mutable access to it without requiring static mutables.
///
/// The L1 table is usually expected as some data structure at a certain address which can be
/// declared with initial values and placed inside the .data section.
#[repr(transparent)]
pub struct L1Table(pub UnsafeCell<L1TableRaw>);
// NOTE(review): this makes unsynchronized aliasing of the table possible — confirm all
// mutation happens before secondary cores or interrupt contexts access it.
unsafe impl Sync for L1Table {}
impl L1Table {
    #[inline]
    pub const fn new(l1_table: [L1Section; NUM_L1_PAGE_TABLE_ENTRIES]) -> L1Table {
        L1Table(UnsafeCell::new(L1TableRaw(l1_table)))
    }
}
/// Wrapper structure to modify the L1 table given a mutable reference to the table.
pub struct L1TableWrapper<'a>(pub &'a mut L1TableRaw);
impl<'a> L1TableWrapper<'a> {
    /// Create a new wrapper from a mutable table reference.
    pub fn new(l1_table: &'a mut L1TableRaw) -> L1TableWrapper<'a> {
        L1TableWrapper(l1_table)
    }
}
impl L1TableWrapper<'_> {
    /// Delegates to [L1TableRaw::update]; see there for the alignment requirement and the
    /// cache/TLB maintenance performed.
    #[cfg(not(feature = "tools"))]
    pub fn update(
        &mut self,
        addr: u32,
        section_attrs: SectionAttributes,
    ) -> Result<(), AddrNotAlignedToOneMb> {
        self.0.update(addr, section_attrs)
    }
}

View File

@@ -15,5 +15,7 @@ critical-section = "1"
once_cell = { version = "1", default-features = false, features = ["critical-section"] }
zynq7000-hal = { path = "../zynq7000-hal" }
embassy-time-driver = { git = "https://github.com/embassy-rs/embassy.git", branch = "main", version = "0.2" }
embassy-time-queue-utils = { git = "https://github.com/embassy-rs/embassy.git", branch = "main", version = "0.1" }
# embassy-time-driver = { git = "https://github.com/embassy-rs/embassy.git", branch = "main", version = "0.2" }
# embassy-time-queue-utils = { git = "https://github.com/embassy-rs/embassy.git", branch = "main", version = "0.1" }
embassy-time-driver = "0.2"
embassy-time-queue-utils = "0.1"

View File

@@ -11,9 +11,11 @@ keywords = ["no-std", "hal", "amd", "zynq7000", "xilinx", "bare-metal"]
categories = ["embedded", "no-std", "hardware-support"]
[dependencies]
cortex-ar = "0.2"
cortex-ar = { version = "0.2", git = "https://github.com/rust-embedded/cortex-ar.git", rev = "79dba7000d2090d13823bfb783d9d64be8b778d2" }
zynq7000 = { path = "../zynq7000" }
zynq-mmu = { path = "../zynq-mmu", version = "0.1.0" }
bitbybit = "1.3"
arbitrary-int = "1.3"
thiserror = { version = "2", default-features = false }
num_enum = { version = "0.7", default-features = false }
@@ -31,7 +33,10 @@ fugit = "0.3"
critical-section = "1"
libm = "0.2"
log = "0.4"
embassy-sync = "0.6"
embassy-sync = "0.7"
embassy-net-driver = "0.2"
smoltcp = { version = "0.12", default-features = false }
vcell = "0.1"
raw-slicee = "0.1"
embedded-io-async = "0.6"

163
zynq7000-hal/src/cache.rs Normal file
View File

@@ -0,0 +1,163 @@
use core::sync::atomic::compiler_fence;
use cortex_ar::{
asm::dsb,
cache::{
clean_and_invalidate_data_cache_line_to_poc, clean_data_cache_line_to_poc,
invalidate_data_cache_line_to_poc,
},
};
use zynq7000::l2_cache::{L2Cache, MmioL2Cache};
/// L1 and L2 data cache line length in bytes on the Zynq-7000 (Cortex-A9 with L2C-310).
pub const CACHE_LINE_SIZE: usize = 32;

/// Returned when an address or length passed to a cache maintenance range operation is not a
/// multiple of [CACHE_LINE_SIZE].
#[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)]
#[error("alignment error, addresses and lengths must be aligned to 32 byte cache line length")]
pub struct AlignmentError;
/// Cleans and then invalidates a single L2 cache line identified by its physical address.
///
/// NOTE(review): this issues two separate register writes (clean, then invalidate) instead of
/// the combined clean-and-invalidate operation used elsewhere in this module.
pub fn clean_and_invalidate_l2c_line(l2c: &mut MmioL2Cache<'static>, addr: u32) {
    l2c.write_clean_by_pa(addr);
    l2c.write_invalidate_by_pa(addr);
}
/// Cleans and invalidates the full L1 and L2 cache.
pub fn clean_and_invalidate_data_cache() {
    dsb();
    // Clean L1 first so dirty lines are written out to the outer cache/memory.
    // NOTE(review): the const generic parameters <2, 5, 8> presumably encode the Cortex-A9 L1
    // data cache geometry — confirm against the cortex-ar crate documentation.
    cortex_ar::cache::clean_l1_data_cache::<2, 5, 8>();
    dsb();
    // Clean all ways in L2 cache.
    // The 0xff way mask selects all eight ways of the L2C-310.
    let mut l2c = unsafe { L2Cache::new_mmio_fixed() };
    l2c.write_clean_invalidate_by_way(0xff);
    // By-way maintenance runs in the background; poll the sync register until it completes.
    while l2c.read_cache_sync().busy() {}
    compiler_fence(core::sync::atomic::Ordering::SeqCst);
    cortex_ar::cache::clean_and_invalidate_l1_data_cache::<2, 5, 8>();
    dsb();
}
/// Invalidate an address range.
///
/// This function invalidates both the L1 and L2 cache. The L2C must be enabled and set up
/// correctly for this function to work correctly.
///
/// The provided address and the range to invalidate must both be aligned to the 32 byte cache line
/// length.
pub fn invalidate_data_cache_range(addr: u32, len: usize) -> Result<(), AlignmentError> {
    if !addr.is_multiple_of(CACHE_LINE_SIZE as u32) || !len.is_multiple_of(CACHE_LINE_SIZE) {
        return Err(AlignmentError);
    }
    let mut current_addr = addr;
    let end_addr = addr.saturating_add(len as u32);
    let mut l2c = unsafe { L2Cache::new_mmio_fixed() };
    dsb();
    // Invalidate outer caches lines first, see chapter 3.3.10 of the L2C technical reference
    // manual.
    while current_addr < end_addr {
        l2c.write_invalidate_by_pa(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    // Wait until the L2C has completed all pending maintenance operations.
    while l2c.read_cache_sync().busy() {}
    // Invalidate inner cache lines.
    current_addr = addr;
    compiler_fence(core::sync::atomic::Ordering::SeqCst);
    while current_addr < end_addr {
        // Bug fix: this previously passed `addr`, invalidating only the first line of the
        // range on every iteration and leaving all other L1 lines stale.
        invalidate_data_cache_line_to_poc(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    // Synchronize the cache maintenance.
    dsb();
    Ok(())
}
/// Clean and then invalidate an address range.
///
/// This is commonly also called cache flushing. This function cleans and invalidates both L1
/// and L2 cache. The L2C must be enabled and set up correctly for this function to work correctly.
///
/// Both the address and length to clean and invalidate must be a multiple of the 32 byte cache
/// line.
pub fn clean_and_invalidate_data_cache_range(addr: u32, len: usize) -> Result<(), AlignmentError> {
    if !addr.is_multiple_of(CACHE_LINE_SIZE as u32) || !len.is_multiple_of(CACHE_LINE_SIZE) {
        return Err(AlignmentError);
    }
    let end_addr = addr.saturating_add(len as u32);
    let mut current_addr = addr;
    dsb();
    // For details on the following section, see chapter 3.3.10 of the L2C technical reference
    // manual.
    // Clean inner cache lines first.
    while current_addr < end_addr {
        clean_data_cache_line_to_poc(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    dsb();
    // Clean and invalidates outer cache.
    let mut l2c = unsafe { L2Cache::new_mmio_fixed() };
    current_addr = addr;
    while current_addr < end_addr {
        // ARM errata 588369 specifies that clean and invalidate need to be separate, but the
        // current revision of the L2C on the Zynq7000 seems to be revision 8 (r3p2), and the
        // errata was fixed in r2p0. Both Xilinx and zynq-rs use the clean and invalidate operation,
        // so it should be fine. Considering the debug control in Xilinx code which disable
        // linefills and write-backs, zynq-rs does not appear to do that and it should not be
        // necessary.. I think this was related to the errata.
        l2c.write_clean_invalidate_by_pa(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    // Wait until the L2C has drained all pending maintenance operations.
    while l2c.read_cache_sync().busy() {}
    // Now clean and invalidate inner cache.
    current_addr = addr;
    compiler_fence(core::sync::atomic::Ordering::SeqCst);
    while current_addr < end_addr {
        clean_and_invalidate_data_cache_line_to_poc(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    dsb();
    Ok(())
}
/// Cleans an address range.
///
/// This function cleans (writes back dirty lines in) both the L1 and L2 cache without
/// invalidating them. The L2C must be enabled and set up correctly for this function to work
/// correctly.
///
/// Both the address and length to clean must be a multiple of the 32 byte cache line.
pub fn clean_data_cache_range(addr: u32, len: usize) -> Result<(), AlignmentError> {
    // Consistency fix: use the shared CACHE_LINE_SIZE constant instead of magic 32s, matching
    // the other range operations in this module.
    if !addr.is_multiple_of(CACHE_LINE_SIZE as u32) || !len.is_multiple_of(CACHE_LINE_SIZE) {
        return Err(AlignmentError);
    }
    let end_addr = addr.saturating_add(len as u32);
    let mut current_addr = addr;
    dsb();
    // For details on the following section, see chapter 3.3.10 of the L2C technical reference
    // manual.
    // Clean inner cache lines first.
    while current_addr < end_addr {
        clean_data_cache_line_to_poc(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    dsb();
    // Clean outer cache lines.
    let mut l2c = unsafe { L2Cache::new_mmio_fixed() };
    current_addr = addr;
    while current_addr < end_addr {
        l2c.write_clean_by_pa(current_addr);
        current_addr = current_addr.saturating_add(CACHE_LINE_SIZE as u32);
    }
    while l2c.read_cache_sync().busy() {}
    compiler_fence(core::sync::atomic::Ordering::SeqCst);
    // Ensure completion of the maintenance operations before returning, consistent with the
    // other range functions in this module which end with a data synchronization barrier.
    dsb();
    Ok(())
}

View File

@@ -0,0 +1,99 @@
//! Embassy-net driver for the Zynq 7000 ethernet peripheral.
use core::sync::atomic::AtomicBool;
pub use crate::eth::smoltcp::DescriptorsAndBuffers;
use crate::eth::smoltcp::{SmoltcpRxToken, SmoltcpTxToken};
pub use crate::eth::{EthernetId, InterruptResult};
use embassy_sync::waitqueue::AtomicWaker;
// Wakers registered by the embassy-net driver and woken from the interrupt handler path.
pub(crate) static TX_WAKER: AtomicWaker = AtomicWaker::new();
pub(crate) static RX_WAKER: AtomicWaker = AtomicWaker::new();
// Woken by update_link_state() whenever the cached link state changes.
static LINK_WAKER: AtomicWaker = AtomicWaker::new();
// Cached link state: false = down, true = up.
static LINK_STATE: AtomicBool = AtomicBool::new(false);
/// This interrupt handler should be called when a Gigabit Ethernet interrupt occurs.
///
/// It also handles embassy-net related waking up of tasks via wakers.
pub fn on_interrupt(eth_id: EthernetId) -> InterruptResult {
    // NOTE(review): the two boolean flags presumably enable TX and RX event handling in the
    // shared handler — confirm against the signature of the parent module's on_interrupt.
    super::on_interrupt(eth_id, true, true)
}
/// Returns the currently cached link state.
#[inline]
pub fn link_state() -> embassy_net_driver::LinkState {
    if LINK_STATE.load(core::sync::atomic::Ordering::Relaxed) {
        embassy_net_driver::LinkState::Up
    } else {
        embassy_net_driver::LinkState::Down
    }
}
/// Stores a new link state and wakes the link waker if the state actually changed.
pub fn update_link_state(new_state: embassy_net_driver::LinkState) {
    let is_up = matches!(new_state, embassy_net_driver::LinkState::Up);
    // swap() returns the previous value, so a change is detected in a single atomic op.
    let was_up = LINK_STATE.swap(is_up, core::sync::atomic::Ordering::Relaxed);
    if was_up != is_up {
        LINK_WAKER.wake();
    }
}
/// Embassy-net driver wrapping the shared smoltcp driver core.
pub struct Driver(super::smoltcp::CommonSmoltcpDriver);

impl Driver {
    /// Creates the driver, handing the descriptor and buffer memory over to the common
    /// smoltcp driver core.
    pub fn new(eth: &super::Ethernet, mac_addr: [u8; 6], bufs: DescriptorsAndBuffers) -> Self {
        // Query the burst length before `bufs` is moved into the driver core.
        let tx_burst_len = bufs.tx_burst_len();
        Self(super::smoltcp::CommonSmoltcpDriver::new(
            eth.id(),
            mac_addr,
            bufs,
            tx_burst_len,
        ))
    }
}
impl embassy_net_driver::Driver for Driver {
    type RxToken<'a>
        = SmoltcpRxToken<'a>
    where
        Self: 'a;
    type TxToken<'a>
        = SmoltcpTxToken<'a>
    where
        Self: 'a;

    fn receive(
        &mut self,
        cx: &mut core::task::Context,
    ) -> Option<(Self::RxToken<'_>, Self::TxToken<'_>)> {
        // Register for both RX and TX events since receive() hands out a TX token as well.
        RX_WAKER.register(cx.waker());
        TX_WAKER.register(cx.waker());
        self.0.receive()
    }

    fn transmit(&mut self, cx: &mut core::task::Context) -> Option<Self::TxToken<'_>> {
        TX_WAKER.register(cx.waker());
        self.0.transmit()
    }

    fn link_state(&mut self, cx: &mut core::task::Context) -> embassy_net_driver::LinkState {
        // Re-registered on every poll; woken by update_link_state() on state changes.
        LINK_WAKER.register(cx.waker());
        link_state()
    }

    fn capabilities(&self) -> embassy_net_driver::Capabilities {
        let mut capabilities = embassy_net_driver::Capabilities::default();
        capabilities.max_transmission_unit = super::MTU;
        capabilities.max_burst_size = Some(self.0.burst_size);
        // NOTE(review): these settings claim hardware IPv4/UDP/TCP checksum offload in both
        // directions — confirm the checksum offload engine is actually enabled in net_cfg.
        capabilities.checksum.ipv4 = embassy_net_driver::Checksum::Both;
        capabilities.checksum.udp = embassy_net_driver::Checksum::Both;
        capabilities.checksum.tcp = embassy_net_driver::Checksum::Both;
        capabilities.checksum.icmpv4 = embassy_net_driver::Checksum::None;
        capabilities.checksum.icmpv6 = embassy_net_driver::Checksum::None;
        capabilities
    }

    fn hardware_address(&self) -> embassy_net_driver::HardwareAddress {
        embassy_net_driver::HardwareAddress::Ethernet(self.0.mac_addr)
    }
}

375
zynq7000-hal/src/eth/ll.rs Normal file
View File

@@ -0,0 +1,375 @@
use arbitrary_int::{Number, u6};
use zynq7000::{
eth::{InterruptControl, NetworkControl, RxStatus, TxStatus},
slcr::reset::EthernetReset,
};
use crate::{clocks::IoClocks, enable_amba_peripheral_clock, slcr::Slcr, time::Hertz};
use super::{EthernetId, PsEthernet as _};
/// Owns the GEM MMIO register block together with its resolved peripheral ID.
pub struct EthernetLowLevel {
    id: EthernetId,
    pub regs: zynq7000::eth::MmioEthernet<'static>,
}
/// Ethernet link speed.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Speed {
    Mbps10,
    Mbps100,
    Mbps1000,
}

impl Speed {
    /// RGMII reference clock rate required for this link speed.
    pub const fn rgmii_clk_rate(&self) -> Hertz {
        let raw_rate = match self {
            Speed::Mbps10 => 2_500_000,
            Speed::Mbps100 => 25_000_000,
            Speed::Mbps1000 => 125_000_000,
        };
        Hertz::from_raw(raw_rate)
    }
}
/// Duplex mode of the Ethernet link.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Duplex {
    Half,
    Full,
}
/// Pair of 6-bit clock divisors applied to the selected GEM reference clock.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ClkDivisors {
    pub divisor_0: u6,
    pub divisor_1: u6,
}

impl ClkDivisors {
    pub const fn new(divisor_0: u6, divisor_1: u6) -> Self {
        Self {
            divisor_0,
            divisor_1,
        }
    }

    /// Calls [Self::calculate_for_rgmii], assuming that the IO clock is the reference clock,
    /// which is the default clock for the Ethernet module.
    pub fn calculate_for_rgmii_and_io_clock(io_clks: IoClocks, target_speed: Speed) -> (Self, u32) {
        Self::calculate_for_rgmii(io_clks.ref_clk(), target_speed)
    }

    /// Calculate the best clock configuration (divisors) for the given reference clock
    /// and desired target speed when using a RGMII interface.
    ///
    /// Usually, the reference clock will be the IO clock.
    ///
    /// Returns a tuple where the first entry is the calculated clock configuration
    /// and the second entry is the difference between calculated clock speed for the divisors
    /// and the target speed. Ideally, this difference should be 0.
    pub fn calculate_for_rgmii(ref_clk: Hertz, target_speed: Speed) -> (Self, u32) {
        let mut smallest_diff = u32::MAX;
        let target_speed = target_speed.rgmii_clk_rate();
        let mut best_div_0 = u6::new(0);
        let mut best_div_1 = u6::new(0);
        // Exhaustive search over both divisor ranges (1..=63 each).
        'outer: for div_1 in 1..=u6::MAX.as_usize() {
            for div_0 in 1..=u6::MAX.as_usize() {
                let clk_rate = ref_clk.raw() / div_0 as u32 / div_1 as u32;
                let diff = (target_speed.raw() as i64 - clk_rate as i64).unsigned_abs() as u32;
                if diff < smallest_diff {
                    smallest_diff = diff;
                    best_div_0 = u6::new(div_0 as u8);
                    best_div_1 = u6::new(div_1 as u8);
                }
                // We found a perfect match. No need to continue.
                // Fix: the previous plain `break` only left the inner loop, so the search kept
                // scanning the remaining divisor_1 values; the labeled break stops the whole
                // search. The returned result is unchanged because nothing can beat diff == 0.
                if diff == 0 {
                    break 'outer;
                }
            }
        }
        (Self::new(best_div_0, best_div_1), smallest_diff)
    }
}
/// Full clock configuration for one GEM instance.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ClkConfig {
    /// Reference clock source selection.
    pub src_sel: zynq7000::slcr::clocks::SrcSelIo,
    /// Use the TX clock routed in via EMIO instead of the divided reference clock.
    pub use_emio_tx_clk: bool,
    pub divs: ClkDivisors,
    /// Enable the clock.
    pub enable: bool,
}

impl ClkConfig {
    /// Creates a configuration with the IO PLL as clock source, no EMIO TX clock and the
    /// clock enabled.
    pub const fn new(divs: ClkDivisors) -> Self {
        Self {
            src_sel: zynq7000::slcr::clocks::SrcSelIo::IoPll,
            use_emio_tx_clk: false,
            divs,
            enable: true,
        }
    }
}
/// This is a collection of clock configuration for all relevant speed settings.
///
/// Generally, the clock need to be re-configured each time the speed settings change, for example
/// after a completed auto-negotiation process. The necessary clock configurations for each speed
/// setting can be pre-calculated and stored using this data structure.
#[derive(Debug, Clone, Copy)]
pub struct ClkDivCollection {
    pub cfg_10_mbps: ClkDivisors,
    pub cfg_100_mbps: ClkDivisors,
    pub cfg_1000_mbps: ClkDivisors,
}

impl ClkDivCollection {
    pub const fn new(
        cfg_10_mbps: ClkDivisors,
        cfg_100_mbps: ClkDivisors,
        cfg_1000_mbps: ClkDivisors,
    ) -> Self {
        Self {
            cfg_10_mbps,
            cfg_100_mbps,
            cfg_1000_mbps,
        }
    }

    /// Returns the pre-calculated divisors for the given link speed.
    #[inline]
    pub fn clk_divs_for_speed(&self, speed: Speed) -> &ClkDivisors {
        match speed {
            Speed::Mbps10 => &self.cfg_10_mbps,
            Speed::Mbps100 => &self.cfg_100_mbps,
            Speed::Mbps1000 => &self.cfg_1000_mbps,
        }
    }

    /// Calls [Self::calculate_for_rgmii], assuming that the IO clock is the reference clock,
    /// which is the default clock for the Ethernet module.
    pub fn calculate_for_rgmii_and_io_clock(io_clks: &IoClocks) -> (Self, [u32; 3]) {
        Self::calculate_for_rgmii(io_clks.ref_clk())
    }

    /// Calculate the best clock configuration (divisors) for the given reference clock
    /// for all supported speeds when using a RGMII interface.
    ///
    /// Usually, the reference clock will be the IO clock.
    ///
    /// Returns a tuple where the first entry is the calculated clock configuration collection
    /// and the second entry is an array with the differences between achievable clock rate and
    /// target rate for 10, 100 and 1000 Mbps respectively. Ideally, these differences should
    /// be 0.
    pub fn calculate_for_rgmii(ref_clk: Hertz) -> (Self, [u32; 3]) {
        let (cfg_10_mbps, error_10_mbps) = ClkDivisors::calculate_for_rgmii(ref_clk, Speed::Mbps10);
        let (cfg_100_mbps, error_100_mbps) =
            ClkDivisors::calculate_for_rgmii(ref_clk, Speed::Mbps100);
        let (cfg_1000_mbps, error_1000_mbps) =
            ClkDivisors::calculate_for_rgmii(ref_clk, Speed::Mbps1000);
        (
            Self::new(cfg_10_mbps, cfg_100_mbps, cfg_1000_mbps),
            [error_10_mbps, error_100_mbps, error_1000_mbps],
        )
    }
}
/// Ethernet low-level interface.
impl EthernetLowLevel {
    /// Creates a new instance of the Ethernet low-level interface.
    ///
    /// Returns [None] when the register block base address does not belong to one of the two
    /// GEM instances.
    #[inline]
    pub fn new(regs: zynq7000::eth::MmioEthernet<'static>) -> Option<Self> {
        regs.id()?;
        Some(EthernetLowLevel {
            id: regs.id().unwrap(),
            regs,
        })
    }

    /// Create a low-level instance for the given [EthernetId].
    ///
    /// # Safety
    ///
    /// Circumvents ownership and safety guarantees of the HAL.
    #[inline]
    pub const unsafe fn steal(id: EthernetId) -> Self {
        Self {
            id,
            regs: unsafe {
                match id {
                    EthernetId::Eth0 => zynq7000::eth::Ethernet::new_mmio_fixed_0(),
                    EthernetId::Eth1 => zynq7000::eth::Ethernet::new_mmio_fixed_1(),
                }
            },
        }
    }

    /// Pulses the SLCR reset lines of this GEM instance.
    ///
    /// The reset stays asserted for `cycles` NOP iterations before being released.
    pub fn reset(&mut self, cycles: usize) {
        // Only assert the reset bits belonging to this instance.
        let assert_reset = match self.id {
            EthernetId::Eth0 => EthernetReset::builder()
                .with_gem1_ref_rst(false)
                .with_gem0_ref_rst(true)
                .with_gem1_rx_rst(false)
                .with_gem0_rx_rst(true)
                .with_gem1_cpu1x_rst(false)
                .with_gem0_cpu1x_rst(true)
                .build(),
            EthernetId::Eth1 => EthernetReset::builder()
                .with_gem1_ref_rst(true)
                .with_gem0_ref_rst(false)
                .with_gem1_rx_rst(true)
                .with_gem0_rx_rst(false)
                .with_gem1_cpu1x_rst(true)
                .with_gem0_cpu1x_rst(false)
                .build(),
        };
        unsafe {
            Slcr::with(|regs| {
                regs.reset_ctrl().write_eth(assert_reset);
                for _ in 0..cycles {
                    cortex_ar::asm::nop();
                }
                // DEFAULT releases all GEM reset lines again.
                regs.reset_ctrl().write_eth(EthernetReset::DEFAULT);
            });
        }
    }

    /// Enables the AMBA interface clock of this GEM instance.
    #[inline]
    pub fn enable_peripheral_clock(&mut self) {
        let periph_sel = match self.id {
            EthernetId::Eth0 => crate::PeripheralSelect::Gem0,
            EthernetId::Eth1 => crate::PeripheralSelect::Gem1,
        };
        enable_amba_peripheral_clock(periph_sel);
    }

    /// Writes the full clock configuration (source, divisors, EMIO TX clock selection,
    /// enable bit) and optionally enables the RX clock.
    pub fn configure_clock(&mut self, cfg: ClkConfig, enable_rx_clock: bool) {
        unsafe {
            Slcr::with(|regs| {
                // Read-modify-write via raw pointers because the register pair is selected at
                // run-time based on the instance ID.
                let (ptr_gig_eth_clk_ctrl, ptr_gig_eth_rclk_ctrl) = self.id().clk_config_regs(regs);
                let mut gig_eth_clk_ctrl_val = core::ptr::read_volatile(ptr_gig_eth_clk_ctrl);
                gig_eth_clk_ctrl_val.set_srcsel(cfg.src_sel);
                gig_eth_clk_ctrl_val.set_divisor_0(cfg.divs.divisor_0);
                gig_eth_clk_ctrl_val.set_divisor_1(cfg.divs.divisor_1);
                gig_eth_clk_ctrl_val.set_use_emio_tx_clk(cfg.use_emio_tx_clk);
                gig_eth_clk_ctrl_val.set_clk_act(cfg.enable);
                core::ptr::write_volatile(ptr_gig_eth_clk_ctrl, gig_eth_clk_ctrl_val);
                if enable_rx_clock {
                    let mut gig_eth_rclk_ctrl_val = core::ptr::read_volatile(ptr_gig_eth_rclk_ctrl);
                    gig_eth_rclk_ctrl_val.set_clk_enable(true);
                    core::ptr::write_volatile(ptr_gig_eth_rclk_ctrl, gig_eth_rclk_ctrl_val);
                }
            })
        }
    }

    /// Updates only the two clock divisors, leaving the source selection and enable state
    /// untouched.
    pub fn configure_clock_divs(&mut self, cfg: ClkDivisors) {
        unsafe {
            Slcr::with(|regs| {
                let (ptr_gig_eth_clk_ctrl, _ptr_gig_eth_rclk_ctrl) =
                    self.id().clk_config_regs(regs);
                let mut gig_eth_clk_ctrl_val = core::ptr::read_volatile(ptr_gig_eth_clk_ctrl);
                gig_eth_clk_ctrl_val.set_divisor_0(cfg.divisor_0);
                gig_eth_clk_ctrl_val.set_divisor_1(cfg.divisor_1);
                core::ptr::write_volatile(ptr_gig_eth_clk_ctrl, gig_eth_clk_ctrl_val);
            })
        }
    }

    /// Can be used after auto-negotiation to update all relevant speed and duplex
    /// parameter of the ethernet peripheral.
    ///
    /// It is probably a good idea to disable the receiver and transmitter while doing this.
    /// This function calls [Self::configure_clock_for_speed] and [Self::set_speed_and_duplex].
    pub fn configure_clock_and_speed_duplex(
        &mut self,
        speed: Speed,
        duplex: Duplex,
        clk_collection: &ClkDivCollection,
    ) {
        self.configure_clock_for_speed(speed, clk_collection);
        self.set_speed_and_duplex(speed, duplex);
    }

    /// Applies the pre-calculated divisors matching the given link speed.
    pub fn configure_clock_for_speed(&mut self, speed: Speed, clk_collection: &ClkDivCollection) {
        match speed {
            Speed::Mbps10 => self.configure_clock_divs(clk_collection.cfg_10_mbps),
            Speed::Mbps100 => self.configure_clock_divs(clk_collection.cfg_100_mbps),
            Speed::Mbps1000 => self.configure_clock_divs(clk_collection.cfg_1000_mbps),
        }
    }

    /// Enables or disables promiscuous mode (copy all frames).
    #[inline]
    pub fn set_promiscous_mode(&mut self, enable: bool) {
        self.regs.modify_net_cfg(|mut val| {
            val.set_copy_all_frames(enable);
            val
        });
    }

    #[inline]
    pub fn set_rx_buf_descriptor_base_address(&mut self, addr: u32) {
        self.regs.write_rx_buf_queue_base_addr(addr);
    }

    #[inline]
    pub fn set_tx_buf_descriptor_base_address(&mut self, addr: u32) {
        self.regs.write_tx_buf_queue_base_addr(addr);
    }

    /// This function sets the speed and duplex mode of the Ethernet interface.
    ///
    /// This should be called after a completed auto-negotiation process with the negotiated
    /// settings.
    pub fn set_speed_and_duplex(&mut self, speed: Speed, duplex: Duplex) {
        self.regs.modify_net_cfg(|mut val| {
            val.set_full_duplex(duplex == Duplex::Full);
            match speed {
                Speed::Mbps10 => {
                    val.set_speed_mode(zynq7000::eth::SpeedMode::Low10Mbps);
                    val.set_gigabit_enable(false);
                    val
                }
                Speed::Mbps100 => {
                    val.set_speed_mode(zynq7000::eth::SpeedMode::High100Mbps);
                    val.set_gigabit_enable(false);
                    val
                }
                Speed::Mbps1000 => {
                    // NOTE(review): the 10/100 speed mode bit is left untouched here —
                    // presumably it is ignored while gigabit mode is enabled; confirm with TRM.
                    val.set_gigabit_enable(true);
                    val
                }
            }
        });
    }

    /// Allows enabling/disabling ethernet receiver and transmitter respectively.
    #[inline]
    pub fn set_tx_rx_enable(&mut self, tx_enable: bool, rx_enable: bool) {
        self.regs.modify_net_ctrl(|mut val| {
            val.set_rx_enable(rx_enable);
            val.set_tx_enable(tx_enable);
            val
        });
    }

    /// Performs initialization according to TRM p.541.
    ///
    /// These steps do not include any resets or clock configuration.
    pub fn initialize(&mut self, reset_rx_tx_queue_base_addr: bool) {
        // Disable everything first by writing an all-zero control register.
        let mut ctrl_val = NetworkControl::new_with_raw_value(0);
        self.regs.write_net_ctrl(ctrl_val);
        // Now clear statistics.
        ctrl_val.set_clear_statistics(true);
        self.regs.write_net_ctrl(ctrl_val);
        // Clear all sticky status bits and disable every interrupt source.
        self.regs.write_tx_status(TxStatus::new_clear_all());
        self.regs.write_rx_status(RxStatus::new_clear_all());
        self.regs
            .write_interrupt_disable(InterruptControl::new_clear_all());
        if reset_rx_tx_queue_base_addr {
            self.regs.write_rx_buf_queue_base_addr(0);
            self.regs.write_tx_buf_queue_base_addr(0);
        }
    }

    #[inline]
    pub const fn id(&self) -> EthernetId {
        self.id
    }
}

View File

@@ -0,0 +1,79 @@
use arbitrary_int::{u2, u5};
use zynq7000::eth::{MdcClkDiv, PhyMaintenance};
use super::{EthernetId, ll::EthernetLowLevel};
/// Handle for the MDIO (PHY management) interface of a GEM instance.
pub struct Mdio {
    // Aliased register block; only the PHY maintenance related registers are accessed.
    regs: zynq7000::eth::MmioEthernet<'static>,
    // Use IEEE 802.3 clause 22 framing for maintenance operations.
    clause22: bool,
}
impl Mdio {
    /// Creates an MDIO handle sharing the register block of the given low-level driver.
    pub fn new(ll: &EthernetLowLevel, clause22: bool) -> Self {
        Self {
            regs: unsafe { ll.regs.clone() },
            clause22,
        }
    }

    /// # Safety
    ///
    /// Circumvents ownership and safety guarantees of the HAL.
    pub unsafe fn steal(eth_id: EthernetId, clause22: bool) -> Self {
        Self {
            regs: unsafe { eth_id.steal_regs() },
            clause22,
        }
    }

    /// Clones this MDIO handle, creating a second alias of the register block.
    ///
    /// # Safety
    ///
    /// Circumvents ownership and safety guarantees of the HAL.
    pub unsafe fn clone(&self) -> Self {
        Self {
            regs: unsafe { self.regs.clone() },
            clause22: self.clause22,
        }
    }

    /// Sets the MDC clock divisor applied to the CPU 1x clock.
    #[inline]
    pub fn configure_clock_div(&mut self, clk_div: MdcClkDiv) {
        self.regs.modify_net_cfg(|mut val| {
            val.set_mdc_clk_div(clk_div);
            val
        });
    }

    /// Performs a blocking read of a PHY register.
    ///
    /// Busy-waits until the management interface is idle before issuing the operation and
    /// again before reading back the data.
    pub fn read_blocking(&mut self, phy_addr: u5, reg_addr: u5) -> u16 {
        while !self.regs.read_net_status().phy_mgmt_idle() {}
        self.regs.write_phy_maintenance(
            PhyMaintenance::builder()
                .with_clause_22(self.clause22)
                .with_op(zynq7000::eth::PhyOperation::Read)
                .with_phy_addr(phy_addr)
                .with_reg_addr(reg_addr)
                .with_must_be_0b10(u2::new(0b10))
                .with_data(0x0000)
                .build(),
        );
        while !self.regs.read_net_status().phy_mgmt_idle() {}
        self.regs.read_phy_maintenance().data()
    }

    /// Performs a blocking write of a PHY register.
    pub fn write_blocking(&mut self, phy_addr: u5, reg_addr: u5, data: u16) {
        while !self.regs.read_net_status().phy_mgmt_idle() {}
        self.regs.write_phy_maintenance(
            PhyMaintenance::builder()
                .with_clause_22(self.clause22)
                .with_op(zynq7000::eth::PhyOperation::Write)
                .with_phy_addr(phy_addr)
                .with_reg_addr(reg_addr)
                .with_must_be_0b10(u2::new(0b10))
                .with_data(data)
                .build(),
        );
        while !self.regs.read_net_status().phy_mgmt_idle() {}
    }
}

787
zynq7000-hal/src/eth/mod.rs Normal file
View File

@@ -0,0 +1,787 @@
use arbitrary_int::{u2, u3};
pub use zynq7000::eth::MdcClkDiv;
use zynq7000::eth::{
BurstLength, DmaRxBufSize, GEM_0_BASE_ADDR, GEM_1_BASE_ADDR, InterruptControl, InterruptStatus,
MmioEthernet, RxStatus, TxStatus,
};
pub use ll::{ClkConfig, ClkDivCollection, Duplex, EthernetLowLevel, Speed};
pub mod embassy_net;
pub mod ll;
pub mod mdio;
pub mod rx_descr;
pub mod smoltcp;
pub mod tx_descr;
/// Frame buffer size used for RX/TX buffers.
pub const MTU: usize = 1536;
/// Maximum MDC management clock speed allowed by the 802.3 standard.
pub const MAX_MDC_SPEED: Hertz = Hertz::from_raw(2_500_000);

/// MTU-sized frame buffer aligned to the 32 byte cache line length.
#[repr(C, align(32))]
#[derive(Debug, Clone, Copy)]
pub struct AlignedBuffer(pub [u8; MTU]);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
use crate::gpio::mio::{
Mio16, Mio17, Mio18, Mio19, Mio20, Mio21, Mio22, Mio23, Mio24, Mio25, Mio26, Mio27,
};
use crate::{
clocks::ArmClocks,
eth::ll::ClkDivisors,
gpio::{
IoPeriphPin,
mio::{
Mio28, Mio29, Mio30, Mio31, Mio32, Mio33, Mio34, Mio35, Mio36, Mio37, Mio38, Mio39,
Mio52, Mio53, MioPinMarker, MuxConf, Pin,
},
},
time::Hertz,
};
/// MIO mux configuration for the RGMII TX/RX pins (L0 routing).
pub const MUX_CONF_PHY: MuxConf = MuxConf::new_with_l0();
/// MIO mux configuration for the MDIO pins (L3 routing, selector 0b100).
pub const MUX_CONF_MDIO: MuxConf = MuxConf::new_with_l3(u3::new(0b100));

/// Index of the PS gigabit ethernet (GEM) instance.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EthernetId {
    Eth0 = 0,
    Eth1 = 1,
}
impl EthernetId {
    /// Steal the ethernet register block for the given ethernet ID.
    ///
    /// # Safety
    ///
    /// Circumvents ownership and safety guarantees of the HAL.
    pub const unsafe fn steal_regs(&self) -> zynq7000::eth::MmioEthernet<'static> {
        unsafe {
            match self {
                EthernetId::Eth0 => zynq7000::eth::Ethernet::new_mmio_fixed_0(),
                EthernetId::Eth1 => zynq7000::eth::Ethernet::new_mmio_fixed_1(),
            }
        }
    }

    /// Returns raw pointers to the (clock control, RX clock control) SLCR registers for this
    /// GEM instance. The caller is responsible for volatile accesses.
    pub fn clk_config_regs(
        &self,
        slcr: &mut zynq7000::slcr::MmioSlcr<'static>,
    ) -> (
        *mut zynq7000::slcr::clocks::GigEthClkCtrl,
        *mut zynq7000::slcr::clocks::GigEthRclkCtrl,
    ) {
        match self {
            EthernetId::Eth0 => (
                slcr.clk_ctrl().pointer_to_gem_0_clk_ctrl(),
                slcr.clk_ctrl().pointer_to_gem_0_rclk_ctrl(),
            ),
            EthernetId::Eth1 => (
                slcr.clk_ctrl().pointer_to_gem_1_clk_ctrl(),
                slcr.clk_ctrl().pointer_to_gem_1_rclk_ctrl(),
            ),
        }
    }
}
/// Abstraction over the two PS GEM MMIO register blocks.
pub trait PsEthernet {
    fn reg_block(&self) -> MmioEthernet<'static>;
    fn id(&self) -> Option<EthernetId>;
}

impl PsEthernet for MmioEthernet<'static> {
    #[inline]
    fn reg_block(&self) -> MmioEthernet<'static> {
        // SAFETY: creates an alias of the register block; callers must coordinate access.
        unsafe { self.clone() }
    }

    /// Derives the instance ID from the MMIO base address; [None] for unknown addresses.
    #[inline]
    fn id(&self) -> Option<EthernetId> {
        let base_addr = unsafe { self.ptr() } as usize;
        if base_addr == GEM_0_BASE_ADDR {
            return Some(EthernetId::Eth0);
        } else if base_addr == GEM_1_BASE_ADDR {
            return Some(EthernetId::Eth1);
        }
        None
    }
}
// Marker traits tying each RGMII/MDIO capable MIO pin to its ethernet instance. The ETH_ID
// associated constant identifies which GEM controller a pin belongs to.

pub trait TxClk: MioPinMarker {
    const ETH_ID: EthernetId;
}
pub trait TxCtrl: MioPinMarker {
    const ETH_ID: EthernetId;
}
pub trait TxData0: MioPinMarker {
    const ETH_ID: EthernetId;
}
pub trait TxData1: MioPinMarker {
    const ETH_ID: EthernetId;
}
pub trait TxData2: MioPinMarker {
    const ETH_ID: EthernetId;
}
pub trait TxData3: MioPinMarker {
    const ETH_ID: EthernetId;
}
pub trait RxClk: MioPinMarker {
    const ETH_ID: EthernetId;
}
pub trait RxCtrl: MioPinMarker {
    const ETH_ID: EthernetId;
}
pub trait RxData0: MioPinMarker {
    const ETH_ID: EthernetId;
}
pub trait RxData1: MioPinMarker {
    const ETH_ID: EthernetId;
}
pub trait RxData2: MioPinMarker {
    const ETH_ID: EthernetId;
}
pub trait RxData3: MioPinMarker {
    const ETH_ID: EthernetId;
}
// MDIO pins are shared between both GEM instances, so no ETH_ID constant.
pub trait MdClk: MioPinMarker {}
pub trait MdIo: MioPinMarker {}

impl MdClk for Pin<Mio52> {}
impl MdIo for Pin<Mio53> {}

// GEM 0 pin group (MIO 16-27); gated out for the 7z010/7z007s CLG225 package variant.
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl TxClk for Pin<Mio16> {
    const ETH_ID: EthernetId = EthernetId::Eth0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl TxCtrl for Pin<Mio21> {
    const ETH_ID: EthernetId = EthernetId::Eth0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl TxData0 for Pin<Mio17> {
    const ETH_ID: EthernetId = EthernetId::Eth0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl TxData1 for Pin<Mio18> {
    const ETH_ID: EthernetId = EthernetId::Eth0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl TxData2 for Pin<Mio19> {
    const ETH_ID: EthernetId = EthernetId::Eth0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl TxData3 for Pin<Mio20> {
    const ETH_ID: EthernetId = EthernetId::Eth0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl RxClk for Pin<Mio22> {
    const ETH_ID: EthernetId = EthernetId::Eth0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl RxCtrl for Pin<Mio27> {
    const ETH_ID: EthernetId = EthernetId::Eth0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl RxData0 for Pin<Mio23> {
    const ETH_ID: EthernetId = EthernetId::Eth0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl RxData1 for Pin<Mio24> {
    const ETH_ID: EthernetId = EthernetId::Eth0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl RxData2 for Pin<Mio25> {
    const ETH_ID: EthernetId = EthernetId::Eth0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl RxData3 for Pin<Mio26> {
    const ETH_ID: EthernetId = EthernetId::Eth0;
}

// GEM 1 pin group (MIO 28-39), available on all package variants.
impl TxClk for Pin<Mio28> {
    const ETH_ID: EthernetId = EthernetId::Eth1;
}
impl TxCtrl for Pin<Mio33> {
    const ETH_ID: EthernetId = EthernetId::Eth1;
}
impl TxData0 for Pin<Mio29> {
    const ETH_ID: EthernetId = EthernetId::Eth1;
}
impl TxData1 for Pin<Mio30> {
    const ETH_ID: EthernetId = EthernetId::Eth1;
}
impl TxData2 for Pin<Mio31> {
    const ETH_ID: EthernetId = EthernetId::Eth1;
}
impl TxData3 for Pin<Mio32> {
    const ETH_ID: EthernetId = EthernetId::Eth1;
}
impl RxClk for Pin<Mio34> {
    const ETH_ID: EthernetId = EthernetId::Eth1;
}
impl RxCtrl for Pin<Mio39> {
    const ETH_ID: EthernetId = EthernetId::Eth1;
}
impl RxData0 for Pin<Mio35> {
    const ETH_ID: EthernetId = EthernetId::Eth1;
}
impl RxData1 for Pin<Mio36> {
    const ETH_ID: EthernetId = EthernetId::Eth1;
}
impl RxData2 for Pin<Mio37> {
    const ETH_ID: EthernetId = EthernetId::Eth1;
}
impl RxData3 for Pin<Mio38> {
    const ETH_ID: EthernetId = EthernetId::Eth1;
}
/// Calculate the CPU 1x clock divisor required to achieve a clock speed which is below
/// 2.5 MHz, as specified by the 802.3 standard.
///
/// Returns [None] when even the largest available divisor (224) cannot bring the MDC clock
/// below the maximum allowed speed.
pub fn calculate_mdc_clk_div(arm_clks: &ArmClocks) -> Option<MdcClkDiv> {
    // Smallest integer divisor which brings cpu_1x down to at most MAX_MDC_SPEED.
    let div = arm_clks.cpu_1x_clk().raw().div_ceil(MAX_MDC_SPEED.raw());
    match div {
        0..8 => Some(MdcClkDiv::Div8),
        8..16 => Some(MdcClkDiv::Div16),
        16..32 => Some(MdcClkDiv::Div32),
        32..48 => Some(MdcClkDiv::Div48),
        48..64 => Some(MdcClkDiv::Div64),
        64..96 => Some(MdcClkDiv::Div96),
        96..128 => Some(MdcClkDiv::Div128),
        // Off-by-one fix: a required divisor of exactly 224 is still satisfied by Div224
        // (cpu_1x / 224 <= MAX_MDC_SPEED), so the upper bound must be inclusive.
        128..=224 => Some(MdcClkDiv::Div224),
        // MDC clock divisor is too high for the maximum speed.
        // This is not a valid configuration.
        _ => None,
    }
}
/// Initial configuration of the [Ethernet] driver.
#[derive(Debug, Clone, Copy)]
pub struct EthernetConfig {
    /// Clock configuration applied for the initial 1000 Mbps setup.
    pub clk_config_1000_mbps: ClkConfig,
    /// Divisor for the MDC management clock, see [calculate_mdc_clk_div].
    pub mdc_clk_div: MdcClkDiv,
    /// MAC address of the interface.
    pub mac_address: [u8; 6],
}

impl EthernetConfig {
    /// Creates a new Ethernet configuration.
    pub fn new(
        clk_config_1000_mbps: ClkConfig,
        mdc_clk_div: MdcClkDiv,
        mac_address: [u8; 6],
    ) -> Self {
        Self {
            clk_config_1000_mbps,
            mdc_clk_div,
            mac_address,
        }
    }
}
/// High-level Ethernet driver combining the low-level register interface and the MDIO bus.
pub struct Ethernet {
    ll: ll::EthernetLowLevel,
    mdio: mdio::Mdio,
    // Currently configured link parameters.
    current_speed: Speed,
    current_duplex: Duplex,
    current_clk_divs: ClkDivisors,
}
// Interrupt sources enabled when starting the driver: error conditions plus TX/RX completion
// events. Link, auto-negotiation, pause and PTP related sources stay disabled.
const IRQ_CONTROL: InterruptControl = InterruptControl::builder()
    .with_tsu_sec_incr(false)
    .with_partner_pg_rx(false)
    .with_auto_negotiation_complete(false)
    .with_external_interrupt(false)
    .with_pause_transmitted(false)
    .with_pause_time_zero(false)
    .with_pause_with_non_zero_quantum(false)
    .with_hresp_not_ok(true)
    .with_rx_overrun(true)
    .with_link_changed(false)
    .with_frame_sent(true)
    .with_tx_frame_corruption_ahb_error(true)
    .with_tx_retry_limit_reached_or_late_collision(true)
    .with_tx_descr_read_when_used(true)
    .with_rx_descr_read_when_used(true)
    .with_frame_received(true)
    .with_mgmt_frame_sent(false)
    .build();
// Write-one-to-clear mask covering the interrupt status bits.
//
// NOTE(review): unlike IRQ_CONTROL above, this builder chain has no
// with_tx_frame_corruption_ahb_error entry — confirm whether the builder defaults that bit
// and whether it should be set here as well.
const IRQ_CLEAR_ALL: InterruptStatus = InterruptStatus::builder()
    .with_tsu_sec_incr(true)
    .with_partner_pg_rx(true)
    .with_auto_negotiation_complete(true)
    .with_external_interrupt(true)
    .with_pause_transmitted(true)
    .with_pause_time_zero(true)
    .with_pause_with_non_zero_quantum(true)
    .with_hresp_not_ok(true)
    .with_rx_overrun(true)
    .with_link_changed(true)
    .with_frame_sent(true)
    .with_tx_retry_limit_reached_or_late_collision(true)
    .with_tx_descr_read_when_used(true)
    .with_rx_descr_read_when_used(true)
    .with_frame_received(true)
    .with_mgmt_frame_sent(true)
    .build();
impl Ethernet {
    /// Creates the Ethernet driver and configures all RGMII and optional MDIO pins via MIO.
    ///
    /// Configures the TX pin group as HSTL pins with the receiver disabled and the RX pin
    /// group as HSTL inputs, enables the internal VREF generator required for HSTL mode,
    /// applies the 1000 Mbps clock configuration, sets up the MDC clock divisor and enables
    /// the management port. The RX/TX buffer descriptor base addresses are reset to 0.
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_mio<
        TxClkPin: TxClk,
        TxCtrlPin: TxCtrl,
        TxData0Pin: TxData0,
        TxData1Pin: TxData1,
        TxData2Pin: TxData2,
        TxData3Pin: TxData3,
        RxClkPin: RxClk,
        RxCtrlPin: RxCtrl,
        RxData0Pin: RxData0,
        RxData1Pin: RxData1,
        RxData2Pin: RxData2,
        RxData3Pin: RxData3,
        MdClkPin: MdClk,
        MdIoPin: MdIo,
    >(
        mut ll: ll::EthernetLowLevel,
        config: EthernetConfig,
        tx_clk: TxClkPin,
        tx_ctrl: TxCtrlPin,
        tx_data: (TxData0Pin, TxData1Pin, TxData2Pin, TxData3Pin),
        rx_clk: RxClkPin,
        rx_ctrl: RxCtrlPin,
        rx_data: (RxData0Pin, RxData1Pin, RxData2Pin, RxData3Pin),
        md_pins: Option<(MdClkPin, MdIoPin)>,
    ) -> Self {
        Self::common_init(&mut ll, config.mac_address);
        // TX pins: fast HSTL with the input receiver disabled.
        let tx_mio_config = zynq7000::slcr::mio::Config::builder()
            .with_disable_hstl_rcvr(true)
            .with_pullup(true)
            .with_io_type(zynq7000::slcr::mio::IoType::Hstl)
            .with_speed(zynq7000::slcr::mio::Speed::FastCmosEdge)
            .with_l3_sel(MUX_CONF_PHY.l3_sel())
            .with_l2_sel(MUX_CONF_PHY.l2_sel())
            .with_l1_sel(MUX_CONF_PHY.l1_sel())
            .with_l0_sel(MUX_CONF_PHY.l0_sel())
            .with_tri_enable(false)
            .build();
        // RX pins: HSTL inputs with the receiver enabled and the output driver tri-stated.
        let rx_mio_config = zynq7000::slcr::mio::Config::builder()
            .with_disable_hstl_rcvr(false)
            .with_pullup(true)
            .with_io_type(zynq7000::slcr::mio::IoType::Hstl)
            .with_speed(zynq7000::slcr::mio::Speed::FastCmosEdge)
            .with_l3_sel(MUX_CONF_PHY.l3_sel())
            .with_l2_sel(MUX_CONF_PHY.l2_sel())
            .with_l1_sel(MUX_CONF_PHY.l1_sel())
            .with_l0_sel(MUX_CONF_PHY.l0_sel())
            // TODO: What is correct now?
            // Disable output driver.
            .with_tri_enable(true)
            //.with_tri_enable(false)
            .build();
        unsafe {
            crate::slcr::Slcr::with(|slcr_mut| {
                IoPeriphPin::new_with_full_config_and_unlocked_slcr(
                    tx_clk,
                    slcr_mut,
                    tx_mio_config,
                );
                IoPeriphPin::new_with_full_config_and_unlocked_slcr(
                    tx_ctrl,
                    slcr_mut,
                    tx_mio_config,
                );
                IoPeriphPin::new_with_full_config_and_unlocked_slcr(
                    tx_data.0,
                    slcr_mut,
                    tx_mio_config,
                );
                IoPeriphPin::new_with_full_config_and_unlocked_slcr(
                    tx_data.1,
                    slcr_mut,
                    tx_mio_config,
                );
                IoPeriphPin::new_with_full_config_and_unlocked_slcr(
                    tx_data.2,
                    slcr_mut,
                    tx_mio_config,
                );
                IoPeriphPin::new_with_full_config_and_unlocked_slcr(
                    tx_data.3,
                    slcr_mut,
                    tx_mio_config,
                );
                IoPeriphPin::new_with_full_config_and_unlocked_slcr(
                    rx_clk,
                    slcr_mut,
                    rx_mio_config,
                );
                IoPeriphPin::new_with_full_config_and_unlocked_slcr(
                    rx_ctrl,
                    slcr_mut,
                    rx_mio_config,
                );
                IoPeriphPin::new_with_full_config_and_unlocked_slcr(
                    rx_data.0,
                    slcr_mut,
                    rx_mio_config,
                );
                IoPeriphPin::new_with_full_config_and_unlocked_slcr(
                    rx_data.1,
                    slcr_mut,
                    rx_mio_config,
                );
                IoPeriphPin::new_with_full_config_and_unlocked_slcr(
                    rx_data.2,
                    slcr_mut,
                    rx_mio_config,
                );
                IoPeriphPin::new_with_full_config_and_unlocked_slcr(
                    rx_data.3,
                    slcr_mut,
                    rx_mio_config,
                );
                if let Some((md_clk, md_io)) = md_pins {
                    // MDIO pins: slow LVCMOS18 with the output driver active.
                    let md_mio_config = zynq7000::slcr::mio::Config::builder()
                        .with_disable_hstl_rcvr(false)
                        .with_pullup(true)
                        .with_io_type(zynq7000::slcr::mio::IoType::LvCmos18)
                        .with_speed(zynq7000::slcr::mio::Speed::SlowCmosEdge)
                        .with_l3_sel(MUX_CONF_MDIO.l3_sel())
                        .with_l2_sel(MUX_CONF_MDIO.l2_sel())
                        .with_l1_sel(MUX_CONF_MDIO.l1_sel())
                        .with_l0_sel(MUX_CONF_MDIO.l0_sel())
                        .with_tri_enable(false)
                        .build();
                    IoPeriphPin::new_with_full_config_and_unlocked_slcr(
                        md_clk,
                        slcr_mut,
                        md_mio_config,
                    );
                    IoPeriphPin::new_with_full_config_and_unlocked_slcr(
                        md_io,
                        slcr_mut,
                        md_mio_config,
                    );
                }
                // Enable VREF internal generator, which is required for HSTL pin mode.
                slcr_mut.gpiob().modify_ctrl(|mut ctrl| {
                    ctrl.set_vref_en(true);
                    ctrl
                });
            });
        }
        ll.configure_clock(config.clk_config_1000_mbps, true);
        let mut mdio = mdio::Mdio::new(&ll, true);
        mdio.configure_clock_div(config.mdc_clk_div);
        // Enable the management (MDIO) port so PHY registers can be accessed.
        ll.regs.modify_net_ctrl(|mut val| {
            val.set_management_port_enable(true);
            val
        });
        let mut eth = Ethernet {
            ll,
            mdio,
            current_speed: Speed::Mbps1000,
            current_duplex: Duplex::Full,
            current_clk_divs: config.clk_config_1000_mbps.divs,
        };
        eth.set_rx_buf_descriptor_base_address(0);
        eth.set_tx_buf_descriptor_base_address(0);
        eth
    }
    /// Creates a new Ethernet driver instance without touching any MIO pin configuration.
    ///
    /// Performs the common controller initialization (reset, MAC address setup, network and
    /// DMA configuration), configures the clock for the 1000 Mbps settings of the passed
    /// [EthernetConfig] and sets up the MDIO clock divisor.
    ///
    /// NOTE(review): unlike the MIO-configuring constructor above, this does not enable the
    /// management port in the network control register and does not set the RX/TX buffer
    /// descriptor base addresses to 0 — confirm whether this asymmetry is intended.
    pub fn new(mut ll: EthernetLowLevel, config: EthernetConfig) -> Self {
        Self::common_init(&mut ll, config.mac_address);
        ll.configure_clock(config.clk_config_1000_mbps, true);
        let mut mdio = mdio::Mdio::new(&ll, true);
        mdio.configure_clock_div(config.mdc_clk_div);
        // Speed and duplex default to 1000 Mbps full duplex; they are expected to be
        // updated again once PHY auto-negotiation has completed.
        Ethernet {
            ll,
            mdio,
            current_speed: Speed::Mbps1000,
            current_duplex: Duplex::Full,
            current_clk_divs: config.clk_config_1000_mbps.divs,
        }
    }
    /// Starts the peripheral by enabling relevant interrupts as well as the TX and RX blocks.
    ///
    /// The user should set valid TX and RX buffer queue addresses before calling this function.
    pub fn start(&mut self) {
        // Clear any stale interrupt flags before arming interrupt generation.
        self.clear_interrupts();
        self.enable_interrupts();
        // Enable transmitter and receiver last, once interrupts are armed.
        self.ll.regs.modify_net_ctrl(|mut val| {
            val.set_tx_enable(true);
            val.set_rx_enable(true);
            val
        });
    }
    /// Link speed currently tracked by the driver.
    #[inline]
    pub fn speed(&self) -> Speed {
        self.current_speed
    }

    /// Duplex mode currently tracked by the driver.
    #[inline]
    pub fn duplex(&self) -> Duplex {
        self.current_duplex
    }
    /// Resets the controller and re-runs the base register initialization.
    ///
    /// The RX and TX buffer descriptor queue base addresses are deliberately NOT
    /// re-configured; the user has to set them again before restarting the peripheral.
    pub fn reset(&mut self) {
        self.ll.reset(3);
        // Do not set RX and TX queue base address.
        self.ll.initialize(false);
    }
    /// Initialization sequence shared by all constructors.
    ///
    /// Enables the peripheral clock, resets and initializes the controller, programs the
    /// MAC address into specific address register 1 and applies the base network and DMA
    /// configuration.
    fn common_init(ll: &mut EthernetLowLevel, mac_address: [u8; 6]) {
        ll.enable_peripheral_clock();
        ll.reset(3);
        ll.initialize(true);
        // Set speed and duplex to sensible values, but these probably need to be set again after
        // auto-negotiation has completed.
        ll.set_speed_and_duplex(Speed::Mbps1000, Duplex::Full);
        // MAC address byte 0 ends up in the LSB of the low register, matching the
        // little-endian layout of the specific address register pair.
        let macaddr_msbs = (u32::from(mac_address[5]) << 8) | u32::from(mac_address[4]);
        let macaddr_lsbs = (u32::from(mac_address[3]) << 24)
            | (u32::from(mac_address[2]) << 16)
            | (u32::from(mac_address[1]) << 8)
            | u32::from(mac_address[0]);
        // Writing to the lower address portion disables the address match, writing to the higher
        // portion enables it again. Address matching is disabled on reset, so we do not need
        // to disable the other addresses here.
        ll.regs.write_addr1_low(macaddr_lsbs);
        ll.regs.write_addr1_high(macaddr_msbs);
        ll.regs.modify_net_cfg(|mut val| {
            // Accept frames up to 1536 bytes and let the hardware check RX checksums.
            val.set_rx_enable_1536(true);
            val.set_rx_checksum_enable(true);
            val.set_no_broadcast(false);
            // val.set_pause_enable(true);
            val
        });
        ll.regs.modify_dma_cfg(|mut val| {
            // NOTE(review): 0b11 presumably selects the largest RX packet buffer size —
            // confirm against the TRM.
            val.set_rx_packet_buf_size_sel(u2::new(0b11));
            val.set_tx_packet_buf_size_sel(true);
            val.set_chksum_offload_enable(true);
            val.set_burst_length(BurstLength::Incr16.reg_value());
            // Configure 1536 bytes receive buffer size. This is sufficient for regular Ethernet
            // frames. The register field is specified in units of 64 bytes, hence MTU >> 6.
            val.set_dma_rx_ahb_buf_size_sel(DmaRxBufSize::new((MTU >> 6) as u8).unwrap());
            val.set_endian_swap_mgmt_descriptor(zynq7000::eth::AhbEndianess::Little);
            val.set_endian_swap_packet_data(zynq7000::eth::AhbEndianess::Little);
            val
        });
    }
    /// Clears all pending interrupt status flags by writing the full clear mask.
    #[inline]
    pub fn clear_interrupts(&mut self) {
        self.ll.regs.write_interrupt_status(IRQ_CLEAR_ALL);
    }

    /// Enables the interrupt sources used by this driver.
    #[inline]
    pub fn enable_interrupts(&mut self) {
        self.ll.regs.write_interrupt_enable(IRQ_CONTROL);
    }

    /// Disables the interrupt sources used by this driver.
    #[inline]
    pub fn disable_interrupts(&mut self) {
        self.ll.regs.write_interrupt_disable(IRQ_CONTROL);
    }
#[inline]
pub fn ll(&mut self) -> &mut EthernetLowLevel {
&mut self.ll
}
#[inline]
pub fn regs(&mut self) -> &MmioEthernet<'static> {
&self.ll.regs
}
#[inline]
pub fn mdio(&mut self) -> &mdio::Mdio {
&self.mdio
}
pub fn mdio_mut(&mut self) -> &mut mdio::Mdio {
&mut self.mdio
}
#[inline]
pub fn regs_mut(&mut self) -> &mut MmioEthernet<'static> {
&mut self.ll.regs
}
/// This function checks whether new auto-negotiated settings require driver settings
/// updates and perform them if necessary.
pub fn configure_clock_and_speed_duplex(
&mut self,
speed: Speed,
duplex: Duplex,
clk_collection: &ClkDivCollection,
) {
if speed == self.current_speed
&& duplex == self.current_duplex
&& *clk_collection.clk_divs_for_speed(speed) == self.current_clk_divs
{
// No change, do nothing.
return;
}
self.ll.set_tx_rx_enable(false, false);
self.ll
.configure_clock_and_speed_duplex(speed, duplex, clk_collection);
self.ll.set_tx_rx_enable(true, true);
}
    // Thin delegation of frequently used low-level methods so callers do not have to go
    // through [Self::ll] for them.
    delegate::delegate! {
        to self.ll {
            #[inline]
            pub const fn id(&self) -> EthernetId;
            #[inline]
            pub fn set_rx_buf_descriptor_base_address(&mut self, addr: u32);
            #[inline]
            pub fn set_tx_buf_descriptor_base_address(&mut self, addr: u32);
            #[inline]
            pub fn set_tx_rx_enable(&mut self, tx_enable: bool, rx_enable: bool);
        }
    }
}
/// Definitions shared between the RX and TX buffer descriptor modules.
mod shared {
    /// Descriptor ownership marker shared between software and the DMA hardware.
    #[bitbybit::bitenum(u1, exhaustive = true)]
    #[derive(Debug, PartialEq, Eq)]
    pub enum Ownership {
        Hardware = 0,
        Software = 1,
    }
}
/// Possibly critical errors.
#[derive(Debug, Clone, Copy, Default)]
pub struct EthErrors {
    /// According to the TMR, p.551, this condition implies a packet is dropped because the
    /// packet buffer is full. It occurs occasionally when the controller is unable to process
    /// the packets if they arrive very fast. No special action for error recovery needs to
    /// be taken, but we still report it.
    pub rx_overrun: bool,
    /// The TMR recommends re-initializing the controller and the buffer descriptors for
    /// receive and transmit paths.
    pub hresp_error: bool,
    /// The TMR recommends disabling the ethernet transmitter, re-initializing the buffer
    /// descriptors on the transmit side and re-enabling the transmitter.
    pub tx_frame_corruption_ahb_error: bool,
    /// Only set in Gigabit mode, for 10/100 mode, late collision and collisions are treated
    /// the same.
    pub tx_late_collision: bool,
    /// In 10/100 mode, this is set when the retry limit was reached.
    ///
    /// According to the TMR, p.551, this implies there are a series of collisions for which
    /// an Ethernet frame could not be sent out even with a number of retries in half-duplex mode.
    /// No drastic measures need to be taken, but this could also be an indicator for a duplex
    /// mismatch.
    pub tx_retry_limit_reached: bool,
}
/// Condensed result of one interrupt handler invocation.
#[derive(Debug, Clone, Copy, Default)]
pub struct InterruptResult {
    /// A complete frame was received.
    pub frame_received: bool,
    /// A frame transmission completed.
    pub frame_sent: bool,
    /// Possibly indicator for high traffic, not enough descriptors available or not handled
    /// quick enough.
    pub rx_descr_read_when_used: bool,
    /// Indicator that driver is done sending all frames because it found a descriptor slot owned
    /// by the software.
    pub tx_descr_read_when_used: bool,
    /// These are full errors.
    pub errors: EthErrors,
}
impl InterruptResult {
    /// Returns true if any flag inside [Self::errors] is set.
    #[inline]
    pub fn has_errors(&self) -> bool {
        let errors = self.errors;
        [
            errors.rx_overrun,
            errors.hresp_error,
            errors.tx_frame_corruption_ahb_error,
            errors.tx_late_collision,
            errors.tx_retry_limit_reached,
        ]
        .into_iter()
        .any(|flag| flag)
    }
}
/// Interrupt handler which also has dedicated handling for embassy-net.
///
/// Reads the interrupt status register, condenses the relevant flags into an
/// [InterruptResult] and clears the handled bits in the interrupt, TX and RX status
/// registers with single writes at the end.
///
/// # Arguments
///
/// * `eth_id` - Controller instance whose registers are handled.
/// * `wake_embassy_tx` - Wake the embassy-net TX waker on frame-sent events.
/// * `wake_embassy_rx` - Wake the embassy-net RX waker on frame-received and RX overrun
///   events.
pub(crate) fn on_interrupt(
    eth_id: EthernetId,
    wake_embassy_tx: bool,
    wake_embassy_rx: bool,
) -> InterruptResult {
    // SAFETY: register access is required inside the interrupt handler without a driver
    // handle. Presumably only status/interrupt registers race with the main driver here —
    // confirm there are no conflicting concurrent writes.
    let mut eth_regs = unsafe { eth_id.steal_regs() };
    let status = eth_regs.read_interrupt_status();
    // Accumulate all bits to clear/disable and perform the register writes once at the end.
    let mut clear = InterruptStatus::new_with_raw_value(0);
    let mut tx_status_clear = TxStatus::new_with_raw_value(0);
    let mut rx_status_clear = RxStatus::new_with_raw_value(0);
    let mut disable = InterruptControl::new_with_raw_value(0);
    let mut result = InterruptResult::default();
    if status.frame_sent() {
        if wake_embassy_tx {
            embassy_net::TX_WAKER.wake();
        }
        result.frame_sent = true;
        tx_status_clear.set_complete(true);
        clear.set_frame_sent(true);
    }
    if status.frame_received() {
        if wake_embassy_rx {
            embassy_net::RX_WAKER.wake();
        }
        result.frame_received = true;
        clear.set_frame_received(true);
    }
    if status.hresp_not_ok() {
        // AHB bus error: reflected in the interrupt, TX and RX status registers.
        result.errors.hresp_error = true;
        clear.set_hresp_not_ok(true);
        tx_status_clear.set_hresp_not_ok(true);
        rx_status_clear.set_hresp_not_ok(true);
    }
    if status.tx_retry_limit_reached_or_late_collision() {
        // The interrupt flag is shared; the TX status register tells which case occurred.
        let tx_status = eth_regs.read_tx_status();
        if tx_status.late_collision() {
            result.errors.tx_late_collision = true;
            tx_status_clear.set_late_collision(true);
        } else {
            result.errors.tx_retry_limit_reached = true;
            tx_status_clear.set_retry_limit_reached(true);
        }
        // Clear this in any case.
        tx_status_clear.set_collision(true);
        clear.set_tx_retry_limit_reached_or_late_collision(true);
    }
    if status.tx_descr_read_when_used() {
        result.tx_descr_read_when_used = true;
        // The interrupt status bit is cleared on a read.
        clear.set_tx_descr_read_when_used(true);
        tx_status_clear.set_read_when_used(true);
    }
    if status.tx_frame_corruption_ahb_error() {
        result.errors.tx_frame_corruption_ahb_error = true;
        // The interrupt status bit is cleared on a read.
        tx_status_clear.set_frame_corruption_ahb_error(true);
    }
    if status.rx_descr_read_when_used() {
        result.rx_descr_read_when_used = true;
        // I am guessing that those are related.
        rx_status_clear.set_buf_not_available(true);
        clear.set_rx_descr_read_when_used(true);
    }
    if status.rx_overrun() {
        if wake_embassy_rx {
            embassy_net::RX_WAKER.wake();
        }
        result.errors.rx_overrun = true;
        rx_status_clear.set_overrun(true);
        // NOTE(review): the RX overrun interrupt is disabled here and apparently never
        // re-enabled in this handler — confirm who re-enables it.
        disable.set_rx_overrun(true);
        clear.set_rx_overrun(true);
    }
    // Apply all accumulated clear/disable masks with one write per register.
    eth_regs.write_interrupt_status(clear);
    eth_regs.write_interrupt_disable(disable);
    eth_regs.write_tx_status(tx_status_clear);
    eth_regs.write_rx_status(rx_status_clear);
    result
}

View File

@@ -0,0 +1,364 @@
//! RX buffer descriptor module.
use core::{cell::UnsafeCell, mem::MaybeUninit, sync::atomic::AtomicBool};
use crate::{cache::clean_and_invalidate_data_cache_range, eth::AlignedBuffer};
pub use super::shared::Ownership;
use arbitrary_int::{Number, u2, u3, u13, u30};
use vcell::VolatileCell;
/// Guards against handing out a statically allocated RX descriptor list more than once.
/// NOTE(review): this flag is shared by ALL [DescriptorList] instances, so only one list
/// can ever be taken program-wide — confirm this is intended.
static RX_DESCR_TAKEN: AtomicBool = AtomicBool::new(false);
/// RX buffer descriptor.
///
/// The user should declare an array of this structure inside uncached memory.
///
/// These descriptors are shared between software and hardware and contain information
/// related to frame reception.
#[repr(C, align(8))]
pub struct Descriptor {
    /// The first word of the descriptor: buffer address plus wrap and ownership bits.
    pub word_0: VolatileCell<Word0>,
    /// The second word of the descriptor, holding the reception status.
    pub status: VolatileCell<StatusWord>,
}
/// First RX descriptor word: buffer address plus wrap and ownership control bits.
#[bitbybit::bitfield(u32)]
#[derive(Debug, PartialEq, Eq)]
pub struct Word0 {
    /// The full reception address with the last two bits cleared.
    #[bits(2..=31, rw)]
    addr_upper_30_bits: u30,
    /// Marks the last descriptor in the list.
    #[bit(1, rw)]
    wrap: bool,
    /// Ownership flag shared between software and the DMA hardware.
    #[bit(0, rw)]
    ownership: Ownership,
}
/// This control word contains the status bits.
///
/// If the end of frame bit (15) is not set, the only valid status information is the start of
/// frame bit.
#[bitbybit::bitfield(u32)]
#[derive(Debug, PartialEq, Eq)]
pub struct StatusWord {
    #[bit(31, r)]
    broadcast_detect: bool,
    #[bit(30, r)]
    multicast_hash: bool,
    #[bit(29, r)]
    unicast_hash: bool,
    #[bit(27, r)]
    specific_addr_match: bool,
    /// Specifies which of the 4 specific address registers was matched.
    #[bits(25..=26, r)]
    specific_addr_match_info: u2,
    #[bit(24, r)]
    type_id_match_or_snap_info: bool,
    #[bits(22..=23, r)]
    type_id_match_info_or_chksum_status: u2,
    #[bit(21, r)]
    vlan_tag_detected: bool,
    #[bit(20, r)]
    priority_tag_detected: bool,
    /// Priority field of a detected VLAN or priority tag.
    #[bits(17..=19, r)]
    vlan_prio: u3,
    #[bit(16, r)]
    cfi_bit: bool,
    /// If this bit is not set, the only other valid status bit is the start of frame bit.
    #[bit(15, r)]
    end_of_frame: bool,
    #[bit(14, r)]
    start_of_frame: bool,
    /// Relevant when FCS errors are not ignored.
    /// 0: Frame has good FCS, 1: Frame has bad FCS, but was copied to memory as the ignore FCS
    /// functionality was enabled.
    #[bit(13, r)]
    fcs_status: bool,
    /// Length of the received frame in bytes.
    #[bits(0..=12, r)]
    rx_len: u13,
}
impl Descriptor {
    /// Creates a zero-initialized descriptor.
    #[inline]
    pub const fn new() -> Self {
        Self {
            word_0: VolatileCell::new(Word0::new_with_raw_value(0)),
            status: VolatileCell::new(StatusWord::new_with_raw_value(0)),
        }
    }

    /// Current ownership state of the descriptor.
    #[inline]
    pub fn ownership(&self) -> Ownership {
        self.word_0.get().ownership()
    }

    /// Writes the first descriptor word.
    #[inline]
    pub fn set_word_0(&mut self, word: Word0) {
        self.word_0.set(word);
    }

    /// Writes the status word.
    #[inline]
    pub fn set_word_1(&mut self, word: StatusWord) {
        self.status.set(word);
    }

    /// Reads the first descriptor word.
    ///
    /// Takes `&self` like [Self::ownership]: a volatile read does not mutate the
    /// descriptor, so requiring `&mut self` was needlessly restrictive.
    #[inline]
    pub fn word_0(&self) -> Word0 {
        self.word_0.get()
    }

    /// Reads the status word.
    #[inline]
    pub fn status_word(&self) -> StatusWord {
        self.status.get()
    }
}
impl Default for Descriptor {
    /// Equivalent to [Descriptor::new]: a zero-initialized descriptor.
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}
/// This is a low level wrapper to simplify declaring a global descriptor list.
///
/// It allows placing the descriptor structure statically in memory which might not
/// be zero-initialized.
#[repr(transparent)]
pub struct DescriptorList<const SLOTS: usize>(pub UnsafeCell<MaybeUninit<[Descriptor; SLOTS]>>);

// SAFETY: the inner cell is only handed out once via [DescriptorList::take], which is
// guarded by an atomic flag, so no two contexts can obtain aliasing mutable references.
unsafe impl<const SLOTS: usize> Sync for DescriptorList<SLOTS> {}
impl<const SLOTS: usize> DescriptorList<SLOTS> {
    /// Creates an uninitialized descriptor list.
    #[inline]
    pub const fn new() -> Self {
        Self(UnsafeCell::new(MaybeUninit::uninit()))
    }

    /// Initializes the RX descriptors and returns a mutable reference to them.
    ///
    /// Returns [None] if the descriptors were already taken. NOTE(review): the guard is a
    /// module-global static shared by all instances, so at most one RX descriptor list can
    /// be taken program-wide — confirm this is intended if multiple lists are declared.
    pub fn take(&self) -> Option<&'static mut [Descriptor; SLOTS]> {
        if RX_DESCR_TAKEN.swap(true, core::sync::atomic::Ordering::SeqCst) {
            return None; // Already taken, return None
        }
        let descr = unsafe { &mut *self.0.get() };
        descr.write([const { Descriptor::new() }; SLOTS]);
        Some(unsafe { descr.assume_init_mut() })
    }
}
impl<const SLOTS: usize> Default for DescriptorList<SLOTS> {
    /// Equivalent to [DescriptorList::new]: an uninitialized list.
    fn default() -> Self {
        Self::new()
    }
}
/// Result of scanning the RX descriptor list for received frames.
pub enum FrameScanResult {
    /// No complete frame is pending.
    NoFrames,
    /// A frame which fits into a single descriptor slot was received.
    SingleFrame {
        index: usize,
        size: usize,
        status: StatusWord,
    },
    /// A frame spanning multiple descriptor slots was received.
    MultiSlotFrame {
        first_slot_index: usize,
        last_slot_index: usize,
    },
    /// A start-of-frame slot was followed by another start-of-frame before an end-of-frame
    /// was seen; the intermediate slots were discarded.
    BrokenFragments {
        first_slot_index: usize,
        last_slot_index: usize,
    },
}
/// This is a thin wrapper around a descriptor list.
///
/// It provides a basic higher-level API to perform tasks like descriptor initialization
/// and handling of received frames.
pub struct DescriptorListWrapper<'a> {
    /// Descriptor memory shared with the hardware (declared in uncached memory).
    list: &'a mut [Descriptor],
    /// Index of the next slot to inspect for a received frame.
    current_idx: usize,
}
impl<'a> DescriptorListWrapper<'a> {
#[inline]
pub fn new(descr_list: &'a mut [Descriptor]) -> Self {
Self {
list: descr_list,
current_idx: 0,
}
}
/// Unsafely clone this descriptor list. See safety notes.
//
/// # Safety
//
/// You must not use both the original and the clone at the same time.
pub unsafe fn clone_unchecked(&mut self) -> Self {
Self {
list: unsafe {
core::slice::from_raw_parts_mut(self.list.as_mut().as_mut_ptr(), self.list.len())
},
current_idx: self.current_idx,
}
}
}
impl DescriptorListWrapper<'_> {
    /// Number of descriptor slots in the list.
    #[allow(clippy::len_without_is_empty)]
    #[inline]
    pub fn len(&self) -> usize {
        self.list.len()
    }

    /// Pointer to the first descriptor.
    #[inline]
    pub fn base_ptr(&self) -> *const Descriptor {
        self.list.as_ptr()
    }

    /// Base address of the descriptor list, suitable for the RX queue base address register.
    #[inline]
    pub fn base_addr(&self) -> u32 {
        self.base_ptr() as u32
    }
    /// Resets the descriptor list. This retains the previous configured reception
    /// addresses, but sets the ownership for all descriptors to the hardware again.
    ///
    /// NOTE(review): panics on an empty descriptor list (`list_len - 1` underflow and
    /// `last_mut().unwrap()`) — confirm lists are always non-empty.
    pub fn reset(&mut self) {
        self.current_idx = 0;
        let list_len = self.list.len();
        let mut word_0;
        // All slots except the last: hand back to the hardware with the wrap bit cleared.
        for desc in self.list.iter_mut().take(list_len - 1) {
            word_0 = desc.word_0();
            word_0.set_ownership(Ownership::Hardware);
            word_0.set_wrap(false);
            desc.set_word_0(word_0);
        }
        // The last slot carries the wrap bit so the hardware rolls over to the list base.
        let last = self.list.last_mut().unwrap();
        word_0 = last.word_0();
        word_0.set_ownership(Ownership::Hardware);
        word_0.set_wrap(true);
        last.set_word_0(word_0);
    }
/// Initialize the descriptor list with a provided RX buffer.
///
/// This function performs important initialization routines for the descriptors
/// and also sets the addresses of the provided buffers. The number of buffers and the length
/// of the descriptor list need to be the same.
pub fn init_with_aligned_bufs(&mut self, aligned_bufs: &[AlignedBuffer]) {
self.current_idx = 0;
let list_len = self.list.len();
for (desc, buf) in self.list.iter_mut().take(list_len - 1).zip(aligned_bufs) {
desc.set_word_0(
Word0::builder()
.with_addr_upper_30_bits(u30::new(buf.0.as_ptr() as u32 >> 2))
.with_wrap(false)
.with_ownership(Ownership::Hardware)
.build(),
);
clean_and_invalidate_data_cache_range(
buf.0.as_ptr() as u32,
core::mem::size_of::<AlignedBuffer>(),
)
.expect("TX buffer or buffer size not aligned to cache line size");
}
self.list.last_mut().unwrap().set_word_0(
Word0::builder()
.with_addr_upper_30_bits(u30::new(
aligned_bufs.last().unwrap().0.as_ptr() as u32 >> 2,
))
.with_wrap(true)
.with_ownership(Ownership::Hardware)
.build(),
);
}
    /// This function tries to scan for received frames and handles the frame if it finds one.
    ///
    /// If `scan_full_descr_list` is false, scanning stops at the first slot still owned by
    /// the hardware. Otherwise, hardware-owned slots are skipped and the whole list is
    /// inspected once, starting at the internally tracked current index.
    pub fn scan_and_handle_next_received_frame(
        &mut self,
        scan_full_descr_list: bool,
    ) -> FrameScanResult {
        let mut handled_slots = 0;
        let mut current_idx = self.current_idx;
        let mut start_found = false;
        let mut start_idx = 0;
        while handled_slots < self.list.len() {
            let word_0 = self.list[current_idx].word_0.get();
            if word_0.ownership() == Ownership::Hardware {
                if scan_full_descr_list {
                    current_idx = (current_idx + 1) % self.list.len();
                    handled_slots += 1;
                    continue;
                }
                // The descriptor is still owned by the hardware, so it is not ready for
                // processing.
                return FrameScanResult::NoFrames;
            }
            let status = self.list[current_idx].status_word();
            match (status.start_of_frame(), status.end_of_frame()) {
                (true, true) => {
                    // Complete frame in a single slot: the common case for MTU-sized buffers.
                    self.current_idx = (current_idx + 1) % self.list.len();
                    return FrameScanResult::SingleFrame {
                        index: current_idx,
                        size: status.rx_len().as_usize(),
                        status,
                    };
                }
                (true, false) => {
                    // Consecutive start frame.. Which means something went wrong, and we need
                    // to discard all the slots until the second start frame slot.
                    if start_found {
                        self.clear_slots(start_idx, current_idx);
                        self.current_idx = (current_idx + 1) % self.list.len();
                        return FrameScanResult::BrokenFragments {
                            first_slot_index: start_idx,
                            last_slot_index: current_idx,
                        };
                    } else {
                        start_found = true;
                        start_idx = current_idx;
                    }
                }
                (false, true) => {
                    // Detected frame spanning multiple buffers.. which should really not happen
                    // if we only use frames with a certain MTU, but we will handle it.
                    if start_found {
                        self.current_idx = (current_idx + 1) % self.list.len();
                        return FrameScanResult::MultiSlotFrame {
                            first_slot_index: start_idx,
                            last_slot_index: current_idx,
                        };
                    }
                }
                (false, false) => {
                    // If a slot is neither the start nor the end of frame.
                }
            }
            current_idx = (current_idx + 1) % self.list.len();
            handled_slots += 1;
        }
        FrameScanResult::NoFrames
    }
    /// Clear a slot range by setting the ownership bit back to [Ownership::Hardware].
    ///
    /// The range is half-open: `start` is inclusive, `end` is exclusive, and the walk wraps
    /// around the end of the list. Out-of-bounds indices are ignored silently.
    pub fn clear_slots(&mut self, start: usize, end: usize) {
        if start >= self.list.len() || end >= self.list.len() {
            return;
        }
        let mut current_idx = start;
        while current_idx != end {
            let mut word_0 = self.list[current_idx].word_0.get();
            word_0.set_ownership(Ownership::Hardware);
            self.list[current_idx].set_word_0(word_0);
            current_idx = (current_idx + 1) % self.list.len();
        }
    }
pub fn clear_slot(&mut self, index: usize) {
if index >= self.list.len() {
return;
}
let mut word_0 = self.list[index].word_0.get();
word_0.set_ownership(Ownership::Hardware);
self.list[index].set_word_0(word_0);
}
}

View File

@@ -0,0 +1,300 @@
//! smoltcp driver for the Zynq 7000 ethernet peripheral.
use arbitrary_int::u14;
pub use crate::eth::{EthernetId, InterruptResult};
use crate::{
cache::{CACHE_LINE_SIZE, clean_and_invalidate_data_cache_range, invalidate_data_cache_range},
eth::{rx_descr, tx_descr},
};
/// This interrupt handler should be called when a Gigabit Ethernet interrupt occurs.
///
/// Thin wrapper around the common handler with both embassy-net wakers disabled.
pub fn on_interrupt(eth_id: EthernetId) -> InterruptResult {
    super::on_interrupt(eth_id, false, false)
}
/// RX token handed out by the smoltcp/embassy-net driver.
///
/// Consuming the token passes the received frame to the stack and returns the descriptor
/// slot to the hardware.
pub struct SmoltcpRxToken<'a> {
    /// Descriptor list the received frame belongs to.
    pub(crate) descr_list: &'a mut super::rx_descr::DescriptorListWrapper<'static>,
    /// Descriptor slot holding the received frame.
    pub(crate) slot_index: usize,
    /// Buffer backing the descriptor slot.
    pub(crate) rx_buf: &'a mut super::AlignedBuffer,
    /// Length of the received frame in bytes.
    pub(crate) rx_size: usize,
}
impl embassy_net_driver::RxToken for SmoltcpRxToken<'_> {
    fn consume<R, F>(self, f: F) -> R
    where
        F: FnOnce(&mut [u8]) -> R,
    {
        // Delegate to the shared inherent implementation below.
        self.consume(f)
    }
}

impl smoltcp::phy::RxToken for SmoltcpRxToken<'_> {
    fn consume<R, F>(self, f: F) -> R
    where
        F: FnOnce(&[u8]) -> R,
    {
        // Convert the mutable slice to an immutable slice for the closure
        self.consume(|buf| f(&buf[..]))
    }
}
impl SmoltcpRxToken<'_> {
    /// Shared consume implementation: invalidates the cache range covering the received
    /// data, hands the frame to the closure and returns the descriptor slot to the
    /// hardware afterwards.
    fn consume<R, F>(self, f: F) -> R
    where
        F: FnOnce(&mut [u8]) -> R,
    {
        // The DMA will write the received frame into DDR. The L1 and L2 cache lines for the
        // particular reception address need to be invalidated, to avoid fetching stale data from
        // the cache instead of the DDR.
        // Round the length up to the next cache line boundary.
        let clean_invalidate_len = (self.rx_size + CACHE_LINE_SIZE - 1) & !(CACHE_LINE_SIZE - 1);
        invalidate_data_cache_range(self.rx_buf.0.as_ptr() as u32, clean_invalidate_len)
            .expect("RX buffer or buffer size not aligned to cache line size");
        log::debug!("eth rx {} bytes", self.rx_size);
        log::trace!("rx data: {:x?}", &self.rx_buf.0[0..self.rx_size]);
        let result = f(&mut self.rx_buf.0[0..self.rx_size]);
        // Hand the slot back to the hardware for the next reception.
        self.descr_list.clear_slot(self.slot_index);
        // Okay, this is weird, but we have to do this. I encountered this bug where ICMP replies
        // were duplicated after the descriptor rings wrapped. My theory is that there is
        // some data in the cache after the embassy reception function which needs to be cleaned.
        clean_and_invalidate_data_cache_range(self.rx_buf.0.as_ptr() as u32, clean_invalidate_len)
            .expect("RX buffer or buffer size not aligned to cache line size");
        result
    }
}
/// TX token handed out by the smoltcp/embassy-net driver.
pub struct SmoltcpTxToken<'a> {
    /// Controller whose TX path is started after the frame buffer was filled.
    pub(crate) eth_id: super::EthernetId,
    /// Descriptor list used to queue the frame.
    pub(crate) descr_list: &'a mut super::tx_descr::DescriptorListWrapper<'static>,
    /// Buffers backing the TX descriptor slots.
    pub(crate) tx_bufs: &'a mut [super::AlignedBuffer],
}
impl smoltcp::phy::TxToken for SmoltcpTxToken<'_> {
    fn consume<R, F>(self, len: usize, f: F) -> R
    where
        F: FnOnce(&mut [u8]) -> R,
    {
        // Delegate to the shared inherent implementation below.
        self.consume(len, f)
    }
}

impl embassy_net_driver::TxToken for SmoltcpTxToken<'_> {
    fn consume<R, F>(self, len: usize, f: F) -> R
    where
        F: FnOnce(&mut [u8]) -> R,
    {
        // Delegate to the shared inherent implementation below.
        self.consume(len, f)
    }
}
impl SmoltcpTxToken<'_> {
    /// Shared consume implementation: lets the closure fill the next TX buffer, flushes
    /// the cache range, queues the frame in the descriptor list and starts transmission.
    ///
    /// # Panics
    ///
    /// Panics if `len` exceeds the MTU.
    fn consume<R, F>(self, len: usize, f: F) -> R
    where
        F: FnOnce(&mut [u8]) -> R,
    {
        assert!(len <= super::MTU, "packet length exceeds MTU");
        // In the transmit call, it was checked that the buffer queue actually is not full.
        let tx_idx = self.descr_list.current_tx_idx();
        let buffer = self.tx_bufs.get_mut(tx_idx).unwrap();
        let addr = buffer.0.as_ptr() as u32;
        let result = f(&mut buffer.0[0..len]);
        // Round the length up to the next cache line boundary.
        let clean_invalidate_len = (len + CACHE_LINE_SIZE - 1) & !(CACHE_LINE_SIZE - 1);
        // DMA accesses the DDR memory directly, so we need to flush everything that might
        // still be in the L1 or L2 cache to the DDR.
        clean_and_invalidate_data_cache_range(buffer.0.as_ptr() as u32, clean_invalidate_len)
            .expect("TX buffer or buffer size not aligned to cache line size");
        log::debug!("eth tx {len} bytes");
        log::trace!("tx data: {:x?}", &buffer.0[0..len]);
        self.descr_list
            .prepare_transfer_unchecked(Some(addr), u14::new(len as u16), true, false);
        // Kick off the transmission of the queued frame.
        let mut regs = unsafe { self.eth_id.steal_regs() };
        regs.modify_net_ctrl(|mut val| {
            val.set_start_tx(true);
            val
        });
        result
    }
}
/// smoltcp [Device](smoltcp::phy::Device) implementation wrapping the common driver core.
pub struct Driver(CommonSmoltcpDriver);
impl smoltcp::phy::Device for Driver {
    type RxToken<'a>
        = SmoltcpRxToken<'a>
    where
        Self: 'a;
    type TxToken<'a>
        = SmoltcpTxToken<'a>
    where
        Self: 'a;

    fn receive(
        &mut self,
        _timestamp: smoltcp::time::Instant,
    ) -> Option<(Self::RxToken<'_>, Self::TxToken<'_>)> {
        self.0.receive()
    }

    fn transmit(&mut self, _timestamp: smoltcp::time::Instant) -> Option<Self::TxToken<'_>> {
        self.0.transmit()
    }

    fn capabilities(&self) -> smoltcp::phy::DeviceCapabilities {
        let mut capabilities = smoltcp::phy::DeviceCapabilities::default();
        capabilities.medium = smoltcp::phy::Medium::Ethernet;
        capabilities.max_transmission_unit = super::MTU;
        capabilities.max_burst_size = Some(self.0.burst_size);
        // NOTE(review): Checksum::Both makes smoltcp compute/verify checksums in software.
        // With hardware checksum offload enabled in the DMA configuration, Checksum::None
        // would avoid the redundant work — confirm the intended division of labor.
        capabilities.checksum.ipv4 = smoltcp::phy::Checksum::Both;
        capabilities.checksum.udp = smoltcp::phy::Checksum::Both;
        capabilities.checksum.tcp = smoltcp::phy::Checksum::Both;
        capabilities
    }
}
/// Bundles the RX/TX descriptor lists with their backing frame buffers.
pub struct DescriptorsAndBuffers {
    /// RX descriptor list.
    pub rx_descr: super::rx_descr::DescriptorListWrapper<'static>,
    /// One buffer per RX descriptor slot.
    pub rx_bufs: &'static mut [super::AlignedBuffer],
    /// TX descriptor list.
    pub tx_descr: super::tx_descr::DescriptorListWrapper<'static>,
    /// One buffer per TX descriptor slot.
    pub tx_bufs: &'static mut [super::AlignedBuffer],
}
impl DescriptorsAndBuffers {
    /// Bundles descriptor lists with their backing buffers.
    ///
    /// Returns [None] when a descriptor list length does not match its buffer count.
    pub fn new(
        rx_descr: super::rx_descr::DescriptorListWrapper<'static>,
        rx_bufs: &'static mut [super::AlignedBuffer],
        tx_descr: super::tx_descr::DescriptorListWrapper<'static>,
        tx_bufs: &'static mut [super::AlignedBuffer],
    ) -> Option<Self> {
        let lengths_match = rx_descr.len() == rx_bufs.len() && tx_descr.len() == tx_bufs.len();
        lengths_match.then(move || Self {
            rx_descr,
            rx_bufs,
            tx_descr,
            tx_bufs,
        })
    }

    /// Number of TX descriptor slots, which bounds the TX burst size.
    #[inline]
    pub fn tx_burst_len(&self) -> usize {
        self.tx_descr.len()
    }
}
/// Driver core shared by the blocking smoltcp driver and the embassy-net driver.
pub(crate) struct CommonSmoltcpDriver {
    /// Controller instance this driver operates on.
    pub eth_id: super::EthernetId,
    /// MAC address used by the network stack.
    pub mac_addr: [u8; 6],
    /// Descriptor lists and frame buffers.
    pub bufs: DescriptorsAndBuffers,
    /// Maximum burst size reported to the network stack.
    pub burst_size: usize,
}
impl CommonSmoltcpDriver {
    /// Creates the driver core from its parts.
    pub fn new(
        eth_id: super::EthernetId,
        mac_addr: [u8; 6],
        bufs: DescriptorsAndBuffers,
        burst_size: usize,
    ) -> Self {
        Self {
            burst_size,
            eth_id,
            mac_addr,
            bufs,
        }
    }
    /// Drains all completed transfers from the TX descriptor list, logging any reported
    /// TX errors, until the list reports completion.
    pub fn handle_completed_tx_transfers(&mut self) {
        loop {
            let result = self.bufs.tx_descr.check_and_handle_completed_transfer();
            match result {
                tx_descr::BusyHandlingResult::TxError(tx_error) => {
                    // TODO: An ideal error reporting system would send a message to the user,
                    // but we would need to introduce an abstraction for that..
                    // For now, we log unexpected checksum errors and other errors.
                    if let Some(cksum_error) = tx_error.checksum_generation {
                        // These can occur for ICMP packets.
                        if cksum_error
                            != tx_descr::TransmitChecksumGenerationStatus::PacketNotTcpUdp
                            && cksum_error
                                != tx_descr::TransmitChecksumGenerationStatus::NotVlanOrSnapOrIp
                        {
                            log::warn!("TX checksum generation error: {tx_error:?}");
                        }
                    } else {
                        log::warn!("TX error: {tx_error:?}");
                    }
                }
                tx_descr::BusyHandlingResult::SlotHandled(_) => (),
                tx_descr::BusyHandlingResult::Complete => break,
            }
        }
    }
    /// Polls for a received frame.
    ///
    /// Completed TX transfers are reaped first. Returns [None] if no complete frame is
    /// pending, or if the TX descriptor queue is full, because an RX token must always be
    /// handed out together with a TX token.
    pub fn receive(&mut self) -> Option<(SmoltcpRxToken<'_>, SmoltcpTxToken<'_>)> {
        self.handle_completed_tx_transfers();
        if self.bufs.tx_descr.full() {
            // TODO: When introducing an abstraction for notifying the user, send a message
            // for this case.
            log::warn!("TX descriptor queue is full");
            return None;
        }
        match self.bufs.rx_descr.scan_and_handle_next_received_frame(true) {
            // Nothing to do.
            rx_descr::FrameScanResult::NoFrames => None,
            rx_descr::FrameScanResult::SingleFrame {
                index,
                size,
                status,
            } => {
                log::trace!(
                    "eth rx frame, fsc status {:?}, cksum status {:?}",
                    status.fcs_status(),
                    status.type_id_match_info_or_chksum_status()
                );
                let rx_buf = self.bufs.rx_bufs.get_mut(index).unwrap();
                Some((
                    SmoltcpRxToken {
                        descr_list: &mut self.bufs.rx_descr,
                        slot_index: index,
                        rx_buf,
                        rx_size: size,
                    },
                    SmoltcpTxToken {
                        eth_id: self.eth_id,
                        descr_list: &mut self.bufs.tx_descr,
                        tx_bufs: self.bufs.tx_bufs,
                    },
                ))
            }
            rx_descr::FrameScanResult::MultiSlotFrame {
                first_slot_index: _,
                last_slot_index: _,
            } => {
                // We can not really handle multi-slot frames.. this should never happen.
                None
            }
            rx_descr::FrameScanResult::BrokenFragments {
                first_slot_index: _,
                last_slot_index: _,
            } => None,
        }
    }
    /// Returns a TX token if a free TX descriptor slot is available.
    ///
    /// Completed TX transfers are reaped first to free up descriptor slots.
    pub fn transmit(&mut self) -> Option<SmoltcpTxToken<'_>> {
        // Handle any completed frames first.
        self.handle_completed_tx_transfers();
        if self.bufs.tx_descr.full() {
            // TODO: When introducing an abstraction for notifying the user, send a message
            // for this case.
            log::warn!("TX descriptor queue is full");
            return None;
        }
        Some(SmoltcpTxToken {
            eth_id: self.eth_id,
            descr_list: &mut self.bufs.tx_descr,
            tx_bufs: self.bufs.tx_bufs,
        })
    }
}

View File

@@ -0,0 +1,337 @@
use core::{cell::UnsafeCell, mem::MaybeUninit, sync::atomic::AtomicBool};
use arbitrary_int::u14;
pub use super::shared::Ownership;
use vcell::VolatileCell;
/// TX buffer descriptor.
///
/// The user should declare an array of this structure inside uncached memory.
///
/// These descriptors are shared between software and hardware and contain information
/// related to frame transmission.
#[repr(C, align(8))]
pub struct Descriptor {
    /// The first word of the descriptor which is the byte address of the buffer.
    pub word0: VolatileCell<u32>,
    /// The second word of the descriptor.
    pub word1: VolatileCell<Word1>,
}
/// Guards against handing out a statically allocated TX descriptor list more than once.
/// NOTE(review): this flag is shared by ALL [DescriptorList] instances, so only one list
/// can ever be taken program-wide — confirm this is intended.
static TX_DESCR_TAKEN: AtomicBool = AtomicBool::new(false);
/// This is a low level wrapper to simplify declaring a global descriptor list.
///
/// It allows placing the descriptor structure statically in memory which might not
/// be zero-initialized.
#[repr(transparent)]
pub struct DescriptorList<const SLOTS: usize>(pub UnsafeCell<MaybeUninit<[Descriptor; SLOTS]>>);

// SAFETY: the inner cell is only handed out once via [DescriptorList::take], which is
// guarded by an atomic flag, so no two contexts can obtain aliasing mutable references.
unsafe impl<const SLOTS: usize> Sync for DescriptorList<SLOTS> {}
impl<const SLOTS: usize> DescriptorList<SLOTS> {
    /// Creates an uninitialized descriptor list.
    #[inline]
    pub const fn new() -> Self {
        Self(UnsafeCell::new(MaybeUninit::uninit()))
    }

    /// Initializes the TX descriptors and returns a mutable reference to them.
    ///
    /// Returns [None] if the descriptors were already taken. NOTE(review): the guard is a
    /// module-global static shared by all instances, so at most one TX descriptor list can
    /// be taken program-wide — confirm this is intended if multiple lists are declared.
    pub fn take(&self) -> Option<&'static mut [Descriptor; SLOTS]> {
        if TX_DESCR_TAKEN.swap(true, core::sync::atomic::Ordering::SeqCst) {
            return None; // Already taken, return None
        }
        let descr = unsafe { &mut *self.0.get() };
        descr.write([const { Descriptor::new() }; SLOTS]);
        Some(unsafe { descr.assume_init_mut() })
    }
}
impl<const SLOTS: usize> Default for DescriptorList<SLOTS> {
    /// Equivalent to [DescriptorList::new]: an uninitialized list.
    fn default() -> Self {
        Self::new()
    }
}
/// Checksum generation status reported by the hardware in the TX descriptor status word.
#[bitbybit::bitenum(u3, exhaustive = true)]
#[derive(Debug, PartialEq, Eq)]
pub enum TransmitChecksumGenerationStatus {
    NoError = 0b000,
    VlanError = 0b001,
    SnapError = 0b010,
    IpError = 0b011,
    NotVlanOrSnapOrIp = 0b100,
    NonSupportedPacketFragmentation = 0b101,
    PacketNotTcpUdp = 0b110,
    PrematureEndOfFrame = 0b111,
}
/// Second TX descriptor word: control bits, error status and the frame length.
#[bitbybit::bitfield(u32, default = 0x0)]
#[derive(Debug, PartialEq, Eq)]
pub struct Word1 {
    /// The ownership bit must be set to [Ownership::Hardware] if a frame should be transmitted.
    ///
    /// The controller will set this to [Ownership::Software] once the frame has been transmitted.
    #[bit(31, rw)]
    ownership: Ownership,
    /// Marks the last descriptor in the list.
    #[bit(30, rw)]
    wrap: bool,
    #[bit(29, rw)]
    retry_limit_exceeded: bool,
    #[bit(27, rw)]
    transmit_frame_corruption_ahb_error: bool,
    #[bit(26, rw)]
    late_collision: bool,
    /// Checksum generation result reported by the hardware.
    #[bits(20..=22, rw)]
    checksum_status: TransmitChecksumGenerationStatus,
    /// Suppress automatic CRC generation for this frame.
    #[bit(16, rw)]
    no_crc_generation: bool,
    /// Marks the last buffer of a frame spanning multiple descriptors.
    #[bit(15, rw)]
    last_buffer: bool,
    /// Length of the frame data in this buffer in bytes.
    #[bits(0..=13, rw)]
    tx_len: u14,
}
impl Descriptor {
    /// Creates a zero-initialized descriptor.
    #[inline]
    pub const fn new() -> Self {
        Self {
            word0: VolatileCell::new(0),
            word1: VolatileCell::new(Word1::new_with_raw_value(0)),
        }
    }

    /// Current ownership state of the descriptor.
    #[inline]
    pub fn ownership(&self) -> Ownership {
        self.word1.get().ownership()
    }

    /// Sets the buffer byte address (first descriptor word).
    #[inline]
    pub fn set_addr(&self, addr: u32) {
        self.word0.set(addr)
    }

    /// Reads the control/status word.
    #[inline]
    pub fn read_word_1(&self) -> Word1 {
        self.word1.get()
    }

    /// Writes the control/status word.
    #[inline]
    pub fn set_word_1(&self, word1: Word1) {
        self.word1.set(word1);
    }

    /// Set the information for a transfer.
    ///
    /// Sets the optional buffer address, the frame length and the last-buffer/CRC control
    /// bits, and finally hands the descriptor to the hardware via the ownership bit.
    /// "Unchecked" because the current ownership state is not verified first.
    pub fn setup_tx_transfer_unchecked(
        &mut self,
        addr: Option<u32>,
        tx_len: u14,
        last_buffer: bool,
        no_crc_generation: bool,
    ) {
        if let Some(addr) = addr {
            self.set_addr(addr);
        }
        // Perform the read-modify-write sequence manually to ensure a minimum of reads/writes
        // for the uncached memory.
        let mut word1 = self.read_word_1();
        word1.set_tx_len(tx_len);
        word1.set_last_buffer(last_buffer);
        word1.set_no_crc_generation(no_crc_generation);
        word1.set_ownership(Ownership::Hardware);
        self.set_word_1(word1);
    }
}
impl Default for Descriptor {
    /// Equivalent to [Descriptor::new]: a zero-initialized descriptor.
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}
/// Errors reported by the hardware for a transmitted frame.
#[derive(Debug, PartialEq, Eq, thiserror::Error)]
#[error("tx error: {self:?}")]
pub struct TxError {
    pub retry_limit_exceeded: bool,
    pub late_collisions: bool,
    pub ahb_error: bool,
    /// [None] if checksum generation succeeded.
    pub checksum_generation: Option<TransmitChecksumGenerationStatus>,
}
impl TxError {
    /// Extract all error flags from a descriptor status word.
    pub fn from_word1(word1: &Word1) -> Self {
        let checksum_status = word1.checksum_status();
        // Only report a checksum status when it actually signals an error.
        let checksum_generation = if checksum_status == TransmitChecksumGenerationStatus::NoError {
            None
        } else {
            Some(checksum_status)
        };
        Self {
            retry_limit_exceeded: word1.retry_limit_exceeded(),
            late_collisions: word1.late_collision(),
            ahb_error: word1.transmit_frame_corruption_ahb_error(),
            checksum_generation,
        }
    }
}
/// Result of an attempt to increment a descriptor ring index.
///
/// Derives added for consistency with [BusyHandlingResult], which is
/// `Debug + PartialEq + Eq`; `Clone + Copy` is free for a fieldless enum.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum IncrementResult {
    /// The increment could not be performed yet (presumably the target slot is still
    /// in use by hardware — confirm against callers).
    Busy,
    /// The increment succeeded.
    Ok,
}
/// Outcome of a single call to
/// `DescriptorListWrapper::check_and_handle_completed_transfer`.
#[derive(Debug, PartialEq, Eq)]
pub enum BusyHandlingResult {
    /// Handled a descriptor slot where a TX error has occurred.
    TxError(TxError),
    /// Handled one busy descriptor slot. More calls to this function are required to handle
    /// all busy slots.
    SlotHandled(super::tx_descr::Word1),
    /// The busy index has caught up to the transmission index (all frames have been sent), or
    /// the busy index is at an active transmission index.
    Complete,
}
/// Wrapper managing a TX descriptor ring together with the two indices that track
/// its state.
pub struct DescriptorListWrapper<'a> {
    // Backing descriptor ring, shared with the DMA hardware.
    list: &'a mut [Descriptor],
    /// The head index is used to handle the transmission of new frames.
    tx_idx: usize,
    /// The tail index is used to track the progress of active transmissions.
    busy_idx: usize,
}
impl core::fmt::Debug for DescriptorListWrapper<'_> {
    /// Prints the indices and the ring length; the descriptor contents themselves
    /// are intentionally omitted.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let mut dbg = f.debug_struct("DescriptorList");
        dbg.field("tx_idx", &self.tx_idx);
        dbg.field("busy_idx", &self.busy_idx);
        dbg.field("list_len", &self.list.len());
        dbg.finish()
    }
}
impl<'a> DescriptorListWrapper<'a> {
#[inline]
pub fn new(descr_list: &'a mut [Descriptor]) -> Self {
Self {
list: descr_list,
tx_idx: 0,
busy_idx: 0,
}
}
}
impl DescriptorListWrapper<'_> {
    /// Number of descriptors in the ring.
    #[allow(clippy::len_without_is_empty)]
    #[inline]
    pub fn len(&self) -> usize {
        self.list.len()
    }

    /// Pointer to the first descriptor of the list.
    #[inline]
    pub fn base_ptr(&self) -> *const Descriptor {
        self.list.as_ptr()
    }

    /// Base address of the descriptor list as a raw 32-bit address, suitable for the
    /// TX buffer queue base address register.
    #[inline]
    pub fn base_addr(&self) -> u32 {
        self.base_ptr() as u32
    }

    /// Reset both indices and re-initialize every descriptor status word.
    ///
    /// All descriptors are handed to software with cleared status flags and zero length.
    /// Only the last descriptor gets the wrap bit set. An empty list is a no-op;
    /// the previous implementation underflowed `len - 1` and panicked on
    /// `last_mut().unwrap()` in that case.
    pub fn init_or_reset(&mut self) {
        self.tx_idx = 0;
        self.busy_idx = 0;
        let mut reset_val = Word1::builder()
            .with_ownership(Ownership::Software)
            .with_wrap(false)
            .with_retry_limit_exceeded(false)
            .with_transmit_frame_corruption_ahb_error(false)
            .with_late_collision(false)
            .with_checksum_status(TransmitChecksumGenerationStatus::NoError)
            .with_no_crc_generation(false)
            .with_last_buffer(false)
            .with_tx_len(u14::new(0))
            .build();
        // split_last_mut handles the empty-list case gracefully and avoids the
        // manual `take(len - 1)` / `last_mut().unwrap()` pair.
        if let Some((last, rest)) = self.list.split_last_mut() {
            for desc in rest {
                desc.set_word_1(reset_val);
            }
            reset_val.set_wrap(true);
            last.set_word_1(reset_val);
        }
    }

    /// Advance the transmission index, wrapping around at the end of the list.
    #[inline]
    pub fn increment_tx_idx(&mut self) {
        self.tx_idx = (self.tx_idx + 1) % self.list.len();
    }

    /// Increment the busy index without checking whether it has become
    /// larger than the transmission index.
    #[inline]
    pub fn increment_busy_idx_unchecked(&mut self) {
        self.busy_idx = (self.busy_idx + 1) % self.list.len();
    }

    /// The ring is full when the next transmission slot is still owned by hardware.
    #[inline]
    pub fn full(&self) -> bool {
        self.list[self.tx_idx].ownership() == Ownership::Hardware
    }

    /// Check whether a transfer has completed and handles the descriptor accordingly.
    ///
    /// This should be called continuously when a TX error or a TX completion interrupt has
    /// occurred until it returns [BusyHandlingResult::Complete].
    pub fn check_and_handle_completed_transfer(&mut self) -> BusyHandlingResult {
        let word1 = self.list[self.busy_idx].word1.get();
        // Slot still in flight, or busy index caught up with the TX index: nothing to do.
        if word1.ownership() == Ownership::Hardware || self.busy_idx == self.tx_idx {
            return BusyHandlingResult::Complete;
        }
        if word1.transmit_frame_corruption_ahb_error()
            || word1.retry_limit_exceeded()
            || word1.checksum_status() != TransmitChecksumGenerationStatus::NoError
            || word1.late_collision()
        {
            self.increment_busy_idx_unchecked();
            return BusyHandlingResult::TxError(TxError::from_word1(&word1));
        }
        // Reset the slot to a clean software-owned state, preserving only the wrap bit.
        self.list[self.busy_idx].set_word_1(
            Word1::builder()
                .with_ownership(Ownership::Software)
                .with_wrap(word1.wrap())
                .with_retry_limit_exceeded(false)
                .with_transmit_frame_corruption_ahb_error(false)
                .with_late_collision(false)
                .with_checksum_status(TransmitChecksumGenerationStatus::NoError)
                .with_no_crc_generation(false)
                .with_last_buffer(false)
                .with_tx_len(u14::new(0))
                .build(),
        );
        self.increment_busy_idx_unchecked();
        BusyHandlingResult::SlotHandled(word1)
    }

    /// Current transmission (head) index.
    #[inline]
    pub fn current_tx_idx(&self) -> usize {
        self.tx_idx
    }

    /// Prepare a transfer without checking whether that particular descriptor slot is used by
    /// the hardware.
    pub fn prepare_transfer_unchecked(
        &mut self,
        addr: Option<u32>,
        tx_len: u14,
        last_buffer: bool,
        no_crc_generation: bool,
    ) {
        self.list[self.tx_idx].setup_tx_transfer_unchecked(
            addr,
            tx_len,
            last_buffer,
            no_crc_generation,
        );
        self.increment_tx_idx();
    }
}

View File

@@ -148,6 +148,23 @@ impl LowLevelGpio {
self.reconfigure_slcr_mio_cfg(false, pullup, Some(mux_conf));
}
pub fn set_mio_pin_config(&mut self, config: zynq7000::slcr::mio::Config) {
let raw_offset = self.offset.offset();
// Safety: We only modify the MIO config of the pin.
let mut slcr_wrapper = unsafe { Slcr::steal() };
slcr_wrapper.modify(|mut_slcr| mut_slcr.write_mio_pins(raw_offset, config).unwrap());
}
/// Set the MIO pin configuration with an unlocked SLCR.
pub fn set_mio_pin_config_with_unlocked_slcr(
&mut self,
slcr: &mut zynq7000::slcr::MmioSlcr<'static>,
config: zynq7000::slcr::mio::Config,
) {
let raw_offset = self.offset.offset();
slcr.write_mio_pins(raw_offset, config).unwrap();
}
#[inline]
pub fn is_low(&self) -> bool {
let (offset, in_reg) = self.get_data_in_reg_and_local_offset();

View File

@@ -31,10 +31,27 @@ impl MuxConf {
Self { l3, l2, l1, l0 }
}
#[inline]
pub const fn new_with_l0() -> Self {
Self::new(true, false, u2::new(0b00), u3::new(0b000))
}
#[inline]
pub const fn new_with_l1() -> Self {
Self::new(false, true, u2::new(0b00), u3::new(0b000))
}
#[inline]
pub const fn new_with_l2(l2: u2) -> Self {
Self::new(false, false, l2, u3::new(0b000))
}
#[inline]
pub const fn new_with_l3(l3: u3) -> Self {
Self::new(false, false, u2::new(0b00), l3)
}
#[inline]
pub const fn new_for_gpio() -> Self {
Self::new(false, false, u2::new(0), u3::new(0))
}

View File

@@ -384,6 +384,8 @@ pub struct IoPeriphPin {
}
impl IoPeriphPin {
/// Constructor for IO peripheral pins where only the multiplexer and pullup configuration
/// need to be changed.
pub fn new(pin: impl MioPinMarker, mux_conf: MuxConf, pullup: Option<bool>) -> Self {
let mut low_level = LowLevelGpio::new(PinOffset::Mio(pin.offset()));
low_level.configure_as_io_periph_pin(mux_conf, pullup);
@@ -392,6 +394,43 @@ impl IoPeriphPin {
mux_conf,
}
}
/// Constructor to fully configure an IO peripheral pin with a specific MIO pin configuration.
pub fn new_with_full_config(
pin: impl MioPinMarker,
config: zynq7000::slcr::mio::Config,
) -> Self {
let mut low_level = LowLevelGpio::new(PinOffset::Mio(pin.offset()));
low_level.set_mio_pin_config(config);
Self {
pin: low_level,
mux_conf: MuxConf::new(
config.l0_sel(),
config.l1_sel(),
config.l2_sel(),
config.l3_sel(),
),
}
}
/// Constructor to fully configure an IO peripheral pin with a specific MIO pin configuration.
pub fn new_with_full_config_and_unlocked_slcr(
pin: impl MioPinMarker,
slcr: &mut zynq7000::slcr::MmioSlcr<'static>,
config: zynq7000::slcr::mio::Config,
) -> Self {
let mut low_level = LowLevelGpio::new(PinOffset::Mio(pin.offset()));
low_level.set_mio_pin_config_with_unlocked_slcr(slcr, config);
Self {
pin: low_level,
mux_conf: MuxConf::new(
config.l0_sel(),
config.l1_sel(),
config.l2_sel(),
config.l3_sel(),
),
}
}
}
impl IoPinProvider for IoPeriphPin {

View File

@@ -12,7 +12,9 @@
use slcr::Slcr;
use zynq7000::slcr::LevelShifterReg;
pub mod cache;
pub mod clocks;
pub mod eth;
pub mod gic;
pub mod gpio;
pub mod gtc;

View File

@@ -14,7 +14,7 @@ impl Slcr {
/// This method unsafely steals the SLCR MMIO block and then calls a user provided function
/// with the [SLCR MMIO][MmioSlcr] block as an input argument. It is the user's responsibility
/// that the SLCR is not used concurrently in a way which leads to data races.
pub unsafe fn with<F: FnMut(&mut MmioSlcr)>(mut f: F) {
pub unsafe fn with<F: FnOnce(&mut MmioSlcr<'static>)>(f: F) {
let mut slcr = unsafe { zynq7000::slcr::Slcr::new_mmio_fixed() };
slcr.write_unlock(UNLOCK_KEY);
f(&mut slcr);

View File

@@ -12,11 +12,13 @@ categories = ["embedded", "no-std", "hardware-support"]
[dependencies]
cortex-a-rt = { version = "0.1", optional = true, features = ["vfp-dp"] }
cortex-ar = "0.2"
cortex-ar = { version = "0.2", git = "https://github.com/rust-embedded/cortex-ar.git", rev = "79dba7000d2090d13823bfb783d9d64be8b778d2" }
arbitrary-int = "1.3"
zynq-mmu = { path = "../zynq-mmu", version = "0.1.0" }
[features]
default = ["rt"]
tools = []
tools = ["zynq-mmu/tools"]
rt = ["dep:cortex-a-rt"]
[[bin]]

View File

@@ -4,6 +4,17 @@ use std::process::Command;
use zynq7000_rt::mmu::ONE_MB;
pub use zynq7000_rt::mmu::segments::*;
macro_rules! write_l1_section {
($writer:expr, $offset:expr, $attr:expr) => {
writeln!(
$writer,
"L1Section::new_with_addr_and_attrs({:#010x}, {}),",
$offset, $attr
)
.unwrap();
};
}
fn main() {
let file_path = "src/mmu_table.rs";
let file = File::create(file_path).expect("Failed to create file");
@@ -35,14 +46,17 @@ fn main() {
+ OCM_MAPPED_HIGH,
4096
);
let mut buf_writer = std::io::BufWriter::new(file);
writeln!(
buf_writer,
"//! This file was auto-generated by table-gen.rs"
)
.unwrap();
writeln!(buf_writer, "use crate::mmu::section_attrs;").unwrap();
writeln!(buf_writer, "use cortex_ar::mmu::L1Section;").unwrap();
writeln!(buf_writer, "use crate::mmu::{{section_attrs, L1Table}};").unwrap();
writeln!(buf_writer, "use zynq_mmu::L1Table;").unwrap();
writeln!(buf_writer, "").unwrap();
writeln!(buf_writer, "/// MMU Level 1 Page table.").unwrap();
@@ -54,7 +68,7 @@ fn main() {
.unwrap();
writeln!(
buf_writer,
"pub const MMU_L1_PAGE_TABLE: L1Table = L1Table(["
"pub static MMU_L1_PAGE_TABLE: L1Table = L1Table::new(["
)
.unwrap();
@@ -63,44 +77,24 @@ fn main() {
"// First DDR segment, OCM memory (0x0000_0000 - 0x0010_0000)"
)
.unwrap();
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_ddr
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_ddr);
offset += ONE_MB;
writeln!(buf_writer, "// DDR memory (0x00100000 - 0x4000_0000)").unwrap();
for _ in 0..DDR_FULL_ACCESSIBLE {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_ddr
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_ddr);
offset += ONE_MB;
}
writeln!(buf_writer, "// FPGA slave 0 (0x4000_0000 - 0x8000_0000)").unwrap();
for _ in 0..FPGA_SLAVE {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_fpga_slaves
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_fpga_slaves);
offset += ONE_MB;
}
writeln!(buf_writer, "// FPGA slave 1 (0x8000_0000 - 0xC000_0000)").unwrap();
for _ in 0..FPGA_SLAVE {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_fpga_slaves
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_fpga_slaves);
offset += ONE_MB;
}
@@ -110,12 +104,7 @@ fn main() {
)
.unwrap();
for _ in 0..UNASSIGNED_0 {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_unassigned
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_unassigned);
offset += ONE_MB;
}
@@ -125,12 +114,7 @@ fn main() {
)
.unwrap();
for _ in 0..IO_PERIPHS {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_shared_dev
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_shared_dev);
offset += ONE_MB;
}
@@ -140,45 +124,25 @@ fn main() {
)
.unwrap();
for _ in 0..UNASSIGNED_1 {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_unassigned
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_unassigned);
offset += ONE_MB;
}
writeln!(buf_writer, "// NAND (0xE100_0000 - 0xE200_0000)").unwrap();
for _ in 0..NAND {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_shared_dev
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_shared_dev);
offset += ONE_MB;
}
writeln!(buf_writer, "// NOR (0xE200_0000 - 0xE400_0000)").unwrap();
for _ in 0..NOR {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_shared_dev
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_shared_dev);
offset += ONE_MB;
}
writeln!(buf_writer, "// SRAM (0xE400_0000 - 0xE600_0000)").unwrap();
for _ in 0..SRAM {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_sram
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_sram);
offset += ONE_MB;
}
@@ -188,12 +152,7 @@ fn main() {
)
.unwrap();
for _ in 0..SEGMENTS_UNASSIGNED_2 {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_unassigned
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_unassigned);
offset += ONE_MB;
}
@@ -203,12 +162,7 @@ fn main() {
)
.unwrap();
for _ in 0..AMBA_APB {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_shared_dev
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_shared_dev);
offset += ONE_MB;
}
@@ -218,23 +172,13 @@ fn main() {
)
.unwrap();
for _ in 0..UNASSIGNED_3 {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_unassigned
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_unassigned);
offset += ONE_MB;
}
writeln!(buf_writer, "// QSPI XIP (0xFC00_0000 - 0xFE00_0000)").unwrap();
for _ in 0..QSPI_XIP {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_qspi
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_qspi);
offset += ONE_MB;
}
@@ -244,24 +188,14 @@ fn main() {
)
.unwrap();
for _ in 0..UNASSIGNED_4 {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_unassigned
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_unassigned);
offset += ONE_MB;
}
writeln!(buf_writer, "// OCM High (0xFFF0_0000 - 0xFFFF_FFFF)").unwrap();
let mut offset_u64 = offset as u64;
for _ in 0..OCM_MAPPED_HIGH {
writeln!(
buf_writer,
"L1Section::new({}, {}).raw_value(),",
offset, attr_ocm_high
)
.unwrap();
write_l1_section!(buf_writer, offset, attr_ocm_high);
offset_u64 += ONE_MB as u64;
}

View File

@@ -3,9 +3,22 @@
//! This includes basic low-level startup code similar to the bare-metal boot routines
//! [provided by Xilinx](https://github.com/Xilinx/embeddedsw/tree/master/lib/bsp/standalone/src/arm/cortexa9/gcc).
#![no_std]
#[cfg(feature = "rt")]
pub use cortex_a_rt::*;
#[cfg(feature = "rt")]
use zynq_mmu::L1TableWrapper;
pub mod mmu;
#[cfg(feature = "rt")]
mod mmu_table;
#[cfg(feature = "rt")]
pub mod rt;
/// Retrieves a mutable reference to the MMU L1 page table.
#[cfg(feature = "rt")]
pub fn mmu_l1_table_mut() -> L1TableWrapper<'static> {
let mmu_table = mmu_table::MMU_L1_PAGE_TABLE.0.get();
// Safety: We retrieve a reference to the MMU page table singleton.
L1TableWrapper::new(unsafe { &mut *mmu_table })
}

View File

@@ -84,17 +84,23 @@ pub mod segments {
}
pub mod section_attrs {
use arbitrary_int::u4;
use cortex_ar::mmu::{
AccessPermissions, CacheableMemoryAttribute, MemoryRegionAttributes, SectionAttributes,
};
pub const DEFAULT_DOMAIN: u4 = u4::new(0b0000);
// DDR is in different domain, but all domains are set as manager domains during run-time
// initialization.
pub const DDR_DOMAIN: u4 = u4::new(0b1111);
pub const DDR: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: true,
access: AccessPermissions::FullAccess,
// Manager domain
domain: 0b1111,
domain: DDR_DOMAIN,
execute_never: false,
memory_attrs: MemoryRegionAttributes::CacheableMemory {
inner: CacheableMemoryAttribute::WriteBackWriteAlloc,
@@ -107,7 +113,7 @@ pub mod section_attrs {
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
domain: 0b0000,
domain: DEFAULT_DOMAIN,
execute_never: false,
memory_attrs: MemoryRegionAttributes::StronglyOrdered.as_raw(),
};
@@ -116,7 +122,7 @@ pub mod section_attrs {
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
domain: 0b0000,
domain: DEFAULT_DOMAIN,
execute_never: false,
memory_attrs: MemoryRegionAttributes::ShareableDevice.as_raw(),
};
@@ -125,7 +131,7 @@ pub mod section_attrs {
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
domain: 0b0000,
domain: DEFAULT_DOMAIN,
execute_never: false,
memory_attrs: MemoryRegionAttributes::OuterAndInnerWriteBackNoWriteAlloc.as_raw(),
};
@@ -134,7 +140,7 @@ pub mod section_attrs {
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
domain: 0b0000,
domain: DEFAULT_DOMAIN,
execute_never: false,
memory_attrs: MemoryRegionAttributes::OuterAndInnerWriteThroughNoWriteAlloc.as_raw(),
};
@@ -143,7 +149,7 @@ pub mod section_attrs {
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
domain: 0b0000,
domain: DEFAULT_DOMAIN,
execute_never: false,
memory_attrs: MemoryRegionAttributes::CacheableMemory {
inner: CacheableMemoryAttribute::WriteThroughNoWriteAlloc,
@@ -156,18 +162,12 @@ pub mod section_attrs {
p_bit: false,
shareable: false,
access: AccessPermissions::PermissionFault,
domain: 0b0000,
domain: DEFAULT_DOMAIN,
execute_never: false,
memory_attrs: MemoryRegionAttributes::StronglyOrdered.as_raw(),
};
}
pub const NUM_L1_PAGE_TABLE_ENTRIES: usize = 4096;
#[repr(C, align(16384))]
#[cfg(feature = "rt")]
pub struct L1Table(pub(crate) [u32; NUM_L1_PAGE_TABLE_ENTRIES]);
/// Load the MMU translation table base address into the MMU.
///
/// # Safety
@@ -178,7 +178,7 @@ pub struct L1Table(pub(crate) [u32; NUM_L1_PAGE_TABLE_ENTRIES]);
#[unsafe(no_mangle)]
#[cfg(feature = "rt")]
unsafe extern "C" fn load_mmu_table() {
let table_base = &crate::mmu_table::MMU_L1_PAGE_TABLE.0 as *const _ as u32;
let table_base = crate::mmu_table::MMU_L1_PAGE_TABLE.0.get() as u32;
unsafe {
core::arch::asm!(

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,7 @@ categories = ["embedded", "no-std", "hardware-support"]
[dependencies]
static_assertions = "1.1"
derive-mmio = { version = "0.5", default-features = false }
derive-mmio = { version = "0.6", default-features = false }
bitbybit = "1.3"
arbitrary-int = "1.3"
rustversion = "1"

603
zynq7000/src/eth.rs Normal file
View File

@@ -0,0 +1,603 @@
use arbitrary_int::{u2, u5};
/// Base address of the first Gigabit Ethernet MAC (GEM 0).
pub const GEM_0_BASE_ADDR: usize = 0xE000_B000;
/// Base address of the second Gigabit Ethernet MAC (GEM 1).
pub const GEM_1_BASE_ADDR: usize = 0xE000_C000;
/// Network control register.
///
/// Write-only bits trigger one-shot actions; read-write bits enable subsystems
/// like RX, TX and the PHY management port.
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct NetworkControl {
    #[bit(18, w)]
    flush_next_rx_dpram_pkt: bool,
    #[bit(17, w)]
    tx_pfc_pri_pause_frame: bool,
    #[bit(16, w)]
    enable_pfc_pri_pause_rx: bool,
    #[bit(12, w)]
    zero_pause_tx: bool,
    #[bit(11, w)]
    pause_tx: bool,
    #[bit(10, w)]
    stop_tx: bool,
    #[bit(9, w)]
    start_tx: bool,
    #[bit(8, rw)]
    back_pressure: bool,
    #[bit(7, rw)]
    statistics_write_enable: bool,
    #[bit(6, w)]
    increment_statistics: bool,
    #[bit(5, w)]
    clear_statistics: bool,
    #[bit(4, rw)]
    management_port_enable: bool,
    #[bit(3, rw)]
    tx_enable: bool,
    #[bit(2, rw)]
    rx_enable: bool,
    #[bit(1, rw)]
    loopback_local: bool,
}
/// The speed mode selects between 10 Mbps and 100 Mbps if the Gigabit enable bit is cleared.
#[bitbybit::bitenum(u1, exhaustive = true)]
#[derive(Debug, PartialEq, Eq)]
pub enum SpeedMode {
    /// 10 Mbps operation.
    Low10Mbps = 0,
    /// 100 Mbps operation.
    High100Mbps = 1,
}
/// Physical coding sublayer / PHY interface selection.
#[bitbybit::bitenum(u1, exhaustive = true)]
#[derive(Debug, PartialEq, Eq)]
pub enum PcsSelect {
    /// GMII/MII interface.
    GmiiMii = 0,
    /// Ten-bit interface (TBI).
    Tbi = 1,
}
/// Clock divider used to derive the MDC clock for the PHY management interface.
#[bitbybit::bitenum(u3, exhaustive = true)]
#[derive(Debug, PartialEq, Eq)]
pub enum MdcClkDiv {
    Div8 = 0,
    Div16 = 1,
    Div32 = 2,
    Div48 = 3,
    Div64 = 4,
    Div96 = 5,
    Div128 = 6,
    Div224 = 7,
}
impl MdcClkDiv {
    /// Numeric divisor corresponding to this register setting.
    ///
    /// Made `const` so it can be used in constant contexts, consistent with
    /// [BurstLength::reg_value].
    pub const fn divisor(&self) -> usize {
        match self {
            MdcClkDiv::Div8 => 8,
            MdcClkDiv::Div16 => 16,
            MdcClkDiv::Div32 => 32,
            MdcClkDiv::Div48 => 48,
            MdcClkDiv::Div64 => 64,
            MdcClkDiv::Div96 => 96,
            MdcClkDiv::Div128 => 128,
            MdcClkDiv::Div224 => 224,
        }
    }
}
/// Network configuration register (pause handling, MDC divider, duplex, speed, frame
/// filtering and checksum offload configuration).
#[bitbybit::bitfield(u32, default = 0x0)]
#[derive(Debug)]
pub struct NetworkConfig {
    #[bit(30, rw)]
    ignore_ipg_rx_error: bool,
    #[bit(29, rw)]
    allow_bad_preamble: bool,
    #[bit(28, rw)]
    ipg_stretch_enable: bool,
    #[bit(27, rw)]
    sgmii_enable: bool,
    #[bit(26, rw)]
    ignore_rx_fcs: bool,
    #[bit(25, rw)]
    half_duplex_rx_enable: bool,
    #[bit(24, rw)]
    rx_checksum_enable: bool,
    #[bit(23, rw)]
    disable_copy_pause_frames: bool,
    /// Zynq defines this as 0b00 for 32-bit AMBA AHB data bus width.
    #[bits(21..=22, r)]
    dbus_width: u2,
    #[bits(18..=20, rw)]
    mdc_clk_div: MdcClkDiv,
    #[bit(17, rw)]
    fcs_removal: bool,
    #[bit(16, rw)]
    length_field_error_discard: bool,
    #[bits(14..=15, rw)]
    rx_buf_offset: u2,
    #[bit(13, rw)]
    pause_enable: bool,
    #[bit(12, rw)]
    retry_test_enable: bool,
    #[bit(11, rw)]
    pcs_select: PcsSelect,
    #[bit(10, rw)]
    gigabit_enable: bool,
    #[bit(9, rw)]
    ext_addr_match_enable: bool,
    #[bit(8, rw)]
    rx_enable_1536: bool,
    #[bit(7, rw)]
    unicast_hash_enable: bool,
    #[bit(6, rw)]
    multicast_hash_enable: bool,
    #[bit(5, rw)]
    no_broadcast: bool,
    #[bit(4, rw)]
    copy_all_frames: bool,
    #[bit(2, rw)]
    discard_non_vlan: bool,
    #[bit(1, rw)]
    full_duplex: bool,
    /// Only relevant when [Self::gigabit_enable] is cleared.
    #[bit(0, rw)]
    speed_mode: SpeedMode,
}
/// PHY management status information (read-only network status register).
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct NetworkStatus {
    #[bit(6, r)]
    pfc_pri_pause_neg: bool,
    #[bit(5, r)]
    pcs_autoneg_pause_tx_res: bool,
    #[bit(4, r)]
    pcs_autoneg_pause_rx_res: bool,
    #[bit(3, r)]
    pcs_autoneg_dup_res: bool,
    /// High when the PHY management state machine can accept a new operation.
    #[bit(2, r)]
    phy_mgmt_idle: bool,
    #[bit(1, r)]
    mdio_in: bool,
    #[bit(0, r)]
    pcs_link_state: bool,
}
/// AHB DMA burst length selection.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
pub enum BurstLength {
    /// Single-beat accesses only.
    Single,
    /// Incrementing bursts of up to 4 beats (reset default).
    #[default]
    Incr4,
    /// Incrementing bursts of up to 8 beats.
    Incr8,
    /// Incrementing bursts of up to 16 beats.
    Incr16,
}
impl BurstLength {
    /// Raw value for the burst length field of the DMA configuration register.
    ///
    /// The encoding is one-hot: bit 0 selects single accesses, bits 2..=4 select
    /// INCR4/INCR8/INCR16 respectively.
    pub const fn reg_value(&self) -> u5 {
        let raw = match self {
            BurstLength::Single => 1,
            BurstLength::Incr4 => 4,
            BurstLength::Incr8 => 8,
            BurstLength::Incr16 => 16,
        };
        u5::new(raw)
    }
}
/// Endianness selection for AHB accesses (used by [DmaConfig]).
#[bitbybit::bitenum(u1, exhaustive = true)]
#[derive(Debug, PartialEq, Eq)]
pub enum AhbEndianess {
    Little = 0,
    Big = 1,
}
/// DMA receive buffer size selector, expressed in units of 64 bytes.
#[derive(Debug, PartialEq, Eq)]
pub struct DmaRxBufSize(u8);

impl DmaRxBufSize {
    /// Create a selector from a raw register value without any validation.
    pub const fn new_with_raw_value(size: u8) -> Self {
        Self(size)
    }

    /// Create a selector. A value of 0 is not a valid buffer size and yields [None].
    pub const fn new(size: u8) -> Option<Self> {
        match size {
            0 => None,
            valid => Some(Self(valid)),
        }
    }

    /// Raw selector value.
    pub const fn raw_value(&self) -> u8 {
        self.0
    }

    /// Resulting buffer size in bytes (selector value times 64).
    pub const fn size_in_bytes(&self) -> usize {
        64 * self.0 as usize
    }

    /// Value to write into the corresponding [DmaConfig] field.
    pub const fn reg_value(&self) -> u8 {
        self.0
    }
}
/// DMA configuration register (buffer sizing, checksum offload, endianness and
/// burst length).
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct DmaConfig {
    #[bit(24, rw)]
    discard_when_ahb_full: bool,
    /// DMA receive buffer size in AHB system memory.
    #[bits(16..=23, rw)]
    dma_rx_ahb_buf_size_sel: DmaRxBufSize,
    /// Checksum offloading for TX.
    #[bit(11, rw)]
    chksum_offload_enable: bool,
    /// Select size for packet buffer SRAM. Should be set to 1 to use the full configurable address
    /// space of 4 kB for the packet buffer.
    #[bit(10, rw)]
    tx_packet_buf_size_sel: bool,
    /// Select size for packet buffer SRAM. Should be set to 0b11 to use the full configurable
    /// address space of 8 kB for the packet buffer.
    #[bits(8..=9, rw)]
    rx_packet_buf_size_sel: u2,
    /// Default value is 0x1 (big endian)
    #[bit(7, rw)]
    endian_swap_packet_data: AhbEndianess,
    /// Default value is 0x0 (little endian)
    #[bit(6, rw)]
    endian_swap_mgmt_descriptor: AhbEndianess,
    /// Encoded as described in [BurstLength::reg_value].
    #[bits(0..=4, rw)]
    burst_length: u5,
}
/// Transmit status register. The read-write bits are write-one-to-clear.
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct TxStatus {
    #[bit(8, rw)]
    hresp_not_ok: bool,
    #[bit(7, rw)]
    late_collision: bool,
    /// This bit should never be set because the DMA is configured for packet buffer mode.
    #[bit(6, rw)]
    underrun: bool,
    #[bit(5, rw)]
    complete: bool,
    #[bit(4, rw)]
    frame_corruption_ahb_error: bool,
    /// It's called "tx_go" inside the Zynq 7000 documentation, but I think this is just a
    /// TX active bit.
    #[bit(3, r)]
    go: bool,
    #[bit(2, rw)]
    retry_limit_reached: bool,
    #[bit(1, rw)]
    collision: bool,
    #[bit(0, rw)]
    read_when_used: bool,
}

impl TxStatus {
    /// Value which clears every write-to-clear status bit.
    pub fn new_clear_all() -> Self {
        // Bits 0..=8 are status bits, so the clear mask must be 0x1FF. The previous
        // value 0xFF missed bit 8 (hresp_not_ok), leaving an AHB HRESP error latched.
        Self::new_with_raw_value(0x1FF)
    }
}
/// Receive status register. All bits are write-one-to-clear.
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct RxStatus {
    #[bit(3, rw)]
    hresp_not_ok: bool,
    #[bit(2, rw)]
    overrun: bool,
    #[bit(1, rw)]
    frame_received: bool,
    #[bit(0, rw)]
    buf_not_available: bool,
}
impl RxStatus {
    /// Value which clears every status bit (0xF covers bits 0..=3).
    pub fn new_clear_all() -> Self {
        Self::new_with_raw_value(0xF)
    }
}
/// Interrupt status register layout, also used for the interrupt mask register.
///
/// Most bits are write-one-to-clear; [Self::tx_frame_corruption_ahb_error] is
/// cleared on read.
#[bitbybit::bitfield(u32, default = 0x0)]
#[derive(Debug)]
pub struct InterruptStatus {
    #[bit(26, rw)]
    tsu_sec_incr: bool,
    /// Marked N/A in datasheet.
    #[bit(17, rw)]
    partner_pg_rx: bool,
    /// Marked N/A in datasheet.
    #[bit(16, rw)]
    auto_negotiation_complete: bool,
    #[bit(15, rw)]
    external_interrupt: bool,
    #[bit(14, rw)]
    pause_transmitted: bool,
    #[bit(13, rw)]
    pause_time_zero: bool,
    #[bit(12, rw)]
    pause_with_non_zero_quantum: bool,
    #[bit(11, rw)]
    hresp_not_ok: bool,
    #[bit(10, rw)]
    rx_overrun: bool,
    /// Marked N/A in datasheet.
    #[bit(9, rw)]
    link_changed: bool,
    #[bit(7, rw)]
    frame_sent: bool,
    /// Cleared on read.
    #[bit(6, r)]
    tx_frame_corruption_ahb_error: bool,
    #[bit(5, rw)]
    tx_retry_limit_reached_or_late_collision: bool,
    #[bit(3, rw)]
    tx_descr_read_when_used: bool,
    #[bit(2, rw)]
    rx_descr_read_when_used: bool,
    #[bit(1, rw)]
    frame_received: bool,
    #[bit(0, rw)]
    mgmt_frame_sent: bool,
}
/// Write-only interrupt control layout, shared by the interrupt enable and the
/// interrupt disable registers (see [Ethernet]).
#[bitbybit::bitfield(u32, default = 0x00)]
#[derive(Debug)]
pub struct InterruptControl {
    #[bit(26, w)]
    tsu_sec_incr: bool,
    /// Marked N/A in datasheet. Probably because external PHYs are used.
    #[bit(17, w)]
    partner_pg_rx: bool,
    /// Marked N/A in datasheet. Probably because external PHYs are used.
    #[bit(16, w)]
    auto_negotiation_complete: bool,
    #[bit(15, w)]
    external_interrupt: bool,
    #[bit(14, w)]
    pause_transmitted: bool,
    #[bit(13, w)]
    pause_time_zero: bool,
    #[bit(12, w)]
    pause_with_non_zero_quantum: bool,
    #[bit(11, w)]
    hresp_not_ok: bool,
    #[bit(10, w)]
    rx_overrun: bool,
    /// Marked N/A in datasheet. Probably because external PHYs are used.
    #[bit(9, w)]
    link_changed: bool,
    #[bit(7, w)]
    frame_sent: bool,
    #[bit(6, w)]
    tx_frame_corruption_ahb_error: bool,
    #[bit(5, w)]
    tx_retry_limit_reached_or_late_collision: bool,
    #[bit(3, w)]
    tx_descr_read_when_used: bool,
    #[bit(2, w)]
    rx_descr_read_when_used: bool,
    #[bit(1, w)]
    frame_received: bool,
    #[bit(0, w)]
    mgmt_frame_sent: bool,
}
impl InterruptControl {
    /// Value which selects every interrupt bit (e.g. to disable all interrupts at once).
    pub fn new_clear_all() -> Self {
        Self::new_with_raw_value(0xFFFF_FFFF)
    }
}
/// MDIO (Clause 22) operation code for [PhyMaintenance] frames.
///
/// Derives added for consistency with the other enums in this module.
#[bitbybit::bitenum(u2, exhaustive = false)]
#[derive(Debug, PartialEq, Eq)]
pub enum PhyOperation {
    /// Read a PHY register.
    Read = 0b10,
    /// Write a PHY register.
    Write = 0b01,
}
/// PHY maintenance register, used to issue MDIO Clause 22 read/write frames.
#[bitbybit::bitfield(u32, default = 0x0)]
#[derive(Debug)]
pub struct PhyMaintenance {
    /// Must be 1 for Clause 22 operations.
    #[bit(30, rw)]
    clause_22: bool,
    /// Read or write operation; other encodings are invalid for Clause 22.
    #[bits(28..=29, rw)]
    op: Option<PhyOperation>,
    /// Address of the PHY on the MDIO bus.
    #[bits(23..=27, rw)]
    phy_addr: u5,
    /// PHY register to access.
    #[bits(18..=22, rw)]
    reg_addr: u5,
    /// Must be written as 0b10.
    #[bits(16..=17, rw)]
    must_be_0b10: u2,
    /// Write data, or read data after the operation completes.
    #[bits(0..=15, rw)]
    data: u16,
}
/// Pause quantum register layout, used for both RX and TX pause quantum registers.
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct PauseQuantum {
    #[bits(0..=15, rw)]
    value: u16,
}
/// Type ID match register; frames are matched against [Self::type_id] when
/// [Self::copy_enable] is set (confirm exact screening semantics in the TRM).
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct MatchRegister {
    #[bit(31, rw)]
    copy_enable: bool,
    #[bits(0..=15, rw)]
    type_id: u16,
}
/// Gigabit Ethernet Controller (GEM) registers for Zynq-7000
///
/// The register block is 0x294 bytes large, which is checked by the const assert
/// following the structure.
#[derive(derive_mmio::Mmio)]
#[repr(C)]
pub struct Ethernet {
    net_ctrl: NetworkControl,
    net_cfg: NetworkConfig,
    #[mmio(PureRead)]
    net_status: NetworkStatus,
    _reserved0: u32,
    dma_cfg: DmaConfig,
    tx_status: TxStatus,
    rx_buf_queue_base_addr: u32,
    tx_buf_queue_base_addr: u32,
    rx_status: RxStatus,
    interrupt_status: InterruptStatus,
    interrupt_enable: InterruptControl,
    interrupt_disable: InterruptControl,
    interrupt_mask: InterruptStatus,
    phy_maintenance: PhyMaintenance,
    #[mmio(PureRead)]
    rx_pause_quantum: PauseQuantum,
    tx_pause_quantum: PauseQuantum,
    _reserved1: [u32; 0x10],
    hash_low: u32,
    hash_high: u32,
    addr1_low: u32,
    addr1_high: u32,
    addr2_low: u32,
    addr2_high: u32,
    addr3_low: u32,
    addr3_high: u32,
    addr4_low: u32,
    addr4_high: u32,
    match_reg: [MatchRegister; 4],
    wake_on_lan: u32,
    ipg_stretch: u32,
    stacked_vlan: u32,
    tx_pfc: u32,
    addr1_mask_low: u32,
    addr1_mask_high: u32,
    _reserved2: [u32; 0x0B],
    /// Should be 0x20118.
    #[mmio(PureRead)]
    module_id: u32,
    #[mmio(Inner)]
    statistics: Statistics,
    _reserved3: [u32; 0x34],
    #[mmio(PureRead)]
    design_cfg_2: u32,
    #[mmio(PureRead)]
    design_cfg_3: u32,
    #[mmio(PureRead)]
    design_cfg_4: u32,
    #[mmio(PureRead)]
    design_cfg_5: u32,
}
static_assertions::const_assert_eq!(core::mem::size_of::<Ethernet>(), 0x294);
/// GEM statistics registers
///
/// All registers are read-only counters.
#[derive(derive_mmio::Mmio)]
#[repr(C)]
pub struct Statistics {
    #[mmio(PureRead)]
    tx_octets_low: u32,
    #[mmio(PureRead)]
    tx_octets_high: u32,
    #[mmio(PureRead)]
    tx_count: u32,
    #[mmio(PureRead)]
    tx_broadcast: u32,
    #[mmio(PureRead)]
    tx_multicast: u32,
    #[mmio(PureRead)]
    tx_pause: u32,
    #[mmio(PureRead)]
    tx_64_bits: u32,
    #[mmio(PureRead)]
    tx_65_to_127_bits: u32,
    #[mmio(PureRead)]
    tx_128_to_255_bits: u32,
    #[mmio(PureRead)]
    tx_256_to_511_bits: u32,
    #[mmio(PureRead)]
    tx_512_to_1023_bits: u32,
    #[mmio(PureRead)]
    tx_1024_to_1518_bits: u32,
    _reserved0: u32,
    #[mmio(PureRead)]
    tx_underruns: u32,
    #[mmio(PureRead)]
    single_collision_frames: u32,
    #[mmio(PureRead)]
    multi_collision_frames: u32,
    #[mmio(PureRead)]
    excessive_collisions: u32,
    #[mmio(PureRead)]
    late_collisions: u32,
    #[mmio(PureRead)]
    deferred_tx: u32,
    #[mmio(PureRead)]
    carrier_sense_errors: u32,
    #[mmio(PureRead)]
    rx_octets_low: u32,
    #[mmio(PureRead)]
    rx_octets_high: u32,
    #[mmio(PureRead)]
    rx_count: u32,
    #[mmio(PureRead)]
    rx_broadcast: u32,
    #[mmio(PureRead)]
    rx_multicast: u32,
    #[mmio(PureRead)]
    rx_pause: u32,
    #[mmio(PureRead)]
    rx_64_bits: u32,
    #[mmio(PureRead)]
    rx_65_to_127_bits: u32,
    #[mmio(PureRead)]
    rx_128_to_255_bits: u32,
    #[mmio(PureRead)]
    rx_256_to_511_bits: u32,
    #[mmio(PureRead)]
    rx_512_to_1023_bits: u32,
    #[mmio(PureRead)]
    rx_1024_to_1518_bits: u32,
    _reserved1: u32,
    #[mmio(PureRead)]
    rx_undersize: u32,
    #[mmio(PureRead)]
    rx_oversize: u32,
    #[mmio(PureRead)]
    rx_jabber: u32,
    #[mmio(PureRead)]
    rx_frame_check_sequence_errors: u32,
    #[mmio(PureRead)]
    rx_length_field_errors: u32,
    #[mmio(PureRead)]
    rx_symbol_errors: u32,
    #[mmio(PureRead)]
    rx_alignment_errors: u32,
    #[mmio(PureRead)]
    rx_resource_errors: u32,
    #[mmio(PureRead)]
    rx_overrun_errors: u32,
    #[mmio(PureRead)]
    rx_ip_header_checksum_errors: u32,
    #[mmio(PureRead)]
    rx_tcp_checksum_errors: u32,
    #[mmio(PureRead)]
    rx_udp_checksum_errors: u32,
}
impl Ethernet {
    /// Create a new Gigabit Ethernet MMIO instance for GEM 0 at address [GEM_0_BASE_ADDR].
    ///
    /// # Safety
    ///
    /// This API can be used to potentially create a driver to the same peripheral structure
    /// from multiple threads. The user must ensure that concurrent accesses are safe and do not
    /// interfere with each other.
    pub const unsafe fn new_mmio_fixed_0() -> MmioEthernet<'static> {
        // SAFETY: GEM_0_BASE_ADDR is the fixed device address of the first GEM instance.
        unsafe { Self::new_mmio_at(GEM_0_BASE_ADDR) }
    }
    /// Create a new Gigabit Ethernet MMIO instance for GEM 1 at address [GEM_1_BASE_ADDR].
    ///
    /// # Safety
    ///
    /// This API can be used to potentially create a driver to the same peripheral structure
    /// from multiple threads. The user must ensure that concurrent accesses are safe and do not
    /// interfere with each other.
    pub const unsafe fn new_mmio_fixed_1() -> MmioEthernet<'static> {
        // SAFETY: GEM_1_BASE_ADDR is the fixed device address of the second GEM instance.
        unsafe { Self::new_mmio_at(GEM_1_BASE_ADDR) }
    }
}

View File

@@ -83,22 +83,22 @@ pub struct Gpio {
_reserved_2: [u32; 101],
#[mmio(inner)]
#[mmio(Inner)]
bank_0: BankCtrl,
_reserved_3: [u32; 7],
#[mmio(inner)]
#[mmio(Inner)]
bank_1: BankCtrl,
_reserved_4: [u32; 7],
#[mmio(inner)]
#[mmio(Inner)]
bank_2: BankCtrl,
_reserved_5: [u32; 7],
#[mmio(inner)]
#[mmio(Inner)]
bank_3: BankCtrl,
}

View File

@@ -178,6 +178,8 @@ pub struct I2c {
idr: InterruptControl,
}
static_assertions::const_assert_eq!(core::mem::size_of::<I2c>(), 0x2C);
impl I2c {
/// Create a new I2C MMIO instance for I2C0 at address [I2C_0_BASE_ADDR].
///

144
zynq7000/src/l2_cache.rs Normal file
View File

@@ -0,0 +1,144 @@
use arbitrary_int::{u4, u6};
pub const L2C_BASE_ADDR: usize = 0xF8F0_2000;
/// One L2 cache lockdown entry: a data-side and an instruction-side register pair.
#[derive(derive_mmio::Mmio)]
#[repr(C)]
pub struct LockdownRegisters {
    /// Data lockdown register.
    data: u32,
    /// Instruction lockdown register.
    instruction: u32,
}
/// Cache Sync register of the L2 cache controller.
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct CacheSync {
    /// Read-only busy flag; set while a cache sync operation is still in progress.
    #[bit(0, r)]
    busy: bool,
}
/// Debug Control register of the L2 cache controller (all bits default to 0).
#[bitbybit::bitfield(u32, default = 0x0)]
#[derive(Debug)]
pub struct DebugControl {
    /// SPNIDEN bit.
    /// NOTE(review): on the L2C-310 this usually mirrors the SPNIDEN input signal —
    /// confirm writability on the Zynq-7000 against the TRM.
    #[bit(2, rw)]
    spniden: bool,
    /// Disable write-back behavior (forces write-through).
    #[bit(1, rw)]
    disable_write_back: bool,
    /// Disable cache line fills.
    #[bit(0, rw)]
    disable_cache_linefill: bool,
}
/// Cache ID register layout (read-only identification information).
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct CacheId {
    /// Implementer code.
    #[bits(24..=31, r)]
    implementer: u8,
    /// Cache ID.
    #[bits(10..=15, r)]
    cache_id: u6,
    /// Part number.
    #[bits(6..=9, r)]
    part_number: u4,
    /// RTL release revision.
    #[bits(0..=5, r)]
    rtl_release: u6,
}
/// Register map of the L2 cache controller (ARM L2C-310).
///
/// The offsets in the per-field comments are relative to [L2C_BASE_ADDR] and follow from
/// the reserved gaps; the overall size is checked by the size assertion below the struct.
#[derive(derive_mmio::Mmio)]
#[repr(C)]
pub struct L2Cache {
    /// 0x000: Cache ID register.
    #[mmio(PureRead)]
    cache_id: CacheId,
    /// 0x004: Cache type register.
    #[mmio(PureRead)]
    cache_type: u32,
    _reserved: [u32; 0x3E],
    /// 0x100: Control register.
    control: u32,
    /// 0x104: Auxiliary control register.
    aux_control: u32,
    /// 0x108: Tag RAM latency control register.
    tag_ram_control: u32,
    /// 0x10C: Data RAM latency control register.
    data_ram_control: u32,
    _reserved2: [u32; 0x3C],
    /// 0x200: Event counter control register.
    event_counter_control: u32,
    /// 0x204: Event counter 1 configuration.
    event_counter_1_config: u32,
    /// 0x208: Event counter 0 configuration.
    event_counter_0_config: u32,
    /// 0x20C: Event counter 1 value.
    event_counter_1: u32,
    /// 0x210: Event counter 0 value.
    event_counter_0: u32,
    /// 0x214: Interrupt mask register.
    interrupt_mask: u32,
    /// 0x218: Masked interrupt status (read-only).
    #[mmio(PureRead)]
    interrupt_mask_status: u32,
    /// 0x21C: Raw interrupt status (read-only).
    #[mmio(PureRead)]
    interrupt_raw_status: u32,
    /// 0x220: Interrupt clear register (write-only).
    #[mmio(Write)]
    interrupt_clear: u32,
    _reserved3: [u32; 0x143],
    /// 0x730: Cache sync register; poll the busy flag to wait for completion.
    cache_sync: CacheSync,
    _reserved4: [u32; 0xF],
    /// 0x770: Invalidate line by physical address.
    invalidate_by_pa: u32,
    _reserved5: [u32; 0x2],
    /// 0x77C: Invalidate by way.
    invalidate_by_way: u32,
    _reserved6: [u32; 0xC],
    /// 0x7B0: Clean line by physical address.
    clean_by_pa: u32,
    _reserved7: u32,
    /// 0x7B8: Clean line by set/way index.
    clean_by_index: u32,
    /// 0x7BC: Clean by way.
    clean_by_way: u32,
    _reserved8: [u32; 0xC],
    /// 0x7F0: Clean and invalidate line by physical address.
    clean_invalidate_by_pa: u32,
    _reserved9: u32,
    /// 0x7F8: Clean and invalidate line by set/way index.
    clean_invalidate_by_index: u32,
    /// 0x7FC: Clean and invalidate by way.
    clean_invalidate_by_way: u32,
    _reserved10: [u32; 0x40],
    /// 0x900: Eight data/instruction lockdown register pairs.
    #[mmio(Inner)]
    lockdown_regs: [LockdownRegisters; 8],
    _reserved11: [u32; 0x4],
    /// 0x950: Lockdown by line enable.
    lockdown_by_line_enable: u32,
    /// 0x954: Unlock way register.
    unlock_way: u32,
    _reserved12: [u32; 0xAA],
    /// 0xC00: Address filtering start register.
    addr_filtering_start: u32,
    /// 0xC04: Address filtering end register.
    addr_filtering_end: u32,
    _reserved13: [u32; 0xCE],
    /// 0xF40: Debug control register.
    debug_control: DebugControl,
    _reserved14: [u32; 0x7],
    /// 0xF60: Prefetch control register.
    prefetch_control: u32,
    _reserved15: [u32; 0x7],
    /// 0xF80: Power control register.
    power_control: u32,
}
static_assertions::const_assert_eq!(core::mem::size_of::<L2Cache>(), 0xF84);
impl L2Cache {
    /// Create a new MMIO instance for the L2 cache controller at address [L2C_BASE_ADDR].
    ///
    /// # Safety
    ///
    /// This API can be used to potentially create a driver to the same peripheral structure
    /// from multiple threads. The user must ensure that concurrent accesses are safe and do not
    /// interfere with each other.
    pub const unsafe fn new_mmio_fixed() -> MmioL2Cache<'static> {
        unsafe { Self::new_mmio_at(L2C_BASE_ADDR) }
    }
}

View File

@@ -17,10 +17,12 @@ extern crate std;
pub const MPCORE_BASE_ADDR: usize = 0xF8F0_0000;
pub mod eth;
pub mod gic;
pub mod gpio;
pub mod gtc;
pub mod i2c;
pub mod l2_cache;
pub mod mpcore;
pub mod slcr;
pub mod spi;
@@ -48,6 +50,8 @@ pub struct PsPeripherals {
pub slcr: slcr::MmioSlcr<'static>,
pub ttc_0: ttc::MmioTtc<'static>,
pub ttc_1: ttc::MmioTtc<'static>,
pub eth_0: eth::MmioEthernet<'static>,
pub eth_1: eth::MmioEthernet<'static>,
}
impl PsPeripherals {
@@ -81,6 +85,8 @@ impl PsPeripherals {
i2c_1: i2c::I2c::new_mmio_fixed_1(),
ttc_0: ttc::Ttc::new_mmio_fixed_0(),
ttc_1: ttc::Ttc::new_mmio_fixed_1(),
eth_0: eth::Ethernet::new_mmio_fixed_0(),
eth_1: eth::Ethernet::new_mmio_fixed_1(),
}
}
}

View File

@@ -47,15 +47,15 @@ const_assert_eq!(core::mem::size_of::<Scu>(), 0x58);
#[derive(derive_mmio::Mmio)]
#[repr(C)]
pub struct Mpcore {
#[mmio(inner)]
#[mmio(Inner)]
scu: Scu,
_reserved_0: [u32; 0x2A],
#[mmio(inner)]
#[mmio(Inner)]
gicc: Gicc,
#[mmio(inner)]
#[mmio(Inner)]
gt: Gtc,
_reserved_1: [u32; 0xF9],
@@ -76,7 +76,7 @@ pub struct Mpcore {
_reserved_3: [u32; 0x272],
#[mmio(inner)]
#[mmio(Inner)]
gicd: Gicd,
}

View File

@@ -172,7 +172,7 @@ pub enum ClockRatioSelect {
}
#[bitbybit::bitenum(u2, exhaustive = true)]
#[derive(Debug)]
#[derive(Debug, Eq)]
pub enum SrcSelIo {
IoPll = 0b00,
IoPllAlt = 0b01,
@@ -180,6 +180,25 @@ pub enum SrcSelIo {
DdrPll = 0b11,
}
impl PartialEq for SrcSelIo {
    /// Two selections compare equal when they pick the same clock source; `IoPll`
    /// and `IoPllAlt` both select the IO PLL and are therefore treated as equal.
    fn eq(&self, other: &Self) -> bool {
        // Helper: does this variant select the IO PLL (either encoding)?
        let selects_io_pll = |sel: &Self| matches!(sel, Self::IoPll | Self::IoPllAlt);
        if selects_io_pll(self) {
            selects_io_pll(other)
        } else {
            // Remaining variants are only equal to themselves.
            matches!(
                (self, other),
                (Self::ArmPll, Self::ArmPll) | (Self::DdrPll, Self::DdrPll)
            )
        }
    }
}
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct GigEthClkCtrl {
@@ -195,6 +214,23 @@ pub struct GigEthClkCtrl {
clk_act: bool,
}
/// RX clock source selection for the gigabit ethernet RCLK control registers.
#[bitbybit::bitenum(u1, exhaustive = true)]
#[derive(Debug)]
pub enum SrcSelGigEthRclk {
    /// RX clock comes from the MIO pins.
    Mio = 0,
    /// RX clock comes from the EMIO interface (PL).
    Emio = 1,
}
/// Gigabit ethernet RX clock control register (GEM RCLK control).
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct GigEthRclkCtrl {
    /// Select the RX clock source (MIO or EMIO).
    #[bit(4, rw)]
    srcsel: SrcSelGigEthRclk,
    /// Enable the ethernet controller RX clock.
    #[bit(0, rw)]
    clk_enable: bool,
}
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct CanClkCtrl {
@@ -320,8 +356,8 @@ pub struct ClockControl {
aper_clk_ctrl: AperClkCtrl,
usb_0_clk_ctrl: u32,
usb_1_clk_ctrl: u32,
gem_0_rclk_ctrl: u32,
gem_1_rclk_ctrl: u32,
gem_0_rclk_ctrl: GigEthRclkCtrl,
gem_1_rclk_ctrl: GigEthRclkCtrl,
gem_0_clk_ctrl: GigEthClkCtrl,
gem_1_clk_ctrl: GigEthClkCtrl,
smc_clk_ctrl: SingleCommonPeriphIoClkCtrl,
@@ -335,13 +371,13 @@ pub struct ClockControl {
dbg_clk_ctrl: TracePortClkCtrl,
pcap_clk_ctrl: SingleCommonPeriphIoClkCtrl,
topsw_clk_ctrl: u32,
#[mmio(inner)]
#[mmio(Inner)]
fpga_0_clk_ctrl: FpgaClkBlock,
#[mmio(inner)]
#[mmio(Inner)]
fpga_1_clk_ctrl: FpgaClkBlock,
#[mmio(inner)]
#[mmio(Inner)]
fpga_2_clk_ctrl: FpgaClkBlock,
#[mmio(inner)]
#[mmio(Inner)]
fpga_3_clk_ctrl: FpgaClkBlock,
_gap1: [u32; 5],
clk_621_true: ClockRatioSelectReg,

View File

@@ -17,7 +17,7 @@ pub enum IoType {
Hstl = 0b100,
}
#[bitbybit::bitfield(u32)]
#[bitbybit::bitfield(u32, default = 0x0)]
#[derive(Debug)]
pub struct Config {
#[bit(13, rw)]

View File

@@ -50,10 +50,27 @@ impl DdrIoB {
static_assertions::const_assert_eq!(core::mem::size_of::<DdrIoB>(), 0x38);
/// Internal voltage reference selection. Non-exhaustive: other encodings are reserved.
#[bitbybit::bitenum(u3, exhaustive = false)]
pub enum VrefSel {
    /// Internal voltage reference disabled.
    Disabled = 0b000,
    /// Internal voltage reference of 0.9 V.
    Vref0_9V = 0b001,
}
/// GPIOB control register fields.
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct GpiobControl {
    /// NOTE(review): presumably the software enable for the VREF switch — confirm
    /// against the Zynq-7000 TRM (slcr GPIOB_CTRL) before relying on it.
    #[bit(11, rw)]
    vref_sw_en: bool,
    /// Internal VREF selection; reads back as `None` for reserved encodings.
    #[bits(4..=6, rw)]
    vref_sel: Option<VrefSel>,
    /// Enable the internal voltage reference.
    #[bit(0, rw)]
    vref_en: bool,
}
#[derive(derive_mmio::Mmio)]
#[repr(C)]
pub struct GpiobCtrl {
ctrl: u32,
pub struct GpiobRegisters {
ctrl: GpiobControl,
cfg_cmos18: u32,
cfg_cmos25: u32,
cfg_cmos33: u32,
@@ -62,7 +79,7 @@ pub struct GpiobCtrl {
drvr_bias_ctrl: u32,
}
impl GpiobCtrl {
impl GpiobRegisters {
/// Create a new handle to this peripheral.
///
/// Writing to this register requires unlocking the SLCR registers first.
@@ -71,12 +88,13 @@ impl GpiobCtrl {
///
/// If you create multiple instances of this handle at the same time, you are responsible for
/// ensuring that there are no read-modify-write races on any of the registers.
pub unsafe fn new_mmio_fixed() -> MmioGpiobCtrl<'static> {
pub unsafe fn new_mmio_fixed() -> MmioGpiobRegisters<'static> {
unsafe { Self::new_mmio_at(SLCR_BASE_ADDR + GPIOB_OFFSET) }
}
}
#[bitbybit::bitfield(u32)]
#[derive(Debug)]
pub struct BootModeRegister {
#[bit(4, r)]
pll_bypass: bool,
@@ -113,12 +131,12 @@ pub struct Slcr {
_gap0: [u32; 0x3C],
#[mmio(inner)]
#[mmio(Inner)]
clk_ctrl: ClockControl,
_gap1: [u32; 0x0E],
#[mmio(inner)]
#[mmio(Inner)]
reset_ctrl: ResetControl,
_gap2: [u32; 0x02],
@@ -176,16 +194,17 @@ pub struct Slcr {
_gap15: [u32; 0x42],
reserved: u32,
/// Xilinx marks this as reserved but writes to it in their low-level L2 cache configuration.
magic_l2c_register: u32,
_gap16: [u32; 0x38],
_gap18: [u32; 0x09],
#[mmio(inner)]
gpiob: GpiobCtrl,
#[mmio(Inner)]
gpiob: GpiobRegisters,
#[mmio(inner)]
#[mmio(Inner)]
ddriob: DdrIoB,
}

View File

@@ -35,6 +35,23 @@ pub struct GpioClockReset {
gpio_cpu1x_rst: bool,
}
/// Gigabit ethernet (GEM) reset control register fields (all bits default to 0).
#[bitbybit::bitfield(u32, default = 0x0)]
#[derive(Debug)]
pub struct EthernetReset {
    /// Reset bit for the GEM 1 reference clock domain.
    #[bit(5, rw)]
    gem1_ref_rst: bool,
    /// Reset bit for the GEM 0 reference clock domain.
    #[bit(4, rw)]
    gem0_ref_rst: bool,
    /// Reset bit for the GEM 1 RX clock domain.
    #[bit(3, rw)]
    gem1_rx_rst: bool,
    /// Reset bit for the GEM 0 RX clock domain.
    #[bit(2, rw)]
    gem0_rx_rst: bool,
    /// Reset bit for the GEM 1 CPU1x clock domain.
    #[bit(1, rw)]
    gem1_cpu1x_rst: bool,
    /// Reset bit for the GEM 0 CPU1x clock domain.
    #[bit(0, rw)]
    gem0_cpu1x_rst: bool,
}
#[derive(derive_mmio::Mmio)]
#[repr(C)]
pub struct ResetControl {
@@ -45,7 +62,7 @@ pub struct ResetControl {
topsw: u32,
dmac: u32,
usb: u32,
gem: u32,
eth: EthernetReset,
sdio: DualRefAndClockReset,
spi: DualRefAndClockReset,
can: DualClockReset,