init commit

2025-02-19 11:00:04 +01:00
commit f842673e3a
104 changed files with 21595 additions and 0 deletions

45
zynq7000-hal/Cargo.toml Normal file

@ -0,0 +1,45 @@
[package]
name = "zynq7000-hal"
version = "0.1.0"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
edition = "2024"
description = "HAL for the Zynq7000 family of SoCs"
homepage = "https://egit.irs.uni-stuttgart.de/rust/zynq7000-rs"
repository = "https://egit.irs.uni-stuttgart.de/rust/zynq7000-rs"
license = "MIT OR Apache-2.0"
keywords = ["no-std", "hal", "amd", "zynq7000", "xilinx", "bare-metal"]
categories = ["embedded", "no-std", "hardware-support"]
[dependencies]
cortex-ar = { git = "https://github.com/rust-embedded/cortex-ar", branch = "main", features = ["critical-section-single-core"] }
zynq7000 = { path = "../zynq7000" }
arbitrary-int = "1.3"
thiserror = { version = "2", default-features = false }
num_enum = { version = "0.7", default-features = false }
ringbuf = { version = "0.4.8", default-features = false }
embedded-hal-nb = "1"
embedded-io = "0.6"
embedded-hal = "1"
embedded-hal-async = "1"
heapless = "0.8"
static_cell = "2"
delegate = "0.13"
paste = "1"
nb = "1"
fugit = "0.3"
critical-section = "1"
libm = "0.2"
log = "0.4"
embassy-sync = "0.6"
raw-slicee = "0.1"
embedded-io-async = "0.6"
[features]
std = ["thiserror/std", "alloc"]
alloc = []
# These devices have a lower pin count.
7z010-7z007s-clg225 = []
[dev-dependencies]
approx = "0.5"

201
zynq7000-hal/LICENSE-APACHE Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

21
zynq7000-hal/LICENSE-MIT Normal file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025 Robin A. Mueller
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

12
zynq7000-hal/README.md Normal file

@ -0,0 +1,12 @@
# HAL for the AMD Zynq 7000 SoC family
This repository contains the **H**ardware **A**bstraction **L**ayer (HAL), which is an additional
hardware abstraction on top of the [peripheral access API](https://egit.irs.uni-stuttgart.de/rust/zynq7000-rs/src/branch/main/zynq7000).
It is the result of reading the datasheet for the device and encoding a type-safe layer on top of
the raw PAC. This crate also implements traits specified by the
[embedded-hal](https://github.com/rust-embedded/embedded-hal) project, making it compatible with
various drivers in the embedded Rust ecosystem.
The [top-level README](https://egit.irs.uni-stuttgart.de/rust/zynq7000-rs) and the documentation
contain more information on how to use this crate.
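
The following is a minimal, hedged sketch of reading the pre-configured clock tree. The crate
path `zynq7000_hal`, the `Hertz::from_raw` constructor and the 33.333 MHz PS_CLK value are
assumptions which depend on the final crate layout and the used board:

```rust
use zynq7000_hal::{clocks::Clocks, time::Hertz};

// Assumption: the board feeds a 33.333 MHz crystal into PS_CLK.
let clocks = Clocks::new_from_regs(Hertz::from_raw(33_333_333)).unwrap();
// Reference clocks for individual peripherals can then be queried, e.g. for the UARTs.
let uart_ref_clk = clocks.io_clocks().uart_clk();
```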

466
zynq7000-hal/src/clocks.rs Normal file

@ -0,0 +1,466 @@
//! Clock module.
use arbitrary_int::Number;
use zynq7000::slcr::{
ClockControl,
clocks::{
ClockRatioSelect, DualCommonPeriphIoClkCtrl, FpgaClkControl, GigEthClkCtrl,
SingleCommonPeriphIoClkCtrl,
},
};
use super::time::Hertz;
#[derive(Debug)]
pub struct ArmClocks {
ref_clk: Hertz,
cpu_1x_clk: Hertz,
cpu_2x_clk: Hertz,
cpu_3x2x_clk: Hertz,
cpu_6x4x_clk: Hertz,
}
impl ArmClocks {
/// Reference clock provided by ARM PLL which is used to calculate all other clock frequencies.
pub const fn ref_clk(&self) -> Hertz {
self.ref_clk
}
pub const fn cpu_1x_clk(&self) -> Hertz {
self.cpu_1x_clk
}
pub const fn cpu_2x_clk(&self) -> Hertz {
self.cpu_2x_clk
}
pub const fn cpu_3x2x_clk(&self) -> Hertz {
self.cpu_3x2x_clk
}
pub const fn cpu_6x4x_clk(&self) -> Hertz {
self.cpu_6x4x_clk
}
}
#[derive(Debug)]
pub struct DdrClocks {
ref_clk: Hertz,
ddr_3x_clk: Hertz,
ddr_2x_clk: Hertz,
}
impl DdrClocks {
/// Reference clock provided by DDR PLL which is used to calculate all other clock frequencies.
pub const fn ref_clk(&self) -> Hertz {
self.ref_clk
}
pub fn ddr_3x_clk(&self) -> Hertz {
self.ddr_3x_clk
}
pub fn ddr_2x_clk(&self) -> Hertz {
self.ddr_2x_clk
}
}
#[derive(Debug)]
pub struct IoClocks {
/// Reference clock provided by IO PLL which is used to calculate all other clock frequencies.
ref_clk: Hertz,
smc_clk: Hertz,
qspi_clk: Hertz,
sdio_clk: Hertz,
uart_clk: Hertz,
spi_clk: Hertz,
can_clk: Hertz,
pcap_2x_clk: Hertz,
trace_clk: Option<Hertz>,
}
impl IoClocks {
pub const fn ref_clk(&self) -> Hertz {
self.ref_clk
}
pub const fn smc_clk(&self) -> Hertz {
self.smc_clk
}
pub fn update_smc_clk(&mut self, clk: Hertz) {
self.smc_clk = clk
}
pub const fn qspi_clk(&self) -> Hertz {
self.qspi_clk
}
pub fn update_qspi_clk(&mut self, clk: Hertz) {
self.qspi_clk = clk
}
pub const fn sdio_clk(&self) -> Hertz {
self.sdio_clk
}
pub fn update_sdio_clk(&mut self, clk: Hertz) {
self.sdio_clk = clk
}
pub const fn uart_clk(&self) -> Hertz {
self.uart_clk
}
pub fn update_uart_clk(&mut self, clk: Hertz) {
self.uart_clk = clk
}
pub const fn spi_clk(&self) -> Hertz {
self.spi_clk
}
pub fn update_spi_clk(&mut self, clk: Hertz) {
self.spi_clk = clk
}
pub fn can_clk(&self) -> Hertz {
self.can_clk
}
pub fn update_can_clk(&mut self, clk: Hertz) {
self.can_clk = clk
}
pub fn pcap_2x_clk(&self) -> Hertz {
self.pcap_2x_clk
}
pub fn update_pcap_2x_clk(&mut self, clk: Hertz) {
self.pcap_2x_clk = clk
}
/// Returns [None] if the trace clock is configured to use the EMIO trace clock.
pub fn trace_clk(&self) -> Option<Hertz> {
self.trace_clk
}
}
#[derive(Debug)]
pub struct Clocks {
ps_clk: Hertz,
arm_pll_out: Hertz,
io_pll_out: Hertz,
ddr_pll_out: Hertz,
arm: ArmClocks,
ddr: DdrClocks,
io: IoClocks,
pl: [Hertz; 4],
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ClockModuleId {
Ddr,
Arm,
Smc,
Qspi,
Sdio,
Uart,
Spi,
Pcap,
Can,
Fpga,
Trace,
Gem0,
Gem1,
}
#[derive(Debug)]
pub struct DivisorZero(pub ClockModuleId);
#[derive(Debug)]
pub enum ClockReadError {
/// The feedback value for the PLL clock output calculation is zero.
PllFeedbackZero,
/// Detected a divisor of zero.
DivisorZero(DivisorZero),
/// Detected a divisor that is not even.
DivisorNotEven,
}
impl Clocks {
/// Processing system clock, which generally depends on the board and the crystal used.
pub fn ps_clk(&self) -> Hertz {
self.ps_clk
}
/// This generates the clock configuration by reading the SLCR clock registers.
///
/// It assumes that the clocks have already been configured, for example by a first-stage
/// bootloader or the PS7 initialization script.
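///
/// # Example
///
/// A minimal sketch; the 33.333 MHz PS_CLK value and the `Hertz::from_raw` constructor are
/// assumptions which depend on the board and the re-exported time types.
///
/// ```no_run
/// use zynq7000_hal::clocks::Clocks;
/// use zynq7000_hal::time::Hertz;
///
/// let clocks = Clocks::new_from_regs(Hertz::from_raw(33_333_333)).unwrap();
/// let cpu_clk = clocks.arm_clocks().cpu_6x4x_clk();
/// let uart_ref_clk = clocks.io_clocks().uart_clk();
/// ```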
pub fn new_from_regs(ps_clk_freq: Hertz) -> Result<Self, ClockReadError> {
let mut clk_regs = unsafe { ClockControl::new_mmio_fixed() };
let arm_pll_cfg = clk_regs.read_arm_pll();
let io_pll_cfg = clk_regs.read_io_pll();
let ddr_pll_cfg = clk_regs.read_ddr_pll();
if arm_pll_cfg.fdiv().as_u32() == 0
|| io_pll_cfg.fdiv().as_u32() == 0
|| ddr_pll_cfg.fdiv().as_u32() == 0
{
return Err(ClockReadError::PllFeedbackZero);
}
let arm_pll_out = ps_clk_freq * arm_pll_cfg.fdiv().into();
let io_pll_out = ps_clk_freq * io_pll_cfg.fdiv().into();
let ddr_pll_out = ps_clk_freq * ddr_pll_cfg.fdiv().into();
let arm_clk_ctrl = clk_regs.read_arm_clk_ctrl();
let arm_base_clk = match arm_clk_ctrl.srcsel() {
zynq7000::slcr::clocks::SrcSelArm::ArmPll
| zynq7000::slcr::clocks::SrcSelArm::ArmPllAlt => arm_pll_out,
zynq7000::slcr::clocks::SrcSelArm::DdrPll => ddr_pll_out,
zynq7000::slcr::clocks::SrcSelArm::IoPll => io_pll_out,
};
let clk_sel = clk_regs.read_clk_621_true();
if arm_clk_ctrl.divisor().as_u32() == 0 {
return Err(ClockReadError::DivisorZero(DivisorZero(ClockModuleId::Arm)));
}
let arm_clk_divided = arm_base_clk / arm_clk_ctrl.divisor().as_u32();
let arm_clks = match clk_sel.sel() {
ClockRatioSelect::FourToTwoToOne => ArmClocks {
ref_clk: arm_pll_out,
cpu_1x_clk: arm_clk_divided / 4,
cpu_2x_clk: arm_clk_divided / 2,
cpu_3x2x_clk: arm_clk_divided / 2,
cpu_6x4x_clk: arm_clk_divided,
},
ClockRatioSelect::SixToTwoToOne => ArmClocks {
ref_clk: arm_pll_out,
cpu_1x_clk: arm_clk_divided / 6,
cpu_2x_clk: arm_clk_divided / 3,
cpu_3x2x_clk: arm_clk_divided / 2,
cpu_6x4x_clk: arm_clk_divided,
},
};
let ddr_clk_ctrl = clk_regs.read_ddr_clk_ctrl();
if ddr_clk_ctrl.div_3x_clk().as_u32() == 0 || ddr_clk_ctrl.div_2x_clk().as_u32() == 0 {
return Err(ClockReadError::DivisorZero(DivisorZero(ClockModuleId::Ddr)));
}
let ddr_clks = DdrClocks {
ref_clk: ddr_pll_out,
ddr_3x_clk: ddr_pll_out / ddr_clk_ctrl.div_3x_clk().as_u32(),
ddr_2x_clk: ddr_pll_out / ddr_clk_ctrl.div_2x_clk().as_u32(),
};
let handle_common_single_clock_config = |single_block: SingleCommonPeriphIoClkCtrl,
id: ClockModuleId|
-> Result<Hertz, ClockReadError> {
if single_block.divisor().as_u32() == 0 {
return Err(ClockReadError::DivisorZero(DivisorZero(id)));
}
Ok(match single_block.srcsel() {
zynq7000::slcr::clocks::SrcSelIo::IoPll
| zynq7000::slcr::clocks::SrcSelIo::IoPllAlt => {
io_pll_out / single_block.divisor().as_u32()
}
zynq7000::slcr::clocks::SrcSelIo::ArmPll => {
arm_pll_out / single_block.divisor().as_u32()
}
zynq7000::slcr::clocks::SrcSelIo::DdrPll => {
ddr_pll_out / single_block.divisor().as_u32()
}
})
};
let handle_common_dual_clock_config = |dual_block: DualCommonPeriphIoClkCtrl,
id: ClockModuleId|
-> Result<Hertz, ClockReadError> {
if dual_block.divisor().as_u32() == 0 {
return Err(ClockReadError::DivisorZero(DivisorZero(id)));
}
Ok(match dual_block.srcsel() {
zynq7000::slcr::clocks::SrcSelIo::IoPll
| zynq7000::slcr::clocks::SrcSelIo::IoPllAlt => {
io_pll_out / dual_block.divisor().as_u32()
}
zynq7000::slcr::clocks::SrcSelIo::ArmPll => {
arm_pll_out / dual_block.divisor().as_u32()
}
zynq7000::slcr::clocks::SrcSelIo::DdrPll => {
ddr_pll_out / dual_block.divisor().as_u32()
}
})
};
let smc_clk =
handle_common_single_clock_config(clk_regs.read_smc_clk_ctrl(), ClockModuleId::Smc)?;
let qspi_clk =
handle_common_single_clock_config(clk_regs.read_lqspi_clk_ctrl(), ClockModuleId::Qspi)?;
let sdio_clk =
handle_common_dual_clock_config(clk_regs.read_sdio_clk_ctrl(), ClockModuleId::Sdio)?;
let uart_clk =
handle_common_dual_clock_config(clk_regs.read_uart_clk_ctrl(), ClockModuleId::Uart)?;
let spi_clk =
handle_common_dual_clock_config(clk_regs.read_spi_clk_ctrl(), ClockModuleId::Spi)?;
let pcap_2x_clk =
handle_common_single_clock_config(clk_regs.read_pcap_clk_ctrl(), ClockModuleId::Pcap)?;
let can_clk_ctrl = clk_regs.read_can_clk_ctrl();
let can_clk_ref_clk = match can_clk_ctrl.srcsel() {
zynq7000::slcr::clocks::SrcSelIo::IoPll
| zynq7000::slcr::clocks::SrcSelIo::IoPllAlt => io_pll_out,
zynq7000::slcr::clocks::SrcSelIo::ArmPll => arm_pll_out,
zynq7000::slcr::clocks::SrcSelIo::DdrPll => ddr_pll_out,
};
if can_clk_ctrl.divisor_0().as_u32() == 0 || can_clk_ctrl.divisor_1().as_u32() == 0 {
return Err(ClockReadError::DivisorZero(DivisorZero(ClockModuleId::Can)));
}
let can_clk =
can_clk_ref_clk / can_clk_ctrl.divisor_0().as_u32() / can_clk_ctrl.divisor_1().as_u32();
let trace_clk_ctrl = clk_regs.read_dbg_clk_ctrl();
if trace_clk_ctrl.divisor().as_u32() == 0 {
return Err(ClockReadError::DivisorZero(DivisorZero(
ClockModuleId::Trace,
)));
}
let trace_clk = match trace_clk_ctrl.srcsel() {
zynq7000::slcr::clocks::SrcSelTpiu::IoPll
| zynq7000::slcr::clocks::SrcSelTpiu::IoPllAlt => {
Some(io_pll_out / trace_clk_ctrl.divisor().as_u32())
}
zynq7000::slcr::clocks::SrcSelTpiu::ArmPll => {
Some(arm_pll_out / trace_clk_ctrl.divisor().as_u32())
}
zynq7000::slcr::clocks::SrcSelTpiu::DdrPll => {
Some(ddr_pll_out / trace_clk_ctrl.divisor().as_u32())
}
zynq7000::slcr::clocks::SrcSelTpiu::EmioTraceClk
| zynq7000::slcr::clocks::SrcSelTpiu::EmioTraceClkAlt0
| zynq7000::slcr::clocks::SrcSelTpiu::EmioTraceClkAlt1
| zynq7000::slcr::clocks::SrcSelTpiu::EmioTraceClkAlt2 => None,
};
let calculate_fpga_clk = |fpga_clk_ctrl: FpgaClkControl| -> Result<Hertz, ClockReadError> {
if fpga_clk_ctrl.divisor_0().as_u32() == 0 || fpga_clk_ctrl.divisor_1().as_u32() == 0 {
return Err(ClockReadError::DivisorZero(DivisorZero(
ClockModuleId::Fpga,
)));
}
Ok(match fpga_clk_ctrl.srcsel() {
zynq7000::slcr::clocks::SrcSelIo::IoPll
| zynq7000::slcr::clocks::SrcSelIo::IoPllAlt => {
io_pll_out
/ fpga_clk_ctrl.divisor_0().as_u32()
/ fpga_clk_ctrl.divisor_1().as_u32()
}
zynq7000::slcr::clocks::SrcSelIo::ArmPll => {
arm_pll_out
/ fpga_clk_ctrl.divisor_0().as_u32()
/ fpga_clk_ctrl.divisor_1().as_u32()
}
zynq7000::slcr::clocks::SrcSelIo::DdrPll => {
ddr_pll_out
/ fpga_clk_ctrl.divisor_0().as_u32()
/ fpga_clk_ctrl.divisor_1().as_u32()
}
})
};
Ok(Self {
ps_clk: ps_clk_freq,
io_pll_out,
ddr_pll_out,
arm_pll_out,
arm: arm_clks,
ddr: ddr_clks,
io: IoClocks {
ref_clk: io_pll_out,
smc_clk,
qspi_clk,
sdio_clk,
uart_clk,
spi_clk,
can_clk,
pcap_2x_clk,
trace_clk,
},
// TODO: There should be a mut and a non-mut getter for an inner block. We only do pure
// reads with the inner block here.
pl: [
calculate_fpga_clk(clk_regs.fpga_0_clk_ctrl().read_clk_ctrl())?,
calculate_fpga_clk(clk_regs.fpga_1_clk_ctrl().read_clk_ctrl())?,
calculate_fpga_clk(clk_regs.fpga_2_clk_ctrl().read_clk_ctrl())?,
calculate_fpga_clk(clk_regs.fpga_3_clk_ctrl().read_clk_ctrl())?,
],
})
}
pub fn arm_clocks(&self) -> &ArmClocks {
&self.arm
}
pub fn ddr_clocks(&self) -> &DdrClocks {
&self.ddr
}
pub fn io_clocks(&self) -> &IoClocks {
&self.io
}
pub fn io_clocks_mut(&mut self) -> &mut IoClocks {
&mut self.io
}
/// Programmable Logic (PL) FCLK clocks.
pub fn pl_clocks(&self) -> &[Hertz; 4] {
&self.pl
}
fn calculate_gem_ref_clock(
&self,
reg: GigEthClkCtrl,
module: ClockModuleId,
) -> Result<Hertz, DivisorZero> {
let source_clk = match reg.srcsel() {
zynq7000::slcr::clocks::SrcSelIo::IoPll
| zynq7000::slcr::clocks::SrcSelIo::IoPllAlt => self.io_pll_out,
zynq7000::slcr::clocks::SrcSelIo::ArmPll => self.arm_pll_out,
zynq7000::slcr::clocks::SrcSelIo::DdrPll => self.ddr_pll_out,
};
let div0 = reg.divisor_0().as_u32();
if div0 == 0 {
return Err(DivisorZero(module));
}
let div1 = reg.divisor_1().as_u32();
if div1 == 0 {
return Err(DivisorZero(module));
}
Ok(source_clk / div0 / div1)
}
/// Calculate the reference clock for GEM0.
///
/// The divisor 1 of the GEM is 0 on reset. You have to properly initialize the clock
/// configuration before calling this function.
///
/// It should be noted that the GEM has a separate TX and RX clock.
/// The reference clock will only be the RX clock in loopback mode. For the TX block,
/// the reference clock is used if the EMIO enable bit `GEM{0,1}_CLK_CTRL[6]` is set to 0.
pub fn calculate_gem_0_ref_clock(&self) -> Result<Hertz, DivisorZero> {
let clk_regs = unsafe { ClockControl::new_mmio_fixed() };
self.calculate_gem_ref_clock(clk_regs.read_gem_0_clk_ctrl(), ClockModuleId::Gem0)
}
/// Calculate the reference clock for GEM1.
///
/// The divisor 1 of the GEM is 0 on reset. You have to properly initialize the clock
/// configuration before calling this function.
///
/// It should be noted that the GEM has a separate TX and RX clock.
/// The reference clock will only be the RX clock in loopback mode. For the TX block,
/// the reference clock is used if the EMIO enable bit `GEM{0,1}_CLK_CTRL[6]` is set to 0.
pub fn calculate_gem_1_ref_clock(&self) -> Result<Hertz, DivisorZero> {
let clk_regs = unsafe { ClockControl::new_mmio_fixed() };
self.calculate_gem_ref_clock(clk_regs.read_gem_1_clk_ctrl(), ClockModuleId::Gem1)
}
}

528
zynq7000-hal/src/gic.rs Normal file

@ -0,0 +1,528 @@
//! Global Interrupt Controller (GIC) module.
//!
//! The primary interface to configure and allow handling the interrupts are the
//! [GicConfigurator] and the [GicInterruptHelper] structures.
//!
//! # Examples
//!
//! - [GTC ticks](https://egit.irs.uni-stuttgart.de/rust/zynq7000-rs/src/branch/main/examples/simple/src/bin/gtc-ticks.rs)
use arbitrary_int::Number;
use cortex_ar::interrupt;
use zynq7000::gic::{
Dcr, Gicc, Gicd, Icr, InterruptSignalRegister, MmioGicc, MmioGicd, PriorityRegister,
};
const SPURIOUS_INTERRUPT_ID: u32 = 1023;
pub const HIGHEST_PRIORITY: u8 = 0;
pub const LOWEST_PRIORITY: u8 = 31;
/// These fixed values must be programmed according to the Zynq7000 TRM p.236.
/// Configures #32 to #47.
pub const ICFR_2_FIXED_VALUE: u32 = 0b01010101010111010101010001011111;
/// These fixed values must be programmed according to the Zynq7000 TRM p.236.
/// This configures `PL[2:0]` to high-level sensitivity.
/// Configures #48 to #63.
pub const ICFR_3_FIXED_VALUE: u32 = 0b01010101010101011101010101010101;
/// These fixed values must be programmed according to the Zynq7000 TRM p.236.
/// This configures `PL[7:3]` to high-level sensitivity.
/// Configures #64 to #79.
pub const ICFR_4_FIXED_VALUE: u32 = 0b01110101010101010101010101010101;
/// These fixed values must be programmed according to the Zynq7000 TRM p.236.
/// This configures `PL[15:8]` to high-level sensitivity.
/// Configures #80 to #95.
pub const ICFR_5_FIXED_VALUE: u32 = 0b00000011010101010101010101010101;
/// Helper value to target all interrupts which can be targeted to CPU 0
pub const TARGETS_ALL_CPU_0_IPTR_VAL: u32 = 0x01010101;
/// Helper value to target all interrupts which can be targeted to CPU 1
pub const TARGETS_ALL_CPU_1_IPTR_VAL: u32 = 0x02020202;
pub const ACTIVATE_ALL_SGIS_MASK_ISER: u32 = 0x0000_FFFF;
pub const ACTIVATE_ALL_PPIS_MASK_ISER: u32 = 0xF800_0000;
pub enum SpiSensitivity {
Level = 0b01,
Edge = 0b11,
}
pub enum TargetCpu {
None = 0b00,
Cpu0 = 0b01,
Cpu1 = 0b10,
Both = 0b11,
}
/// Private Peripheral Interrupt (PPI) which are private to the CPU.
#[derive(Debug, Eq, PartialEq, Clone, Copy, num_enum::TryFromPrimitive)]
#[repr(u8)]
pub enum PpiInterrupt {
GlobalTimer = 27,
/// Interrupt signal from the PL. CPU0: `IRQF2P[18]` and CPU1: `IRQF2P[19]`
NFiq = 28,
CpuPrivateTimer = 29,
/// AWDT0 and AWDT1 for each CPU.
Awdt = 30,
/// Interrupt signal from the PL. CPU0: `IRQF2P[16]` and CPU1: `IRQF2P[17]`
NIrq = 31,
}
/// Shared Peripheral Interrupt IDs.
#[derive(Debug, Eq, PartialEq, Clone, Copy, num_enum::TryFromPrimitive)]
#[repr(u8)]
pub enum SpiInterrupt {
Cpu0 = 32,
Cpu1 = 33,
L2Cache = 34,
Ocm = 35,
_Reserved0 = 36,
Pmu0 = 37,
Pmu1 = 38,
Xadc = 39,
DevC = 40,
Swdt = 41,
Ttc00 = 42,
Ttc01 = 43,
Ttc02 = 44,
DmacAbort = 45,
Dmac0 = 46,
Dmac1 = 47,
Dmac2 = 48,
Dmac3 = 49,
Smc = 50,
Qspi = 51,
Gpio = 52,
Usb0 = 53,
Eth0 = 54,
Eth0Wakeup = 55,
Sdio0 = 56,
I2c0 = 57,
Spi0 = 58,
Uart0 = 59,
Can0 = 60,
Pl0 = 61,
Pl1 = 62,
Pl2 = 63,
Pl3 = 64,
Pl4 = 65,
Pl5 = 66,
Pl6 = 67,
Pl7 = 68,
Ttc10 = 69,
Ttc11 = 70,
Ttc12 = 71,
Dmac4 = 72,
Dmac5 = 73,
Dmac6 = 74,
Dmac7 = 75,
Usb1 = 76,
Eth1 = 77,
Eth1Wakeup = 78,
Sdio1 = 79,
I2c1 = 80,
Spi1 = 81,
Uart1 = 82,
Can1 = 83,
Pl8 = 84,
Pl9 = 85,
Pl10 = 86,
Pl11 = 87,
Pl12 = 88,
Pl13 = 89,
Pl14 = 90,
Pl15 = 91,
ScuParity = 92,
}
/// Interrupt ID wrapper.
#[derive(Debug, Clone, Copy)]
pub enum Interrupt {
Sgi(usize),
Ppi(PpiInterrupt),
Spi(SpiInterrupt),
/// Detects an invalid interrupt ID.
Invalid(usize),
/// Spurious interrupt (ID# 1023).
Spurious,
}
#[derive(Debug)]
pub struct InterruptInfo {
raw_reg: InterruptSignalRegister,
interrupt: Interrupt,
cpu_id: u8,
}
impl InterruptInfo {
pub fn raw_reg(&self) -> InterruptSignalRegister {
self.raw_reg
}
pub fn cpu_id(&self) -> u8 {
self.cpu_id
}
pub fn interrupt(&self) -> Interrupt {
self.interrupt
}
}
#[derive(Debug, thiserror::Error)]
#[error("Invalid priority value {0}, range is [0, 31]")]
pub struct InvalidPriorityValue(pub u8);
#[derive(Debug, thiserror::Error)]
#[error("Invalid PL interrupt ID {0}")]
pub struct InvalidPlInterruptId(pub usize);
/// Invalid Shared Peripheral Interrupt (SPI) ID.
#[derive(Debug, thiserror::Error)]
#[error("Invalid SPI interrupt ID {0}")]
pub struct InvalidSpiInterruptId(pub usize);
/// Invalid Software Generated Interrupt (SGI) ID.
#[derive(Debug, thiserror::Error)]
#[error("Invalid SGI interrupt ID {0}")]
pub struct InvalidSgiInterruptId(pub usize);
/// Higher-level GIC controller for the Zynq7000 SoC.
///
/// The flow of using this controller is as follows:
///
/// 1. Create the controller using [Self::new_with_init]. You can use the [zynq7000::PsPeripherals]
/// structure or the [zynq7000::gic::Gicc::new_mmio] and [zynq7000::gic::Gicd::new_mmio]
/// functions to create the MMIO instances. The constructor configures the sensitivities of all
/// PL interrupts to high-level sensitivity and configures all sensitivities which are expected
/// to have a certain value. It also sets the priority mask to 0xff by calling
/// [Self::set_priority_mask] to prevent masking of the interrupts.
/// 2. Perform the configuration of the interrupt targets and the interrupt sensitivities.
/// The CPU targets are encoded with [TargetCpu] while the sensitivities are encoded by
/// the [SpiSensitivity] enum. You can use the following (helper) API to configure the
/// interrupts:
///
/// - [Self::set_spi_interrupt_cpu_target]
/// - [Self::set_all_spi_interrupt_targets_cpu0]
/// - [Self::set_pl_interrupt_sensitivity]
///
/// 3. Enable all required interrupts. The following API can be used for this:
///
/// - [Self::enable_sgi_interrupt]
/// - [Self::enable_ppi_interrupt]
/// - [Self::enable_spi_interrupt]
/// - [Self::enable_all_spi_interrupts]
/// - [Self::enable_all_ppi_interrupts]
/// - [Self::enable_all_sgi_interrupts]
/// - [Self::enable_all_interrupts]
///
/// You might also choose to enable these interrupts at run-time after the GIC has been started.
/// 4. Start the GIC by calling [Self::update_ctrl_regs] with the required settings or
/// with [Self::enable] which assumes a certain configuration.
/// 5. Enable interrupts for the Cortex-AR core by calling [Self::enable_interrupts].
///
/// For the handling of the interrupts, you can use the [GicInterruptHelper] which assumes a
/// properly configured GIC.
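///
/// # Example
///
/// A minimal sketch of the flow described above, assuming that CPU0 handles all interrupts,
/// that the MMIO instances are created via the fixed-address constructors and that the crate
/// is used as `zynq7000_hal`:
///
/// ```no_run
/// use zynq7000::gic::{Gicc, Gicd};
/// use zynq7000_hal::gic::{GicConfigurator, SpiInterrupt};
///
/// let mut gic = GicConfigurator::new_with_init(
///     unsafe { Gicc::new_mmio_fixed() },
///     unsafe { Gicd::new_mmio_fixed() },
/// );
/// gic.set_all_spi_interrupt_targets_cpu0();
/// gic.enable_spi_interrupt(SpiInterrupt::Uart0);
/// gic.enable();
/// // Safety: not called inside a critical section.
/// unsafe { gic.enable_interrupts() };
/// ```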
pub struct GicConfigurator {
pub gicc: MmioGicc<'static>,
pub gicd: MmioGicd<'static>,
}
impl GicConfigurator {
/// Creates a new GIC controller instance and calls [Self::initialize] to perform the
/// strongly recommended initialization routines for the GIC.
#[inline]
pub fn new_with_init(gicc: MmioGicc<'static>, gicd: MmioGicd<'static>) -> Self {
let mut gic = GicConfigurator { gicc, gicd };
gic.initialize();
gic
}
/// Create a new GIC controller instance without performing any initialization routines.
///
/// # Safety
///
/// This creates the GIC without performing any of the initialization routines necessary
/// for proper operation. It also circumvents ownership checks. It is mainly intended to be
/// used inside the interrupt handler.
#[inline]
pub unsafe fn steal() -> Self {
GicConfigurator {
gicc: unsafe { Gicc::new_mmio_fixed() },
gicd: unsafe { Gicd::new_mmio_fixed() },
}
}
/// Sets up the GIC by configuring the required sensitivities for the shared peripheral
/// interrupts.
///
/// With a few exceptions, the GIC expects software to set up the sensitivities
/// to fixed values. The only exceptions are the interrupts coming from the programmable
/// logic. These are configured to high-level sensitivity by this function.
/// If you need a different sensitivity, you need to update the bits using the
/// [Self::set_pl_interrupt_sensitivity] function.
#[inline]
pub fn initialize(&mut self) {
self.gicd.write_icfr_2_spi(ICFR_2_FIXED_VALUE);
self.gicd.write_icfr_3_spi(ICFR_3_FIXED_VALUE);
self.gicd.write_icfr_4_spi(ICFR_4_FIXED_VALUE);
self.gicd.write_icfr_5_spi(ICFR_5_FIXED_VALUE);
self.set_priority_mask(0xff);
}
/// Set the priority mask for the CPU.
///
/// Only interrupts with a higher priority than the mask will be accepted.
/// A lower numerical value means a higher priority. This means that the reset value 0x0
/// will mask all interrupts to the CPU while 0xff will unmask all interrupts.
///
/// Please note that the valid mask values are not simply 0 to the number of priority
/// levels: the IPRn registers always place the priority level in the upper bits of the
/// 8-bit bitfield. See p. 83 of the ARM GICv1 architecture specification.
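///
/// # Example
///
/// Sketch which unmasks all priorities; it assumes the configurator was created beforehand and
/// that the crate is used as `zynq7000_hal`.
///
/// ```no_run
/// # let mut gic = unsafe { zynq7000_hal::gic::GicConfigurator::steal() };
/// // 0xff unmasks all interrupts, 0x00 (the reset value) masks all of them.
/// gic.set_priority_mask(0xff);
/// ```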
pub fn set_priority_mask(&mut self, mask: u8) {
self.gicc
.write_pmr(PriorityRegister::new_with_raw_value(mask as u32));
}
/// Set the sensitivity of one of the Programmable Logic SPI interrupts.
///
/// These are the only SPI interrupt IDs whose sensitivity is configurable. They are set
/// to high-level sensitivity by default by the [Self::initialize] function. You can
/// use this method to override certain sensitivities.
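///
/// # Example
///
/// Sketch which switches PL interrupt 4 (SPI ID 65) to edge sensitivity; the configurator is
/// assumed to have been created beforehand.
///
/// ```no_run
/// use zynq7000_hal::gic::SpiSensitivity;
/// # let mut gic = unsafe { zynq7000_hal::gic::GicConfigurator::steal() };
/// gic.set_pl_interrupt_sensitivity(4, SpiSensitivity::Edge).unwrap();
/// ```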
#[inline]
pub fn set_pl_interrupt_sensitivity(
&mut self,
pl_int_id: usize,
sensitivity: SpiSensitivity,
) -> Result<(), InvalidPlInterruptId> {
if pl_int_id >= 16 {
return Err(InvalidPlInterruptId(pl_int_id));
}
match pl_int_id {
0..=2 => {
let pos = 26 + (pl_int_id * 2);
let mask = 0b11 << pos;
self.gicd
.modify_icfr_3_spi(|v| (v & !mask) | ((sensitivity as u32) << pos));
}
3..=7 => {
// PL3 to PL7 map to SPI IDs 64 to 68, which occupy the lowest bit pairs of ICFR_4.
let pos = (pl_int_id - 3) * 2;
let mask = 0b11 << pos;
self.gicd
.modify_icfr_4_spi(|v| (v & !mask) | ((sensitivity as u32) << pos));
}
8..=15 => {
// PL8 to PL15 map to SPI IDs 84 to 91, which start at bit offset 8 of ICFR_5.
let pos = 8 + ((pl_int_id - 8) * 2);
let mask = 0b11 << pos;
self.gicd
.modify_icfr_5_spi(|v| (v & !mask) | ((sensitivity as u32) << pos));
}
_ => unreachable!(),
}
Ok(())
}
/// Set the CPU target for a SPI interrupt.
///
/// See [Self::set_all_spi_interrupt_targets_cpu0] for a utility method to handle all
/// interrupts with one core.
#[inline]
pub fn set_spi_interrupt_cpu_target(&mut self, spi_int: SpiInterrupt, target: TargetCpu) {
let spi_int_raw = spi_int as u32;
let spi_offset_to_0 = spi_int_raw as usize - 32;
// Unwrap okay, calculated index is always valid.
self.gicd
.write_iptr_spi(
spi_offset_to_0 / 4,
(target as u32) << ((spi_offset_to_0 % 4) * 8),
)
.unwrap();
}
/// Utility function to set all SPI interrupt targets to CPU0.
///
/// This is useful if only CPU0 is active in a system, or if CPU0 handles most interrupts in
/// the system.
#[inline]
pub fn set_all_spi_interrupt_targets_cpu0(&mut self) {
for i in 0..0x10 {
self.gicd
.write_iptr_spi(i, TARGETS_ALL_CPU_0_IPTR_VAL)
.unwrap();
}
}
#[inline]
pub fn enable_sgi_interrupt(&mut self, int_id: usize) -> Result<(), InvalidSgiInterruptId> {
if int_id >= 16 {
return Err(InvalidSgiInterruptId(int_id));
}
unsafe { self.gicd.write_iser_unchecked(0, 1 << int_id) };
Ok(())
}
#[inline]
pub fn enable_all_sgi_interrupts(&mut self) {
// Unwrap okay, index is valid.
self.gicd
.modify_iser(0, |mut v| {
v |= ACTIVATE_ALL_SGIS_MASK_ISER;
v
})
.unwrap();
}
#[inline]
pub fn enable_ppi_interrupt(&mut self, ppi_int: PpiInterrupt) {
// Unwrap okay, index is valid.
self.gicd
.modify_iser(0, |mut v| {
v |= 1 << (ppi_int as u32);
v
})
.unwrap();
}
#[inline]
pub fn enable_all_ppi_interrupts(&mut self) {
unsafe {
self.gicd.modify_iser_unchecked(0, |mut v| {
v |= ACTIVATE_ALL_PPIS_MASK_ISER;
v
})
};
}
#[inline]
pub fn enable_spi_interrupt(&mut self, spi_int: SpiInterrupt) {
let spi_int_raw = spi_int as u32;
match spi_int_raw {
32..=63 => {
let bit_pos = spi_int_raw - 32;
// Unwrap okay, valid index.
self.gicd.write_iser(1, 1 << bit_pos).unwrap();
}
64..=92 => {
let bit_pos = spi_int_raw - 64;
// Unwrap okay, valid index.
self.gicd.write_iser(2, 1 << bit_pos).unwrap();
}
_ => unreachable!(),
}
}
#[inline]
pub fn enable_all_spi_interrupts(&mut self) {
self.gicd.write_iser(1, 0xFFFF_FFFF).unwrap();
self.gicd.write_iser(2, 0xFFFF_FFFF).unwrap();
}
/// Enables all interrupts by calling [Self::enable_all_sgi_interrupts],
/// [Self::enable_all_ppi_interrupts] and [Self::enable_all_spi_interrupts].
pub fn enable_all_interrupts(&mut self) {
self.enable_all_sgi_interrupts();
self.enable_all_ppi_interrupts();
self.enable_all_spi_interrupts();
}
/// Enable the GIC assuming a possibly non-secure configuration.
///
/// This function will NOT configure and enable the various interrupt sources. You need to
/// set the interrupt sensitivities and targets before calling this function.
/// This function configures the control registers with the following settings:
///
/// - CPU interface: Secure and non-secure interrupts are enabled. SBPR, FIQen and AckCtrl
/// fields set to default value 0.
/// - Distributor interface: Both non-secure and secure interrupt distribution enabled.
///
/// It calls [Self::update_ctrl_regs] to update the control registers.
/// If you need custom settings, you can call [Self::update_ctrl_regs] with your required
/// settings.
///
/// This will not enable the interrupt exception for the Cortex-AR core. You might also have
/// to call [Self::enable_interrupts] for interrupts to work.
pub fn enable(&mut self) {
self.update_ctrl_regs(
Icr::builder()
.with_sbpr(false)
.with_fiq_en(false)
.with_ack_ctrl(false)
.with_enable_non_secure(true)
.with_enable_secure(true)
.build(),
Dcr::builder()
.with_enable_non_secure(true)
.with_enable_secure(true)
.build(),
);
}
/// Enable the regular interrupt exception for the Cortex-A core by calling the
/// [interrupt::enable] function. You also need to [Self::enable] the GIC for interrupts to
/// work.
///
/// # Safety
///
/// Do not call this in a critical section.
pub unsafe fn enable_interrupts(&self) {
unsafe {
interrupt::enable();
}
}
/// Disable the interrupts for the Cortex-A core by calling the [interrupt::disable] function.
pub fn disable_interrupts(&self) {
interrupt::disable();
}
/// Update the control registers which control the security configuration and which also enable
/// the GIC.
pub fn update_ctrl_regs(&mut self, icr: Icr, dcr: Dcr) {
self.gicc.write_icr(icr);
self.gicd.write_dcr(dcr);
}
}
/// Helper structure which should only be used inside the interrupt handler once the GIC has
/// been configured with the [GicConfigurator].
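///
/// # Example
///
/// Sketch of a typical interrupt handler body; the surrounding exception and vector table setup
/// is application specific, and the crate is assumed to be used as `zynq7000_hal`.
///
/// ```no_run
/// use zynq7000_hal::gic::{GicInterruptHelper, Interrupt};
///
/// let mut gic_helper = GicInterruptHelper::new();
/// let info = gic_helper.acknowledge_interrupt();
/// match info.interrupt() {
///     Interrupt::Sgi(_id) => { /* handle software-generated interrupt */ }
///     Interrupt::Ppi(_ppi) => { /* handle private peripheral interrupt */ }
///     Interrupt::Spi(_spi) => { /* handle shared peripheral interrupt */ }
///     Interrupt::Invalid(_) | Interrupt::Spurious => (),
/// }
/// gic_helper.end_of_interrupt(info);
/// ```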
pub struct GicInterruptHelper(MmioGicc<'static>);
impl GicInterruptHelper {
/// Create the interrupt helper with the fixed GICC MMIO instance.
pub const fn new() -> Self {
GicInterruptHelper(unsafe { Gicc::new_mmio_fixed() })
}
/// Acknowledges an interrupt by reading the IAR register and returning the interrupt context
/// information structure.
///
/// This should be called at the start of an interrupt handler.
pub fn acknowledge_interrupt(&mut self) -> InterruptInfo {
let iar = self.0.read_iar();
let int_id = iar.ack_int_id().as_u32();
let interrupt = match int_id {
0..=15 => Interrupt::Sgi(int_id as usize),
27..=31 => Interrupt::Ppi(PpiInterrupt::try_from(int_id as u8).unwrap()),
32..=92 => Interrupt::Spi(SpiInterrupt::try_from(int_id as u8).unwrap()),
SPURIOUS_INTERRUPT_ID => Interrupt::Spurious,
_ => Interrupt::Invalid(int_id as usize),
};
InterruptInfo {
interrupt,
cpu_id: iar.cpu_id().as_u8(),
raw_reg: iar,
}
}
/// Acknowledges the end of an interrupt by writing the EOIR register of the GICC.
///
/// This should be called at the end of an interrupt handler.
pub fn end_of_interrupt(&mut self, irq_info: InterruptInfo) {
self.0.write_eoir(irq_info.raw_reg())
}
}
impl Default for GicInterruptHelper {
fn default() -> Self {
Self::new()
}
}

46
zynq7000-hal/src/gpio/emio.rs Normal file

@ -0,0 +1,46 @@
//! EMIO (Extended Multiplexed I/O) resource management module.
use zynq7000::gpio::MmioGpio;
pub use crate::gpio::PinState;
pub struct EmioPin {
offset: usize,
}
impl EmioPin {
/// This offset ranges from 0 to 63.
pub fn offset(&self) -> usize {
self.offset
}
}
pub struct Pins {
emios: [Option<EmioPin>; 64],
}
impl Pins {
/// Create a new EMIO pin structure.
///
/// This structure is supposed to be used as a singleton. It will configure all
/// EMIO pins as inputs. If you want to retrieve individual pins without this structure,
/// use [EmioPin::steal] instead.
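///
/// # Example
///
/// Sketch, assuming this module is reachable as `zynq7000_hal::gpio::emio` and that the GPIO
/// MMIO block is created via the fixed-address constructor:
///
/// ```no_run
/// use zynq7000::gpio::Gpio;
/// use zynq7000_hal::gpio::emio::Pins;
///
/// let mut emio_pins = Pins::new(unsafe { Gpio::new_mmio_fixed() });
/// let emio_3 = emio_pins.take(3).unwrap();
/// assert_eq!(emio_3.offset(), 3);
/// emio_pins.give(emio_3);
/// ```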
pub fn new(mut mmio: MmioGpio) -> Self {
let mut emios = [const { None }; 64];
// Configure all EMIO pins as inputs.
mmio.bank_2().write_dirm(0);
mmio.bank_3().write_dirm(0);
(0..64).for_each(|i| {
emios[i] = Some(EmioPin { offset: i });
});
Self { emios }
}
pub fn take(&mut self, offset: usize) -> Option<EmioPin> {
self.emios[offset].take()
}
pub fn give(&mut self, emio: EmioPin) {
self.emios[emio.offset].replace(emio);
}
}

345
zynq7000-hal/src/gpio/ll.rs Normal file

@ -0,0 +1,345 @@
//! Low-level GPIO access module.
use embedded_hal::digital::PinState;
use zynq7000::gpio::{Gpio, MaskedOutput, MmioGpio};
use crate::slcr::Slcr;
use super::{mio::MuxConf, PinIsOutputOnly};
#[derive(Debug, Clone, Copy)]
pub enum PinOffset {
Mio(usize),
Emio(usize),
}
impl PinOffset {
/// Returns [None] if the offset is larger than 53.
pub const fn new_for_mio(offset: usize) -> Option<Self> {
if offset > 53 {
return None;
}
Some(PinOffset::Mio(offset))
}
/// Returns [None] if the offset is larger than 63.
pub const fn new_for_emio(offset: usize) -> Option<Self> {
if offset > 63 {
return None;
}
Some(PinOffset::Emio(offset))
}
pub fn is_mio(&self) -> bool {
match self {
PinOffset::Mio(_) => true,
PinOffset::Emio(_) => false,
}
}
}
impl PinOffset {
pub fn offset(&self) -> usize {
match self {
PinOffset::Mio(offset) => *offset,
PinOffset::Emio(offset) => *offset,
}
}
}
pub struct LowLevelGpio {
offset: PinOffset,
regs: MmioGpio<'static>,
}
impl LowLevelGpio {
pub fn new(offset: PinOffset) -> Self {
Self {
offset,
regs: unsafe { Gpio::new_mmio_fixed() },
}
}
pub fn offset(&self) -> PinOffset {
self.offset
}
/// Convert the pin into an output pin.
pub fn configure_as_output_push_pull(&mut self, init_level: PinState) {
let (offset, dirm, outen) = self.get_dirm_outen_regs_and_local_offset();
if self.offset.is_mio() {
// Tri-state bit must be 0 for the output driver to work.
self.reconfigure_slcr_mio_cfg(false, None, Some(MuxConf::new_for_gpio()));
}
let mut curr_dirm = unsafe { core::ptr::read_volatile(dirm) };
curr_dirm |= 1 << offset;
unsafe { core::ptr::write_volatile(dirm, curr_dirm) };
let mut curr_outen = unsafe { core::ptr::read_volatile(outen) };
curr_outen |= 1 << offset;
unsafe { core::ptr::write_volatile(outen, curr_outen) };
self.write_state(init_level);
}
/// Convert the pin into an output pin with open drain emulation.
///
/// This works by only enabling the output driver when the pin is driven low and letting
/// the pin float when it is driven high. For MIO pins, an internal pull-up can optionally be
/// enabled to pull the pin to a defined state when it is not driven. This allows something like
/// 1-wire bus operation because other devices can pull the pin low as well.
///
/// For EMIO pins, the pull-up and the IO buffer necessary for open-drain usage must be
/// provided by the FPGA design.
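///
/// # Example
///
/// Sketch, assuming MIO pin 10 is routed to an open-drain bus (e.g. 1-wire), the internal
/// pull-up should be used, and the module is reachable as `zynq7000_hal::gpio::ll`:
///
/// ```no_run
/// use embedded_hal::digital::PinState;
/// use zynq7000_hal::gpio::ll::{LowLevelGpio, PinOffset};
///
/// let mut pin = LowLevelGpio::new(PinOffset::new_for_mio(10).unwrap());
/// pin.configure_as_output_open_drain(PinState::High, true);
/// // Drive the bus low, then release it again.
/// pin.set_low();
/// pin.set_high();
/// ```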
pub fn configure_as_output_open_drain(
&mut self,
init_level: PinState,
with_internal_pullup: bool,
) {
let (offset, dirm, outen) = self.get_dirm_outen_regs_and_local_offset();
if self.offset.is_mio() {
// Tri-state bit must be 0 for the output driver to work. Enable the pullup pin.
self.reconfigure_slcr_mio_cfg(
false,
Some(with_internal_pullup),
Some(MuxConf::new_for_gpio()),
);
}
let mut curr_dirm = unsafe { core::ptr::read_volatile(dirm) };
curr_dirm |= 1 << offset;
unsafe { core::ptr::write_volatile(dirm, curr_dirm) };
// Disable the output driver depending on initial level.
let mut curr_outen = unsafe { core::ptr::read_volatile(outen) };
if init_level == PinState::High {
curr_outen &= !(1 << offset);
} else {
curr_outen |= 1 << offset;
self.write_state(init_level);
}
unsafe { core::ptr::write_volatile(outen, curr_outen) };
}
/// Convert the pin into a floating input pin.
pub fn configure_as_input_floating(&mut self) -> Result<(), PinIsOutputOnly> {
if self.offset.is_mio() {
let offset_raw = self.offset.offset();
if offset_raw == 7 || offset_raw == 8 {
return Err(PinIsOutputOnly);
}
self.reconfigure_slcr_mio_cfg(true, Some(false), Some(MuxConf::new_for_gpio()));
}
self.configure_input_pin();
Ok(())
}
/// Convert the pin into an input pin with a pull up.
pub fn configure_as_input_with_pull_up(&mut self) -> Result<(), PinIsOutputOnly> {
if self.offset.is_mio() {
let offset_raw = self.offset.offset();
if offset_raw == 7 || offset_raw == 8 {
return Err(PinIsOutputOnly);
}
self.reconfigure_slcr_mio_cfg(true, Some(true), Some(MuxConf::new_for_gpio()));
}
self.configure_input_pin();
Ok(())
}
/// Convert the pin into an IO peripheral pin.
pub fn configure_as_io_periph_pin(&mut self, mux_conf: MuxConf, pullup: Option<bool>) {
self.reconfigure_slcr_mio_cfg(false, pullup, Some(mux_conf));
}
#[inline]
pub fn is_low(&self) -> bool {
let (offset, in_reg) = self.get_data_in_reg_and_local_offset();
let in_val = unsafe { core::ptr::read_volatile(in_reg) };
((in_val >> offset) & 0b1) == 0
}
#[inline]
pub fn is_high(&self) -> bool {
!self.is_low()
}
#[inline]
pub fn is_set_low(&self) -> bool {
let (offset, out_reg) = self.get_data_out_reg_and_local_offset();
let out_val = unsafe { core::ptr::read_volatile(out_reg) };
((out_val >> offset) & 0b1) == 0
}
#[inline]
pub fn is_set_high(&self) -> bool {
!self.is_set_low()
}
#[inline]
pub fn enable_output_driver(&mut self) {
let (offset, _dirm, outen) = self.get_dirm_outen_regs_and_local_offset();
let mut outen_reg = unsafe { core::ptr::read_volatile(outen) };
outen_reg |= 1 << offset;
unsafe { core::ptr::write_volatile(outen, outen_reg) };
}
#[inline]
pub fn disable_output_driver(&mut self) {
let (offset, _dirm, outen) = self.get_dirm_outen_regs_and_local_offset();
let mut outen_reg = unsafe { core::ptr::read_volatile(outen) };
outen_reg &= !(1 << offset);
unsafe { core::ptr::write_volatile(outen, outen_reg) };
}
#[inline]
pub fn set_low(&mut self) {
self.write_state(PinState::Low)
}
#[inline]
pub fn set_high(&mut self) {
self.write_state(PinState::High)
}
#[inline]
fn write_state(&mut self, level: PinState) {
let (offset_in_reg, masked_out_ptr) = self.get_masked_out_reg_and_local_offset();
unsafe {
core::ptr::write_volatile(
masked_out_ptr,
MaskedOutput::builder()
.with_mask(!(1 << offset_in_reg))
.with_output((level as u16) << offset_in_reg)
.build(),
);
}
}
fn reconfigure_slcr_mio_cfg(
&mut self,
tristate: bool,
pullup: Option<bool>,
mux_conf: Option<MuxConf>,
) {
let raw_offset = self.offset.offset();
// Safety: We only modify the MIO config of the pin.
let mut slcr_wrapper = unsafe { Slcr::steal() };
// We read first, because writing also requires unlocking the SLCR.
// This allows the user to configure the SLCR themselves and avoids unnecessary
// re-configuration, which might be unsafe at run-time.
let mio_cfg = slcr_wrapper.regs().read_mio_pins(raw_offset).unwrap();
if (pullup.is_some() && mio_cfg.pullup() != pullup.unwrap())
|| (mux_conf.is_some() && MuxConf::from(mio_cfg) != mux_conf.unwrap())
|| tristate != mio_cfg.tri_enable()
{
slcr_wrapper.modify(|mut_slcr| {
mut_slcr
.modify_mio_pins(raw_offset, |mut val| {
if let Some(pullup) = pullup {
val.set_pullup(pullup);
}
if let Some(mux_conf) = mux_conf {
val.set_l0_sel(mux_conf.l0_sel());
val.set_l1_sel(mux_conf.l1_sel());
val.set_l2_sel(mux_conf.l2_sel());
val.set_l3_sel(mux_conf.l3_sel());
}
val.set_tri_enable(tristate);
val
})
.unwrap();
});
}
}
fn configure_input_pin(&mut self) {
let (offset, dirm, outen) = self.get_dirm_outen_regs_and_local_offset();
let mut curr_dirm = unsafe { core::ptr::read_volatile(dirm) };
curr_dirm &= !(1 << offset);
unsafe { core::ptr::write_volatile(dirm, curr_dirm) };
let mut curr_outen = unsafe { core::ptr::read_volatile(outen) };
curr_outen &= !(1 << offset);
unsafe { core::ptr::write_volatile(outen, curr_outen) };
}
#[inline(always)]
fn get_data_in_reg_and_local_offset(&self) -> (usize, *mut u32) {
match self.offset {
PinOffset::Mio(offset) => match offset {
0..=31 => (offset, self.regs.pointer_to_in_0()),
32..=53 => (offset - 32, self.regs.pointer_to_in_1()),
_ => panic!("invalid MIO pin offset"),
},
PinOffset::Emio(offset) => match offset {
0..=31 => (offset, self.regs.pointer_to_in_2()),
32..=63 => (offset - 32, self.regs.pointer_to_in_3()),
_ => panic!("invalid EMIO pin offset"),
},
}
}
#[inline(always)]
fn get_data_out_reg_and_local_offset(&self) -> (usize, *mut u32) {
match self.offset {
PinOffset::Mio(offset) => match offset {
0..=31 => (offset, self.regs.pointer_to_out_0()),
32..=53 => (offset - 32, self.regs.pointer_to_out_1()),
_ => panic!("invalid MIO pin offset"),
},
PinOffset::Emio(offset) => match offset {
0..=31 => (offset, self.regs.pointer_to_out_2()),
32..=63 => (offset - 32, self.regs.pointer_to_out_3()),
_ => panic!("invalid EMIO pin offset"),
},
}
}
#[inline(always)]
fn get_dirm_outen_regs_and_local_offset(&self) -> (usize, *mut u32, *mut u32) {
match self.offset {
PinOffset::Mio(offset) => match offset {
0..=31 => (
offset,
self.regs.bank_0_shared().pointer_to_dirm(),
self.regs.bank_0_shared().pointer_to_out_en(),
),
32..=53 => (
offset - 32,
self.regs.bank_1_shared().pointer_to_dirm(),
self.regs.bank_1_shared().pointer_to_out_en(),
),
_ => panic!("invalid MIO pin offset"),
},
PinOffset::Emio(offset) => match offset {
0..=31 => (
offset,
self.regs.bank_2_shared().pointer_to_dirm(),
self.regs.bank_2_shared().pointer_to_out_en(),
),
32..=63 => (
offset - 32,
self.regs.bank_3_shared().pointer_to_dirm(),
self.regs.bank_3_shared().pointer_to_out_en(),
),
_ => panic!("invalid EMIO pin offset"),
},
}
}
#[inline(always)]
fn get_masked_out_reg_and_local_offset(&mut self) -> (usize, *mut MaskedOutput) {
match self.offset {
PinOffset::Mio(offset) => match offset {
0..=15 => (offset, self.regs.pointer_to_masked_out_0_lsw()),
16..=31 => (offset - 16, self.regs.pointer_to_masked_out_0_msw()),
32..=47 => (offset - 32, self.regs.pointer_to_masked_out_1_lsw()),
48..=53 => (offset - 48, self.regs.pointer_to_masked_out_1_msw()),
_ => panic!("invalid MIO pin offset"),
},
PinOffset::Emio(offset) => match offset {
0..=15 => (offset, self.regs.pointer_to_masked_out_2_lsw()),
16..=31 => (offset - 16, self.regs.pointer_to_masked_out_2_msw()),
32..=47 => (offset - 32, self.regs.pointer_to_masked_out_3_lsw()),
48..=63 => (offset - 48, self.regs.pointer_to_masked_out_3_msw()),
_ => panic!("invalid EMIO pin offset"),
},
}
}
}

364
zynq7000-hal/src/gpio/mio.rs Normal file

@ -0,0 +1,364 @@
//! Multiplexed I/O (MIO) module.
//!
//! This module provides a [singleton][Pins] for the resource management of all MIO pins. It
//! also allows associating the pins, their modes and their IDs with the peripherals they are able
//! to serve.
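//!
//! # Example
//!
//! Sketch, assuming this module is reachable as `zynq7000_hal::gpio::mio` and that the GPIO MMIO
//! block is created via the fixed-address constructor:
//!
//! ```no_run
//! use zynq7000::gpio::Gpio;
//! use zynq7000_hal::gpio::mio::Pins;
//!
//! let pins = Pins::new(unsafe { Gpio::new_mmio_fixed() });
//! // Typed pins like `pins.mio48` can now be handed to peripheral driver constructors,
//! // depending on the board routing.
//! let _mio48 = pins.mio48;
//! ```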
use arbitrary_int::{u2, u3};
use zynq7000::gpio::MmioGpio;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct MuxConf {
l3: u3,
l2: u2,
l1: bool,
l0: bool,
}
impl From<zynq7000::slcr::mio::Config> for MuxConf {
fn from(value: zynq7000::slcr::mio::Config) -> Self {
Self::new(
value.l0_sel(),
value.l1_sel(),
value.l2_sel(),
value.l3_sel(),
)
}
}
impl MuxConf {
#[inline]
pub const fn new(l0: bool, l1: bool, l2: u2, l3: u3) -> Self {
Self { l3, l2, l1, l0 }
}
pub const fn new_with_l3(l3: u3) -> Self {
Self::new(false, false, u2::new(0b00), l3)
}
pub const fn new_for_gpio() -> Self {
Self::new(false, false, u2::new(0), u3::new(0))
}
#[inline]
pub const fn l0_sel(&self) -> bool {
self.l0
}
#[inline]
pub const fn l1_sel(&self) -> bool {
self.l1
}
#[inline]
pub const fn l2_sel(&self) -> u2 {
self.l2
}
#[inline]
pub const fn l3_sel(&self) -> u3 {
self.l3
}
}
pub trait PinId {
const OFFSET: usize;
}
macro_rules! pin_id {
($Id:ident, $num:literal) => {
// Need paste macro to use ident in doc attribute
paste::paste! {
#[doc = "Pin ID representing pin " $Id]
#[derive(Debug)]
pub enum $Id {}
impl $crate::sealed::Sealed for $Id {}
impl PinId for $Id {
const OFFSET: usize = $num;
}
}
};
}
pin_id!(Mio0, 0);
pin_id!(Mio1, 1);
pin_id!(Mio2, 2);
pin_id!(Mio3, 3);
pin_id!(Mio4, 4);
pin_id!(Mio5, 5);
pin_id!(Mio6, 6);
pin_id!(Mio7, 7);
pin_id!(Mio8, 8);
pin_id!(Mio9, 9);
pin_id!(Mio10, 10);
pin_id!(Mio11, 11);
pin_id!(Mio12, 12);
pin_id!(Mio13, 13);
pin_id!(Mio14, 14);
pin_id!(Mio15, 15);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio16, 16);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio17, 17);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio18, 18);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio19, 19);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio20, 20);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio21, 21);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio22, 22);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio23, 23);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio24, 24);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio25, 25);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio26, 26);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio27, 27);
pin_id!(Mio28, 28);
pin_id!(Mio29, 29);
pin_id!(Mio30, 30);
pin_id!(Mio31, 31);
pin_id!(Mio32, 32);
pin_id!(Mio33, 33);
pin_id!(Mio34, 34);
pin_id!(Mio35, 35);
pin_id!(Mio36, 36);
pin_id!(Mio37, 37);
pin_id!(Mio38, 38);
pin_id!(Mio39, 39);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio40, 40);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio41, 41);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio42, 42);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio43, 43);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio44, 44);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio45, 45);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio46, 46);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio47, 47);
pin_id!(Mio48, 48);
pin_id!(Mio49, 49);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio50, 50);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pin_id!(Mio51, 51);
pin_id!(Mio52, 52);
pin_id!(Mio53, 53);
pub trait MioPinMarker {
fn offset(&self) -> usize;
}
pub struct Pin<I: PinId> {
phantom: core::marker::PhantomData<I>,
}
impl<I: PinId> Pin<I> {
#[inline]
const unsafe fn new() -> Self {
Self {
//pin: LowLevelPin::new(I::OFFSET),
phantom: core::marker::PhantomData,
}
}
/// Steal a typed MIO pin.
///
/// Usually, you can just use the MIO pin members of the [Pins] structure.
/// However, if you pass the pins into a consuming peripheral driver which performs
/// immediate type erasure, and you require the pins for/after a re-configuration
/// of the system, you can unsafely steal the pin. This function will NOT perform any
/// re-configuration.
///
/// # Safety
///
/// This allows creating multiple instances of the same pin, which can lead to
/// data races on concurrent access.
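///
/// # Example
///
/// Sketch, assuming the module path `zynq7000_hal::gpio::mio`:
///
/// ```no_run
/// use zynq7000_hal::gpio::mio::{Mio7, Pin};
///
/// // Safety: no other instance of MIO pin 7 must be in active use.
/// let mio7: Pin<Mio7> = unsafe { Pin::steal() };
/// ```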
#[inline]
pub const unsafe fn steal() -> Self {
unsafe { Self::new() }
}
}
pub struct Pins {
pub mio0: Pin<Mio0>,
pub mio1: Pin<Mio1>,
pub mio2: Pin<Mio2>,
pub mio3: Pin<Mio3>,
pub mio4: Pin<Mio4>,
pub mio5: Pin<Mio5>,
pub mio6: Pin<Mio6>,
pub mio7: Pin<Mio7>,
pub mio8: Pin<Mio8>,
pub mio9: Pin<Mio9>,
pub mio10: Pin<Mio10>,
pub mio11: Pin<Mio11>,
pub mio12: Pin<Mio12>,
pub mio13: Pin<Mio13>,
pub mio14: Pin<Mio14>,
pub mio15: Pin<Mio15>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio16: Pin<Mio16>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio17: Pin<Mio17>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio18: Pin<Mio18>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio19: Pin<Mio19>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio20: Pin<Mio20>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio21: Pin<Mio21>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio22: Pin<Mio22>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio23: Pin<Mio23>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio24: Pin<Mio24>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio25: Pin<Mio25>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio26: Pin<Mio26>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio27: Pin<Mio27>,
pub mio28: Pin<Mio28>,
pub mio29: Pin<Mio29>,
pub mio30: Pin<Mio30>,
pub mio31: Pin<Mio31>,
pub mio32: Pin<Mio32>,
pub mio33: Pin<Mio33>,
pub mio34: Pin<Mio34>,
pub mio35: Pin<Mio35>,
pub mio36: Pin<Mio36>,
pub mio37: Pin<Mio37>,
pub mio38: Pin<Mio38>,
pub mio39: Pin<Mio39>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio40: Pin<Mio40>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio41: Pin<Mio41>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio42: Pin<Mio42>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio43: Pin<Mio43>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio44: Pin<Mio44>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio45: Pin<Mio45>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio46: Pin<Mio46>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio47: Pin<Mio47>,
pub mio48: Pin<Mio48>,
pub mio49: Pin<Mio49>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio50: Pin<Mio50>,
#[cfg(not(feature = "7z010-7z007s-clg225"))]
pub mio51: Pin<Mio51>,
pub mio52: Pin<Mio52>,
pub mio53: Pin<Mio53>,
}
impl Pins {
pub const fn new(_mmio: MmioGpio) -> Self {
Self {
mio0: unsafe { Pin::new() },
mio1: unsafe { Pin::new() },
mio2: unsafe { Pin::new() },
mio3: unsafe { Pin::new() },
mio4: unsafe { Pin::new() },
mio5: unsafe { Pin::new() },
mio6: unsafe { Pin::new() },
mio7: unsafe { Pin::new() },
mio8: unsafe { Pin::new() },
mio9: unsafe { Pin::new() },
mio10: unsafe { Pin::new() },
mio11: unsafe { Pin::new() },
mio12: unsafe { Pin::new() },
mio13: unsafe { Pin::new() },
mio14: unsafe { Pin::new() },
mio15: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio16: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio17: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio18: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio19: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio20: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio21: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio22: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio23: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio24: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio25: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio26: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio27: unsafe { Pin::new() },
mio28: unsafe { Pin::new() },
mio29: unsafe { Pin::new() },
mio30: unsafe { Pin::new() },
mio31: unsafe { Pin::new() },
mio32: unsafe { Pin::new() },
mio33: unsafe { Pin::new() },
mio34: unsafe { Pin::new() },
mio35: unsafe { Pin::new() },
mio36: unsafe { Pin::new() },
mio37: unsafe { Pin::new() },
mio38: unsafe { Pin::new() },
mio39: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio40: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio41: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio42: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio43: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio44: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio45: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio46: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio47: unsafe { Pin::new() },
mio48: unsafe { Pin::new() },
mio49: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio50: unsafe { Pin::new() },
#[cfg(not(feature = "7z010-7z007s-clg225"))]
mio51: unsafe { Pin::new() },
mio52: unsafe { Pin::new() },
mio53: unsafe { Pin::new() },
}
}
}
impl<I: PinId> MioPinMarker for Pin<I> {
fn offset(&self) -> usize {
I::OFFSET
}
}

View File

@ -0,0 +1,406 @@
//! GPIO support module for the Zynq7000 SoC.
//!
//! This module contains the MIO and EMIO pin resource management singletons as well as abstractions
//! to use these pins as GPIOs.
//!
//! # Examples
//!
//! - [Blinky](https://egit.irs.uni-stuttgart.de/rust/zynq7000-rs/src/branch/main/examples/simple/src/main.rs)
//! - [Logger example](https://egit.irs.uni-stuttgart.de/rust/zynq7000-rs/src/branch/main/examples/simple/src/bin/logger.rs)
//! which uses MIO pins for the UART.
pub mod emio;
pub mod ll;
pub mod mio;
use core::convert::Infallible;
use ll::PinOffset;
use mio::{MioPinMarker, MuxConf};
use crate::gpio::ll::LowLevelGpio;
use crate::{enable_amba_peripheral_clock, slcr::Slcr};
pub use embedded_hal::digital::PinState;
use zynq7000::{gpio::MmioGpio, slcr::reset::GpioClockReset};
#[derive(Debug, thiserror::Error)]
#[error("MIO pins 7 and 8 can only be output pins")]
pub struct PinIsOutputOnly;
/// GPIO pin singleton to allow resource management of both MIO and EMIO pins.
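///
/// A minimal construction sketch. How the [MmioGpio] block is obtained from the PAC is an
/// assumption here (`zynq7000::gpio::Gpio::new_mmio_fixed` is used for illustration):
///
/// ```ignore
/// use zynq7000_hal::gpio::{GpioPins, Output, PinState};
///
/// let gpio_pins = GpioPins::new(unsafe { zynq7000::gpio::Gpio::new_mmio_fixed() });
/// // MIO7 is an output-only pin, so it is a natural candidate for a push-pull output.
/// let mut out = Output::new_for_mio(gpio_pins.mio.mio7, PinState::Low);
/// out.set_high();
/// ```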
pub struct GpioPins {
pub mio: mio::Pins,
pub emio: emio::Pins,
}
impl GpioPins {
pub fn new(gpio: MmioGpio) -> Self {
enable_amba_peripheral_clock(crate::PeripheralSelect::Gpio);
Self {
mio: mio::Pins::new(unsafe { gpio.clone() }),
emio: emio::Pins::new(gpio),
}
}
}
/// Reset the GPIO peripheral using the SLCR reset register for GPIO.
#[inline]
pub fn reset() {
unsafe {
Slcr::with(|regs| {
regs.reset_ctrl()
.write_gpio(GpioClockReset::builder().with_gpio_cpu1x_rst(true).build());
// Keep it in reset for one cycle. It is not clear whether this is strictly necessary.
cortex_ar::asm::nop();
regs.reset_ctrl()
.write_gpio(GpioClockReset::builder().with_gpio_cpu1x_rst(false).build());
});
}
}
/// Enumeration of all pin modes. Some of the modes are only valid for MIO pins.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum PinMode {
OutputPushPull,
/// See the [Flex] documentation for more information on running an output pin in
/// open-drain configuration.
OutputOpenDrain,
InputFloating,
InputPullUp,
/// MIO-only peripheral pin configuration
MioIoPeriph(MuxConf),
}
#[derive(Debug, thiserror::Error)]
#[error("invalid pin mode for MIO pin: {0:?}")]
pub struct InvalidPinMode(pub PinMode);
impl embedded_hal::digital::Error for InvalidPinMode {
fn kind(&self) -> embedded_hal::digital::ErrorKind {
embedded_hal::digital::ErrorKind::Other
}
}
pub trait IoPinProvider {
fn mode(&self) -> PinMode;
fn offset(&self) -> PinOffset;
#[inline]
fn is_input(&self) -> bool {
matches!(self.mode(), PinMode::InputFloating | PinMode::InputPullUp)
}
#[inline]
fn is_output(&self) -> bool {
matches!(
self.mode(),
PinMode::OutputPushPull | PinMode::OutputOpenDrain
)
}
#[inline]
fn is_io_periph(&self) -> bool {
matches!(self.mode(), PinMode::MioIoPeriph(_))
}
}
/// Flex pin abstraction which can be dynamically re-configured.
///
/// The following modes can be configured at run-time:
///
/// - Input Floating
/// - Input with Pull-Up
/// - Output Push-Pull
/// - Output Open-Drain.
///
/// Flex pins are always floating input pins after construction except for MIO7 and MIO8,
/// which are push-pull output pins with an initial low level.
///
/// ## Notes on [PinMode::OutputOpenDrain] configuration
///
/// For MIO, the open-drain functionality is simulated by only enabling the output driver
/// when driving the pin low, and leaving the pin floating when the pin is driven high.
/// The internal pull-up will also be enabled to have a high state if the pin is not driven.
///
/// For EMIO, the pull-up and the IO buffer need to be provided in the FPGA design for the
/// used EMIO pins because the EMIO pins are just wires routed into the FPGA fabric.
/// The software will still perform the necessary logic when driving the pin low or high.
///
/// ## Notes on [PinMode::InputPullUp] configuration
///
/// For EMIO, the pull-up wiring needs to be provided by the FPGA design.
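///
/// # Example
///
/// A minimal sketch of re-configuring a pin at run-time. The `gpio_pins` singleton is
/// assumed to have been created via [GpioPins::new] beforehand:
///
/// ```ignore
/// use zynq7000_hal::gpio::{Flex, PinState};
///
/// let mut flex = Flex::new_for_mio(gpio_pins.mio.mio0);
/// let _initial_level_is_low = flex.is_low();
/// // Re-configure the same pin as a push-pull output driving low.
/// flex.configure_as_output_push_pull(PinState::Low);
/// flex.set_high();
/// ```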
pub struct Flex {
ll: LowLevelGpio,
mode: PinMode,
}
impl Flex {
pub fn new_for_mio<I: mio::PinId>(_pin: mio::Pin<I>) -> Self {
let mut ll = LowLevelGpio::new(PinOffset::Mio(I::OFFSET));
// MIO7 and MIO8 are output-only pins and start out as push-pull outputs driving low,
// so the tracked mode must reflect that instead of the default floating input mode.
if I::OFFSET == 7 || I::OFFSET == 8 {
ll.configure_as_output_push_pull(PinState::Low);
return Self {
ll,
mode: PinMode::OutputPushPull,
};
}
ll.configure_as_input_floating().unwrap();
Self {
ll,
mode: PinMode::InputFloating,
}
}
pub fn new_for_emio(pin: emio::EmioPin) -> Self {
let mut ll = LowLevelGpio::new(PinOffset::new_for_emio(pin.offset()).unwrap());
ll.configure_as_input_floating().unwrap();
Self {
ll,
mode: PinMode::InputFloating,
}
}
pub fn configure_as_input_floating(&mut self) -> Result<(), PinIsOutputOnly> {
self.mode = PinMode::InputFloating;
self.ll.configure_as_input_floating()
}
pub fn configure_as_input_with_pull_up(&mut self) -> Result<(), PinIsOutputOnly> {
self.mode = PinMode::InputPullUp;
self.ll.configure_as_input_with_pull_up()
}
pub fn configure_as_output_push_pull(&mut self, level: PinState) {
self.mode = PinMode::OutputPushPull;
self.ll.configure_as_output_push_pull(level);
}
pub fn configure_as_output_open_drain(&mut self, level: PinState, with_internal_pullup: bool) {
self.mode = PinMode::OutputOpenDrain;
self.ll.configure_as_output_open_drain(level, with_internal_pullup);
}
/// If the pin is configured as an input pin, this function does nothing.
pub fn set_high(&mut self) {
if self.is_input() {
return;
}
if self.mode == PinMode::OutputOpenDrain {
self.ll.disable_output_driver();
} else {
self.ll.set_high();
}
}
/// If the pin is configured as an input pin, this function does nothing.
pub fn set_low(&mut self) {
if self.is_input() {
return;
}
self.ll.set_low();
if self.mode == PinMode::OutputOpenDrain {
self.ll.enable_output_driver();
}
}
/// Reads the input state of the pin, regardless of configured mode.
#[inline]
pub fn is_high(&self) -> bool {
self.ll.is_high()
}
/// Reads the input state of the pin, regardless of configured mode.
#[inline]
pub fn is_low(&self) -> bool {
!self.ll.is_high()
}
/// If the pin is not configured as a stateful output pin like Output Push-Pull, the result
/// of this function is undefined.
#[inline]
pub fn is_set_low(&self) -> bool {
self.ll.is_set_low()
}
/// If the pin is not configured as a stateful output pin like Output Push-Pull, the result
/// of this function is undefined.
#[inline]
pub fn is_set_high(&self) -> bool {
!self.is_set_low()
}
}
impl IoPinProvider for Flex {
fn mode(&self) -> PinMode {
self.mode
}
fn offset(&self) -> PinOffset {
self.ll.offset()
}
}
impl embedded_hal::digital::ErrorType for Flex {
type Error = Infallible;
}
impl embedded_hal::digital::InputPin for Flex {
/// Reads the input state of the pin, regardless of configured mode.
#[inline]
fn is_high(&mut self) -> Result<bool, Self::Error> {
Ok(self.ll.is_high())
}
/// Reads the input state of the pin, regardless of configured mode.
#[inline]
fn is_low(&mut self) -> Result<bool, Self::Error> {
Ok(self.ll.is_low())
}
}
impl embedded_hal::digital::OutputPin for Flex {
/// If the pin is configured as an input pin, this function does nothing.
#[inline]
fn set_low(&mut self) -> Result<(), Self::Error> {
self.set_low();
Ok(())
}
/// If the pin is configured as an input pin, this function does nothing.
#[inline]
fn set_high(&mut self) -> Result<(), Self::Error> {
self.set_high();
Ok(())
}
}
impl embedded_hal::digital::StatefulOutputPin for Flex {
/// If the pin is not configured as a stateful output pin like Output Push-Pull, the result
/// of this function is undefined.
#[inline]
fn is_set_high(&mut self) -> Result<bool, Self::Error> {
Ok(self.ll.is_set_high())
}
/// If the pin is not configured as a stateful output pin like Output Push-Pull, the result
/// of this function is undefined.
#[inline]
fn is_set_low(&mut self) -> Result<bool, Self::Error> {
Ok(self.ll.is_set_low())
}
}
/// Push-Pull output pin.
pub struct Output(LowLevelGpio);
impl Output {
pub fn new_for_mio<I: mio::PinId>(_pin: mio::Pin<I>, init_level: PinState) -> Self {
let mut low_level = LowLevelGpio::new(PinOffset::Mio(I::OFFSET));
low_level.configure_as_output_push_pull(init_level);
Self(low_level)
}
pub fn new_for_emio(pin: emio::EmioPin, init_level: PinState) -> Self {
let mut low_level = LowLevelGpio::new(PinOffset::new_for_emio(pin.offset()).unwrap());
low_level.configure_as_output_push_pull(init_level);
Self(low_level)
}
#[inline]
pub fn set_low(&mut self) {
self.0.set_low();
}
#[inline]
pub fn set_high(&mut self) {
self.0.set_high();
}
}
impl embedded_hal::digital::ErrorType for Output {
type Error = Infallible;
}
impl embedded_hal::digital::OutputPin for Output {
fn set_low(&mut self) -> Result<(), Self::Error> {
self.0.set_low();
Ok(())
}
fn set_high(&mut self) -> Result<(), Self::Error> {
self.0.set_high();
Ok(())
}
}
impl embedded_hal::digital::StatefulOutputPin for Output {
fn is_set_high(&mut self) -> Result<bool, Self::Error> {
Ok(self.0.is_set_high())
}
fn is_set_low(&mut self) -> Result<bool, Self::Error> {
Ok(self.0.is_set_low())
}
}
/// Input pin.
pub struct Input(LowLevelGpio);
impl Input {
pub fn new_for_mio<I: mio::PinId>(_pin: mio::Pin<I>) -> Result<Self, PinIsOutputOnly> {
let mut low_level = LowLevelGpio::new(PinOffset::Mio(I::OFFSET));
low_level.configure_as_input_floating()?;
Ok(Self(low_level))
}
pub fn new_for_emio(pin: emio::EmioPin) -> Result<Self, PinIsOutputOnly> {
let mut low_level = LowLevelGpio::new(PinOffset::new_for_emio(pin.offset()).unwrap());
low_level.configure_as_input_floating()?;
Ok(Self(low_level))
}
pub fn is_high(&self) -> bool {
self.0.is_high()
}
pub fn is_low(&self) -> bool {
self.0.is_low()
}
}
impl embedded_hal::digital::ErrorType for Input {
type Error = Infallible;
}
impl embedded_hal::digital::InputPin for Input {
fn is_high(&mut self) -> Result<bool, Self::Error> {
Ok(self.0.is_high())
}
fn is_low(&mut self) -> Result<bool, Self::Error> {
Ok(self.0.is_low())
}
}
/// IO peripheral pin.
pub struct IoPeriphPin {
pin: LowLevelGpio,
mux_conf: MuxConf,
}
impl IoPeriphPin {
pub fn new(pin: impl MioPinMarker, mux_conf: MuxConf, pullup: Option<bool>) -> Self {
let mut low_level = LowLevelGpio::new(PinOffset::Mio(pin.offset()));
low_level.configure_as_io_periph_pin(mux_conf, pullup);
Self {
pin: low_level,
mux_conf,
}
}
}
impl IoPinProvider for IoPeriphPin {
#[inline]
fn mode(&self) -> PinMode {
PinMode::MioIoPeriph(self.mux_conf)
}
#[inline]
fn offset(&self) -> PinOffset {
self.pin.offset()
}
}

169
zynq7000-hal/src/gtc.rs Normal file
View File

@ -0,0 +1,169 @@
//! Global timer counter driver module.
//!
//! # Examples
//!
//! - [GTC ticks example](https://egit.irs.uni-stuttgart.de/rust/zynq7000-rs/src/branch/main/examples/simple/src/bin/gtc-ticks.rs)
//! - [Embassy Timer Driver](https://egit.irs.uni-stuttgart.de/rust/zynq7000-rs/src/branch/main/zynq7000-embassy/src/lib.rs)
use zynq7000::gtc::MmioGtc;
use crate::{clocks::ArmClocks, time::Hertz};
/// High level GTC driver.
///
/// This structure also holds an optional clock member which is required for the
/// [Gtc::frequency_to_ticks] method and the [embedded_hal::delay::DelayNs] implementation
/// to work.
pub struct Gtc {
regs: MmioGtc<'static>,
cpu_3x2x_clock: Option<Hertz>,
}
unsafe impl Send for Gtc {}
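/// Convert a tick frequency into the number of GTC ticks for one period of that frequency,
/// given the GTC clock (the CPU 3x2x clock).
///
/// A small worked example with purely illustrative values:
///
/// ```ignore
/// use zynq7000_hal::gtc::frequency_to_ticks;
/// use zynq7000_hal::time::Hertz;
///
/// // A 100 MHz GTC clock and a desired 1 kHz event rate yield 100_000 ticks per event.
/// assert_eq!(
///     frequency_to_ticks(Hertz::from_raw(100_000_000), Hertz::from_raw(1_000)),
///     100_000
/// );
/// ```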
pub const fn frequency_to_ticks(clock: Hertz, frequency: Hertz) -> u32 {
clock.raw().div_ceil(frequency.raw())
}
impl Gtc {
/// Create a peripheral driver from a MMIO GTC block.
#[inline]
pub const fn new(_regs: MmioGtc<'static>, clocks: &ArmClocks) -> Self {
unsafe { Self::steal_fixed(Some(clocks.cpu_3x2x_clk())) }
}
/// Steal the GTC from the PAC.
///
/// This function still expects the GTC clock, which is the CPU 3x2x clock frequency.
///
/// # Safety
///
/// This function allows creating an arbitrary amount of memory-mapped peripheral drivers.
/// See the [zynq7000::gtc::Gtc::new_mmio] docs for more safety information.
#[inline]
pub const unsafe fn steal_fixed(cpu_3x2x_clk: Option<Hertz>) -> Self {
Self {
regs: unsafe { zynq7000::gtc::Gtc::new_mmio_fixed() },
cpu_3x2x_clock: cpu_3x2x_clk,
}
}
#[inline]
pub fn set_cpu_3x2x_clock(&mut self, clock: Hertz) {
self.cpu_3x2x_clock = Some(clock);
}
// TODO: Change this API once pure-reads work.
/// Read the 64-bit timer.
#[inline]
pub fn read_timer(&self) -> u64 {
// Safety: We require interior mutability here because even reads are unsafe.
// But we want to avoid a RefCell which would incur a run-time cost solely to make this
// function non-mut, so we steal the GTC here. Ownership is guaranteed or mandated
// by constructor.
let upper = self.regs.read_count_upper();
loop {
let lower = self.regs.read_count_lower();
if self.regs.read_count_upper() == upper {
return ((upper as u64) << 32) | (lower as u64);
}
// Overflow, read upper again.
}
}
/// Set the comparator which can be used to trigger an interrupt in the future.
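///
/// A minimal sketch of one possible setup order for a periodic comparator interrupt. It
/// assumes `gtc` is an already constructed [Gtc] driver with a known CPU 3x2x clock:
///
/// ```ignore
/// use zynq7000_hal::time::Hertz;
///
/// let ticks = gtc.frequency_to_ticks(Hertz::from_raw(1_000));
/// gtc.set_auto_increment_value(ticks);
/// gtc.enable_auto_increment();
/// gtc.set_comparator(gtc.read_timer() + ticks as u64);
/// gtc.enable_interrupt();
/// gtc.enable();
/// ```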
#[inline]
pub fn set_comparator(&mut self, comparator: u64) {
self.regs.modify_ctrl(|mut ctrl| {
ctrl.set_comparator_enable(false);
ctrl
});
self.regs.write_comparator_upper((comparator >> 32) as u32);
self.regs.write_comparator_lower(comparator as u32);
self.regs.modify_ctrl(|mut ctrl| {
ctrl.set_comparator_enable(true);
ctrl
});
}
pub fn frequency_to_ticks(&self, frequency: Hertz) -> u32 {
if self.cpu_3x2x_clock.is_none() {
return 0;
}
frequency_to_ticks(self.cpu_3x2x_clock.unwrap(), frequency)
}
/// Set the auto-increment value which will be used by the hardware to automatically
/// increment the comparator value on a comparator interrupt, if the auto-increment is enabled.
#[inline]
pub fn set_auto_increment_value(&mut self, value: u32) {
self.regs.write_auto_increment(value);
}
#[inline]
pub fn set_auto_increment_value_for_frequency(&mut self, frequency: Hertz) {
self.regs
.write_auto_increment(self.frequency_to_ticks(frequency));
}
#[inline]
pub fn enable(&mut self) {
self.regs.modify_ctrl(|mut ctrl| {
ctrl.set_enable(true);
ctrl
});
}
#[inline]
pub fn enable_auto_increment(&mut self) {
self.regs.modify_ctrl(|mut ctrl| {
ctrl.set_auto_increment(true);
ctrl
});
}
#[inline]
pub fn set_prescaler(&mut self, prescaler: u8) {
self.regs.modify_ctrl(|mut ctrl| {
ctrl.set_prescaler(prescaler);
ctrl
});
}
#[inline]
pub fn disable(&mut self) {
self.regs.modify_ctrl(|mut ctrl| {
ctrl.set_enable(false);
ctrl
});
}
/// Enable the comparator interrupt.
#[inline]
pub fn enable_interrupt(&mut self) {
self.regs.modify_ctrl(|mut ctrl| {
ctrl.set_irq_enable(true);
ctrl
});
}
/// Disable the comparator interrupt.
#[inline]
pub fn disable_interrupt(&mut self) {
self.regs.modify_ctrl(|mut ctrl| {
ctrl.set_irq_enable(false);
ctrl
});
}
}
/// GTC can be used for blocking delays.
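///
/// A minimal sketch, assuming `gtc` was constructed with a known CPU 3x2x clock and the
/// timer was enabled:
///
/// ```ignore
/// use embedded_hal::delay::DelayNs;
///
/// gtc.delay_ms(500);
/// ```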
impl embedded_hal::delay::DelayNs for Gtc {
fn delay_ns(&mut self, ns: u32) {
if self.cpu_3x2x_clock.is_none() {
return;
}
let end_of_delay = self.read_timer()
+ (((ns as u64) * self.cpu_3x2x_clock.unwrap().raw() as u64) / 1_000_000_000);
while self.read_timer() < end_of_delay {}
}
}

711
zynq7000-hal/src/i2c.rs Normal file
View File

@ -0,0 +1,711 @@
use arbitrary_int::{u2, u3, u6};
use embedded_hal::i2c::NoAcknowledgeSource;
use zynq7000::{
i2c::{Control, I2C_0_BASE_ADDR, I2C_1_BASE_ADDR, InterruptStatus, MmioI2c, TransferSize},
slcr::reset::DualClockReset,
};
#[cfg(not(feature = "7z010-7z007s-clg225"))]
use crate::gpio::mio::{
Mio16, Mio17, Mio18, Mio19, Mio20, Mio21, Mio22, Mio23, Mio24, Mio25, Mio26, Mio27, Mio40,
Mio41, Mio42, Mio43, Mio44, Mio45, Mio46, Mio47, Mio50, Mio51,
};
use crate::{
enable_amba_peripheral_clock,
gpio::{
IoPeriphPin,
mio::{
Mio10, Mio11, Mio12, Mio13, Mio14, Mio15, Mio28, Mio29, Mio30, Mio31, Mio32, Mio33,
Mio34, Mio35, Mio36, Mio37, Mio38, Mio39, Mio48, Mio49, Mio52, Mio53, MioPinMarker,
MuxConf, Pin,
},
},
slcr::Slcr,
time::Hertz,
};
pub const I2C_MUX_CONF: MuxConf = MuxConf::new_with_l3(u3::new(0b010));
pub const FIFO_DEPTH: usize = 16;
/// Maximum read size in one read operation.
pub const MAX_READ_SIZE: usize = 255;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum I2cId {
I2c0 = 0,
I2c1 = 1,
}
pub trait PsI2c {
fn reg_block(&self) -> MmioI2c<'static>;
fn id(&self) -> Option<I2cId>;
}
impl PsI2c for MmioI2c<'static> {
#[inline]
fn reg_block(&self) -> MmioI2c<'static> {
unsafe { self.clone() }
}
#[inline]
fn id(&self) -> Option<I2cId> {
let base_addr = unsafe { self.ptr() } as usize;
if base_addr == I2C_0_BASE_ADDR {
return Some(I2cId::I2c0);
} else if base_addr == I2C_1_BASE_ADDR {
return Some(I2cId::I2c1);
}
None
}
}
pub trait SdaPin: MioPinMarker {
const ID: I2cId;
}
pub trait SckPin: MioPinMarker {
const ID: I2cId;
}
pub trait I2cPins {}
macro_rules! i2c_pin_impls {
($Id: path, $SckMio:ident, $SdaMio:ident) => {
impl SckPin for Pin<$SckMio> {
const ID: I2cId = $Id;
}
impl SdaPin for Pin<$SdaMio> {
const ID: I2cId = $Id;
}
impl I2cPins for (Pin<$SckMio>, Pin<$SdaMio>) {}
};
}
/*
macro_rules! into_i2c {
($($Mio:ident),+) => {
$(
impl <M: PinMode> MioPin<$Mio, M> {
/// Convert the pin into I2C pins by configuring the pin routing via the
/// MIO multiplexer bits. Also enables pull-ups for the pins.
pub fn into_i2c(self) -> MioPin<$Mio, IoPeriph> {
// Enable pull-ups for the I2C pins.
self.into_io_periph(I2C_MUX_CONF, Some(true))
}
}
)+
};
}
into_i2c!(
Mio10, Mio11, Mio14, Mio15, Mio30, Mio31, Mio34, Mio35, Mio38, Mio39, Mio12, Mio13, Mio28,
Mio29, Mio32, Mio33, Mio36, Mio37, Mio48, Mio49, Mio52, Mio53
);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
into_i2c!(
Mio18, Mio19, Mio22, Mio23, Mio26, Mio27, Mio42, Mio43, Mio46, Mio47, Mio50, Mio51, Mio16,
Mio17, Mio20, Mio21, Mio24, Mio25, Mio40, Mio41, Mio44, Mio45
);
*/
i2c_pin_impls!(I2cId::I2c0, Mio10, Mio11);
i2c_pin_impls!(I2cId::I2c0, Mio14, Mio15);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
i2c_pin_impls!(I2cId::I2c0, Mio18, Mio19);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
i2c_pin_impls!(I2cId::I2c0, Mio22, Mio23);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
i2c_pin_impls!(I2cId::I2c0, Mio26, Mio27);
i2c_pin_impls!(I2cId::I2c0, Mio30, Mio31);
i2c_pin_impls!(I2cId::I2c0, Mio34, Mio35);
i2c_pin_impls!(I2cId::I2c0, Mio38, Mio39);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
i2c_pin_impls!(I2cId::I2c0, Mio42, Mio43);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
i2c_pin_impls!(I2cId::I2c0, Mio46, Mio47);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
i2c_pin_impls!(I2cId::I2c0, Mio50, Mio51);
i2c_pin_impls!(I2cId::I2c1, Mio12, Mio13);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
i2c_pin_impls!(I2cId::I2c1, Mio16, Mio17);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
i2c_pin_impls!(I2cId::I2c1, Mio20, Mio21);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
i2c_pin_impls!(I2cId::I2c1, Mio24, Mio25);
i2c_pin_impls!(I2cId::I2c1, Mio28, Mio29);
i2c_pin_impls!(I2cId::I2c1, Mio32, Mio33);
i2c_pin_impls!(I2cId::I2c1, Mio36, Mio37);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
i2c_pin_impls!(I2cId::I2c1, Mio40, Mio41);
#[cfg(not(feature = "7z010-7z007s-clg225"))]
i2c_pin_impls!(I2cId::I2c1, Mio44, Mio45);
i2c_pin_impls!(I2cId::I2c1, Mio48, Mio49);
i2c_pin_impls!(I2cId::I2c1, Mio52, Mio53);
#[derive(Debug, Clone, Copy)]
pub enum I2cSpeed {
Normal100kHz,
HighSpeed400KHz,
}
impl I2cSpeed {
pub fn frequency_full_number(&self) -> Hertz {
Hertz::from_raw(match self {
I2cSpeed::Normal100kHz => 100_000,
I2cSpeed::HighSpeed400KHz => 400_000,
})
}
/// From Xilinx embeddedsw
/// If frequency 400KHz is selected, 384.6KHz should be set.
/// If frequency 100KHz is selected, 90KHz should be set.
/// This is due to a hardware limitation.
pub fn frequency_for_calculation(&self) -> Hertz {
Hertz::from_raw(match self {
I2cSpeed::Normal100kHz => 90_000,
I2cSpeed::HighSpeed400KHz => 384_600,
})
}
}
#[derive(Debug, thiserror::Error)]
#[error("I2C speed not attainable")]
pub struct I2cSpeedNotAttainable;
#[derive(Debug, thiserror::Error)]
pub enum I2cTxError {
#[error("arbitration lost")]
ArbitrationLoss,
#[error("transfer not acknowledged: {0}")]
Nack(NoAcknowledgeSource),
#[error("TX overflow")]
TxOverflow,
#[error("timeout of transfer")]
Timeout,
}
#[derive(Debug, thiserror::Error)]
pub enum I2cRxError {
#[error("arbitration lost")]
ArbitrationLoss,
#[error("transfer not acknowledged")]
Nack(NoAcknowledgeSource),
#[error("RX underflow")]
RxUnderflow,
#[error("RX overflow")]
RxOverflow,
#[error("timeout of transfer")]
Timeout,
#[error("read data exceeds maximum allowed 255 bytes per transfer")]
ReadDataLenTooLarge,
}
#[derive(Debug, thiserror::Error)]
pub enum I2cError {
#[error("arbitration lost")]
ArbitrationLoss,
#[error("transfer not acknowledged: {0}")]
Nack(NoAcknowledgeSource),
#[error("TX overflow")]
TxOverflow,
#[error("RX underflow")]
RxUnderflow,
#[error("RX overflow")]
RxOverflow,
#[error("timeout of transfer")]
Timeout,
#[error("read data exceeds maximum allowed 255 bytes per transfer")]
ReadDataLenTooLarge,
}
impl From<I2cRxError> for I2cError {
fn from(err: I2cRxError) -> Self {
match err {
I2cRxError::ArbitrationLoss => I2cError::ArbitrationLoss,
I2cRxError::Nack(nack) => I2cError::Nack(nack),
I2cRxError::RxUnderflow => I2cError::RxUnderflow,
I2cRxError::RxOverflow => I2cError::RxOverflow,
I2cRxError::Timeout => I2cError::Timeout,
I2cRxError::ReadDataLenTooLarge => I2cError::ReadDataLenTooLarge,
}
}
}
impl From<I2cTxError> for I2cError {
fn from(err: I2cTxError) -> Self {
match err {
I2cTxError::ArbitrationLoss => I2cError::ArbitrationLoss,
I2cTxError::Nack(nack) => I2cError::Nack(nack),
I2cTxError::TxOverflow => I2cError::TxOverflow,
I2cTxError::Timeout => I2cError::Timeout,
}
}
}
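/// Calculate the actual I2C clock frequency resulting from the CPU 1x clock and a divisor
/// configuration.
///
/// As implemented here, the resulting frequency is the CPU 1x clock divided by
/// `22 * (div_a + 1) * (div_b + 1)`. A small sketch with the divisors from the 111 MHz
/// unit test below:
///
/// ```ignore
/// use zynq7000_hal::i2c::{ClockConfig, calculate_i2c_speed};
/// use zynq7000_hal::time::Hertz;
///
/// // 111 MHz / (22 * 1 * 56) is roughly 90 kHz.
/// let speed = calculate_i2c_speed(Hertz::from_raw(111_000_000), ClockConfig::new(0, 55));
/// ```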
#[inline]
pub fn calculate_i2c_speed(cpu_1x_clk: Hertz, clk_config: ClockConfig) -> Hertz {
cpu_1x_clk / (22 * (clk_config.div_a as u32 + 1) * (clk_config.div_b as u32 + 1))
}
pub fn calculate_divisors(
cpu_1x_clk: Hertz,
speed: I2cSpeed,
) -> Result<ClockConfig, I2cSpeedNotAttainable> {
let target_speed = speed.frequency_for_calculation();
if cpu_1x_clk > 22 * 64 * 4 * target_speed {
return Err(I2cSpeedNotAttainable);
}
let mut smallest_deviation = u32::MAX;
let mut best_div_a = 1;
let mut best_div_b = 1;
for divisor_a in 1..=4 {
for divisor_b in 1..=64 {
let i2c_clock = cpu_1x_clk / (22 * divisor_a * divisor_b);
let deviation = (target_speed.raw() as i32 - i2c_clock.raw() as i32).unsigned_abs();
if deviation < smallest_deviation {
smallest_deviation = deviation;
best_div_a = divisor_a;
best_div_b = divisor_b;
}
}
}
Ok(ClockConfig::new(best_div_a as u8 - 1, best_div_b as u8 - 1))
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct ClockConfig {
div_a: u8,
div_b: u8,
}
impl ClockConfig {
pub fn new(div_a: u8, div_b: u8) -> Self {
Self { div_a, div_b }
}
pub fn div_a(&self) -> u8 {
self.div_a
}
pub fn div_b(&self) -> u8 {
self.div_b
}
}
#[derive(Debug, thiserror::Error)]
#[error("invalid I2C ID")]
pub struct InvalidPsI2cError;
#[derive(Debug, thiserror::Error)]
pub enum I2cConstructionError {
#[error("invalid I2C ID {0}")]
InvalidPsI2c(#[from] InvalidPsI2cError),
#[error("pin invalid for I2C ID")]
PinInvalidForI2cId,
#[error("invalid pin configuration for I2C")]
InvalidPinConf,
}
pub struct I2c {
regs: MmioI2c<'static>,
}
impl I2c {
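/// Create an I2C driver which routes the signals to MIO pins via the MIO multiplexer.
///
/// A minimal construction sketch for I2C0 on MIO14 (SCK) and MIO15 (SDA). The `dp.i2c_0`
/// PAC instance, the `pins` MIO singleton and the `cpu_1x_clk` value are assumptions here:
///
/// ```ignore
/// use zynq7000_hal::i2c::{I2c, I2cSpeed, calculate_divisors};
///
/// let clk_cfg = calculate_divisors(cpu_1x_clk, I2cSpeed::Normal100kHz).unwrap();
/// let mut i2c = I2c::new_with_mio(dp.i2c_0, clk_cfg, (pins.mio14, pins.mio15)).unwrap();
/// ```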
pub fn new_with_mio<Sck: SckPin, Sda: SdaPin>(
i2c: impl PsI2c,
clk_cfg: ClockConfig,
i2c_pins: (Sck, Sda),
) -> Result<Self, I2cConstructionError> {
if i2c.id().is_none() {
return Err(InvalidPsI2cError.into());
}
if Sck::ID != Sda::ID {
return Err(I2cConstructionError::PinInvalidForI2cId);
}
IoPeriphPin::new(i2c_pins.0, I2C_MUX_CONF, Some(true));
IoPeriphPin::new(i2c_pins.1, I2C_MUX_CONF, Some(true));
Ok(Self::new_generic(
i2c.id().unwrap(),
i2c.reg_block(),
clk_cfg,
))
}
pub fn new_with_emio(i2c: impl PsI2c, clk_cfg: ClockConfig) -> Result<Self, InvalidPsI2cError> {
if i2c.id().is_none() {
return Err(InvalidPsI2cError);
}
Ok(Self::new_generic(
i2c.id().unwrap(),
i2c.reg_block(),
clk_cfg,
))
}
pub fn new_generic(id: I2cId, mut regs: MmioI2c<'static>, clk_cfg: ClockConfig) -> Self {
let periph_sel = match id {
I2cId::I2c0 => crate::PeripheralSelect::I2c0,
I2cId::I2c1 => crate::PeripheralSelect::I2c1,
};
enable_amba_peripheral_clock(periph_sel);
//reset(id);
regs.write_cr(
Control::builder()
.with_div_a(u2::new(clk_cfg.div_a()))
.with_div_b(u6::new(clk_cfg.div_b()))
.with_clear_fifo(true)
.with_slv_mon(false)
.with_hold_bus(false)
.with_acken(false)
.with_addressing(true)
.with_mode(zynq7000::i2c::Mode::Master)
.with_dir(zynq7000::i2c::Direction::Transmitter)
.build(),
);
Self { regs }
}
/// Start the transfer by writing the I2C address.
#[inline]
fn start_transfer(&mut self, address: u8) {
self.regs
.write_addr(zynq7000::i2c::Addr::new_with_raw_value(address as u32));
}
#[inline]
pub fn set_hold_bit(&mut self) {
self.regs.modify_cr(|mut cr| {
cr.set_hold_bus(true);
cr
});
}
#[inline]
pub fn clear_hold_bit(&mut self) {
self.regs.modify_cr(|mut cr| {
cr.set_hold_bus(false);
cr
});
}
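/// Blocking write transfer to a 7-bit I2C device address.
///
/// If `generate_stop` is false, the bus is held after the transfer so that a repeated
/// start can follow. A minimal sketch, assuming `i2c` is a constructed [I2c] driver and a
/// device responds at the illustrative address `0x48`:
///
/// ```ignore
/// i2c.write_transfer_blocking(0x48, &[0x01, 0xAB], true)?;
/// ```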
pub fn write_transfer_blocking(
&mut self,
addr: u8,
data: &[u8],
generate_stop: bool,
) -> Result<(), I2cTxError> {
self.regs.modify_cr(|mut cr| {
cr.set_acken(true);
cr.set_mode(zynq7000::i2c::Mode::Master);
cr.set_clear_fifo(true);
cr.set_dir(zynq7000::i2c::Direction::Transmitter);
if !generate_stop {
cr.set_hold_bus(true);
}
cr
});
let mut first_write_cycle = true;
let mut addr_set = false;
let mut written = 0;
// Clear the interrupt status register before using it to monitor the transfer.
self.regs.modify_isr(|isr| isr);
loop {
let bytes_to_write = core::cmp::min(
FIFO_DEPTH - self.regs.read_transfer_size().size() as usize,
data.len() - written,
);
(0..bytes_to_write).for_each(|_| {
self.regs
.write_data(zynq7000::i2c::Fifo::new_with_raw_value(
data[written] as u32,
));
written += 1;
});
if !addr_set {
self.start_transfer(addr);
addr_set = true;
}
let mut status = self.regs.read_sr();
// While the hardware is busy sending out data, we poll for errors.
while status.tx_busy() {
let isr = self.regs.read_isr();
self.check_and_handle_tx_errors(isr, first_write_cycle, bytes_to_write)?;
// Re-read for next check.
status = self.regs.read_sr();
}
first_write_cycle = false;
// Just need to poll to completion now.
if written == data.len() {
break;
}
}
// Poll to completion.
while !self.regs.read_isr().complete() {
let isr = self.regs.read_isr();
self.check_and_handle_tx_errors(isr, first_write_cycle, data.len())?;
}
if generate_stop {
self.clear_hold_bit();
}
Ok(())
}
fn check_and_handle_tx_errors(
&mut self,
isr: InterruptStatus,
first_write_cycle: bool,
first_chunk_len: usize,
) -> Result<(), I2cTxError> {
if isr.tx_overflow() {
self.clean_up_after_transfer_or_on_error();
return Err(I2cTxError::TxOverflow);
}
if isr.arbitration_lost() {
self.clean_up_after_transfer_or_on_error();
return Err(I2cTxError::ArbitrationLoss);
}
if isr.nack() {
self.clean_up_after_transfer_or_on_error();
// This has not been tested yet, but if no data was sent so far, this is probably
// an address NACK.
if first_write_cycle
&& self.regs.read_transfer_size().size() as usize + 1 == first_chunk_len
{
return Err(I2cTxError::Nack(NoAcknowledgeSource::Address));
} else {
return Err(I2cTxError::Nack(NoAcknowledgeSource::Data));
}
}
if isr.timeout() {
// Timeout / Stall condition.
self.clean_up_after_transfer_or_on_error();
return Err(I2cTxError::Timeout);
}
Ok(())
}
pub fn clean_up_after_transfer_or_on_error(&mut self) {
self.regs.modify_cr(|mut cr| {
cr.set_acken(false);
cr.set_clear_fifo(true);
cr
});
}
pub fn read_transfer_blocking(&mut self, addr: u8, data: &mut [u8]) -> Result<(), I2cRxError> {
self.regs.modify_cr(|mut cr| {
cr.set_acken(true);
cr.set_mode(zynq7000::i2c::Mode::Master);
cr.set_clear_fifo(true);
cr.set_dir(zynq7000::i2c::Direction::Receiver);
if data.len() > FIFO_DEPTH {
cr.set_hold_bus(true);
}
cr
});
let mut read = 0;
if data.len() > MAX_READ_SIZE {
return Err(I2cRxError::ReadDataLenTooLarge);
}
// Clear the interrupt status register before using it to monitor the transfer.
self.regs.modify_isr(|isr| isr);
self.regs
.write_transfer_size(TransferSize::new_with_raw_value(data.len() as u32));
self.start_transfer(addr);
loop {
let mut status = self.regs.read_sr();
loop {
let isr = self.regs.read_isr();
self.check_and_handle_rx_errors(read, isr)?;
if status.rx_valid() {
break;
}
// Re-read for next check.
status = self.regs.read_sr();
}
// Data to be read.
while self.regs.read_sr().rx_valid() {
data[read] = self.regs.read_data().data();
read += 1;
}
// The outstanding read size does not exceed the FIFO depth. Clear the HOLD bit as
// specified in TRM p.649 polled read step 6.
if self.regs.read_transfer_size().size() as usize <= FIFO_DEPTH {
self.clear_hold_bit();
}
// Read everything, just need to poll to completion now.
if read == data.len() {
break;
}
}
// Poll to completion.
while !self.regs.read_isr().complete() {
let isr = self.regs.read_isr();
self.check_and_handle_rx_errors(read, isr)?
}
self.clear_hold_bit();
self.clean_up_after_transfer_or_on_error();
Ok(())
}
fn check_and_handle_rx_errors(
&mut self,
read_count: usize,
isr: InterruptStatus,
) -> Result<(), I2cRxError> {
if isr.rx_overflow() {
self.clean_up_after_transfer_or_on_error();
return Err(I2cRxError::RxOverflow);
}
if isr.rx_underflow() {
self.clean_up_after_transfer_or_on_error();
return Err(I2cRxError::RxUnderflow);
}
if isr.nack() {
self.clean_up_after_transfer_or_on_error();
// This has not been tested yet, but if no data was received so far, this is probably
// an address NACK.
if read_count == 0 {
return Err(I2cRxError::Nack(NoAcknowledgeSource::Address));
} else {
return Err(I2cRxError::Nack(NoAcknowledgeSource::Data));
}
}
if isr.timeout() {
// Timeout / Stall condition.
self.clean_up_after_transfer_or_on_error();
return Err(I2cRxError::Timeout);
}
Ok(())
}
}
impl embedded_hal::i2c::ErrorType for I2c {
type Error = I2cError;
}
impl embedded_hal::i2c::Error for I2cError {
fn kind(&self) -> embedded_hal::i2c::ErrorKind {
match self {
I2cError::ArbitrationLoss => embedded_hal::i2c::ErrorKind::ArbitrationLoss,
I2cError::Nack(nack_kind) => embedded_hal::i2c::ErrorKind::NoAcknowledge(*nack_kind),
I2cError::RxOverflow => embedded_hal::i2c::ErrorKind::Overrun,
I2cError::TxOverflow => embedded_hal::i2c::ErrorKind::Other,
I2cError::RxUnderflow => embedded_hal::i2c::ErrorKind::Other,
I2cError::Timeout | I2cError::ReadDataLenTooLarge => {
embedded_hal::i2c::ErrorKind::Other
}
}
}
}
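/// Blocking [embedded_hal::i2c::I2c] implementation with 7-bit addressing.
///
/// A minimal usage sketch through the generic trait. The device address and register value
/// are purely illustrative:
///
/// ```ignore
/// use embedded_hal::i2c::I2c as _;
///
/// let mut rx_buf = [0u8; 2];
/// i2c.write(0x48, &[0x00])?;
/// i2c.read(0x48, &mut rx_buf)?;
/// ```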
impl embedded_hal::i2c::I2c for I2c {
fn transaction(
&mut self,
address: u8,
operations: &mut [embedded_hal::i2c::Operation<'_>],
) -> Result<(), Self::Error> {
for op in operations {
match op {
embedded_hal::i2c::Operation::Read(items) => {
self.read_transfer_blocking(address, items)?
}
embedded_hal::i2c::Operation::Write(items) => {
self.write_transfer_blocking(address, items, true)?
}
}
}
Ok(())
}
fn write_read(
&mut self,
address: u8,
write: &[u8],
read: &mut [u8],
) -> Result<(), Self::Error> {
// This has not been tested yet. It is not clear whether the controller still generates a
// stop condition between the write and the read, which would break the trait contract.
self.write_transfer_blocking(address, write, false)?;
Ok(self.read_transfer_blocking(address, read)?)
}
}
/// Reset the I2C peripheral using the SLCR reset register for I2C.
///
/// Please note that this function will interfere with an already configured
/// I2C instance.
#[inline]
pub fn reset(id: I2cId) {
let assert_reset = match id {
I2cId::I2c0 => DualClockReset::builder()
.with_periph1_cpu1x_rst(false)
.with_periph0_cpu1x_rst(true)
.build(),
I2cId::I2c1 => DualClockReset::builder()
.with_periph1_cpu1x_rst(true)
.with_periph0_cpu1x_rst(false)
.build(),
};
unsafe {
Slcr::with(|regs| {
regs.reset_ctrl().write_i2c(assert_reset);
// Keep it in reset for a few cycles. The TRM only mentions a small delay here
// without specifying its exact length.
for _ in 0..3 {
cortex_ar::asm::nop();
}
regs.reset_ctrl().write_i2c(DualClockReset::DEFAULT);
});
}
}
#[cfg(test)]
mod tests {
extern crate std;
use super::*;
use fugit::RateExtU32;
use std::println;
#[test]
fn example_test() {
let clk_cfg = calculate_divisors(111.MHz(), I2cSpeed::Normal100kHz).unwrap();
assert_eq!(clk_cfg.div_a(), 0);
assert_eq!(clk_cfg.div_b(), 55);
let speed = calculate_i2c_speed(111.MHz(), clk_cfg);
assert!(speed.raw() < 100_000);
assert!(speed.raw() > 85_000);
}
#[test]
fn example_test_2() {
let clk_cfg = calculate_divisors(111.MHz(), I2cSpeed::HighSpeed400KHz).unwrap();
assert_eq!(clk_cfg.div_a(), 0);
assert_eq!(clk_cfg.div_b(), 12);
let speed = calculate_i2c_speed(111.MHz(), clk_cfg);
assert!(speed.raw() < 400_000);
assert!(speed.raw() > 360_000);
}
#[test]
fn example_test_3() {
let clk_cfg = calculate_divisors(133.MHz(), I2cSpeed::Normal100kHz).unwrap();
assert_eq!(clk_cfg.div_a(), 1);
assert_eq!(clk_cfg.div_b(), 33);
let speed = calculate_i2c_speed(133.MHz(), clk_cfg);
assert!(speed.raw() < 100_000);
assert!(speed.raw() > 85_000);
}
#[test]
fn example_test_4() {
let clk_cfg = calculate_divisors(133.MHz(), I2cSpeed::HighSpeed400KHz).unwrap();
assert_eq!(clk_cfg.div_a(), 0);
assert_eq!(clk_cfg.div_b(), 15);
let speed = calculate_i2c_speed(133.MHz(), clk_cfg);
assert!(speed.raw() < 400_000);
assert!(speed.raw() > 360_000);
}
}

204
zynq7000-hal/src/lib.rs Normal file
View File

@ -0,0 +1,204 @@
//! # HAL for the AMD Zynq 7000 SoC family
//!
//! This repository contains the **H**ardware **A**bstraction **L**ayer (HAL), which is an additional
//! hardware abstraction on top of the [peripheral access API](https://egit.irs.uni-stuttgart.de/rust/zynq7000-rs/src/branch/main/zynq7000).
//!
//! It is the result of reading the datasheet for the device and encoding a type-safe layer over the
//! raw PAC. This crate also implements traits specified by the
//! [embedded-hal](https://github.com/rust-embedded/embedded-hal) project, making it compatible with
//! various drivers in the embedded rust ecosystem.
#![no_std]
use slcr::Slcr;
use zynq7000::slcr::LevelShifterReg;
pub mod clocks;
pub mod gic;
pub mod gpio;
pub mod gtc;
pub mod i2c;
pub mod log;
pub mod prelude;
pub mod slcr;
pub mod spi;
pub mod time;
pub mod ttc;
pub mod uart;
/// This enumeration encodes the various boot sources.
#[derive(Debug, Copy, Clone)]
pub enum BootDevice {
JtagCascaded,
JtagIndependent,
Nor,
Nand,
Qspi,
SdCard,
}
#[derive(Debug, Copy, Clone)]
pub enum BootPllConfig {
Enabled,
Bypassed,
}
#[derive(Debug)]
pub struct BootMode {
boot_mode: Option<BootDevice>,
pll_config: BootPllConfig,
}
impl BootMode {
#[allow(clippy::new_without_default)]
/// Create a new boot mode information structure by reading the boot mode register from the
/// fixed SLCR block.
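///
/// A minimal sketch of reading out the boot source at start-up:
///
/// ```ignore
/// let boot_mode = zynq7000_hal::BootMode::new();
/// if let Some(boot_device) = boot_mode.boot_device() {
///     log::info!("booted from {:?}", boot_device);
/// }
/// ```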
pub fn new() -> Self {
// Safety: Only read a read-only register here.
Self::new_with_raw_reg(
unsafe { zynq7000::slcr::Slcr::new_mmio_fixed() }
.read_boot_mode()
.raw_value(),
)
}
fn new_with_raw_reg(raw_register: u32) -> Self {
let msb_three_bits = (raw_register >> 1) & 0b111;
let boot_mode = match msb_three_bits {
0b000 => {
if raw_register & 0b1 == 0 {
Some(BootDevice::JtagCascaded)
} else {
Some(BootDevice::JtagIndependent)
}
}
0b001 => Some(BootDevice::Nor),
0b010 => Some(BootDevice::Nand),
0b100 => Some(BootDevice::Qspi),
0b110 => Some(BootDevice::SdCard),
_ => None,
};
let pll_config = if (raw_register >> 4) & 0b1 == 0 {
BootPllConfig::Enabled
} else {
BootPllConfig::Bypassed
};
Self {
boot_mode,
pll_config,
}
}
pub fn boot_device(&self) -> Option<BootDevice> {
self.boot_mode
}
pub const fn pll_enable(&self) -> BootPllConfig {
self.pll_config
}
}
/// This configures the level shifters between the programmable logic (PL) and the processing
/// system (PS).
///
/// The Zynq-7000 TRM p.32 specifies more information about this register and how to use it.
pub fn configure_level_shifter(config: zynq7000::slcr::LevelShifterConfig) {
// Safety: We only manipulate the level shift registers.
unsafe {
Slcr::with(|slcr_unlocked| {
slcr_unlocked.write_lvl_shftr_en(LevelShifterReg::new_with_raw_value(config as u32));
});
}
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum PeripheralSelect {
Smc = 24,
Lqspi = 23,
Gpio = 22,
Uart1 = 21,
Uart0 = 20,
I2c1 = 19,
I2c0 = 18,
Can1 = 17,
Can0 = 16,
Spi1 = 15,
Spi0 = 14,
Sdio1 = 11,
Sdio0 = 10,
Gem1 = 7,
Gem0 = 6,
Usb1 = 3,
Usb0 = 2,
Dma = 0,
}
/// Enable the AMBA peripheral clock, which is required to read the registers of a peripheral
/// block.
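///
/// A minimal sketch which enables the GPIO clock before the GPIO registers are accessed:
///
/// ```ignore
/// use zynq7000_hal::{PeripheralSelect, enable_amba_peripheral_clock};
///
/// enable_amba_peripheral_clock(PeripheralSelect::Gpio);
/// ```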
#[inline]
pub fn enable_amba_peripheral_clock(select: PeripheralSelect) {
unsafe {
Slcr::with(|regs| {
regs.clk_ctrl().modify_aper_clk_ctrl(|mut val| {
match select {
PeripheralSelect::Smc => val.set_smc_1x_clk_act(true),
PeripheralSelect::Lqspi => val.set_lqspi_1x_clk_act(true),
PeripheralSelect::Gpio => val.set_gpio_1x_clk_act(true),
PeripheralSelect::Uart1 => val.set_uart_1_1x_clk_act(true),
PeripheralSelect::Uart0 => val.set_uart_0_1x_clk_act(true),
PeripheralSelect::I2c1 => val.set_i2c_1_1x_clk_act(true),
PeripheralSelect::I2c0 => val.set_i2c_0_1x_clk_act(true),
PeripheralSelect::Can1 => val.set_can_1_1x_clk_act(true),
PeripheralSelect::Can0 => val.set_can_0_1x_clk_act(true),
PeripheralSelect::Spi1 => val.set_spi_1_1x_clk_act(true),
PeripheralSelect::Spi0 => val.set_spi_0_1x_clk_act(true),
PeripheralSelect::Sdio1 => val.set_sdio_1_1x_clk_act(true),
PeripheralSelect::Sdio0 => val.set_sdio_0_1x_clk_act(true),
PeripheralSelect::Gem1 => val.set_gem_1_1x_clk_act(true),
PeripheralSelect::Gem0 => val.set_gem_0_1x_clk_act(true),
PeripheralSelect::Usb1 => val.set_usb_1_cpu_1x_clk_act(true),
PeripheralSelect::Usb0 => val.set_usb_0_cpu_1x_clk_act(true),
PeripheralSelect::Dma => val.set_dma_cpu_2x_clk_act(true),
}
val
})
});
}
}
/// Disable the AMBA peripheral clock of a peripheral block.
///
/// The registers of the peripheral block can no longer be accessed after its clock was
/// disabled.
#[inline]
pub fn disable_amba_peripheral_clock(select: PeripheralSelect) {
unsafe {
Slcr::with(|regs| {
regs.clk_ctrl().modify_aper_clk_ctrl(|mut val| {
match select {
PeripheralSelect::Smc => val.set_smc_1x_clk_act(false),
PeripheralSelect::Lqspi => val.set_lqspi_1x_clk_act(false),
PeripheralSelect::Gpio => val.set_gpio_1x_clk_act(false),
PeripheralSelect::Uart1 => val.set_uart_1_1x_clk_act(false),
PeripheralSelect::Uart0 => val.set_uart_0_1x_clk_act(false),
PeripheralSelect::I2c1 => val.set_i2c_1_1x_clk_act(false),
PeripheralSelect::I2c0 => val.set_i2c_0_1x_clk_act(false),
PeripheralSelect::Can1 => val.set_can_1_1x_clk_act(false),
PeripheralSelect::Can0 => val.set_can_0_1x_clk_act(false),
PeripheralSelect::Spi1 => val.set_spi_1_1x_clk_act(false),
PeripheralSelect::Spi0 => val.set_spi_0_1x_clk_act(false),
PeripheralSelect::Sdio1 => val.set_sdio_1_1x_clk_act(false),
PeripheralSelect::Sdio0 => val.set_sdio_0_1x_clk_act(false),
PeripheralSelect::Gem1 => val.set_gem_1_1x_clk_act(false),
PeripheralSelect::Gem0 => val.set_gem_0_1x_clk_act(false),
PeripheralSelect::Usb1 => val.set_usb_1_cpu_1x_clk_act(false),
PeripheralSelect::Usb0 => val.set_usb_0_cpu_1x_clk_act(false),
PeripheralSelect::Dma => val.set_dma_cpu_2x_clk_act(false),
}
val
})
});
}
}
#[allow(dead_code)]
pub(crate) mod sealed {
pub trait Sealed {}
}

230
zynq7000-hal/src/log.rs Normal file
View File

@ -0,0 +1,230 @@
//! # Simple logging providers.
/// Blocking UART loggers.
pub mod uart_blocking {
use core::cell::{Cell, RefCell, UnsafeCell};
use embedded_io::Write as _;
use cortex_ar::register::Cpsr;
use critical_section::Mutex;
use log::{LevelFilter, set_logger, set_max_level};
use crate::uart::Uart;
pub struct UartLoggerBlocking(Mutex<RefCell<Option<Uart>>>);
unsafe impl Send for UartLoggerBlocking {}
unsafe impl Sync for UartLoggerBlocking {}
static UART_LOGGER_BLOCKING: UartLoggerBlocking =
UartLoggerBlocking(Mutex::new(RefCell::new(None)));
/// Initialize the logger with a blocking UART instance.
///
/// This is a blocking logger which performs a write inside a critical section. This logger is
/// thread-safe, but interrupts will be disabled while the logger is writing to the UART.
///
/// For async applications, it is strongly recommended to use the asynchronous ring buffer
/// logger instead.
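///
/// A minimal sketch, assuming `uart` is an already constructed [Uart] driver:
///
/// ```ignore
/// use log::LevelFilter;
///
/// zynq7000_hal::log::uart_blocking::init_with_locks(uart, LevelFilter::Info);
/// log::info!("hello world");
/// ```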
pub fn init_with_locks(uart: Uart, level: LevelFilter) {
// TODO: Impl debug for Uart
critical_section::with(|cs| {
let inner = UART_LOGGER_BLOCKING.0.borrow(cs);
inner.replace(Some(uart));
});
set_logger(&UART_LOGGER_BLOCKING).unwrap();
// Adjust as needed
set_max_level(level);
}
impl log::Log for UartLoggerBlocking {
fn enabled(&self, _metadata: &log::Metadata) -> bool {
true
}
fn log(&self, record: &log::Record) {
critical_section::with(|cs| {
let mut opt_logger = self.0.borrow(cs).borrow_mut();
if opt_logger.is_none() {
return;
}
let logger = opt_logger.as_mut().unwrap();
writeln!(logger, "{} - {}\r", record.level(), record.args()).unwrap();
})
}
fn flush(&self) {}
}
pub struct UartLoggerUnsafeSingleThread {
skip_in_isr: Cell<bool>,
uart: UnsafeCell<Option<Uart>>,
}
unsafe impl Send for UartLoggerUnsafeSingleThread {}
unsafe impl Sync for UartLoggerUnsafeSingleThread {}
static UART_LOGGER_UNSAFE_SINGLE_THREAD: UartLoggerUnsafeSingleThread =
UartLoggerUnsafeSingleThread {
skip_in_isr: Cell::new(false),
uart: UnsafeCell::new(None),
};
/// Create an unsafe single-thread logger from a blocking UART instance.
///
/// For async applications, it is strongly recommended to use the asynchronous ring buffer
/// logger instead.
///
/// # Safety
///
/// This is a blocking logger which performs a write WITHOUT a critical section. This logger is
/// NOT thread-safe. Users must ensure that this logger is not used inside a pre-emptive
/// multi-threading context or from interrupt handlers.
pub unsafe fn create_unsafe_single_thread_logger(uart: Uart) -> UartLoggerUnsafeSingleThread {
UartLoggerUnsafeSingleThread {
skip_in_isr: Cell::new(false),
uart: UnsafeCell::new(Some(uart)),
}
}
/// Initialize the logger with a blocking UART instance which does not use locks.
///
/// # Safety
///
/// This is a blocking logger which performs a write WITHOUT a critical section. This logger is
/// NOT thread-safe, which might lead to garbled output. Log output in ISRs can optionally be
/// suppressed.
pub unsafe fn init_unsafe_single_core(uart: Uart, level: LevelFilter, skip_in_isr: bool) {
let opt_uart = unsafe { &mut *UART_LOGGER_UNSAFE_SINGLE_THREAD.uart.get() };
opt_uart.replace(uart);
UART_LOGGER_UNSAFE_SINGLE_THREAD
.skip_in_isr
.set(skip_in_isr);
set_logger(&UART_LOGGER_UNSAFE_SINGLE_THREAD).unwrap();
set_max_level(level); // Adjust as needed
}
impl log::Log for UartLoggerUnsafeSingleThread {
fn enabled(&self, _metadata: &log::Metadata) -> bool {
true
}
fn log(&self, record: &log::Record) {
if self.skip_in_isr.get() {
match Cpsr::read().mode().unwrap() {
cortex_ar::register::cpsr::ProcessorMode::Fiq
| cortex_ar::register::cpsr::ProcessorMode::Irq => {
return;
}
_ => {}
}
}
let uart_mut = unsafe { &mut *self.uart.get() }.as_mut();
if uart_mut.is_none() {
return;
}
writeln!(
uart_mut.unwrap(),
"{} - {}\r",
record.level(),
record.args()
)
.unwrap();
}
fn flush(&self) {}
}
}
/// Logger module which logs into a ring buffer to allow asynchronous logging handling.
pub mod rb {
use core::cell::RefCell;
use core::fmt::Write as _;
use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
use log::{LevelFilter, set_logger, set_max_level};
use ringbuf::{
StaticRb,
traits::{Consumer, Producer},
};
/// Logger implementation which logs frames via a ring buffer and sends the frame sizes
/// as messages.
///
/// The logger does not require allocation and reserves a generous 4096 bytes each for the
/// data buffer and the ring buffer. This should be sufficient for most logging needs.
pub struct Logger {
frame_queue: embassy_sync::channel::Channel<CriticalSectionRawMutex, usize, 32>,
data_buf: critical_section::Mutex<RefCell<heapless::String<4096>>>,
ring_buf: critical_section::Mutex<RefCell<Option<StaticRb<u8, 4096>>>>,
}
unsafe impl Send for Logger {}
unsafe impl Sync for Logger {}
static LOGGER_RB: Logger = Logger {
frame_queue: embassy_sync::channel::Channel::new(),
data_buf: critical_section::Mutex::new(RefCell::new(heapless::String::new())),
ring_buf: critical_section::Mutex::new(RefCell::new(None)),
};
impl log::Log for Logger {
fn enabled(&self, _metadata: &log::Metadata) -> bool {
true
}
fn log(&self, record: &log::Record) {
critical_section::with(|cs| {
let ref_buf = self.data_buf.borrow(cs);
let mut buf = ref_buf.borrow_mut();
buf.clear();
let _ = writeln!(buf, "{} - {}\r", record.level(), record.args());
let rb_ref = self.ring_buf.borrow(cs);
let mut rb_opt = rb_ref.borrow_mut();
if rb_opt.is_none() {
panic!("log call on uninitialized logger");
}
rb_opt.as_mut().unwrap().push_slice(buf.as_bytes());
let _ = self.frame_queue.try_send(buf.len());
});
}
fn flush(&self) {
while !self.frame_queue().is_empty() {}
}
}
impl Logger {
pub fn frame_queue(
&self,
) -> &embassy_sync::channel::Channel<CriticalSectionRawMutex, usize, 32> {
&self.frame_queue
}
}
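/// Initialize the ring buffer logger.
///
/// A minimal sketch of the consumer side which drains logged frames in an async task and
/// forwards them to some sink. The `sink` writer is an assumption for illustration:
///
/// ```ignore
/// zynq7000_hal::log::rb::init(log::LevelFilter::Info);
///
/// let mut frame_buf = [0u8; 4096];
/// loop {
///     let frame_len = zynq7000_hal::log::rb::get_frame_queue().receive().await;
///     zynq7000_hal::log::rb::read_next_frame(frame_len, &mut frame_buf);
///     sink.write_all(&frame_buf[0..frame_len]).await.unwrap();
/// }
/// ```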
pub fn init(level: LevelFilter) {
critical_section::with(|cs| {
let rb = StaticRb::<u8, 4096>::default();
let rb_ref = LOGGER_RB.ring_buf.borrow(cs);
rb_ref.borrow_mut().replace(rb);
});
set_logger(&LOGGER_RB).unwrap();
set_max_level(level); // Adjust as needed
}
pub fn read_next_frame(frame_len: usize, buf: &mut [u8]) {
let read_len = core::cmp::min(frame_len, buf.len());
critical_section::with(|cs| {
let rb_ref = LOGGER_RB.ring_buf.borrow(cs);
let mut rb = rb_ref.borrow_mut();
rb.as_mut().unwrap().pop_slice(&mut buf[0..read_len]);
})
}
pub fn get_frame_queue()
-> &'static embassy_sync::channel::Channel<CriticalSectionRawMutex, usize, 32> {
LOGGER_RB.frame_queue()
}
}

View File

@ -0,0 +1,3 @@
//! Prelude
pub use fugit::ExtU32 as _;
pub use fugit::RateExtU32 as _;

67
zynq7000-hal/src/slcr.rs Normal file
View File

@ -0,0 +1,67 @@
//! # System Level Control Register (SLCR) module.
use zynq7000::slcr::MmioSlcr;
pub const LOCK_KEY: u32 = 0x767B;
pub const UNLOCK_KEY: u32 = 0xDF0D;
pub struct Slcr(zynq7000::slcr::MmioSlcr<'static>);
impl Slcr {
/// Modify the SLCR register.
///
/// # Safety
///
/// This method unsafely steals the SLCR MMIO block and then calls a user provided function
/// with the [SLCR MMIO][MmioSlcr] block as an input argument. It is the user's responsibility
/// that the SLCR is not used concurrently in a way which leads to data races.
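///
/// A minimal usage sketch. The closure body is only a placeholder:
///
/// ```ignore
/// use zynq7000_hal::slcr::Slcr;
///
/// // Safety: No other code accesses the SLCR concurrently.
/// unsafe {
///     Slcr::with(|regs| {
///         // The SLCR is unlocked here, modify its registers as needed.
///     });
/// }
/// ```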
pub unsafe fn with<F: FnMut(&mut MmioSlcr)>(mut f: F) {
let mut slcr = unsafe { zynq7000::slcr::Slcr::new_mmio_fixed() };
slcr.write_unlock(UNLOCK_KEY);
f(&mut slcr);
slcr.write_lock(LOCK_KEY);
}
/// Create a new SLCR peripheral wrapper.
pub fn new(slcr: zynq7000::slcr::MmioSlcr<'static>) -> Self {
Self(slcr)
}
/// Unsafely create a new SLCR peripheral wrapper.
///
/// # Safety
///
/// This allows creating an arbitrary number of SLCR peripheral wrappers. It is the user's
/// responsibility that these wrappers are not used concurrently in a way which leads to
/// data races.
pub unsafe fn steal() -> Self {
Self::new(unsafe { zynq7000::slcr::Slcr::new_mmio_fixed() })
}
/// Returns a mutable reference to the SLCR MMIO block.
///
/// The MMIO block will not be unlocked. However, the registers can still be read.
pub fn regs(&mut self) -> &mut MmioSlcr<'static> {
&mut self.0
}
/// Modify the SLCR register.
///
/// This method unlocks the SLCR registers and then calls a user provided function
/// with the [SLCR MMIO][MmioSlcr] block as an input argument. This allows the user
/// to safely modify the SLCR registers. The SLCR will be locked after the operation.
pub fn modify<F: FnMut(&mut MmioSlcr)>(&mut self, mut f: F) {
self.0.write_unlock(UNLOCK_KEY);
f(&mut self.0);
self.0.write_lock(LOCK_KEY);
}
/// Manually unlock the SLCR registers.
pub fn unlock(&mut self) {
self.0.write_unlock(UNLOCK_KEY);
}
/// Manually lock the SLCR registers.
pub fn lock(&mut self) {
self.0.write_lock(LOCK_KEY);
}
}

View File

@ -0,0 +1,584 @@
//! Asynchronous PS SPI driver.
use core::{cell::RefCell, convert::Infallible, sync::atomic::AtomicBool};
use critical_section::Mutex;
use embassy_sync::waitqueue::AtomicWaker;
use embedded_hal_async::spi::SpiBus;
use raw_slice::{RawBufSlice, RawBufSliceMut};
use zynq7000::spi::InterruptStatus;
use super::{ChipSelect, FIFO_DEPTH, Spi, SpiId, SpiLowLevel};
static WAKERS: [AtomicWaker; 2] = [const { AtomicWaker::new() }; 2];
static TRANSFER_CONTEXTS: [Mutex<RefCell<TransferContext>>; 2] =
[const { Mutex::new(RefCell::new(TransferContext::new())) }; 2];
// Completion flag. Kept outside of the context structure as an atomic to avoid a
// critical section.
static DONE: [AtomicBool; 2] = [const { AtomicBool::new(false) }; 2];
/// This is a generic interrupt handler to handle asynchronous SPI operations for a given
/// SPI peripheral.
///
/// The user has to call this once in the interrupt handler responsible for the SPI interrupts on
/// the given SPI bank.
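///
/// A minimal sketch. How the handler is registered at the interrupt controller and how the
/// [SpiId] is determined depend on the application and are not shown here:
///
/// ```ignore
/// use zynq7000_hal::spi::SpiId;
///
/// fn spi_irq_handler(spi_id: SpiId) {
///     // Forward the interrupt to the async SPI driver machinery.
///     on_interrupt(spi_id);
/// }
/// ```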
pub fn on_interrupt(peripheral: SpiId) {
let mut spi = unsafe { SpiLowLevel::steal(peripheral) };
let idx = peripheral as usize;
let imr = spi.read_imr();
// IRQ is not related.
if !imr.tx_trig() && !imr.tx_full() && !imr.tx_underflow() && !imr.rx_ovr() && !imr.rx_full() {
return;
}
// Prevent spurious interrupts from messing with our logic here.
spi.disable_interrupts();
let isr = spi.read_isr();
spi.clear_interrupts();
let mut context = critical_section::with(|cs| {
let context_ref = TRANSFER_CONTEXTS[idx].borrow(cs);
*context_ref.borrow()
});
// No transfer active.
if context.transfer_type.is_none() {
return;
}
let transfer_type = context.transfer_type.unwrap();
match transfer_type {
TransferType::Read => on_interrupt_read(idx, &mut context, &mut spi, isr),
TransferType::Write => on_interrupt_write(idx, &mut context, &mut spi, isr),
TransferType::Transfer => on_interrupt_transfer(idx, &mut context, &mut spi, isr),
TransferType::TransferInPlace => {
on_interrupt_transfer_in_place(idx, &mut context, &mut spi, isr)
}
};
}
fn on_interrupt_read(
idx: usize,
context: &mut TransferContext,
spi: &mut SpiLowLevel,
mut isr: InterruptStatus,
) {
let read_slice = unsafe { context.rx_slice.get_mut().unwrap() };
let transfer_len = read_slice.len();
// Read data from RX FIFO first.
let read_len = calculate_read_len(spi, isr, transfer_len, context.rx_progress);
(0..read_len).for_each(|_| {
read_slice[context.rx_progress] = spi.read_fifo_unchecked();
context.rx_progress += 1;
});
// The FIFO still needs to be pumped.
while context.tx_progress < read_slice.len() && !isr.tx_full() {
spi.write_fifo_unchecked(0);
context.tx_progress += 1;
isr = spi.read_isr();
}
isr_finish_handler(idx, spi, context, transfer_len)
}
fn on_interrupt_write(
idx: usize,
context: &mut TransferContext,
spi: &mut SpiLowLevel,
mut isr: InterruptStatus,
) {
let write_slice = unsafe { context.tx_slice.get().unwrap() };
let transfer_len = write_slice.len();
// Read data from RX FIFO first.
let read_len = calculate_read_len(spi, isr, transfer_len, context.rx_progress);
(0..read_len).for_each(|_| {
spi.read_fifo_unchecked();
context.rx_progress += 1;
});
// Data still needs to be sent
while context.tx_progress < transfer_len && !isr.tx_full() {
spi.write_fifo_unchecked(write_slice[context.tx_progress]);
context.tx_progress += 1;
isr = spi.read_isr();
}
isr_finish_handler(idx, spi, context, transfer_len)
}
fn on_interrupt_transfer(
idx: usize,
context: &mut TransferContext,
spi: &mut SpiLowLevel,
mut isr: InterruptStatus,
) {
let read_slice = unsafe { context.rx_slice.get_mut().unwrap() };
let read_len = read_slice.len();
let write_slice = unsafe { context.tx_slice.get().unwrap() };
let write_len = write_slice.len();
let transfer_len = core::cmp::max(read_len, write_len);
// Read data from the RX FIFO first. Bytes beyond the length of the read buffer are
// read out and discarded.
let available_to_read = calculate_read_len(spi, isr, transfer_len, context.rx_progress);
(0..available_to_read).for_each(|_| {
if context.rx_progress < read_len {
read_slice[context.rx_progress] = spi.read_fifo_unchecked();
} else {
spi.read_fifo_unchecked();
}
context.rx_progress += 1;
});
// Data still needs to be sent
while context.tx_progress < transfer_len && !isr.tx_full() {
if context.tx_progress < write_len {
spi.write_fifo_unchecked(write_slice[context.tx_progress]);
} else {
// Dummy write.
spi.write_fifo_unchecked(0);
}
context.tx_progress += 1;
isr = spi.read_isr();
}
isr_finish_handler(idx, spi, context, transfer_len)
}
fn on_interrupt_transfer_in_place(
idx: usize,
context: &mut TransferContext,
spi: &mut SpiLowLevel,
mut isr: InterruptStatus,
) {
let transfer_slice = unsafe { context.rx_slice.get_mut().unwrap() };
let transfer_len = transfer_slice.len();
// Read data from RX FIFO first.
let read_len = calculate_read_len(spi, isr, transfer_len, context.rx_progress);
(0..read_len).for_each(|_| {
transfer_slice[context.rx_progress] = spi.read_fifo_unchecked();
context.rx_progress += 1;
});
// Data still needs to be sent
while context.tx_progress < transfer_len && !isr.tx_full() {
spi.write_fifo_unchecked(transfer_slice[context.tx_progress]);
context.tx_progress += 1;
isr = spi.read_isr();
}
isr_finish_handler(idx, spi, context, transfer_len)
}
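/// Determine how many bytes can currently be pulled from the RX FIFO.
///
/// If the RX FIFO is full, up to a full FIFO worth of data is available. If only the
/// RX not-empty threshold was reached, the configured threshold amount is available.
/// In both cases the value is capped to the number of bytes still outstanding for the
/// transfer.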
fn calculate_read_len(
spi: &mut SpiLowLevel,
isr: InterruptStatus,
total_read_len: usize,
rx_progress: usize,
) -> usize {
if isr.rx_full() {
core::cmp::min(FIFO_DEPTH, total_read_len - rx_progress)
} else if isr.rx_not_empty() {
let trigger = spi.read_rx_not_empty_threshold();
core::cmp::min(total_read_len - rx_progress, trigger as usize)
} else {
0
}
}
/// Generic handler after RX FIFO and TX FIFO were handled. Checks and handles finished
/// and unfinished conditions.
fn isr_finish_handler(
idx: usize,
spi: &mut SpiLowLevel,
context: &mut TransferContext,
transfer_len: usize,
) {
// Transfer finish condition.
if context.rx_progress == context.tx_progress && context.rx_progress == transfer_len {
finish_transfer(idx, context, spi);
return;
}
unfinished_transfer(spi, transfer_len, context.rx_progress);
// The transfer is not finished yet: write back the updated context structure.
critical_section::with(|cs| {
let context_ref = TRANSFER_CONTEXTS[idx].borrow(cs);
*context_ref.borrow_mut() = *context;
});
}
fn finish_transfer(idx: usize, context: &mut TransferContext, spi: &mut SpiLowLevel) {
// Write back updated context structure.
critical_section::with(|cs| {
let context_ref = TRANSFER_CONTEXTS[idx].borrow(cs);
*context_ref.borrow_mut() = *context;
});
spi.set_rx_fifo_trigger(1).unwrap();
spi.set_tx_fifo_trigger(1).unwrap();
// Interrupts were already disabled and cleared.
DONE[idx].store(true, core::sync::atomic::Ordering::Relaxed);
WAKERS[idx].wake();
}
fn unfinished_transfer(spi: &mut SpiLowLevel, transfer_len: usize, rx_progress: usize) {
let new_trig_level = core::cmp::min(FIFO_DEPTH, transfer_len - rx_progress);
spi.set_rx_fifo_trigger(new_trig_level as u32).unwrap();
// Re-enable interrupts with the new RX FIFO trigger level.
spi.enable_interrupts();
}
#[derive(Debug, Clone, Copy)]
pub enum TransferType {
Read,
Write,
Transfer,
TransferInPlace,
}
#[derive(Default, Debug, Copy, Clone)]
pub struct TransferContext {
transfer_type: Option<TransferType>,
tx_progress: usize,
rx_progress: usize,
tx_slice: RawBufSlice,
rx_slice: RawBufSliceMut,
}
#[allow(clippy::new_without_default)]
impl TransferContext {
pub const fn new() -> Self {
Self {
transfer_type: None,
tx_progress: 0,
rx_progress: 0,
tx_slice: RawBufSlice::new_nulled(),
rx_slice: RawBufSliceMut::new_nulled(),
}
}
}
pub struct SpiFuture {
id: super::SpiId,
spi: super::SpiLowLevel,
config: super::Config,
finished_regularly: core::cell::Cell<bool>,
}
impl SpiFuture {
fn new_for_read(spi: &mut Spi, spi_id: SpiId, words: &mut [u8]) -> Self {
if words.is_empty() {
panic!("words length unexpectedly 0");
}
let idx = spi_id as usize;
DONE[idx].store(false, core::sync::atomic::Ordering::Relaxed);
spi.inner.disable_interrupts();
let write_idx = core::cmp::min(super::FIFO_DEPTH, words.len());
// Send dummy bytes.
(0..write_idx).for_each(|_| {
spi.inner.write_fifo_unchecked(0);
});
Self::set_triggers(spi, write_idx, words.len());
// We assume that the slave select configuration was already performed, but we take
// care of issuing a start if necessary.
spi.issue_manual_start_for_manual_cfg();
critical_section::with(|cs| {
let context_ref = TRANSFER_CONTEXTS[idx].borrow(cs);
let mut context = context_ref.borrow_mut();
context.transfer_type = Some(TransferType::Read);
unsafe {
context.rx_slice.set(words);
}
context.tx_slice.set_null();
context.tx_progress = write_idx;
context.rx_progress = 0;
spi.inner.clear_interrupts();
spi.inner.enable_interrupts();
spi.inner.enable();
});
Self {
id: spi_id,
config: spi.config,
spi: unsafe { spi.inner.clone() },
finished_regularly: core::cell::Cell::new(false),
}
}
fn new_for_write(spi: &mut Spi, spi_id: SpiId, words: &[u8]) -> Self {
if words.is_empty() {
panic!("words length unexpectedly 0");
}
let (idx, write_idx) = Self::generic_init_transfer(spi, spi_id, words);
critical_section::with(|cs| {
let context_ref = TRANSFER_CONTEXTS[idx].borrow(cs);
let mut context = context_ref.borrow_mut();
context.transfer_type = Some(TransferType::Write);
unsafe {
context.tx_slice.set(words);
}
context.rx_slice.set_null();
context.tx_progress = write_idx;
context.rx_progress = 0;
spi.inner.clear_interrupts();
spi.inner.enable_interrupts();
spi.inner.enable();
});
Self {
id: spi_id,
config: spi.config,
spi: unsafe { spi.inner.clone() },
finished_regularly: core::cell::Cell::new(false),
}
}
fn new_for_transfer(spi: &mut Spi, spi_id: SpiId, read: &mut [u8], write: &[u8]) -> Self {
if read.is_empty() || write.is_empty() {
panic!("read or write buffer unexpectedly empty");
}
let (idx, write_idx) = Self::generic_init_transfer(spi, spi_id, write);
critical_section::with(|cs| {
let context_ref = TRANSFER_CONTEXTS[idx].borrow(cs);
let mut context = context_ref.borrow_mut();
context.transfer_type = Some(TransferType::Transfer);
unsafe {
context.tx_slice.set(write);
context.rx_slice.set(read);
}
context.tx_progress = write_idx;
context.rx_progress = 0;
spi.inner.clear_interrupts();
spi.inner.enable_interrupts();
spi.inner.enable();
});
Self {
id: spi_id,
config: spi.config,
spi: unsafe { spi.inner.clone() },
finished_regularly: core::cell::Cell::new(false),
}
}
fn new_for_transfer_in_place(spi: &mut Spi, spi_id: SpiId, words: &mut [u8]) -> Self {
if words.is_empty() {
panic!("read and write buffer unexpectedly empty");
}
let (idx, write_idx) = Self::generic_init_transfer(spi, spi_id, words);
critical_section::with(|cs| {
let context_ref = TRANSFER_CONTEXTS[idx].borrow(cs);
let mut context = context_ref.borrow_mut();
context.transfer_type = Some(TransferType::TransferInPlace);
unsafe {
context.rx_slice.set(words);
}
context.tx_slice.set_null();
context.tx_progress = write_idx;
context.rx_progress = 0;
spi.inner.clear_interrupts();
spi.inner.enable_interrupts();
spi.inner.enable();
});
Self {
id: spi_id,
config: spi.config,
spi: unsafe { spi.inner.clone() },
finished_regularly: core::cell::Cell::new(false),
}
}
fn generic_init_transfer(spi: &mut Spi, spi_id: SpiId, write: &[u8]) -> (usize, usize) {
let idx = spi_id as usize;
DONE[idx].store(false, core::sync::atomic::Ordering::Relaxed);
spi.inner.disable();
spi.inner.disable_interrupts();
let write_idx = core::cmp::min(super::FIFO_DEPTH, write.len());
(0..write_idx).for_each(|idx| {
spi.inner.write_fifo_unchecked(write[idx]);
});
Self::set_triggers(spi, write_idx, write.len());
// We assume that the slave select configuration was already performed, but we take
// care of issuing a start if necessary.
spi.issue_manual_start_for_manual_cfg();
(idx, write_idx)
}
fn set_triggers(spi: &mut Spi, write_idx: usize, write_len: usize) {
// This should never fail because it is never larger than the FIFO depth.
spi.inner.set_rx_fifo_trigger(write_idx as u32).unwrap();
// We want to re-fill the TX FIFO before it is completely empty if the full transfer size
// is larger than the FIFO depth. I am not sure whether the default value of 1 ensures
// this because the TRM says that this interrupt is triggered when the FIFO has less than
// threshold entries.
if write_len > super::FIFO_DEPTH {
spi.inner.set_tx_fifo_trigger(2).unwrap();
}
}
}
impl Future for SpiFuture {
type Output = ();
fn poll(
self: core::pin::Pin<&mut Self>,
cx: &mut core::task::Context<'_>,
) -> core::task::Poll<Self::Output> {
WAKERS[self.id as usize].register(cx.waker());
if DONE[self.id as usize].swap(false, core::sync::atomic::Ordering::Relaxed) {
critical_section::with(|cs| {
let mut ctx = TRANSFER_CONTEXTS[self.id as usize].borrow(cs).borrow_mut();
*ctx = TransferContext::default();
});
self.finished_regularly.set(true);
return core::task::Poll::Ready(());
}
core::task::Poll::Pending
}
}
impl Drop for SpiFuture {
fn drop(&mut self) {
if !self.finished_regularly.get() {
// It might be sufficient to disable and re-enable the SPI, but this definitely
// ensures the SPI is fully reset.
self.spi.reset_and_reconfigure(self.config);
}
}
}
/// Asynchronous SPI driver.
///
/// This is the primary data structure used to perform non-blocking SPI operations.
/// It implements the [embedded_hal_async::spi::SpiBus] as well.
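///
/// # Example
///
/// A minimal usage sketch. It assumes a fully configured [Spi] driver named `spi`, a running
/// async executor, and that the SPI interrupt service routine calls this module's interrupt
/// handler; all names besides the HAL types are illustrative.
///
/// ```ignore
/// use embedded_hal_async::spi::SpiBus;
///
/// let mut spi = SpiAsync::new(spi);
/// let mut reply = [0u8; 4];
/// // Clock out four bytes while capturing the received bytes.
/// SpiBus::transfer(&mut spi, &mut reply, &[0x9F, 0x00, 0x00, 0x00]).await.unwrap();
/// ```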
pub struct SpiAsync(pub Spi);
impl SpiAsync {
pub fn new(spi: Spi) -> Self {
Self(spi)
}
async fn read(&mut self, words: &mut [u8]) {
if words.is_empty() {
return;
}
let id = self.0.inner.id;
let spi_fut = SpiFuture::new_for_read(&mut self.0, id, words);
spi_fut.await;
}
async fn write(&mut self, words: &[u8]) {
if words.is_empty() {
return;
}
let id = self.0.inner.id;
let spi_fut = SpiFuture::new_for_write(&mut self.0, id, words);
spi_fut.await;
}
async fn transfer(&mut self, read: &mut [u8], write: &[u8]) {
if read.is_empty() || write.is_empty() {
return;
}
let id = self.0.inner.id;
let spi_fut = SpiFuture::new_for_transfer(&mut self.0, id, read, write);
spi_fut.await;
}
async fn transfer_in_place(&mut self, words: &mut [u8]) {
if words.is_empty() {
return;
}
let id = self.0.inner.id;
let spi_fut = SpiFuture::new_for_transfer_in_place(&mut self.0, id, words);
spi_fut.await;
}
}
impl embedded_hal_async::spi::ErrorType for SpiAsync {
type Error = Infallible;
}
impl embedded_hal_async::spi::SpiBus for SpiAsync {
async fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> {
self.read(words).await;
Ok(())
}
async fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> {
self.write(words).await;
Ok(())
}
async fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> {
self.transfer(read, write).await;
Ok(())
}
async fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Self::Error> {
self.transfer_in_place(words).await;
Ok(())
}
async fn flush(&mut self) -> Result<(), Self::Error> {
Ok(())
}
}
/// This structure is a wrapper for [SpiAsync] which implements the
/// [embedded_hal_async::spi::SpiDevice] trait as well.
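///
/// # Example
///
/// A minimal usage sketch, assuming `spi` is a [SpiAsync] driver, `cs` is the hardware chip
/// select to use and `delay` implements [embedded_hal_async::delay::DelayNs]; all names
/// besides the HAL types are illustrative.
///
/// ```ignore
/// use embedded_hal_async::spi::SpiDevice;
///
/// let mut dev = SpiWithHwCsAsync::new(spi, cs, delay);
/// // The hardware chip select stays selected for the full transaction.
/// SpiDevice::write(&mut dev, &[0x06]).await.unwrap();
/// ```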
pub struct SpiWithHwCsAsync<Delay: embedded_hal_async::delay::DelayNs> {
pub spi: SpiAsync,
pub cs: ChipSelect,
pub delay: Delay,
}
impl<Delay: embedded_hal_async::delay::DelayNs> SpiWithHwCsAsync<Delay> {
pub fn new(spi: SpiAsync, cs: ChipSelect, delay: Delay) -> Self {
Self { spi, cs, delay }
}
pub fn release(self) -> SpiAsync {
self.spi
}
}
impl<Delay: embedded_hal_async::delay::DelayNs> embedded_hal_async::spi::ErrorType
for SpiWithHwCsAsync<Delay>
{
type Error = Infallible;
}
impl<Delay: embedded_hal_async::delay::DelayNs> embedded_hal_async::spi::SpiDevice
for SpiWithHwCsAsync<Delay>
{
async fn transaction(
&mut self,
operations: &mut [embedded_hal::spi::Operation<'_, u8>],
) -> Result<(), Self::Error> {
self.spi.0.inner.select_hw_cs(self.cs);
for op in operations {
match op {
embedded_hal::spi::Operation::Read(items) => {
self.spi.read(items).await;
}
embedded_hal::spi::Operation::Write(items) => {
self.spi.write(items).await;
}
embedded_hal::spi::Operation::Transfer(read, write) => {
self.spi.transfer(read, write).await;
}
embedded_hal::spi::Operation::TransferInPlace(items) => {
self.spi.transfer_in_place(items).await;
}
embedded_hal::spi::Operation::DelayNs(delay) => {
self.delay.delay_ns(*delay).await;
}
}
}
self.spi.flush().await?;
self.spi.0.inner.no_hw_cs();
Ok(())
}
}

1213
zynq7000-hal/src/spi/mod.rs Normal file

File diff suppressed because it is too large

26
zynq7000-hal/src/time.rs Normal file
View File

@ -0,0 +1,26 @@
//! Time units
// Frequency based
/// Hertz
pub type Hertz = fugit::HertzU32;
/// KiloHertz
pub type KiloHertz = fugit::KilohertzU32;
/// MegaHertz
pub type MegaHertz = fugit::MegahertzU32;
// Period based
/// Seconds
pub type Seconds = fugit::SecsDurationU32;
/// Milliseconds
pub type Milliseconds = fugit::MillisDurationU32;
/// Microseconds
pub type Microseconds = fugit::MicrosDurationU32;
/// Nanoseconds
pub type Nanoseconds = fugit::NanosDurationU32;

376
zynq7000-hal/src/ttc.rs Normal file
View File

@ -0,0 +1,376 @@
//! Triple-timer counter (TTC) high-level driver.
//!
//! This module also contains support for PWM and output waveform generation.
use core::convert::Infallible;
use arbitrary_int::{Number, u3, u4};
use zynq7000::ttc::{MmioTtc, TTC_0_BASE_ADDR, TTC_1_BASE_ADDR};
#[cfg(not(feature = "7z010-7z007s-clg225"))]
use crate::gpio::mio::{Mio16, Mio17, Mio18, Mio19, Mio40, Mio41, Mio42, Mio43};
use crate::{
clocks::ArmClocks,
gpio::{
IoPeriphPin,
mio::{Mio28, Mio29, Mio30, Mio31, MioPinMarker, MuxConf, Pin},
},
time::Hertz,
};
/// Each TTC consists of three independent timers/counters.
#[derive(Debug, Copy, Clone)]
pub enum TtcId {
Ttc0 = 0,
Ttc1 = 1,
}
#[derive(Debug, Copy, Clone)]
pub enum ChannelId {
Ch0 = 0,
Ch1 = 1,
Ch2 = 2,
}
pub trait PsTtc {
fn reg_block(&self) -> MmioTtc<'static>;
fn id(&self) -> Option<TtcId>;
}
impl PsTtc for MmioTtc<'static> {
#[inline]
fn reg_block(&self) -> MmioTtc<'static> {
unsafe { self.clone() }
}
#[inline]
fn id(&self) -> Option<TtcId> {
let base_addr = unsafe { self.ptr() } as usize;
if base_addr == TTC_0_BASE_ADDR {
return Some(TtcId::Ttc0);
} else if base_addr == TTC_1_BASE_ADDR {
return Some(TtcId::Ttc1);
}
None
}
}
pub const TTC_MUX_CONF: MuxConf = MuxConf::new_with_l3(u3::new(0b110));
pub trait ClockInPin: MioPinMarker {
const ID: TtcId;
}
pub trait WaveOutPin: MioPinMarker {
const ID: TtcId;
}
// TTC0 pin trait implementations.
impl ClockInPin for Pin<Mio19> {
const ID: TtcId = TtcId::Ttc0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl ClockInPin for Pin<Mio31> {
const ID: TtcId = TtcId::Ttc0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl ClockInPin for Pin<Mio43> {
const ID: TtcId = TtcId::Ttc0;
}
impl WaveOutPin for Pin<Mio18> {
const ID: TtcId = TtcId::Ttc0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl WaveOutPin for Pin<Mio30> {
const ID: TtcId = TtcId::Ttc0;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl WaveOutPin for Pin<Mio42> {
const ID: TtcId = TtcId::Ttc0;
}
// TTC1 pin trait implementations.
impl ClockInPin for Pin<Mio17> {
const ID: TtcId = TtcId::Ttc1;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl ClockInPin for Pin<Mio29> {
const ID: TtcId = TtcId::Ttc1;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl ClockInPin for Pin<Mio41> {
const ID: TtcId = TtcId::Ttc1;
}
impl WaveOutPin for Pin<Mio16> {
const ID: TtcId = TtcId::Ttc1;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl WaveOutPin for Pin<Mio28> {
const ID: TtcId = TtcId::Ttc1;
}
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl WaveOutPin for Pin<Mio40> {
const ID: TtcId = TtcId::Ttc1;
}
pub struct Ttc {
pub ch0: TtcChannel,
pub ch1: TtcChannel,
pub ch2: TtcChannel,
}
impl Ttc {
/// Create a new TTC instance. The passed TTC peripheral instance MUST point to a valid
/// processing system TTC peripheral.
///
/// Returns [None] if the passed peripheral block does not have a valid PS TTC address.
pub fn new(ps_ttc: impl PsTtc) -> Option<Self> {
ps_ttc.id()?;
let regs = ps_ttc.reg_block();
let ch0 = TtcChannel {
regs: unsafe { regs.clone() },
id: ChannelId::Ch0,
};
let ch1 = TtcChannel {
regs: unsafe { regs.clone() },
id: ChannelId::Ch1,
};
let ch2 = TtcChannel {
regs,
id: ChannelId::Ch2,
};
Some(Self { ch0, ch1, ch2 })
}
}
pub struct TtcChannel {
regs: MmioTtc<'static>,
id: ChannelId,
}
impl TtcChannel {
pub fn regs_mut(&mut self) -> &mut MmioTtc<'static> {
&mut self.regs
}
#[inline]
pub fn read_counter(&self) -> u16 {
self.regs
.read_current_counter(self.id as usize)
.unwrap()
.count()
}
pub fn id(&self) -> ChannelId {
self.id
}
}
#[derive(Debug, thiserror::Error)]
#[error("invalid TTC pin configuration")]
pub struct InvalidTtcPinConfigError(pub MuxConf);
#[derive(Debug, thiserror::Error)]
#[error("frequency is zero")]
pub struct FrequencyIsZeroError;
#[derive(Debug, thiserror::Error)]
pub enum TtcConstructionError {
#[error("invalid TTC pin configuration")]
InvalidTtcPinConfig(#[from] InvalidTtcPinConfigError),
#[error("frequency is zero")]
FrequencyIsZero(#[from] FrequencyIsZeroError),
}
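/// Calculate the TTC prescaler register value and the 16-bit interval tick count for a
/// target output frequency.
///
/// The reference clock is halved (and the prescaler value incremented) until the required
/// tick count fits into the 16-bit interval register. A prescaler register value of N
/// corresponds to dividing the reference clock by 2^(N + 1), while [None] means that the
/// prescaler is disabled.
///
/// Worked example with illustrative numbers: for `ref_clk` = 100 MHz and `freq` = 1 kHz,
/// the initial tick count of 100_000 does not fit into 16 bits, so the clock is halved
/// once and `(Some(u4::new(0)), 50_000)` is returned.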
pub fn calculate_prescaler_reg_and_interval_ticks(
mut ref_clk: Hertz,
freq: Hertz,
) -> (Option<u4>, u16) {
// TODO: Can this be optimized?
let mut prescaler_reg: Option<u4> = None;
let mut tick_val = ref_clk / freq;
while tick_val > u16::MAX as u32 {
ref_clk /= 2;
match prescaler_reg {
Some(prescaler) => {
// TODO: Better error handling for this case? Can this even happen?
if prescaler.value() == u4::MAX.value() {
break;
}
prescaler_reg = Some(u4::new(prescaler.value() + 1));
}
None => prescaler_reg = Some(u4::new(0)),
}
tick_val = ref_clk / freq;
}
(prescaler_reg, tick_val as u16)
}
pub struct Pwm {
channel: TtcChannel,
ref_clk: Hertz,
}
impl Pwm {
/// Create a new PWM instance which uses the CPU 1x clock as the clock source and also uses
/// a MIO output pin for the waveform output.
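///
/// # Example
///
/// A minimal sketch, assuming `ttc0_regs` is the TTC0 MMIO block provided by the PAC,
/// `arm_clocks` comes from the clock setup code, and `wave_out` is a MIO pin implementing
/// [WaveOutPin] (for example MIO18 for TTC0); all names besides the HAL types are
/// illustrative.
///
/// ```ignore
/// use zynq7000_hal::time::Hertz;
///
/// let ttc = Ttc::new(ttc0_regs).expect("not a valid PS TTC block");
/// let mut pwm = Pwm::new_with_cpu_clk_and_mio_waveout(
///     ttc.ch0,
///     &arm_clocks,
///     Hertz::from_raw(1_000),
///     wave_out,
/// )
/// .unwrap();
/// // 50 % duty cycle.
/// let half_duty = pwm.max_duty_cycle() / 2;
/// pwm.set_duty_cycle(half_duty);
/// ```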
pub fn new_with_cpu_clk_and_mio_waveout(
channel: TtcChannel,
arm_clocks: &ArmClocks,
freq: Hertz,
wave_out: impl WaveOutPin,
) -> Result<Self, TtcConstructionError> {
IoPeriphPin::new(wave_out, TTC_MUX_CONF, None);
Ok(Self::new_with_cpu_clk(channel, arm_clocks, freq)?)
}
/// Create a new PWM instance which uses the CPU 1x clock as the clock source.
pub fn new_with_cpu_clk(
channel: TtcChannel,
arm_clocks: &ArmClocks,
freq: Hertz,
) -> Result<Self, FrequencyIsZeroError> {
Self::new_generic(channel, arm_clocks.cpu_1x_clk(), freq)
}
/// Create a new PWM instance based on a reference clock source.
pub fn new_generic(
channel: TtcChannel,
ref_clk: Hertz,
freq: Hertz,
) -> Result<Self, FrequencyIsZeroError> {
if freq.raw() == 0 {
return Err(FrequencyIsZeroError);
}
let (prescaler_reg, tick_val) = calculate_prescaler_reg_and_interval_ticks(ref_clk, freq);
let id = channel.id() as usize;
let mut pwm = Self { channel, ref_clk };
pwm.set_up_and_configure_pwm(id, prescaler_reg, tick_val);
Ok(pwm)
}
/// Set a new frequency for the PWM cycle.
///
/// This resets the duty cycle to 0%.
pub fn set_frequency(&mut self, freq: Hertz) -> Result<(), FrequencyIsZeroError> {
if freq.raw() == 0 {
return Err(FrequencyIsZeroError);
}
let id = self.channel.id() as usize;
let (prescaler_reg, tick_val) =
calculate_prescaler_reg_and_interval_ticks(self.ref_clk, freq);
self.set_up_and_configure_pwm(id, prescaler_reg, tick_val);
Ok(())
}
#[inline]
pub fn ttc_channel_mut(&mut self) -> &mut TtcChannel {
&mut self.channel
}
#[inline]
pub fn max_duty_cycle(&self) -> u16 {
self.channel
.regs
.read_interval_value(self.channel.id() as usize)
.unwrap()
.value()
}
#[inline]
pub fn set_duty_cycle(&mut self, duty: u16) {
let id = self.channel.id() as usize;
self.channel
.regs
.modify_cnt_ctrl(id, |mut val| {
val.set_disable(true);
val
})
.unwrap();
self.channel
.regs
.write_match_value_0(
self.channel.id() as usize,
zynq7000::ttc::RwValue::new_with_raw_value(duty as u32),
)
.unwrap();
self.channel
.regs
.modify_cnt_ctrl(id, |mut val| {
val.set_disable(false);
val.set_reset(true);
val
})
.unwrap();
}
fn set_up_and_configure_pwm(&mut self, id: usize, prescaler_reg: Option<u4>, tick_val: u16) {
// Disable the counter first.
self.channel
.regs
.write_cnt_ctrl(id, zynq7000::ttc::CounterControl::new_with_raw_value(1))
.unwrap();
// Clock configuration
self.channel
.regs
.write_clk_cntr(
id,
zynq7000::ttc::ClockControl::builder()
.with_ext_clk_edge(false)
.with_clk_src(zynq7000::ttc::ClockSource::Pclk)
.with_prescaler(prescaler_reg.unwrap_or(u4::new(0)))
.with_prescale_enable(prescaler_reg.is_some())
.build(),
)
.unwrap();
self.channel
.regs
.write_interval_value(
id,
zynq7000::ttc::RwValue::new_with_raw_value(tick_val as u32),
)
.unwrap();
// Corresponds to duty cycle 0.
self.channel
.regs
.write_match_value_0(id, zynq7000::ttc::RwValue::new_with_raw_value(0))
.unwrap();
self.channel
.regs
.write_cnt_ctrl(
id,
zynq7000::ttc::CounterControl::builder()
.with_wave_polarity(zynq7000::ttc::WavePolarity::LowToHighOnMatch1)
.with_wave_enable_n(zynq7000::ttc::WaveEnable::Enable)
.with_reset(true)
.with_match_enable(true)
.with_decrementing(false)
.with_mode(zynq7000::ttc::Mode::Interval)
.with_disable(false)
.build(),
)
.unwrap();
}
}
impl embedded_hal::pwm::ErrorType for Pwm {
type Error = Infallible;
}
impl embedded_hal::pwm::SetDutyCycle for Pwm {
#[inline]
fn max_duty_cycle(&self) -> u16 {
self.max_duty_cycle()
}
#[inline]
fn set_duty_cycle(&mut self, duty: u16) -> Result<(), Self::Error> {
self.set_duty_cycle(duty);
Ok(())
}
}

View File

@ -0,0 +1,739 @@
//! # UART module.
//!
//! Support for the processing system UARTs.
use core::convert::Infallible;
use arbitrary_int::u3;
use libm::round;
use zynq7000::{
slcr::reset::DualRefAndClockReset,
uart::{
BaudRateDiv, Baudgen, ChMode, ClkSel, FifoTrigger, InterruptControl, MmioUart, Mode,
UART_0_BASE, UART_1_BASE,
},
};
use crate::{
enable_amba_peripheral_clock,
gpio::{
IoPeriphPin,
mio::{
Mio8, Mio9, Mio10, Mio11, Mio12, Mio13, Mio14, Mio15, Mio28, Mio29, Mio30, Mio31,
Mio32, Mio33, Mio34, Mio35, Mio36, Mio37, Mio38, Mio39, Mio48, Mio49, Mio52, Mio53,
MioPinMarker, MuxConf, Pin,
},
},
slcr::Slcr,
};
#[cfg(not(feature = "7z010-7z007s-clg225"))]
use crate::gpio::mio::{
Mio16, Mio17, Mio18, Mio19, Mio20, Mio21, Mio22, Mio23, Mio24, Mio25, Mio26, Mio27, Mio40,
Mio41, Mio42, Mio43, Mio44, Mio45, Mio46, Mio47, Mio50, Mio51,
};
use super::{clocks::IoClocks, time::Hertz};
pub mod tx;
pub use tx::*;
pub mod tx_async;
pub use tx_async::*;
pub mod rx;
pub use rx::*;
pub const FIFO_DEPTH: usize = 64;
pub const DEFAULT_RX_TRIGGER_LEVEL: u8 = 32;
pub const UART_MUX_CONF: MuxConf = MuxConf::new_with_l3(u3::new(0b111));
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum UartId {
Uart0 = 0,
Uart1 = 1,
}
pub trait PsUart {
fn reg_block(&self) -> MmioUart<'static>;
fn uart_id(&self) -> Option<UartId>;
}
impl PsUart for MmioUart<'static> {
#[inline]
fn reg_block(&self) -> MmioUart<'static> {
unsafe { self.clone() }
}
fn uart_id(&self) -> Option<UartId> {
let base_addr = unsafe { self.ptr() } as usize;
if base_addr == UART_0_BASE {
return Some(UartId::Uart0);
} else if base_addr == UART_1_BASE {
return Some(UartId::Uart1);
}
None
}
}
impl UartId {
/// Unsafely steal a peripheral MMIO block for the given UART.
///
/// # Safety
///
/// Circumvents ownership and safety guarantees by the HAL.
pub const unsafe fn regs(&self) -> MmioUart<'static> {
match self {
UartId::Uart0 => unsafe { zynq7000::uart::Uart::new_mmio_fixed_0() },
UartId::Uart1 => unsafe { zynq7000::uart::Uart::new_mmio_fixed_1() },
}
}
}
pub trait RxPin: MioPinMarker {
const UART_IDX: UartId;
}
pub trait TxPin: MioPinMarker {
const UART_IDX: UartId;
}
pub trait UartPins {}
#[derive(Debug, thiserror::Error)]
#[error("divisor is zero")]
pub struct DivisorZero;
macro_rules! pin_pairs {
($UartPeriph:path, ($( [$(#[$meta:meta], )? $TxMio:ident, $RxMio:ident] ),+ $(,)? )) => {
$(
$( #[$meta] )?
impl TxPin for Pin<$TxMio> {
const UART_IDX: UartId = $UartPeriph;
}
$( #[$meta] )?
impl RxPin for Pin<$RxMio> {
const UART_IDX: UartId = $UartPeriph;
}
impl UartPins for (Pin<$TxMio>, Pin<$RxMio>) {}
)+
};
}
/*
macro_rules! impl_into_uart {
(($($Mio:ident),+)) => {
$(
impl From<Pin<$Mio>> for IoPeriphPin {
fn from(pin: Pin<$Mio>) -> Self {
IoPeriphPin::new(pin, UART_MUX_CONF, None)
}
}
)+
};
}
impl_into_uart!((
Mio10, Mio11, Mio15, Mio14, Mio31, Mio30, Mio35, Mio34, Mio39, Mio38, Mio8, Mio9, Mio12, Mio13,
Mio28, Mio29, Mio32, Mio33, Mio36, Mio37, Mio48, Mio49, Mio52, Mio53
));
#[cfg(not(feature = "7z010-7z007s-clg225"))]
impl_into_uart!((
Mio19, Mio18, Mio23, Mio22, Mio43, Mio42, Mio47, Mio46, Mio51, Mio50, Mio16, Mio17, Mio20,
Mio21, Mio24, Mio25, Mio40, Mio41, Mio44, Mio45
));
*/
pin_pairs!(
UartId::Uart0,
(
[Mio11, Mio10],
[Mio15, Mio14],
[#[cfg(not(feature ="7z010-7z007s-clg225"))], Mio19, Mio18],
[#[cfg(not(feature ="7z010-7z007s-clg225"))], Mio23, Mio22],
[#[cfg(not(feature ="7z010-7z007s-clg225"))], Mio27, Mio26],
[Mio31, Mio30],
[Mio35, Mio34],
[Mio39, Mio38],
[#[cfg(not(feature ="7z010-7z007s-clg225"))], Mio43, Mio42],
[#[cfg(not(feature ="7z010-7z007s-clg225"))], Mio47, Mio46],
[#[cfg(not(feature ="7z010-7z007s-clg225"))], Mio51, Mio50],
)
);
pin_pairs!(
UartId::Uart1,
(
[Mio8, Mio9],
[Mio12, Mio13],
[#[cfg(not(feature ="7z010-7z007s-clg225"))], Mio16, Mio17],
[#[cfg(not(feature ="7z010-7z007s-clg225"))], Mio20, Mio21],
[#[cfg(not(feature ="7z010-7z007s-clg225"))], Mio24, Mio25],
[Mio28, Mio29],
[Mio32, Mio33],
[Mio36, Mio37],
[#[cfg(not(feature ="7z010-7z007s-clg225"))], Mio40, Mio41],
[#[cfg(not(feature ="7z010-7z007s-clg225"))], Mio44, Mio45],
[Mio48, Mio49],
[Mio52, Mio53],
)
);
/// Based on values provided by the vendor library.
pub const MAX_BAUD_RATE: u32 = 6240000;
/// Based on values provided by the vendor library.
pub const MIN_BAUD_RATE: u32 = 110;
#[derive(Debug, Default, Clone, Copy)]
pub enum Parity {
Even,
Odd,
#[default]
None,
}
#[derive(Debug, Default, Clone, Copy)]
pub enum Stopbits {
#[default]
One,
OnePointFive,
Two,
}
#[derive(Debug, Default, Clone, Copy)]
pub enum CharLen {
SixBits,
SevenBits,
#[default]
EightBits,
}
#[derive(Debug, Clone, Copy)]
pub struct ClkConfigRaw {
cd: u16,
bdiv: u8,
}
#[cfg(feature = "alloc")]
pub fn calculate_viable_configs(
mut uart_clk: Hertz,
clk_sel: ClkSel,
target_baud: u32,
) -> alloc::vec::Vec<(ClkConfigRaw, f64)> {
let mut viable_cfgs = alloc::vec::Vec::new();
if clk_sel == ClkSel::UartRefClkDiv8 {
uart_clk /= 8;
}
let mut current_clk_config = ClkConfigRaw::default();
for bdiv in 4..u8::MAX {
let cd =
round(uart_clk.raw() as f64 / ((bdiv as u32 + 1) as f64 * target_baud as f64)) as u64;
if cd > u16::MAX as u64 {
continue;
}
current_clk_config.cd = cd as u16;
current_clk_config.bdiv = bdiv;
let baud = current_clk_config.actual_baud(uart_clk);
let error = ((baud - target_baud as f64).abs() / target_baud as f64) * 100.0;
if error < MAX_BAUDERROR_RATE as f64 {
viable_cfgs.push((current_clk_config, error));
}
}
viable_cfgs
}
/// Calculate the clock configuration for the smallest error to reach the desired target
/// baud rate.
///
/// You can also use [calculate_viable_configs] to get a list of all viable configurations.
pub fn calculate_raw_baud_cfg_smallest_error(
mut uart_clk: Hertz,
clk_sel: ClkSel,
target_baud: u32,
) -> Result<(ClkConfigRaw, f64), DivisorZero> {
if target_baud == 0 {
return Err(DivisorZero);
}
if clk_sel == ClkSel::UartRefClkDiv8 {
uart_clk /= 8;
}
let mut current_clk_config = ClkConfigRaw::default();
let mut best_clk_config = ClkConfigRaw::default();
let mut smallest_error: f64 = 100.0;
for bdiv in 4..u8::MAX {
let cd =
round(uart_clk.raw() as f64 / ((bdiv as u32 + 1) as f64 * target_baud as f64)) as u64;
if cd > u16::MAX as u64 {
continue;
}
current_clk_config.cd = cd as u16;
current_clk_config.bdiv = bdiv;
let baud = current_clk_config.actual_baud(uart_clk);
let error = ((baud - target_baud as f64).abs() / target_baud as f64) * 100.0;
if error < smallest_error {
best_clk_config = current_clk_config;
smallest_error = error;
}
}
Ok((best_clk_config, smallest_error))
}
impl ClkConfigRaw {
#[inline]
pub const fn new(cd: u16, bdiv: u8) -> Result<Self, DivisorZero> {
if cd == 0 {
return Err(DivisorZero);
}
Ok(ClkConfigRaw { cd, bdiv })
}
/// Auto-calculates the best clock configuration settings for the target baudrate.
///
/// This function assumes [ClkSel::UartRefClk] as the clock source. It returns a tuple
/// where the first entry is the clock configuration while the second entry is the associated
/// baud error in percent (0.0 to 100.0). It is recommended to keep this error below 2 to 3 %.
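///
/// A minimal sketch, assuming `io_clks` was obtained from the clock setup code and the
/// target baud rate is 115200; the resulting error should be checked by the caller:
///
/// ```ignore
/// let (clk_cfg, error_percent) =
///     ClkConfigRaw::new_autocalc_with_error(&io_clks, 115_200).unwrap();
/// assert!(error_percent < 2.0);
/// let cfg = UartConfig::new_with_clk_config(clk_cfg);
/// ```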
pub fn new_autocalc_with_error(
io_clks: &IoClocks,
target_baud: u32,
) -> Result<(Self, f64), DivisorZero> {
Self::new_autocalc_generic(io_clks, ClkSel::UartRefClk, target_baud)
}
pub fn new_autocalc_generic(
io_clks: &IoClocks,
clk_sel: ClkSel,
target_baud: u32,
) -> Result<(Self, f64), DivisorZero> {
Self::new_autocalc_with_raw_clk(io_clks.uart_clk(), clk_sel, target_baud)
}
pub fn new_autocalc_with_raw_clk(
uart_clk: Hertz,
clk_sel: ClkSel,
target_baud: u32,
) -> Result<(Self, f64), DivisorZero> {
calculate_raw_baud_cfg_smallest_error(uart_clk, clk_sel, target_baud)
}
#[inline]
pub const fn cd(&self) -> u16 {
self.cd
}
#[inline]
pub const fn bdiv(&self) -> u8 {
self.bdiv
}
#[inline]
pub fn rounded_baud(&self, sel_clk: Hertz) -> u32 {
round(self.actual_baud(sel_clk)) as u32
}
#[inline]
pub fn actual_baud(&self, sel_clk: Hertz) -> f64 {
sel_clk.raw() as f64 / (self.cd as f64 * (self.bdiv + 1) as f64)
}
}
impl Default for ClkConfigRaw {
#[inline]
fn default() -> Self {
ClkConfigRaw::new(1, 0).unwrap()
}
}
#[derive(Debug)]
pub struct UartConfig {
clk_config: ClkConfigRaw,
chmode: ChMode,
parity: Parity,
stopbits: Stopbits,
chrl: CharLen,
clk_sel: ClkSel,
}
impl UartConfig {
pub fn new_with_clk_config(clk_config: ClkConfigRaw) -> Self {
Self::new(
clk_config,
ChMode::default(),
Parity::default(),
Stopbits::default(),
CharLen::default(),
ClkSel::default(),
)
}
#[inline]
pub const fn new(
clk_config: ClkConfigRaw,
chmode: ChMode,
parity: Parity,
stopbits: Stopbits,
chrl: CharLen,
clk_sel: ClkSel,
) -> Self {
UartConfig {
clk_config,
chmode,
parity,
stopbits,
chrl,
clk_sel,
}
}
#[inline]
pub const fn raw_clk_config(&self) -> ClkConfigRaw {
self.clk_config
}
#[inline]
pub const fn chmode(&self) -> ChMode {
self.chmode
}
#[inline]
pub const fn parity(&self) -> Parity {
self.parity
}
#[inline]
pub const fn stopbits(&self) -> Stopbits {
self.stopbits
}
#[inline]
pub const fn chrl(&self) -> CharLen {
self.chrl
}
#[inline]
pub const fn clksel(&self) -> ClkSel {
self.clk_sel
}
}
// TODO: Impl Debug
pub struct Uart {
rx: Rx,
tx: Tx,
cfg: UartConfig,
}
#[derive(Debug, thiserror::Error)]
#[error("invalid UART ID")]
pub struct InvalidPsUart;
#[derive(Debug, thiserror::Error)]
pub enum UartConstructionError {
#[error("invalid UART ID")]
InvalidPsUart(#[from] InvalidPsUart),
#[error("missmatch between pins index and passed index")]
IdxMissmatch,
#[error("invalid pin mux conf for UART")]
InvalidMuxConf(MuxConf),
}
impl Uart {
/// This is the constructor to use the PS UART with EMIO pins to route the UART into the PL
/// or expose them via the PL package pins.
///
/// A valid PL design which routes the UART pins through into the PL must be used for this to
/// work.
pub fn new_with_emio(uart: impl PsUart, cfg: UartConfig) -> Result<Uart, InvalidPsUart> {
if uart.uart_id().is_none() {
return Err(InvalidPsUart);
}
Ok(Self::new_generic_unchecked(
uart.reg_block(),
uart.uart_id().unwrap(),
cfg,
))
}
/// This is the constructor to use the PS UART with MIO pins.
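///
/// # Example
///
/// A minimal sketch, assuming `uart1_regs` is the UART1 MMIO block provided by the PAC,
/// `io_clks` comes from the clock setup code and `tx_pin`/`rx_pin` are the MIO48/MIO49
/// pins obtained from the GPIO driver; all names besides the HAL types are illustrative.
///
/// ```ignore
/// use embedded_io::Write;
///
/// let (clk_cfg, _error) = ClkConfigRaw::new_autocalc_with_error(&io_clks, 115_200).unwrap();
/// let mut uart = Uart::new_with_mio(
///     uart1_regs,
///     UartConfig::new_with_clk_config(clk_cfg),
///     (tx_pin, rx_pin),
/// )
/// .unwrap();
/// uart.write_all(b"hello\r\n").unwrap();
/// ```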
pub fn new_with_mio<TxPinI: TxPin, RxPinI: RxPin>(
uart: impl PsUart,
cfg: UartConfig,
pins: (TxPinI, RxPinI),
) -> Result<Self, UartConstructionError>
where
(TxPinI, RxPinI): UartPins,
{
let id = uart.uart_id();
if id.is_none() {
return Err(InvalidPsUart.into());
}
if id.unwrap() != TxPinI::UART_IDX || id.unwrap() != RxPinI::UART_IDX {
return Err(UartConstructionError::IdxMissmatch);
}
IoPeriphPin::new(pins.0, UART_MUX_CONF, None);
IoPeriphPin::new(pins.1, UART_MUX_CONF, None);
Ok(Self::new_generic_unchecked(
uart.reg_block(),
id.unwrap(),
cfg,
))
}
/// This is the generic constructor used by all other constructors.
///
/// It does not perform any pin checks or resource management. It is recommended to use the other
/// constructors instead.
pub fn new_generic_unchecked(
mut reg_block: MmioUart<'static>,
uart_id: UartId,
cfg: UartConfig,
) -> Uart {
let periph_sel = match uart_id {
UartId::Uart0 => crate::PeripheralSelect::Uart0,
UartId::Uart1 => crate::PeripheralSelect::Uart1,
};
enable_amba_peripheral_clock(periph_sel);
reset(uart_id);
reg_block.modify_cr(|mut v| {
v.set_tx_dis(true);
v.set_rx_dis(true);
v
});
// Disable all interrupts.
reg_block.write_idr(InterruptControl::new_with_raw_value(0xFFFF_FFFF));
let mode = Mode::builder()
.with_chmode(cfg.chmode)
.with_nbstop(match cfg.stopbits {
Stopbits::One => zynq7000::uart::Stopbits::One,
Stopbits::OnePointFive => zynq7000::uart::Stopbits::OnePointFive,
Stopbits::Two => zynq7000::uart::Stopbits::Two,
})
.with_par(match cfg.parity {
Parity::Even => zynq7000::uart::Parity::Even,
Parity::Odd => zynq7000::uart::Parity::Odd,
Parity::None => zynq7000::uart::Parity::NoParity,
})
.with_chrl(match cfg.chrl {
CharLen::SixBits => zynq7000::uart::Chrl::SixBits,
CharLen::SevenBits => zynq7000::uart::Chrl::SevenBits,
CharLen::EightBits => zynq7000::uart::Chrl::EightBits,
})
.with_clksel(cfg.clk_sel)
.build();
reg_block.write_mr(mode);
reg_block.write_baudgen(
Baudgen::builder()
.with_cd(cfg.raw_clk_config().cd())
.build(),
);
reg_block.write_baud_rate_div(
BaudRateDiv::builder()
.with_bdiv(cfg.raw_clk_config().bdiv())
.build(),
);
// Soft reset for both TX and RX.
reg_block.modify_cr(|mut v| {
v.set_tx_rst(true);
v.set_rx_rst(true);
v
});
// Write default value.
reg_block.write_rx_fifo_trigger(FifoTrigger::new_with_raw_value(
DEFAULT_RX_TRIGGER_LEVEL as u32,
));
// Enable TX and RX.
reg_block.modify_cr(|mut v| {
v.set_tx_dis(false);
v.set_rx_dis(false);
v.set_tx_en(true);
v.set_rx_en(true);
v
});
Uart {
rx: Rx {
regs: unsafe { reg_block.clone() },
},
tx: Tx {
regs: reg_block,
idx: uart_id,
},
cfg,
}
}
#[inline]
pub fn set_mode(&mut self, mode: ChMode) {
self.regs().modify_mr(|mut mr| {
mr.set_chmode(mode);
mr
});
}
#[inline]
pub const fn regs(&mut self) -> &mut MmioUart<'static> {
&mut self.rx.regs
}
#[inline]
pub const fn cfg(&self) -> &UartConfig {
&self.cfg
}
#[inline]
pub const fn split(self) -> (Tx, Rx) {
(self.tx, self.rx)
}
}
impl embedded_hal_nb::serial::ErrorType for Uart {
type Error = Infallible;
}
impl embedded_hal_nb::serial::Write for Uart {
#[inline]
fn write(&mut self, word: u8) -> nb::Result<(), Self::Error> {
self.tx.write(word)
}
fn flush(&mut self) -> nb::Result<(), Self::Error> {
self.tx.flush()
}
}
impl embedded_hal_nb::serial::Read for Uart {
/// Read one byte from the FIFO.
///
/// This operation is infallible because pulling an available byte from the FIFO
/// always succeeds. If you want to be informed about RX errors, you should look at the
/// non-blocking API using interrupts, which also tracks the RX error bits.
#[inline]
fn read(&mut self) -> nb::Result<u8, Self::Error> {
self.rx.read()
}
}
impl embedded_io::ErrorType for Uart {
type Error = Infallible;
}
impl embedded_io::Write for Uart {
fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
self.tx.write(buf)
}
fn flush(&mut self) -> Result<(), Self::Error> {
self.tx.flush()
}
}
impl embedded_io::Read for Uart {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
self.rx.read(buf)
}
}
/// Reset the UART peripheral using the SLCR reset register for UART.
///
/// Please note that this function will interfere with an already configured
/// UART instance.
#[inline]
pub fn reset(id: UartId) {
let assert_reset = match id {
UartId::Uart0 => DualRefAndClockReset::builder()
.with_periph1_ref_rst(false)
.with_periph0_ref_rst(true)
.with_periph1_cpu1x_rst(false)
.with_periph0_cpu1x_rst(true)
.build(),
UartId::Uart1 => DualRefAndClockReset::builder()
.with_periph1_ref_rst(true)
.with_periph0_ref_rst(false)
.with_periph1_cpu1x_rst(true)
.with_periph0_cpu1x_rst(false)
.build(),
};
unsafe {
Slcr::with(|regs| {
regs.reset_ctrl().write_uart(assert_reset);
// Keep it in reset for one cycle; not sure whether this is necessary.
cortex_ar::asm::nop();
regs.reset_ctrl().write_uart(DualRefAndClockReset::DEFAULT);
});
}
}
#[cfg(test)]
mod tests {
use super::*;
use approx::abs_diff_eq;
use fugit::HertzU32;
use zynq7000::uart::ClkSel;
const REF_UART_CLK: HertzU32 = HertzU32::from_raw(50_000_000);
const REF_UART_CLK_DIV_8: HertzU32 = HertzU32::from_raw(6_250_000);
#[test]
fn test_error_calc_0() {
// Baud 600
let cfg_0 = ClkConfigRaw::new(10417, 7).unwrap();
let actual_baud_0 = cfg_0.actual_baud(REF_UART_CLK);
assert!(abs_diff_eq!(actual_baud_0, 599.980, epsilon = 0.01));
}
#[test]
fn test_error_calc_1() {
// Baud 9600
let cfg = ClkConfigRaw::new(81, 7).unwrap();
let actual_baud = cfg.actual_baud(REF_UART_CLK_DIV_8);
assert!(abs_diff_eq!(actual_baud, 9645.061, epsilon = 0.01));
}
#[test]
fn test_error_calc_2() {
// Baud 9600
let cfg = ClkConfigRaw::new(651, 7).unwrap();
let actual_baud = cfg.actual_baud(REF_UART_CLK);
assert!(abs_diff_eq!(actual_baud, 9600.614, epsilon = 0.01));
}
#[test]
fn test_error_calc_3() {
// Baud 28800
let cfg = ClkConfigRaw::new(347, 4).unwrap();
let actual_baud = cfg.actual_baud(REF_UART_CLK);
assert!(abs_diff_eq!(actual_baud, 28818.44, epsilon = 0.01));
}
#[test]
fn test_error_calc_4() {
// Baud 921600
let cfg = ClkConfigRaw::new(9, 5).unwrap();
let actual_baud = cfg.actual_baud(REF_UART_CLK);
assert!(abs_diff_eq!(actual_baud, 925925.92, epsilon = 0.01));
}
#[test]
fn test_best_calc_0() {
let result = ClkConfigRaw::new_autocalc_with_raw_clk(REF_UART_CLK, ClkSel::UartRefClk, 600);
assert!(result.is_ok());
let (cfg, _error) = result.unwrap();
assert_eq!(cfg.cd(), 499);
assert_eq!(cfg.bdiv(), 166);
}
#[test]
#[cfg(feature = "alloc")]
fn test_viable_config_calculation() {
let cfgs = calculate_viable_configs(REF_UART_CLK, ClkSel::UartRefClk, 115200);
assert!(
cfgs.iter()
.find(|(cfg, _error)| { cfg.cd() == 62 && cfg.bdiv() == 6 })
.is_some()
);
}
}

249
zynq7000-hal/src/uart/rx.rs Normal file
View File

@ -0,0 +1,249 @@
use core::convert::Infallible;
use arbitrary_int::Number;
use zynq7000::uart::{InterruptControl, InterruptStatus, MmioUart};
use super::FIFO_DEPTH;
pub struct Rx {
pub(crate) regs: MmioUart<'static>,
}
// TODO: Remove once this is implemented for MmioUart
unsafe impl Send for Rx {}
#[derive(Debug, Default, Clone, Copy)]
pub struct RxErrors {
framing: bool,
overrun: bool,
parity: bool,
}
impl RxErrors {
#[inline]
pub const fn framing(&self) -> bool {
self.framing
}
#[inline]
pub const fn overrun(&self) -> bool {
self.overrun
}
#[inline]
pub const fn parity(&self) -> bool {
self.parity
}
}
#[derive(Debug, Default)]
pub struct RxInterruptResult {
read_bytes: usize,
errors: Option<RxErrors>,
}
impl RxInterruptResult {
pub fn read_bytes(&self) -> usize {
self.read_bytes
}
pub fn errors(&self) -> Option<RxErrors> {
self.errors
}
}
impl Rx {
#[inline]
pub fn read_fifo(&mut self) -> nb::Result<u8, Infallible> {
if self.regs.read_sr().rx_empty() {
return Err(nb::Error::WouldBlock);
}
Ok(self.regs.read_fifo().fifo())
}
#[inline(always)]
pub fn read_fifo_unchecked(&mut self) -> u8 {
self.regs.read_fifo().fifo()
}
/// Write the receiver timeout value.
///
/// A value of 0 will disable the receiver timeout.
/// Otherwise, the 10-bit counter used by the receiver timeout mechanism of the UART will
/// load this value for the upper 8 bits on a reload. The counter is clocked by the UART
/// bit clock, so this value times 4 is the number of UART clock ticks until a timeout occurs.
#[inline]
pub fn set_rx_timeout_value(&mut self, rto: u8) {
self.regs.write_rx_tout(rto as u32);
}
#[inline]
pub fn soft_reset(&mut self) {
self.regs.modify_cr(|mut cr| {
cr.set_rx_rst(true);
cr
});
while self.regs.read_cr().rx_rst() {}
}
/// Helper function to start the interrupt driven reception of data.
///
/// This function will perform a soft-reset, clear RX related interrupts and then enable
/// all relevant interrupts for the RX side of the UART. These steps are recommended to have
/// a glitch-free start of the interrupt driven reception.
///
/// This should be called once at system start-up. After that, you only need to call
/// [Self::on_interrupt] in the interrupt handler for the UART peripheral.
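///
/// A minimal sketch of the matching interrupt service routine (interrupt registration is
/// not shown and all names besides the HAL types are illustrative):
///
/// ```ignore
/// // Inside the UART interrupt service routine:
/// let mut buf = [0u8; FIFO_DEPTH];
/// let result = rx.on_interrupt(&mut buf, true);
/// if let Some(errors) = result.errors() {
///     // Handle framing/overrun/parity errors here.
/// }
/// // Forward the received bytes, for example into a queue read by the application.
/// let _received = &buf[0..result.read_bytes()];
/// ```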
pub fn start_interrupt_driven_reception(&mut self) {
self.soft_reset();
self.clear_interrupts();
self.enable_interrupts();
}
/// Enables all interrupts relevant for the RX side of the UART.
///
/// It is recommended to also clear all interrupts immediately after enabling them.
#[inline]
pub fn enable_interrupts(&mut self) {
self.regs.write_ier(
InterruptControl::builder()
.with_tx_over(false)
.with_tx_near_full(false)
.with_tx_trig(false)
.with_rx_dms(false)
.with_rx_timeout(true)
.with_rx_parity(true)
.with_rx_framing(true)
.with_rx_over(true)
.with_tx_full(false)
.with_tx_empty(false)
.with_rx_full(true)
.with_rx_empty(false)
.with_rx_trg(true)
.build(),
);
}
pub fn on_interrupt(
&mut self,
buf: &mut [u8; FIFO_DEPTH],
reset_rx_timeout: bool,
) -> RxInterruptResult {
let mut result = RxInterruptResult::default();
let imr = self.regs.read_imr();
if !imr.rx_full()
&& !imr.rx_trg()
&& !imr.rx_parity()
&& !imr.rx_framing()
&& !imr.rx_over()
&& !imr.rx_timeout()
{
return result;
}
let isr = self.regs.read_isr();
if isr.rx_full() {
// Read all bytes in the full RX fifo.
for byte in buf.iter_mut() {
*byte = self.read_fifo_unchecked();
}
result.read_bytes = FIFO_DEPTH;
} else if isr.rx_trg() {
// It is guaranteed that we can read the FIFO level amount of data
let fifo_trigger = self.regs.read_rx_fifo_trigger().trig().as_usize();
(0..fifo_trigger).for_each(|i| {
buf[i] = self.read_fifo_unchecked();
});
result.read_bytes = fifo_trigger;
}
// Read everything else that is available, as long as there is space left.
while result.read_bytes < buf.len() {
if let Ok(byte) = self.read_fifo() {
buf[result.read_bytes] = byte;
result.read_bytes += 1;
} else {
break;
}
}
// Handle error events.
if isr.rx_parity() || isr.rx_framing() || isr.rx_over() {
result.errors = Some(RxErrors {
framing: isr.rx_framing(),
overrun: isr.rx_over(),
parity: isr.rx_parity(),
});
}
// Handle timeout event.
if isr.rx_timeout() && reset_rx_timeout {
self.regs.modify_cr(|mut cr| {
cr.set_rstto(true);
cr
});
}
self.clear_interrupts();
result
}
/// Clears all RX-related interrupts.
#[inline]
pub fn clear_interrupts(&mut self) {
self.regs.write_isr(
InterruptStatus::builder()
.with_tx_over(false)
.with_tx_near_full(false)
.with_tx_trig(false)
.with_rx_dms(true)
.with_rx_timeout(true)
.with_rx_parity(true)
.with_rx_framing(true)
.with_rx_over(true)
.with_tx_full(false)
.with_tx_empty(false)
.with_rx_full(true)
.with_rx_empty(true)
.with_rx_trg(true)
.build(),
);
}
}
impl embedded_hal_nb::serial::ErrorType for Rx {
type Error = Infallible;
}
impl embedded_hal_nb::serial::Read for Rx {
/// Read one byte from the FIFO.
///
/// This operation is infallible because pulling an available byte from the FIFO
/// always succeeds. If you want to be informed about RX errors, you should look at the
/// non-blocking API using interrupts, which also tracks the RX error bits.
#[inline]
fn read(&mut self) -> nb::Result<u8, Self::Error> {
self.read_fifo()
}
}
impl embedded_io::ErrorType for Rx {
type Error = Infallible;
}
impl embedded_io::Read for Rx {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
if buf.is_empty() {
return Ok(0);
}
let mut read = 0;
loop {
if !self.regs.read_sr().rx_empty() {
break;
}
}
for byte in buf.iter_mut() {
match <Self as embedded_hal_nb::serial::Read<u8>>::read(self) {
Ok(w) => {
*byte = w;
read += 1;
}
Err(nb::Error::WouldBlock) => break,
}
}
Ok(read)
}
}

201
zynq7000-hal/src/uart/tx.rs Normal file
View File

@ -0,0 +1,201 @@
use core::convert::Infallible;
use zynq7000::uart::{Fifo, InterruptControl, InterruptStatus, MmioUart};
use super::UartId;
pub struct Tx {
pub(crate) regs: MmioUart<'static>,
pub(crate) idx: UartId,
}
impl Tx {
/// Steal the TX side of the UART for a given UART index.
///
/// # Safety
///
/// Circumvents safety guarantees provided by the compiler.
#[inline]
pub const unsafe fn steal(idx: UartId) -> Self {
Tx {
regs: unsafe { idx.regs() },
idx,
}
}
#[inline]
pub const fn uart_idx(&self) -> UartId {
self.idx
}
#[inline]
pub const fn regs(&mut self) -> &mut MmioUart<'static> {
&mut self.regs
}
#[inline]
pub fn write_fifo(&mut self, word: u8) -> nb::Result<(), Infallible> {
if self.regs.read_sr().tx_full() {
return Err(nb::Error::WouldBlock);
}
self.write_fifo_unchecked(word);
Ok(())
}
/// Enables TX side of the UART.
#[inline]
pub fn enable(&mut self, with_reset: bool) {
if with_reset {
self.soft_reset();
}
self.regs.modify_cr(|mut val| {
val.set_tx_en(true);
val.set_tx_dis(false);
val
});
}
/// Disables TX side of the UART.
#[inline]
pub fn disable(&mut self) {
self.regs.modify_cr(|mut val| {
val.set_tx_en(false);
val.set_tx_dis(true);
val
});
}
#[inline]
pub fn soft_reset(&mut self) {
self.regs.modify_cr(|mut val| {
val.set_tx_rst(true);
val
});
loop {
if !self.regs.read_cr().tx_rst() {
break;
}
}
}
#[inline]
pub fn write_fifo_unchecked(&mut self, word: u8) {
self.regs.write_fifo(Fifo::new_with_raw_value(word as u32));
}
/// Enables interrupts relevant for the TX side of the UART except the TX trigger interrupt.
#[inline]
pub fn enable_interrupts(&mut self) {
self.regs.write_ier(
InterruptControl::builder()
.with_tx_over(true)
.with_tx_near_full(true)
.with_tx_trig(false)
.with_rx_dms(false)
.with_rx_timeout(false)
.with_rx_parity(false)
.with_rx_framing(false)
.with_rx_over(false)
.with_tx_full(true)
.with_tx_empty(true)
.with_rx_full(false)
.with_rx_empty(false)
.with_rx_trg(false)
.build(),
);
}
/// Disable interrupts relevant for the TX side of the UART except the TX trigger interrupt.
#[inline]
pub fn disable_interrupts(&mut self) {
self.regs.write_idr(
InterruptControl::builder()
.with_tx_over(true)
.with_tx_near_full(true)
.with_tx_trig(false)
.with_rx_dms(false)
.with_rx_timeout(false)
.with_rx_parity(false)
.with_rx_framing(false)
.with_rx_over(false)
.with_tx_full(true)
.with_tx_empty(true)
.with_rx_full(false)
.with_rx_empty(false)
.with_rx_trg(false)
.build(),
);
}
/// Clears interrupts relevant for the TX side of the UART except the TX trigger interrupt.
#[inline]
pub fn clear_interrupts(&mut self) {
self.regs.write_isr(
InterruptStatus::builder()
.with_tx_over(true)
.with_tx_near_full(true)
.with_tx_trig(false)
.with_rx_dms(false)
.with_rx_timeout(false)
.with_rx_parity(false)
.with_rx_framing(false)
.with_rx_over(false)
.with_tx_full(true)
.with_tx_empty(true)
.with_rx_full(false)
.with_rx_empty(false)
.with_rx_trg(false)
.build(),
);
}
}
impl embedded_hal_nb::serial::ErrorType for Tx {
type Error = Infallible;
}
impl embedded_hal_nb::serial::Write for Tx {
#[inline]
fn write(&mut self, word: u8) -> nb::Result<(), Self::Error> {
self.write_fifo(word)
}
fn flush(&mut self) -> nb::Result<(), Self::Error> {
loop {
if self.regs.read_sr().tx_empty() {
return Ok(());
}
}
}
}
impl embedded_io::ErrorType for Tx {
type Error = Infallible;
}
impl embedded_io::Write for Tx {
fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
if buf.is_empty() {
return Ok(0);
}
let mut written = 0;
loop {
if !self.regs.read_sr().tx_full() {
break;
}
}
for byte in buf.iter() {
match self.write_fifo(*byte) {
Ok(_) => written += 1,
Err(nb::Error::WouldBlock) => return Ok(written),
}
}
Ok(written)
}
fn flush(&mut self) -> Result<(), Self::Error> {
<Self as embedded_hal_nb::serial::Write<u8>>::flush(self).ok();
Ok(())
}
}

205
zynq7000-hal/src/uart/tx_async.rs Normal file
View File

@ -0,0 +1,205 @@
use core::{cell::RefCell, convert::Infallible, sync::atomic::AtomicBool};
use critical_section::Mutex;
use embassy_sync::waitqueue::AtomicWaker;
use raw_slice::RawBufSlice;
use crate::uart::{FIFO_DEPTH, Tx, UartId};
#[derive(Debug)]
pub enum TransferType {
Read,
Write,
Transfer,
TransferInPlace,
}
static UART_TX_WAKERS: [AtomicWaker; 2] = [const { AtomicWaker::new() }; 2];
static TX_CONTEXTS: [Mutex<RefCell<TxContext>>; 2] =
[const { Mutex::new(RefCell::new(TxContext::new())) }; 2];
// Completion flag. Kept outside of the context structure as an atomic to avoid
// critical section.
static TX_DONE: [AtomicBool; 2] = [const { AtomicBool::new(false) }; 2];
/// This is a generic interrupt handler to handle asynchronous UART TX operations for a given
/// UART peripheral.
///
/// The user has to call this once in the interrupt handler responsible for the TX interrupts on
/// the given UART bank.
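///
/// A minimal sketch (interrupt registration is not shown, names are illustrative):
///
/// ```ignore
/// // Inside the interrupt service routine for UART1:
/// on_interrupt_tx(UartId::Uart1);
/// ```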
pub fn on_interrupt_tx(peripheral: UartId) {
let mut tx_with_irq = unsafe { Tx::steal(peripheral) };
let idx = peripheral as usize;
let imr = tx_with_irq.regs().read_imr();
// IRQ is not related to TX.
if !imr.tx_over() && !imr.tx_near_full() && !imr.tx_full() && !imr.tx_empty()
{
return;
}
let isr = tx_with_irq.regs().read_isr();
let unexpected_overrun = isr.tx_over();
let mut context = critical_section::with(|cs| {
let context_ref = TX_CONTEXTS[idx].borrow(cs);
*context_ref.borrow()
});
// No transfer active.
if context.slice.is_null() {
return;
}
let slice_len = context.slice.len().unwrap();
context.tx_overrun = unexpected_overrun;
if (context.progress >= slice_len && isr.tx_empty()) || slice_len == 0 {
// Write back updated context structure.
critical_section::with(|cs| {
let context_ref = TX_CONTEXTS[idx].borrow(cs);
*context_ref.borrow_mut() = context;
});
// Transfer is done.
TX_DONE[idx].store(true, core::sync::atomic::Ordering::Relaxed);
tx_with_irq.disable_interrupts();
tx_with_irq.clear_interrupts();
UART_TX_WAKERS[idx].wake();
return;
}
// Safety: We documented that the user provided slice must outlive the future, so we convert
// the raw pointer back to the slice here.
let slice = unsafe { context.slice.get() }.expect("slice is invalid");
while context.progress < slice_len {
if tx_with_irq.regs().read_sr().tx_full() {
break;
}
// Safety: TX structure is owned by the future which does not write into the data
// register, so we can assume we are the only one writing to the data register.
tx_with_irq.write_fifo_unchecked(slice[context.progress]);
context.progress += 1;
}
// Write back updated context structure.
critical_section::with(|cs| {
let context_ref = TX_CONTEXTS[idx].borrow(cs);
*context_ref.borrow_mut() = context;
});
// Clear interrupts.
tx_with_irq.clear_interrupts();
}
#[derive(Debug, Copy, Clone)]
pub struct TxContext {
progress: usize,
tx_overrun: bool,
slice: RawBufSlice,
}
#[allow(clippy::new_without_default)]
impl TxContext {
pub const fn new() -> Self {
Self {
progress: 0,
tx_overrun: false,
slice: RawBufSlice::new_nulled(),
}
}
}
pub struct TxFuture {
id: UartId,
}
impl TxFuture {
/// # Safety
///
/// This function stores the raw pointer of the passed data slice. The user MUST ensure
/// that the slice outlives the data structure.
pub unsafe fn new(tx_with_irq: &mut Tx, data: &[u8]) -> Self {
let idx = tx_with_irq.uart_idx() as usize;
TX_DONE[idx].store(false, core::sync::atomic::Ordering::Relaxed);
tx_with_irq.disable_interrupts();
tx_with_irq.disable();
let init_fill_count = core::cmp::min(data.len(), FIFO_DEPTH);
critical_section::with(|cs| {
let context_ref = TX_CONTEXTS[idx].borrow(cs);
let mut context = context_ref.borrow_mut();
unsafe {
context.slice.set(data);
}
context.progress = init_fill_count; // We fill the FIFO.
});
tx_with_irq.enable(true);
for data in data.iter().take(init_fill_count) {
tx_with_irq.write_fifo_unchecked(*data);
}
tx_with_irq.enable_interrupts();
Self {
id: tx_with_irq.uart_idx(),
}
}
}
impl Future for TxFuture {
type Output = usize;
fn poll(
self: core::pin::Pin<&mut Self>,
cx: &mut core::task::Context<'_>,
) -> core::task::Poll<Self::Output> {
UART_TX_WAKERS[self.id as usize].register(cx.waker());
if TX_DONE[self.id as usize].swap(false, core::sync::atomic::Ordering::Relaxed) {
let progress = critical_section::with(|cs| {
let mut ctx = TX_CONTEXTS[self.id as usize].borrow(cs).borrow_mut();
ctx.slice.set_null();
ctx.progress
});
return core::task::Poll::Ready(progress);
}
core::task::Poll::Pending
}
}
impl Drop for TxFuture {
fn drop(&mut self) {
let mut tx = unsafe { Tx::steal(self.id) };
tx.disable_interrupts();
}
}
pub struct TxAsync {
tx: Tx,
}
impl TxAsync {
pub fn new(tx: Tx) -> Self {
Self { tx }
}
/// Write a buffer asynchronously.
///
/// This implementation is not side effect free, and a started future might have already
/// written part of the passed buffer.
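///
/// A minimal sketch, assuming `tx` is the [Tx] half of a configured UART, the UART
/// interrupt service routine calls [on_interrupt_tx] and an async executor is running:
///
/// ```ignore
/// let mut tx_async = TxAsync::new(tx);
/// let written = tx_async.write(b"hello\r\n").await;
/// ```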
pub async fn write(&mut self, buf: &[u8]) -> usize {
if buf.is_empty() {
return 0;
}
let fut = unsafe { TxFuture::new(&mut self.tx, buf) };
fut.await
}
pub fn release(self) -> Tx {
self.tx
}
}
impl embedded_io::ErrorType for TxAsync {
type Error = Infallible;
}
impl embedded_io_async::Write for TxAsync {
/// Write a buffer asynchronously.
///
/// This implementation is not side effect free, and a started future might have already
/// written part of the passed buffer.
async fn write(&mut self, buf: &[u8]) -> Result<usize, Self::Error> {
Ok(self.write(buf).await)
}
}