getting closer

This commit is contained in:
2025-02-21 15:44:45 +01:00
parent 21d618172b
commit 956f22b41b
5 changed files with 365 additions and 31 deletions

27
Cargo.lock generated
View File

@ -24,6 +24,15 @@ dependencies = [
"syn",
]
[[package]]
name = "cortex-a-rt"
version = "0.1.0"
dependencies = [
"arm-targets",
"cortex-r-a",
"semihosting",
]
[[package]]
name = "cortex-r-a"
version = "0.1.0"
@ -33,15 +42,6 @@ dependencies = [
"bitbybit",
]
[[package]]
name = "cortex-r-a-rt"
version = "0.1.0"
dependencies = [
"arm-targets",
"cortex-r-a",
"semihosting",
]
[[package]]
name = "proc-macro2"
version = "1.0.93"
@ -88,5 +88,12 @@ name = "zedboard-blinky-rs"
version = "0.1.0"
dependencies = [
"cortex-r-a",
"cortex-r-a-rt",
"zynq-rt",
]
[[package]]
name = "zynq-rt"
version = "0.1.0"
dependencies = [
"cortex-a-rt",
]

View File

@ -8,4 +8,4 @@ edition = "2021"
[dependencies]
cortex-r-a = { path = "../cortex-r-a/cortex-r-a" }
cortex-r-a-rt = { path = "../cortex-r-a/cortex-a-rt", features = ["vfp-dp"] }
zynq-rt = { path = "zynq-rt" }

View File

@ -3,7 +3,7 @@
use core::panic::PanicInfo;
use cortex_r_a::asm::nop;
use cortex_r_a_rt as _;
use zynq_rt as _;
/// Entry point (not called like a normal main function)
#[no_mangle]

View File

@ -28,11 +28,7 @@ core::arch::global_asm!(
.set EFUSEStatus, (0xF800D000 + 0x10) /*(XPS_EFUSE_BASEADDR + EFUSE_STATUS_OFFSET)*/
/* workaround for simulation not working when L1 D and I caches,MMU and L2 cache enabled - DT568997 */
.if SIM_MODE == 1
.set CRValMmuCac, 0b00000000000000 /* Disable IDC, and MMU */
.else
.set CRValMmuCac, 0b01000000000101 /* Enable IDC, and MMU */
.endif
.set CRValHiVectorAddr, 0b10000000000000 /* Set the Vector address to high, 0xFFFF0000 */
@ -46,6 +42,8 @@ core::arch::global_asm!(
.set SLCRUnlockKey, 0xDF0D /* SLCR unlock key */
.set SLCRL2cRamConfig, 0x00020202 /* SLCR L2C ram configuration */
.set FPEXC_EN, 0x40000000 /* FPU enable bit, (1 << 30) */
.section .text.startup
.align 0
@ -110,19 +108,6 @@ initialize:
bic r0, r0, #0x1 /* clear bit 0 */
mcr p15, 0, r0, c1, c0, 0 /* write value back */
#ifdef SHAREABLE_DDR
/* Mark the entire DDR memory as shareable */
ldr r3, =0x3ff /* 1024 entries to cover 1G DDR */
ldr r0, =TblBase /* MMU Table address in memory */
ldr r2, =0x15de6 /* S=b1 TEX=b101 AP=b11, Domain=b1111, C=b0, B=b1 */
shareable_loop:
str r2, [r0] /* write the entry to MMU table */
add r0, r0, #0x4 /* next entry in the table */
add r2, r2, #0x100000 /* next section */
subs r3, r3, #1
bge shareable_loop /* loop till 1G is covered */
#endif
mrs r0, cpsr /* get the current PSR */
mvn r1, #0x1f /* set up the irq stack pointer */
and r2, r1, r0
@ -175,6 +160,8 @@ shareable_loop:
msr cpsr, r2
ldr r13,=SYS_stack /* SYS stack pointer */
bl init_mmu_table
/*set scu enable bit in scu*/
ldr r7, =0xf8f00000
ldr r0, [r7]
@ -184,8 +171,8 @@ shareable_loop:
/* enable MMU and cache */
ldr r0,=TblBase /* Load MMU translation table base */
orr r0, r0, #0x5B /* Outer-cacheable, WB */
mcr 15, 0, r0, c2, c0, 0 /* TTB0 */
/* orr r0, r0, #0x5B Outer-cacheable, WB */
mcr p15, 0, r0, c2, c0, 0 /* TTB0 */
mvn r0,#0 /* Load MMU domains -- all ones=manager */
mcr p15,0,r0,c3,c0,0

View File

@ -1 +1,341 @@
//! The overview of translation table memory attributes is described below.
//!
//!| | Memory Range | Definition in Translation Table |
//!|-----------------------|-------------------------|-----------------------------------|
//!| DDR | 0x00000000 - 0x3FFFFFFF | Normal write-back Cacheable |
//!| PL | 0x40000000 - 0xBFFFFFFF | Strongly Ordered |
//!| Reserved | 0xC0000000 - 0xDFFFFFFF | Unassigned |
//!| Memory mapped devices | 0xE0000000 - 0xE02FFFFF | Device Memory |
//!| Reserved | 0xE0300000 - 0xE0FFFFFF | Unassigned |
//!| NAND, NOR | 0xE1000000 - 0xE3FFFFFF | Device memory |
//!| SRAM | 0xE4000000 - 0xE5FFFFFF | Normal write-back Cacheable |
//!| Reserved | 0xE6000000 - 0xF7FFFFFF | Unassigned |
//!| AMBA APB Peripherals | 0xF8000000 - 0xF8FFFFFF | Device Memory |
//!| Reserved | 0xF9000000 - 0xFBFFFFFF | Unassigned |
//!| Linear QSPI - XIP | 0xFC000000 - 0xFDFFFFFF | Normal write-through cacheable |
//!| Reserved | 0xFE000000 - 0xFFEFFFFF | Unassigned |
//!| OCM | 0xFFF00000 - 0xFFFFFFFF | Normal inner write-back cacheable |
//!
//! For region 0x00000000 - 0x3FFFFFFF, on a system where DDR is less than 1 GB,
//! the region after DDR and before PL is marked as undefined/reserved in the
//! translation table. Within 0xF8000000 - 0xF8FFFFFF, the ranges 0xF8000C00 -
//! 0xF8000FFF, 0xF8010000 - 0xF88FFFFF and 0xF8F03000 - 0xF8FFFFFF are
//! reserved, but due to the granule size of 1 MB, it is not possible to define
//! separate regions for them. For region 0xFFF00000 - 0xFFFFFFFF, 0xFFF00000 to
//! 0xFFFB0000 is reserved, but due to the 1 MB granule size, it is not possible
//! to define a separate region for it.
pub const OFFSET_DDR: usize = 0;
pub const OFFSET_DDR_ALL_ACCESSIBLE: usize = 0x10_0000;
pub const OFFSET_FPGA_SLAVE_0: usize = 0x4000_0000;
pub const OFFSET_FPGA_SLAVE_1_START: usize = 0x8000_0000;
pub const OFFSET_FPGA_SLAVE_1_END: usize = 0xC000_0000;
pub const OFFSET_IO_PERIPHERALS_START: usize = 0xE000_0000;
pub const OFFSET_IO_PERIPHERALS_END: usize = 0xE030_0000;
pub const OFFSET_NAND_MEMORY: usize = 0xE100_0000;
pub const OFFSET_NOR_MEMORY: usize = 0xE200_0000;
pub const OFFSET_SRAM_MEMORY: usize = 0xE400_0000;
pub const OFFSET_SMC_MEMORIES_END: usize = 0xE600_0000;
/// 0xf8000c00 to 0xf8000fff, 0xf8010000 to 0xf88fffff and
/// 0xf8f03000 to 0xf8ffffff are reserved but due to granual size of
/// 1MB, it is not possible to define separate regions for them.
pub const OFFSET_AMBA_APB_START: usize = 0xF800_0000;
pub const OFFSET_AMBA_APB_END: usize = 0xF900_0000;
pub const OFFSET_QSPI_XIP_START: usize = 0xFC00_0000;
pub const OFFSET_QSPI_XIP_END: usize = 0xFE00_0000;
/// 0xfff00000 to 0xfffb0000 is reserved but due to granual size of
/// 1MB, it is not possible to define separate region for it
pub const OFFSET_OCM_MAPPED_HIGH_START: usize = 0xFFF0_0000;
pub const OFFSET_OCM_MAPPED_HIGH_END: u64 = 0x1_0000_0000;
pub const MAX_DDR_SIZE: usize = 0x4000_0000;
pub const ONE_MB: usize = 0x10_0000;
/// First 1 MB of DDR has special treatment, access is dependant on SCU/OCM state.
/// Refer to Zynq TRM UG585 p.106 for more details.
pub const SEGMENTS_DDR_FULL_ACCESSIBLE: usize = (MAX_DDR_SIZE - ONE_MB) / ONE_MB;
pub const SEGMENTS_FPGA_SLAVE: usize = (OFFSET_FPGA_SLAVE_1_START - OFFSET_FPGA_SLAVE_0) / ONE_MB;
pub const SEGMENTS_UNASSIGNED_0: usize =
(OFFSET_IO_PERIPHERALS_START - OFFSET_FPGA_SLAVE_1_END) / ONE_MB;
pub const SEGMENTS_IO_PERIPHS: usize =
(OFFSET_IO_PERIPHERALS_END - OFFSET_IO_PERIPHERALS_START) / ONE_MB;
pub const SEGMENTS_NAND: usize = (OFFSET_NOR_MEMORY - OFFSET_NAND_MEMORY) / ONE_MB;
pub const SEGMENTS_NOR: usize = (OFFSET_SRAM_MEMORY - OFFSET_NOR_MEMORY) / ONE_MB;
pub const SEGMENTS_SRAM: usize = (OFFSET_SMC_MEMORIES_END - OFFSET_SRAM_MEMORY) / ONE_MB;
pub const SEGMENTS_UNASSIGNED_1: usize = (OFFSET_AMBA_APB_START - OFFSET_SMC_MEMORIES_END) / ONE_MB;
pub const SEGMENTS_AMBA_APB: usize = (OFFSET_AMBA_APB_END - OFFSET_AMBA_APB_START) / ONE_MB;
pub const SEGMENTS_UNASSIGNED_2: usize = (OFFSET_QSPI_XIP_START - OFFSET_AMBA_APB_END) / ONE_MB;
pub const SEGMENTS_QSPI_XIP: usize = (OFFSET_QSPI_XIP_END - OFFSET_QSPI_XIP_START) / ONE_MB;
pub const SEGMENTS_UNASSIGNED_3: usize =
(OFFSET_OCM_MAPPED_HIGH_START - OFFSET_QSPI_XIP_END) / ONE_MB;
pub const SEGMENTS_OCM_MAPPED_HIGH: usize = ((OFFSET_OCM_MAPPED_HIGH_END
- OFFSET_OCM_MAPPED_HIGH_START as u64)
/ ONE_MB as u64) as usize;
#[derive(Debug, Copy, Clone)]
#[repr(u8)]
pub enum AccessPermissions {
PermissionFault = 0b000,
PrivilegedOnly = 0b001,
NoUserWrite = 0b010,
FullAccess = 0b011,
_Reserved1 = 0b100,
PrivilegedReadOnly = 0b101,
ReadOnly = 0b110,
_Reserved2 = 0b111,
}
impl AccessPermissions {
const fn ap(&self) -> u8 {
(*self as u8) & 0b11
}
const fn apx(&self) -> bool {
(*self as u8) > (AccessPermissions::FullAccess as u8)
}
}
#[derive(Debug, Copy, Clone)]
#[repr(u8)]
pub enum L1EntryType {
/// Access generates an abort exception. Indicates an unmapped virtual address.
Fault = 0b00,
/// Entry points to a L2 translation table, allowing 1 MB of memory to be further divided
PageTable = 0b01,
/// Maps a 1 MB region to a physical address.
Section = 0b10,
/// Special 1MB section entry which requires 16 entries in the translation table.
Supersection = 0b11,
}
/// 1 MB section translation entry, mapping a 1 MB region to a physical address.
#[derive(Debug, Copy, Clone)]
pub struct L1Section(u32);
pub struct SectionAttributes {
/// NG bit
non_global: bool,
/// Implementation defined bit.
p_bit: bool,
shareable: bool,
/// AP bits
access: AccessPermissions,
/// Type EXtension bits. See Zynq 7000 TRM
type_extensions: u8,
domain: u8,
/// xN bit.
execute_never: bool,
/// C bit
cacheable: bool,
/// B bit
bufferable: bool,
}
impl L1Section {
/// The physical base address. The uppermost 12 bits define which 1 MB of virtual address
/// space are being accessed. They will be stored in the L1 section table. This address
/// MUST be aligned to 1 MB. This code will panic if this is not the case.
pub const fn new(phys_base: u32, section_attrs: SectionAttributes) -> Self {
// Must be aligned to 1 MB
if phys_base & 0x000F_FFFF != 0 {
panic!("physical base address for L1 section must be aligned to 1 MB");
}
let higher_bits = phys_base >> 20;
let raw = (higher_bits << 20)
| ((section_attrs.non_global as u32) << 17)
| ((section_attrs.shareable as u32) << 16)
| ((section_attrs.access.apx() as u32) << 15)
| ((section_attrs.type_extensions as u32) << 12)
| ((section_attrs.access.ap() as u32) << 10)
| ((section_attrs.p_bit as u32) << 9)
| ((section_attrs.domain as u32) << 5)
| ((section_attrs.execute_never as u32) << 4)
| ((section_attrs.cacheable as u32) << 3)
| ((section_attrs.bufferable as u32) << 2)
| L1EntryType::Section as u32;
L1Section(raw)
}
}
const SECTION_ATTRS_DDR: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: true,
access: AccessPermissions::FullAccess,
type_extensions: 0b101,
// Manager domain
domain: 0b1111,
execute_never: false,
cacheable: false,
bufferable: true,
};
const SECTION_ATTRS_FPGA_SLAVES: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
// Strongly ordered
type_extensions: 0b000,
domain: 0b0000,
execute_never: false,
cacheable: false,
bufferable: false,
};
const SECTION_ATTRS_SHAREABLE_DEVICE: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
type_extensions: 0b000,
domain: 0b0000,
execute_never: false,
cacheable: false,
bufferable: true,
};
const SECTION_ATTRS_SRAM: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
type_extensions: 0b000,
domain: 0b0000,
execute_never: false,
cacheable: true,
bufferable: true,
};
const SECTION_ATTRS_QSPI_XIP: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
type_extensions: 0b000,
domain: 0b0000,
execute_never: false,
cacheable: true,
bufferable: false,
};
const SECTION_ATTRS_OCM_MAPPED_HIGH: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
type_extensions: 0b100,
domain: 0b0000,
execute_never: false,
cacheable: true,
bufferable: true,
};
const SECTION_ATTRS_UNASSIGNED_RESERVED: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: false,
access: AccessPermissions::PermissionFault,
type_extensions: 0b000,
domain: 0b0000,
execute_never: false,
cacheable: false,
bufferable: false,
};
const NUM_L1_PAGE_TABLE_ENTRIES: usize = 4096;
#[repr(C, align(16384))]
pub struct L1Table([u32; NUM_L1_PAGE_TABLE_ENTRIES]);
/// MMU Level 1 Page table.
///
/// 4096 entries, each covering 1MB
///
static mut MMU_L1_PAGE_TABLE: L1Table = L1Table([0; NUM_L1_PAGE_TABLE_ENTRIES]);
#[unsafe(no_mangle)]
pub unsafe extern "C" fn init_mmu_table() {
let mut offset = 0;
let mut addr = 0;
unsafe {
// The first entry (1 MB) is related to special DDR memory. See p.101 of the TMR.
// We set is separtely to accomodate for future changes.
MMU_L1_PAGE_TABLE.0[0] = L1Section::new(addr, SECTION_ATTRS_DDR).0;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_DDR_FULL_ACCESSIBLE] {
*entry = L1Section::new(addr, SECTION_ATTRS_DDR).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_DDR_FULL_ACCESSIBLE;
// 2 FPGA slaves.
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_FPGA_SLAVE * 2] {
*entry = L1Section::new(addr, SECTION_ATTRS_FPGA_SLAVES).0;
addr += ONE_MB as u32;
}
offset += 2 * SEGMENTS_FPGA_SLAVE;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_UNASSIGNED_0] {
*entry = L1Section::new(addr, SECTION_ATTRS_UNASSIGNED_RESERVED).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_UNASSIGNED_0;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_IO_PERIPHS] {
*entry = L1Section::new(addr, SECTION_ATTRS_SHAREABLE_DEVICE).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_IO_PERIPHS;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_NAND] {
*entry = L1Section::new(addr, SECTION_ATTRS_SHAREABLE_DEVICE).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_NAND;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_NOR] {
*entry = L1Section::new(addr, SECTION_ATTRS_SHAREABLE_DEVICE).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_NOR;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_SRAM] {
*entry = L1Section::new(addr, SECTION_ATTRS_SRAM).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_SRAM;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_UNASSIGNED_1] {
*entry = L1Section::new(addr, SECTION_ATTRS_UNASSIGNED_RESERVED).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_UNASSIGNED_1;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_AMBA_APB] {
*entry = L1Section::new(addr, SECTION_ATTRS_SHAREABLE_DEVICE).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_AMBA_APB;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_UNASSIGNED_2] {
*entry = L1Section::new(addr, SECTION_ATTRS_UNASSIGNED_RESERVED).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_UNASSIGNED_2;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_QSPI_XIP] {
*entry = L1Section::new(addr, SECTION_ATTRS_QSPI_XIP).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_QSPI_XIP;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_UNASSIGNED_3] {
*entry = L1Section::new(addr, SECTION_ATTRS_UNASSIGNED_RESERVED).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_UNASSIGNED_3;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_OCM_MAPPED_HIGH] {
*entry = L1Section::new(addr, SECTION_ATTRS_OCM_MAPPED_HIGH).0;
addr += ONE_MB as u32;
}
}
}