Generate table automatically

This commit is contained in:
2025-02-23 12:25:36 +01:00
parent 956f22b41b
commit 05709ba8a1
11 changed files with 4885 additions and 409 deletions

View File

@ -23,6 +23,10 @@
//! of 1 MB, it is not possible to define separate regions for them. For region
//! 0xFFF00000 - 0xFFFFFFFF, 0xFFF00000 to 0xFFFB0000 is reserved but due to 1MB
//! granule size, it is not possible to define a separate region for it.
use core::arch::asm;
use crate::mmu_table::MMU_L1_PAGE_TABLE;
pub const OFFSET_DDR: usize = 0;
pub const OFFSET_DDR_ALL_ACCESSIBLE: usize = 0x10_0000;
@ -62,18 +66,18 @@ pub const SEGMENTS_UNASSIGNED_0: usize =
(OFFSET_IO_PERIPHERALS_START - OFFSET_FPGA_SLAVE_1_END) / ONE_MB;
pub const SEGMENTS_IO_PERIPHS: usize =
(OFFSET_IO_PERIPHERALS_END - OFFSET_IO_PERIPHERALS_START) / ONE_MB;
pub const SEGMENTS_UNASSIGNED_1: usize = (OFFSET_NAND_MEMORY - OFFSET_IO_PERIPHERALS_END) / ONE_MB;
pub const SEGMENTS_NAND: usize = (OFFSET_NOR_MEMORY - OFFSET_NAND_MEMORY) / ONE_MB;
pub const SEGMENTS_NOR: usize = (OFFSET_SRAM_MEMORY - OFFSET_NOR_MEMORY) / ONE_MB;
pub const SEGMENTS_SRAM: usize = (OFFSET_SMC_MEMORIES_END - OFFSET_SRAM_MEMORY) / ONE_MB;
pub const SEGMENTS_UNASSIGNED_1: usize = (OFFSET_AMBA_APB_START - OFFSET_SMC_MEMORIES_END) / ONE_MB;
pub const SEGMENTS_UNASSIGNED_2: usize = (OFFSET_AMBA_APB_START - OFFSET_SMC_MEMORIES_END) / ONE_MB;
pub const SEGMENTS_AMBA_APB: usize = (OFFSET_AMBA_APB_END - OFFSET_AMBA_APB_START) / ONE_MB;
pub const SEGMENTS_UNASSIGNED_2: usize = (OFFSET_QSPI_XIP_START - OFFSET_AMBA_APB_END) / ONE_MB;
pub const SEGMENTS_UNASSIGNED_3: usize = (OFFSET_QSPI_XIP_START - OFFSET_AMBA_APB_END) / ONE_MB;
pub const SEGMENTS_QSPI_XIP: usize = (OFFSET_QSPI_XIP_END - OFFSET_QSPI_XIP_START) / ONE_MB;
pub const SEGMENTS_UNASSIGNED_3: usize =
pub const SEGMENTS_UNASSIGNED_4: usize =
(OFFSET_OCM_MAPPED_HIGH_START - OFFSET_QSPI_XIP_END) / ONE_MB;
pub const SEGMENTS_OCM_MAPPED_HIGH: usize = ((OFFSET_OCM_MAPPED_HIGH_END
- OFFSET_OCM_MAPPED_HIGH_START as u64)
/ ONE_MB as u64) as usize;
pub const SEGMENTS_OCM_MAPPED_HIGH: usize =
((OFFSET_OCM_MAPPED_HIGH_END - OFFSET_OCM_MAPPED_HIGH_START as u64) / ONE_MB as u64) as usize;
#[derive(Debug, Copy, Clone)]
#[repr(u8)]
@ -111,10 +115,88 @@ pub enum L1EntryType {
Supersection = 0b11,
}
/// 1 MB section translation entry, mapping a 1 MB region to a physical address.
/// The ARM Cortex-A architecture reference manual p.1363 specifies these attributes in more detail.
///
/// The B (Bufferable), C (Cacheable), and TEX (Type extension) bit names are inherited from
/// earlier versions of the architecture. These names no longer adequately describe the function
/// of the B, C, and TEX bits.
#[derive(Debug, Copy, Clone)]
pub struct L1Section(u32);
/// Raw TEX (Type extension), C (Cacheable) and B (Bufferable) bits of a section
/// descriptor.
///
/// These bit names are inherited from earlier versions of the architecture and no
/// longer adequately describe the encoded memory attributes. Prefer building values
/// through [MemoryRegionAttributes::as_raw], which gives the encodings meaningful
/// names.
// Derives are required because SectionAttributes derives Debug/Copy/Clone and embeds
// this type in its memory_attrs field.
#[derive(Debug, Copy, Clone)]
pub struct MemoryRegionAttributesRaw {
    /// TEX bits
    type_extensions: u8,
    /// C bit
    c: bool,
    /// B bit
    b: bool,
}

impl MemoryRegionAttributesRaw {
    /// Create a raw attribute value directly from the individual TEX, C and B bits.
    pub const fn new(type_extensions: u8, c: bool, b: bool) -> Self {
        Self {
            type_extensions,
            c,
            b,
        }
    }
}
/// 2-bit cacheability policy encoding for the inner and outer caches of a cacheable
/// memory region.
///
/// When converted to raw bits, the inner policy is split into the C (bit 1) and
/// B (bit 0) descriptor bits while the outer policy forms TEX[1:0]
/// (see [MemoryRegionAttributes::as_raw]).
#[derive(Debug, Copy, Clone)]
pub enum CacheableMemoryAttribute {
NonCacheable = 0b00,
WriteBackWriteAlloc = 0b01,
WriteThroughNoWriteAlloc = 0b10,
WriteBackNoWriteAlloc = 0b11,
}
/// High-level description of the memory attributes of a region.
///
/// Each variant maps to a fixed TEX/C/B bit combination via
/// [MemoryRegionAttributes::as_raw].
#[derive(Debug, Copy, Clone)]
pub enum MemoryRegionAttributes {
StronglyOrdered,
ShareableDevice,
OuterAndInnerWriteThroughNoWriteAlloc,
OuterAndInnerWriteBackNoWriteAlloc,
OuterAndInnerNonCacheable,
OuterAndInnerWriteBackWriteAlloc,
NonShareableDevice,
/// Cacheable memory with separately configurable inner and outer cache policies.
CacheableMemory {
inner: CacheableMemoryAttribute,
outer: CacheableMemoryAttribute,
},
}
impl MemoryRegionAttributes {
    /// Convert the high-level attribute description into the raw TEX, C and B bits
    /// stored in a section descriptor.
    ///
    /// For [MemoryRegionAttributes::CacheableMemory], TEX[2] is set, the outer cache
    /// policy occupies TEX[1:0], and the inner policy is split into the C (bit 1) and
    /// B (bit 0) descriptor bits.
    pub const fn as_raw(&self) -> MemoryRegionAttributesRaw {
        // Resolve every variant to its (TEX, C, B) triple first, then build the raw
        // value in a single place.
        let (tex, c, b) = match self {
            MemoryRegionAttributes::StronglyOrdered => (0b000, false, false),
            MemoryRegionAttributes::ShareableDevice => (0b000, false, true),
            MemoryRegionAttributes::OuterAndInnerWriteThroughNoWriteAlloc => {
                (0b000, true, false)
            }
            MemoryRegionAttributes::OuterAndInnerWriteBackNoWriteAlloc => (0b000, true, true),
            MemoryRegionAttributes::OuterAndInnerNonCacheable => (0b001, false, false),
            MemoryRegionAttributes::OuterAndInnerWriteBackWriteAlloc => (0b001, true, true),
            MemoryRegionAttributes::NonShareableDevice => (0b010, false, false),
            MemoryRegionAttributes::CacheableMemory { inner, outer } => {
                let inner_bits = *inner as u8;
                (
                    (1 << 2) | (*outer as u8),
                    (inner_bits & 0b10) != 0,
                    (inner_bits & 0b01) != 0,
                )
            }
        };
        MemoryRegionAttributesRaw::new(tex, c, b)
    }
}
#[derive(Debug, Copy, Clone)]
pub struct SectionAttributes {
/// NG bit
non_global: bool,
@ -123,17 +205,16 @@ pub struct SectionAttributes {
shareable: bool,
/// AP bits
access: AccessPermissions,
/// Type EXtension bits. See Zynq 7000 TRM
type_extensions: u8,
memory_attrs: MemoryRegionAttributesRaw,
domain: u8,
/// xN bit.
execute_never: bool,
/// C bit
cacheable: bool,
/// B bit
bufferable: bool,
}
/// 1 MB section translation entry, mapping a 1 MB region to a physical address.
#[derive(Debug, Copy, Clone)]
pub struct L1Section(pub u32);
impl L1Section {
/// The physical base address. The uppermost 12 bits define which 1 MB of virtual address
/// space are being accessed. They will be stored in the L1 section table. This address
@ -148,194 +229,114 @@ impl L1Section {
| ((section_attrs.non_global as u32) << 17)
| ((section_attrs.shareable as u32) << 16)
| ((section_attrs.access.apx() as u32) << 15)
| ((section_attrs.type_extensions as u32) << 12)
| ((section_attrs.memory_attrs.type_extensions as u32) << 12)
| ((section_attrs.access.ap() as u32) << 10)
| ((section_attrs.p_bit as u32) << 9)
| ((section_attrs.domain as u32) << 5)
| ((section_attrs.execute_never as u32) << 4)
| ((section_attrs.cacheable as u32) << 3)
| ((section_attrs.bufferable as u32) << 2)
| ((section_attrs.memory_attrs.c as u32) << 3)
| ((section_attrs.memory_attrs.b as u32) << 2)
| L1EntryType::Section as u32;
L1Section(raw)
}
}
const SECTION_ATTRS_DDR: SectionAttributes = SectionAttributes {
pub const SECTION_ATTRS_DDR: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: true,
access: AccessPermissions::FullAccess,
type_extensions: 0b101,
// Manager domain
domain: 0b1111,
execute_never: false,
cacheable: false,
bufferable: true,
memory_attrs: MemoryRegionAttributes::CacheableMemory {
inner: CacheableMemoryAttribute::WriteBackWriteAlloc,
outer: CacheableMemoryAttribute::WriteBackWriteAlloc,
}
.as_raw(),
};
const SECTION_ATTRS_FPGA_SLAVES: SectionAttributes = SectionAttributes {
pub const SECTION_ATTRS_FPGA_SLAVES: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
// Strongly ordered
type_extensions: 0b000,
domain: 0b0000,
execute_never: false,
cacheable: false,
bufferable: false,
memory_attrs: MemoryRegionAttributes::StronglyOrdered.as_raw(),
};
const SECTION_ATTRS_SHAREABLE_DEVICE: SectionAttributes = SectionAttributes {
pub const SECTION_ATTRS_SHAREABLE_DEVICE: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
type_extensions: 0b000,
domain: 0b0000,
execute_never: false,
cacheable: false,
bufferable: true,
memory_attrs: MemoryRegionAttributes::ShareableDevice.as_raw(),
};
const SECTION_ATTRS_SRAM: SectionAttributes = SectionAttributes {
pub const SECTION_ATTRS_SRAM: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
type_extensions: 0b000,
domain: 0b0000,
execute_never: false,
cacheable: true,
bufferable: true,
memory_attrs: MemoryRegionAttributes::OuterAndInnerWriteBackNoWriteAlloc.as_raw(),
};
const SECTION_ATTRS_QSPI_XIP: SectionAttributes = SectionAttributes {
pub const SECTION_ATTRS_QSPI_XIP: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
type_extensions: 0b000,
domain: 0b0000,
execute_never: false,
cacheable: true,
bufferable: false,
memory_attrs: MemoryRegionAttributes::OuterAndInnerWriteThroughNoWriteAlloc.as_raw(),
};
const SECTION_ATTRS_OCM_MAPPED_HIGH: SectionAttributes = SectionAttributes {
pub const SECTION_ATTRS_OCM_MAPPED_HIGH: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: false,
access: AccessPermissions::FullAccess,
type_extensions: 0b100,
domain: 0b0000,
execute_never: false,
cacheable: true,
bufferable: true,
memory_attrs: MemoryRegionAttributes::CacheableMemory {
inner: CacheableMemoryAttribute::WriteThroughNoWriteAlloc,
outer: CacheableMemoryAttribute::NonCacheable,
}
.as_raw(),
};
const SECTION_ATTRS_UNASSIGNED_RESERVED: SectionAttributes = SectionAttributes {
pub const SECTION_ATTRS_UNASSIGNED_RESERVED: SectionAttributes = SectionAttributes {
non_global: false,
p_bit: false,
shareable: false,
access: AccessPermissions::PermissionFault,
type_extensions: 0b000,
domain: 0b0000,
execute_never: false,
cacheable: false,
bufferable: false,
memory_attrs: MemoryRegionAttributes::StronglyOrdered.as_raw(),
};
const NUM_L1_PAGE_TABLE_ENTRIES: usize = 4096;
pub const NUM_L1_PAGE_TABLE_ENTRIES: usize = 4096;
#[repr(C, align(16384))]
pub struct L1Table([u32; NUM_L1_PAGE_TABLE_ENTRIES]);
pub struct L1Table(pub(crate) [u32; NUM_L1_PAGE_TABLE_ENTRIES]);
/// MMU Level 1 Page table.
/// Load the MMU translation table base address into the MMU.
///
/// 4096 entries, each covering 1MB
/// # Safety
///
static mut MMU_L1_PAGE_TABLE: L1Table = L1Table([0; NUM_L1_PAGE_TABLE_ENTRIES]);
/// This function is unsafe because it directly writes to the MMU related registers. It has to be
/// called once in the boot code before enabling the MMU, and it should be called while the MMU is
/// disabled.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn init_mmu_table() {
let mut offset = 0;
let mut addr = 0;
unsafe extern "C" fn load_mmu_table() {
let table_base = &MMU_L1_PAGE_TABLE.0 as *const _ as u32;
unsafe {
// The first entry (1 MB) is related to special DDR memory. See p.101 of the TRM.
// We set it separately to accommodate future changes.
MMU_L1_PAGE_TABLE.0[0] = L1Section::new(addr, SECTION_ATTRS_DDR).0;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_DDR_FULL_ACCESSIBLE] {
*entry = L1Section::new(addr, SECTION_ATTRS_DDR).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_DDR_FULL_ACCESSIBLE;
// 2 FPGA slaves.
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_FPGA_SLAVE * 2] {
*entry = L1Section::new(addr, SECTION_ATTRS_FPGA_SLAVES).0;
addr += ONE_MB as u32;
}
offset += 2 * SEGMENTS_FPGA_SLAVE;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_UNASSIGNED_0] {
*entry = L1Section::new(addr, SECTION_ATTRS_UNASSIGNED_RESERVED).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_UNASSIGNED_0;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_IO_PERIPHS] {
*entry = L1Section::new(addr, SECTION_ATTRS_SHAREABLE_DEVICE).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_IO_PERIPHS;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_NAND] {
*entry = L1Section::new(addr, SECTION_ATTRS_SHAREABLE_DEVICE).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_NAND;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_NOR] {
*entry = L1Section::new(addr, SECTION_ATTRS_SHAREABLE_DEVICE).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_NOR;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_SRAM] {
*entry = L1Section::new(addr, SECTION_ATTRS_SRAM).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_SRAM;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_UNASSIGNED_1] {
*entry = L1Section::new(addr, SECTION_ATTRS_UNASSIGNED_RESERVED).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_UNASSIGNED_1;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_AMBA_APB] {
*entry = L1Section::new(addr, SECTION_ATTRS_SHAREABLE_DEVICE).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_AMBA_APB;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_UNASSIGNED_2] {
*entry = L1Section::new(addr, SECTION_ATTRS_UNASSIGNED_RESERVED).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_UNASSIGNED_2;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_QSPI_XIP] {
*entry = L1Section::new(addr, SECTION_ATTRS_QSPI_XIP).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_QSPI_XIP;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_UNASSIGNED_3] {
*entry = L1Section::new(addr, SECTION_ATTRS_UNASSIGNED_RESERVED).0;
addr += ONE_MB as u32;
}
offset += SEGMENTS_UNASSIGNED_3;
for entry in &mut MMU_L1_PAGE_TABLE.0[offset..offset + SEGMENTS_OCM_MAPPED_HIGH] {
*entry = L1Section::new(addr, SECTION_ATTRS_OCM_MAPPED_HIGH).0;
addr += ONE_MB as u32;
}
core::arch::asm!(
"orr {0}, {0}, #0x5B", // Outer-cacheable, WB
"mcr p15, 0, {0}, c2, c0, 0", // Load table pointer
inout(reg) table_base => _,
options(nostack, preserves_flags)
);
}
}