diff --git a/zynq-mmu/Cargo.toml b/zynq-mmu/Cargo.toml
index 5cdd109..13453c4 100644
--- a/zynq-mmu/Cargo.toml
+++ b/zynq-mmu/Cargo.toml
@@ -5,3 +5,5 @@ version = "0.1.0"
 edition = "2024"
 
 [dependencies]
+cortex-ar = { version = "0.2", path = "../../../Rust/cortex-ar/cortex-ar" }
+thiserror = { version = "2", default-features = false }
diff --git a/zynq-mmu/src/lib.rs b/zynq-mmu/src/lib.rs
index c704bf7..653a15a 100644
--- a/zynq-mmu/src/lib.rs
+++ b/zynq-mmu/src/lib.rs
@@ -2,8 +2,18 @@
 //! runtime crate and the HAL crate.
 #![no_std]
 
+use cortex_ar::{
+    asm::{dsb, isb},
+    mmu::SectionAttributes,
+    register::{BpIAll, TlbIAll},
+};
+
 pub const NUM_L1_PAGE_TABLE_ENTRIES: usize = 4096;
 
+#[derive(Debug, PartialEq, Eq, thiserror::Error)]
+#[error("address is not aligned to a 1 MiB boundary")]
+pub struct AddrNotAlignedToOneMb;
+
 #[repr(C, align(16384))]
 pub struct L1Table(pub [u32; NUM_L1_PAGE_TABLE_ENTRIES]);
 
@@ -17,4 +27,28 @@ impl L1Table {
     pub const fn as_mut_ptr(&mut self) -> *mut u32 {
         self.0.as_mut_ptr()
     }
+
+    pub fn update_page_attr(
+        &mut self,
+        addr: u32,
+        section_attrs: SectionAttributes,
+    ) -> Result<(), AddrNotAlignedToOneMb> {
+        // Each L1 section entry maps 1 MiB, so the address must be 1 MiB aligned.
+        if addr & 0x000F_FFFF != 0 {
+            return Err(AddrNotAlignedToOneMb);
+        }
+        // One entry per 1 MiB of the 4 GiB address space.
+        let index = addr as usize / 0x10_0000;
+        // Keep the section base address in bits [31:20], replace the attribute bits [19:0].
+        self.0[index] = (self.0[index] & 0xFFF0_0000) | section_attrs.as_raw_bits();
+        // TODO: DCache flush.
+        // Ensure the table write has completed before invalidating, then invalidate
+        // the TLB and branch predictor so the updated descriptor takes effect.
+        dsb();
+        TlbIAll::write();
+        BpIAll::write();
+        dsb();
+        isb();
+        Ok(())
+    }
 }
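
For reference, a minimal usage sketch of the new `update_page_attr` API (not part of the diff; the `remap_section` helper and the example addresses are assumptions, and building a `SectionAttributes` value is left to the cortex-ar crate):

```rust
use cortex_ar::mmu::SectionAttributes;
use zynq_mmu::{AddrNotAlignedToOneMb, L1Table};

/// Hypothetical helper: change the attributes of one 1 MiB section in an
/// already-populated L1 table.
fn remap_section(
    table: &mut L1Table,
    base: u32,
    attrs: SectionAttributes,
) -> Result<(), AddrNotAlignedToOneMb> {
    // `base` must be 1 MiB aligned (e.g. 0x4010_0000); 0x4010_0400 would return Err.
    table.update_page_attr(base, attrs)
}
```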