mmu: support L2 page tables

2026-01-28 16:58:11 +08:00
committed by sb10q
parent 95ef3057cd
commit 7b2e37351d


@@ -70,7 +70,12 @@ pub struct L1Section {
pub bufferable: bool,
}
pub struct L1PointToL2Table {
pub domain: u8,
}
const ENTRY_TYPE_SECTION: u32 = 0b10;
const ENTRY_TYPE_PAGE_TABLE: u32 = 0b01;
pub const L1_PAGE_SIZE: usize = 0x100000;
#[repr(C)]
@@ -84,7 +89,16 @@ impl L1Entry {
assert!(phys_base & 0x000f_ffff == 0);
let mut entry = L1Entry(phys_base);
entry.set_section(section);
entry
}
#[inline(always)]
pub fn from_l2_page_table(page_table_base: u32, page_table: L1PointToL2Table) -> Self {
// Must be aligned to 1024 bytes
assert!(page_table_base & 0x3ff == 0);
let mut entry = L1Entry(page_table_base);
entry.set_l2_entry(page_table);
entry
}
@@ -117,6 +131,106 @@ impl L1Entry {
self.0.set_bit(16, section.shareable);
self.0.set_bit(17, !section.global);
}
pub fn set_l2_entry(&mut self, table: L1PointToL2Table) {
self.0.set_bits(0..=1, ENTRY_TYPE_PAGE_TABLE);
self.0.set_bits(5..=8, table.domain.into());
}
}
const L2_ENTRY_LARGE_SECTION: u32 = 0b01;
const L2_ENTRY_SMALL_SECTION: u32 = 0b10;
#[repr(C)]
#[derive(Clone, Copy)]
pub struct L2Entry(u32);
#[derive(Clone, Copy)]
pub struct L2Section {
pub exec: bool,
pub global: bool,
pub tex: u8,
pub shareable: bool,
pub access: AccessPermissions,
pub cacheable: bool,
pub bufferable: bool,
pub large: bool, // true for 64KB pages, false for 4KB
}
const L2_TABLE_SIZE: usize = 256;
#[repr(C, align(1024))]
pub struct L2Table {
pub l2_table: [L2Entry; L2_TABLE_SIZE],
}
impl L2Entry {
#[inline(always)]
pub fn from_section(phys_base: u32, section: L2Section) -> Self {
if section.large {
// must be aligned to 64KB
assert!(phys_base & 0xffff == 0);
} else {
// must be aligned to 4KB
assert!(phys_base & 0xfff == 0);
}
let mut entry = L2Entry(phys_base);
entry.set_section(section);
entry
}
pub fn set_section(&mut self, section: L2Section) {
if section.large {
// large section (64KB)
self.0.set_bits(0..=1, L2_ENTRY_LARGE_SECTION);
self.0.set_bit(15, !section.exec);
self.0.set_bits(12..=14, section.tex.into());
} else {
// small section (4KB)
self.0.set_bits(0..=1, L2_ENTRY_SMALL_SECTION);
self.0.set_bit(0, !section.exec);
self.0.set_bits(6..=8, section.tex.into());
}
// Common fields
self.0.set_bit(2, section.bufferable);
self.0.set_bit(3, section.cacheable);
self.0.set_bits(4..=5, section.access.ap().into());
self.0.set_bit(9, section.access.apx());
self.0.set_bit(10, section.shareable);
self.0.set_bit(11, !section.global);
}
#[inline(always)]
pub fn is_large_section(&self) -> bool {
// we assume that anything that is not a large page is a small page
self.0.get_bits(0..=1) == L2_ENTRY_LARGE_SECTION
}
pub fn get_section(&self) -> L2Section {
if self.is_large_section() {
L2Section {
exec: !self.0.get_bit(15),
global: !self.0.get_bit(11),
tex: self.0.get_bits(12..=14) as u8,
shareable: self.0.get_bit(10),
access: AccessPermissions::new(self.0.get_bits(4..=5) as u8, self.0.get_bit(9)),
cacheable: self.0.get_bit(3),
bufferable: self.0.get_bit(2),
large: true,
}
} else {
L2Section {
exec: !self.0.get_bit(0),
global: !self.0.get_bit(11),
tex: self.0.get_bits(6..=8) as u8,
shareable: self.0.get_bit(10),
access: AccessPermissions::new(self.0.get_bits(4..=5) as u8, self.0.get_bit(9)),
cacheable: self.0.get_bit(3),
bufferable: self.0.get_bit(2),
large: false,
}
}
}
}
const L1_TABLE_SIZE: usize = 4096;
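
As a point of reference, a minimal sketch of how the new L2Entry API is used for a single small 4KB page, assuming the types above (L2Entry, L2Section, AccessPermissions) are in scope; the physical address is illustrative only:

let small_page = L2Section {
    exec: true,
    global: true,
    tex: 0,
    shareable: true,
    access: AccessPermissions::FullAccess,
    cacheable: true,
    bufferable: true,
    large: false,
};
// 4KB-aligned physical base; from_section asserts on misaligned addresses.
let entry = L2Entry::from_section(0x0010_2000, small_page);
assert!(!entry.is_large_section());
// The decoded section reflects the attributes encoded above.
assert!(entry.get_section().cacheable);
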
@@ -413,14 +527,7 @@ impl L1Table {
let section = entry.get_section();
*entry = L1Entry::from_section(new_physical_base, section);
// L1 I-cache is VIPT and the 1MB page size guarantees different tags
// L1 D-cache and L2 cache are PIPT, unaffected by virtual memory changes
// thus flushing the caches is unnecessary; we just
// invalidate the updated TLB entry and the branch predictor
tlbimva(virtual_addr);
bpiall();
dsb();
isb();
invalidate_section(virtual_addr);
}
pub fn update<T, F, R>(&mut self, ptr: *const T, f: F) -> R
@@ -449,6 +556,81 @@ impl L1Table {
result
}
pub fn link_l2_page_table(&mut self, virtual_addr: u32, page_table_base: u32) {
let index = (virtual_addr >> 20) as usize;
let entry = L1Entry::from_l2_page_table(
page_table_base,
// hardcoded domain to match other L1 entries in DDR space
L1PointToL2Table { domain: 0b1111 }
);
self.table[index] = entry;
tlbimva(virtual_addr);
bpiall();
dsb();
isb();
}
}
impl L2Table {
pub const fn new() -> Self {
L2Table {
l2_table: [L2Entry(0); L2_TABLE_SIZE],
}
}
pub fn setup_flat_layout(&mut self, base_addr: u32) {
// 1MB aligned
assert!(base_addr & 0x000f_ffff == 0);
// set up a flat layout with small 4KB pages so that
// accesses to this region do not generate a fault
let small_flat_page = L2Section {
large: false,
global: true,
exec: true,
tex: 0,
shareable: true,
access: AccessPermissions::FullAccess,
cacheable: true,
bufferable: true,
};
for i in 0..L2_TABLE_SIZE {
self.l2_table[i] =
L2Entry::from_section(base_addr + (i as u32 * 4096), small_flat_page);
}
invalidate_section(base_addr);
}
pub fn get_base_addr(&self) -> u32 {
&self.l2_table as *const _ as u32
}
pub fn set_section(&mut self, virtual_addr: u32, phys_addr: u32, section: L2Section) {
let index = (virtual_addr as usize >> 12) & 0xff;
let entry = L2Entry::from_section(phys_addr, section);
if section.large {
// the L2 page table uses 4kB entries, so a large 64kB page
// entry must be repeated 16 times.
let large_index = index & !0xf;
self.l2_table[large_index..large_index + 16].fill(entry);
} else {
self.l2_table[index] = entry;
}
}
pub fn remap_section(&mut self, virtual_addr: u32, new_phys_base: u32) {
let index = (virtual_addr as usize >> 12) & 0xff;
let section = self.l2_table[index].get_section();
let new_entry = L2Entry::from_section(new_phys_base, section);
if section.large {
let large_index = index & !0xf;
self.l2_table[large_index..large_index + 16].fill(new_entry);
} else {
self.l2_table[index] = new_entry;
}
invalidate_section(virtual_addr);
}
}
pub fn with_mmu<F: FnMut() -> !>(l1table: &L1Table, mut f: F) -> ! {
@@ -482,3 +664,19 @@ pub fn with_mmu<F: FnMut() -> !>(l1table: &L1Table, mut f: F) -> ! {
pub fn remap_section(virtual_addr: u32, new_physical_base: u32) {
L1Table::get().remap_section(virtual_addr, new_physical_base);
}
pub fn link_l2_page_table(virtual_addr: u32, page_table_base: u32) {
L1Table::get().link_l2_page_table(virtual_addr, page_table_base);
}
#[inline(always)]
fn invalidate_section(virtual_addr: u32) {
// L1 I-cache is VIPT and the 1MB page size guarantees different tags
// L1 D-cache and L2 cache are PIPT, unaffected by virtual memory changes
// thus flushing the caches is unnecessary; we just
// invalidate the updated TLB entry and the branch predictor
tlbimva(virtual_addr);
bpiall();
dsb();
isb();
}
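
A rough end-to-end sketch of how the new pieces fit together, assuming a statically allocated L2Table and the module-level helpers above; the region covered (0x0010_0000) and the remap target are illustrative assumptions, not values from the commit:

static mut L2_TABLE: L2Table = L2Table::new();

unsafe {
    // Identity-map one 1MB region with 4KB granularity so accesses do not fault,
    // then link the L2 table into the L1 table for that region.
    L2_TABLE.setup_flat_layout(0x0010_0000);
    link_l2_page_table(0x0010_0000, L2_TABLE.get_base_addr());
    // Remap a single 4KB page inside the region to a new physical base.
    L2_TABLE.remap_section(0x0010_2000, 0x1f00_0000);
}

Note that get_base_addr returns the table's virtual address; this sketch assumes the table itself sits in memory that is identity-mapped, so the page-table walker can reach it at the same physical address.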