4 Commits

Author SHA1 Message Date
b92c58a544 mmu: pass l2 table instead of raw address 2026-02-25 13:29:25 +08:00
6cec8914f8 cargo fmt 2026-02-24 23:20:20 +08:00
7b2e37351d mmu: support L2 page tables 2026-02-24 23:10:04 +08:00
95ef3057cd netboot: support 100Mbps and 10Mbps eth link speed
check for eth link change during netboot instead of defaulting to 1Gbps
2026-02-24 12:03:06 +08:00
2 changed files with 213 additions and 9 deletions

View File

@@ -70,7 +70,12 @@ pub struct L1Section {
pub bufferable: bool,
}
/// Attributes for an L1 descriptor that points to an L2 page table
/// (see `L1Entry::from_l2_page_table` / `L1Entry::set_l2_entry`).
pub struct L1PointToL2Table {
    // 4-bit MMU domain written into bits [8:5] of the L1 descriptor
    pub domain: u8,
}
// L1 descriptor type field (bits [1:0]): 0b10 = 1MB section, 0b01 = pointer to an L2 page table.
const ENTRY_TYPE_SECTION: u32 = 0b10;
const ENTRY_TYPE_PAGE_TABLE: u32 = 0b01;
// Each L1 entry maps a 1MB region of the virtual address space.
pub const L1_PAGE_SIZE: usize = 0x100000;
#[repr(C)]
@@ -88,6 +93,16 @@ impl L1Entry {
entry
}
/// Build an L1 descriptor pointing at `l2_table`, carrying the
/// attributes (domain) given in `page_table`.
#[inline(always)]
pub fn from_l2_page_table(l2_table: &L2Table, page_table: L1PointToL2Table) -> Self {
    let base = l2_table.get_base_addr();
    // The hardware requires the L2 table base to be aligned to 1024 bytes
    // (L2Table is #[repr(C, align(1024))], so this should always hold).
    assert!(base & 0x3ff == 0);
    let mut descriptor = L1Entry(base);
    descriptor.set_l2_entry(page_table);
    descriptor
}
pub fn get_section(&mut self) -> L1Section {
assert_eq!(self.0.get_bits(0..=1), ENTRY_TYPE_SECTION);
let access = AccessPermissions::new(self.0.get_bits(10..=11) as u8, self.0.get_bit(15));
@@ -117,6 +132,106 @@ impl L1Entry {
self.0.set_bit(16, section.shareable);
self.0.set_bit(17, !section.global);
}
/// Mark this L1 entry as a pointer to an L2 page table and assign its domain.
pub fn set_l2_entry(&mut self, table: L1PointToL2Table) {
    // Domain occupies bits [8:5] of the descriptor.
    self.0.set_bits(5..=8, table.domain.into());
    // Descriptor type bits [1:0]: page-table pointer.
    self.0.set_bits(0..=1, ENTRY_TYPE_PAGE_TABLE);
}
}
// L2 descriptor type field (bits [1:0]): 0b01 = large (64KB) page, 0b10 = small (4KB) page.
// For small pages bit 0 doubles as the XN (execute-never) flag, so the field reads as 0b1X.
const L2_ENTRY_LARGE_SECTION: u32 = 0b01;
const L2_ENTRY_SMALL_SECTION: u32 = 0b10;
/// Raw second-level (L2) translation table descriptor.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct L2Entry(u32);
/// Decoded attributes of an L2 page mapping.
#[derive(Clone, Copy)]
pub struct L2Section {
    pub exec: bool,
    pub global: bool,
    // TEX memory-type bits
    pub tex: u8,
    pub shareable: bool,
    pub access: AccessPermissions,
    pub cacheable: bool,
    pub bufferable: bool,
    pub large: bool, // true for 64KB pages, false for 4KB
}
// An L2 table has 256 entries, each covering 4KB of the 1MB region its L1 entry spans.
const L2_TABLE_SIZE: usize = 256;
/// Second-level page table; the hardware requires a 1KB-aligned base address.
#[repr(C, align(1024))]
pub struct L2Table {
    pub l2_table: [L2Entry; L2_TABLE_SIZE],
}
impl L2Entry {
    /// Build a descriptor mapping `phys_base` with the attributes in `section`.
    ///
    /// Panics if `phys_base` is not aligned to the page size
    /// (64KB for large pages, 4KB for small pages).
    #[inline(always)]
    pub fn from_section(phys_base: u32, section: L2Section) -> Self {
        let align_mask = if section.large {
            // must be aligned to 64KB
            0xffff
        } else {
            // must be aligned to 4KB
            0xfff
        };
        assert!(phys_base & align_mask == 0);
        let mut descriptor = L2Entry(phys_base);
        descriptor.set_section(section);
        descriptor
    }

    /// Encode `section` into the low attribute bits of this descriptor.
    pub fn set_section(&mut self, section: L2Section) {
        // Attribute bits shared by both page sizes.
        self.0.set_bit(2, section.bufferable);
        self.0.set_bit(3, section.cacheable);
        self.0.set_bits(4..=5, section.access.ap().into());
        self.0.set_bit(9, section.access.apx());
        self.0.set_bit(10, section.shareable);
        self.0.set_bit(11, !section.global);
        if section.large {
            // large section (64KB): XN at bit 15, TEX at bits [14:12]
            self.0.set_bits(0..=1, L2_ENTRY_LARGE_SECTION);
            self.0.set_bit(15, !section.exec);
            self.0.set_bits(12..=14, section.tex.into());
        } else {
            // small section (4KB): type bits first, then XN overwrites bit 0
            self.0.set_bits(0..=1, L2_ENTRY_SMALL_SECTION);
            self.0.set_bit(0, !section.exec);
            self.0.set_bits(6..=8, section.tex.into());
        }
    }

    #[inline(always)]
    pub fn is_large_section(&self) -> bool {
        // we will assume that anything that's not a large page, is a small page.
        self.0.get_bits(0..=1) == L2_ENTRY_LARGE_SECTION
    }

    /// Decode this descriptor back into an `L2Section`.
    pub fn get_section(&self) -> L2Section {
        let large = self.is_large_section();
        // XN and TEX live at different bit positions depending on the page size.
        let (xn, tex) = if large {
            (self.0.get_bit(15), self.0.get_bits(12..=14) as u8)
        } else {
            (self.0.get_bit(0), self.0.get_bits(6..=8) as u8)
        };
        L2Section {
            exec: !xn,
            global: !self.0.get_bit(11),
            tex,
            shareable: self.0.get_bit(10),
            access: AccessPermissions::new(self.0.get_bits(4..=5) as u8, self.0.get_bit(9)),
            cacheable: self.0.get_bit(3),
            bufferable: self.0.get_bit(2),
            large,
        }
    }
}
// 4096 entries of 1MB (L1_PAGE_SIZE) each cover the full 4GB address space.
const L1_TABLE_SIZE: usize = 4096;
@@ -413,14 +528,7 @@ impl L1Table {
let section = entry.get_section();
*entry = L1Entry::from_section(new_physical_base, section);
// L1 I-cache cache is VIPT and 1MB page sizes guarantee different tags
// L1 D-cache and L2 cache is PIPT, unaffected by virtual memory changes
// thus flushing caches are unnecessary, we just
// invalidate the updated TLB entry and the branch predictor
tlbimva(virtual_addr);
bpiall();
dsb();
isb();
invalidate_section(virtual_addr);
}
pub fn update<T, F, R>(&mut self, ptr: *const T, f: F) -> R
@@ -449,6 +557,78 @@ impl L1Table {
result
}
/// Replace the L1 entry covering `virtual_addr` with a pointer to `l2_table`,
/// then invalidate the stale TLB entry for that section.
///
/// Panics unless `virtual_addr` is 1MB aligned, since an L1 entry spans 1MB.
pub fn link_l2_page_table(&mut self, virtual_addr: u32, l2_table: &L2Table) {
    assert!(virtual_addr & 0x000f_ffff == 0);
    let descriptor = L1Entry::from_l2_page_table(
        l2_table,
        // hardcoded domain to match other L1 entries in DDR space
        L1PointToL2Table { domain: 0b1111 },
    );
    self.table[(virtual_addr >> 20) as usize] = descriptor;
    invalidate_section(virtual_addr);
}
}
impl L2Table {
    /// Create a table with every entry zeroed; zero descriptors are invalid
    /// and fault on access.
    pub const fn new() -> Self {
        L2Table {
            l2_table: [L2Entry(0); L2_TABLE_SIZE],
        }
    }

    /// Identity-map the 1MB region starting at `base_addr` using small 4KB
    /// pages, so accesses within the region will not generate a fault.
    ///
    /// Panics unless `base_addr` is 1MB aligned.
    pub fn setup_flat_layout(&mut self, base_addr: u32) {
        // 1MB aligned
        assert!(base_addr & 0x000f_ffff == 0);
        // setup flat layout with small 4KB pages,
        // so it will not generate a fault when accessed
        let small_flat_page = L2Section {
            large: false,
            global: true,
            exec: true,
            tex: 0,
            shareable: true,
            access: AccessPermissions::FullAccess,
            cacheable: true,
            bufferable: true,
        };
        for (i, entry) in self.l2_table.iter_mut().enumerate() {
            *entry = L2Entry::from_section(base_addr + (i as u32 * 4096), small_flat_page);
        }
        invalidate_section(base_addr);
    }

    /// Address of the table itself, suitable as the base of an L1
    /// page-table descriptor (guaranteed 1KB aligned by the type's alignment).
    pub fn get_base_addr(&self) -> u32 {
        &self.l2_table as *const _ as u32
    }

    /// Map the page containing `virtual_addr` to `phys_addr` with the given
    /// attributes. `phys_addr` must be aligned to the page size
    /// (checked in `L2Entry::from_section`).
    pub fn set_section(&mut self, virtual_addr: u32, phys_addr: u32, section: L2Section) {
        let index = (virtual_addr as usize >> 12) & 0xff;
        let entry = L2Entry::from_section(phys_addr, section);
        if section.large {
            // L2 page table uses 4kB page size, thus large 64kB pages
            // need to be repeated 16 times.
            let large_index = index & !0xf;
            self.l2_table[large_index..large_index + 16].fill(entry);
        } else {
            self.l2_table[index] = entry;
        }
    }

    /// Re-point the existing mapping for `virtual_addr` at `new_phys_base`,
    /// keeping all other attributes, then invalidate the stale TLB entry.
    pub fn remap_section(&mut self, virtual_addr: u32, new_phys_base: u32) {
        let index = (virtual_addr as usize >> 12) & 0xff;
        let section = self.l2_table[index].get_section();
        // Delegate to set_section so the large-page 16-entry replication
        // logic lives in exactly one place.
        self.set_section(virtual_addr, new_phys_base, section);
        invalidate_section(virtual_addr);
    }
}
pub fn with_mmu<F: FnMut() -> !>(l1table: &L1Table, mut f: F) -> ! {
@@ -482,3 +662,19 @@ pub fn with_mmu<F: FnMut() -> !>(l1table: &L1Table, mut f: F) -> ! {
/// Remap the section containing `virtual_addr` to `new_physical_base`
/// on the active L1 translation table.
pub fn remap_section(virtual_addr: u32, new_physical_base: u32) {
    L1Table::get().remap_section(virtual_addr, new_physical_base);
}
/// Point the active L1 table's entry for `virtual_addr` (1MB aligned)
/// at the given L2 page table.
pub fn link_l2_page_table(virtual_addr: u32, l2_table: &L2Table) {
    L1Table::get().link_l2_page_table(virtual_addr, l2_table);
}
/// Invalidate TLB and branch-predictor state for the section containing
/// `virtual_addr` after a translation-table update, then synchronize.
#[inline(always)]
fn invalidate_section(virtual_addr: u32) {
    // L1 I-cache is VIPT and 1MB page sizes guarantee different tags;
    // L1 D-cache and L2 cache are PIPT, unaffected by virtual memory changes.
    // Thus flushing caches is unnecessary, we just
    // invalidate the updated TLB entry and the branch predictor.
    tlbimva(virtual_addr);
    bpiall();
    // Barrier order matters: DSB completes the maintenance ops, ISB flushes
    // the pipeline so later instructions use the new translation.
    dsb();
    isb();
}

View File

@@ -6,7 +6,7 @@ use libboard_zynq::{devc,
eth::Eth,
smoltcp::{self,
iface::{EthernetInterfaceBuilder, NeighborCache},
time::Instant,
time::{Duration, Instant},
wire::IpCidr},
timer};
use libconfig::{bootgen, net_settings};
@@ -335,6 +335,9 @@ pub fn netboot<File: Read + Seek>(bootgen_file: &mut Option<File>, runtime_start
let mut boot_flag = false;
log::info!("Waiting for connections...");
let mut last_link_check = Instant::from_millis(0);
const LINK_CHECK_INTERVAL: u64 = 500;
loop {
let timestamp = Instant::from_millis(timer::get_ms() as i64);
{
@@ -375,5 +378,10 @@ pub fn netboot<File: Read + Seek>(bootgen_file: &mut Option<File>, runtime_start
Err(smoltcp::Error::Unrecognized) => (),
Err(err) => log::error!("Network error: {}", err),
}
let dev = interface.device_mut();
if dev.is_idle() && timestamp >= last_link_check + Duration::from_millis(LINK_CHECK_INTERVAL) {
dev.check_link_change();
last_link_check = timestamp;
}
}
}