zynq-rs/libcortex_a9/src/pl310/mod.rs

//! L2 cache controller
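//!
//! # Bring-up example
//!
//! A minimal sketch of a typical init sequence, assuming a Zynq-7000
//! where the PL310 register block sits at `0xF8F0_2000` (adjust the
//! base address for other SoCs):
//!
//! ```no_run
//! use libcortex_a9::pl310::L2Cache;
//!
//! let mut l2 = L2Cache::new(0xF8F0_2000);
//! l2.disable_interrupts();
//! l2.reset_interrupts();
//! l2.invalidate_all();
//! l2.enable();
//! ```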
use libregister::RegisterW;
use crate::asm::*;

mod regs;

const CACHE_LINE: usize = 0x20;
const CACHE_LINE_MASK: usize = CACHE_LINE - 1;
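
/// Iterator over the start addresses of the 32-byte cache lines that
/// cover the half-open range `first_addr..beyond_addr` (both bounds
/// are widened out to cache line boundaries).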
#[inline]
fn cache_line_addrs(first_addr: usize, beyond_addr: usize) -> impl Iterator<Item = usize> {
    let first_addr = first_addr & !CACHE_LINE_MASK;
    let beyond_addr = (beyond_addr | CACHE_LINE_MASK) + 1;
    (first_addr..beyond_addr).step_by(CACHE_LINE)
}
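
/// Cache line addresses covering the memory occupied by `object`.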
fn object_cache_line_addrs<T>(object: &T) -> impl Iterator<Item = usize> {
    let first_addr = object as *const _ as usize;
    let beyond_addr = first_addr + core::mem::size_of_val(object);
    cache_line_addrs(first_addr, beyond_addr)
}
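
/// Cache line addresses covering the memory occupied by `slice`.
///
/// Note: panics on an empty slice, since it indexes the first and last
/// elements to find the bounds.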
fn slice_cache_line_addrs<T>(slice: &[T]) -> impl Iterator<Item = usize> {
    let first_addr = &slice[0] as *const _ as usize;
    let beyond_addr = (&slice[slice.len() - 1] as *const _ as usize) +
        core::mem::size_of_val(&slice[slice.len() - 1]);
    cache_line_addrs(first_addr, beyond_addr)
}
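
/// Driver for the PL310 L2 cache controller.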
pub struct L2Cache {
    pub regs: &'static mut regs::RegisterBlock,
}
impl L2Cache {
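    /// Creates a driver from the controller's register base address
    /// (`0xF8F0_2000` for the PL310 on Zynq-7000).
    ///
    /// The address is trusted as-is: passing anything other than the
    /// PL310 register block is undefined behavior.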
    pub fn new(register_baseaddr: usize) -> Self {
        let regs = unsafe {
            regs::RegisterBlock::new_at(register_baseaddr)
        };
        L2Cache { regs }
    }
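
    /// Programs the tag RAM latencies. Per the PL310 TRM, an encoded
    /// value of `n` selects `n + 1` clock cycles, so 0 is the fastest
    /// setting.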
    pub fn set_tag_ram_latencies(&mut self, setup_lat: u8, rd_access_lat: u8, wr_access_lat: u8) {
        self.regs.tag_ram_control.write(
            regs::RamControl::zeroed()
                .setup_lat(setup_lat)
                .rd_access_lat(rd_access_lat)
                .wr_access_lat(wr_access_lat)
        );
    }
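
    /// Programs the data RAM latencies; same encoding as
    /// [`Self::set_tag_ram_latencies`].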
    pub fn set_data_ram_latencies(&mut self, setup_lat: u8, rd_access_lat: u8, wr_access_lat: u8) {
        self.regs.data_ram_control.write(
            regs::RamControl::zeroed()
                .setup_lat(setup_lat)
                .rd_access_lat(rd_access_lat)
                .wr_access_lat(wr_access_lat)
        );
    }
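
    /// Disables (masks) all PL310 interrupt sources.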
    pub fn disable_interrupts(&mut self) {
        self.regs.int_mask.write(
            regs::Interrupts::zeroed()
                .ecntr(true)
                .parrt(true)
                .parrd(true)
                .errwt(true)
                .errwd(true)
                .errrt(true)
                .errrd(true)
                .slverr(true)
                .decerr(true)
        );
    }
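
    /// Acknowledges (clears) any pending PL310 interrupts by writing
    /// the full interrupt set to the interrupt clear register.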
    pub fn reset_interrupts(&mut self) {
        self.regs.int_clear.write(
            regs::Interrupts::zeroed()
                .ecntr(true)
                .parrt(true)
                .parrd(true)
                .errwt(true)
                .errwd(true)
                .errrt(true)
                .errrd(true)
                .slverr(true)
                .decerr(true)
        );
    }
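
    /// Invalidates the entire L2 cache: all ways at once (`0xFFFF`
    /// covers up to 16 ways), then a cache sync polled to completion.
    /// Typically done before enabling the cache.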
    pub fn invalidate_all(&mut self) {
        unsafe { self.regs.inv_way.write(0xFFFF); }
        unsafe { self.regs.cache_sync.write(1); }
        while self.regs.cache_sync.read() != 0 {}
    }
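
    /// Enables the L2 cache via the control register, with barriers
    /// around the write.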
    pub fn enable(&mut self) {
        dmb();
        self.regs.control.write(
            regs::Control::zeroed()
                .l2_enable(true)
        );
        dsb();
    }
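
    /// Cleans (writes back) and invalidates every cache line covering
    /// `obj`, then syncs. Maintenance is by physical address, so this
    /// assumes a flat (identity) VA-to-PA mapping.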
    pub fn clean_invalidate<T>(&mut self, obj: &T) {
        dmb();
        for addr in object_cache_line_addrs(obj) {
            unsafe {
                self.regs.clean_inv_pa.write(addr as u32);
            }
        }
        dsb();
        unsafe { self.regs.cache_sync.write(1); }
        while self.regs.cache_sync.read() != 0 {}
    }
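
    /// Like [`Self::clean_invalidate`], but for the lines covering
    /// `slice`.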
    pub fn clean_invalidate_slice<T>(&mut self, slice: &[T]) {
        dmb();
        for addr in slice_cache_line_addrs(slice) {
            unsafe {
                self.regs.clean_inv_pa.write(addr as u32);
            }
        }
        dsb();
        unsafe { self.regs.cache_sync.write(1); }
        while self.regs.cache_sync.read() != 0 {}
    }
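
    /// Cleans (writes back) the lines covering `slice` without
    /// invalidating them, so the data stays cached.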
    pub fn clean_slice<T>(&mut self, slice: &[T]) {
        dmb();
        for addr in slice_cache_line_addrs(slice) {
            unsafe {
                self.regs.clean_pa.write(addr as u32);
            }
        }
        dsb();
        unsafe { self.regs.cache_sync.write(1); }
        while self.regs.cache_sync.read() != 0 {}
    }
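
    /// Invalidates the lines covering `obj` without cleaning them:
    /// cached data that was not yet written back is discarded. Takes
    /// `&mut T` since the memory contents may effectively change under
    /// the caller.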
    pub fn invalidate<T>(&mut self, obj: &mut T) {
        dmb();
        for addr in object_cache_line_addrs(obj) {
            unsafe {
                self.regs.inv_pa.write(addr as u32);
            }
        }
        dsb();
        unsafe { self.regs.cache_sync.write(1); }
        while self.regs.cache_sync.read() != 0 {}
    }
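
    /// Like [`Self::invalidate`], but for the lines covering `slice`.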
    pub fn invalidate_slice<T>(&mut self, slice: &mut [T]) {
        dmb();
        for addr in slice_cache_line_addrs(slice) {
            unsafe {
                self.regs.inv_pa.write(addr as u32);
            }
        }
        dsb();
        unsafe { self.regs.cache_sync.write(1); }
        while self.regs.cache_sync.read() != 0 {}
    }
}