// zynq-rs/libcortex_r5/src/cache.rs

/// Basically the same as on the Cortex-A9, but with no L2 cache.
pub use libcortex_a9::cache::{
    iciallu,
    dcisw,
    dccsw,
    dccisw,
    dccimvac,
    dccmvac,
    dcimvac,
    object_cache_line_addrs,
    slice_cache_line_addrs,
    CACHE_LINE,
    CACHE_LINE_MASK,
};
use super::asm::{dmb, dsb};

/// Invalidate the entire D-cache (Cortex-R CP15 c15 operation)
#[inline(always)]
pub fn dciall() {
    unsafe {
        llvm_asm!("mcr p15, 0, $0, c15, c5, 0" :: "r" (0) :: "volatile");
    }
}
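
// Illustrative sketch, not part of the original file: the same CP15 write expressed
// with the stable `core::arch::asm!` macro, for toolchains where the old `llvm_asm!`
// macro is no longer available. The function name `dciall_stable` is hypothetical.
#[allow(dead_code)]
#[inline(always)]
fn dciall_stable() {
    unsafe {
        core::arch::asm!("mcr p15, 0, {0}, c15, c5, 0", in(reg) 0u32, options(nostack));
    }
}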

// D$ clean and invalidate
pub fn dcci<T>(object: &T) {
    dmb();
    for addr in object_cache_line_addrs(object) {
        dccimvac(addr);
    }
    dsb();
}
pub fn dcci_slice<T>(slice: &[T]) {
    if slice.is_empty() {
        return;
    }
    dmb();
    for addr in slice_cache_line_addrs(slice) {
        dccimvac(addr);
    }
    dsb();
}

// D$ clean
pub fn dcc<T>(object: &T) {
    dmb();
    for addr in object_cache_line_addrs(object) {
        dccmvac(addr);
    }
    dsb();
}
pub fn dcc_slice<T>(slice: &[T]) {
    if slice.is_empty() {
        return;
    }
    dmb();
    for addr in slice_cache_line_addrs(slice) {
        dccmvac(addr);
    }
    dsb();
}
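
// Illustrative usage sketch, not part of the original file: before handing a buffer
// to a DMA engine that reads from memory, clean the D-cache so the device observes
// the CPU's latest writes. The function name `example_dma_send` and the surrounding
// driver code are hypothetical.
#[allow(dead_code)]
fn example_dma_send(buf: &[u8]) {
    dcc_slice(buf);
    // hypothetical: program the DMA controller with buf.as_ptr() / buf.len() here
}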

// D$ invalidate
//
// Unsafe: invalidation discards any not-yet-written-back data in the affected
// cache lines, so the object must cover whole cache lines (checked by the asserts).
pub unsafe fn dci<T>(object: &mut T) {
    let first_addr = object as *const _ as usize;
    let beyond_addr = first_addr + core::mem::size_of_val(object);
    assert_eq!(first_addr & CACHE_LINE_MASK, 0, "dci object first_addr must be aligned");
    assert_eq!(beyond_addr & CACHE_LINE_MASK, 0, "dci object beyond_addr must be aligned");
    dmb();
    for addr in (first_addr..beyond_addr).step_by(CACHE_LINE) {
        dcimvac(addr);
    }
    dsb();
}
pub unsafe fn dci_slice<T>(slice: &mut [T]) {
    let first_addr = &slice[0] as *const _ as usize;
    let beyond_addr = (&slice[slice.len() - 1] as *const _ as usize)
        + core::mem::size_of_val(&slice[slice.len() - 1]);
    assert_eq!(first_addr & CACHE_LINE_MASK, 0, "dci slice first_addr must be aligned");
    assert_eq!(beyond_addr & CACHE_LINE_MASK, 0, "dci slice beyond_addr must be aligned");
    dmb();
    for addr in (first_addr..beyond_addr).step_by(CACHE_LINE) {
        dcimvac(addr);
    }
    dsb();
}
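
// Illustrative usage sketch, not part of the original file: after a DMA engine has
// written into a buffer, invalidate the D-cache so the CPU reads the fresh data
// rather than stale cached lines. The buffer must start and end on CACHE_LINE
// boundaries, as enforced by the asserts in `dci_slice`. The function name
// `example_dma_recv` and the completion check are hypothetical.
#[allow(dead_code)]
unsafe fn example_dma_recv(buf: &mut [u8]) {
    // hypothetical: wait for the DMA transfer-complete flag/interrupt first
    dci_slice(buf);
    // buf now reflects what the device wrote to memory
}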