Compare commits
No commits in common. "a80a2c67ef26425732d1404c9209947d640bb85a" and "98f509968418d10dbc8198cb163cca774724111d" have entirely different histories.
a80a2c67ef...98f5099684
@@ -20,7 +20,7 @@ const TX_100: u32 = 25_000_000;
 const TX_1000: u32 = 125_000_000;

 #[derive(Clone)]
-#[repr(C, align(0x20))]
+#[repr(C, align(0x08))]
 pub struct Buffer(pub [u8; MTU]);

 impl Buffer {
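Note: the left-hand side's align(0x20) matches the Cortex-A9's 32-byte L1
data-cache line, so a Buffer never shares a cache line with unrelated data,
which is what makes per-buffer clean/invalidate safe. A minimal host-runnable
sketch of that property; the MTU value of 1536 is an assumption, not taken
from this diff:

    // Cache-line-aligned DMA buffer; MTU = 1536 is assumed here.
    const MTU: usize = 1536;

    #[derive(Clone)]
    #[repr(C, align(0x20))]
    pub struct Buffer(pub [u8; MTU]);

    fn main() {
        let b = Buffer([0u8; MTU]);
        // With align(0x20), no other object can overlap the buffer's
        // first or last cache line.
        assert_eq!(&b as *const Buffer as usize % 0x20, 0);
        println!("Buffer at {:p} is 32-byte aligned", &b);
    }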
@@ -1,6 +1,5 @@
 use core::ops::Deref;
 use alloc::{vec, vec::Vec};
-use libcortex_a9::{asm::*, cache::*, UncachedSlice};
 use libregister::*;
 use super::Buffer;

@@ -56,15 +55,15 @@ register_bit!(desc_word1, global_broadcast, 31);

 #[repr(C)]
 pub struct DescList {
-    list: UncachedSlice<DescEntry>,
+    list: Vec<DescEntry>,
     buffers: Vec<Buffer>,
     next: usize,
 }

 impl DescList {
     pub fn new(size: usize) -> Self {
-        let mut list = UncachedSlice::new(size, || DescEntry::zeroed())
-            .unwrap();
+        let mut list: Vec<_> = (0..size).map(|_| DescEntry::zeroed())
+            .collect();
         let mut buffers = vec![Buffer::new(); size];

         let last = list.len().min(buffers.len()) - 1;
@@ -81,7 +80,6 @@ impl DescList {
             entry.word1.write(
                 DescWord1::zeroed()
             );
-            dcci_slice(&buffer[..]);
         }

         DescList {
@@ -98,7 +96,6 @@ impl DescList {
     pub fn recv_next<'s: 'p, 'p>(&'s mut self) -> Result<Option<PktRef<'p>>, Error> {
         let list_len = self.list.len();
         let entry = &mut self.list[self.next];
-        dmb();
         if entry.word0.read().used() {
             let word1 = entry.word1.read();
             let len = word1.frame_length_lsbs().into();
@@ -129,10 +126,7 @@ pub struct PktRef<'a> {

 impl<'a> Drop for PktRef<'a> {
     fn drop(&mut self) {
-        dcci_slice(self.buffer);
-
         self.entry.word0.modify(|_, w| w.used(false));
-        dmb();
     }
 }

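Note: the dmb() calls removed above bracket an ownership handshake: the GEM
DMA engine and the CPU pass descriptors back and forth through a "used" bit,
and the barrier keeps that check ordered against the buffer accesses. A rough
host-runnable model with atomics standing in for the shared descriptor word
(the names and bit position are illustrative, not taken from the GEM
datasheet):

    use std::sync::atomic::{AtomicU32, Ordering};

    // Stand-in for a descriptor's word0; bit 0 plays the "used" flag here.
    struct DescWord0(AtomicU32);

    impl DescWord0 {
        const USED: u32 = 1 << 0;

        // Like `entry.word0.read().used()` after dmb(): acquire ordering
        // keeps the buffer reads from being hoisted above this check.
        fn used(&self) -> bool {
            self.0.load(Ordering::Acquire) & Self::USED != 0
        }

        // Like `w.used(false)` followed by dmb(): release ordering makes
        // prior buffer accesses visible before ownership returns to DMA.
        fn hand_back(&self) {
            self.0.fetch_and(!Self::USED, Ordering::Release);
        }
    }

    fn main() {
        let w0 = DescWord0(AtomicU32::new(DescWord0::USED));
        assert!(w0.used());
        w0.hand_back();
        assert!(!w0.used());
    }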
@@ -1,6 +1,5 @@
 use core::ops::{Deref, DerefMut};
 use alloc::{vec, vec::Vec};
-use libcortex_a9::{cache::dcc_slice, UncachedSlice};
 use libregister::*;
 use super::{Buffer, regs};

@@ -43,15 +42,15 @@ pub const DESCS: usize = 8;

 #[repr(C)]
 pub struct DescList {
-    list: UncachedSlice<DescEntry>,
+    list: Vec<DescEntry>,
     buffers: Vec<Buffer>,
     next: usize,
 }

 impl DescList {
     pub fn new(size: usize) -> Self {
-        let mut list = UncachedSlice::new(size, || DescEntry::zeroed())
-            .unwrap();
+        let mut list: Vec<_> = (0..size).map(|_| DescEntry::zeroed())
+            .collect();
         let mut buffers = vec![Buffer::new(); size];

         let last = list.len().min(buffers.len()) - 1;
@@ -124,7 +123,6 @@ pub struct PktRef<'a> {

 impl<'a> Drop for PktRef<'a> {
     fn drop(&mut self) {
-        dcc_slice(self.buffer);
         self.entry.word1.modify(|_, w| w.used(false));
         if ! self.regs.tx_status.read().tx_go() {
             self.regs.net_ctrl.modify(|_, w|
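Note the asymmetry between the two Drop impls: the RX side used dcci_slice
(clean and invalidate, because the DMA engine writes the buffer and the CPU
must not see stale lines afterwards) while the TX side used dcc_slice (clean
only, because the CPU writes and the DMA engine reads). As a mnemonic sketch,
not crate API:

    enum Dir { CpuToDevice, DeviceToCpu }

    // Which maintenance op a DMA buffer needs, by transfer direction.
    fn op(dir: Dir) -> &'static str {
        match dir {
            Dir::CpuToDevice => "clean (dcc_slice): push CPU writes to RAM",
            Dir::DeviceToCpu => "clean+invalidate (dcci_slice): drop stale lines",
        }
    }

    fn main() {
        println!("TX: {}", op(Dir::CpuToDevice));
        println!("RX: {}", op(Dir::DeviceToCpu));
    }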
@@ -1,5 +1,3 @@
-use super::asm::{dmb, dsb};
-
 /// Invalidate TLBs
 #[inline(always)]
 pub fn tlbiall() {
@@ -109,19 +107,15 @@ pub fn dccimvac(addr: usize) {

 /// Data cache clean and invalidate for an object.
 pub fn dcci<T>(object: &T) {
-    dmb();
     for addr in object_cache_line_addrs(object) {
         dccimvac(addr);
     }
-    dsb();
 }

 pub fn dcci_slice<T>(slice: &[T]) {
-    dmb();
     for addr in slice_cache_line_addrs(slice) {
         dccimvac(addr);
     }
-    dsb();
 }

 /// Data cache clean by memory virtual address.
@@ -134,22 +128,18 @@ pub fn dccmvac(addr: usize) {

 /// Data cache clean for an object.
 pub fn dcc<T>(object: &T) {
-    dmb();
     for addr in object_cache_line_addrs(object) {
         dccmvac(addr);
     }
-    dsb();
 }

 /// Data cache clean for an object. Panics if not properly
 /// aligned and properly sized to be contained in an exact number of
 /// cache lines.
 pub fn dcc_slice<T>(slice: &[T]) {
-    dmb();
     for addr in slice_cache_line_addrs(slice) {
         dccmvac(addr);
     }
-    dsb();
 }

 /// Data cache invalidate by memory virtual address. This and
@@ -168,11 +158,9 @@ pub unsafe fn dci<T>(object: &mut T) {
     assert_eq!(first_addr & CACHE_LINE_MASK, 0, "dci object first_addr must be aligned");
     assert_eq!(beyond_addr & CACHE_LINE_MASK, 0, "dci object beyond_addr must be aligned");

-    dmb();
     for addr in (first_addr..beyond_addr).step_by(CACHE_LINE) {
         dcimvac(addr);
     }
-    dsb();
 }

 pub unsafe fn dci_slice<T>(slice: &mut [T]) {
@@ -182,9 +170,7 @@ pub unsafe fn dci_slice<T>(slice: &mut [T]) {
     assert_eq!(first_addr & CACHE_LINE_MASK, 0, "dci slice first_addr must be aligned");
     assert_eq!(beyond_addr & CACHE_LINE_MASK, 0, "dci slice beyond_addr must be aligned");

-    dmb();
     for addr in (first_addr..beyond_addr).step_by(CACHE_LINE) {
         dcimvac(addr);
     }
-    dsb();
 }
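Note: each dmb()/dsb() pair removed above brackets a loop of per-line
maintenance operations; object_cache_line_addrs and slice_cache_line_addrs
(referenced but not shown in this diff) enumerate every cache line an object
touches. A host-runnable sketch of that enumeration, assuming the
Cortex-A9's 32-byte line:

    const CACHE_LINE: usize = 0x20;
    const CACHE_LINE_MASK: usize = CACHE_LINE - 1;

    // One address per cache line overlapping `slice` (sketch of the
    // slice_cache_line_addrs helper the functions above call).
    fn slice_cache_line_addrs<T>(slice: &[T]) -> impl Iterator<Item = usize> {
        let start = slice.as_ptr() as usize;
        let first = start & !CACHE_LINE_MASK;
        let beyond = start + slice.len() * std::mem::size_of::<T>();
        (first..beyond).step_by(CACHE_LINE)
    }

    fn main() {
        let data = [0u32; 24]; // 96 bytes: 3 or 4 lines depending on alignment
        println!("{} cache lines", slice_cache_line_addrs(&data).count());
    }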
@@ -10,7 +10,5 @@ pub mod cache;
 pub mod mmu;
 pub mod mutex;
 pub mod sync_channel;
-mod uncached;
-pub use uncached::UncachedSlice;

 global_asm!(include_str!("exceptions.s"));
@@ -1,5 +1,5 @@
 use bit_field::BitField;
-use super::{regs::*, asm, cache};
+use super::{regs::*, asm};
 use libregister::RegisterW;

 #[derive(Copy, Clone)]
@@ -44,12 +44,6 @@ pub enum AccessPermissions {
 }

 impl AccessPermissions {
-    fn new(ap: u8, apx: bool) -> Self {
-        unsafe {
-            core::mem::transmute(if apx { 0b100 } else { 0 } | ap)
-        }
-    }
-
     fn ap(&self) -> u8 {
         (*self as u8) & 0b11
     }
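Note: the removed AccessPermissions::new reconstructed the enum from the
AP[1:0] bits plus the APX bit via transmute, relying on the discriminants
being exactly apx << 2 | ap. A safe host-runnable sketch of the same decode
(the variant names here are hypothetical; the real ones are not shown in
this diff):

    // Hypothetical variant names; the real enum lives in the mmu module.
    #[derive(Debug, Clone, Copy, PartialEq)]
    #[repr(u8)]
    enum AccessPermissions {
        PermissionFault = 0b000,
        PrivilegedOnly = 0b001,
        NoUserWrite = 0b010,
        FullAccess = 0b011,
        Reserved = 0b100,
        PrivilegedOnlyReadOnly = 0b101,
        ReadOnly = 0b110,
        ReadOnlyAlt = 0b111,
    }

    impl AccessPermissions {
        // Safe equivalent of the removed transmute-based new(ap, apx).
        fn new(ap: u8, apx: bool) -> Self {
            match (if apx { 0b100 } else { 0 }) | (ap & 0b11) {
                0b000 => Self::PermissionFault,
                0b001 => Self::PrivilegedOnly,
                0b010 => Self::NoUserWrite,
                0b011 => Self::FullAccess,
                0b100 => Self::Reserved,
                0b101 => Self::PrivilegedOnlyReadOnly,
                0b110 => Self::ReadOnly,
                _ => Self::ReadOnlyAlt,
            }
        }
    }

    fn main() {
        assert_eq!(AccessPermissions::new(0b11, false), AccessPermissions::FullAccess);
        println!("{:?}", AccessPermissions::new(0b01, true));
    }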
@@ -71,55 +65,31 @@ pub struct L1Section {
     pub bufferable: bool,
 }

-const ENTRY_TYPE_SECTION: u32 = 0b10;
-pub const L1_PAGE_SIZE: usize = 0x100000;
-
 #[repr(C)]
 #[derive(Clone, Copy)]
 pub struct L1Entry(u32);

 impl L1Entry {
     #[inline(always)]
-    pub fn from_section(phys_base: u32, section: L1Section) -> Self {
+    pub fn section(phys_base: u32, section: L1Section) -> Self {
         // Must be aligned to 1 MB
         assert!(phys_base & 0x000f_ffff == 0);
         let mut entry = L1Entry(phys_base);

-        entry.set_section(section);
-        entry
-    }
-
-    pub fn get_section(&mut self) -> L1Section {
-        assert_eq!(self.0.get_bits(0..=1), ENTRY_TYPE_SECTION);
-        let access = AccessPermissions::new(
-            self.0.get_bits(10..=11) as u8,
-            self.0.get_bit(15)
-        );
-        L1Section {
-            global: !self.0.get_bit(17),
-            shareable: self.0.get_bit(16),
-            access,
-            tex: self.0.get_bits(12..=14) as u8,
-            domain: self.0.get_bits(5..=8) as u8,
-            exec: !self.0.get_bit(4),
-            cacheable: self.0.get_bit(3),
-            bufferable: self.0.get_bit(2),
-        }
-    }
-
-    pub fn set_section(&mut self, section: L1Section) {
-        self.0.set_bits(0..=1, ENTRY_TYPE_SECTION);
-        self.0.set_bit(2, section.bufferable);
-        self.0.set_bit(3, section.cacheable);
-        self.0.set_bit(4, !section.exec);
+        entry.0.set_bits(0..=1, 0b10);
+        entry.0.set_bit(2, section.bufferable);
+        entry.0.set_bit(3, section.cacheable);
+        entry.0.set_bit(4, !section.exec);
         assert!(section.domain < 16);
-        self.0.set_bits(5..=8, section.domain.into());
-        self.0.set_bits(10..=11, section.access.ap().into());
+        entry.0.set_bits(5..=8, section.domain.into());
+        entry.0.set_bits(10..=11, section.access.ap().into());
         assert!(section.tex < 8);
-        self.0.set_bits(12..=14, section.tex.into());
-        self.0.set_bit(15, section.access.apx());
-        self.0.set_bit(16, section.shareable);
-        self.0.set_bit(17, !section.global);
+        entry.0.set_bits(12..=14, section.tex.into());
+        entry.0.set_bit(15, section.access.apx());
+        entry.0.set_bit(16, section.shareable);
+        entry.0.set_bit(17, !section.global);
+
+        entry
     }
 }

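Note: to see what these setters pack, here is a host-runnable worked example
using the same bit_field calls on a plain u32 (add bit_field as a
dependency); the attribute values are made up for illustration:

    use bit_field::BitField;

    fn main() {
        let phys_base: u32 = 0x1000_0000;
        assert!(phys_base & 0x000f_ffff == 0); // must be 1 MiB aligned

        let mut entry = phys_base;
        entry.set_bits(0..=1, 0b10);    // short-descriptor type: section
        entry.set_bit(2, true);         // B: bufferable
        entry.set_bit(3, true);         // C: cacheable
        entry.set_bit(4, false);        // XN = 0: executable
        entry.set_bits(5..=8, 0b1111);  // domain
        entry.set_bits(10..=11, 0b11);  // AP[1:0]: full access
        entry.set_bits(12..=14, 0b000); // TEX
        entry.set_bit(15, false);       // APX
        entry.set_bit(16, true);        // S: shareable
        entry.set_bit(17, false);       // nG = 0: global

        println!("L1 section entry: {:#010x}", entry);
    }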
@@ -162,7 +132,7 @@ impl L1Table {
                 domain: 0b1111,
                 exec: true,
                 cacheable: true,
-                bufferable: true,
+                bufferable: false,
             });
         }
         /* 0x40000000 - 0x7fffffff (FPGA slave0) */
@@ -355,25 +325,7 @@ impl L1Table {
         assert!(index < L1_TABLE_SIZE);

         let base = (index as u32) << 20;
-        self.table[index] = L1Entry::from_section(base, section);
-    }
-
-    pub fn update<T, F, R>(&mut self, ptr: *const T, f: F) -> R
-    where
-        F: FnOnce(&'_ mut L1Section) -> R,
-    {
-        let index = (ptr as usize) >> 20;
-        let entry = &mut self.table[index];
-        let mut section = entry.get_section();
-        let result = f(&mut section);
-        entry.set_section(section);
-
-        asm::dmb();
-        cache::tlbiall();
-        asm::dsb();
-        asm::isb();
-
-        result
+        self.table[index] = L1Entry::section(base, section);
     }
 }
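Note: the removed update() did more than edit the table; after rewriting an
entry it had to publish the change to the MMU. Extracted as an in-context
sketch (asm and cache are this crate's modules, imported at the top of the
left-hand side; not host-runnable):

    // Publish an L1 page-table edit on Cortex-A9.
    fn publish_l1_update() {
        asm::dmb();       // drain the table write to memory
        cache::tlbiall(); // invalidate all TLB entries
        asm::dsb();       // wait for the invalidation to complete
        asm::isb();       // refetch under the new mapping
    }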
@@ -1,65 +0,0 @@
-use core::{
-    ops::{Deref, DerefMut},
-    mem::{align_of, size_of},
-};
-use alloc::alloc::{dealloc, Layout, LayoutErr};
-use crate::mmu::{L1_PAGE_SIZE, L1Table};
-
-pub struct UncachedSlice<T: 'static> {
-    layout: Layout,
-    slice: &'static mut [T],
-}
-
-impl<T> UncachedSlice<T> {
-    /// allocates in chunks of 1 MB
-    pub fn new<F: Fn() -> T>(len: usize, default: F) -> Result<Self, LayoutErr> {
-        // round to full pages
-        let size = ((len * size_of::<T>() - 1) | (L1_PAGE_SIZE - 1)) + 1;
-        let align = align_of::<T>()
-            .max(L1_PAGE_SIZE);
-        let layout = Layout::from_size_align(size, align)?;
-        let ptr = unsafe { alloc::alloc::alloc(layout).cast::<T>() };
-        let start = ptr as usize;
-        assert_eq!(start & (L1_PAGE_SIZE - 1), 0);
-
-        for page_start in (start..(start + size)).step_by(L1_PAGE_SIZE) {
-            L1Table::get()
-                .update(page_start as *const (), |l1_section| {
-                    l1_section.cacheable = false;
-                    l1_section.bufferable = false;
-                });
-        }
-
-        let slice = unsafe { core::slice::from_raw_parts_mut(ptr, len) };
-        // verify size
-        assert!(unsafe { slice.get_unchecked(len) } as *const _ as usize <= start + size);
-        // initialize
-        for e in slice.iter_mut() {
-            *e = default();
-        }
-        Ok(UncachedSlice { layout, slice })
-    }
-}
-
-/// Does not yet mark the pages cachable again
-impl<T> Drop for UncachedSlice<T> {
-    fn drop(&mut self) {
-        unsafe {
-            dealloc(self.slice.as_mut_ptr() as *mut _ as *mut u8, self.layout);
-        }
-    }
-}
-
-impl<T> Deref for UncachedSlice<T> {
-    type Target = [T];
-
-    fn deref(&self) -> &Self::Target {
-        self.slice
-    }
-}
-
-impl<T> DerefMut for UncachedSlice<T> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        self.slice
-    }
-}
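Note: the size computation in the deleted UncachedSlice::new,
((len * size_of::<T>() - 1) | (L1_PAGE_SIZE - 1)) + 1, rounds the byte count
up to a whole number of 1 MiB sections; it relies on L1_PAGE_SIZE being a
power of two and on len > 0. A quick host-runnable check:

    const L1_PAGE_SIZE: usize = 0x100000; // one L1 section: 1 MiB

    // Round `bytes` up to a whole number of pages (requires bytes > 0).
    fn round_to_pages(bytes: usize) -> usize {
        ((bytes - 1) | (L1_PAGE_SIZE - 1)) + 1
    }

    fn main() {
        assert_eq!(round_to_pages(1), L1_PAGE_SIZE);
        assert_eq!(round_to_pages(L1_PAGE_SIZE), L1_PAGE_SIZE);
        assert_eq!(round_to_pages(L1_PAGE_SIZE + 1), 2 * L1_PAGE_SIZE);
        println!("rounding ok");
    }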
@@ -8,7 +8,7 @@ use libboard_zynq::ddr::DdrRam;
 #[global_allocator]
 static ALLOCATOR: CortexA9Alloc = CortexA9Alloc(Mutex::new(Heap::empty()));

-/// LockedHeap doesn't lock properly
+/// LockedHeap doesn't locking properly
 struct CortexA9Alloc(Mutex<Heap>);

 unsafe impl Sync for CortexA9Alloc {}
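Note: the hunk does not show the trait plumbing, but CortexA9Alloc presumably
implements GlobalAlloc by locking the inner Heap. A sketch of that pattern,
assuming linked_list_allocator's Heap API (allocate_first_fit/deallocate) and
a Mutex whose lock() returns a guard; signatures may differ across versions:

    use core::alloc::{GlobalAlloc, Layout};
    use core::ptr::{self, NonNull};

    unsafe impl GlobalAlloc for CortexA9Alloc {
        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
            self.0.lock()
                .allocate_first_fit(layout)
                .ok()
                .map_or(ptr::null_mut(), |p| p.as_ptr())
        }

        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
            if let Some(p) = NonNull::new(ptr) {
                self.0.lock().deallocate(p, layout);
            }
        }
    }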