libsupport_zynq/ram: split allocator for each core. #47

Cargo.toml:

@@ -9,6 +9,7 @@ edition = "2018"
 target_zc706 = ["libboard_zynq/target_zc706"]
 target_cora_z7_10 = ["libboard_zynq/target_cora_z7_10"]
 panic_handler = []
+alloc_core = []
 default = ["panic_handler"]

src/ram.rs:

@@ -1,55 +1,94 @@
-use core::alloc::GlobalAlloc;
-use core::ptr::NonNull;
 use alloc::alloc::Layout;
-use linked_list_allocator::Heap;
-use libcortex_a9::mutex::Mutex;
+use core::alloc::GlobalAlloc;
+use core::ptr::NonNull;
 use libboard_zynq::ddr::DdrRam;
+use libcortex_a9::{mutex::Mutex, regs::MPIDR};
+use libregister::RegisterR;
+use linked_list_allocator::Heap;

 #[global_allocator]
-static ALLOCATOR: CortexA9Alloc = CortexA9Alloc(Mutex::new(Heap::empty()));
+static ALLOCATOR: CortexA9Alloc =
+    CortexA9Alloc(Mutex::new(Heap::empty()), Mutex::new(Heap::empty()));

 /// LockedHeap doesn't lock properly
-struct CortexA9Alloc(Mutex<Heap>);
+struct CortexA9Alloc(Mutex<Heap>, Mutex<Heap>);

 unsafe impl Sync for CortexA9Alloc {}

 unsafe impl GlobalAlloc for CortexA9Alloc {
     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        self.0.lock()
-            .allocate_first_fit(layout)
-            .ok()
-            .map_or(0 as *mut u8, |allocation| allocation.as_ptr())
+        if MPIDR.read().cpu_id() == 0 {
+            self.0.lock()
+        } else {
+            self.1.lock()
+        }
+        .allocate_first_fit(layout)
+        .ok()
+        .map_or(0 as *mut u8, |allocation| allocation.as_ptr())
     }

+    #[cfg(feature = "alloc_core")]
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        let start0 = &__heap0_start as *const usize as *const u8;
+        let end0 = &__heap0_end as *const usize as *const u8;
+        let start1 = &__heap1_start as *const usize as *const u8;
+        let end1 = &__heap1_end as *const usize as *const u8;
+        let const_ptr = ptr as *const u8;
+
+        if start0 <= const_ptr && const_ptr < end0 {
+            self.0.lock()
+        } else if start1 <= const_ptr && const_ptr < end1 {
+            self.1.lock()
+        } else {
+            panic!("Invalid deallocation: {:p} is not in heap0 nor heap1", ptr);
+        }
+        .deallocate(NonNull::new_unchecked(ptr), layout)
+    }
+
+    #[cfg(not(feature = "alloc_core"))]
     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
         self.0.lock()
             .deallocate(NonNull::new_unchecked(ptr), layout)
     }
 }

+#[cfg(not(feature = "alloc_core"))]
 pub fn init_alloc_ddr(ddr: &mut DdrRam) {
     unsafe {
-        ALLOCATOR.0.lock()
+        ALLOCATOR
+            .0
+            .lock()
             .init(ddr.ptr::<u8>() as usize, ddr.size());
     }
 }

+#[cfg(feature = "alloc_core")]
 extern "C" {
-    static __heap_start: usize;
-    static __heap_end: usize;
+    static __heap0_start: usize;
+    static __heap0_end: usize;
+    static __heap1_start: usize;
+    static __heap1_end: usize;
 }

-pub fn init_alloc_linker() {
+#[cfg(feature = "alloc_core")]
+pub fn init_alloc_core0() {
     unsafe {
-        let start = &__heap_start as *const usize as usize;
-        let end = &__heap_end as *const usize as usize;
-        ALLOCATOR.0.lock()
-            .init(start, end - start);
+        let start = &__heap0_start as *const usize as usize;
+        let end = &__heap0_end as *const usize as usize;
+        ALLOCATOR.0.lock().init(start, end - start);
     }
 }

+#[cfg(feature = "alloc_core")]
+pub fn init_alloc_core1() {
+    unsafe {
+        let start = &__heap1_start as *const usize as usize;
+        let end = &__heap1_end as *const usize as usize;
+        ALLOCATOR.1.lock().init(start, end - start);
+    }
+}
+
 #[alloc_error_handler]
-fn alloc_error(_: core::alloc::Layout) -> ! {
-    panic!("alloc_error")
+fn alloc_error(layout: core::alloc::Layout) -> ! {
+    panic!("Core {} alloc_error, layout: {:?}", MPIDR.read().cpu_id(), layout);
 }
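
For orientation, a minimal sketch (not part of this PR) of how the two initialisation paths might be called from each core's startup code. The start_core0/start_core1 names are hypothetical, and the sketch assumes the caller has the ram module's functions in scope and forwards the alloc_core feature; only init_alloc_core0, init_alloc_core1 and init_alloc_ddr come from this diff.

// Hypothetical startup wiring; only the init_alloc_* functions exist in this PR.

// With alloc_core enabled, each core sets up its own heap before allocating,
// so allocations on core 1 can never collide with core 0's heap.
#[cfg(feature = "alloc_core")]
fn start_core0() {
    init_alloc_core0(); // heap0: __heap0_start .. __heap0_end
    // ... wake up core 1, then run core 0 code ...
}

#[cfg(feature = "alloc_core")]
fn start_core1() {
    init_alloc_core1(); // heap1: __heap1_start .. __heap1_end
    // ... run core 1 code ...
}

// Without the feature, behaviour is unchanged: a single heap covering DDR RAM,
// initialised once via init_alloc_ddr(&mut ddr).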
Review comment on the dealloc change:

    let ptr = ptr as *const u8;

I don't think this works: shadowing the pointer does not work for the later
.deallocate(NonNull::new_unchecked(ptr), layout) call, for which the pointer
has to be mutable here.
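
A standalone sketch of that point (not code from the PR): core::ptr::NonNull::new_unchecked takes a *mut T, so shadowing ptr as *const u8 for the range checks would make the later deallocate call fail to type-check. The dealloc_sketch name is hypothetical.

use core::ptr::NonNull;

// Sketch only: the range checks can use a separate *const view of the pointer,
// while the original *mut pointer stays available for deallocation.
unsafe fn dealloc_sketch(ptr: *mut u8) {
    let const_ptr = ptr as *const u8; // used only for heap start/end comparisons
    let _ = const_ptr;

    // NonNull::new_unchecked expects *mut u8, so `ptr` must remain mutable:
    let _block: NonNull<u8> = NonNull::new_unchecked(ptr);

    // Had `ptr` been shadowed (`let ptr = ptr as *const u8;`), the line above
    // would not compile: expected raw pointer `*mut u8`, found `*const u8`.
}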