use r0::zero_bss;
use vcell::VolatileCell;
use crate::regs::{RegisterR, RegisterW, RegisterRW};
use crate::cortex_a9::{asm, regs::*, cache, mmu};
use crate::zynq::{slcr, mpcore};

extern "C" {
    // Symbols provided by the linker script
    static mut __bss_start: u32;
    static mut __bss_end: u32;
    static mut __stack_start: u32;
}

/// `0` means: wait for initialization by core0
static mut CORE1_STACK: VolatileCell<u32> = VolatileCell::new(0);
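
/// Entry point for both cores, dispatching on the CPU id read from
/// MPIDR. The linker script is assumed to place `.text.boot` at the
/// address where the cores begin execution.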
#[link_section = ".text.boot"]
#[no_mangle]
#[naked]
pub unsafe extern "C" fn _boot_cores() -> ! {
    const CORE_MASK: u32 = 0x3;

    match MPIDR.read() & CORE_MASK {
        0 => {
            SP.write(&mut __stack_start as *mut _ as u32);
            boot_core0();
        }
        1 => {
            // wait for core0 to publish our stack pointer
            while CORE1_STACK.get() == 0 {
                asm::wfe();
            }
            SP.write(CORE1_STACK.get());
            boot_core1();
        }
        _ => unreachable!(),
    }
}
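
/// Boot path for core0: invalidate the L1 caches, zero `.bss`, set up
/// a flat MMU mapping, start the SCU, and enter `crate::main()`.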
#[naked]
#[inline(never)]
unsafe fn boot_core0() -> ! {
    l1_cache_init();

    let mpcore = mpcore::RegisterBlock::new();
    mpcore.scu_invalidate.invalidate_all_cores();

    zero_bss(&mut __bss_start, &mut __bss_end);

    let mmu_table = mmu::L1Table::get()
        .setup_flat_layout();
    mmu::with_mmu(mmu_table, || {
        mpcore.scu_control.start();
        ACTLR.enable_smp();
        // TODO: are barriers required when core1 has not yet started?
        asm::dmb();
        asm::dsb();

        crate::main();
        panic!("return from main");
    });
}
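
/// Boot path for core1: invalidate this core's L1 caches and its SCU
/// tag entries, reuse the MMU table set up by core0, and enter
/// `crate::main_core1()`.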
#[naked]
#[inline(never)]
unsafe fn boot_core1() -> ! {
    l1_cache_init();

    let mpcore = mpcore::RegisterBlock::new();
    mpcore.scu_invalidate.invalidate_core1();

    let mmu_table = mmu::L1Table::get();
    mmu::with_mmu(mmu_table, || {
        ACTLR.enable_smp();
        // TODO: are barriers required when core1 has not yet started?
        asm::dmb();
        asm::dsb();

        crate::main_core1();
        panic!("return from main_core1");
    });
}

fn l1_cache_init() {
    use crate::cortex_a9::cache::*;

    // Invalidate TLBs
    tlbiall();
    // Invalidate I-Cache
    iciallu();
    // Invalidate Branch Predictor Array
    bpiall();
    // Invalidate D-Cache
    //
    // NOTE: It is both faster and correct to only invalidate instead of
    // also flushing the cache (as was done before with `dccisw()`), and
    // it is correct to perform this operation on all of the L1 data
    // cache rather than on a (previously unspecified) combination of
    // one cache set and one cache way.
    dciall();
}
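
/// Handle to core1, owning the memory used as its stack.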
pub struct Core1<S: AsMut<[u32]>> {
    pub stack: S,
}

impl<S: AsMut<[u32]>> Core1<S> {
    /// Stop core1: assert reset, stop its clock, then release reset
    /// while the clock stays stopped
    pub fn stop(&self) {
        slcr::RegisterBlock::unlocked(|slcr| {
            slcr.a9_cpu_rst_ctrl.modify(|_, w| w.a9_rst1(true));
            slcr.a9_cpu_rst_ctrl.modify(|_, w| w.a9_clkstop1(true));
            slcr.a9_cpu_rst_ctrl.modify(|_, w| w.a9_rst1(false));
        });
    }

    /// Reset and start core1
    ///
    /// The stack must not be in OCM because core1 still has to
    /// initialize its MMU before it can access DDR.
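    ///
    /// A minimal usage sketch (illustrative only: the stack size is
    /// arbitrary and `CORE1_STACK_MEM` is a hypothetical buffer that
    /// the linker script is assumed to place outside OCM):
    ///
    /// ```ignore
    /// static mut CORE1_STACK_MEM: [u32; 1024] = [0; 1024];
    /// let core1 = Core1::start(unsafe { &mut CORE1_STACK_MEM[..] });
    /// ```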
    pub fn start(stack: S) -> Self {
        let mut core = Core1 { stack };

        // reset and stop (safe to repeat)
        core.stop();

        let stack = core.stack.as_mut();
        let stack_start = &mut stack[stack.len() - 1];
        unsafe {
            CORE1_STACK.set(stack_start as *mut _ as u32);
        }
        // Ensure the stack pointer has been written to cache
        asm::dmb();
        // Flush the cache-line so that core1, which boots with its
        // caches disabled, can observe the stack pointer
        cache::dccmvac(unsafe { &CORE1_STACK } as *const _ as u32);

        // wake up core1: release reset, then restart its clock
        slcr::RegisterBlock::unlocked(|slcr| {
            slcr.a9_cpu_rst_ctrl.modify(|_, w| w.a9_rst1(false));
            slcr.a9_cpu_rst_ctrl.modify(|_, w| w.a9_clkstop1(false));
        });

        core
    }
}