2019-11-11 09:43:05 +08:00
|
|
|
use r0::zero_bss;
|
2020-05-06 22:05:34 +08:00
|
|
|
use core::ptr::write_volatile;
|
2019-12-18 06:35:58 +08:00
|
|
|
use libregister::{
|
|
|
|
VolatileCell,
|
2021-01-28 11:57:52 +08:00
|
|
|
RegisterR, RegisterRW,
|
2019-12-18 06:35:58 +08:00
|
|
|
};
|
2021-01-28 10:58:17 +08:00
|
|
|
use libcortex_a9::{asm, l2c, regs::*, cache, mmu, spin_lock_yield, notify_spin_lock, enable_fpu, interrupt_handler};
|
2019-12-18 06:35:58 +08:00
|
|
|
use libboard_zynq::{slcr, mpcore};
|
2019-11-11 09:43:05 +08:00
|
|
|
|
|
|
|
extern "C" {
    // Linker-script symbols delimiting the .bss section; zeroed by core0
    // during boot (see boot_core0).
    static mut __bss_start: u32;
    static mut __bss_end: u32;
    // Linker-script symbols: initial stack tops for core0 and core1,
    // passed to the interrupt_handler! reset vector below.
    static mut __stack0_start: u32;
    static mut __stack1_start: u32;
    // Per-core application entry points, defined elsewhere; called once
    // boot is complete and expected never to return.
    fn main_core0();
    fn main_core1();
}
|
|
|
|
|
2020-04-28 19:31:49 +08:00
|
|
|
static mut CORE1_ENABLED: VolatileCell<bool> = VolatileCell::new(false);
|
2019-11-16 06:54:26 +08:00
|
|
|
|
2021-01-28 10:58:17 +08:00
|
|
|
interrupt_handler!(Reset, reset_irq, __stack0_start, __stack1_start, {
|
2021-01-28 11:57:52 +08:00
|
|
|
// no need to setup stack here, as we already did when entering the handler
|
2020-07-08 05:51:33 +08:00
|
|
|
match MPIDR.read().cpu_id() {
|
2019-11-11 09:43:05 +08:00
|
|
|
0 => {
|
|
|
|
boot_core0();
|
|
|
|
}
|
2019-11-16 06:54:26 +08:00
|
|
|
1 => {
|
2020-04-28 19:31:49 +08:00
|
|
|
while !CORE1_ENABLED.get() {
|
2020-08-04 13:50:42 +08:00
|
|
|
spin_lock_yield();
|
2019-11-16 06:54:26 +08:00
|
|
|
}
|
|
|
|
boot_core1();
|
|
|
|
}
|
|
|
|
_ => unreachable!(),
|
2019-11-11 09:43:05 +08:00
|
|
|
}
|
2021-01-28 10:58:17 +08:00
|
|
|
});
|
2019-11-11 09:43:05 +08:00
|
|
|
|
|
|
|
#[inline(never)]
|
2021-01-15 16:47:14 +08:00
|
|
|
unsafe extern "C" fn boot_core0() -> ! {
|
2019-11-11 09:43:05 +08:00
|
|
|
l1_cache_init();
|
2019-11-16 06:54:26 +08:00
|
|
|
|
2021-01-15 12:00:35 +08:00
|
|
|
enable_fpu();
|
2020-08-13 13:39:04 +08:00
|
|
|
let mpcore = mpcore::RegisterBlock::mpcore();
|
2019-11-16 06:54:26 +08:00
|
|
|
mpcore.scu_invalidate.invalidate_all_cores();
|
|
|
|
|
2019-11-11 09:43:05 +08:00
|
|
|
zero_bss(&mut __bss_start, &mut __bss_end);
|
|
|
|
|
|
|
|
let mmu_table = mmu::L1Table::get()
|
|
|
|
.setup_flat_layout();
|
|
|
|
mmu::with_mmu(mmu_table, || {
|
2019-11-16 06:54:26 +08:00
|
|
|
mpcore.scu_control.start();
|
2019-11-16 07:12:58 +08:00
|
|
|
ACTLR.enable_smp();
|
2020-09-04 16:38:48 +08:00
|
|
|
ACTLR.enable_prefetch();
|
2019-11-16 07:12:58 +08:00
|
|
|
// TODO: Barriers reqd when core1 is not yet starting?
|
|
|
|
asm::dmb();
|
|
|
|
asm::dsb();
|
2019-11-16 06:54:26 +08:00
|
|
|
|
2024-02-02 16:15:46 +08:00
|
|
|
asm::enable_fiq();
|
2020-08-03 11:21:38 +08:00
|
|
|
asm::enable_irq();
|
2019-12-18 07:06:10 +08:00
|
|
|
main_core0();
|
2019-11-11 09:43:05 +08:00
|
|
|
panic!("return from main");
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2019-11-16 06:54:26 +08:00
|
|
|
#[inline(never)]
|
2021-01-15 16:47:14 +08:00
|
|
|
unsafe extern "C" fn boot_core1() -> ! {
|
2019-11-16 06:54:26 +08:00
|
|
|
l1_cache_init();
|
|
|
|
|
2020-08-13 13:39:04 +08:00
|
|
|
let mpcore = mpcore::RegisterBlock::mpcore();
|
2019-11-16 06:54:26 +08:00
|
|
|
mpcore.scu_invalidate.invalidate_core1();
|
|
|
|
|
|
|
|
let mmu_table = mmu::L1Table::get();
|
|
|
|
mmu::with_mmu(mmu_table, || {
|
2019-11-16 07:12:58 +08:00
|
|
|
ACTLR.enable_smp();
|
2020-09-04 16:38:48 +08:00
|
|
|
ACTLR.enable_prefetch();
|
2019-11-16 07:12:58 +08:00
|
|
|
// TODO: Barriers reqd when core1 is not yet starting?
|
|
|
|
asm::dmb();
|
|
|
|
asm::dsb();
|
|
|
|
|
2024-02-02 16:15:46 +08:00
|
|
|
asm::enable_fiq();
|
2020-08-03 11:21:38 +08:00
|
|
|
asm::enable_irq();
|
2019-12-18 07:06:10 +08:00
|
|
|
main_core1();
|
2019-11-16 06:54:26 +08:00
|
|
|
panic!("return from main_core1");
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2019-11-11 09:43:05 +08:00
|
|
|
fn l1_cache_init() {
|
2019-12-18 06:35:58 +08:00
|
|
|
use libcortex_a9::cache::*;
|
2019-11-11 09:43:05 +08:00
|
|
|
|
|
|
|
// Invalidate TLBs
|
|
|
|
tlbiall();
|
|
|
|
// Invalidate I-Cache
|
|
|
|
iciallu();
|
|
|
|
// Invalidate Branch Predictor Array
|
|
|
|
bpiall();
|
|
|
|
// Invalidate D-Cache
|
|
|
|
//
|
|
|
|
// NOTE: It is both faster and correct to only invalidate instead
|
|
|
|
// of also flush the cache (as was done before with
|
|
|
|
// `dccisw()`) and it is correct to perform this operation
|
|
|
|
// for all of the L1 data cache rather than a (previously
|
|
|
|
// unspecified) combination of one cache set and one cache
|
|
|
|
// way.
|
2020-08-20 11:51:24 +08:00
|
|
|
dciall_l1();
|
2019-11-11 09:43:05 +08:00
|
|
|
}
|
2019-11-16 07:53:30 +08:00
|
|
|
|
2020-04-28 19:31:49 +08:00
|
|
|
pub struct Core1 {
|
2019-11-18 08:13:57 +08:00
|
|
|
}
|
2019-11-18 07:38:03 +08:00
|
|
|
|
2020-04-28 19:31:49 +08:00
|
|
|
impl Core1 {
|
2019-11-18 08:13:57 +08:00
|
|
|
/// Reset and start core1
|
2020-05-06 22:05:34 +08:00
|
|
|
pub fn start(sdram: bool) -> Self {
|
2019-11-18 07:38:03 +08:00
|
|
|
// reset and stop (safe to repeat)
|
2020-01-10 05:13:04 +08:00
|
|
|
slcr::RegisterBlock::unlocked(|slcr| {
|
|
|
|
slcr.a9_cpu_rst_ctrl.modify(|_, w| w.a9_rst1(true));
|
|
|
|
slcr.a9_cpu_rst_ctrl.modify(|_, w| w.a9_clkstop1(true));
|
|
|
|
slcr.a9_cpu_rst_ctrl.modify(|_, w| w.a9_rst1(false));
|
|
|
|
});
|
2019-11-18 07:38:03 +08:00
|
|
|
|
2020-05-06 22:05:34 +08:00
|
|
|
if sdram {
|
|
|
|
// Cores always start from OCM no matter what you do.
|
|
|
|
// Make up a vector table there that just jumps to SDRAM.
|
|
|
|
for i in 0..8 {
|
|
|
|
unsafe {
|
|
|
|
// this is the ARM instruction "b +0x00100000"
|
|
|
|
write_volatile((i*4) as *mut u32, 0xea03fffe);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-18 07:38:03 +08:00
|
|
|
unsafe {
|
2020-04-28 19:31:49 +08:00
|
|
|
CORE1_ENABLED.set(true);
|
2019-11-18 07:38:03 +08:00
|
|
|
}
|
2019-11-18 08:13:57 +08:00
|
|
|
// Flush cache-line
|
2020-08-20 11:51:24 +08:00
|
|
|
cache::dcc(unsafe { &CORE1_ENABLED });
|
2020-05-06 22:05:34 +08:00
|
|
|
if sdram {
|
|
|
|
cache::dccmvac(0);
|
2020-08-20 11:51:24 +08:00
|
|
|
asm::dsb();
|
|
|
|
l2c::l2_cache_clean(0);
|
|
|
|
l2c::l2_cache_sync();
|
2020-05-06 22:05:34 +08:00
|
|
|
}
|
2019-11-18 07:38:03 +08:00
|
|
|
|
|
|
|
// wake up core1
|
|
|
|
slcr::RegisterBlock::unlocked(|slcr| {
|
|
|
|
slcr.a9_cpu_rst_ctrl.modify(|_, w| w.a9_rst1(false));
|
|
|
|
slcr.a9_cpu_rst_ctrl.modify(|_, w| w.a9_clkstop1(false));
|
|
|
|
});
|
2020-08-04 13:50:42 +08:00
|
|
|
notify_spin_lock();
|
2019-11-18 08:13:57 +08:00
|
|
|
|
2020-04-28 19:31:49 +08:00
|
|
|
Core1 {}
|
2019-11-18 07:38:03 +08:00
|
|
|
}
|
2020-04-17 14:05:45 +08:00
|
|
|
|
|
|
|
pub fn disable(&self) {
|
|
|
|
unsafe {
|
2020-04-28 19:31:49 +08:00
|
|
|
CORE1_ENABLED.set(false);
|
2020-07-28 11:40:20 +08:00
|
|
|
cache::dccmvac(&CORE1_ENABLED as *const _ as usize);
|
|
|
|
asm::dsb();
|
2020-04-17 14:05:45 +08:00
|
|
|
}
|
|
|
|
self.restart();
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn restart(&self) {
|
|
|
|
slcr::RegisterBlock::unlocked(|slcr| {
|
|
|
|
slcr.a9_cpu_rst_ctrl.modify(|_, w| w.a9_rst1(true));
|
2020-07-07 10:17:15 +08:00
|
|
|
slcr.a9_cpu_rst_ctrl.modify(|_, w| w.a9_clkstop1(true));
|
2020-04-17 14:05:45 +08:00
|
|
|
slcr.a9_cpu_rst_ctrl.modify(|_, w| w.a9_rst1(false));
|
2020-07-07 10:17:15 +08:00
|
|
|
slcr.a9_cpu_rst_ctrl.modify(|_, w| w.a9_clkstop1(false));
|
2020-04-17 14:05:45 +08:00
|
|
|
});
|
|
|
|
}
|
2019-11-16 07:53:30 +08:00
|
|
|
}
|