
runtime/kernel: use mutable static for shared channel

Holding a Mutex would prevent a restart if we failed while waiting for an RPC.
pca006132 2020-08-24 16:18:31 +08:00
parent e592efb2b8
commit fa187fb37a
5 changed files with 41 additions and 31 deletions
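To make the commit-message rationale concrete, here is a minimal, self-contained sketch of the pattern the diff moves to. Everything below is hypothetical stand-in code, not the firmware's API: `Message`, `Sender`, `install`, `send`, and `take` are invented names; the real code installs sync_channel endpoints in main_core1 around kernel.exec(), as shown in the second file below.

// Hedged sketch of "mutable static instead of Mutex<Option<...>>".
// All names here are made up for illustration.
#![allow(static_mut_refs)] // newer toolchains lint references to `static mut`; this 2020-era pattern predates that

struct Message;
struct Sender; // stand-in for sync_channel::Sender<'static, Message>

impl Sender {
    fn send(&mut self, _msg: Message) { /* deliver to core0 */ }
}

// The channel endpoint shared with kernel code, installed before each run.
static mut KERNEL_TX: Option<Sender> = None;

// Supervisor side: install the channel before starting a kernel.
unsafe fn install(tx: Sender) {
    KERNEL_TX = Some(tx);
}

// Kernel side: borrow the endpoint for one send. There is no lock guard that
// could be left held if the kernel dies while blocked on an RPC reply.
unsafe fn send(msg: Message) {
    KERNEL_TX.as_mut().expect("kernel channel not installed").send(msg);
}

// Supervisor side: reclaim the endpoint after the kernel finishes or is torn
// down, even if it died mid-request; the next run just calls install() again.
unsafe fn take() -> Sender {
    KERNEL_TX.take().expect("kernel channel not installed")
}

fn main() {
    unsafe {
        install(Sender);
        send(Message);
        let _tx = take(); // nothing was left locked, so a restart can reuse it
    }
}

Compared with the previous Mutex<Option<...>>, the trade-off is that soundness now rests on the invariant that only the single kernel thread on core1 touches these statics between install() and take(), rather than on a lock; in exchange, a kernel that dies while blocked on an RPC reply leaves nothing locked, so the channels can simply be reinstalled on restart.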


@@ -5,10 +5,11 @@ use super::{KERNEL_CHANNEL_0TO1, KERNEL_CHANNEL_1TO0, Message};
 pub extern fn get(key: CSlice<u8>) -> CSlice<'static, i32> {
     let key = String::from_utf8(key.as_ref().to_vec()).unwrap();
-    KERNEL_CHANNEL_1TO0.lock().as_mut().unwrap().send(Message::CacheGetRequest(key));
-    let msg = KERNEL_CHANNEL_0TO1.lock().as_mut().unwrap().recv();
+    unsafe {
+        KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(Message::CacheGetRequest(key));
+        let msg = KERNEL_CHANNEL_0TO1.as_mut().unwrap().recv();
         if let Message::CacheGetReply(v) = msg {
-            let slice = unsafe { transmute(v.as_c_slice()) };
+            let slice = transmute(v.as_c_slice());
             // we intentionally leak the memory here,
             // which does not matter as core1 would restart
             forget(v);
@@ -16,11 +17,14 @@ pub extern fn get(key: CSlice<u8>) -> CSlice<'static, i32> {
         } else {
             panic!("Expected CacheGetReply for CacheGetRequest");
         }
+    }
 }

 pub extern fn put(key: CSlice<u8>, list: CSlice<i32>) {
     let key = String::from_utf8(key.as_ref().to_vec()).unwrap();
     let value = list.as_ref().to_vec();
-    KERNEL_CHANNEL_1TO0.lock().as_mut().unwrap().send(Message::CachePutRequest(key, value));
+    unsafe {
+        KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(Message::CachePutRequest(key, value));
+    }
 }


@@ -179,15 +179,15 @@ pub fn main_core1() {
             Message::StartRequest => {
                 info!("kernel starting");
                 if let Some(kernel) = loaded_kernel.take() {
-                    *KERNEL_CHANNEL_0TO1.lock() = Some(core1_rx);
-                    *KERNEL_CHANNEL_1TO0.lock() = Some(core1_tx);
                     unsafe {
+                        KERNEL_CHANNEL_0TO1 = Some(core1_rx);
+                        KERNEL_CHANNEL_1TO0 = Some(core1_tx);
                         KERNEL_IMAGE = &kernel as *const KernelImage;
                         kernel.exec();
                         KERNEL_IMAGE = ptr::null();
+                        core1_rx = KERNEL_CHANNEL_0TO1.take().unwrap();
+                        core1_tx = KERNEL_CHANNEL_1TO0.take().unwrap();
                     }
-                    core1_rx = core::mem::replace(&mut *KERNEL_CHANNEL_0TO1.lock(), None).unwrap();
-                    core1_tx = core::mem::replace(&mut *KERNEL_CHANNEL_1TO0.lock(), None).unwrap();
                 }
                 info!("kernel finished");
                 core1_tx.send(Message::KernelFinished);
@@ -213,8 +213,8 @@ pub fn terminate(exception: &'static eh_artiq::Exception<'static>, backtrace: &'
     }
     {
-        let mut core1_tx = KERNEL_CHANNEL_1TO0.lock();
-        core1_tx.as_mut().unwrap().send(Message::KernelException(exception, &backtrace[..cursor]));
+        let core1_tx = unsafe { KERNEL_CHANNEL_1TO0.as_mut().unwrap() };
+        core1_tx.send(Message::KernelException(exception, &backtrace[..cursor]));
     }
     loop {}
 }


@@ -36,7 +36,9 @@ pub unsafe fn init_dma_recorder() {
 pub extern fn dma_record_start(name: CSlice<u8>) {
     let name = String::from_utf8(name.as_ref().to_vec()).unwrap();
-    KERNEL_CHANNEL_1TO0.lock().as_mut().unwrap().send(Message::DmaEraseRequest(name.clone()));
+    unsafe {
+        KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(Message::DmaEraseRequest(name.clone()));
+    }
     unsafe {
         if RECORDER.is_some() {
             artiq_raise!("DMAError", "DMA is already recording")
@@ -70,7 +72,7 @@ pub extern fn dma_record_stop(duration: i64) {
         let mut recorder = RECORDER.take().unwrap();
         recorder.duration = duration;
-        KERNEL_CHANNEL_1TO0.lock().as_mut().unwrap().send(
+        KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(
             Message::DmaPutRequest(recorder)
         );
     }
@@ -135,13 +137,17 @@ pub extern fn dma_record_output_wide(target: i32, words: CSlice<i32>) {
 pub extern fn dma_erase(name: CSlice<u8>) {
     let name = String::from_utf8(name.as_ref().to_vec()).unwrap();
-    KERNEL_CHANNEL_1TO0.lock().as_mut().unwrap().send(Message::DmaEraseRequest(name));
+    unsafe {
+        KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(Message::DmaEraseRequest(name));
+    }
 }

 pub extern fn dma_retrieve(name: CSlice<u8>) -> DmaTrace {
     let name = String::from_utf8(name.as_ref().to_vec()).unwrap();
-    KERNEL_CHANNEL_1TO0.lock().as_mut().unwrap().send(Message::DmaGetRequest(name));
-    match KERNEL_CHANNEL_0TO1.lock().as_mut().unwrap().recv() {
+    unsafe {
+        KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(Message::DmaGetRequest(name));
+    }
+    match unsafe { KERNEL_CHANNEL_0TO1.as_mut().unwrap() }.recv() {
         Message::DmaGetReply(None) => (),
         Message::DmaGetReply(Some((mut v, duration))) => {
             v.reserve(ALIGNMENT - 1);


@@ -49,8 +49,8 @@ pub enum Message {
 static CHANNEL_0TO1: Mutex<Option<sync_channel::Sender<'static, Message>>> = Mutex::new(None);
 static CHANNEL_1TO0: Mutex<Option<sync_channel::Receiver<'static, Message>>> = Mutex::new(None);
-static KERNEL_CHANNEL_0TO1: Mutex<Option<sync_channel::Receiver<'static, Message>>> = Mutex::new(None);
-static KERNEL_CHANNEL_1TO0: Mutex<Option<sync_channel::Sender<'static, Message>>> = Mutex::new(None);
+static mut KERNEL_CHANNEL_0TO1: Option<sync_channel::Receiver<'static, Message>> = None;
+static mut KERNEL_CHANNEL_1TO0: Option<sync_channel::Sender<'static, Message>> = None;
 static mut KERNEL_IMAGE: *const core1::KernelImage = ptr::null();


@@ -11,10 +11,10 @@ use super::{
 };

 fn rpc_send_common(is_async: bool, service: u32, tag: &CSlice<u8>, data: *const *const ()) {
-    let mut core1_tx = KERNEL_CHANNEL_1TO0.lock();
+    let core1_tx = unsafe { KERNEL_CHANNEL_1TO0.as_mut().unwrap() };
     let mut buffer = Vec::<u8>::new();
     send_args(&mut buffer, service, tag.as_ref(), data).expect("RPC encoding failed");
-    core1_tx.as_mut().unwrap().send(Message::RpcSend { is_async, data: buffer });
+    core1_tx.send(Message::RpcSend { is_async, data: buffer });
 }

 pub extern fn rpc_send(service: u32, tag: &CSlice<u8>, data: *const *const ()) {
@@ -26,11 +26,11 @@ pub extern fn rpc_send_async(service: u32, tag: &CSlice<u8>, data: *const *const
 }

 pub extern fn rpc_recv(slot: *mut ()) -> usize {
-    let reply = {
-        let mut core1_rx = KERNEL_CHANNEL_0TO1.lock();
-        let mut core1_tx = KERNEL_CHANNEL_1TO0.lock();
-        core1_tx.as_mut().unwrap().send(Message::RpcRecvRequest(slot));
-        core1_rx.as_mut().unwrap().recv()
+    let reply = unsafe {
+        let core1_rx = KERNEL_CHANNEL_0TO1.as_mut().unwrap();
+        let core1_tx = KERNEL_CHANNEL_1TO0.as_mut().unwrap();
+        core1_tx.send(Message::RpcRecvRequest(slot));
+        core1_rx.recv()
     };
     match reply {
         Message::RpcRecvReply(Ok(alloc_size)) => alloc_size,