From fa187fb37a553fce668400f83cdbd3eaeb9084a0 Mon Sep 17 00:00:00 2001
From: pca006132
Date: Mon, 24 Aug 2020 16:18:31 +0800
Subject: [PATCH] runtime/kernel: use mutable static for shared channel

Mutex would prevent restart if we failed while waiting for RPC.
---
 src/runtime/src/kernel/cache.rs | 26 +++++++++++++++-----------
 src/runtime/src/kernel/core1.rs | 12 ++++++------
 src/runtime/src/kernel/dma.rs   | 16 +++++++++++-----
 src/runtime/src/kernel/mod.rs   |  4 ++--
 src/runtime/src/kernel/rpc.rs   | 14 +++++++-------
 5 files changed, 41 insertions(+), 31 deletions(-)

diff --git a/src/runtime/src/kernel/cache.rs b/src/runtime/src/kernel/cache.rs
index 996cec3e..89df157b 100644
--- a/src/runtime/src/kernel/cache.rs
+++ b/src/runtime/src/kernel/cache.rs
@@ -5,22 +5,26 @@ use super::{KERNEL_CHANNEL_0TO1, KERNEL_CHANNEL_1TO0, Message};
 
 pub extern fn get(key: CSlice) -> CSlice<'static, i32> {
     let key = String::from_utf8(key.as_ref().to_vec()).unwrap();
-    KERNEL_CHANNEL_1TO0.lock().as_mut().unwrap().send(Message::CacheGetRequest(key));
-    let msg = KERNEL_CHANNEL_0TO1.lock().as_mut().unwrap().recv();
-    if let Message::CacheGetReply(v) = msg {
-        let slice = unsafe { transmute(v.as_c_slice()) };
-        // we intentionally leak the memory here,
-        // which does not matter as core1 would restart
-        forget(v);
-        slice
-    } else {
-        panic!("Expected CacheGetReply for CacheGetRequest");
+    unsafe {
+        KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(Message::CacheGetRequest(key));
+        let msg = KERNEL_CHANNEL_0TO1.as_mut().unwrap().recv();
+        if let Message::CacheGetReply(v) = msg {
+            let slice = transmute(v.as_c_slice());
+            // we intentionally leak the memory here,
+            // which does not matter as core1 would restart
+            forget(v);
+            slice
+        } else {
+            panic!("Expected CacheGetReply for CacheGetRequest");
+        }
     }
 }
 
 pub extern fn put(key: CSlice, list: CSlice) {
     let key = String::from_utf8(key.as_ref().to_vec()).unwrap();
     let value = list.as_ref().to_vec();
-    KERNEL_CHANNEL_1TO0.lock().as_mut().unwrap().send(Message::CachePutRequest(key, value));
+    unsafe {
+        KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(Message::CachePutRequest(key, value));
+    }
 }
 
diff --git a/src/runtime/src/kernel/core1.rs b/src/runtime/src/kernel/core1.rs
index 547ec346..557c5468 100644
--- a/src/runtime/src/kernel/core1.rs
+++ b/src/runtime/src/kernel/core1.rs
@@ -179,15 +179,15 @@ pub fn main_core1() {
             Message::StartRequest => {
                 info!("kernel starting");
                 if let Some(kernel) = loaded_kernel.take() {
-                    *KERNEL_CHANNEL_0TO1.lock() = Some(core1_rx);
-                    *KERNEL_CHANNEL_1TO0.lock() = Some(core1_tx);
                     unsafe {
+                        KERNEL_CHANNEL_0TO1 = Some(core1_rx);
+                        KERNEL_CHANNEL_1TO0 = Some(core1_tx);
                         KERNEL_IMAGE = &kernel as *const KernelImage;
                         kernel.exec();
                         KERNEL_IMAGE = ptr::null();
+                        core1_rx = KERNEL_CHANNEL_0TO1.take().unwrap();
+                        core1_tx = KERNEL_CHANNEL_1TO0.take().unwrap();
                     }
-                    core1_rx = core::mem::replace(&mut *KERNEL_CHANNEL_0TO1.lock(), None).unwrap();
-                    core1_tx = core::mem::replace(&mut *KERNEL_CHANNEL_1TO0.lock(), None).unwrap();
                 }
                 info!("kernel finished");
                 core1_tx.send(Message::KernelFinished);
@@ -213,8 +213,8 @@ pub fn terminate(exception: &'static eh_artiq::Exception<'static>, backtrace: &'
     }
 
     {
-        let mut core1_tx = KERNEL_CHANNEL_1TO0.lock();
-        core1_tx.as_mut().unwrap().send(Message::KernelException(exception, &backtrace[..cursor]));
+        let core1_tx = unsafe { KERNEL_CHANNEL_1TO0.as_mut().unwrap() };
+        core1_tx.send(Message::KernelException(exception, &backtrace[..cursor]));
     }
     loop {}
 }
diff --git a/src/runtime/src/kernel/dma.rs b/src/runtime/src/kernel/dma.rs
index 4d1859c6..6b4a2e2b 100644
--- a/src/runtime/src/kernel/dma.rs
+++ b/src/runtime/src/kernel/dma.rs
@@ -36,7 +36,9 @@ pub unsafe fn init_dma_recorder() {
 
 pub extern fn dma_record_start(name: CSlice) {
     let name = String::from_utf8(name.as_ref().to_vec()).unwrap();
-    KERNEL_CHANNEL_1TO0.lock().as_mut().unwrap().send(Message::DmaEraseRequest(name.clone()));
+    unsafe {
+        KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(Message::DmaEraseRequest(name.clone()));
+    }
     unsafe {
         if RECORDER.is_some() {
             artiq_raise!("DMAError", "DMA is already recording")
@@ -70,7 +72,7 @@ pub extern fn dma_record_stop(duration: i64) {
 
         let mut recorder = RECORDER.take().unwrap();
         recorder.duration = duration;
-        KERNEL_CHANNEL_1TO0.lock().as_mut().unwrap().send(
+        KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(
             Message::DmaPutRequest(recorder)
         );
     }
@@ -135,13 +137,17 @@ pub extern fn dma_record_output_wide(target: i32, words: CSlice) {
 
 pub extern fn dma_erase(name: CSlice) {
     let name = String::from_utf8(name.as_ref().to_vec()).unwrap();
-    KERNEL_CHANNEL_1TO0.lock().as_mut().unwrap().send(Message::DmaEraseRequest(name));
+    unsafe {
+        KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(Message::DmaEraseRequest(name));
+    }
 }
 
 pub extern fn dma_retrieve(name: CSlice) -> DmaTrace {
     let name = String::from_utf8(name.as_ref().to_vec()).unwrap();
-    KERNEL_CHANNEL_1TO0.lock().as_mut().unwrap().send(Message::DmaGetRequest(name));
-    match KERNEL_CHANNEL_0TO1.lock().as_mut().unwrap().recv() {
+    unsafe {
+        KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(Message::DmaGetRequest(name));
+    }
+    match unsafe {KERNEL_CHANNEL_0TO1.as_mut().unwrap()}.recv() {
         Message::DmaGetReply(None) => (),
         Message::DmaGetReply(Some((mut v, duration))) => {
             v.reserve(ALIGNMENT - 1);
diff --git a/src/runtime/src/kernel/mod.rs b/src/runtime/src/kernel/mod.rs
index ea3ecd54..e06a6e18 100644
--- a/src/runtime/src/kernel/mod.rs
+++ b/src/runtime/src/kernel/mod.rs
@@ -49,8 +49,8 @@ pub enum Message {
 
 static CHANNEL_0TO1: Mutex>> = Mutex::new(None);
 static CHANNEL_1TO0: Mutex>> = Mutex::new(None);
-static KERNEL_CHANNEL_0TO1: Mutex>> = Mutex::new(None);
-static KERNEL_CHANNEL_1TO0: Mutex>> = Mutex::new(None);
+static mut KERNEL_CHANNEL_0TO1: Option> = None;
+static mut KERNEL_CHANNEL_1TO0: Option> = None;
 
 static mut KERNEL_IMAGE: *const core1::KernelImage = ptr::null();
 
diff --git a/src/runtime/src/kernel/rpc.rs b/src/runtime/src/kernel/rpc.rs
index 31e77868..de0dff8c 100644
--- a/src/runtime/src/kernel/rpc.rs
+++ b/src/runtime/src/kernel/rpc.rs
@@ -11,10 +11,10 @@ use super::{
 };
 
 fn rpc_send_common(is_async: bool, service: u32, tag: &CSlice, data: *const *const ()) {
-    let mut core1_tx = KERNEL_CHANNEL_1TO0.lock();
+    let core1_tx = unsafe { KERNEL_CHANNEL_1TO0.as_mut().unwrap() };
     let mut buffer = Vec::::new();
     send_args(&mut buffer, service, tag.as_ref(), data).expect("RPC encoding failed");
-    core1_tx.as_mut().unwrap().send(Message::RpcSend { is_async, data: buffer });
+    core1_tx.send(Message::RpcSend { is_async, data: buffer });
 }
 
 pub extern fn rpc_send(service: u32, tag: &CSlice, data: *const *const ()) {
@@ -26,11 +26,11 @@ pub extern fn rpc_send_async(service: u32, tag: &CSlice, data: *const *const
 }
 
 pub extern fn rpc_recv(slot: *mut ()) -> usize {
-    let reply = {
-        let mut core1_rx = KERNEL_CHANNEL_0TO1.lock();
-        let mut core1_tx = KERNEL_CHANNEL_1TO0.lock();
-        core1_tx.as_mut().unwrap().send(Message::RpcRecvRequest(slot));
-        core1_rx.as_mut().unwrap().recv()
+    let reply = unsafe {
+        let core1_rx = KERNEL_CHANNEL_0TO1.as_mut().unwrap();
+        let core1_tx = KERNEL_CHANNEL_1TO0.as_mut().unwrap();
+        core1_tx.send(Message::RpcRecvRequest(slot));
+        core1_rx.recv()
     };
     match reply {
         Message::RpcRecvReply(Ok(alloc_size)) => alloc_size,