From 76f1318bc026007599a724411875a550eb819bdb Mon Sep 17 00:00:00 2001 From: "Hartmann Michael (IFAG PSS SIS SCE QSE)" Date: Fri, 6 Oct 2023 17:13:47 +0200 Subject: [PATCH 001/296] doc: Extend documentation Extend the paragraph "Pitfalls" in the documentation of "Compiler" by problems caused by returning values from the stack. --- doc/manual/compiler.rst | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/doc/manual/compiler.rst b/doc/manual/compiler.rst index 0a72b38f3..e4923dfa9 100644 --- a/doc/manual/compiler.rst +++ b/doc/manual/compiler.rst @@ -79,6 +79,43 @@ interpreter. Empty lists do not have valid list element types, so they cannot be used in the kernel. +In kernels, lifetime of allocated values (e.g. lists or numpy arrays) might not be correctly +tracked across function calls (see `#1497 `_, +`#1677 `_) like in this example :: + + @kernel + def func(a): + return a + + class ProblemReturn1(EnvExperiment): + def build(self): + self.setattr_device("core") + + @kernel + def run(self): + # results in memory corruption + return func([1, 2, 3]) +or if the return value is obfuscated by an if-statement like here: :: + + class ProblemReturn2(EnvExperiment): + def build(self): + self.setattr_device("core") + + @kernel + def meth(self): + # if statement for obfuscation + if self.core.get_rtio_counter_mu() % 2: + return np.array([1,2,3]) + else: + return np.array([4,5,6]) + + @kernel + def run(self): + # also results in memory corrption + return self.meth() + +This results in memory corruption at runtime. + Asynchronous RPCs ----------------- From b42816582ed7f6c7290f495f0c728f90daea8364 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 5 Oct 2023 14:39:19 +0800 Subject: [PATCH 002/296] ksupport: support subkernels --- artiq/build_soc.py | 1 - artiq/firmware/ksupport/api.rs | 5 ++ artiq/firmware/ksupport/eh_artiq.rs | 3 +- artiq/firmware/ksupport/lib.rs | 71 +++++++++++++++++-- artiq/firmware/libproto_artiq/kernel_proto.rs | 17 +++++ 5 files changed, 91 insertions(+), 6 deletions(-) diff --git a/artiq/build_soc.py b/artiq/build_soc.py index f51c8885a..0e6deb499 100644 --- a/artiq/build_soc.py +++ b/artiq/build_soc.py @@ -71,7 +71,6 @@ def build_artiq_soc(soc, argdict): if not soc.config["DRTIO_ROLE"] == "satellite": builder.add_software_package("runtime", os.path.join(firmware_dir, "runtime")) else: - # Assume DRTIO satellite. 
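        # The branch above builds the full "runtime" firmware for standalone and
        # DRTIO master systems; this branch builds "satman", the DRTIO satellite
        # manager, which a later patch in this series extends to run subkernels.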
builder.add_software_package("satman", os.path.join(firmware_dir, "satman")) try: builder.build() diff --git a/artiq/firmware/ksupport/api.rs b/artiq/firmware/ksupport/api.rs index 1682f818b..e4d7b2482 100644 --- a/artiq/firmware/ksupport/api.rs +++ b/artiq/firmware/ksupport/api.rs @@ -157,6 +157,11 @@ static mut API: &'static [(&'static str, *const ())] = &[ api!(dma_retrieve = ::dma_retrieve), api!(dma_playback = ::dma_playback), + api!(subkernel_load_run = ::subkernel_load_run), + api!(subkernel_send_message = ::subkernel_send_message), + api!(subkernel_await_message = ::subkernel_await_message), + api!(subkernel_await_finish = ::subkernel_await_finish), + api!(i2c_start = ::nrt_bus::i2c::start), api!(i2c_restart = ::nrt_bus::i2c::restart), api!(i2c_stop = ::nrt_bus::i2c::stop), diff --git a/artiq/firmware/ksupport/eh_artiq.rs b/artiq/firmware/ksupport/eh_artiq.rs index 19470fe70..80d999304 100644 --- a/artiq/firmware/ksupport/eh_artiq.rs +++ b/artiq/firmware/ksupport/eh_artiq.rs @@ -333,7 +333,7 @@ extern fn stop_fn(_version: c_int, } } -static EXCEPTION_ID_LOOKUP: [(&str, u32); 11] = [ +static EXCEPTION_ID_LOOKUP: [(&str, u32); 12] = [ ("RuntimeError", 0), ("RTIOUnderflow", 1), ("RTIOOverflow", 2), @@ -345,6 +345,7 @@ static EXCEPTION_ID_LOOKUP: [(&str, u32); 11] = [ ("ZeroDivisionError", 8), ("IndexError", 9), ("UnwrapNoneError", 10), + ("SubkernelError", 11) ]; pub fn get_exception_id(name: &str) -> u32 { diff --git a/artiq/firmware/ksupport/lib.rs b/artiq/firmware/ksupport/lib.rs index 685516e2b..53ea4574d 100644 --- a/artiq/firmware/ksupport/lib.rs +++ b/artiq/firmware/ksupport/lib.rs @@ -21,7 +21,6 @@ use dyld::Library; use board_artiq::{mailbox, rpc_queue}; use proto_artiq::{kernel_proto, rpc_proto}; use kernel_proto::*; -#[cfg(has_rtio_dma)] use board_misoc::csr; use riscv::register::{mcause, mepc, mtval}; @@ -396,7 +395,7 @@ extern fn dma_retrieve(name: &CSlice) -> DmaTrace { }) } -#[cfg(has_rtio_dma)] +#[cfg(kernel_has_rtio_dma)] #[unwind(allowed)] extern fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) { assert!(ptr % 64 == 0); @@ -454,10 +453,74 @@ extern fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) { } } -#[cfg(not(has_rtio_dma))] +#[cfg(not(kernel_has_rtio_dma))] #[unwind(allowed)] extern fn dma_playback(_timestamp: i64, _ptr: i32, _uses_ddma: bool) { - unimplemented!("not(has_rtio_dma)") + unimplemented!("not(kernel_has_rtio_dma)") +} + +#[unwind(allowed)] +extern fn subkernel_load_run(id: u32, run: bool) { + send(&SubkernelLoadRunRequest { id: id, run: run }); + recv!(&SubkernelLoadRunReply { succeeded } => { + if !succeeded { + raise!("SubkernelError", + "Error loading or running the subkernel"); + } + }); +} + +#[unwind(allowed)] +extern fn subkernel_await_finish(id: u32, timeout: u64) { + send(&SubkernelAwaitFinishRequest { id: id, timeout: timeout }); + recv!(SubkernelAwaitFinishReply { status } => { + match status { + SubkernelStatus::NoError => (), + SubkernelStatus::IncorrectState => raise!("SubkernelError", + "Subkernel not running"), + SubkernelStatus::Timeout => raise!("SubkernelError", + "Subkernel timed out"), + SubkernelStatus::CommLost => raise!("SubkernelError", + "Lost communication with satellite"), + SubkernelStatus::OtherError => raise!("SubkernelError", + "An error occurred during subkernel operation") + } + }) +} + +#[unwind(aborts)] +extern fn subkernel_send_message(id: u32, count: u8, tag: &CSlice, data: *const *const ()) { + send(&SubkernelMsgSend { + id: id, + count: count, + tag: tag.as_ref(), + data: data + }); +} + 
+#[unwind(allowed)] +extern fn subkernel_await_message(id: u32, timeout: u64, min: u8, max: u8) -> u8 { + send(&SubkernelMsgRecvRequest { id: id, timeout: timeout }); + recv!(SubkernelMsgRecvReply { status, count } => { + match status { + SubkernelStatus::NoError => { + if count < &min || count > &max { + raise!("SubkernelError", + "Received less or more arguments than expected"); + } + *count + } + SubkernelStatus::IncorrectState => raise!("SubkernelError", + "Subkernel not running"), + SubkernelStatus::Timeout => raise!("SubkernelError", + "Subkernel timed out"), + SubkernelStatus::CommLost => raise!("SubkernelError", + "Lost communication with satellite"), + SubkernelStatus::OtherError => raise!("SubkernelError", + "An error occurred during subkernel operation") + } + }) + // RpcRecvRequest should be called `count` times after this to receive message data } unsafe fn attribute_writeback(typeinfo: *const ()) { diff --git a/artiq/firmware/libproto_artiq/kernel_proto.rs b/artiq/firmware/libproto_artiq/kernel_proto.rs index 2ecb39c3b..51e619974 100644 --- a/artiq/firmware/libproto_artiq/kernel_proto.rs +++ b/artiq/firmware/libproto_artiq/kernel_proto.rs @@ -10,6 +10,15 @@ pub const KERNELCPU_LAST_ADDRESS: usize = 0x4fffffff; // section in ksupport.elf. pub const KSUPPORT_HEADER_SIZE: usize = 0x74; +#[derive(Debug)] +pub enum SubkernelStatus { + NoError, + Timeout, + IncorrectState, + CommLost, + OtherError +} + #[derive(Debug)] pub enum Message<'a> { LoadRequest(&'a [u8]), @@ -94,6 +103,14 @@ pub enum Message<'a> { SpiReadReply { succeeded: bool, data: u32 }, SpiBasicReply { succeeded: bool }, + SubkernelLoadRunRequest { id: u32, run: bool }, + SubkernelLoadRunReply { succeeded: bool }, + SubkernelAwaitFinishRequest { id: u32, timeout: u64 }, + SubkernelAwaitFinishReply { status: SubkernelStatus }, + SubkernelMsgSend { id: u32, count: u8, tag: &'a [u8], data: *const *const () }, + SubkernelMsgRecvRequest { id: u32, timeout: u64 }, + SubkernelMsgRecvReply { status: SubkernelStatus, count: u8 }, + Log(fmt::Arguments<'a>), LogSlice(&'a str) } From 6f4b8c641e265e7e5b4aba8a23b79b39eac2a598 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 5 Oct 2023 15:43:46 +0800 Subject: [PATCH 003/296] drtioaux_proto: use better payload names --- .../firmware/libproto_artiq/drtioaux_proto.rs | 15 ++++--- artiq/firmware/runtime/rtio_mgt.rs | 42 +++++++++++-------- artiq/firmware/satman/analyzer.rs | 6 +-- artiq/firmware/satman/main.rs | 5 +-- 4 files changed, 39 insertions(+), 29 deletions(-) diff --git a/artiq/firmware/libproto_artiq/drtioaux_proto.rs b/artiq/firmware/libproto_artiq/drtioaux_proto.rs index 94f26dca7..4a962f6fe 100644 --- a/artiq/firmware/libproto_artiq/drtioaux_proto.rs +++ b/artiq/firmware/libproto_artiq/drtioaux_proto.rs @@ -14,8 +14,11 @@ impl From> for Error { } } -pub const DMA_TRACE_MAX_SIZE: usize = /*max size*/512 - /*CRC*/4 - /*packet ID*/1 - /*trace ID*/4 - /*last*/1 -/*length*/2; -pub const ANALYZER_MAX_SIZE: usize = /*max size*/512 - /*CRC*/4 - /*packet ID*/1 - /*last*/1 - /*length*/2; +// maximum size of arbitrary payloads +// used by satellite -> master analyzer, subkernel exceptions +pub const SAT_PAYLOAD_MAX_SIZE: usize = /*max size*/512 - /*CRC*/4 - /*packet ID*/1 - /*last*/1 - /*length*/2; +// used by DDMA, subkernel program data (need to provide extra ID and destination) +pub const MASTER_PAYLOAD_MAX_SIZE: usize = SAT_PAYLOAD_MAX_SIZE - /*destination*/1 - /*ID*/4; #[derive(PartialEq, Debug)] pub enum Packet { @@ -61,9 +64,9 @@ pub enum Packet { AnalyzerHeaderRequest { 
destination: u8 }, AnalyzerHeader { sent_bytes: u32, total_byte_count: u64, overflow_occurred: bool }, AnalyzerDataRequest { destination: u8 }, - AnalyzerData { last: bool, length: u16, data: [u8; ANALYZER_MAX_SIZE]}, + AnalyzerData { last: bool, length: u16, data: [u8; SAT_PAYLOAD_MAX_SIZE]}, - DmaAddTraceRequest { destination: u8, id: u32, last: bool, length: u16, trace: [u8; DMA_TRACE_MAX_SIZE] }, + DmaAddTraceRequest { destination: u8, id: u32, last: bool, length: u16, trace: [u8; MASTER_PAYLOAD_MAX_SIZE] }, DmaAddTraceReply { succeeded: bool }, DmaRemoveTraceRequest { destination: u8, id: u32 }, DmaRemoveTraceReply { succeeded: bool }, @@ -215,7 +218,7 @@ impl Packet { 0xa3 => { let last = reader.read_bool()?; let length = reader.read_u16()?; - let mut data: [u8; ANALYZER_MAX_SIZE] = [0; ANALYZER_MAX_SIZE]; + let mut data: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE]; reader.read_exact(&mut data[0..length as usize])?; Packet::AnalyzerData { last: last, @@ -229,7 +232,7 @@ impl Packet { let id = reader.read_u32()?; let last = reader.read_bool()?; let length = reader.read_u16()?; - let mut trace: [u8; DMA_TRACE_MAX_SIZE] = [0; DMA_TRACE_MAX_SIZE]; + let mut trace: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; reader.read_exact(&mut trace[0..length as usize])?; Packet::DmaAddTraceRequest { destination: destination, diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index 3ead188b5..f643fafd5 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -17,7 +17,7 @@ pub mod drtio { use super::*; use alloc::vec::Vec; use drtioaux; - use proto_artiq::drtioaux_proto::DMA_TRACE_MAX_SIZE; + use proto_artiq::drtioaux_proto::MASTER_PAYLOAD_MAX_SIZE; use rtio_dma::remote_dma; #[cfg(has_rtio_analyzer)] use analyzer::remote_analyzer::RemoteBuffer; @@ -372,28 +372,36 @@ pub mod drtio { } } - pub fn ddma_upload_trace(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, - id: u32, destination: u8, trace: &Vec) -> Result<(), &'static str> { - let linkno = routing_table.0[destination as usize][0] - 1; + fn partition_data(data: &[u8], send_f: F) -> Result<(), &'static str> + where F: Fn(&[u8; MASTER_PAYLOAD_MAX_SIZE], bool, usize) -> Result<(), &'static str> { let mut i = 0; - while i < trace.len() { - let mut trace_slice: [u8; DMA_TRACE_MAX_SIZE] = [0; DMA_TRACE_MAX_SIZE]; - let len: usize = if i + DMA_TRACE_MAX_SIZE < trace.len() { DMA_TRACE_MAX_SIZE } else { trace.len() - i } as usize; - let last = i + len == trace.len(); - trace_slice[..len].clone_from_slice(&trace[i..i+len]); + while i < data.len() { + let mut slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; + let len: usize = if i + MASTER_PAYLOAD_MAX_SIZE < data.len() { MASTER_PAYLOAD_MAX_SIZE } else { data.len() - i } as usize; + let last = i + len == data.len(); + slice[..len].clone_from_slice(&data[i..i+len]); i += len; + send_f(&slice, last, len)?; + } + Ok(()) + } + + pub fn ddma_upload_trace(io: &Io, aux_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, + id: u32, destination: u8, trace: &[u8]) -> Result<(), &'static str> { + let linkno = routing_table.0[destination as usize][0] - 1; + partition_data(trace, |slice, last, len: usize| { let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::DmaAddTraceRequest { - id: id, destination: destination, last: last, length: len as u16, trace: trace_slice}); + id: id, destination: destination, last: last, length: len as u16, trace: *slice}); match 
reply { - Ok(drtioaux::Packet::DmaAddTraceReply { succeeded: true }) => (), - Ok(drtioaux::Packet::DmaAddTraceReply { succeeded: false }) => { - return Err("error adding trace on satellite"); }, - Ok(_) => { return Err("adding DMA trace failed, unexpected aux packet"); }, - Err(_) => { return Err("adding DMA trace failed, aux error"); } + Ok(drtioaux::Packet::DmaAddTraceReply { succeeded: true }) => Ok(()), + Ok(drtioaux::Packet::DmaAddTraceReply { succeeded: false }) => + Err("error adding trace on satellite"), + Ok(_) => Err("adding DMA trace failed, unexpected aux packet"), + Err(_) => Err("adding DMA trace failed, aux error") } - } - Ok(()) + }) } pub fn ddma_send_erase(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, diff --git a/artiq/firmware/satman/analyzer.rs b/artiq/firmware/satman/analyzer.rs index e2a43ba22..72f9e8c2b 100644 --- a/artiq/firmware/satman/analyzer.rs +++ b/artiq/firmware/satman/analyzer.rs @@ -1,6 +1,6 @@ use core::cmp::min; use board_misoc::{csr, cache}; -use proto_artiq::drtioaux_proto::ANALYZER_MAX_SIZE; +use proto_artiq::drtioaux_proto::SAT_PAYLOAD_MAX_SIZE; const BUFFER_SIZE: usize = 512 * 1024; @@ -86,10 +86,10 @@ impl Analyzer { } } - pub fn get_data(&mut self, data_slice: &mut [u8; ANALYZER_MAX_SIZE]) -> AnalyzerSliceMeta { + pub fn get_data(&mut self, data_slice: &mut [u8; SAT_PAYLOAD_MAX_SIZE]) -> AnalyzerSliceMeta { let data = unsafe { &BUFFER.data[..] }; let i = (self.data_pointer + self.sent_bytes) % BUFFER_SIZE; - let len = min(ANALYZER_MAX_SIZE, self.data_len - self.sent_bytes); + let len = min(SAT_PAYLOAD_MAX_SIZE, self.data_len - self.sent_bytes); let last = self.sent_bytes + len == self.data_len; if i + len >= BUFFER_SIZE { diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index c524483fc..4384e68d3 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -17,8 +17,7 @@ use board_artiq::si5324; use board_artiq::{spi, drtioaux}; #[cfg(soc_platform = "efc")] use board_artiq::ad9117; -use board_artiq::drtio_routing; -use proto_artiq::drtioaux_proto::ANALYZER_MAX_SIZE; +use proto_artiq::drtioaux_proto::{SAT_PAYLOAD_MAX_SIZE, MASTER_PAYLOAD_MAX_SIZE}; #[cfg(has_drtio_eem)] use board_artiq::drtio_eem; use riscv::register::{mcause, mepc, mtval}; @@ -328,7 +327,7 @@ fn process_aux_packet(_manager: &mut DmaManager, analyzer: &mut Analyzer, _repea drtioaux::Packet::AnalyzerDataRequest { destination: _destination } => { forward!(_routing_table, _destination, *_rank, _repeaters, &packet); - let mut data_slice: [u8; ANALYZER_MAX_SIZE] = [0; ANALYZER_MAX_SIZE]; + let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE]; let meta = analyzer.get_data(&mut data_slice); drtioaux::send(0, &drtioaux::Packet::AnalyzerData { last: meta.last, From e05be2f8e46caad2a94eba56eeacd5df078468e1 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 5 Oct 2023 15:48:25 +0800 Subject: [PATCH 004/296] runtime: support subkernels --- .../firmware/libproto_artiq/drtioaux_proto.rs | 127 ++++++- .../firmware/libproto_artiq/session_proto.rs | 7 + artiq/firmware/runtime/kernel.rs | 326 ++++++++++++++++++ artiq/firmware/runtime/main.rs | 8 +- artiq/firmware/runtime/rtio_mgt.rs | 122 ++++++- artiq/firmware/runtime/session.rs | 208 ++++++++++- 6 files changed, 760 insertions(+), 38 deletions(-) diff --git a/artiq/firmware/libproto_artiq/drtioaux_proto.rs b/artiq/firmware/libproto_artiq/drtioaux_proto.rs index 4a962f6fe..6ca4230eb 100644 --- a/artiq/firmware/libproto_artiq/drtioaux_proto.rs +++ 
b/artiq/firmware/libproto_artiq/drtioaux_proto.rs @@ -72,7 +72,17 @@ pub enum Packet { DmaRemoveTraceReply { succeeded: bool }, DmaPlaybackRequest { destination: u8, id: u32, timestamp: u64 }, DmaPlaybackReply { succeeded: bool }, - DmaPlaybackStatus { destination: u8, id: u32, error: u8, channel: u32, timestamp: u64 } + DmaPlaybackStatus { destination: u8, id: u32, error: u8, channel: u32, timestamp: u64 }, + + SubkernelAddDataRequest { destination: u8, id: u32, last: bool, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] }, + SubkernelAddDataReply { succeeded: bool }, + SubkernelLoadRunRequest { destination: u8, id: u32, run: bool }, + SubkernelLoadRunReply { succeeded: bool }, + SubkernelFinished { id: u32, with_exception: bool }, + SubkernelExceptionRequest { destination: u8 }, + SubkernelException { last: bool, length: u16, data: [u8; SAT_PAYLOAD_MAX_SIZE] }, + SubkernelMessage { destination: u8, id: u32, last: bool, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] }, + SubkernelMessageAck { destination: u8 }, } impl Packet { @@ -268,6 +278,69 @@ impl Packet { timestamp: reader.read_u64()? }, + 0xc0 => { + let destination = reader.read_u8()?; + let id = reader.read_u32()?; + let last = reader.read_bool()?; + let length = reader.read_u16()?; + let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; + reader.read_exact(&mut data[0..length as usize])?; + Packet::SubkernelAddDataRequest { + destination: destination, + id: id, + last: last, + length: length as u16, + data: data, + } + }, + 0xc1 => Packet::SubkernelAddDataReply { + succeeded: reader.read_bool()? + }, + 0xc4 => Packet::SubkernelLoadRunRequest { + destination: reader.read_u8()?, + id: reader.read_u32()?, + run: reader.read_bool()? + }, + 0xc5 => Packet::SubkernelLoadRunReply { + succeeded: reader.read_bool()? + }, + 0xc8 => Packet::SubkernelFinished { + id: reader.read_u32()?, + with_exception: reader.read_bool()?, + }, + 0xc9 => Packet::SubkernelExceptionRequest { + destination: reader.read_u8()? + }, + 0xca => { + let last = reader.read_bool()?; + let length = reader.read_u16()?; + let mut data: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE]; + reader.read_exact(&mut data[0..length as usize])?; + Packet::SubkernelException { + last: last, + length: length, + data: data + } + }, + 0xcb => { + let destination = reader.read_u8()?; + let id = reader.read_u32()?; + let last = reader.read_bool()?; + let length = reader.read_u16()?; + let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; + reader.read_exact(&mut data[0..length as usize])?; + Packet::SubkernelMessage { + destination: destination, + id: id, + last: last, + length: length as u16, + data: data, + } + }, + 0xcc => Packet::SubkernelMessageAck { + destination: reader.read_u8()? 
+ }, + ty => return Err(Error::UnknownPacket(ty)) }) } @@ -488,7 +561,57 @@ impl Packet { writer.write_u8(error)?; writer.write_u32(channel)?; writer.write_u64(timestamp)?; - } + }, + + Packet::SubkernelAddDataRequest { destination, id, last, data, length } => { + writer.write_u8(0xc0)?; + writer.write_u8(destination)?; + writer.write_u32(id)?; + writer.write_bool(last)?; + writer.write_u16(length)?; + writer.write_all(&data[0..length as usize])?; + }, + Packet::SubkernelAddDataReply { succeeded } => { + writer.write_u8(0xc1)?; + writer.write_bool(succeeded)?; + }, + Packet::SubkernelLoadRunRequest { destination, id, run } => { + writer.write_u8(0xc4)?; + writer.write_u8(destination)?; + writer.write_u32(id)?; + writer.write_bool(run)?; + }, + Packet::SubkernelLoadRunReply { succeeded } => { + writer.write_u8(0xc5)?; + writer.write_bool(succeeded)?; + }, + Packet::SubkernelFinished { id, with_exception } => { + writer.write_u8(0xc8)?; + writer.write_u32(id)?; + writer.write_bool(with_exception)?; + }, + Packet::SubkernelExceptionRequest { destination } => { + writer.write_u8(0xc9)?; + writer.write_u8(destination)?; + }, + Packet::SubkernelException { last, length, data } => { + writer.write_u8(0xca)?; + writer.write_bool(last)?; + writer.write_u16(length)?; + writer.write_all(&data[0..length as usize])?; + }, + Packet::SubkernelMessage { destination, id, last, data, length } => { + writer.write_u8(0xcb)?; + writer.write_u8(destination)?; + writer.write_u32(id)?; + writer.write_bool(last)?; + writer.write_u16(length)?; + writer.write_all(&data[0..length as usize])?; + }, + Packet::SubkernelMessageAck { destination } => { + writer.write_u8(0xcc)?; + writer.write_u8(destination)?; + }, } Ok(()) } diff --git a/artiq/firmware/libproto_artiq/session_proto.rs b/artiq/firmware/libproto_artiq/session_proto.rs index d4277c26c..d5266ec3d 100644 --- a/artiq/firmware/libproto_artiq/session_proto.rs +++ b/artiq/firmware/libproto_artiq/session_proto.rs @@ -84,6 +84,8 @@ pub enum Request { column: u32, function: u32, }, + + UploadSubkernel { id: u32, destination: u8, kernel: Vec }, } #[derive(Debug)] @@ -137,6 +139,11 @@ impl Request { column: reader.read_u32()?, function: reader.read_u32()? }, + 9 => Request::UploadSubkernel { + id: reader.read_u32()?, + destination: reader.read_u8()?, + kernel: reader.read_bytes()? 
+ }, ty => return Err(Error::UnknownPacket(ty)) }) diff --git a/artiq/firmware/runtime/kernel.rs b/artiq/firmware/runtime/kernel.rs index 42c1f2f05..e7a11bfc4 100644 --- a/artiq/firmware/runtime/kernel.rs +++ b/artiq/firmware/runtime/kernel.rs @@ -87,3 +87,329 @@ unsafe fn load_image(image: &[u8]) -> Result<(), &'static str> { pub fn validate(ptr: usize) -> bool { ptr >= KERNELCPU_EXEC_ADDRESS && ptr <= KERNELCPU_LAST_ADDRESS } + + +#[cfg(has_drtio)] +pub mod subkernel { + use alloc::{vec::Vec, collections::btree_map::BTreeMap, string::String, string::ToString}; + use core::str; + use board_artiq::drtio_routing::RoutingTable; + use board_misoc::clock; + use proto_artiq::{drtioaux_proto::MASTER_PAYLOAD_MAX_SIZE, rpc_proto as rpc}; + use io::Cursor; + use rtio_mgt::drtio; + use sched::{Io, Mutex, Error as SchedError}; + + #[derive(Debug, PartialEq, Clone, Copy)] + pub enum FinishStatus { + Ok, + CommLost, + Exception + } + + #[derive(Debug, PartialEq, Clone, Copy)] + pub enum SubkernelState { + NotLoaded, + Uploaded, + Running, + Finished { status: FinishStatus }, + } + + #[derive(Fail, Debug)] + pub enum Error { + #[fail(display = "Timed out waiting for subkernel")] + Timeout, + #[fail(display = "Session killed while waiting for subkernel")] + SessionKilled, + #[fail(display = "Subkernel is in incorrect state for the given operation")] + IncorrectState, + #[fail(display = "DRTIO error: {}", _0)] + DrtioError(String), + #[fail(display = "scheduler error")] + SchedError(SchedError), + #[fail(display = "rpc io error")] + RpcIoError, + #[fail(display = "subkernel finished prematurely")] + SubkernelFinished, + } + + impl From<&str> for Error { + fn from(value: &str) -> Error { + Error::DrtioError(value.to_string()) + } + } + + impl From for Error { + fn from(value: SchedError) -> Error { + match value { + SchedError::Interrupted => Error::SessionKilled, + x => Error::SchedError(x) + } + } + } + + impl From> for Error { + fn from(_value: io::Error) -> Error { + Error::RpcIoError + } + } + + pub struct SubkernelFinished { + pub id: u32, + pub comm_lost: bool, + pub exception: Option> + } + + struct Subkernel { + pub destination: u8, + pub data: Vec, + pub state: SubkernelState + } + + impl Subkernel { + pub fn new(destination: u8, data: Vec) -> Self { + Subkernel { + destination: destination, + data: data, + state: SubkernelState::NotLoaded + } + } + } + + static mut SUBKERNELS: BTreeMap = BTreeMap::new(); + + pub fn add_subkernel(io: &Io, subkernel_mutex: &Mutex, id: u32, destination: u8, kernel: Vec) { + let _lock = subkernel_mutex.lock(io).unwrap(); + unsafe { SUBKERNELS.insert(id, Subkernel::new(destination, kernel)); } + } + + pub fn upload(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &RoutingTable, id: u32) -> Result<(), Error> { + let _lock = subkernel_mutex.lock(io).unwrap(); + let subkernel = unsafe { SUBKERNELS.get_mut(&id).unwrap() }; + drtio::subkernel_upload(io, aux_mutex, routing_table, id, + subkernel.destination, &subkernel.data)?; + subkernel.state = SubkernelState::Uploaded; + Ok(()) + } + + pub fn load(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable, + id: u32, run: bool) -> Result<(), Error> { + let _lock = subkernel_mutex.lock(io).unwrap(); + let subkernel = unsafe { SUBKERNELS.get_mut(&id).unwrap() }; + if subkernel.state != SubkernelState::Uploaded { + return Err(Error::IncorrectState); + } + drtio::subkernel_load(io, aux_mutex, routing_table, id, subkernel.destination, run)?; + if run { + subkernel.state = 
SubkernelState::Running; + } + Ok(()) + } + + pub fn clear_subkernels(io: &Io, subkernel_mutex: &Mutex) { + let _lock = subkernel_mutex.lock(io).unwrap(); + unsafe { + SUBKERNELS = BTreeMap::new(); + MESSAGE_QUEUE = Vec::new(); + CURRENT_MESSAGES = BTreeMap::new(); + } + } + + pub fn subkernel_finished(io: &Io, subkernel_mutex: &Mutex, id: u32, with_exception: bool) { + // called upon receiving DRTIO SubkernelRunDone + let _lock = subkernel_mutex.lock(io).unwrap(); + let subkernel = unsafe { SUBKERNELS.get_mut(&id) }; + // may be None if session ends and is cleared + if let Some(subkernel) = subkernel { + subkernel.state = SubkernelState::Finished { + status: match with_exception { + true => FinishStatus::Exception, + false => FinishStatus::Ok, + } + } + } + } + + pub fn destination_changed(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &RoutingTable, destination: u8, up: bool) { + let _lock = subkernel_mutex.lock(io).unwrap(); + let subkernels_iter = unsafe { SUBKERNELS.iter_mut() }; + for (id, subkernel) in subkernels_iter { + if subkernel.destination == destination { + if up { + match drtio::subkernel_upload(io, aux_mutex, routing_table, *id, destination, &subkernel.data) + { + Ok(_) => subkernel.state = SubkernelState::Uploaded, + Err(e) => error!("Error adding subkernel on destination {}: {}", destination, e) + } + } else { + subkernel.state = match subkernel.state { + SubkernelState::Running => SubkernelState::Finished { status: FinishStatus::CommLost }, + _ => SubkernelState::NotLoaded, + } + } + } + } + } + + pub fn retrieve_finish_status(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &RoutingTable, id: u32) -> Result { + let _lock = subkernel_mutex.lock(io)?; + let mut subkernel = unsafe { SUBKERNELS.get_mut(&id).unwrap() }; + match subkernel.state { + SubkernelState::Finished { status } => { + subkernel.state = SubkernelState::Uploaded; + Ok(SubkernelFinished { + id: id, + comm_lost: status == FinishStatus::CommLost, + exception: if status == FinishStatus::Exception { + Some(drtio::subkernel_retrieve_exception(io, aux_mutex, + routing_table, subkernel.destination)?) + } else { None } + }) + }, + _ => Err(Error::IncorrectState) + } + } + + pub fn await_finish(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &RoutingTable, id: u32, timeout: u64) -> Result { + { + let _lock = subkernel_mutex.lock(io)?; + match unsafe { SUBKERNELS.get(&id).unwrap().state } { + SubkernelState::Running | SubkernelState::Finished { .. } => (), + _ => return Err(Error::IncorrectState) + } + } + let max_time = clock::get_ms() + timeout as u64; + let _res = io.until(|| { + if clock::get_ms() > max_time { + return true; + } + if subkernel_mutex.test_lock() { + // cannot lock again within io.until - scheduler guarantees + // that it will not be interrupted - so only test the lock + return false; + } + let subkernel = unsafe { SUBKERNELS.get(&id).unwrap() }; + match subkernel.state { + SubkernelState::Finished { .. 
} => true, + _ => false + } + })?; + if clock::get_ms() > max_time { + error!("Remote subkernel finish await timed out"); + return Err(Error::Timeout); + } + retrieve_finish_status(io, aux_mutex, subkernel_mutex, routing_table, id) + } + + pub struct Message { + from_id: u32, + pub tag_count: u8, + pub tag: u8, + pub data: Vec + } + + // FIFO queue of messages + static mut MESSAGE_QUEUE: Vec = Vec::new(); + // currently under construction message(s) (can be from multiple sources) + static mut CURRENT_MESSAGES: BTreeMap = BTreeMap::new(); + + pub fn message_handle_incoming(io: &Io, subkernel_mutex: &Mutex, + id: u32, last: bool, length: usize, data: &[u8; MASTER_PAYLOAD_MAX_SIZE]) { + // called when receiving a message from satellite + let _lock = match subkernel_mutex.lock(io) { + Ok(lock) => lock, + // may get interrupted, when session is cancelled or main kernel finishes without await + Err(_) => return, + }; + if unsafe { SUBKERNELS.get(&id).is_none() } { + // do not add messages for non-existing or deleted subkernels + return + } + match unsafe { CURRENT_MESSAGES.get_mut(&id) } { + Some(message) => message.data.extend(&data[..length]), + None => unsafe { + CURRENT_MESSAGES.insert(id, Message { + from_id: id, + tag_count: data[0], + tag: data[1], + data: data[2..length].to_vec() + }); + } + }; + if last { + unsafe { + // when done, remove from working queue + MESSAGE_QUEUE.push(CURRENT_MESSAGES.remove(&id).unwrap()); + }; + } + } + + pub fn message_await(io: &Io, subkernel_mutex: &Mutex, id: u32, timeout: u64 + ) -> Result { + { + let _lock = subkernel_mutex.lock(io)?; + match unsafe { SUBKERNELS.get(&id).unwrap().state } { + SubkernelState::Finished { .. } => return Err(Error::SubkernelFinished), + SubkernelState::Running => (), + _ => return Err(Error::IncorrectState) + } + } + let max_time = clock::get_ms() + timeout as u64; + let message = io.until_ok(|| { + if clock::get_ms() > max_time { + return Ok(None); + } + if subkernel_mutex.test_lock() { + return Err(()); + } + let msg_len = unsafe { MESSAGE_QUEUE.len() }; + for i in 0..msg_len { + let msg = unsafe { &MESSAGE_QUEUE[i] }; + if msg.from_id == id { + return Ok(Some(unsafe { MESSAGE_QUEUE.remove(i) })); + } + } + match unsafe { SUBKERNELS.get(&id).unwrap().state } { + SubkernelState::Finished { .. } => return Ok(None), + _ => () + } + Err(()) + }); + match message { + Ok(Some(message)) => Ok(message), + Ok(None) => { + if clock::get_ms() > max_time { + Err(Error::Timeout) + } else { + let _lock = subkernel_mutex.lock(io)?; + match unsafe { SUBKERNELS.get(&id).unwrap().state } { + SubkernelState::Finished { .. } => Err(Error::SubkernelFinished), + _ => Err(Error::IncorrectState) + } + } + } + Err(e) => Err(Error::SchedError(e)), + } + } + + pub fn message_send<'a>(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &RoutingTable, id: u32, count: u8, tag: &'a [u8], message: *const *const () + ) -> Result<(), Error> { + let mut writer = Cursor::new(Vec::new()); + let _lock = subkernel_mutex.lock(io).unwrap(); + let destination = unsafe { SUBKERNELS.get(&id).unwrap().destination }; + + // reuse rpc code for sending arbitrary data + rpc::send_args(&mut writer, 0, tag, message)?; + // skip service tag, but overwrite first byte with tag count + let data = &mut writer.into_inner()[3..]; + data[0] = count; + Ok(drtio::subkernel_send_message( + io, aux_mutex, routing_table, id, destination, data + )?) 
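        // Payload framing handed to drtio::subkernel_send_message, as assembled
        // above (a sketch; the value encoding itself is whatever rpc_proto emits):
        //   byte 0    - argument count, written over the byte send_args put there
        //   then      - one RPC type tag per argument, followed by encoded values
        // message_handle_incoming() above expects the same layout for messages
        // arriving from satellites (data[0] = tag count, data[1] = first tag).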
+ } +} \ No newline at end of file diff --git a/artiq/firmware/runtime/main.rs b/artiq/firmware/runtime/main.rs index 08367375a..f0970a73f 100644 --- a/artiq/firmware/runtime/main.rs +++ b/artiq/firmware/runtime/main.rs @@ -1,4 +1,4 @@ -#![feature(lang_items, panic_info_message, const_btree_new, iter_advance_by)] +#![feature(lang_items, panic_info_message, const_btree_new, iter_advance_by, never_type)] #![no_std] extern crate dyld; @@ -189,6 +189,7 @@ fn startup() { let aux_mutex = sched::Mutex::new(); let ddma_mutex = sched::Mutex::new(); + let subkernel_mutex = sched::Mutex::new(); let mut scheduler = sched::Scheduler::new(interface); let io = scheduler.io(); @@ -197,7 +198,7 @@ fn startup() { io.spawn(4096, dhcp::dhcp_thread); } - rtio_mgt::startup(&io, &aux_mutex, &drtio_routing_table, &up_destinations, &ddma_mutex); + rtio_mgt::startup(&io, &aux_mutex, &drtio_routing_table, &up_destinations, &ddma_mutex, &subkernel_mutex); io.spawn(4096, mgmt::thread); { @@ -205,7 +206,8 @@ fn startup() { let drtio_routing_table = drtio_routing_table.clone(); let up_destinations = up_destinations.clone(); let ddma_mutex = ddma_mutex.clone(); - io.spawn(16384, move |io| { session::thread(io, &aux_mutex, &drtio_routing_table, &up_destinations, &ddma_mutex) }); + let subkernel_mutex = subkernel_mutex.clone(); + io.spawn(32768, move |io| { session::thread(io, &aux_mutex, &drtio_routing_table, &up_destinations, &ddma_mutex, &subkernel_mutex) }); } #[cfg(any(has_rtio_moninj, has_drtio))] { diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index f643fafd5..ba6ba3865 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -21,18 +21,20 @@ pub mod drtio { use rtio_dma::remote_dma; #[cfg(has_rtio_analyzer)] use analyzer::remote_analyzer::RemoteBuffer; + use kernel::subkernel; pub fn startup(io: &Io, aux_mutex: &Mutex, routing_table: &Urc>, up_destinations: &Urc>, - ddma_mutex: &Mutex) { + ddma_mutex: &Mutex, subkernel_mutex: &Mutex) { let aux_mutex = aux_mutex.clone(); let routing_table = routing_table.clone(); let up_destinations = up_destinations.clone(); let ddma_mutex = ddma_mutex.clone(); + let subkernel_mutex = subkernel_mutex.clone(); io.spawn(8192, move |io| { let routing_table = routing_table.borrow(); - link_thread(io, &aux_mutex, &routing_table, &up_destinations, &ddma_mutex); + link_thread(io, &aux_mutex, &routing_table, &up_destinations, &ddma_mutex, &subkernel_mutex); }); } @@ -61,14 +63,26 @@ pub mod drtio { } } - fn process_async_packets(io: &Io, ddma_mutex: &Mutex, packet: drtioaux::Packet - ) -> Option { + fn process_async_packets(io: &Io, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, linkno: u8, + packet: drtioaux::Packet) -> Option { // returns None if an async packet has been consumed match packet { drtioaux::Packet::DmaPlaybackStatus { id, destination, error, channel, timestamp } => { remote_dma::playback_done(io, ddma_mutex, id, destination, error, channel, timestamp); None }, + drtioaux::Packet::SubkernelFinished { id, with_exception } => { + subkernel::subkernel_finished(io, subkernel_mutex, id, with_exception); + None + }, + drtioaux::Packet::SubkernelMessage { id, destination: from, last, length, data } => { + subkernel::message_handle_incoming(io, subkernel_mutex, id, last, length as usize, &data); + // acknowledge receiving part of the message + drtioaux::send(linkno, + &drtioaux::Packet::SubkernelMessageAck { destination: from } + ).unwrap(); + None + } other => Some(other) } } @@ -166,13 +180,14 @@ pub mod drtio 
{ } } - fn process_unsolicited_aux(io: &Io, aux_mutex: &Mutex, linkno: u8, ddma_mutex: &Mutex) { + fn process_unsolicited_aux(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, linkno: u8) { let _lock = aux_mutex.lock(io).unwrap(); match drtioaux::recv(linkno) { - Ok(Some(drtioaux::Packet::DmaPlaybackStatus { id, destination, error, channel, timestamp })) => { - remote_dma::playback_done(io, ddma_mutex, id, destination, error, channel, timestamp); + Ok(Some(packet)) => { + if let Some(packet) = process_async_packets(io, ddma_mutex, subkernel_mutex, linkno, packet) { + warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet); + } } - Ok(Some(packet)) => warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet), Ok(None) => (), Err(_) => warn!("[LINK#{}] aux packet error", linkno) } @@ -221,7 +236,7 @@ pub mod drtio { fn destination_survey(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, up_links: &[bool], up_destinations: &Urc>, - ddma_mutex: &Mutex) { + ddma_mutex: &Mutex, subkernel_mutex: &Mutex) { for destination in 0..drtio_routing::DEST_COUNT { let hop = routing_table.0[destination][0]; let destination = destination as u8; @@ -241,11 +256,12 @@ pub mod drtio { destination: destination }); if let Ok(reply) = reply { - let reply = process_async_packets(io, ddma_mutex, reply); + let reply = process_async_packets(io, ddma_mutex, subkernel_mutex, linkno, reply); match reply { Some(drtioaux::Packet::DestinationDownReply) => { destination_set_up(routing_table, up_destinations, destination, false); remote_dma::destination_changed(io, aux_mutex, ddma_mutex, routing_table, destination, false); + subkernel::destination_changed(io, aux_mutex, subkernel_mutex, routing_table, destination, false); } Some(drtioaux::Packet::DestinationOkReply) => (), Some(drtioaux::Packet::DestinationSequenceErrorReply { channel }) => { @@ -276,6 +292,7 @@ pub mod drtio { } else { destination_set_up(routing_table, up_destinations, destination, false); remote_dma::destination_changed(io, aux_mutex, ddma_mutex, routing_table, destination, false); + subkernel::destination_changed(io, aux_mutex, subkernel_mutex, routing_table, destination, false); } } else { if up_links[linkno as usize] { @@ -289,6 +306,7 @@ pub mod drtio { destination_set_up(routing_table, up_destinations, destination, true); init_buffer_space(destination as u8, linkno); remote_dma::destination_changed(io, aux_mutex, ddma_mutex, routing_table, destination, true); + subkernel::destination_changed(io, aux_mutex, subkernel_mutex, routing_table, destination, true); }, Ok(packet) => error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet), Err(e) => error!("[DEST#{}] communication failed ({})", destination, e) @@ -302,7 +320,7 @@ pub mod drtio { pub fn link_thread(io: Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, up_destinations: &Urc>, - ddma_mutex: &Mutex) { + ddma_mutex: &Mutex, subkernel_mutex: &Mutex) { let mut up_links = [false; csr::DRTIO.len()]; loop { for linkno in 0..csr::DRTIO.len() { @@ -310,7 +328,7 @@ pub mod drtio { if up_links[linkno as usize] { /* link was previously up */ if link_rx_up(linkno) { - process_unsolicited_aux(&io, aux_mutex, linkno, ddma_mutex); + process_unsolicited_aux(&io, aux_mutex, ddma_mutex, subkernel_mutex, linkno); process_local_errors(linkno); } else { info!("[LINK#{}] link is down", linkno); @@ -340,7 +358,7 @@ pub mod drtio { } } } - destination_survey(&io, aux_mutex, routing_table, &up_links, up_destinations, 
ddma_mutex); + destination_survey(&io, aux_mutex, routing_table, &up_links, up_destinations, ddma_mutex, subkernel_mutex); io.sleep(200).unwrap(); } } @@ -374,13 +392,13 @@ pub mod drtio { fn partition_data(data: &[u8], send_f: F) -> Result<(), &'static str> where F: Fn(&[u8; MASTER_PAYLOAD_MAX_SIZE], bool, usize) -> Result<(), &'static str> { - let mut i = 0; + let mut i = 0; while i < data.len() { let mut slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; let len: usize = if i + MASTER_PAYLOAD_MAX_SIZE < data.len() { MASTER_PAYLOAD_MAX_SIZE } else { data.len() - i } as usize; let last = i + len == data.len(); slice[..len].clone_from_slice(&data[i..i+len]); - i += len; + i += len; send_f(&slice, last, len)?; } Ok(()) @@ -483,6 +501,74 @@ pub mod drtio { Ok(remote_buffers) } + pub fn subkernel_upload(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, + id: u32, destination: u8, data: &Vec) -> Result<(), &'static str> { + let linkno = routing_table.0[destination as usize][0] - 1; + partition_data(data, |slice, last, len: usize| { + let reply = aux_transact(io, aux_mutex, linkno, + &drtioaux::Packet::SubkernelAddDataRequest { + id: id, destination: destination, last: last, length: len as u16, data: *slice}); + match reply { + Ok(drtioaux::Packet::SubkernelAddDataReply { succeeded: true }) => Ok(()), + Ok(drtioaux::Packet::SubkernelAddDataReply { succeeded: false }) => + Err("error adding subkernel on satellite"), + Ok(_) => Err("adding subkernel failed, unexpected aux packet"), + Err(_) => Err("adding subkernel failed, aux error") + } + }) + } + + pub fn subkernel_load(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, + id: u32, destination: u8, run: bool) -> Result<(), &'static str> { + let linkno = routing_table.0[destination as usize][0] - 1; + let reply = aux_transact(io, aux_mutex, linkno, + &drtioaux::Packet::SubkernelLoadRunRequest{ id: id, destination: destination, run: run }); + match reply { + Ok(drtioaux::Packet::SubkernelLoadRunReply { succeeded: true }) => return Ok(()), + Ok(drtioaux::Packet::SubkernelLoadRunReply { succeeded: false }) => + return Err("error on subkernel run request"), + Ok(_) => return Err("received unexpected aux packet during subkernel run"), + Err(_) => return Err("aux error on subkernel run") + } + } + + pub fn subkernel_retrieve_exception(io: &Io, aux_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, destination: u8 + ) -> Result, &'static str> { + let linkno = routing_table.0[destination as usize][0] - 1; + let mut remote_data: Vec = Vec::new(); + loop { + let reply = aux_transact(io, aux_mutex, linkno, + &drtioaux::Packet::SubkernelExceptionRequest { destination: destination }); + match reply { + Ok(drtioaux::Packet::SubkernelException { last, length, data }) => { + remote_data.extend(&data[0..length as usize]); + if last { + return Ok(remote_data); + } + }, + Ok(_) => return Err("received unexpected aux packet during subkernel exception request"), + Err(e) => return Err(e) + } + } + } + + pub fn subkernel_send_message(io: &Io, aux_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, id: u32, destination: u8, message: &[u8] + ) -> Result<(), &'static str> { + let linkno = routing_table.0[destination as usize][0] - 1; + partition_data(message, |slice, last, len: usize| { + let reply = aux_transact(io, aux_mutex, linkno, + &drtioaux::Packet::SubkernelMessage { + destination: destination, id: id, last: last, length: len as u16, data: *slice}); + match reply { + 
Ok(drtioaux::Packet::SubkernelMessageAck { .. }) => Ok(()), + Ok(_) => Err("sending message to subkernel failed, unexpected aux packet"), + Err(_) => Err("sending message to subkernel, aux error") + } + }) + } + } #[cfg(not(has_drtio))] @@ -492,7 +578,7 @@ pub mod drtio { pub fn startup(_io: &Io, _aux_mutex: &Mutex, _routing_table: &Urc>, _up_destinations: &Urc>, - _ddma_mutex: &Mutex) {} + _ddma_mutex: &Mutex, _subkernel_mutex: &Mutex) {} pub fn reset(_io: &Io, _aux_mutex: &Mutex) {} } @@ -556,9 +642,9 @@ fn read_device_map() -> DeviceMap { pub fn startup(io: &Io, aux_mutex: &Mutex, routing_table: &Urc>, up_destinations: &Urc>, - ddma_mutex: &Mutex) { + ddma_mutex: &Mutex, subkernel_mutex: &Mutex) { set_device_map(read_device_map()); - drtio::startup(io, aux_mutex, routing_table, up_destinations, ddma_mutex); + drtio::startup(io, aux_mutex, routing_table, up_destinations, ddma_mutex, subkernel_mutex); unsafe { csr::rtio_core::reset_phy_write(1); } diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index 64e015038..4f629f512 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -1,9 +1,11 @@ use core::{mem, str, cell::{Cell, RefCell}, fmt::Write as FmtWrite}; -use alloc::{vec::Vec, string::String}; +use alloc::{vec::Vec, string::{String, ToString}}; use byteorder::{ByteOrder, NativeEndian}; use cslice::CSlice; use io::{Read, Write, Error as IoError}; +#[cfg(has_drtio)] +use io::{Cursor, ProtoRead}; use board_misoc::{ident, cache, config}; use {mailbox, rpc_queue, kernel}; use urc::Urc; @@ -12,6 +14,8 @@ use rtio_clocking; use rtio_dma::Manager as DmaManager; #[cfg(has_drtio)] use rtio_dma::remote_dma; +#[cfg(has_drtio)] +use kernel::{subkernel, subkernel::Error as SubkernelError}; use rtio_mgt::get_async_errors; use cache::Cache; use kern_hwreq; @@ -33,6 +37,11 @@ pub enum Error { ClockFailure, #[fail(display = "protocol error: {}", _0)] Protocol(#[cause] host::Error), + #[fail(display = "subkernel io error")] + SubkernelIoError, + #[cfg(has_drtio)] + #[fail(display = "subkernel error: {}", _0)] + Subkernel(#[cause] SubkernelError), #[fail(display = "{}", _0)] Unexpected(String), } @@ -55,10 +64,42 @@ impl From> for Error { } } +impl From<&str> for Error { + fn from(value: &str) -> Error { + Error::Unexpected(value.to_string()) + } +} + +impl From> for Error { + fn from(_value: io::Error) -> Error { + Error::SubkernelIoError + } +} + +#[cfg(has_drtio)] +impl From for Error { + fn from(value: SubkernelError) -> Error { + Error::Subkernel(value) + } +} + macro_rules! unexpected { ($($arg:tt)*) => (return Err(Error::Unexpected(format!($($arg)*)))); } +#[cfg(has_drtio)] +macro_rules! 
propagate_subkernel_exception { + ( $exception:ident, $stream:ident ) => { + error!("Exception in subkernel"); + match $stream { + None => return Ok(true), + Some(ref mut $stream) => { + $stream.write_all($exception)?; + } + } + } +} + // Persistent state #[derive(Debug)] struct Congress { @@ -131,6 +172,8 @@ fn host_read(reader: &mut R) -> Result> let request = host::Request::read_from(reader)?; match &request { &host::Request::LoadKernel(_) => debug!("comm<-host LoadLibrary(...)"), + &host::Request::UploadSubkernel { id, destination, kernel: _} => debug!( + "comm<-host UploadSubkernel(id: {}, destination: {}, ...)", id, destination), _ => debug!("comm<-host {:?}", request) } Ok(request) @@ -233,8 +276,8 @@ fn kern_run(session: &mut Session) -> Result<(), Error> { kern_acknowledge() } -fn process_host_message(io: &Io, - stream: &mut TcpStream, +fn process_host_message(io: &Io, _aux_mutex: &Mutex, _ddma_mutex: &Mutex, _subkernel_mutex: &Mutex, + _routing_table: &drtio_routing::RoutingTable, stream: &mut TcpStream, session: &mut Session) -> Result<(), Error> { match host_read(stream)? { host::Request::SystemInfo => { @@ -245,7 +288,7 @@ fn process_host_message(io: &Io, session.congress.finished_cleanly.set(true) } - host::Request::LoadKernel(kernel) => + host::Request::LoadKernel(kernel) => { match unsafe { kern_load(io, session, &kernel) } { Ok(()) => host_write(stream, host::Reply::LoadCompleted)?, Err(error) => { @@ -254,7 +297,8 @@ fn process_host_message(io: &Io, host_write(stream, host::Reply::LoadFailed(&description))?; kern_acknowledge()?; } - }, + } + }, host::Request::RunKernel => match kern_run(session) { Ok(()) => (), @@ -323,6 +367,23 @@ fn process_host_message(io: &Io, session.kernel_state = KernelState::Running } + + host::Request::UploadSubkernel { id: _id, destination: _dest, kernel: _kernel } => { + #[cfg(has_drtio)] + { + subkernel::add_subkernel(io, _subkernel_mutex, _id, _dest, _kernel); + match subkernel::upload(io, _aux_mutex, _subkernel_mutex, _routing_table, _id) { + Ok(_) => host_write(stream, host::Reply::LoadCompleted)?, + Err(error) => { + let mut description = String::new(); + write!(&mut description, "{}", error).unwrap(); + host_write(stream, host::Reply::LoadFailed(&description))? + } + } + } + #[cfg(not(has_drtio))] + host_write(stream, host::Reply::LoadFailed("No DRTIO on this system, subkernels are not supported"))? 
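                // Upload path in summary: the host sends UploadSubkernel
                // (request type 9 carrying id, destination and the kernel
                // binary, see session_proto.rs above); the runtime registers
                // it via subkernel::add_subkernel and pushes it to the
                // destination satellite as partitioned SubkernelAddDataRequest
                // aux packets (subkernel_upload in rtio_mgt.rs), answering the
                // host with LoadCompleted or LoadFailed.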
+ } } Ok(()) @@ -331,7 +392,7 @@ fn process_host_message(io: &Io, fn process_kern_message(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, up_destinations: &Urc>, - ddma_mutex: &Mutex, mut stream: Option<&mut TcpStream>, + ddma_mutex: &Mutex, _subkernel_mutex: &Mutex, mut stream: Option<&mut TcpStream>, session: &mut Session) -> Result> { kern_recv_notrace(io, |request| { match (request, session.kernel_state) { @@ -510,6 +571,111 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, } } } + #[cfg(has_drtio)] + &kern::SubkernelLoadRunRequest { id, run } => { + let succeeded = match subkernel::load( + io, aux_mutex, _subkernel_mutex, routing_table, id, run) { + Ok(()) => true, + Err(e) => { error!("Error loading subkernel: {}", e); false } + }; + kern_send(io, &kern::SubkernelLoadRunReply { succeeded: succeeded }) + } + #[cfg(has_drtio)] + &kern::SubkernelAwaitFinishRequest{ id, timeout } => { + let res = subkernel::await_finish(io, aux_mutex, _subkernel_mutex, routing_table, + id, timeout); + let status = match res { + Ok(ref res) => { + if res.comm_lost { + kern::SubkernelStatus::CommLost + } else if let Some(exception) = &res.exception { + propagate_subkernel_exception!(exception, stream); + // will not be called after exception is served + kern::SubkernelStatus::OtherError + } else { + kern::SubkernelStatus::NoError + } + }, + Err(SubkernelError::Timeout) => kern::SubkernelStatus::Timeout, + Err(SubkernelError::IncorrectState) => kern::SubkernelStatus::IncorrectState, + Err(_) => kern::SubkernelStatus::OtherError + }; + kern_send(io, &kern::SubkernelAwaitFinishReply { status: status }) + } + #[cfg(has_drtio)] + &kern::SubkernelMsgSend { id, count, tag, data } => { + subkernel::message_send(io, aux_mutex, _subkernel_mutex, routing_table, id, count, tag, data)?; + kern_acknowledge() + } + #[cfg(has_drtio)] + &kern::SubkernelMsgRecvRequest { id, timeout } => { + let message_received = subkernel::message_await(io, _subkernel_mutex, id, timeout); + let (status, count) = match message_received { + Ok(ref message) => (kern::SubkernelStatus::NoError, message.tag_count), + Err(SubkernelError::Timeout) => (kern::SubkernelStatus::Timeout, 0), + Err(SubkernelError::IncorrectState) => (kern::SubkernelStatus::IncorrectState, 0), + Err(SubkernelError::SubkernelFinished) => { + let res = subkernel::retrieve_finish_status(io, aux_mutex, _subkernel_mutex, + routing_table, id)?; + if res.comm_lost { + (kern::SubkernelStatus::CommLost, 0) + } else if let Some(exception) = &res.exception { + propagate_subkernel_exception!(exception, stream); + (kern::SubkernelStatus::OtherError, 0) + } else { + (kern::SubkernelStatus::OtherError, 0) + } + } + Err(_) => (kern::SubkernelStatus::OtherError, 0) + }; + kern_send(io, &kern::SubkernelMsgRecvReply { status: status, count: count })?; + if let Ok(message) = message_received { + // receive code almost identical to RPC recv, except we are not reading from a stream + let mut reader = Cursor::new(message.data); + let mut tag: [u8; 1] = [message.tag]; + let mut i = 0; + loop { + // kernel has to consume all arguments in the whole message + let slot = kern_recv(io, |reply| { + match reply { + &kern::RpcRecvRequest(slot) => Ok(slot), + other => unexpected!( + "expected root value slot from kernel CPU, not {:?}", other) + } + })?; + let res = rpc::recv_return(&mut reader, &tag, slot, &|size| -> Result<_, Error> { + if size == 0 { + return Ok(0 as *mut ()) + } + kern_send(io, &kern::RpcRecvReply(Ok(size)))?; + Ok(kern_recv(io, |reply| { + match reply { 
+ &kern::RpcRecvRequest(slot) => Ok(slot), + other => unexpected!( + "expected nested value slot from kernel CPU, not {:?}", other) + } + })?) + }); + match res { + Ok(_) => kern_send(io, &kern::RpcRecvReply(Ok(0)))?, + Err(_) => unexpected!("expected valid subkernel message data") + }; + i += 1; + if i < message.tag_count { + // update the tag for next read + tag[0] = reader.read_u8()?; + } else { + // should be done by then + break; + } + } + Ok(()) + } else { + // if timed out, no data has been received, exception should be raised by kernel + Ok(()) + } + }, + request => unexpected!("unexpected request {:?} from kernel CPU", request) }.and(Ok(false)) }) @@ -530,13 +696,17 @@ fn process_kern_queued_rpc(stream: &mut TcpStream, fn host_kernel_worker(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, up_destinations: &Urc>, - ddma_mutex: &Mutex, stream: &mut TcpStream, + ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + stream: &mut TcpStream, congress: &mut Congress) -> Result<(), Error> { let mut session = Session::new(congress); + #[cfg(has_drtio)] + subkernel::clear_subkernels(&io, &subkernel_mutex); loop { if stream.can_recv() { - process_host_message(io, stream, &mut session)? + process_host_message(io, aux_mutex, ddma_mutex, subkernel_mutex, + routing_table, stream, &mut session)? } else if !stream.may_recv() { return Ok(()) } @@ -548,7 +718,7 @@ fn host_kernel_worker(io: &Io, aux_mutex: &Mutex, if mailbox::receive() != 0 { process_kern_message(io, aux_mutex, routing_table, up_destinations, - ddma_mutex, + ddma_mutex, subkernel_mutex, Some(stream), &mut session)?; } @@ -566,7 +736,7 @@ fn host_kernel_worker(io: &Io, aux_mutex: &Mutex, fn flash_kernel_worker(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, up_destinations: &Urc>, - ddma_mutex: &Mutex, congress: &mut Congress, + ddma_mutex: &Mutex, subkernel_mutex: &Mutex, congress: &mut Congress, config_key: &str) -> Result<(), Error> { let mut session = Session::new(congress); @@ -588,7 +758,7 @@ fn flash_kernel_worker(io: &Io, aux_mutex: &Mutex, } if mailbox::receive() != 0 { - if process_kern_message(io, aux_mutex, routing_table, up_destinations, ddma_mutex, None, &mut session)? { + if process_kern_message(io, aux_mutex, routing_table, up_destinations, ddma_mutex, subkernel_mutex, None, &mut session)? 
{ return Ok(()) } } @@ -619,7 +789,7 @@ fn respawn(io: &Io, handle: &mut Option, f: F) pub fn thread(io: Io, aux_mutex: &Mutex, routing_table: &Urc>, up_destinations: &Urc>, - ddma_mutex: &Mutex) { + ddma_mutex: &Mutex, subkernel_mutex: &Mutex) { let listener = TcpListener::new(&io, 65535); listener.listen(1381).expect("session: cannot listen"); info!("accepting network sessions"); @@ -628,9 +798,11 @@ pub fn thread(io: Io, aux_mutex: &Mutex, let mut kernel_thread = None; { + let routing_table = routing_table.borrow(); let mut congress = congress.borrow_mut(); info!("running startup kernel"); - match flash_kernel_worker(&io, &aux_mutex, &routing_table.borrow(), &up_destinations, ddma_mutex, &mut congress, "startup_kernel") { + match flash_kernel_worker(&io, &aux_mutex, &routing_table, &up_destinations, + ddma_mutex, subkernel_mutex, &mut congress, "startup_kernel") { Ok(()) => info!("startup kernel finished"), Err(Error::KernelNotFound) => @@ -671,12 +843,14 @@ pub fn thread(io: Io, aux_mutex: &Mutex, let up_destinations = up_destinations.clone(); let congress = congress.clone(); let ddma_mutex = ddma_mutex.clone(); + let subkernel_mutex = subkernel_mutex.clone(); let stream = stream.into_handle(); respawn(&io, &mut kernel_thread, move |io| { let routing_table = routing_table.borrow(); let mut congress = congress.borrow_mut(); let mut stream = TcpStream::from_handle(&io, stream); - match host_kernel_worker(&io, &aux_mutex, &routing_table, &up_destinations, &ddma_mutex, &mut stream, &mut *congress) { + match host_kernel_worker(&io, &aux_mutex, &routing_table, &up_destinations, + &ddma_mutex, &subkernel_mutex, &mut stream, &mut *congress) { Ok(()) => (), Err(Error::Protocol(host::Error::Io(IoError::UnexpectedEnd))) => info!("connection closed"), @@ -689,6 +863,8 @@ pub fn thread(io: Io, aux_mutex: &Mutex, } } stream.close().expect("session: close socket"); + #[cfg(has_drtio)] + subkernel::clear_subkernels(&io, &subkernel_mutex); }); } @@ -700,10 +876,12 @@ pub fn thread(io: Io, aux_mutex: &Mutex, let up_destinations = up_destinations.clone(); let congress = congress.clone(); let ddma_mutex = ddma_mutex.clone(); + let subkernel_mutex = subkernel_mutex.clone(); respawn(&io, &mut kernel_thread, move |io| { let routing_table = routing_table.borrow(); let mut congress = congress.borrow_mut(); - match flash_kernel_worker(&io, &aux_mutex, &routing_table, &up_destinations, &ddma_mutex, &mut *congress, "idle_kernel") { + match flash_kernel_worker(&io, &aux_mutex, &routing_table, &up_destinations, + &ddma_mutex, &subkernel_mutex, &mut *congress, "idle_kernel") { Ok(()) => info!("idle kernel finished, standing by"), Err(Error::Protocol(host::Error::Io( From 1a0fc317dfa819b9e26d3fd4542ff788a60e51b9 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 5 Oct 2023 15:48:45 +0800 Subject: [PATCH 005/296] satman: support subkernels --- artiq/firmware/Cargo.lock | 3 + artiq/firmware/satman/Cargo.toml | 3 + artiq/firmware/satman/Makefile | 17 +- artiq/firmware/satman/cache.rs | 83 ++++ artiq/firmware/satman/dma.rs | 16 +- artiq/firmware/satman/kernel.rs | 823 +++++++++++++++++++++++++++++++ artiq/firmware/satman/main.rs | 177 +++++-- artiq/firmware/satman/satman.ld | 17 +- 8 files changed, 1083 insertions(+), 56 deletions(-) create mode 100644 artiq/firmware/satman/cache.rs create mode 100644 artiq/firmware/satman/kernel.rs diff --git a/artiq/firmware/Cargo.lock b/artiq/firmware/Cargo.lock index f4bd9b88a..80933a416 100644 --- a/artiq/firmware/Cargo.lock +++ b/artiq/firmware/Cargo.lock @@ -352,6 +352,9 @@ 
dependencies = [ "board_artiq", "board_misoc", "build_misoc", + "cslice", + "eh", + "io", "log", "proto_artiq", "riscv", diff --git a/artiq/firmware/satman/Cargo.toml b/artiq/firmware/satman/Cargo.toml index 20dec311f..f8016f576 100644 --- a/artiq/firmware/satman/Cargo.toml +++ b/artiq/firmware/satman/Cargo.toml @@ -14,8 +14,11 @@ build_misoc = { path = "../libbuild_misoc" } [dependencies] log = { version = "0.4", default-features = false } +io = { path = "../libio", features = ["byteorder", "alloc"] } +cslice = { version = "0.3" } board_misoc = { path = "../libboard_misoc", features = ["uart_console", "log"] } board_artiq = { path = "../libboard_artiq", features = ["alloc"] } alloc_list = { path = "../liballoc_list" } riscv = { version = "0.6.0", features = ["inline-asm"] } proto_artiq = { path = "../libproto_artiq", features = ["log", "alloc"] } +eh = { path = "../libeh" } \ No newline at end of file diff --git a/artiq/firmware/satman/Makefile b/artiq/firmware/satman/Makefile index 82e65d730..55befda95 100644 --- a/artiq/firmware/satman/Makefile +++ b/artiq/firmware/satman/Makefile @@ -1,9 +1,14 @@ include ../include/generated/variables.mak include $(MISOC_DIRECTORY)/software/common.mak -LDFLAGS += -L../libbase +CFLAGS += \ + -I$(LIBUNWIND_DIRECTORY) \ + -I$(LIBUNWIND_DIRECTORY)/../unwinder/include -RUSTFLAGS += -Cpanic=abort +LDFLAGS += \ + -L../libunwind + +RUSTFLAGS += -Cpanic=unwind export XBUILD_SYSROOT_PATH=$(BUILDINC_DIRECTORY)/../sysroot @@ -15,8 +20,12 @@ $(RUSTOUT)/libsatman.a: --manifest-path $(SATMAN_DIRECTORY)/Cargo.toml \ --target $(SATMAN_DIRECTORY)/../$(CARGO_TRIPLE).json -satman.elf: $(RUSTOUT)/libsatman.a - $(link) -T $(SATMAN_DIRECTORY)/satman.ld +satman.elf: $(RUSTOUT)/libsatman.a ksupport_data.o + $(link) -T $(SATMAN_DIRECTORY)/satman.ld \ + -lunwind-vexriscv-bare -m elf32lriscv + +ksupport_data.o: ../ksupport/ksupport.elf + $(LD) -r -m elf32lriscv -b binary -o $@ $< %.bin: %.elf $(objcopy) -O binary diff --git a/artiq/firmware/satman/cache.rs b/artiq/firmware/satman/cache.rs new file mode 100644 index 000000000..fa73364cb --- /dev/null +++ b/artiq/firmware/satman/cache.rs @@ -0,0 +1,83 @@ +use alloc::{vec, vec::Vec, string::String, collections::btree_map::BTreeMap}; +use cslice::{CSlice, AsCSlice}; +use core::mem::transmute; + +struct Entry { + data: Vec, + slice: CSlice<'static, i32>, + borrowed: bool +} + +impl core::fmt::Debug for Entry { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Entry") + .field("data", &self.data) + .field("borrowed", &self.borrowed) + .finish() + } +} + +pub struct Cache { + entries: BTreeMap, + empty: CSlice<'static, i32>, +} + +impl core::fmt::Debug for Cache { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Cache") + .field("entries", &self.entries) + .finish() + } +} + +impl Cache { + pub fn new() -> Cache { + let empty_vec = vec![]; + let empty = unsafe { + transmute::, CSlice<'static, i32>>(empty_vec.as_c_slice()) + }; + Cache { entries: BTreeMap::new(), empty } + } + + pub fn get(&mut self, key: &str) -> *const CSlice<'static, i32> { + match self.entries.get_mut(key) { + None => &self.empty, + Some(ref mut entry) => { + entry.borrowed = true; + &entry.slice + } + } + } + + pub fn put(&mut self, key: &str, data: &[i32]) -> Result<(), ()> { + match self.entries.get_mut(key) { + None => (), + Some(ref mut entry) => { + if entry.borrowed { return Err(()) } + entry.data = Vec::from(data); + unsafe { + entry.slice = transmute::, CSlice<'static, i32>>( 
+ entry.data.as_c_slice()); + } + return Ok(()) + } + } + + let data = Vec::from(data); + let slice = unsafe { + transmute::, CSlice<'static, i32>>(data.as_c_slice()) + }; + self.entries.insert(String::from(key), Entry { + data, + slice, + borrowed: false + }); + Ok(()) + } + + pub unsafe fn unborrow(&mut self) { + for (_key, entry) in self.entries.iter_mut() { + entry.borrowed = false; + } + } +} diff --git a/artiq/firmware/satman/dma.rs b/artiq/firmware/satman/dma.rs index 133bfd3c0..34dcaf475 100644 --- a/artiq/firmware/satman/dma.rs +++ b/artiq/firmware/satman/dma.rs @@ -1,5 +1,6 @@ use board_misoc::{csr, cache::flush_l2_cache}; use alloc::{vec::Vec, collections::btree_map::BTreeMap}; +use ::{cricon_select, RtioMaster}; const ALIGNMENT: usize = 64; @@ -126,14 +127,14 @@ impl Manager { csr::rtio_dma::base_address_write(ptr as u64); csr::rtio_dma::time_offset_write(timestamp as u64); - csr::cri_con::selected_write(1); + cricon_select(RtioMaster::Dma); csr::rtio_dma::enable_write(1); // playback has begun here, for status call check_state } Ok(()) } - pub fn check_state(&mut self) -> Option { + pub fn get_status(&mut self) -> Option { if self.state != ManagerState::Playback { // nothing to report return None; @@ -141,12 +142,11 @@ impl Manager { let dma_enable = unsafe { csr::rtio_dma::enable_read() }; if dma_enable != 0 { return None; - } - else { + } else { self.state = ManagerState::Idle; unsafe { - csr::cri_con::selected_write(0); - let error = csr::rtio_dma::error_read(); + cricon_select(RtioMaster::Drtio); + let error = csr::rtio_dma::error_read(); let channel = csr::rtio_dma::error_channel_read(); let timestamp = csr::rtio_dma::error_timestamp_read(); if error != 0 { @@ -161,4 +161,8 @@ impl Manager { } } + pub fn running(&self) -> bool { + self.state == ManagerState::Playback + } + } \ No newline at end of file diff --git a/artiq/firmware/satman/kernel.rs b/artiq/firmware/satman/kernel.rs new file mode 100644 index 000000000..cc401a134 --- /dev/null +++ b/artiq/firmware/satman/kernel.rs @@ -0,0 +1,823 @@ +use core::{mem, option::NoneError, cmp::min}; +use alloc::{string::String, format, vec::Vec, collections::{btree_map::BTreeMap, vec_deque::VecDeque}}; +use cslice::AsCSlice; + +use board_artiq::{mailbox, spi}; +use board_misoc::{csr, clock, i2c}; +use proto_artiq::{kernel_proto as kern, session_proto::Reply::KernelException as HostKernelException, rpc_proto as rpc}; +use eh::eh_artiq; +use io::{Cursor, ProtoRead}; +use kernel::eh_artiq::StackPointerBacktrace; + +use ::{cricon_select, RtioMaster}; +use cache::Cache; +use SAT_PAYLOAD_MAX_SIZE; +use MASTER_PAYLOAD_MAX_SIZE; + +mod kernel_cpu { + use super::*; + use core::ptr; + + use proto_artiq::kernel_proto::{KERNELCPU_EXEC_ADDRESS, KERNELCPU_LAST_ADDRESS, KSUPPORT_HEADER_SIZE}; + + pub unsafe fn start() { + if csr::kernel_cpu::reset_read() == 0 { + panic!("attempted to start kernel CPU when it is already running") + } + + stop(); + + extern { + static _binary____ksupport_ksupport_elf_start: u8; + static _binary____ksupport_ksupport_elf_end: u8; + } + let ksupport_start = &_binary____ksupport_ksupport_elf_start as *const _; + let ksupport_end = &_binary____ksupport_ksupport_elf_end as *const _; + ptr::copy_nonoverlapping(ksupport_start, + (KERNELCPU_EXEC_ADDRESS - KSUPPORT_HEADER_SIZE) as *mut u8, + ksupport_end as usize - ksupport_start as usize); + + csr::kernel_cpu::reset_write(0); + } + + pub unsafe fn stop() { + csr::kernel_cpu::reset_write(1); + cricon_select(RtioMaster::Drtio); + + mailbox::acknowledge(); + } + + pub fn 
validate(ptr: usize) -> bool {
+        ptr >= KERNELCPU_EXEC_ADDRESS && ptr <= KERNELCPU_LAST_ADDRESS
+    }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+enum KernelState {
+    Absent,
+    Loaded,
+    Running,
+    MsgAwait { max_time: u64 },
+    MsgSending
+}
+
+#[derive(Debug)]
+pub enum Error {
+    Load(String),
+    KernelNotFound,
+    InvalidPointer(usize),
+    Unexpected(String),
+    NoMessage,
+    AwaitingMessage,
+    SubkernelIoError,
+    KernelException(Sliceable)
+}
+
+impl From<NoneError> for Error {
+    fn from(_: NoneError) -> Error {
+        Error::KernelNotFound
+    }
+}
+
+impl From<io::Error<!>> for Error {
+    fn from(_value: io::Error<!>) -> Error {
+        Error::SubkernelIoError
+    }
+}
+
+macro_rules! unexpected {
+    ($($arg:tt)*) => (return Err(Error::Unexpected(format!($($arg)*))));
+}
+
+/* represents data that has to be sent to Master */
+#[derive(Debug)]
+pub struct Sliceable {
+    it: usize,
+    data: Vec<u8>
+}
+
+/* represents interkernel messages */
+struct Message {
+    count: u8,
+    tag: u8,
+    data: Vec<u8>
+}
+
+#[derive(PartialEq)]
+enum OutMessageState {
+    NoMessage,
+    MessageReady,
+    MessageBeingSent,
+    MessageSent,
+    MessageAcknowledged
+}
+
+/* for dealing with incoming and outgoing interkernel messages */
+struct MessageManager {
+    out_message: Option<Sliceable>,
+    out_state: OutMessageState,
+    in_queue: VecDeque<Message>,
+    in_buffer: Option<Message>,
+}
+
+// Per-run state
+struct Session {
+    kernel_state: KernelState,
+    log_buffer: String,
+    last_exception: Option<Sliceable>,
+    messages: MessageManager
+}
+
+#[derive(Debug)]
+struct KernelLibrary {
+    library: Vec<u8>,
+    complete: bool
+}
+
+pub struct Manager {
+    kernels: BTreeMap<u32, KernelLibrary>,
+    current_id: u32,
+    session: Session,
+    cache: Cache,
+    last_finished: Option<SubkernelFinished>
+}
+
+pub struct SubkernelFinished {
+    pub id: u32,
+    pub with_exception: bool
+}
+
+pub struct SliceMeta {
+    pub len: u16,
+    pub last: bool
+}
+
+macro_rules!
get_slice_fn { + ( $name:tt, $size:expr ) => { + pub fn $name(&mut self, data_slice: &mut [u8; $size]) -> SliceMeta { + if self.data.len() == 0 { + return SliceMeta { len: 0, last: true }; + } + let len = min($size, self.data.len() - self.it); + let last = self.it + len == self.data.len(); + + data_slice[..len].clone_from_slice(&self.data[self.it..self.it+len]); + self.it += len; + + SliceMeta { + len: len as u16, + last: last + } + } + }; +} + +impl Sliceable { + pub fn new(data: Vec) -> Sliceable { + Sliceable { + it: 0, + data: data + } + } + + get_slice_fn!(get_slice_sat, SAT_PAYLOAD_MAX_SIZE); + get_slice_fn!(get_slice_master, MASTER_PAYLOAD_MAX_SIZE); +} + +impl MessageManager { + pub fn new() -> MessageManager { + MessageManager { + out_message: None, + out_state: OutMessageState::NoMessage, + in_queue: VecDeque::new(), + in_buffer: None + } + } + + pub fn handle_incoming(&mut self, last: bool, length: usize, data: &[u8; MASTER_PAYLOAD_MAX_SIZE]) { + // called when receiving a message from master + match self.in_buffer.as_mut() { + Some(message) => message.data.extend(&data[..length]), + None => { + self.in_buffer = Some(Message { + count: data[0], + tag: data[1], + data: data[2..length].to_vec() + }); + } + }; + if last { + // when done, remove from working queue + self.in_queue.push_back(self.in_buffer.take().unwrap()); + } + } + + pub fn is_outgoing_ready(&mut self) -> bool { + // called by main loop, to see if there's anything to send, will send it afterwards + match self.out_state { + OutMessageState::MessageReady => { + self.out_state = OutMessageState::MessageBeingSent; + true + }, + _ => false + } + } + + pub fn was_message_acknowledged(&mut self) -> bool { + match self.out_state { + OutMessageState::MessageAcknowledged => { + self.out_state = OutMessageState::NoMessage; + true + }, + _ => false + } + } + + pub fn get_outgoing_slice(&mut self, data_slice: &mut [u8; MASTER_PAYLOAD_MAX_SIZE]) -> Option { + if self.out_state != OutMessageState::MessageBeingSent { + return None; + } + let meta = self.out_message.as_mut()?.get_slice_master(data_slice); + if meta.last { + // clear the message slot + self.out_message = None; + // notify kernel with a flag that message is sent + self.out_state = OutMessageState::MessageSent; + } + Some(meta) + } + + pub fn ack_slice(&mut self) -> bool { + // returns whether or not there's more to be sent + match self.out_state { + OutMessageState::MessageBeingSent => true, + OutMessageState::MessageSent => { + self.out_state = OutMessageState::MessageAcknowledged; + false + }, + _ => { + warn!("received unsolicited SubkernelMessageAck"); + false + } + } + } + + pub fn accept_outgoing(&mut self, count: u8, tag: &[u8], data: *const *const ()) -> Result<(), Error> { + let mut writer = Cursor::new(Vec::new()); + rpc::send_args(&mut writer, 0, tag, data)?; + // skip service tag, but write the count + let mut data = writer.into_inner().split_off(3); + data[0] = count; + self.out_message = Some(Sliceable::new(data)); + self.out_state = OutMessageState::MessageReady; + Ok(()) + } + + pub fn get_incoming(&mut self) -> Option { + self.in_queue.pop_front() + } +} + +impl Session { + pub fn new() -> Session { + Session { + kernel_state: KernelState::Absent, + log_buffer: String::new(), + last_exception: None, + messages: MessageManager::new() + } + } + + fn running(&self) -> bool { + match self.kernel_state { + KernelState::Absent | KernelState::Loaded => false, + KernelState::Running | KernelState::MsgAwait { .. 
} | + KernelState::MsgSending => true + } + } + + fn flush_log_buffer(&mut self) { + if &self.log_buffer[self.log_buffer.len() - 1..] == "\n" { + for line in self.log_buffer.lines() { + info!(target: "kernel", "{}", line); + } + self.log_buffer.clear() + } + } +} + +impl Manager { + pub fn new() -> Manager { + Manager { + kernels: BTreeMap::new(), + current_id: 0, + session: Session::new(), + cache: Cache::new(), + last_finished: None, + } + } + + pub fn add(&mut self, id: u32, last: bool, data: &[u8], data_len: usize) -> Result<(), Error> { + let kernel = match self.kernels.get_mut(&id) { + Some(kernel) => { + if kernel.complete { + // replace entry + self.kernels.remove(&id); + self.kernels.insert(id, KernelLibrary { + library: Vec::new(), + complete: false }); + self.kernels.get_mut(&id)? + } else { + kernel + } + }, + None => { + self.kernels.insert(id, KernelLibrary { + library: Vec::new(), + complete: false }); + self.kernels.get_mut(&id)? + }, + }; + kernel.library.extend(&data[0..data_len]); + + kernel.complete = last; + Ok(()) + } + + pub fn is_running(&self) -> bool { + self.session.running() + } + + pub fn get_current_id(&self) -> Option { + match self.is_running() { + true => Some(self.current_id), + false => None + } + } + + pub fn stop(&mut self) { + unsafe { kernel_cpu::stop() } + self.session.kernel_state = KernelState::Absent; + unsafe { self.cache.unborrow() } + } + + pub fn run(&mut self, id: u32) -> Result<(), Error> { + info!("starting subkernel #{}", id); + if self.session.kernel_state != KernelState::Loaded + || self.current_id != id { + self.load(id)?; + } + self.session.kernel_state = KernelState::Running; + cricon_select(RtioMaster::Kernel); + + kern_acknowledge() + } + + pub fn message_handle_incoming(&mut self, last: bool, length: usize, slice: &[u8; MASTER_PAYLOAD_MAX_SIZE]) { + if !self.is_running() { + return; + } + self.session.messages.handle_incoming(last, length, slice); + } + + pub fn message_get_slice(&mut self, slice: &mut [u8; MASTER_PAYLOAD_MAX_SIZE]) -> Option { + if !self.is_running() { + return None; + } + self.session.messages.get_outgoing_slice(slice) + } + + pub fn message_ack_slice(&mut self) -> bool { + if !self.is_running() { + warn!("received unsolicited SubkernelMessageAck"); + return false; + } + self.session.messages.ack_slice() + } + + pub fn message_is_ready(&mut self) -> bool { + self.session.messages.is_outgoing_ready() + } + + pub fn get_last_finished(&mut self) -> Option { + self.last_finished.take() + } + + pub fn load(&mut self, id: u32) -> Result<(), Error> { + if self.current_id == id && self.session.kernel_state == KernelState::Loaded { + return Ok(()) + } + if !self.kernels.get(&id)?.complete { + return Err(Error::KernelNotFound) + } + self.current_id = id; + self.session = Session::new(); + self.stop(); + + unsafe { + kernel_cpu::start(); + + kern_send(&kern::LoadRequest(&self.kernels.get(&id)?.library)).unwrap(); + kern_recv(|reply| { + match reply { + kern::LoadReply(Ok(())) => { + self.session.kernel_state = KernelState::Loaded; + Ok(()) + } + kern::LoadReply(Err(error)) => { + kernel_cpu::stop(); + Err(Error::Load(format!("{}", error))) + } + other => { + unexpected!("unexpected kernel CPU reply to load request: {:?}", other) + } + } + }) + } + } + + pub fn exception_get_slice(&mut self, data_slice: &mut [u8; SAT_PAYLOAD_MAX_SIZE]) -> SliceMeta { + match self.session.last_exception.as_mut() { + Some(exception) => exception.get_slice_sat(data_slice), + None => SliceMeta { len: 0, last: true } + } + } + + fn 
runtime_exception(&mut self, cause: Error) { + let raw_exception: Vec = Vec::new(); + let mut writer = Cursor::new(raw_exception); + match (HostKernelException { + exceptions: &[Some(eh_artiq::Exception { + id: 11, // SubkernelError, defined in ksupport + message: format!("in subkernel id {}: {:?}", self.current_id, cause).as_c_slice(), + param: [0, 0, 0], + file: file!().as_c_slice(), + line: line!(), + column: column!(), + function: format!("subkernel id {}", self.current_id).as_c_slice(), + })], + stack_pointers: &[StackPointerBacktrace { + stack_pointer: 0, + initial_backtrace_size: 0, + current_backtrace_size: 0 + }], + backtrace: &[], + async_errors: 0 + }).write_to(&mut writer) { + Ok(_) => self.session.last_exception = Some(Sliceable::new(writer.into_inner())), + Err(_) => error!("Error writing exception data") + } + } + + pub fn process_kern_requests(&mut self, rank: u8) { + if !self.is_running() { + return; + } + + match self.process_external_messages() { + Ok(()) => (), + Err(Error::AwaitingMessage) => return, // kernel still waiting, do not process kernel messages + Err(Error::KernelException(exception)) => { + unsafe { kernel_cpu::stop() } + self.session.kernel_state = KernelState::Absent; + unsafe { self.cache.unborrow() } + self.session.last_exception = Some(exception); + self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: true }) + }, + Err(e) => { + error!("Error while running processing external messages: {:?}", e); + self.stop(); + self.runtime_exception(e); + self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: true }) + } + } + + match self.process_kern_message(rank) { + Ok(Some(with_exception)) => { + self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: with_exception }) + }, + Ok(None) | Err(Error::NoMessage) => (), + Err(e) => { + error!("Error while running kernel: {:?}", e); + self.stop(); + self.runtime_exception(e); + self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: true }) + } + } + } + + fn process_external_messages(&mut self) -> Result<(), Error> { + match self.session.kernel_state { + KernelState::MsgAwait { max_time } => { + if clock::get_ms() > max_time { + kern_send(&kern::SubkernelMsgRecvReply { status: kern::SubkernelStatus::Timeout, count: 0 })?; + self.session.kernel_state = KernelState::Running; + return Ok(()) + } + if let Some(message) = self.session.messages.get_incoming() { + kern_send(&kern::SubkernelMsgRecvReply { status: kern::SubkernelStatus::NoError, count: message.count })?; + self.session.kernel_state = KernelState::Running; + pass_message_to_kernel(&message) + } else { + Err(Error::AwaitingMessage) + } + }, + KernelState::MsgSending => { + if self.session.messages.was_message_acknowledged() { + self.session.kernel_state = KernelState::Running; + kern_acknowledge() + } else { + Err(Error::AwaitingMessage) + } + }, + _ => Ok(()) + } + } + + fn process_kern_message(&mut self, rank: u8) -> Result, Error> { + // returns Ok(with_exception) on finish + // None if the kernel is still running + kern_recv(|request| { + match (request, self.session.kernel_state) { + (&kern::LoadReply(_), KernelState::Loaded) => { + // We're standing by; ignore the message. + return Ok(None) + } + (_, KernelState::Running) => (), + _ => { + unexpected!("unexpected request {:?} from kernel CPU in {:?} state", + request, self.session.kernel_state) + }, + } + + if process_kern_hwreq(request, rank)? 
{ + return Ok(None) + } + + match request { + &kern::Log(args) => { + use core::fmt::Write; + self.session.log_buffer + .write_fmt(args) + .unwrap_or_else(|_| warn!("cannot append to session log buffer")); + self.session.flush_log_buffer(); + kern_acknowledge() + } + + &kern::LogSlice(arg) => { + self.session.log_buffer += arg; + self.session.flush_log_buffer(); + kern_acknowledge() + } + + &kern::RpcFlush => { + // we do not have to do anything about this request, + // it is sent by the kernel firmware regardless of RPC being used + kern_acknowledge() + } + + &kern::CacheGetRequest { key } => { + let value = self.cache.get(key); + kern_send(&kern::CacheGetReply { + value: unsafe { mem::transmute(value) } + }) + } + + &kern::CachePutRequest { key, value } => { + let succeeded = self.cache.put(key, value).is_ok(); + kern_send(&kern::CachePutReply { succeeded: succeeded }) + } + + &kern::RunFinished => { + unsafe { kernel_cpu::stop() } + self.session.kernel_state = KernelState::Absent; + unsafe { self.cache.unborrow() } + + return Ok(Some(false)) + } + &kern::RunException { exceptions, stack_pointers, backtrace } => { + unsafe { kernel_cpu::stop() } + self.session.kernel_state = KernelState::Absent; + unsafe { self.cache.unborrow() } + let exception = slice_kernel_exception(&exceptions, &stack_pointers, &backtrace)?; + self.session.last_exception = Some(exception); + return Ok(Some(true)) + } + + &kern::SubkernelMsgSend { id: _, count, tag, data } => { + self.session.messages.accept_outgoing(count, tag, data)?; + // acknowledge after the message is sent + self.session.kernel_state = KernelState::MsgSending; + Ok(()) + } + + &kern::SubkernelMsgRecvRequest { id: _, timeout } => { + let max_time = clock::get_ms() + timeout as u64; + self.session.kernel_state = KernelState::MsgAwait { max_time: max_time }; + Ok(()) + }, + + request => unexpected!("unexpected request {:?} from kernel CPU", request) + }.and(Ok(None)) + }) + } +} + +impl Drop for Manager { + fn drop(&mut self) { + cricon_select(RtioMaster::Drtio); + unsafe { + kernel_cpu::stop() + }; + } +} + +fn kern_recv(f: F) -> Result + where F: FnOnce(&kern::Message) -> Result { + if mailbox::receive() == 0 { + return Err(Error::NoMessage); + }; + if !kernel_cpu::validate(mailbox::receive()) { + return Err(Error::InvalidPointer(mailbox::receive())) + } + f(unsafe { &*(mailbox::receive() as *const kern::Message) }) +} + +fn kern_recv_w_timeout(timeout: u64, f: F) -> Result + where F: FnOnce(&kern::Message) -> Result + Copy { + // sometimes kernel may be too slow to respond immediately + // (e.g. 
when receiving external messages)
+    // we cannot wait indefinitely to keep the satellite responsive
+    // so a timeout is used instead
+    let max_time = clock::get_ms() + timeout;
+    while clock::get_ms() < max_time {
+        match kern_recv(f) {
+            Err(Error::NoMessage) => continue,
+            anything_else => return anything_else
+        }
+    }
+    Err(Error::NoMessage)
+}
+
+fn kern_acknowledge() -> Result<(), Error> {
+    mailbox::acknowledge();
+    Ok(())
+}
+
+fn kern_send(request: &kern::Message) -> Result<(), Error> {
+    unsafe { mailbox::send(request as *const _ as usize) }
+    while !mailbox::acknowledged() {}
+    Ok(())
+}
+
+fn slice_kernel_exception(exceptions: &[Option<eh_artiq::Exception>],
+    stack_pointers: &[eh_artiq::StackPointerBacktrace],
+    backtrace: &[(usize, usize)]
+) -> Result<Sliceable, Error> {
+    error!("exception in kernel");
+    for exception in exceptions {
+        error!("{:?}", exception.unwrap());
+    }
+    error!("stack pointers: {:?}", stack_pointers);
+    error!("backtrace: {:?}", backtrace);
+    // master will only pass the exception data back to the host:
+    let raw_exception: Vec<u8> = Vec::new();
+    let mut writer = Cursor::new(raw_exception);
+    match (HostKernelException {
+        exceptions: exceptions,
+        stack_pointers: stack_pointers,
+        backtrace: backtrace,
+        async_errors: 0
+    }).write_to(&mut writer) {
+        // save last exception data to be received by master
+        Ok(_) => Ok(Sliceable::new(writer.into_inner())),
+        Err(_) => Err(Error::SubkernelIoError)
+    }
+}
+
+fn pass_message_to_kernel(message: &Message) -> Result<(), Error> {
+    let mut reader = Cursor::new(&message.data);
+    let mut tag: [u8; 1] = [message.tag];
+    let count = message.count;
+    let mut i = 0;
+    loop {
+        let slot = kern_recv_w_timeout(100, |reply| {
+            match reply {
+                &kern::RpcRecvRequest(slot) => Ok(slot),
+                &kern::RunException { exceptions, stack_pointers, backtrace } => {
+                    let exception = slice_kernel_exception(&exceptions, &stack_pointers, &backtrace)?;
+                    Err(Error::KernelException(exception))
+                },
+                other => unexpected!(
+                    "expected root value slot from kernel CPU, not {:?}", other)
+            }
+        })?;
+
+        let res = rpc::recv_return(&mut reader, &tag, slot, &|size| -> Result<_, Error> {
+            if size == 0 {
+                return Ok(0 as *mut ())
+            }
+            kern_send(&kern::RpcRecvReply(Ok(size)))?;
+            Ok(kern_recv_w_timeout(100, |reply| {
+                match reply {
+                    &kern::RpcRecvRequest(slot) => Ok(slot),
+                    &kern::RunException {
+                        exceptions,
+                        stack_pointers,
+                        backtrace
+                    } => {
+                        let exception = slice_kernel_exception(&exceptions, &stack_pointers, &backtrace)?;
+                        Err(Error::KernelException(exception))
+                    },
+                    other => unexpected!(
+                        "expected nested value slot from kernel CPU, not {:?}", other)
+                }
+            })?)
+ }); + match res { + Ok(_) => kern_send(&kern::RpcRecvReply(Ok(0)))?, + Err(_) => unexpected!("expected valid subkernel message data") + }; + i += 1; + if i < count { + // update the tag for next read + tag[0] = reader.read_u8()?; + } else { + // should be done by then + break; + } + } + Ok(()) +} + +fn process_kern_hwreq(request: &kern::Message, rank: u8) -> Result { + match request { + &kern::RtioInitRequest => { + unsafe { + csr::drtiosat::reset_write(1); + clock::spin_us(100); + csr::drtiosat::reset_write(0); + } + kern_acknowledge() + } + + &kern::RtioDestinationStatusRequest { destination } => { + // only local destination is considered "up" + // no access to other DRTIO destinations + kern_send(&kern::RtioDestinationStatusReply { + up: destination == rank }) + } + + &kern::I2cStartRequest { busno } => { + let succeeded = i2c::start(busno as u8).is_ok(); + kern_send(&kern::I2cBasicReply { succeeded: succeeded }) + } + &kern::I2cRestartRequest { busno } => { + let succeeded = i2c::restart(busno as u8).is_ok(); + kern_send(&kern::I2cBasicReply { succeeded: succeeded }) + } + &kern::I2cStopRequest { busno } => { + let succeeded = i2c::stop(busno as u8).is_ok(); + kern_send(&kern::I2cBasicReply { succeeded: succeeded }) + } + &kern::I2cWriteRequest { busno, data } => { + match i2c::write(busno as u8, data) { + Ok(ack) => kern_send( + &kern::I2cWriteReply { succeeded: true, ack: ack }), + Err(_) => kern_send( + &kern::I2cWriteReply { succeeded: false, ack: false }) + } + } + &kern::I2cReadRequest { busno, ack } => { + match i2c::read(busno as u8, ack) { + Ok(data) => kern_send( + &kern::I2cReadReply { succeeded: true, data: data }), + Err(_) => kern_send( + &kern::I2cReadReply { succeeded: false, data: 0xff }) + } + } + &kern::I2cSwitchSelectRequest { busno, address, mask } => { + let succeeded = i2c::switch_select(busno as u8, address, mask).is_ok(); + kern_send(&kern::I2cBasicReply { succeeded: succeeded }) + } + + &kern::SpiSetConfigRequest { busno, flags, length, div, cs } => { + let succeeded = spi::set_config(busno as u8, flags, length, div, cs).is_ok(); + kern_send(&kern::SpiBasicReply { succeeded: succeeded }) + }, + &kern::SpiWriteRequest { busno, data } => { + let succeeded = spi::write(busno as u8, data).is_ok(); + kern_send(&kern::SpiBasicReply { succeeded: succeeded }) + } + &kern::SpiReadRequest { busno } => { + match spi::read(busno as u8) { + Ok(data) => kern_send( + &kern::SpiReadReply { succeeded: true, data: data }), + Err(_) => kern_send( + &kern::SpiReadReply { succeeded: false, data: 0 }) + } + } + + _ => return Ok(false) + }.and(Ok(true)) +} \ No newline at end of file diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index 4384e68d3..a44919933 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -1,4 +1,4 @@ -#![feature(never_type, panic_info_message, llvm_asm, default_alloc_error_handler)] +#![feature(never_type, panic_info_message, llvm_asm, default_alloc_error_handler, try_trait)] #![no_std] #[macro_use] @@ -9,12 +9,15 @@ extern crate board_artiq; extern crate riscv; extern crate alloc; extern crate proto_artiq; +extern crate cslice; +extern crate io; +extern crate eh; use core::convert::TryFrom; use board_misoc::{csr, ident, clock, uart_logger, i2c, pmp}; #[cfg(has_si5324)] use board_artiq::si5324; -use board_artiq::{spi, drtioaux}; +use board_artiq::{spi, drtioaux, drtio_routing}; #[cfg(soc_platform = "efc")] use board_artiq::ad9117; use proto_artiq::drtioaux_proto::{SAT_PAYLOAD_MAX_SIZE, 
MASTER_PAYLOAD_MAX_SIZE}; @@ -22,6 +25,7 @@ use proto_artiq::drtioaux_proto::{SAT_PAYLOAD_MAX_SIZE, MASTER_PAYLOAD_MAX_SIZE} use board_artiq::drtio_eem; use riscv::register::{mcause, mepc, mtval}; use dma::Manager as DmaManager; +use kernel::Manager as KernelManager; use analyzer::Analyzer; #[global_allocator] @@ -30,6 +34,8 @@ static mut ALLOC: alloc_list::ListAlloc = alloc_list::EMPTY; mod repeater; mod dma; mod analyzer; +mod kernel; +mod cache; fn drtiosat_reset(reset: bool) { unsafe { @@ -59,6 +65,22 @@ fn drtiosat_tsc_loaded() -> bool { } } +pub enum RtioMaster { + Drtio, + Dma, + Kernel +} + +pub fn cricon_select(master: RtioMaster) { + let val = match master { + RtioMaster::Drtio => 0, + RtioMaster::Dma => 1, + RtioMaster::Kernel => 2 + }; + unsafe { + csr::cri_con::selected_write(val); + } +} #[cfg(has_drtio_routing)] macro_rules! forward { @@ -80,8 +102,8 @@ macro_rules! forward { ($routing_table:expr, $destination:expr, $rank:expr, $repeaters:expr, $packet:expr) => {} } -fn process_aux_packet(_manager: &mut DmaManager, analyzer: &mut Analyzer, _repeaters: &mut [repeater::Repeater], - _routing_table: &mut drtio_routing::RoutingTable, _rank: &mut u8, +fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmgr: &mut KernelManager, + _repeaters: &mut [repeater::Repeater], _routing_table: &mut drtio_routing::RoutingTable, _rank: &mut u8, packet: drtioaux::Packet) -> Result<(), drtioaux::Error> { // In the code below, *_chan_sel_write takes an u8 if there are fewer than 256 channels, // and u16 otherwise; hence the `as _` conversion. @@ -101,18 +123,30 @@ fn process_aux_packet(_manager: &mut DmaManager, analyzer: &mut Analyzer, _repea drtioaux::send(0, &drtioaux::Packet::ResetAck) }, - drtioaux::Packet::DestinationStatusRequest { destination: _destination } => { + drtioaux::Packet::DestinationStatusRequest { destination } => { #[cfg(has_drtio_routing)] - let hop = _routing_table.0[_destination as usize][*_rank as usize]; + let hop = _routing_table.0[destination as usize][*_rank as usize]; #[cfg(not(has_drtio_routing))] let hop = 0; if hop == 0 { // async messages - if let Some(status) = _manager.check_state() { + if let Some(status) = dmamgr.get_status() { info!("playback done, error: {}, channel: {}, timestamp: {}", status.error, status.channel, status.timestamp); drtioaux::send(0, &drtioaux::Packet::DmaPlaybackStatus { - destination: _destination, id: status.id, error: status.error, channel: status.channel, timestamp: status.timestamp })?; + destination: destination, id: status.id, error: status.error, channel: status.channel, timestamp: status.timestamp })?; + } else if let Some(subkernel_finished) = kernelmgr.get_last_finished() { + info!("subkernel {} finished, with exception: {}", subkernel_finished.id, subkernel_finished.with_exception); + drtioaux::send(0, &drtioaux::Packet::SubkernelFinished { + id: subkernel_finished.id, with_exception: subkernel_finished.with_exception + })?; + } else if kernelmgr.message_is_ready() { + let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; + let meta = kernelmgr.message_get_slice(&mut data_slice).unwrap(); + drtioaux::send(0, &drtioaux::Packet::SubkernelMessage { + destination: destination, id: kernelmgr.get_current_id().unwrap(), + last: meta.last, length: meta.len as u16, data: data_slice + })?; } else { let errors; unsafe { @@ -156,7 +190,7 @@ fn process_aux_packet(_manager: &mut DmaManager, analyzer: &mut Analyzer, _repea if hop <= csr::DRTIOREP.len() { let repno = hop - 1; match 
_repeaters[repno].aux_forward(&drtioaux::Packet::DestinationStatusRequest { - destination: _destination + destination: destination }) { Ok(()) => (), Err(drtioaux::Error::LinkDown) => drtioaux::send(0, &drtioaux::Packet::DestinationDownReply)?, @@ -336,28 +370,80 @@ fn process_aux_packet(_manager: &mut DmaManager, analyzer: &mut Analyzer, _repea }) } - #[cfg(has_rtio_dma)] drtioaux::Packet::DmaAddTraceRequest { destination: _destination, id, last, length, trace } => { forward!(_routing_table, _destination, *_rank, _repeaters, &packet); - let succeeded = _manager.add(id, last, &trace, length as usize).is_ok(); + let succeeded = dmamgr.add(id, last, &trace, length as usize).is_ok(); drtioaux::send(0, &drtioaux::Packet::DmaAddTraceReply { succeeded: succeeded }) } - #[cfg(has_rtio_dma)] drtioaux::Packet::DmaRemoveTraceRequest { destination: _destination, id } => { forward!(_routing_table, _destination, *_rank, _repeaters, &packet); - let succeeded = _manager.erase(id).is_ok(); + let succeeded = dmamgr.erase(id).is_ok(); drtioaux::send(0, &drtioaux::Packet::DmaRemoveTraceReply { succeeded: succeeded }) } - #[cfg(has_rtio_dma)] drtioaux::Packet::DmaPlaybackRequest { destination: _destination, id, timestamp } => { forward!(_routing_table, _destination, *_rank, _repeaters, &packet); - let succeeded = _manager.playback(id, timestamp).is_ok(); + // no DMA with a running kernel + let succeeded = !kernelmgr.is_running() && dmamgr.playback(id, timestamp).is_ok(); drtioaux::send(0, &drtioaux::Packet::DmaPlaybackReply { succeeded: succeeded }) } + drtioaux::Packet::SubkernelAddDataRequest { destination: _destination, id, last, length, data } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + let succeeded = kernelmgr.add(id, last, &data, length as usize).is_ok(); + drtioaux::send(0, + &drtioaux::Packet::SubkernelAddDataReply { succeeded: succeeded }) + } + drtioaux::Packet::SubkernelLoadRunRequest { destination: _destination, id, run } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + let mut succeeded = kernelmgr.load(id).is_ok(); + // allow preloading a kernel with delayed run + if run { + if dmamgr.running() { + // cannot run kernel while DDMA is running + succeeded = false; + } else { + succeeded |= kernelmgr.run(id).is_ok(); + } + } + drtioaux::send(0, + &drtioaux::Packet::SubkernelLoadRunReply { succeeded: succeeded }) + } + drtioaux::Packet::SubkernelExceptionRequest { destination: _destination } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE]; + let meta = kernelmgr.exception_get_slice(&mut data_slice); + drtioaux::send(0, &drtioaux::Packet::SubkernelException { + last: meta.last, + length: meta.len, + data: data_slice, + }) + } + drtioaux::Packet::SubkernelMessage { destination, id: _id, last, length, data } => { + forward!(_routing_table, destination, *_rank, _repeaters, &packet); + kernelmgr.message_handle_incoming(last, length as usize, &data); + drtioaux::send(0, &drtioaux::Packet::SubkernelMessageAck { + destination: destination + }) + } + drtioaux::Packet::SubkernelMessageAck { destination: _destination } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + if kernelmgr.message_ack_slice() { + let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; + if let Some(meta) = kernelmgr.message_get_slice(&mut data_slice) { + drtioaux::send(0, &drtioaux::Packet::SubkernelMessage { + 
destination: *_rank, id: kernelmgr.get_current_id().unwrap(), + last: meta.last, length: meta.len as u16, data: data_slice + })? + } else { + error!("Error receiving message slice"); + } + } + Ok(()) + } + _ => { warn!("received unexpected aux packet"); Ok(()) @@ -366,12 +452,12 @@ fn process_aux_packet(_manager: &mut DmaManager, analyzer: &mut Analyzer, _repea } fn process_aux_packets(dma_manager: &mut DmaManager, analyzer: &mut Analyzer, - repeaters: &mut [repeater::Repeater], + kernelmgr: &mut KernelManager, repeaters: &mut [repeater::Repeater], routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8) { let result = drtioaux::recv(0).and_then(|packet| { if let Some(packet) = packet { - process_aux_packet(dma_manager, analyzer, repeaters, routing_table, rank, packet) + process_aux_packet(dma_manager, analyzer, kernelmgr, repeaters, routing_table, rank, packet) } else { Ok(()) } @@ -383,10 +469,7 @@ fn process_aux_packets(dma_manager: &mut DmaManager, analyzer: &mut Analyzer, } fn drtiosat_process_errors() { - let errors; - unsafe { - errors = csr::drtiosat::protocol_error_read(); - } + let errors = unsafe { csr::drtiosat::protocol_error_read() }; if errors & 1 != 0 { error!("received packet of an unknown type"); } @@ -394,26 +477,29 @@ fn drtiosat_process_errors() { error!("received truncated packet"); } if errors & 4 != 0 { - let destination; - unsafe { - destination = csr::drtiosat::buffer_space_timeout_dest_read(); - } + let destination = unsafe { + csr::drtiosat::buffer_space_timeout_dest_read() + }; error!("timeout attempting to get buffer space from CRI, destination=0x{:02x}", destination) } - if errors & 8 != 0 { - let channel; - let timestamp_event; - let timestamp_counter; - unsafe { - channel = csr::drtiosat::underflow_channel_read(); - timestamp_event = csr::drtiosat::underflow_timestamp_event_read() as i64; - timestamp_counter = csr::drtiosat::underflow_timestamp_counter_read() as i64; + let drtiosat_active = unsafe { csr::cri_con::selected_read() == 0 }; + if drtiosat_active { + // RTIO errors are handled by ksupport and dma manager + if errors & 8 != 0 { + let channel; + let timestamp_event; + let timestamp_counter; + unsafe { + channel = csr::drtiosat::underflow_channel_read(); + timestamp_event = csr::drtiosat::underflow_timestamp_event_read() as i64; + timestamp_counter = csr::drtiosat::underflow_timestamp_counter_read() as i64; + } + error!("write underflow, channel={}, timestamp={}, counter={}, slack={}", + channel, timestamp_event, timestamp_counter, timestamp_event-timestamp_counter); + } + if errors & 16 != 0 { + error!("write overflow"); } - error!("write underflow, channel={}, timestamp={}, counter={}, slack={}", - channel, timestamp_event, timestamp_counter, timestamp_event-timestamp_counter); - } - if errors & 16 != 0 { - error!("write overflow"); } unsafe { csr::drtiosat::protocol_error_write(errors); @@ -612,21 +698,23 @@ pub extern fn main() -> i32 { si5324::siphaser::calibrate_skew().expect("failed to calibrate skew"); } - // DMA manager created here, so when link is dropped, all DMA traces - // are cleared out for a clean slate on subsequent connections, - // without a manual intervention. + // various managers created here, so when link is dropped, DMA traces, + // analyzer logs, kernels are cleared and/or stopped for a clean slate + // on subsequent connections, without a manual intervention. let mut dma_manager = DmaManager::new(); - - // Reset the analyzer as well. 
let mut analyzer = Analyzer::new(); + let mut kernelmgr = KernelManager::new(); + cricon_select(RtioMaster::Drtio); drtioaux::reset(0); drtiosat_reset(false); drtiosat_reset_phy(false); while drtiosat_link_rx_up() { drtiosat_process_errors(); - process_aux_packets(&mut dma_manager, &mut analyzer, &mut repeaters, &mut routing_table, &mut rank); + process_aux_packets(&mut dma_manager, &mut analyzer, + &mut kernelmgr, &mut repeaters, + &mut routing_table, &mut rank); for rep in repeaters.iter_mut() { rep.service(&routing_table, rank); } @@ -649,6 +737,7 @@ pub extern fn main() -> i32 { error!("aux packet error: {}", e); } } + kernelmgr.process_kern_requests(rank); } drtiosat_reset_phy(true); diff --git a/artiq/firmware/satman/satman.ld b/artiq/firmware/satman/satman.ld index f58ef38d8..c188dc3ec 100644 --- a/artiq/firmware/satman/satman.ld +++ b/artiq/firmware/satman/satman.ld @@ -22,6 +22,19 @@ SECTIONS __eh_frame_end = .; } > main_ram + .eh_frame_hdr : + { + KEEP(*(.eh_frame_hdr)) + } > main_ram + + __eh_frame_hdr_start = SIZEOF(.eh_frame_hdr) > 0 ? ADDR(.eh_frame_hdr) : 0; + __eh_frame_hdr_end = SIZEOF(.eh_frame_hdr) > 0 ? . : 0; + + .gcc_except_table : + { + *(.gcc_except_table) + } > main_ram + /* https://sourceware.org/bugzilla/show_bug.cgi?id=20475 */ .got : { @@ -68,11 +81,11 @@ SECTIONS _fstack = . - 16; } > main_ram - /* 64MB heap for alloc use */ + /* remainder of 64MB for heap for alloc use */ .heap (NOLOAD) : ALIGN(16) { _fheap = .; - . = . + 0x4000000; + . = 0x44000000; // not to overwrite RPC queue _eheap = .; } > main_ram } From 0a750c77e8f432c6eac7b0ee5d247276eeabd393 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 5 Oct 2023 14:35:50 +0800 Subject: [PATCH 006/296] compiler: support subkernels --- artiq/compiler/asttyped.py | 10 +- artiq/compiler/builtins.py | 15 +- artiq/compiler/embedding.py | 109 ++++++- artiq/compiler/ir.py | 66 +++- artiq/compiler/module.py | 2 + artiq/compiler/prelude.py | 5 + artiq/compiler/targets.py | 10 +- .../compiler/transforms/artiq_ir_generator.py | 87 ++++- .../compiler/transforms/asttyped_rewriter.py | 7 +- artiq/compiler/transforms/inferencer.py | 66 +++- .../compiler/transforms/iodelay_estimator.py | 25 +- .../compiler/transforms/llvm_ir_generator.py | 296 ++++++++++++++---- artiq/compiler/types.py | 51 +++ artiq/frontend/artiq_compile.py | 4 +- artiq/language/core.py | 58 +++- 15 files changed, 699 insertions(+), 112 deletions(-) diff --git a/artiq/compiler/asttyped.py b/artiq/compiler/asttyped.py index 10b197fa4..b6fb34274 100644 --- a/artiq/compiler/asttyped.py +++ b/artiq/compiler/asttyped.py @@ -21,13 +21,19 @@ class scoped(object): set of variables resolved as globals """ +class remote(object): + """ + :ivar remote_fn: (bool) whether function is ran on a remote device, + meaning arguments are received remotely and return is sent remotely + """ + # Typed versions of untyped nodes class argT(ast.arg, commontyped): pass class ClassDefT(ast.ClassDef): _types = ("constructor_type",) -class FunctionDefT(ast.FunctionDef, scoped): +class FunctionDefT(ast.FunctionDef, scoped, remote): _types = ("signature_type",) class QuotedFunctionDefT(FunctionDefT): """ @@ -58,7 +64,7 @@ class BinOpT(ast.BinOp, commontyped): pass class BoolOpT(ast.BoolOp, commontyped): pass -class CallT(ast.Call, commontyped): +class CallT(ast.Call, commontyped, remote): """ :ivar iodelay: (:class:`iodelay.Expr`) :ivar arg_exprs: (dict of str to :class:`iodelay.Expr`) diff --git a/artiq/compiler/builtins.py b/artiq/compiler/builtins.py index fdd5286e1..64e9b3690 100644 --- 
a/artiq/compiler/builtins.py +++ b/artiq/compiler/builtins.py @@ -38,6 +38,9 @@ class TInt(types.TMono): def one(): return 1 +def TInt8(): + return TInt(types.TValue(8)) + def TInt32(): return TInt(types.TValue(32)) @@ -244,6 +247,12 @@ def fn_at_mu(): def fn_rtio_log(): return types.TBuiltinFunction("rtio_log") +def fn_subkernel_await(): + return types.TBuiltinFunction("subkernel_await") + +def fn_subkernel_preload(): + return types.TBuiltinFunction("subkernel_preload") + # Accessors def is_none(typ): @@ -326,7 +335,7 @@ def get_iterable_elt(typ): # n-dimensional arrays, rather than the n-1 dimensional result of iterating over # the first axis, which makes the name a bit misleading. if is_str(typ) or is_bytes(typ) or is_bytearray(typ): - return TInt(types.TValue(8)) + return TInt8() elif types._is_pointer(typ) or is_iterable(typ): return typ.find()["elt"].find() else: @@ -342,5 +351,5 @@ def is_allocated(typ): is_float(typ) or is_range(typ) or types._is_pointer(typ) or types.is_function(typ) or types.is_external_function(typ) or types.is_rpc(typ) or - types.is_method(typ) or types.is_tuple(typ) or - types.is_value(typ)) + types.is_subkernel(typ) or types.is_method(typ) or + types.is_tuple(typ) or types.is_value(typ)) diff --git a/artiq/compiler/embedding.py b/artiq/compiler/embedding.py index 040fc80ee..9c2f270d8 100644 --- a/artiq/compiler/embedding.py +++ b/artiq/compiler/embedding.py @@ -74,7 +74,9 @@ class EmbeddingMap: "CacheError", "SPIError", "0:ZeroDivisionError", - "0:IndexError"]) + "0:IndexError", + "UnwrapNoneError", + "SubkernelError"]) def preallocate_runtime_exception_names(self, names): for i, name in enumerate(names): @@ -183,7 +185,15 @@ class EmbeddingMap: obj_typ, _ = self.type_map[type(obj_ref)] yield obj_id, obj_ref, obj_typ - def has_rpc(self): + def subkernels(self): + subkernels = {} + for k, v in self.object_forward_map.items(): + if hasattr(v, "artiq_embedded"): + if v.artiq_embedded.destination is not None: + subkernels[k] = v + return subkernels + + def has_rpc_or_subkernel(self): return any(filter(lambda x: inspect.isfunction(x) or inspect.ismethod(x), self.object_forward_map.values())) @@ -469,7 +479,7 @@ class ASTSynthesizer: return asttyped.QuoteT(value=value, type=instance_type, loc=loc) - def call(self, callee, args, kwargs, callback=None): + def call(self, callee, args, kwargs, callback=None, remote_fn=False): """ Construct an AST fragment calling a function specified by an AST node `function_node`, with given arguments. 
@@ -513,7 +523,7 @@ class ASTSynthesizer: starargs=None, kwargs=None, type=types.TVar(), iodelay=None, arg_exprs={}, begin_loc=begin_loc, end_loc=end_loc, star_loc=None, dstar_loc=None, - loc=callee_node.loc.join(end_loc)) + loc=callee_node.loc.join(end_loc), remote_fn=remote_fn) if callback is not None: node = asttyped.CallT( @@ -548,7 +558,7 @@ class StitchingASTTypedRewriter(ASTTypedRewriter): arg=node.arg, annotation=None, arg_loc=node.arg_loc, colon_loc=node.colon_loc, loc=node.loc) - def visit_quoted_function(self, node, function): + def visit_quoted_function(self, node, function, remote_fn): extractor = LocalExtractor(env_stack=self.env_stack, engine=self.engine) extractor.visit(node) @@ -569,7 +579,7 @@ class StitchingASTTypedRewriter(ASTTypedRewriter): body=node.body, decorator_list=node.decorator_list, keyword_loc=node.keyword_loc, name_loc=node.name_loc, arrow_loc=node.arrow_loc, colon_loc=node.colon_loc, at_locs=node.at_locs, - loc=node.loc) + loc=node.loc, remote_fn=remote_fn) try: self.env_stack.append(node.typing_env) @@ -777,7 +787,7 @@ class TypedtreeHasher(algorithm.Visitor): return hash(tuple(freeze(getattr(node, field_name)) for field_name in fields)) class Stitcher: - def __init__(self, core, dmgr, engine=None, print_as_rpc=True): + def __init__(self, core, dmgr, engine=None, print_as_rpc=True, destination=0, subkernel_arg_types=[]): self.core = core self.dmgr = dmgr if engine is None: @@ -803,11 +813,19 @@ class Stitcher: self.value_map = defaultdict(lambda: []) self.definitely_changed = False + self.destination = destination + self.first_call = True + # for non-annotated subkernels: + # main kernel inferencer output with types of arguments + self.subkernel_arg_types = subkernel_arg_types + def stitch_call(self, function, args, kwargs, callback=None): # We synthesize source code for the initial call so that # diagnostics would have something meaningful to display to the user. synthesizer = self._synthesizer(self._function_loc(function.artiq_embedded.function)) - call_node = synthesizer.call(function, args, kwargs, callback) + # first call of a subkernel will get its arguments from remote (DRTIO) + remote_fn = self.destination != 0 + call_node = synthesizer.call(function, args, kwargs, callback, remote_fn=remote_fn) synthesizer.finalize() self.typedtree.append(call_node) @@ -919,6 +937,10 @@ class Stitcher: return [diagnostic.Diagnostic("note", "in kernel function here", {}, call_loc)] + elif fn_kind == 'subkernel': + return [diagnostic.Diagnostic("note", + "in subkernel call here", {}, + call_loc)] else: assert False else: @@ -938,7 +960,7 @@ class Stitcher: self._function_loc(function), notes=self._call_site_note(loc, fn_kind)) self.engine.process(diag) - elif fn_kind == 'rpc' and param.default is not inspect.Parameter.empty: + elif fn_kind == 'rpc' or fn_kind == 'subkernel' and param.default is not inspect.Parameter.empty: notes = [] notes.append(diagnostic.Diagnostic("note", "expanded from here while trying to infer a type for an" @@ -957,11 +979,18 @@ class Stitcher: Inferencer(engine=self.engine).visit(ast) IntMonomorphizer(engine=self.engine).visit(ast) return ast.type - else: - # Let the rest of the program decide. 
- return types.TVar() + elif fn_kind == 'kernel' and self.first_call and self.destination != 0: + # subkernels do not have access to the main kernel code to infer + # arg types - so these are cached and passed onto subkernel + # compilation, to avoid having to annotate them fully + for name, typ in self.subkernel_arg_types: + if param.name == name: + return typ - def _quote_embedded_function(self, function, flags): + # Let the rest of the program decide. + return types.TVar() + + def _quote_embedded_function(self, function, flags, remote_fn=False): # we are now parsing new functions... definitely changed the type self.definitely_changed = True @@ -1060,7 +1089,7 @@ class Stitcher: engine=self.engine, prelude=self.prelude, globals=self.globals, host_environment=host_environment, quote=self._quote) - function_node = asttyped_rewriter.visit_quoted_function(function_node, embedded_function) + function_node = asttyped_rewriter.visit_quoted_function(function_node, embedded_function, remote_fn) function_node.flags = flags # Add it into our typedtree so that it gets inferenced and codegen'd. @@ -1174,7 +1203,6 @@ class Stitcher: signature = inspect.signature(function) arg_types = OrderedDict() - optarg_types = OrderedDict() for param in signature.parameters.values(): if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD: diag = diagnostic.Diagnostic("error", @@ -1212,6 +1240,40 @@ class Stitcher: self.functions[function] = function_type return function_type + def _quote_subkernel(self, function, loc): + if isinstance(function, SpecializedFunction): + host_function = function.host_function + else: + host_function = function + ret_type = builtins.TNone() + signature = inspect.signature(host_function) + + if signature.return_annotation is not inspect.Signature.empty: + ret_type = self._extract_annot(host_function, signature.return_annotation, + "return type", loc, fn_kind='subkernel') + arg_types = OrderedDict() + optarg_types = OrderedDict() + for param in signature.parameters.values(): + if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD: + diag = diagnostic.Diagnostic("error", + "subkernels must only use positional arguments; '{argument}' isn't", + {"argument": param.name}, + self._function_loc(function), + notes=self._call_site_note(loc, fn_kind='subkernel')) + self.engine.process(diag) + + arg_type = self._type_of_param(function, loc, param, fn_kind='subkernel') + if param.default is inspect.Parameter.empty: + arg_types[param.name] = arg_type + else: + optarg_types[param.name] = arg_type + + function_type = types.TSubkernel(arg_types, optarg_types, ret_type, + sid=self.embedding_map.store_object(host_function), + destination=host_function.artiq_embedded.destination) + self.functions[function] = function_type + return function_type + def _quote_rpc(self, function, loc): if isinstance(function, SpecializedFunction): host_function = function.host_function @@ -1271,8 +1333,18 @@ class Stitcher: (host_function.artiq_embedded.core_name is None and host_function.artiq_embedded.portable is False and host_function.artiq_embedded.syscall is None and + host_function.artiq_embedded.destination is None and host_function.artiq_embedded.forbidden is False): self._quote_rpc(function, loc) + elif host_function.artiq_embedded.destination is not None and \ + host_function.artiq_embedded.destination != self.destination: + # treat subkernels as kernels if running on the same device + if not 0 < host_function.artiq_embedded.destination <= 255: + diag = diagnostic.Diagnostic("error", + "subkernel destination 
must be between 1 and 255 (inclusive)", {}, + self._function_loc(host_function)) + self.engine.process(diag) + self._quote_subkernel(function, loc) elif host_function.artiq_embedded.function is not None: if host_function.__name__ == "": note = diagnostic.Diagnostic("note", @@ -1296,8 +1368,13 @@ class Stitcher: notes=[note]) self.engine.process(diag) + destination = host_function.artiq_embedded.destination + # remote_fn only for first call in subkernels + remote_fn = destination is not None and self.first_call self._quote_embedded_function(function, - flags=host_function.artiq_embedded.flags) + flags=host_function.artiq_embedded.flags, + remote_fn=remote_fn) + self.first_call = False elif host_function.artiq_embedded.syscall is not None: # Insert a storage-less global whose type instructs the compiler # to perform a system call instead of a regular call. diff --git a/artiq/compiler/ir.py b/artiq/compiler/ir.py index 88ef3a151..3af11ccd0 100644 --- a/artiq/compiler/ir.py +++ b/artiq/compiler/ir.py @@ -706,6 +706,64 @@ class SetLocal(Instruction): def value(self): return self.operands[1] +class GetArgFromRemote(Instruction): + """ + An instruction that receives function arguments from remote + (ie. subkernel in DRTIO context) + + :ivar arg_name: (string) argument name + :ivar arg_type: argument type + """ + + """ + :param arg_name: (string) argument name + :param arg_type: argument type + """ + def __init__(self, arg_name, arg_type, name=""): + assert isinstance(arg_name, str) + super().__init__([], arg_type, name) + self.arg_name = arg_name + self.arg_type = arg_type + + def copy(self, mapper): + self_copy = super().copy(mapper) + self_copy.arg_name = self.arg_name + self_copy.arg_type = self.arg_type + return self_copy + + def opcode(self): + return "getargfromremote({})".format(repr(self.arg_name)) + +class GetOptArgFromRemote(GetArgFromRemote): + """ + An instruction that may or may not retrieve an optional function argument + from remote, depending on number of values received by firmware. + + :ivar rcv_count: number of received values, + determined by firmware + :ivar index: (integer) index of the current argument, + in reference to remote arguments + """ + + """ + :param rcv_count: number of received valuese + :param index: (integer) index of the current argument, + in reference to remote arguments + """ + def __init__(self, arg_name, arg_type, rcv_count, index, name=""): + super().__init__(arg_name, arg_type, name) + self.rcv_count = rcv_count + self.index = index + + def copy(self, mapper): + self_copy = super().copy(mapper) + self_copy.rcv_count = self.rcv_count + self_copy.index = self.index + return self_copy + + def opcode(self): + return "getoptargfromremote({})".format(repr(self.arg_name)) + class GetAttr(Instruction): """ An intruction that loads an attribute from an object, @@ -728,7 +786,7 @@ class GetAttr(Instruction): typ = obj.type.attributes[attr] else: typ = obj.type.constructor.attributes[attr] - if types.is_function(typ) or types.is_rpc(typ): + if types.is_function(typ) or types.is_rpc(typ) or types.is_subkernel(typ): typ = types.TMethod(obj.type, typ) super().__init__([obj], typ, name) self.attr = attr @@ -1190,14 +1248,18 @@ class IndirectBranch(Terminator): class Return(Terminator): """ A return instruction. 
+ :param remote_return: (bool) + marks a return in subkernel context, + where the return value is sent back through DRTIO """ """ :param value: (:class:`Value`) return value """ - def __init__(self, value, name=""): + def __init__(self, value, remote_return=False, name=""): assert isinstance(value, Value) super().__init__([value], builtins.TNone(), name) + self.remote_return = remote_return def opcode(self): return "return" diff --git a/artiq/compiler/module.py b/artiq/compiler/module.py index f3bc35cde..cfac4e26e 100644 --- a/artiq/compiler/module.py +++ b/artiq/compiler/module.py @@ -84,6 +84,8 @@ class Module: constant_hoister.process(self.artiq_ir) if remarks: invariant_detection.process(self.artiq_ir) + # for subkernels: main kernel inferencer output, to be passed to further compilations + self.subkernel_arg_types = inferencer.subkernel_arg_types def build_llvm_ir(self, target): """Compile the module to LLVM IR for the specified target.""" diff --git a/artiq/compiler/prelude.py b/artiq/compiler/prelude.py index 13f319650..effbca87c 100644 --- a/artiq/compiler/prelude.py +++ b/artiq/compiler/prelude.py @@ -37,6 +37,7 @@ def globals(): # ARTIQ decorators "kernel": builtins.fn_kernel(), + "subkernel": builtins.fn_kernel(), "portable": builtins.fn_kernel(), "rpc": builtins.fn_kernel(), @@ -54,4 +55,8 @@ def globals(): # ARTIQ utility functions "rtio_log": builtins.fn_rtio_log(), "core_log": builtins.fn_print(), + + # ARTIQ subkernel utility functions + "subkernel_await": builtins.fn_subkernel_await(), + "subkernel_preload": builtins.fn_subkernel_preload(), } diff --git a/artiq/compiler/targets.py b/artiq/compiler/targets.py index 0dd835a0a..5f043eb0e 100644 --- a/artiq/compiler/targets.py +++ b/artiq/compiler/targets.py @@ -94,8 +94,9 @@ class Target: tool_symbolizer = "llvm-symbolizer" tool_cxxfilt = "llvm-cxxfilt" - def __init__(self): + def __init__(self, subkernel_id=None): self.llcontext = ll.Context() + self.subkernel_id = subkernel_id def target_machine(self): lltarget = llvm.Target.from_triple(self.triple) @@ -148,7 +149,8 @@ class Target: ir.BasicBlock._dump_loc = False type_printer = types.TypePrinter() - _dump(os.getenv("ARTIQ_DUMP_IR"), "ARTIQ IR", ".txt", + suffix = "_subkernel_{}".format(self.subkernel_id) if self.subkernel_id is not None else "" + _dump(os.getenv("ARTIQ_DUMP_IR"), "ARTIQ IR", suffix + ".txt", lambda: "\n".join(fn.as_entity(type_printer) for fn in module.artiq_ir)) llmod = module.build_llvm_ir(self) @@ -160,12 +162,12 @@ class Target: _dump("", "LLVM IR (broken)", ".ll", lambda: str(llmod)) raise - _dump(os.getenv("ARTIQ_DUMP_UNOPT_LLVM"), "LLVM IR (generated)", "_unopt.ll", + _dump(os.getenv("ARTIQ_DUMP_UNOPT_LLVM"), "LLVM IR (generated)", suffix + "_unopt.ll", lambda: str(llparsedmod)) self.optimize(llparsedmod) - _dump(os.getenv("ARTIQ_DUMP_LLVM"), "LLVM IR (optimized)", ".ll", + _dump(os.getenv("ARTIQ_DUMP_LLVM"), "LLVM IR (optimized)", suffix + ".ll", lambda: str(llparsedmod)) return llparsedmod diff --git a/artiq/compiler/transforms/artiq_ir_generator.py b/artiq/compiler/transforms/artiq_ir_generator.py index 7ede45531..489739ba7 100644 --- a/artiq/compiler/transforms/artiq_ir_generator.py +++ b/artiq/compiler/transforms/artiq_ir_generator.py @@ -108,6 +108,7 @@ class ARTIQIRGenerator(algorithm.Visitor): self.current_args = None self.current_assign = None self.current_exception = None + self.current_remote_fn = False self.break_target = None self.continue_target = None self.return_target = None @@ -211,7 +212,8 @@ class 
ARTIQIRGenerator(algorithm.Visitor): old_priv_env, self.current_private_env = self.current_private_env, priv_env self.generic_visit(node) - self.terminate(ir.Return(ir.Constant(None, builtins.TNone()))) + self.terminate(ir.Return(ir.Constant(None, builtins.TNone()), + remote_return=self.current_remote_fn)) return self.functions finally: @@ -294,6 +296,8 @@ class ARTIQIRGenerator(algorithm.Visitor): old_block, self.current_block = self.current_block, entry old_globals, self.current_globals = self.current_globals, node.globals_in_scope + old_remote_fn = self.current_remote_fn + self.current_remote_fn = getattr(node, "remote_fn", False) env_without_globals = \ {var: node.typing_env[var] @@ -326,7 +330,8 @@ class ARTIQIRGenerator(algorithm.Visitor): self.terminate(ir.Return(result)) elif builtins.is_none(typ.ret): if not self.current_block.is_terminated(): - self.current_block.append(ir.Return(ir.Constant(None, builtins.TNone()))) + self.current_block.append(ir.Return(ir.Constant(None, builtins.TNone()), + remote_return=self.current_remote_fn)) else: if not self.current_block.is_terminated(): if len(self.current_block.predecessors()) != 0: @@ -345,6 +350,7 @@ class ARTIQIRGenerator(algorithm.Visitor): self.current_block = old_block self.current_globals = old_globals self.current_env = old_env + self.current_remote_fn = old_remote_fn if not is_lambda: self.current_private_env = old_priv_env @@ -367,7 +373,8 @@ class ARTIQIRGenerator(algorithm.Visitor): return_value = self.visit(node.value) if self.return_target is None: - self.append(ir.Return(return_value)) + self.append(ir.Return(return_value, + remote_return=self.current_remote_fn)) else: self.append(ir.SetLocal(self.current_private_env, "$return", return_value)) self.append(ir.Branch(self.return_target)) @@ -2524,6 +2531,33 @@ class ARTIQIRGenerator(algorithm.Visitor): or types.is_builtin(typ, "at_mu"): return self.append(ir.Builtin(typ.name, [self.visit(arg) for arg in node.args], node.type)) + elif types.is_builtin(typ, "subkernel_await"): + if len(node.args) == 2 and len(node.keywords) == 0: + fn = node.args[0].type + timeout = self.visit(node.args[1]) + elif len(node.args) == 1 and len(node.keywords) == 0: + fn = node.args[0].type + timeout = ir.Constant(10_000, builtins.TInt64()) + else: + assert False + if types.is_method(fn): + fn = types.get_method_function(fn) + sid = ir.Constant(fn.sid, builtins.TInt32()) + if not builtins.is_none(fn.ret): + ret = self.append(ir.Builtin("subkernel_retrieve_return", [sid, timeout], fn.ret)) + else: + ret = ir.Constant(None, builtins.TNone()) + self.append(ir.Builtin("subkernel_await_finish", [sid, timeout], builtins.TNone())) + return ret + elif types.is_builtin(typ, "subkernel_preload"): + if len(node.args) == 1 and len(node.keywords) == 0: + fn = node.args[0].type + else: + assert False + if types.is_method(fn): + fn = types.get_method_function(fn) + sid = ir.Constant(fn.sid, builtins.TInt32()) + return self.append(ir.Builtin("subkernel_preload", [sid], builtins.TNone())) elif types.is_exn_constructor(typ): return self.alloc_exn(node.type, *[self.visit(arg_node) for arg_node in node.args]) elif types.is_constructor(typ): @@ -2535,8 +2569,8 @@ class ARTIQIRGenerator(algorithm.Visitor): node.loc) self.engine.process(diag) - def _user_call(self, callee, positional, keywords, arg_exprs={}): - if types.is_function(callee.type) or types.is_rpc(callee.type): + def _user_call(self, callee, positional, keywords, arg_exprs={}, remote_fn=False): + if types.is_function(callee.type) or 
types.is_rpc(callee.type) or types.is_subkernel(callee.type): func = callee self_arg = None fn_typ = callee.type @@ -2551,16 +2585,50 @@ class ARTIQIRGenerator(algorithm.Visitor): else: assert False - if types.is_rpc(fn_typ): - if self_arg is None: + if types.is_rpc(fn_typ) or types.is_subkernel(fn_typ): + if self_arg is None or types.is_subkernel(fn_typ): + # self is not passed to subkernels by remote args = positional - else: + elif self_arg is not None: args = [self_arg] + positional for keyword in keywords: arg = keywords[keyword] args.append(self.append(ir.Alloc([ir.Constant(keyword, builtins.TStr()), arg], ir.TKeyword(arg.type)))) + elif remote_fn: + assert self_arg is None + assert len(fn_typ.args) >= len(positional) + assert len(keywords) == 0 # no keyword support + args = [None] * fn_typ.arity() + index = 0 + # fill in first available args + for arg in positional: + args[index] = arg + index += 1 + + # remaining args are received through DRTIO + if index < len(args): + # min/max args received remotely (minus already filled) + offset = index + min_args = ir.Constant(len(fn_typ.args)-offset, builtins.TInt8()) + max_args = ir.Constant(fn_typ.arity()-offset, builtins.TInt8()) + + rcvd_count = self.append(ir.Builtin("subkernel_await_args", [min_args, max_args], builtins.TNone())) + arg_types = list(fn_typ.args.items())[offset:] + # obligatory arguments + for arg_name, arg_type in arg_types: + args[index] = self.append(ir.GetArgFromRemote(arg_name, arg_type, + name="ARG.{}".format(arg_name))) + index += 1 + + # optional arguments + for optarg_name, optarg_type in fn_typ.optargs.items(): + idx = ir.Constant(index-offset, builtins.TInt8()) + args[index] = \ + self.append(ir.GetOptArgFromRemote(optarg_name, optarg_type, rcvd_count, idx)) + index += 1 + else: args = [None] * (len(fn_typ.args) + len(fn_typ.optargs)) @@ -2646,7 +2714,8 @@ class ARTIQIRGenerator(algorithm.Visitor): else: assert False, "Broadcasting for {} arguments not implemented".format(len) else: - insn = self._user_call(callee, args, keywords, node.arg_exprs) + remote_fn = getattr(node, "remote_fn", False) + insn = self._user_call(callee, args, keywords, node.arg_exprs, remote_fn) if isinstance(node.func, asttyped.AttributeT): attr_node = node.func self.method_map[(attr_node.value.type.find(), diff --git a/artiq/compiler/transforms/asttyped_rewriter.py b/artiq/compiler/transforms/asttyped_rewriter.py index 4c3112be6..07ab9ded2 100644 --- a/artiq/compiler/transforms/asttyped_rewriter.py +++ b/artiq/compiler/transforms/asttyped_rewriter.py @@ -238,7 +238,7 @@ class ASTTypedRewriter(algorithm.Transformer): body=node.body, decorator_list=node.decorator_list, keyword_loc=node.keyword_loc, name_loc=node.name_loc, arrow_loc=node.arrow_loc, colon_loc=node.colon_loc, at_locs=node.at_locs, - loc=node.loc) + loc=node.loc, remote_fn=False) try: self.env_stack.append(node.typing_env) @@ -439,8 +439,9 @@ class ASTTypedRewriter(algorithm.Transformer): def visit_Call(self, node): node = self.generic_visit(node) - node = asttyped.CallT(type=types.TVar(), iodelay=None, arg_exprs={}, - func=node.func, args=node.args, keywords=node.keywords, + node = asttyped.CallT(type=types.TVar(), iodelay=None, arg_exprs={}, + remote_fn=False, func=node.func, + args=node.args, keywords=node.keywords, starargs=node.starargs, kwargs=node.kwargs, star_loc=node.star_loc, dstar_loc=node.dstar_loc, begin_loc=node.begin_loc, end_loc=node.end_loc, loc=node.loc) diff --git a/artiq/compiler/transforms/inferencer.py b/artiq/compiler/transforms/inferencer.py index 
57bbedf82..0b95a60e5 100644 --- a/artiq/compiler/transforms/inferencer.py +++ b/artiq/compiler/transforms/inferencer.py @@ -46,6 +46,7 @@ class Inferencer(algorithm.Visitor): self.function = None # currently visited function, for Return inference self.in_loop = False self.has_return = False + self.subkernel_arg_types = dict() def _unify(self, typea, typeb, loca, locb, makenotes=None, when=""): try: @@ -178,7 +179,7 @@ class Inferencer(algorithm.Visitor): # Convert to a method. attr_type = types.TMethod(object_type, attr_type) self._unify_method_self(attr_type, attr_name, attr_loc, loc, value_node.loc) - elif types.is_rpc(attr_type): + elif types.is_rpc(attr_type) or types.is_subkernel(attr_type): # Convert to a method. We don't have to bother typechecking # the self argument, since for RPCs anything goes. attr_type = types.TMethod(object_type, attr_type) @@ -1293,6 +1294,55 @@ class Inferencer(algorithm.Visitor): # Ignored. self._unify(node.type, builtins.TNone(), node.loc, None) + elif types.is_builtin(typ, "subkernel_await"): + valid_forms = lambda: [ + valid_form("subkernel_await(f: subkernel) -> f return type"), + valid_form("subkernel_await(f: subkernel, timeout: numpy.int64) -> f return type") + ] + if 1 <= len(node.args) <= 2: + arg0 = node.args[0].type + if types.is_var(arg0): + pass # undetermined yet + else: + if types.is_method(arg0): + fn = types.get_method_function(arg0) + elif types.is_function(arg0) or types.is_subkernel(arg0): + fn = arg0 + else: + diagnose(valid_forms()) + self._unify(node.type, fn.ret, + node.loc, None) + if len(node.args) == 2: + arg1 = node.args[1] + if types.is_var(arg1.type): + pass + elif builtins.is_int(arg1.type): + # promote to TInt64 + self._unify(arg1.type, builtins.TInt64(), + arg1.loc, None) + else: + diagnose(valid_forms()) + else: + diagnose(valid_forms()) + elif types.is_builtin(typ, "subkernel_preload"): + valid_forms = lambda: [ + valid_form("subkernel_preload(f: subkernel) -> None") + ] + if len(node.args) == 1: + arg0 = node.args[0].type + if types.is_var(arg0): + pass # undetermined yet + else: + if types.is_method(arg0): + fn = types.get_method_function(arg0) + elif types.is_function(arg0) or types.is_subkernel(arg0): + fn = arg0 + else: + diagnose(valid_forms()) + self._unify(node.type, fn.ret, + node.loc, None) + else: + diagnose(valid_forms()) else: assert False @@ -1331,6 +1381,7 @@ class Inferencer(algorithm.Visitor): typ_args = typ.args typ_optargs = typ.optargs typ_ret = typ.ret + typ_func = typ else: typ_self = types.get_method_self(typ) typ_func = types.get_method_function(typ) @@ -1388,12 +1439,23 @@ class Inferencer(algorithm.Visitor): other_node=node.args[0]) self._unify(node.type, ret, node.loc, None) return + if types.is_subkernel(typ_func) and typ_func.sid not in self.subkernel_arg_types: + self.subkernel_arg_types[typ_func.sid] = [] for actualarg, (formalname, formaltyp) in \ zip(node.args, list(typ_args.items()) + list(typ_optargs.items())): self._unify(actualarg.type, formaltyp, actualarg.loc, None) passed_args[formalname] = actualarg.loc + if types.is_subkernel(typ_func): + if types.is_instance(actualarg.type): + # objects cannot be passed to subkernels, as rpc code doesn't support them + diag = diagnostic.Diagnostic("error", + "argument '{name}' of type: {typ} is not supported in subkernels", + {"name": formalname, "typ": actualarg.type}, + actualarg.loc, []) + self.engine.process(diag) + self.subkernel_arg_types[typ_func.sid].append((formalname, formaltyp)) for keyword in node.keywords: if keyword.arg in 
passed_args: @@ -1424,7 +1486,7 @@ class Inferencer(algorithm.Visitor): passed_args[keyword.arg] = keyword.arg_loc for formalname in typ_args: - if formalname not in passed_args: + if formalname not in passed_args and not node.remote_fn: note = diagnostic.Diagnostic("note", "the called function is of type {type}", {"type": types.TypePrinter().name(node.func.type)}, diff --git a/artiq/compiler/transforms/iodelay_estimator.py b/artiq/compiler/transforms/iodelay_estimator.py index 90bfefdb3..fcee126cf 100644 --- a/artiq/compiler/transforms/iodelay_estimator.py +++ b/artiq/compiler/transforms/iodelay_estimator.py @@ -280,7 +280,7 @@ class IODelayEstimator(algorithm.Visitor): context="as an argument for delay_mu()") call_delay = value elif not types.is_builtin(typ): - if types.is_function(typ) or types.is_rpc(typ): + if types.is_function(typ) or types.is_rpc(typ) or types.is_subkernel(typ): offset = 0 elif types.is_method(typ): offset = 1 @@ -288,7 +288,7 @@ class IODelayEstimator(algorithm.Visitor): else: assert False - if types.is_rpc(typ): + if types.is_rpc(typ) or types.is_subkernel(typ): call_delay = iodelay.Const(0) else: delay = typ.find().delay.find() @@ -311,13 +311,20 @@ class IODelayEstimator(algorithm.Visitor): args[arg_name] = arg_node free_vars = delay.duration.free_vars() - node.arg_exprs = { - arg: self.evaluate(args[arg], abort=abort, - context="in the expression for argument '{}' " - "that affects I/O delay".format(arg)) - for arg in free_vars - } - call_delay = delay.duration.fold(node.arg_exprs) + try: + node.arg_exprs = { + arg: self.evaluate(args[arg], abort=abort, + context="in the expression for argument '{}' " + "that affects I/O delay".format(arg)) + for arg in free_vars + } + call_delay = delay.duration.fold(node.arg_exprs) + except KeyError as e: + if getattr(node, "remote_fn", False): + note = diagnostic.Diagnostic("note", + "function called here", {}, + node.loc) + self.abort("due to arguments passed remotely", node.loc, note) else: assert False else: diff --git a/artiq/compiler/transforms/llvm_ir_generator.py b/artiq/compiler/transforms/llvm_ir_generator.py index e3a554cf3..88412a04c 100644 --- a/artiq/compiler/transforms/llvm_ir_generator.py +++ b/artiq/compiler/transforms/llvm_ir_generator.py @@ -215,7 +215,7 @@ class LLVMIRGenerator: typ = typ.find() if types.is_tuple(typ): return ll.LiteralStructType([self.llty_of_type(eltty) for eltty in typ.elts]) - elif types.is_rpc(typ) or types.is_external_function(typ): + elif types.is_rpc(typ) or types.is_external_function(typ) or types.is_subkernel(typ): if for_return: return llvoid else: @@ -398,6 +398,15 @@ class LLVMIRGenerator: elif name == "rpc_recv": llty = ll.FunctionType(lli32, [llptr]) + elif name == "subkernel_send_message": + llty = ll.FunctionType(llvoid, [lli32, lli8, llsliceptr, llptrptr]) + elif name == "subkernel_load_run": + llty = ll.FunctionType(llvoid, [lli32, lli1]) + elif name == "subkernel_await_finish": + llty = ll.FunctionType(llvoid, [lli32, lli64]) + elif name == "subkernel_await_message": + llty = ll.FunctionType(lli8, [lli32, lli64, lli8, lli8]) + # with now-pinning elif name == "now": llty = lli64 @@ -874,6 +883,53 @@ class LLVMIRGenerator: llvalue = self.llbuilder.bitcast(llvalue, llptr.type.pointee) return self.llbuilder.store(llvalue, llptr) + def process_GetArgFromRemote(self, insn): + llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [], + name="subkernel.arg.stack") + llval = self._build_rpc_recv(insn.arg_type, llstackptr) + return llval + + def 
process_GetOptArgFromRemote(self, insn): + # optarg = index < rcv_count ? Some(rcv_recv()) : None + llhead = self.llbuilder.basic_block + llrcv = self.llbuilder.append_basic_block(name="optarg.get.{}".format(insn.arg_name)) + + # argument received + self.llbuilder.position_at_end(llrcv) + llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [], + name="subkernel.arg.stack") + llval = self._build_rpc_recv(insn.arg_type, llstackptr) + llrpcretblock = self.llbuilder.basic_block # 'return' from rpc_recv, will be needed later + + # create the tail block, needs to be after the rpc recv tail block + lltail = self.llbuilder.append_basic_block(name="optarg.tail.{}".format(insn.arg_name)) + self.llbuilder.branch(lltail) + + # go back to head to add a branch to the tail + self.llbuilder.position_at_end(llhead) + llargrcvd = self.llbuilder.icmp_unsigned("<", self.map(insn.index), self.map(insn.rcv_count)) + self.llbuilder.cbranch(llargrcvd, llrcv, lltail) + + # argument not received/after arg recvd + self.llbuilder.position_at_end(lltail) + + llargtype = self.llty_of_type(insn.arg_type) + + llphi_arg_present = self.llbuilder.phi(lli1, name="optarg.phi.present.{}".format(insn.arg_name)) + llphi_arg = self.llbuilder.phi(llargtype, name="optarg.phi.{}".format(insn.arg_name)) + + llphi_arg_present.add_incoming(ll.Constant(lli1, 0), llhead) + llphi_arg.add_incoming(ll.Constant(llargtype, ll.Undefined), llhead) + + llphi_arg_present.add_incoming(ll.Constant(lli1, 1), llrpcretblock) + llphi_arg.add_incoming(llval, llrpcretblock) + + lloptarg = ll.Constant(ll.LiteralStructType([lli1, llargtype]), ll.Undefined) + lloptarg = self.llbuilder.insert_value(lloptarg, llphi_arg_present, 0) + lloptarg = self.llbuilder.insert_value(lloptarg, llphi_arg, 1) + + return lloptarg + def attr_index(self, typ, attr): return list(typ.attributes.keys()).index(attr) @@ -898,8 +954,8 @@ class LLVMIRGenerator: def get_global_closure_ptr(self, typ, attr): closure_type = typ.attributes[attr] assert types.is_constructor(typ) - assert types.is_function(closure_type) or types.is_rpc(closure_type) - if types.is_external_function(closure_type) or types.is_rpc(closure_type): + assert types.is_function(closure_type) or types.is_rpc(closure_type) or types.is_subkernel(closure_type) + if types.is_external_function(closure_type) or types.is_rpc(closure_type) or types.is_subkernel(closure_type): return None llty = self.llty_of_type(typ.attributes[attr]) @@ -1344,6 +1400,29 @@ class LLVMIRGenerator: return self.llbuilder.call(self.llbuiltin("delay_mu"), [llinterval]) elif insn.op == "end_catch": return self.llbuilder.call(self.llbuiltin("__artiq_end_catch"), []) + elif insn.op == "subkernel_await_args": + llmin = self.map(insn.operands[0]) + llmax = self.map(insn.operands[1]) + return self.llbuilder.call(self.llbuiltin("subkernel_await_message"), + [ll.Constant(lli32, 0), ll.Constant(lli64, 10_000), llmin, llmax], + name="subkernel.await.args") + elif insn.op == "subkernel_await_finish": + llsid = self.map(insn.operands[0]) + lltimeout = self.map(insn.operands[1]) + return self.llbuilder.call(self.llbuiltin("subkernel_await_finish"), [llsid, lltimeout], + name="subkernel.await.finish") + elif insn.op == "subkernel_retrieve_return": + llsid = self.map(insn.operands[0]) + lltimeout = self.map(insn.operands[1]) + self.llbuilder.call(self.llbuiltin("subkernel_await_message"), [llsid, lltimeout, ll.Constant(lli8, 1), ll.Constant(lli8, 1)], + name="subkernel.await.message") + llstackptr = 
self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [], + name="subkernel.arg.stack") + return self._build_rpc_recv(insn.type, llstackptr) + elif insn.op == "subkernel_preload": + llsid = self.map(insn.operands[0]) + return self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, ll.Constant(lli1, 0)], + name="subkernel.preload") else: assert False @@ -1426,6 +1505,58 @@ class LLVMIRGenerator: return llfun, list(llargs), llarg_attrs, llcallstackptr + def _build_rpc_recv(self, ret, llstackptr, llnormalblock=None, llunwindblock=None): + # T result = { + # void *ret_ptr = alloca(sizeof(T)); + # void *ptr = ret_ptr; + # loop: int size = rpc_recv(ptr); + # // Non-zero: Provide `size` bytes of extra storage for variable-length data. + # if(size) { ptr = alloca(size); goto loop; } + # else *(T*)ret_ptr + # } + llprehead = self.llbuilder.basic_block + llhead = self.llbuilder.append_basic_block(name="rpc.head") + if llunwindblock: + llheadu = self.llbuilder.append_basic_block(name="rpc.head.unwind") + llalloc = self.llbuilder.append_basic_block(name="rpc.continue") + lltail = self.llbuilder.append_basic_block(name="rpc.tail") + + llretty = self.llty_of_type(ret) + llslot = self.llbuilder.alloca(llretty, name="rpc.ret.alloc") + llslotgen = self.llbuilder.bitcast(llslot, llptr, name="rpc.ret.ptr") + self.llbuilder.branch(llhead) + + self.llbuilder.position_at_end(llhead) + llphi = self.llbuilder.phi(llslotgen.type, name="rpc.ptr") + llphi.add_incoming(llslotgen, llprehead) + if llunwindblock: + llsize = self.llbuilder.invoke(self.llbuiltin("rpc_recv"), [llphi], + llheadu, llunwindblock, + name="rpc.size.next") + self.llbuilder.position_at_end(llheadu) + else: + llsize = self.llbuilder.call(self.llbuiltin("rpc_recv"), [llphi], + name="rpc.size.next") + lldone = self.llbuilder.icmp_unsigned('==', llsize, ll.Constant(llsize.type, 0), + name="rpc.done") + self.llbuilder.cbranch(lldone, lltail, llalloc) + + self.llbuilder.position_at_end(llalloc) + llalloca = self.llbuilder.alloca(lli8, llsize, name="rpc.alloc") + llalloca.align = self.max_target_alignment + llphi.add_incoming(llalloca, llalloc) + self.llbuilder.branch(llhead) + + self.llbuilder.position_at_end(lltail) + llret = self.llbuilder.load(llslot, name="rpc.ret") + if not ret.fold(False, lambda r, t: r or builtins.is_allocated(t)): + # We didn't allocate anything except the slot for the value itself. + # Don't waste stack space. + self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr]) + if llnormalblock: + self.llbuilder.branch(llnormalblock) + return llret + def _build_rpc(self, fun_loc, fun_type, args, llnormalblock, llunwindblock): llservice = ll.Constant(lli32, fun_type.service) @@ -1501,57 +1632,103 @@ class LLVMIRGenerator: return ll.Undefined - # T result = { - # void *ret_ptr = alloca(sizeof(T)); - # void *ptr = ret_ptr; - # loop: int size = rpc_recv(ptr); - # // Non-zero: Provide `size` bytes of extra storage for variable-length data. 
- # if(size) { ptr = alloca(size); goto loop; } - # else *(T*)ret_ptr - # } - llprehead = self.llbuilder.basic_block - llhead = self.llbuilder.append_basic_block(name="rpc.head") - if llunwindblock: - llheadu = self.llbuilder.append_basic_block(name="rpc.head.unwind") - llalloc = self.llbuilder.append_basic_block(name="rpc.continue") - lltail = self.llbuilder.append_basic_block(name="rpc.tail") + llret = self._build_rpc_recv(fun_type.ret, llstackptr, llnormalblock, llunwindblock) - llretty = self.llty_of_type(fun_type.ret) - llslot = self.llbuilder.alloca(llretty, name="rpc.ret.alloc") - llslotgen = self.llbuilder.bitcast(llslot, llptr, name="rpc.ret.ptr") - self.llbuilder.branch(llhead) - - self.llbuilder.position_at_end(llhead) - llphi = self.llbuilder.phi(llslotgen.type, name="rpc.ptr") - llphi.add_incoming(llslotgen, llprehead) - if llunwindblock: - llsize = self.llbuilder.invoke(self.llbuiltin("rpc_recv"), [llphi], - llheadu, llunwindblock, - name="rpc.size.next") - self.llbuilder.position_at_end(llheadu) - else: - llsize = self.llbuilder.call(self.llbuiltin("rpc_recv"), [llphi], - name="rpc.size.next") - lldone = self.llbuilder.icmp_unsigned('==', llsize, ll.Constant(llsize.type, 0), - name="rpc.done") - self.llbuilder.cbranch(lldone, lltail, llalloc) - - self.llbuilder.position_at_end(llalloc) - llalloca = self.llbuilder.alloca(lli8, llsize, name="rpc.alloc") - llalloca.align = self.max_target_alignment - llphi.add_incoming(llalloca, llalloc) - self.llbuilder.branch(llhead) - - self.llbuilder.position_at_end(lltail) - llret = self.llbuilder.load(llslot, name="rpc.ret") - if not fun_type.ret.fold(False, lambda r, t: r or builtins.is_allocated(t)): - # We didn't allocate anything except the slot for the value itself. - # Don't waste stack space. 
- self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr]) - if llnormalblock: - self.llbuilder.branch(llnormalblock) return llret + def _build_subkernel_call(self, fun_loc, fun_type, args): + llsid = ll.Constant(lli32, fun_type.sid) + tag = b"" + + for arg in args: + def arg_error_handler(typ): + printer = types.TypePrinter() + note = diagnostic.Diagnostic("note", + "value of type {type}", + {"type": printer.name(typ)}, + arg.loc) + diag = diagnostic.Diagnostic("error", + "type {type} is not supported in subkernel calls", + {"type": printer.name(arg.type)}, + arg.loc, notes=[note]) + self.engine.process(diag) + tag += ir.rpc_tag(arg.type, arg_error_handler) + tag += b":" + + # run the kernel first + self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, ll.Constant(lli1, 1)]) + + # arg sent in the same vein as RPC + llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [], + name="subkernel.stack") + + lltag = self.llconst_of_const(ir.Constant(tag, builtins.TStr())) + lltagptr = self.llbuilder.alloca(lltag.type) + self.llbuilder.store(lltag, lltagptr) + + if args: + # only send args if there's anything to send, 'self' is excluded + llargs = self.llbuilder.alloca(llptr, ll.Constant(lli32, len(args)), + name="subkernel.args") + for index, arg in enumerate(args): + if builtins.is_none(arg.type): + llargslot = self.llbuilder.alloca(llunit, + name="subkernel.arg{}".format(index)) + else: + llarg = self.map(arg) + llargslot = self.llbuilder.alloca(llarg.type, + name="subkernel.arg{}".format(index)) + self.llbuilder.store(llarg, llargslot) + llargslot = self.llbuilder.bitcast(llargslot, llptr) + + llargptr = self.llbuilder.gep(llargs, [ll.Constant(lli32, index)]) + self.llbuilder.store(llargslot, llargptr) + + llargcount = ll.Constant(lli8, len(args)) + + self.llbuilder.call(self.llbuiltin("subkernel_send_message"), + [llsid, llargcount, lltagptr, llargs]) + self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr]) + + return llsid + + def _build_subkernel_return(self, insn): + # builds a remote return. + # unlike args, return only sends one thing. 
+ if builtins.is_none(insn.value().type): + # do not waste time and bandwidth on Nones + return + + def ret_error_handler(typ): + printer = types.TypePrinter() + note = diagnostic.Diagnostic("note", + "value of type {type}", + {"type": printer.name(typ)}, + fun_loc) + diag = diagnostic.Diagnostic("error", + "return type {type} is not supported in subkernel returns", + {"type": printer.name(fun_type.ret)}, + fun_loc, notes=[note]) + self.engine.process(diag) + tag = ir.rpc_tag(insn.value().type, ret_error_handler) + tag += b":" + lltag = self.llconst_of_const(ir.Constant(tag, builtins.TStr())) + lltagptr = self.llbuilder.alloca(lltag.type) + self.llbuilder.store(lltag, lltagptr) + + llrets = self.llbuilder.alloca(llptr, ll.Constant(lli32, 1), + name="subkernel.return") + llret = self.map(insn.value()) + llretslot = self.llbuilder.alloca(llret.type, name="subkernel.retval") + self.llbuilder.store(llret, llretslot) + llretslot = self.llbuilder.bitcast(llretslot, llptr) + self.llbuilder.store(llretslot, llrets) + + llsid = ll.Constant(lli32, 0) # return goes back to master, sid is ignored + lltagcount = ll.Constant(lli8, 1) # only one thing is returned + self.llbuilder.call(self.llbuiltin("subkernel_send_message"), + [llsid, lltagcount, lltagptr, llrets]) + def process_Call(self, insn): functiontyp = insn.target_function().type if types.is_rpc(functiontyp): @@ -1559,6 +1736,10 @@ class LLVMIRGenerator: functiontyp, insn.arguments(), llnormalblock=None, llunwindblock=None) + elif types.is_subkernel(functiontyp): + return self._build_subkernel_call(insn.target_function().loc, + functiontyp, + insn.arguments()) elif types.is_external_function(functiontyp): llfun, llargs, llarg_attrs, llcallstackptr = self._prepare_ffi_call(insn) else: @@ -1595,6 +1776,11 @@ class LLVMIRGenerator: functiontyp, insn.arguments(), llnormalblock, llunwindblock) + elif types.is_subkernel(functiontyp): + return self._build_subkernel_call(insn.target_function().loc, + functiontyp, + insn.arguments(), + llnormalblock, llunwindblock) elif types.is_external_function(functiontyp): llfun, llargs, llarg_attrs, llcallstackptr = self._prepare_ffi_call(insn) else: @@ -1673,7 +1859,8 @@ class LLVMIRGenerator: attrvalue = getattr(value, attr) is_class_function = (types.is_constructor(typ) and types.is_function(typ.attributes[attr]) and - not types.is_external_function(typ.attributes[attr])) + not types.is_external_function(typ.attributes[attr]) and + not types.is_subkernel(typ.attributes[attr])) if is_class_function: attrvalue = self.embedding_map.specialize_function(typ.instance, attrvalue) if not (types.is_instance(typ) and attr in typ.constant_attributes): @@ -1758,7 +1945,8 @@ class LLVMIRGenerator: llelts = [self._quote(v, t, lambda: path() + [str(i)]) for i, (v, t) in enumerate(zip(value, typ.elts))] return ll.Constant(llty, llelts) - elif types.is_rpc(typ) or types.is_external_function(typ) or types.is_builtin_function(typ): + elif types.is_rpc(typ) or types.is_external_function(typ) or \ + types.is_builtin_function(typ) or types.is_subkernel(typ): # RPC, C and builtin functions have no runtime representation. 
return ll.Constant(llty, ll.Undefined) elif types.is_function(typ): @@ -1813,6 +2001,8 @@ class LLVMIRGenerator: return llinsn def process_Return(self, insn): + if insn.remote_return: + self._build_subkernel_return(insn) if builtins.is_none(insn.value().type): return self.llbuilder.ret_void() else: diff --git a/artiq/compiler/types.py b/artiq/compiler/types.py index 1d9336b4d..7f397d308 100644 --- a/artiq/compiler/types.py +++ b/artiq/compiler/types.py @@ -385,6 +385,50 @@ class TRPC(Type): def __hash__(self): return hash(self.service) +class TSubkernel(TFunction): + """ + A kernel to be run on a satellite. + + :ivar args: (:class:`collections.OrderedDict` of string to :class:`Type`) + function arguments + :ivar ret: (:class:`Type`) + return type + :ivar sid: (int) subkernel ID number + :ivar destination: (int) satellite destination number + """ + + attributes = OrderedDict() + + def __init__(self, args, optargs, ret, sid, destination): + assert isinstance(ret, Type) + super().__init__(args, optargs, ret) + self.sid, self.destination = sid, destination + self.delay = TFixedDelay(iodelay.Const(0)) + + def unify(self, other): + if other is self: + return + if isinstance(other, TSubkernel) and \ + self.sid == other.sid and \ + self.destination == other.destination: + self.ret.unify(other.ret) + elif isinstance(other, TVar): + other.unify(self) + else: + raise UnificationError(self, other) + + def __repr__(self): + if getattr(builtins, "__in_sphinx__", False): + return str(self) + return "artiq.compiler.types.TSubkernel({})".format(repr(self.ret)) + + def __eq__(self, other): + return isinstance(other, TSubkernel) and \ + self.sid == other.sid + + def __hash__(self): + return hash(self.sid) + class TBuiltin(Type): """ An instance of builtin type. Every instance of a builtin @@ -644,6 +688,9 @@ def is_function(typ): def is_rpc(typ): return isinstance(typ.find(), TRPC) +def is_subkernel(typ): + return isinstance(typ.find(), TSubkernel) + def is_external_function(typ, name=None): typ = typ.find() if name is None: @@ -810,6 +857,10 @@ class TypePrinter(object): return "[rpc{} #{}](...)->{}".format(typ.service, " async" if typ.is_async else "", self.name(typ.ret, depth + 1)) + elif isinstance(typ, TSubkernel): + return "->{}".format(typ.sid, + typ.destination, + self.name(typ.ret, depth + 1)) elif isinstance(typ, TBuiltinFunction): return "".format(typ.name) elif isinstance(typ, (TConstructor, TExceptionConstructor)): diff --git a/artiq/frontend/artiq_compile.py b/artiq/frontend/artiq_compile.py index fcba5297d..938f5b787 100755 --- a/artiq/frontend/artiq_compile.py +++ b/artiq/frontend/artiq_compile.py @@ -73,8 +73,8 @@ def main(): finally: dataset_db.close_db() - if object_map.has_rpc(): - raise ValueError("Experiment must not use RPC") + if object_map.has_rpc_or_subkernel(): + raise ValueError("Experiment must not use RPC or subkernels") output = args.output if output is None: diff --git a/artiq/language/core.py b/artiq/language/core.py index 5560398dd..2aff914a9 100644 --- a/artiq/language/core.py +++ b/artiq/language/core.py @@ -7,7 +7,7 @@ from functools import wraps import numpy -__all__ = ["kernel", "portable", "rpc", "syscall", "host_only", +__all__ = ["kernel", "portable", "rpc", "subkernel", "syscall", "host_only", "kernel_from_string", "set_time_manager", "set_watchdog_factory", "TerminationRequested"] @@ -21,7 +21,7 @@ __all__.extend(kernel_globals) _ARTIQEmbeddedInfo = namedtuple("_ARTIQEmbeddedInfo", - "core_name portable function syscall forbidden flags") + "core_name portable 
function syscall forbidden destination flags")
 
 def kernel(arg=None, flags={}):
     """
@@ -54,7 +54,7 @@ def kernel(arg=None, flags={}):
                 return getattr(self, arg).run(run_on_core, ((self,) + k_args), k_kwargs)
             run_on_core.artiq_embedded = _ARTIQEmbeddedInfo(
                 core_name=arg, portable=False, function=function, syscall=None,
-                forbidden=False, flags=set(flags))
+                forbidden=False, destination=None, flags=set(flags))
             return run_on_core
         return inner_decorator
     elif arg is None:
@@ -64,6 +64,50 @@ def kernel(arg=None, flags={}):
     else:
         return kernel("core", flags)(arg)
 
+def subkernel(arg=None, destination=0, flags={}):
+    """
+    This decorator marks an object's method or function for execution on a satellite device.
+    The destination must be given, and it must be between 1 and 255 (inclusive).
+
+    Subkernels behave similarly to kernels, with a few key differences:
+
+    - they are started from main kernels,
+    - they do not support RPCs or running subsequent subkernels on other devices,
+    - but they can call other kernels or subkernels with the same destination.
+
+    Subkernels can accept arguments and return values. However, they must be fully
+    annotated with ARTIQ types.
+
+    To call a subkernel, call it like a normal function.
+
+    To await the end of its execution, call ``subkernel_await(subkernel, [timeout])``.
+    The timeout parameter is optional and defaults to 10000 milliseconds. It can be
+    increased for subkernels that take a long time to execute.
+
+    The compiled subkernel is copied to satellites, but not loaded into the kernel core
+    until it is called. For larger subkernels it may take some time before they
+    actually start running. To help with that, subkernels can be preloaded with the
+    ``subkernel_preload(subkernel)`` function. A call to a preloaded subkernel
+    will take less time, but only one subkernel can be preloaded at a time.
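+
+    For example, a minimal subkernel and a kernel awaiting it might look as follows
+    (a sketch only; the function name and the destination number are placeholders)::
+
+        @subkernel(destination=1)
+        def subkernel_add(a: TInt32, b: TInt32) -> TInt32:
+            return a + b
+
+        @kernel
+        def run(self):
+            subkernel_add(2, 2)
+            result = subkernel_await(subkernel_add)
+            assert result == 4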
+ """ + if isinstance(arg, str): + def inner_decorator(function): + @wraps(function) + def run_subkernel(self, *k_args, **k_kwargs): + sid = getattr(self, arg).prepare_subkernel(destination, run_subkernel, ((self,) + k_args), k_kwargs) + getattr(self, arg).run_subkernel(sid) + run_subkernel.artiq_embedded = _ARTIQEmbeddedInfo( + core_name=arg, portable=False, function=function, syscall=None, + forbidden=False, destination=destination, flags=set(flags)) + return run_subkernel + return inner_decorator + elif arg is None: + def inner_decorator(function): + return subkernel(function, destination, flags) + return inner_decorator + else: + return subkernel("core", destination, flags)(arg) + def portable(arg=None, flags={}): """ This decorator marks a function for execution on the same device as its @@ -84,7 +128,7 @@ def portable(arg=None, flags={}): else: arg.artiq_embedded = \ _ARTIQEmbeddedInfo(core_name=None, portable=True, function=arg, syscall=None, - forbidden=False, flags=set(flags)) + forbidden=False, destination=None, flags=set(flags)) return arg def rpc(arg=None, flags={}): @@ -100,7 +144,7 @@ def rpc(arg=None, flags={}): else: arg.artiq_embedded = \ _ARTIQEmbeddedInfo(core_name=None, portable=False, function=arg, syscall=None, - forbidden=False, flags=set(flags)) + forbidden=False, destination=None, flags=set(flags)) return arg def syscall(arg=None, flags={}): @@ -118,7 +162,7 @@ def syscall(arg=None, flags={}): def inner_decorator(function): function.artiq_embedded = \ _ARTIQEmbeddedInfo(core_name=None, portable=False, function=None, - syscall=arg, forbidden=False, + syscall=arg, forbidden=False, destination=None, flags=set(flags)) return function return inner_decorator @@ -136,7 +180,7 @@ def host_only(function): """ function.artiq_embedded = \ _ARTIQEmbeddedInfo(core_name=None, portable=False, function=None, syscall=None, - forbidden=True, flags={}) + forbidden=True, destination=None, flags={}) return function From 8d7194941ecc0fd0e52f38c3f44bbf476a903d23 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 5 Oct 2023 14:36:09 +0800 Subject: [PATCH 007/296] tests: add lit tests for subkernels --- .../lit/embedding/error_subkernel_annot.py | 15 +++++++++++ .../embedding/error_subkernel_annot_return.py | 15 +++++++++++ artiq/test/lit/embedding/subkernel_no_arg.py | 18 +++++++++++++ artiq/test/lit/embedding/subkernel_return.py | 22 ++++++++++++++++ .../lit/embedding/subkernel_return_none.py | 22 ++++++++++++++++ artiq/test/lit/embedding/subkernel_self.py | 25 +++++++++++++++++++ .../test/lit/embedding/subkernel_self_args.py | 25 +++++++++++++++++++ .../test/lit/embedding/subkernel_with_arg.py | 18 +++++++++++++ .../lit/embedding/subkernel_with_opt_arg.py | 21 ++++++++++++++++ 9 files changed, 181 insertions(+) create mode 100644 artiq/test/lit/embedding/error_subkernel_annot.py create mode 100644 artiq/test/lit/embedding/error_subkernel_annot_return.py create mode 100644 artiq/test/lit/embedding/subkernel_no_arg.py create mode 100644 artiq/test/lit/embedding/subkernel_return.py create mode 100644 artiq/test/lit/embedding/subkernel_return_none.py create mode 100644 artiq/test/lit/embedding/subkernel_self.py create mode 100644 artiq/test/lit/embedding/subkernel_self_args.py create mode 100644 artiq/test/lit/embedding/subkernel_with_arg.py create mode 100644 artiq/test/lit/embedding/subkernel_with_opt_arg.py diff --git a/artiq/test/lit/embedding/error_subkernel_annot.py b/artiq/test/lit/embedding/error_subkernel_annot.py new file mode 100644 index 000000000..3f4bc1c5c --- /dev/null +++ 
b/artiq/test/lit/embedding/error_subkernel_annot.py @@ -0,0 +1,15 @@ +# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t +# RUN: OutputCheck %s --file-to-check=%t + +from artiq.language.core import * +from artiq.language.types import * + +# CHECK-L: ${LINE:+2}: error: type annotation for argument 'x', '1', is not an ARTIQ type +@subkernel(destination=1) +def foo(x: 1) -> TNone: + pass + +@kernel +def entrypoint(): + # CHECK-L: ${LINE:+1}: note: in subkernel call here + foo() diff --git a/artiq/test/lit/embedding/error_subkernel_annot_return.py b/artiq/test/lit/embedding/error_subkernel_annot_return.py new file mode 100644 index 000000000..51977ae00 --- /dev/null +++ b/artiq/test/lit/embedding/error_subkernel_annot_return.py @@ -0,0 +1,15 @@ +# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t +# RUN: OutputCheck %s --file-to-check=%t + +from artiq.language.core import * +from artiq.language.types import * + +# CHECK-L: ${LINE:+2}: error: type annotation for return type, '1', is not an ARTIQ type +@subkernel(destination=1) +def foo() -> 1: + pass + +@kernel +def entrypoint(): + # CHECK-L: ${LINE:+1}: note: in subkernel call here + foo() diff --git a/artiq/test/lit/embedding/subkernel_no_arg.py b/artiq/test/lit/embedding/subkernel_no_arg.py new file mode 100644 index 000000000..11d9c793d --- /dev/null +++ b/artiq/test/lit/embedding/subkernel_no_arg.py @@ -0,0 +1,18 @@ +# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding +compile %s +# RUN: OutputCheck %s --file-to-check=%t.ll + +from artiq.language.core import * +from artiq.language.types import * + +@kernel +def entrypoint(): + # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. + # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !. + no_arg() + + +# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr +# CHECK-NOT-L: declare void @subkernel_send_message(i32, { i8*, i32 }*, i8**) local_unnamed_addr +@subkernel(destination=1) +def no_arg() -> TStr: + pass diff --git a/artiq/test/lit/embedding/subkernel_return.py b/artiq/test/lit/embedding/subkernel_return.py new file mode 100644 index 000000000..4845e24ba --- /dev/null +++ b/artiq/test/lit/embedding/subkernel_return.py @@ -0,0 +1,22 @@ +# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding +compile %s +# RUN: OutputCheck %s --file-to-check=%t.ll + +from artiq.language.core import * +from artiq.language.types import * + +@kernel +def entrypoint(): + # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. + # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !. + returning() + # CHECK: call i8 @subkernel_await_message\(i32 1, i64 10000, i8 1, i8 1\), !dbg !. + # CHECK: call void @subkernel_await_finish\(i32 1, i64 10000\), !dbg !. 
+ subkernel_await(returning) + +# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr +# CHECK-NOT-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr +# CHECK-L: declare i8 @subkernel_await_message(i32, i64, i8, i8) local_unnamed_addr +# CHECK-L: declare void @subkernel_await_finish(i32, i64) local_unnamed_addr +@subkernel(destination=1) +def returning() -> TInt32: + return 1 diff --git a/artiq/test/lit/embedding/subkernel_return_none.py b/artiq/test/lit/embedding/subkernel_return_none.py new file mode 100644 index 000000000..353b15c3e --- /dev/null +++ b/artiq/test/lit/embedding/subkernel_return_none.py @@ -0,0 +1,22 @@ +# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding +compile %s +# RUN: OutputCheck %s --file-to-check=%t.ll + +from artiq.language.core import * +from artiq.language.types import * + +@kernel +def entrypoint(): + # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. + # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !. + returning_none() + # CHECK: call void @subkernel_await_finish\(i32 1, i64 10000\), !dbg !. + # CHECK-NOT: call void @subkernel_await_message\(i32 1, i64 10000\), !dbg !. + subkernel_await(returning_none) + +# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr +# CHECK-NOT-L: declare void @subkernel_send_message(i32, { i8*, i32 }*, i8**) local_unnamed_addr +# CHECK-L: declare void @subkernel_await_finish(i32, i64) local_unnamed_addr +# CHECK-NOT-L: declare void @subkernel_await_message(i32, i64) local_unnamed_addr +@subkernel(destination=1) +def returning_none() -> TNone: + pass diff --git a/artiq/test/lit/embedding/subkernel_self.py b/artiq/test/lit/embedding/subkernel_self.py new file mode 100644 index 000000000..8e702bc02 --- /dev/null +++ b/artiq/test/lit/embedding/subkernel_self.py @@ -0,0 +1,25 @@ +# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding +compile %s +# RUN: OutputCheck %s --file-to-check=%t.ll + +from artiq.language.core import * +from artiq.language.types import * + +class A: + @subkernel(destination=1) + def sk(self): + pass + + @kernel + def kernel_entrypoint(self): + # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. + # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !. + self.sk() + +a = A() + +@kernel +def entrypoint(): + a.kernel_entrypoint() + +# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr +# CHECK-NOT-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr diff --git a/artiq/test/lit/embedding/subkernel_self_args.py b/artiq/test/lit/embedding/subkernel_self_args.py new file mode 100644 index 000000000..57969398c --- /dev/null +++ b/artiq/test/lit/embedding/subkernel_self_args.py @@ -0,0 +1,25 @@ +# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding +compile %s +# RUN: OutputCheck %s --file-to-check=%t.ll + +from artiq.language.core import * +from artiq.language.types import * + +class A: + @subkernel(destination=1) + def sk(self, a): + pass + + @kernel + def kernel_entrypoint(self): + # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. + # CHECK: call void @subkernel_send_message\(i32 1, i8 1, .*\), !dbg !. 
+ self.sk(1) + +a = A() + +@kernel +def entrypoint(): + a.kernel_entrypoint() + +# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr +# CHECK-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr diff --git a/artiq/test/lit/embedding/subkernel_with_arg.py b/artiq/test/lit/embedding/subkernel_with_arg.py new file mode 100644 index 000000000..17d80dce7 --- /dev/null +++ b/artiq/test/lit/embedding/subkernel_with_arg.py @@ -0,0 +1,18 @@ +# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding +compile %s +# RUN: OutputCheck %s --file-to-check=%t.ll + +from artiq.language.core import * +from artiq.language.types import * + +@kernel +def entrypoint(): + # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. + # CHECK: call void @subkernel_send_message\(i32 ., i8 1, .*\), !dbg !. + accept_arg(1) + + +# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr +# CHECK-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr +@subkernel(destination=1) +def accept_arg(arg: TInt32) -> TNone: + pass diff --git a/artiq/test/lit/embedding/subkernel_with_opt_arg.py b/artiq/test/lit/embedding/subkernel_with_opt_arg.py new file mode 100644 index 000000000..1821fec01 --- /dev/null +++ b/artiq/test/lit/embedding/subkernel_with_opt_arg.py @@ -0,0 +1,21 @@ +# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding +compile %s +# RUN: OutputCheck %s --file-to-check=%t.ll + +from artiq.language.core import * +from artiq.language.types import * + +@kernel +def entrypoint(): + # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. + # CHECK: call void @subkernel_send_message\(i32 ., i8 1, .*\), !dbg !. + accept_arg(1) + # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. + # CHECK: call void @subkernel_send_message\(i32 ., i8 2, .*\), !dbg !. 
+ accept_arg(1, 2) + + +# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr +# CHECK-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr +@subkernel(destination=1) +def accept_arg(arg_a, arg_b=5) -> TNone: + pass From 973fd88b276401ef1b00fcbd57c00478b8ac16d1 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 5 Oct 2023 14:37:04 +0800 Subject: [PATCH 008/296] core: compile and upload subkernels --- artiq/coredevice/comm_kernel.py | 18 +++++++++++++++- artiq/coredevice/core.py | 37 +++++++++++++++++++++++++++------ artiq/coredevice/exceptions.py | 7 +++++++ artiq/master/databases.py | 3 +++ 4 files changed, 58 insertions(+), 7 deletions(-) diff --git a/artiq/coredevice/comm_kernel.py b/artiq/coredevice/comm_kernel.py index 3d5b8dea9..b6ffb8ee7 100644 --- a/artiq/coredevice/comm_kernel.py +++ b/artiq/coredevice/comm_kernel.py @@ -23,6 +23,8 @@ class Request(Enum): RPCReply = 7 RPCException = 8 + SubkernelUpload = 9 + class Reply(Enum): SystemInfo = 2 @@ -208,6 +210,7 @@ class CommKernel: self.unpack_float64 = struct.Struct(self.endian + "d").unpack self.pack_header = struct.Struct(self.endian + "lB").pack + self.pack_int8 = struct.Struct(self.endian + "B").pack self.pack_int32 = struct.Struct(self.endian + "l").pack self.pack_int64 = struct.Struct(self.endian + "q").pack self.pack_float64 = struct.Struct(self.endian + "d").pack @@ -322,7 +325,7 @@ class CommKernel: self._write(chunk) def _write_int8(self, value): - self._write(value) + self._write(self.pack_int8(value)) def _write_int32(self, value): self._write(self.pack_int32(value)) @@ -382,6 +385,19 @@ class CommKernel: else: self._read_expect(Reply.LoadCompleted) + def upload_subkernel(self, kernel_library, id, destination): + self._write_header(Request.SubkernelUpload) + self._write_int32(id) + self._write_int8(destination) + self._write_bytes(kernel_library) + self._flush() + + self._read_header() + if self._read_type == Reply.LoadFailed: + raise LoadError(self._read_string()) + else: + self._read_expect(Reply.LoadCompleted) + def run(self): self._write_empty(Request.RunKernel) self._flush() diff --git a/artiq/coredevice/core.py b/artiq/coredevice/core.py index 928d7eb17..f5a872335 100644 --- a/artiq/coredevice/core.py +++ b/artiq/coredevice/core.py @@ -1,5 +1,6 @@ import os, sys import numpy +from inspect import getfullargspec from functools import wraps from pythonparser import diagnostic @@ -103,12 +104,13 @@ class Core: def compile(self, function, args, kwargs, set_result=None, attribute_writeback=True, print_as_rpc=True, - target=None): + target=None, destination=0, subkernel_arg_types=[]): try: engine = _DiagnosticEngine(all_errors_are_fatal=True) stitcher = Stitcher(engine=engine, core=self, dmgr=self.dmgr, - print_as_rpc=print_as_rpc) + print_as_rpc=print_as_rpc, + destination=destination, subkernel_arg_types=subkernel_arg_types) stitcher.stitch_call(function, args, kwargs, set_result) stitcher.finalize() @@ -122,7 +124,8 @@ class Core: return stitcher.embedding_map, stripped_library, \ lambda addresses: target.symbolize(library, addresses), \ - lambda symbols: target.demangle(symbols) + lambda symbols: target.demangle(symbols), \ + module.subkernel_arg_types except diagnostic.Error as error: raise CompileError(error.diagnostic) from error @@ -140,11 +143,32 @@ class Core: def set_result(new_result): nonlocal result result = new_result - embedding_map, kernel_library, symbolizer, demangler = \ + embedding_map, kernel_library, symbolizer, demangler, subkernel_arg_types = \ 
self.compile(function, args, kwargs, set_result) + self.compile_subkernels(embedding_map, args, subkernel_arg_types) self._run_compiled(kernel_library, embedding_map, symbolizer, demangler) return result + def compile_subkernels(self, embedding_map, args, subkernel_arg_types): + for sid, subkernel_fn in embedding_map.subkernels().items(): + # pass self to subkernels (if applicable) + # assuming the first argument is self + subkernel_args = getfullargspec(subkernel_fn.artiq_embedded.function) + self_arg = [] + if len(subkernel_args[0]) > 0: + if subkernel_args[0][0] == 'self': + self_arg = args[:1] + destination = subkernel_fn.artiq_embedded.destination + destination_tgt = self.dmgr.ddb.get_satellite_cpu_target(destination) + target = get_target_cls(destination_tgt)(subkernel_id=sid) + object_map, kernel_library, _, _, _ = \ + self.compile(subkernel_fn, self_arg, {}, attribute_writeback=False, + print_as_rpc=False, target=target, destination=destination, + subkernel_arg_types=subkernel_arg_types.get(sid, [])) + if object_map.has_rpc_or_subkernel(): + raise ValueError("Subkernel must not use RPC or subkernels in other destinations") + self.comm.upload_subkernel(kernel_library, sid, destination) + def precompile(self, function, *args, **kwargs): """Precompile a kernel and return a callable that executes it on the core device at a later time. @@ -153,7 +177,7 @@ class Core: as additional positional and keyword arguments. The returned callable accepts no arguments. - Precompiled kernels may use RPCs. + Precompiled kernels may use RPCs and subkernels. Object attributes at the beginning of a precompiled kernel execution have the values they had at precompilation time. If up-to-date values are required, @@ -178,8 +202,9 @@ class Core: nonlocal result result = new_result - embedding_map, kernel_library, symbolizer, demangler = \ + embedding_map, kernel_library, symbolizer, demangler, subkernel_arg_types = \ self.compile(function, args, kwargs, set_result, attribute_writeback=False) + self.compile_subkernels(embedding_map, args, subkernel_arg_types) @wraps(function) def run_precompiled(): diff --git a/artiq/coredevice/exceptions.py b/artiq/coredevice/exceptions.py index 7b6967743..b40d3b552 100644 --- a/artiq/coredevice/exceptions.py +++ b/artiq/coredevice/exceptions.py @@ -148,6 +148,13 @@ class DMAError(Exception): artiq_builtin = True +class SubkernelError(Exception): + """Raised when an operation regarding a subkernel is invalid + or cannot be completed. + """ + artiq_builtin = True + + class ClockFailure(Exception): """Raised when RTIO PLL has lost lock.""" diff --git a/artiq/master/databases.py b/artiq/master/databases.py index db5760d31..ccdd6d022 100644 --- a/artiq/master/databases.py +++ b/artiq/master/databases.py @@ -36,6 +36,9 @@ class DeviceDB: desc = self.data.raw_view[desc] return desc + def get_satellite_cpu_target(self, destination): + return self.data.raw_view["satellite_cpu_targets"][destination] + class DatasetDB(TaskObject): def __init__(self, persist_file, autosave_period=30): From 7ab52af6036e12d0d13c0677aff8941914b2161b Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 5 Oct 2023 16:45:46 +0800 Subject: [PATCH 009/296] docs: subkernel support --- RELEASE_NOTES.rst | 2 + doc/manual/getting_started_core.rst | 82 ++++++++++++++++++++++++++++- 2 files changed, 83 insertions(+), 1 deletion(-) diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index 45573d386..81edc5100 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -29,6 +29,8 @@ Highlights: kernel functions. 
* Distributed DMA is now supported, allowing DMA to be run directly on satellites for corresponding
   RTIO events, increasing bandwidth in scenarios with heavy satellite usage.
+* Subkernels are now supported, allowing kernels to be run on satellite devices, offloading some
+  of the processing and RTIO events.
 * Applet Request Interfaces have been implemented, enabling applets to directly modify datasets
   and temporarily set arguments in the dashboard.
 * EntryArea widget has been implemented, allowing argument entry widgets to be used in applets.
diff --git a/doc/manual/getting_started_core.rst b/doc/manual/getting_started_core.rst
index 5b7cab0e5..fe4ba7bd7 100644
--- a/doc/manual/getting_started_core.rst
+++ b/doc/manual/getting_started_core.rst
@@ -266,4 +266,84 @@ This argument is ignored on standalone systems, as it does not apply there.
 
 Enabling DDMA on a purely local sequence on a DRTIO system introduces an overhead during trace
 recording which comes from additional processing done on the record, so careful use is advised.
-Due to the extra time that communicating with relevant satellites takes, an additional delay before playback may be necessary to prevent a :exc:`~artiq.coredevice.exceptions.RTIOUnderflow` when playing back a DDMA-enabled sequence.
\ No newline at end of file
+Due to the extra time that communicating with relevant satellites takes, an additional delay before playback may be necessary to prevent a :exc:`~artiq.coredevice.exceptions.RTIOUnderflow` when playing back a DDMA-enabled sequence.
+
+Subkernels
+----------
+
+Subkernels are kernels running on a satellite device. They allow offloading some of the processing and control over remote RTIO devices, freeing up resources on the master.
+
+Subkernels behave for the most part like regular kernels: they accept arguments and can return values. However, there are a few caveats:
+
+ - they do not support RPCs or calling subsequent subkernels on other devices,
+ - they do not support DRTIO,
+ - their return value must be fully annotated with an ARTIQ type,
+ - their arguments should be annotated, and only basic ARTIQ types are supported,
+ - while ``self`` is allowed, there is no attribute writeback - any changes to it will be discarded when the subkernel is done,
+ - they can raise exceptions, but the exceptions cannot be caught by the master,
+ - they begin execution as soon as possible when called, and they can be awaited.
+
+To define a subkernel, use the subkernel decorator (``@subkernel(destination=X)``). The destination is the satellite number as defined in the routing table, and must be between 1 and 255. To call a subkernel, call it like a normal function; to await its result, use the ``subkernel_await(function, [timeout])`` built-in function.
+
+For example, a subkernel performing integer addition: ::
+
+    from artiq.experiment import *
+
+
+    @subkernel(destination=1)
+    def subkernel_add(a: TInt32, b: TInt32) -> TInt32:
+        return a + b
+
+    class SubkernelExperiment(EnvExperiment):
+        def build(self):
+            self.setattr_device("core")
+
+        @kernel
+        def run(self):
+            subkernel_add(2, 2)
+            result = subkernel_await(subkernel_add)
+            assert result == 4
+
+Subkernel execution may sometimes take longer than the default await timeout of 10000 milliseconds (10 seconds). The timeout can be adjusted, as ``subkernel_await()`` accepts an optional timeout argument.
+
+Subkernels are compiled after the main kernel and then immediately uploaded to satellites.
When called, the master instructs the appropriate satellite to load the subkernel into its kernel core and run it. If the subkernel is complex and its binary relatively large, the delay between the call and the actual start of execution may be substantial; if that delay has to be minimized, ``subkernel_preload(function)`` should be used before the call.
+
+While ``self`` is accepted as an argument for subkernels, it is embedded into the compiled data. Any changes made by the main kernel or other subkernels will not be available to the subkernel.
+
+Subkernels can call other kernels and subkernels, as long as they are within the same destination. For a more complex example: ::
+
+    from artiq.experiment import *
+
+    class SubkernelExperiment(EnvExperiment):
+        def build(self):
+            self.setattr_device("core")
+            self.setattr_device("ttl0")
+            self.setattr_device("ttl8") # assuming it's on a satellite
+
+        @subkernel(destination=1)
+        def add_and_pulse(self, a: TInt32, b: TInt32) -> TInt32:
+            c = a + b
+            self.pulse_ttl(c)
+            return c
+
+        @subkernel(destination=1)
+        def pulse_ttl(self, delay: TInt32) -> TNone:
+            self.ttl8.pulse(delay*us)
+
+        @kernel
+        def run(self):
+            subkernel_preload(self.add_and_pulse)
+            self.core.reset()
+            delay(10*ms)
+            self.add_and_pulse(2, 2)
+            self.ttl0.pulse(15*us)
+            result = subkernel_await(self.add_and_pulse)
+            assert result == 4
+            self.pulse_ttl(20)
+
+Without the preload, the delay after the core reset would need to be longer. Preloading is still an operation that can take some time, depending on the connection. Note that the method ``pulse_ttl()`` can be called both from within another subkernel and on its own.
+
+In general, subkernels do not have to be awaited, but awaiting is required to retrieve returned values and exceptions.
+
+.. note::
+    When a subkernel is running, RTIO devices on that satellite are not available to the master, regardless of which devices the subkernel actually uses. Control is returned to the master after the subkernel finishes - to be sure that you can use the device, the subkernel should be awaited before any RTIO operations on the affected satellite are performed.
\ No newline at end of file

From 08eea09d44ed0c54c7d31fa8232ebbee12e5f924 Mon Sep 17 00:00:00 2001
From: David Nadlinger
Date: Sun, 8 Oct 2023 14:10:00 +0100
Subject: [PATCH 010/296] compiler: Catch escaping numpy.{array, full, transpose}() results

Function calls in general can still be used to hide escaping allocations
from the compiler (issue #1497), but these calls in particular always
allocate, so we can easily and accurately handle them.
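
For example (a sketch mirroring the lit tests added below; the array contents
are arbitrary), code along these lines is now rejected by escape analysis
instead of silently corrupting memory at runtime:

    @kernel
    def a():
        # error: cannot return an allocated value that does not live forever
        return np.array([0, 1])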
--- artiq/compiler/validators/escape.py | 16 ++++++++++++++-- artiq/test/lit/escape/error_numpy_array.py | 15 +++++++++++++++ artiq/test/lit/escape/error_numpy_full.py | 16 ++++++++++++++++ artiq/test/lit/escape/error_numpy_transpose.py | 17 +++++++++++++++++ doc/manual/compiler.rst | 18 ------------------ 5 files changed, 62 insertions(+), 20 deletions(-) create mode 100644 artiq/test/lit/escape/error_numpy_array.py create mode 100644 artiq/test/lit/escape/error_numpy_full.py create mode 100644 artiq/test/lit/escape/error_numpy_transpose.py diff --git a/artiq/compiler/validators/escape.py b/artiq/compiler/validators/escape.py index c6ae59704..7d2395830 100644 --- a/artiq/compiler/validators/escape.py +++ b/artiq/compiler/validators/escape.py @@ -102,8 +102,20 @@ class RegionOf(algorithm.Visitor): if types.is_external_function(node.func.type, "cache_get"): # The cache is borrow checked dynamically return Global() - else: - self.visit_sometimes_allocating(node) + + if (types.is_builtin_function(node.func.type, "array") + or types.is_builtin_function(node.func.type, "make_array") + or types.is_builtin_function(node.func.type, "numpy.transpose")): + # While lifetime tracking across function calls in general is currently + # broken (see below), these special builtins that allocate an array on + # the stack of the caller _always_ allocate regardless of the parameters, + # and we can thus handle them without running into the precision issue + # mentioned in commit ae999db. + return self.visit_allocating(node) + + # FIXME: Return statement missing here, but see m-labs/artiq#1497 and + # commit ae999db. + self.visit_sometimes_allocating(node) # Value lives as long as the object/container, if it's mutable, # or else forever diff --git a/artiq/test/lit/escape/error_numpy_array.py b/artiq/test/lit/escape/error_numpy_array.py new file mode 100644 index 000000000..1ebdeda81 --- /dev/null +++ b/artiq/test/lit/escape/error_numpy_array.py @@ -0,0 +1,15 @@ +# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t +# RUN: OutputCheck %s --file-to-check=%t + +from artiq.experiment import * +import numpy as np + +@kernel +def a(): + # CHECK-L: ${LINE:+2}: error: cannot return an allocated value that does not live forever + # CHECK-L: ${LINE:+1}: note: ... to this point + return np.array([0, 1]) + +@kernel +def entrypoint(): + a() diff --git a/artiq/test/lit/escape/error_numpy_full.py b/artiq/test/lit/escape/error_numpy_full.py new file mode 100644 index 000000000..66158d8ca --- /dev/null +++ b/artiq/test/lit/escape/error_numpy_full.py @@ -0,0 +1,16 @@ +# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t +# RUN: OutputCheck %s --file-to-check=%t + +from artiq.experiment import * +import numpy as np + +@kernel +def a(): + # CHECK-L: ${LINE:+2}: error: cannot return an allocated value that does not live forever + # CHECK-L: ${LINE:+1}: note: ... 
to this point + return np.full(10, 42.0) + + +@kernel +def entrypoint(): + a() diff --git a/artiq/test/lit/escape/error_numpy_transpose.py b/artiq/test/lit/escape/error_numpy_transpose.py new file mode 100644 index 000000000..e1dc32d51 --- /dev/null +++ b/artiq/test/lit/escape/error_numpy_transpose.py @@ -0,0 +1,17 @@ +# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t +# RUN: OutputCheck %s --file-to-check=%t + +from artiq.experiment import * +import numpy as np + +data = np.array([[0, 1], [2, 3]]) + +@kernel +def a(): + # CHECK-L: ${LINE:+2}: error: cannot return an allocated value that does not live forever + # CHECK-L: ${LINE:+1}: note: ... to this point + return np.transpose(data) + +@kernel +def entrypoint(): + a() diff --git a/doc/manual/compiler.rst b/doc/manual/compiler.rst index e4923dfa9..88b57d814 100644 --- a/doc/manual/compiler.rst +++ b/doc/manual/compiler.rst @@ -95,24 +95,6 @@ tracked across function calls (see `#1497 Date: Mon, 9 Oct 2023 10:03:43 +0800 Subject: [PATCH 011/296] artiq_compile: ignore subkernel_arg_types --- artiq/frontend/artiq_compile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/artiq/frontend/artiq_compile.py b/artiq/frontend/artiq_compile.py index 938f5b787..9aeceb6d9 100755 --- a/artiq/frontend/artiq_compile.py +++ b/artiq/frontend/artiq_compile.py @@ -63,7 +63,7 @@ def main(): core_name = exp.run.artiq_embedded.core_name core = getattr(exp_inst, core_name) - object_map, kernel_library, _, _ = \ + object_map, kernel_library, _, _, _ = \ core.compile(exp.run, [exp_inst], {}, attribute_writeback=False, print_as_rpc=False) except CompileError as error: From 9c90f923d2be2b7b10f1fe18afa0b30b064630e8 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Mon, 9 Oct 2023 10:07:04 +0800 Subject: [PATCH 012/296] test: check return value of subprocesses in test_compile --- artiq/test/coredevice/test_compile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/artiq/test/coredevice/test_compile.py b/artiq/test/coredevice/test_compile.py index efb6073d5..9b97c7968 100644 --- a/artiq/test/coredevice/test_compile.py +++ b/artiq/test/coredevice/test_compile.py @@ -49,9 +49,9 @@ class TestCompile(ExperimentCase): mgmt.clear_log() with tempfile.TemporaryDirectory() as tmp: db_path = os.path.join(artiq_root, "device_db.py") - subprocess.call([sys.executable, "-m", "artiq.frontend.artiq_compile", "--device-db", db_path, + subprocess.check_call([sys.executable, "-m", "artiq.frontend.artiq_compile", "--device-db", db_path, "-c", "CheckLog", "-o", os.path.join(tmp, "check_log.elf"), __file__]) - subprocess.call([sys.executable, "-m", "artiq.frontend.artiq_run", "--device-db", db_path, + subprocess.check_call([sys.executable, "-m", "artiq.frontend.artiq_run", "--device-db", db_path, os.path.join(tmp, "check_log.elf")]) log = mgmt.get_log() self.assertIn("test_artiq_compile", log) From d070826911e86b5ec9de958c4bcd93ef0b7fddb2 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Mon, 9 Oct 2023 10:13:58 +0800 Subject: [PATCH 013/296] flake: update dependencies --- flake.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flake.lock b/flake.lock index 4acbe9c75..9adc73580 100644 --- a/flake.lock +++ b/flake.lock @@ -45,11 +45,11 @@ "mozilla-overlay": { "flake": false, "locked": { - "lastModified": 1690536331, - "narHash": "sha256-aRIf2FB2GTdfF7gl13WyETmiV/J7EhBGkSWXfZvlxcA=", + "lastModified": 1695805681, + "narHash": "sha256-1ElPLD8eFfnuIk0G52HGGpRtQZ4QPCjChRlEOfkZ5ro=", 
"owner": "mozilla", "repo": "nixpkgs-mozilla", - "rev": "db89c8707edcffefcd8e738459d511543a339ff5", + "rev": "6eabade97bc28d707a8b9d82ad13ef143836736e", "type": "github" }, "original": { @@ -60,11 +60,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1693771906, - "narHash": "sha256-32EnPCaVjOiEERZ+o/2Ir7JH9pkfwJZJ27SKHNvt4yk=", + "lastModified": 1696697597, + "narHash": "sha256-q26Qv4DQ+h6IeozF2o1secyQG0jt2VUT3V0K58jr3pg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "da5adce0ffaff10f6d0fee72a02a5ed9d01b52fc", + "rev": "5a237aecb57296f67276ac9ab296a41c23981f56", "type": "github" }, "original": { From 333b81f789fa2279c4dc46b761f760130d94f989 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 27 Sep 2023 13:17:08 +0800 Subject: [PATCH 014/296] set_argument_value warning in browser --- artiq/browser/experiments.py | 4 ++++ artiq/frontend/artiq_browser.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/artiq/browser/experiments.py b/artiq/browser/experiments.py index c34ea5832..7f74f90bb 100644 --- a/artiq/browser/experiments.py +++ b/artiq/browser/experiments.py @@ -508,5 +508,9 @@ class ExperimentsArea(QtWidgets.QMdiArea): self.open_experiments.append(dock) return dock + def set_argument_value(self, expurl, name, value): + logger.warning("Unable to set argument '%s', dropping change. " + "'set_argument_value' not supported in browser.", name) + def on_dock_closed(self, dock): self.open_experiments.remove(dock) diff --git a/artiq/frontend/artiq_browser.py b/artiq/frontend/artiq_browser.py index 5cb7ef090..751c57d2e 100755 --- a/artiq/frontend/artiq_browser.py +++ b/artiq/frontend/artiq_browser.py @@ -81,7 +81,7 @@ class Browser(QtWidgets.QMainWindow): self.files.dataset_changed.connect( self.experiments.dataset_changed) - self.applets = applets.AppletsDock(self, dataset_sub, dataset_ctl, loop=loop) + self.applets = applets.AppletsDock(self, dataset_sub, dataset_ctl, self.experiments, loop=loop) smgr.register(self.applets) atexit_register_coroutine(self.applets.stop, loop=loop) From f3c79e71e1e06270dc5c6992fba78004383549b7 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Mon, 9 Oct 2023 13:24:32 +0800 Subject: [PATCH 015/296] firmware: merge runtime and satman linker scripts --- .../{runtime/runtime.ld => firmware.ld} | 28 +++--- artiq/firmware/runtime/Makefile | 2 +- artiq/firmware/satman/Makefile | 2 +- artiq/firmware/satman/satman.ld | 91 ------------------- 4 files changed, 16 insertions(+), 107 deletions(-) rename artiq/firmware/{runtime/runtime.ld => firmware.ld} (79%) delete mode 100644 artiq/firmware/satman/satman.ld diff --git a/artiq/firmware/runtime/runtime.ld b/artiq/firmware/firmware.ld similarity index 79% rename from artiq/firmware/runtime/runtime.ld rename to artiq/firmware/firmware.ld index 9f60bf3ac..9778cb541 100644 --- a/artiq/firmware/runtime/runtime.ld +++ b/artiq/firmware/firmware.ld @@ -6,7 +6,7 @@ ENTRY(_reset_handler) * ld does not allow this expression here. */ MEMORY { - runtime (RWX) : ORIGIN = 0x40000000, LENGTH = 0x4000000 /* 64M */ + firmware (RWX) : ORIGIN = 0x40000000, LENGTH = 0x4000000 /* 64M */ } SECTIONS @@ -14,24 +14,24 @@ SECTIONS .vectors : { *(.vectors) - } > runtime + } > firmware .text : { *(.text .text.*) - } > runtime + } > firmware .eh_frame : { __eh_frame_start = .; KEEP(*(.eh_frame)) __eh_frame_end = .; - } > runtime + } > firmware .eh_frame_hdr : { KEEP(*(.eh_frame_hdr)) - } > runtime + } > firmware __eh_frame_hdr_start = SIZEOF(.eh_frame_hdr) > 0 ? 
ADDR(.eh_frame_hdr) : 0; __eh_frame_hdr_end = SIZEOF(.eh_frame_hdr) > 0 ? . : 0; @@ -39,35 +39,35 @@ SECTIONS .gcc_except_table : { *(.gcc_except_table) - } > runtime + } > firmware /* https://sourceware.org/bugzilla/show_bug.cgi?id=20475 */ .got : { *(.got) - } > runtime + } > firmware .got.plt : { *(.got.plt) - } > runtime + } > firmware .rodata : { *(.rodata .rodata.*) - } > runtime + } > firmware .data : { *(.data .data.*) - } > runtime + } > firmware .bss (NOLOAD) : ALIGN(4) { _fbss = .; *(.sbss .sbss.* .bss .bss.*); _ebss = .; - } > runtime + } > firmware .stack (NOLOAD) : ALIGN(0x1000) { @@ -76,12 +76,12 @@ SECTIONS _estack = .; . += 0x10000; _fstack = . - 16; - } > runtime + } > firmware .heap (NOLOAD) : ALIGN(16) { _fheap = .; - . = ORIGIN(runtime) + LENGTH(runtime); + . = ORIGIN(firmware) + LENGTH(firmware); _eheap = .; - } > runtime + } > firmware } diff --git a/artiq/firmware/runtime/Makefile b/artiq/firmware/runtime/Makefile index 0427d763f..2508b66b4 100644 --- a/artiq/firmware/runtime/Makefile +++ b/artiq/firmware/runtime/Makefile @@ -21,7 +21,7 @@ $(RUSTOUT)/libruntime.a: --target $(RUNTIME_DIRECTORY)/../$(CARGO_TRIPLE).json runtime.elf: $(RUSTOUT)/libruntime.a ksupport_data.o - $(link) -T $(RUNTIME_DIRECTORY)/runtime.ld \ + $(link) -T $(RUNTIME_DIRECTORY)/../firmware.ld \ -lunwind-vexriscv-bare -m elf32lriscv ksupport_data.o: ../ksupport/ksupport.elf diff --git a/artiq/firmware/satman/Makefile b/artiq/firmware/satman/Makefile index 55befda95..a7aab9e21 100644 --- a/artiq/firmware/satman/Makefile +++ b/artiq/firmware/satman/Makefile @@ -21,7 +21,7 @@ $(RUSTOUT)/libsatman.a: --target $(SATMAN_DIRECTORY)/../$(CARGO_TRIPLE).json satman.elf: $(RUSTOUT)/libsatman.a ksupport_data.o - $(link) -T $(SATMAN_DIRECTORY)/satman.ld \ + $(link) -T $(SATMAN_DIRECTORY)/../firmware.ld \ -lunwind-vexriscv-bare -m elf32lriscv ksupport_data.o: ../ksupport/ksupport.elf diff --git a/artiq/firmware/satman/satman.ld b/artiq/firmware/satman/satman.ld deleted file mode 100644 index c188dc3ec..000000000 --- a/artiq/firmware/satman/satman.ld +++ /dev/null @@ -1,91 +0,0 @@ -INCLUDE generated/output_format.ld -INCLUDE generated/regions.ld -ENTRY(_reset_handler) - -SECTIONS -{ - .vectors : - { - *(.vectors) - } > main_ram - - .text : - { - *(.text .text.*) - . = ALIGN(0x40000); - } > main_ram - - .eh_frame : - { - __eh_frame_start = .; - KEEP(*(.eh_frame)) - __eh_frame_end = .; - } > main_ram - - .eh_frame_hdr : - { - KEEP(*(.eh_frame_hdr)) - } > main_ram - - __eh_frame_hdr_start = SIZEOF(.eh_frame_hdr) > 0 ? ADDR(.eh_frame_hdr) : 0; - __eh_frame_hdr_end = SIZEOF(.eh_frame_hdr) > 0 ? . : 0; - - .gcc_except_table : - { - *(.gcc_except_table) - } > main_ram - - /* https://sourceware.org/bugzilla/show_bug.cgi?id=20475 */ - .got : - { - PROVIDE(_GLOBAL_OFFSET_TABLE_ = .); - *(.got) - } > main_ram - - .got.plt : - { - *(.got.plt) - } > main_ram - - .rodata : - { - _frodata = .; - *(.rodata .rodata.*) - _erodata = .; - } > main_ram - - .data : - { - *(.data .data.*) - } > main_ram - - .sdata : - { - *(.sdata .sdata.*) - } > main_ram - - .bss (NOLOAD) : ALIGN(4) - { - _fbss = .; - *(.sbss .sbss.* .bss .bss.*); - . = ALIGN(4); - _ebss = .; - } > main_ram - - .stack (NOLOAD) : ALIGN(0x1000) - { - _sstack_guard = .; - . += 0x1000; - _estack = .; - . += 0x10000; - _fstack = . - 16; - } > main_ram - - /* remainder of 64MB for heap for alloc use */ - .heap (NOLOAD) : ALIGN(16) - { - _fheap = .; - . 
= 0x44000000; // not to overwrite RPC queue - _eheap = .; - } > main_ram -} From 96941d7c04969aed295381e8bee3ca093f84cde8 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 5 Oct 2023 12:54:08 +0800 Subject: [PATCH 016/296] big_number: fix metadata scaling, add unit label --- artiq/applets/big_number.py | 52 +++++++++++++++++++++++++++---------- 1 file changed, 38 insertions(+), 14 deletions(-) diff --git a/artiq/applets/big_number.py b/artiq/applets/big_number.py index 56458f507..7bf077fe2 100755 --- a/artiq/applets/big_number.py +++ b/artiq/applets/big_number.py @@ -2,6 +2,8 @@ from PyQt5 import QtWidgets, QtCore, QtGui from artiq.applets.simple import SimpleApplet +from artiq.tools import scale_from_metadata +from artiq.gui.tools import LayoutWidget class QResponsiveLCDNumber(QtWidgets.QLCDNumber): @@ -21,29 +23,41 @@ class QCancellableLineEdit(QtWidgets.QLineEdit): super().keyPressEvent(event) -class NumberWidget(QtWidgets.QStackedWidget): +class NumberWidget(LayoutWidget): def __init__(self, args, req): - QtWidgets.QStackedWidget.__init__(self) + LayoutWidget.__init__(self) self.dataset_name = args.dataset self.req = req + self.metadata = dict() + + self.number_area = QtWidgets.QStackedWidget() + self.addWidget(self.number_area, 0, 0) + + self.unit_area = QtWidgets.QLabel() + self.unit_area.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTop) + self.addWidget(self.unit_area, 0, 1) self.lcd_widget = QResponsiveLCDNumber() self.lcd_widget.setDigitCount(args.digit_count) self.lcd_widget.doubleClicked.connect(self.start_edit) - self.addWidget(self.lcd_widget) + self.number_area.addWidget(self.lcd_widget) self.edit_widget = QCancellableLineEdit() self.edit_widget.setValidator(QtGui.QDoubleValidator()) - self.edit_widget.setAlignment(QtCore.Qt.AlignRight) + self.edit_widget.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) self.edit_widget.editCancelled.connect(self.cancel_edit) self.edit_widget.returnPressed.connect(self.confirm_edit) - self.addWidget(self.edit_widget) + self.number_area.addWidget(self.edit_widget) font = QtGui.QFont() font.setPointSize(60) self.edit_widget.setFont(font) - self.setCurrentWidget(self.lcd_widget) + unit_font = QtGui.QFont() + unit_font.setPointSize(20) + self.unit_area.setFont(unit_font) + + self.number_area.setCurrentWidget(self.lcd_widget) def start_edit(self): # QLCDNumber value property contains the value of zero @@ -51,22 +65,32 @@ class NumberWidget(QtWidgets.QStackedWidget): self.edit_widget.setText(str(self.lcd_widget.value())) self.edit_widget.selectAll() self.edit_widget.setFocus() - self.setCurrentWidget(self.edit_widget) + self.number_area.setCurrentWidget(self.edit_widget) def confirm_edit(self): - value = float(self.edit_widget.text()) - self.req.set_dataset(self.dataset_name, value) - self.setCurrentWidget(self.lcd_widget) + scale = scale_from_metadata(self.metadata) + val = float(self.edit_widget.text()) + val *= scale + self.req.set_dataset(self.dataset_name, val, **self.metadata) + self.number_area.setCurrentWidget(self.lcd_widget) def cancel_edit(self): - self.setCurrentWidget(self.lcd_widget) + self.number_area.setCurrentWidget(self.lcd_widget) def data_changed(self, value, metadata, persist, mods): try: - n = float(value[self.dataset_name]) + self.metadata = metadata[self.dataset_name] + # This applet will degenerate other scalar types to native float on edit + # Use the dashboard ChangeEditDialog for consistent type casting + val = float(value[self.dataset_name]) + scale = scale_from_metadata(self.metadata) + val /= 
scale except (KeyError, ValueError, TypeError): - n = "---" - self.lcd_widget.display(n) + val = "---" + + unit = self.metadata.get("unit", "") + self.unit_area.setText(unit) + self.lcd_widget.display(val) def main(): From de41bd66558ccfcf7e4337860b620f66129698dc Mon Sep 17 00:00:00 2001 From: linuswck Date: Wed, 11 Oct 2023 10:07:06 +0800 Subject: [PATCH 017/296] eem_7series: pass through kwargs for shuttler --- artiq/gateware/eem_7series.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/artiq/gateware/eem_7series.py b/artiq/gateware/eem_7series.py index c98d11fad..2ca444d37 100644 --- a/artiq/gateware/eem_7series.py +++ b/artiq/gateware/eem_7series.py @@ -141,7 +141,7 @@ def peripheral_shuttler(module, peripheral, **kwargs): port, port_aux = peripheral["ports"] else: raise ValueError("wrong number of ports") - eem.Shuttler.add_std(module, port, port_aux) + eem.Shuttler.add_std(module, port, port_aux, **kwargs) peripheral_processors = { "dio": peripheral_dio, From f7abc156cbc0a371452148d11436e5f241798092 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Wed, 11 Oct 2023 16:41:34 +0800 Subject: [PATCH 018/296] flake: update dependencies --- flake.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index 9adc73580..a77b22f03 100644 --- a/flake.lock +++ b/flake.lock @@ -108,11 +108,11 @@ "src-migen": { "flake": false, "locked": { - "lastModified": 1693990700, - "narHash": "sha256-qJLA03QcZ5S9DrqrseuzIQBTWS7rjAbYJxLYZEQ8rxA=", + "lastModified": 1697013661, + "narHash": "sha256-qNCqgWyE4vTDmyjE2XMJqW1djuBxT25A36AzQfZqluU=", "owner": "m-labs", "repo": "migen", - "rev": "2cfee3e0db6fdca9b5918686ea77c93252e7cebd", + "rev": "aadc19df93b7aa9ca761aaebbb98a11e0cf2d7c7", "type": "github" }, "original": { From 363f7327f176a2db3723d8729aecd25998bf082b Mon Sep 17 00:00:00 2001 From: occheung Date: Fri, 13 Oct 2023 16:43:06 -0700 Subject: [PATCH 019/296] io_expander: initialize before service --- artiq/firmware/satman/main.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index a44919933..4ca8c308c 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -632,7 +632,8 @@ pub extern fn main() -> i32 { #[cfg(soc_platform = "efc")] { io_expander = board_misoc::io_expander::IoExpander::new().unwrap(); - + io_expander.init().expect("I2C I/O expander initialization failed"); + // Enable LEDs io_expander.set_oe(0, 1 << 5 | 1 << 6 | 1 << 7).unwrap(); From 5f445f6b929c3985cbc3bf80ac029048b6c50b0a Mon Sep 17 00:00:00 2001 From: mwojcik Date: Mon, 16 Oct 2023 11:21:04 +0800 Subject: [PATCH 020/296] ad53xx: fix `load()` references in documentation --- artiq/coredevice/ad53xx.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/artiq/coredevice/ad53xx.py b/artiq/coredevice/ad53xx.py index 9a2b8eb2e..76bad29bb 100644 --- a/artiq/coredevice/ad53xx.py +++ b/artiq/coredevice/ad53xx.py @@ -233,7 +233,7 @@ class AD53xx: def write_gain_mu(self, channel, gain=0xffff): """Program the gain register for a DAC channel. - The DAC output is not updated until LDAC is pulsed (see :meth load:). + The DAC output is not updated until LDAC is pulsed (see :meth:`load`). This method advances the timeline by the duration of one SPI transfer. 
:param gain: 16-bit gain register value (default: 0xffff) @@ -245,7 +245,7 @@ class AD53xx: def write_offset_mu(self, channel, offset=0x8000): """Program the offset register for a DAC channel. - The DAC output is not updated until LDAC is pulsed (see :meth load:). + The DAC output is not updated until LDAC is pulsed (see :meth:`load`). This method advances the timeline by the duration of one SPI transfer. :param offset: 16-bit offset register value (default: 0x8000) @@ -258,7 +258,7 @@ class AD53xx: """Program the DAC offset voltage for a channel. An offset of +V can be used to trim out a DAC offset error of -V. - The DAC output is not updated until LDAC is pulsed (see :meth load:). + The DAC output is not updated until LDAC is pulsed (see :meth:`load`). This method advances the timeline by the duration of one SPI transfer. :param voltage: the offset voltage @@ -270,7 +270,7 @@ class AD53xx: def write_dac_mu(self, channel, value): """Program the DAC input register for a channel. - The DAC output is not updated until LDAC is pulsed (see :meth load:). + The DAC output is not updated until LDAC is pulsed (see :meth:`load`). This method advances the timeline by the duration of one SPI transfer. """ self.bus.write( @@ -280,7 +280,7 @@ class AD53xx: def write_dac(self, channel, voltage): """Program the DAC output voltage for a channel. - The DAC output is not updated until LDAC is pulsed (see :meth load:). + The DAC output is not updated until LDAC is pulsed (see :meth:`load`). This method advances the timeline by the duration of one SPI transfer. """ self.write_dac_mu(channel, voltage_to_mu(voltage, self.offset_dacs, @@ -313,7 +313,7 @@ class AD53xx: If no LDAC device was defined, the LDAC pulse is skipped. - See :meth load:. + See :meth:`load`. :param values: list of DAC values to program :param channels: list of DAC channels to program. If not specified, @@ -355,7 +355,7 @@ class AD53xx: """ Two-point calibration of a DAC channel. Programs the offset and gain register to trim out DAC errors. Does not - take effect until LDAC is pulsed (see :meth load:). + take effect until LDAC is pulsed (see :meth:`load`). Calibration consists of measuring the DAC output voltage for a channel with the DAC set to zero-scale (0x0000) and full-scale (0xffff). 
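
As a usage note for the LDAC behaviour these docstrings describe, a minimal sketch (not part of the patch); ``zotino0`` is an assumed name for an AD53xx-based device: ::

    from artiq.experiment import *

    class LoadTwoChannels(EnvExperiment):
        def build(self):
            self.setattr_device("core")
            self.setattr_device("zotino0")  # assumed AD53xx-based DAC

        @kernel
        def run(self):
            self.core.reset()
            self.zotino0.init()
            delay(1*ms)
            # Program the input registers; the outputs do not change yet.
            self.zotino0.write_dac(0, 1.5)
            self.zotino0.write_dac(1, -2.0)
            # Pulse LDAC so both channels update simultaneously.
            self.zotino0.load()
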
From 6705c9fbfb05d1be464a99bd1cc9e6a4e0f4724f Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Tue, 17 Oct 2023 15:37:06 +0800 Subject: [PATCH 021/296] flake: update dependencies --- flake.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flake.lock b/flake.lock index a77b22f03..284ffa1ae 100644 --- a/flake.lock +++ b/flake.lock @@ -60,11 +60,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1696697597, - "narHash": "sha256-q26Qv4DQ+h6IeozF2o1secyQG0jt2VUT3V0K58jr3pg=", + "lastModified": 1697226376, + "narHash": "sha256-cumLLb1QOUtWieUnLGqo+ylNt3+fU8Lcv5Zl+tYbRUE=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "5a237aecb57296f67276ac9ab296a41c23981f56", + "rev": "898cb2064b6e98b8c5499f37e81adbdf2925f7c5", "type": "github" }, "original": { @@ -92,11 +92,11 @@ ] }, "locked": { - "lastModified": 1693473454, - "narHash": "sha256-kr8Ur6JNW/xVRHdPn3ou980IAxg/n+f3ZQBHuJ1uaC4=", + "lastModified": 1697528004, + "narHash": "sha256-FFa2MbhAJEjwY58uOs0swvgymfjubHyWba6Q0X6CbB0=", "owner": "m-labs", "repo": "sipyco", - "rev": "5467dcf9738673ab9a49e6f2377bda7c551b5f90", + "rev": "c0a7ed350ccfb85474217057fc47b3f258ca8d99", "type": "github" }, "original": { From b168f0bb4be1697ff100475c20ee304dcc31fcc2 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Tue, 17 Oct 2023 17:46:56 +0800 Subject: [PATCH 022/296] subkernel: separate tags and data --- artiq/compiler/ir.py | 17 ++++++ .../compiler/transforms/artiq_ir_generator.py | 5 +- .../compiler/transforms/llvm_ir_generator.py | 38 ++++++++++--- artiq/firmware/ksupport/lib.rs | 6 +- artiq/firmware/libproto_artiq/kernel_proto.rs | 2 +- artiq/firmware/libproto_artiq/rpc_proto.rs | 49 ++++++++++------- artiq/firmware/runtime/kernel.rs | 10 ++-- artiq/firmware/runtime/session.rs | 32 ++++++----- artiq/firmware/satman/kernel.rs | 55 +++++++++---------- artiq/test/lit/embedding/subkernel_return.py | 4 +- .../lit/embedding/subkernel_return_none.py | 4 +- 11 files changed, 134 insertions(+), 88 deletions(-) diff --git a/artiq/compiler/ir.py b/artiq/compiler/ir.py index 3af11ccd0..f630f914a 100644 --- a/artiq/compiler/ir.py +++ b/artiq/compiler/ir.py @@ -764,6 +764,23 @@ class GetOptArgFromRemote(GetArgFromRemote): def opcode(self): return "getoptargfromremote({})".format(repr(self.arg_name)) +class SubkernelAwaitArgs(Instruction): + """ + A builtin instruction that takes min and max received messages as operands, + and a list of received types. 
+ + :ivar arg_types: (list of types) types of passed arguments (including optional) + """ + + """ + :param arg_types: (list of types) types of passed arguments (including optional) + """ + + def __init__(self, operands, arg_types, name=None): + assert isinstance(arg_types, list) + self.arg_types = arg_types + super().__init__(operands, builtins.TNone(), name) + class GetAttr(Instruction): """ An intruction that loads an attribute from an object, diff --git a/artiq/compiler/transforms/artiq_ir_generator.py b/artiq/compiler/transforms/artiq_ir_generator.py index 489739ba7..fb9560a5d 100644 --- a/artiq/compiler/transforms/artiq_ir_generator.py +++ b/artiq/compiler/transforms/artiq_ir_generator.py @@ -2614,8 +2614,9 @@ class ARTIQIRGenerator(algorithm.Visitor): min_args = ir.Constant(len(fn_typ.args)-offset, builtins.TInt8()) max_args = ir.Constant(fn_typ.arity()-offset, builtins.TInt8()) - rcvd_count = self.append(ir.Builtin("subkernel_await_args", [min_args, max_args], builtins.TNone())) - arg_types = list(fn_typ.args.items())[offset:] + arg_types = list(fn_typ.args.items())[offset:] + arg_type_list = [a[1] for a in arg_types] + [a[1] for a in fn_typ.optargs.items()] + rcvd_count = self.append(ir.SubkernelAwaitArgs([min_args, max_args], arg_type_list)) # obligatory arguments for arg_name, arg_type in arg_types: args[index] = self.append(ir.GetArgFromRemote(arg_name, arg_type, diff --git a/artiq/compiler/transforms/llvm_ir_generator.py b/artiq/compiler/transforms/llvm_ir_generator.py index 88412a04c..ebde5b53f 100644 --- a/artiq/compiler/transforms/llvm_ir_generator.py +++ b/artiq/compiler/transforms/llvm_ir_generator.py @@ -405,7 +405,7 @@ class LLVMIRGenerator: elif name == "subkernel_await_finish": llty = ll.FunctionType(llvoid, [lli32, lli64]) elif name == "subkernel_await_message": - llty = ll.FunctionType(lli8, [lli32, lli64, lli8, lli8]) + llty = ll.FunctionType(lli8, [lli32, lli64, llsliceptr, lli8, lli8]) # with now-pinning elif name == "now": @@ -1400,12 +1400,6 @@ class LLVMIRGenerator: return self.llbuilder.call(self.llbuiltin("delay_mu"), [llinterval]) elif insn.op == "end_catch": return self.llbuilder.call(self.llbuiltin("__artiq_end_catch"), []) - elif insn.op == "subkernel_await_args": - llmin = self.map(insn.operands[0]) - llmax = self.map(insn.operands[1]) - return self.llbuilder.call(self.llbuiltin("subkernel_await_message"), - [ll.Constant(lli32, 0), ll.Constant(lli64, 10_000), llmin, llmax], - name="subkernel.await.args") elif insn.op == "subkernel_await_finish": llsid = self.map(insn.operands[0]) lltimeout = self.map(insn.operands[1]) @@ -1414,7 +1408,9 @@ class LLVMIRGenerator: elif insn.op == "subkernel_retrieve_return": llsid = self.map(insn.operands[0]) lltimeout = self.map(insn.operands[1]) - self.llbuilder.call(self.llbuiltin("subkernel_await_message"), [llsid, lltimeout, ll.Constant(lli8, 1), ll.Constant(lli8, 1)], + lltagptr = self._build_subkernel_tags([insn.type]) + self.llbuilder.call(self.llbuiltin("subkernel_await_message"), + [llsid, lltimeout, lltagptr, ll.Constant(lli8, 1), ll.Constant(lli8, 1)], name="subkernel.await.message") llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [], name="subkernel.arg.stack") @@ -1426,6 +1422,14 @@ class LLVMIRGenerator: else: assert False + def process_SubkernelAwaitArgs(self, insn): + llmin = self.map(insn.operands[0]) + llmax = self.map(insn.operands[1]) + lltagptr = self._build_subkernel_tags(insn.arg_types) + return self.llbuilder.call(self.llbuiltin("subkernel_await_message"), + [ll.Constant(lli32, 
0), ll.Constant(lli64, 10_000), lltagptr, llmin, llmax], + name="subkernel.await.args") + def process_Closure(self, insn): llenv = self.map(insn.environment()) llenv = self.llbuilder.bitcast(llenv, llptr) @@ -1505,6 +1509,24 @@ class LLVMIRGenerator: return llfun, list(llargs), llarg_attrs, llcallstackptr + def _build_subkernel_tags(self, tag_list): + def ret_error_handler(typ): + printer = types.TypePrinter() + note = diagnostic.Diagnostic("note", + "value of type {type}", + {"type": printer.name(typ)}, + fun_loc) + diag = diagnostic.Diagnostic("error", + "type {type} is not supported in subkernels", + {"type": printer.name(fun_type.ret)}, + fun_loc, notes=[note]) + self.engine.process(diag) + tag = b"".join([ir.rpc_tag(arg_type, ret_error_handler) for arg_type in tag_list]) + lltag = self.llconst_of_const(ir.Constant(tag, builtins.TStr())) + lltagptr = self.llbuilder.alloca(lltag.type) + self.llbuilder.store(lltag, lltagptr) + return lltagptr + def _build_rpc_recv(self, ret, llstackptr, llnormalblock=None, llunwindblock=None): # T result = { # void *ret_ptr = alloca(sizeof(T)); diff --git a/artiq/firmware/ksupport/lib.rs b/artiq/firmware/ksupport/lib.rs index 53ea4574d..04153d6d6 100644 --- a/artiq/firmware/ksupport/lib.rs +++ b/artiq/firmware/ksupport/lib.rs @@ -138,7 +138,7 @@ extern fn rpc_send_async(service: u32, tag: &CSlice, data: *const *const ()) rpc_queue::enqueue(|mut slice| { let length = { let mut writer = Cursor::new(&mut slice[4..]); - rpc_proto::send_args(&mut writer, service, tag.as_ref(), data)?; + rpc_proto::send_args(&mut writer, service, tag.as_ref(), data, true)?; writer.position() }; io::ProtoWrite::write_u32(&mut slice, length as u32) @@ -499,8 +499,8 @@ extern fn subkernel_send_message(id: u32, count: u8, tag: &CSlice, data: *co } #[unwind(allowed)] -extern fn subkernel_await_message(id: u32, timeout: u64, min: u8, max: u8) -> u8 { - send(&SubkernelMsgRecvRequest { id: id, timeout: timeout }); +extern fn subkernel_await_message(id: u32, timeout: u64, tags: &CSlice, min: u8, max: u8) -> u8 { + send(&SubkernelMsgRecvRequest { id: id, timeout: timeout, tags: tags.as_ref() }); recv!(SubkernelMsgRecvReply { status, count } => { match status { SubkernelStatus::NoError => { diff --git a/artiq/firmware/libproto_artiq/kernel_proto.rs b/artiq/firmware/libproto_artiq/kernel_proto.rs index 51e619974..5f7795375 100644 --- a/artiq/firmware/libproto_artiq/kernel_proto.rs +++ b/artiq/firmware/libproto_artiq/kernel_proto.rs @@ -108,7 +108,7 @@ pub enum Message<'a> { SubkernelAwaitFinishRequest { id: u32, timeout: u64 }, SubkernelAwaitFinishReply { status: SubkernelStatus }, SubkernelMsgSend { id: u32, count: u8, tag: &'a [u8], data: *const *const () }, - SubkernelMsgRecvRequest { id: u32, timeout: u64 }, + SubkernelMsgRecvRequest { id: u32, timeout: u64, tags: &'a [u8] }, SubkernelMsgRecvReply { status: SubkernelStatus, count: u8 }, Log(fmt::Arguments<'a>), diff --git a/artiq/firmware/libproto_artiq/rpc_proto.rs b/artiq/firmware/libproto_artiq/rpc_proto.rs index cc567d7fb..80284c39e 100644 --- a/artiq/firmware/libproto_artiq/rpc_proto.rs +++ b/artiq/firmware/libproto_artiq/rpc_proto.rs @@ -190,9 +190,9 @@ unsafe fn recv_value(reader: &mut R, tag: Tag, data: &mut *mut (), } } -pub fn recv_return(reader: &mut R, tag_bytes: &[u8], data: *mut (), +pub fn recv_return<'a, R, E>(reader: &mut R, tag_bytes: &'a [u8], data: *mut (), alloc: &dyn Fn(usize) -> Result<*mut (), E>) - -> Result<(), E> + -> Result<&'a [u8], E> where R: Read + ?Sized, E: From> { @@ -204,14 +204,16 @@ pub fn 
recv_return(reader: &mut R, tag_bytes: &[u8], data: *mut (), let mut data = data; unsafe { recv_value(reader, tag, &mut data, alloc)? }; - Ok(()) + Ok(it.data) } -unsafe fn send_elements(writer: &mut W, elt_tag: Tag, length: usize, data: *const ()) +unsafe fn send_elements(writer: &mut W, elt_tag: Tag, length: usize, data: *const (), write_tags: bool) -> Result<(), Error> where W: Write + ?Sized { - writer.write_u8(elt_tag.as_u8())?; + if write_tags { + writer.write_u8(elt_tag.as_u8())?; + } match elt_tag { // we cannot use NativeEndian::from_slice_i32 as the data is not mutable, // and that is not needed as the data is already in native endian @@ -230,14 +232,14 @@ unsafe fn send_elements(writer: &mut W, elt_tag: Tag, length: usize, data: *c _ => { let mut data = data; for _ in 0..length { - send_value(writer, elt_tag, &mut data)?; + send_value(writer, elt_tag, &mut data, write_tags)?; } } } Ok(()) } -unsafe fn send_value(writer: &mut W, tag: Tag, data: &mut *const ()) +unsafe fn send_value(writer: &mut W, tag: Tag, data: &mut *const (), write_tags: bool) -> Result<(), Error> where W: Write + ?Sized { @@ -248,8 +250,9 @@ unsafe fn send_value(writer: &mut W, tag: Tag, data: &mut *const ()) $map }) } - - writer.write_u8(tag.as_u8())?; + if write_tags { + writer.write_u8(tag.as_u8())?; + } match tag { Tag::None => Ok(()), Tag::Bool => @@ -269,12 +272,14 @@ unsafe fn send_value(writer: &mut W, tag: Tag, data: &mut *const ()) writer.write_bytes((*ptr).as_ref())), Tag::Tuple(it, arity) => { let mut it = it.clone(); - writer.write_u8(arity)?; + if write_tags { + writer.write_u8(arity)?; + } let mut max_alignment = 0; for _ in 0..arity { let tag = it.next().expect("truncated tag"); max_alignment = core::cmp::max(max_alignment, tag.alignment()); - send_value(writer, tag, data)? + send_value(writer, tag, data, write_tags)? } *data = round_up_const(*data, max_alignment); Ok(()) @@ -286,11 +291,13 @@ unsafe fn send_value(writer: &mut W, tag: Tag, data: &mut *const ()) let length = (**ptr).length as usize; writer.write_u32((**ptr).length)?; let tag = it.clone().next().expect("truncated tag"); - send_elements(writer, tag, length, (**ptr).elements) + send_elements(writer, tag, length, (**ptr).elements, write_tags) }) } Tag::Array(it, num_dims) => { - writer.write_u8(num_dims)?; + if write_tags { + writer.write_u8(num_dims)?; + } consume_value!(*const(), |buffer| { let elt_tag = it.clone().next().expect("truncated tag"); @@ -302,14 +309,14 @@ unsafe fn send_value(writer: &mut W, tag: Tag, data: &mut *const ()) }) } let length = total_len as usize; - send_elements(writer, elt_tag, length, *buffer) + send_elements(writer, elt_tag, length, *buffer, write_tags) }) } Tag::Range(it) => { let tag = it.clone().next().expect("truncated tag"); - send_value(writer, tag, data)?; - send_value(writer, tag, data)?; - send_value(writer, tag, data)?; + send_value(writer, tag, data, write_tags)?; + send_value(writer, tag, data, write_tags)?; + send_value(writer, tag, data, write_tags)?; Ok(()) } Tag::Keyword(it) => { @@ -319,7 +326,7 @@ unsafe fn send_value(writer: &mut W, tag: Tag, data: &mut *const ()) writer.write_string(str::from_utf8((*ptr).name.as_ref()).unwrap())?; let tag = it.clone().next().expect("truncated tag"); let mut data = ptr.offset(1) as *const (); - send_value(writer, tag, &mut data) + send_value(writer, tag, &mut data, write_tags) }) // Tag::Keyword never appears in composite types, so we don't have // to accurately advance data. 
@@ -333,7 +340,7 @@ unsafe fn send_value(writer: &mut W, tag: Tag, data: &mut *const ()) } } -pub fn send_args(writer: &mut W, service: u32, tag_bytes: &[u8], data: *const *const ()) +pub fn send_args(writer: &mut W, service: u32, tag_bytes: &[u8], data: *const *const (), write_tags: bool) -> Result<(), Error> where W: Write + ?Sized { @@ -350,7 +357,7 @@ pub fn send_args(writer: &mut W, service: u32, tag_bytes: &[u8], data: *const for index in 0.. { if let Some(arg_tag) = args_it.next() { let mut data = unsafe { *data.offset(index) }; - unsafe { send_value(writer, arg_tag, &mut data)? }; + unsafe { send_value(writer, arg_tag, &mut data, write_tags)? }; } else { break } @@ -482,7 +489,7 @@ mod tag { #[derive(Debug, Clone, Copy)] pub struct TagIterator<'a> { - data: &'a [u8] + pub data: &'a [u8] } impl<'a> TagIterator<'a> { diff --git a/artiq/firmware/runtime/kernel.rs b/artiq/firmware/runtime/kernel.rs index e7a11bfc4..7196d2ffc 100644 --- a/artiq/firmware/runtime/kernel.rs +++ b/artiq/firmware/runtime/kernel.rs @@ -307,8 +307,7 @@ pub mod subkernel { pub struct Message { from_id: u32, - pub tag_count: u8, - pub tag: u8, + pub count: u8, pub data: Vec } @@ -334,9 +333,8 @@ pub mod subkernel { None => unsafe { CURRENT_MESSAGES.insert(id, Message { from_id: id, - tag_count: data[0], - tag: data[1], - data: data[2..length].to_vec() + count: data[0], + data: data[1..length].to_vec() }); } }; @@ -404,7 +402,7 @@ pub mod subkernel { let destination = unsafe { SUBKERNELS.get(&id).unwrap().destination }; // reuse rpc code for sending arbitrary data - rpc::send_args(&mut writer, 0, tag, message)?; + rpc::send_args(&mut writer, 0, tag, message, false)?; // skip service tag, but overwrite first byte with tag count let data = &mut writer.into_inner()[3..]; data[0] = count; diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index 4f629f512..471cc20c9 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -502,7 +502,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, None => unexpected!("unexpected RPC in flash kernel"), Some(ref mut stream) => { host_write(stream, host::Reply::RpcRequest { async: async })?; - rpc::send_args(stream, service, tag, data)?; + rpc::send_args(stream, service, tag, data, true)?; if !async { session.kernel_state = KernelState::RpcWait } @@ -608,10 +608,10 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, kern_acknowledge() } #[cfg(has_drtio)] - &kern::SubkernelMsgRecvRequest { id, timeout } => { + &kern::SubkernelMsgRecvRequest { id, timeout, tags } => { let message_received = subkernel::message_await(io, _subkernel_mutex, id, timeout); let (status, count) = match message_received { - Ok(ref message) => (kern::SubkernelStatus::NoError, message.tag_count), + Ok(ref message) => (kern::SubkernelStatus::NoError, message.count), Err(SubkernelError::Timeout) => (kern::SubkernelStatus::Timeout, 0), Err(SubkernelError::IncorrectState) => (kern::SubkernelStatus::IncorrectState, 0), Err(SubkernelError::SubkernelFinished) => { @@ -628,11 +628,11 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, } Err(_) => (kern::SubkernelStatus::OtherError, 0) }; - kern_send(io, &kern::SubkernelMsgRecvReply { status: status, count: count })?; + kern_send(io, &kern::SubkernelMsgRecvReply { status: status, count: count})?; if let Ok(message) = message_received { // receive code almost identical to RPC recv, except we are not reading from a stream let mut reader = Cursor::new(message.data); - let mut tag: [u8; 1] = 
[message.tag]; + let mut current_tags = tags; let mut i = 0; loop { // kernel has to consume all arguments in the whole message @@ -643,7 +643,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, "expected root value slot from kernel CPU, not {:?}", other) } })?; - let res = rpc::recv_return(&mut reader, &tag, slot, &|size| -> Result<_, Error> { + let res = rpc::recv_return(&mut reader, current_tags, slot, &|size| -> Result<_, Error> { if size == 0 { return Ok(0 as *mut ()) } @@ -657,17 +657,19 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, })?) }); match res { - Ok(_) => kern_send(io, &kern::RpcRecvReply(Ok(0)))?, + Ok(new_tags) => { + kern_send(io, &kern::RpcRecvReply(Ok(0)))?; + i += 1; + if i < message.count { + // update the tag for next read + current_tags = new_tags; + } else { + // should be done by then + break; + } + }, Err(_) => unexpected!("expected valid subkernel message data") }; - i += 1; - if i < message.tag_count { - // update the tag for next read - tag[0] = reader.read_u8()?; - } else { - // should be done by then - break; - } } Ok(()) } else { diff --git a/artiq/firmware/satman/kernel.rs b/artiq/firmware/satman/kernel.rs index cc401a134..822a17b25 100644 --- a/artiq/firmware/satman/kernel.rs +++ b/artiq/firmware/satman/kernel.rs @@ -6,7 +6,7 @@ use board_artiq::{mailbox, spi}; use board_misoc::{csr, clock, i2c}; use proto_artiq::{kernel_proto as kern, session_proto::Reply::KernelException as HostKernelException, rpc_proto as rpc}; use eh::eh_artiq; -use io::{Cursor, ProtoRead}; +use io::Cursor; use kernel::eh_artiq::StackPointerBacktrace; use ::{cricon_select, RtioMaster}; @@ -52,12 +52,12 @@ mod kernel_cpu { } } -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] enum KernelState { Absent, Loaded, Running, - MsgAwait { max_time: u64 }, + MsgAwait { max_time: u64, tags: Vec }, MsgSending } @@ -99,7 +99,6 @@ pub struct Sliceable { /* represents interkernel messages */ struct Message { count: u8, - tag: u8, data: Vec } @@ -201,8 +200,7 @@ impl MessageManager { None => { self.in_buffer = Some(Message { count: data[0], - tag: data[1], - data: data[2..length].to_vec() + data: data[1..length].to_vec() }); } }; @@ -264,7 +262,7 @@ impl MessageManager { pub fn accept_outgoing(&mut self, count: u8, tag: &[u8], data: *const *const ()) -> Result<(), Error> { let mut writer = Cursor::new(Vec::new()); - rpc::send_args(&mut writer, 0, tag, data)?; + rpc::send_args(&mut writer, 0, tag, data, false)?; // skip service tag, but write the count let mut data = writer.into_inner().split_off(3); data[0] = count; @@ -507,17 +505,18 @@ impl Manager { } fn process_external_messages(&mut self) -> Result<(), Error> { - match self.session.kernel_state { - KernelState::MsgAwait { max_time } => { - if clock::get_ms() > max_time { + match &self.session.kernel_state { + KernelState::MsgAwait { max_time, tags } => { + if clock::get_ms() > *max_time { kern_send(&kern::SubkernelMsgRecvReply { status: kern::SubkernelStatus::Timeout, count: 0 })?; self.session.kernel_state = KernelState::Running; return Ok(()) } if let Some(message) = self.session.messages.get_incoming() { kern_send(&kern::SubkernelMsgRecvReply { status: kern::SubkernelStatus::NoError, count: message.count })?; + let tags = tags.clone(); self.session.kernel_state = KernelState::Running; - pass_message_to_kernel(&message) + pass_message_to_kernel(&message, &tags) } else { Err(Error::AwaitingMessage) } @@ -538,7 +537,7 @@ impl Manager { // returns Ok(with_exception) on finish // None if the 
kernel is still running kern_recv(|request| { - match (request, self.session.kernel_state) { + match (request, &self.session.kernel_state) { (&kern::LoadReply(_), KernelState::Loaded) => { // We're standing by; ignore the message. return Ok(None) @@ -611,9 +610,9 @@ impl Manager { Ok(()) } - &kern::SubkernelMsgRecvRequest { id: _, timeout } => { + &kern::SubkernelMsgRecvRequest { id: _, timeout, tags } => { let max_time = clock::get_ms() + timeout as u64; - self.session.kernel_state = KernelState::MsgAwait { max_time: max_time }; + self.session.kernel_state = KernelState::MsgAwait { max_time: max_time, tags: tags.to_vec() }; Ok(()) }, @@ -695,10 +694,9 @@ fn slice_kernel_exception(exceptions: &[Option], } } -fn pass_message_to_kernel(message: &Message) -> Result<(), Error> { +fn pass_message_to_kernel(message: &Message, tags: &[u8]) -> Result<(), Error> { let mut reader = Cursor::new(&message.data); - let mut tag: [u8; 1] = [message.tag]; - let count = message.count; + let mut current_tags = tags; let mut i = 0; loop { let slot = kern_recv_w_timeout(100, |reply| { @@ -712,8 +710,7 @@ fn pass_message_to_kernel(message: &Message) -> Result<(), Error> { "expected root value slot from kernel CPU, not {:?}", other) } })?; - - let res = rpc::recv_return(&mut reader, &tag, slot, &|size| -> Result<_, Error> { + let res = rpc::recv_return(&mut reader, current_tags, slot, &|size| -> Result<_, Error> { if size == 0 { return Ok(0 as *mut ()) } @@ -735,17 +732,19 @@ fn pass_message_to_kernel(message: &Message) -> Result<(), Error> { })?) }); match res { - Ok(_) => kern_send(&kern::RpcRecvReply(Ok(0)))?, + Ok(new_tags) => { + kern_send(&kern::RpcRecvReply(Ok(0)))?; + i += 1; + if i < message.count { + // update the tag for next read + current_tags = new_tags; + } else { + // should be done by then + break; + } + }, Err(_) => unexpected!("expected valid subkernel message data") }; - i += 1; - if i < count { - // update the tag for next read - tag[0] = reader.read_u8()?; - } else { - // should be done by then - break; - } } Ok(()) } diff --git a/artiq/test/lit/embedding/subkernel_return.py b/artiq/test/lit/embedding/subkernel_return.py index 4845e24ba..2f498f75e 100644 --- a/artiq/test/lit/embedding/subkernel_return.py +++ b/artiq/test/lit/embedding/subkernel_return.py @@ -9,13 +9,13 @@ def entrypoint(): # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !. returning() - # CHECK: call i8 @subkernel_await_message\(i32 1, i64 10000, i8 1, i8 1\), !dbg !. + # CHECK: call i8 @subkernel_await_message\(i32 1, i64 10000, { i8\*, i32 }\* nonnull .*, i8 1, i8 1\), !dbg !. # CHECK: call void @subkernel_await_finish\(i32 1, i64 10000\), !dbg !. 
subkernel_await(returning) # CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr # CHECK-NOT-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr -# CHECK-L: declare i8 @subkernel_await_message(i32, i64, i8, i8) local_unnamed_addr +# CHECK-L: declare i8 @subkernel_await_message(i32, i64, { i8*, i32 }*, i8, i8) local_unnamed_addr # CHECK-L: declare void @subkernel_await_finish(i32, i64) local_unnamed_addr @subkernel(destination=1) def returning() -> TInt32: diff --git a/artiq/test/lit/embedding/subkernel_return_none.py b/artiq/test/lit/embedding/subkernel_return_none.py index 353b15c3e..f4e8a4508 100644 --- a/artiq/test/lit/embedding/subkernel_return_none.py +++ b/artiq/test/lit/embedding/subkernel_return_none.py @@ -10,13 +10,13 @@ def entrypoint(): # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !. returning_none() # CHECK: call void @subkernel_await_finish\(i32 1, i64 10000\), !dbg !. - # CHECK-NOT: call void @subkernel_await_message\(i32 1, i64 10000\), !dbg !. + # CHECK-NOT: call i8 @subkernel_await_message\(i32 1, i64 10000\, .*\), !dbg !. subkernel_await(returning_none) # CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr # CHECK-NOT-L: declare void @subkernel_send_message(i32, { i8*, i32 }*, i8**) local_unnamed_addr # CHECK-L: declare void @subkernel_await_finish(i32, i64) local_unnamed_addr -# CHECK-NOT-L: declare void @subkernel_await_message(i32, i64) local_unnamed_addr +# CHECK-NOT-L: declare i8 @subkernel_await_message(i32, i64, { i8*, i32 }*, i8, i8) local_unnamed_addr @subkernel(destination=1) def returning_none() -> TNone: pass From e480bbe8d8c24e9c881d102917ccd95f558320b1 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Wed, 18 Oct 2023 13:02:54 +0800 Subject: [PATCH 023/296] artiq_ddb_template: move satellite_cpu_target to core --- artiq/coredevice/core.py | 6 ++++-- artiq/frontend/artiq_ddb_template.py | 8 +++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/artiq/coredevice/core.py b/artiq/coredevice/core.py index f5a872335..ca9123bee 100644 --- a/artiq/coredevice/core.py +++ b/artiq/coredevice/core.py @@ -84,9 +84,11 @@ class Core: "core", "ref_period", "coarse_ref_period", "ref_multiplier", } - def __init__(self, dmgr, host, ref_period, ref_multiplier=8, target="rv32g"): + def __init__(self, dmgr, host, ref_period, ref_multiplier=8, + target="rv32g", satellite_cpu_targets={}): self.ref_period = ref_period self.ref_multiplier = ref_multiplier + self.satellite_cpu_targets = satellite_cpu_targets self.target_cls = get_target_cls(target) self.coarse_ref_period = ref_period*ref_multiplier if host is None: @@ -159,7 +161,7 @@ class Core: if subkernel_args[0][0] == 'self': self_arg = args[:1] destination = subkernel_fn.artiq_embedded.destination - destination_tgt = self.dmgr.ddb.get_satellite_cpu_target(destination) + destination_tgt = self.satellite_cpu_targets[destination] target = get_target_cls(destination_tgt)(subkernel_id=sid) object_map, kernel_library, _, _, _ = \ self.compile(subkernel_fn, self_arg, {}, attribute_writeback=False, diff --git a/artiq/frontend/artiq_ddb_template.py b/artiq/frontend/artiq_ddb_template.py index a3272b5d5..394252911 100755 --- a/artiq/frontend/artiq_ddb_template.py +++ b/artiq/frontend/artiq_ddb_template.py @@ -34,7 +34,7 @@ def process_header(output, description): "type": "local", "module": "artiq.coredevice.core", "class": "Core", - "arguments": {{"host": core_addr, "ref_period": {ref_period}, "target": "{cpu_target}"}}, + "arguments": 
{{"host": core_addr, "ref_period": {ref_period}, "target": "{cpu_target}", "satellite_cpu_targets": {{}} }}, }}, "core_log": {{ "type": "controller", @@ -60,8 +60,6 @@ def process_header(output, description): "class": "CoreDMA" }}, - "satellite_cpu_targets": {{}}, - "i2c_switch0": {{ "type": "local", "module": "artiq.coredevice.i2c", @@ -760,7 +758,7 @@ def process(output, primary_description, satellites): print(textwrap.dedent(""" # DEST#{dest} peripherals - device_db["satellite_cpu_targets"][{dest}] = \"{target}\"""").format( + device_db["core"]["arguments"]["satellite_cpu_targets"][{dest}] = \"{target}\"""").format( dest=destination, target=get_cpu_target(description)), file=output) @@ -773,7 +771,7 @@ def process(output, primary_description, satellites): print(textwrap.dedent(""" # DEST#{dest} peripherals - device_db["satellite_cpu_targets"][{dest}] = \"{target}\"""").format( + device_db["core"]["arguments"]["satellite_cpu_targets"][{dest}] = \"{target}\"""").format( dest=peripheral["drtio_destination"], target=get_cpu_target(peripheral)), file=output) From 93c9d8bcdfa4eb75817f77a1dc307ac803c1687f Mon Sep 17 00:00:00 2001 From: linuswck Date: Thu, 12 Oct 2023 10:33:06 +0800 Subject: [PATCH 024/296] artiq_ddb_template:set default Shuttler drtio_dest - remove default Shuttler "drtio_destination" value in jsonschema - set the default Shuttler "drtio_destination" value according to board "target" and "hw_rev" --- artiq/coredevice/coredevice_generic.schema.json | 3 +-- artiq/frontend/artiq_ddb_template.py | 12 +++++++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/artiq/coredevice/coredevice_generic.schema.json b/artiq/coredevice/coredevice_generic.schema.json index f12f1d9b0..bf79bb976 100644 --- a/artiq/coredevice/coredevice_generic.schema.json +++ b/artiq/coredevice/coredevice_generic.schema.json @@ -630,8 +630,7 @@ "maxItems": 2 }, "drtio_destination": { - "type": "integer", - "default": 4 + "type": "integer" } }, "required": ["ports"] diff --git a/artiq/frontend/artiq_ddb_template.py b/artiq/frontend/artiq_ddb_template.py index 394252911..5038563d6 100755 --- a/artiq/frontend/artiq_ddb_template.py +++ b/artiq/frontend/artiq_ddb_template.py @@ -767,7 +767,17 @@ def process(output, primary_description, satellites): n_channels = pm.process(rtio_offset, peripheral) rtio_offset += n_channels - for peripheral in drtio_peripherals: + for i, peripheral in enumerate(drtio_peripherals): + if not("drtio_destination" in peripheral): + if primary_description["target"] == "kasli": + if primary_description["hw_rev"] in ("v1.0", "v1.1"): + peripheral["drtio_destination"] = 3 + i + else: + peripheral["drtio_destination"] = 4 + i + elif primary_description["target"] == "kasli_soc": + peripheral["drtio_destination"] = 5 + i + else: + raise NotImplementedError print(textwrap.dedent(""" # DEST#{dest} peripherals From 9c68451cae8fb27f76826ffe1465f05ac6c05fcf Mon Sep 17 00:00:00 2001 From: jfniedermeyer Date: Thu, 12 Oct 2023 18:35:25 -0600 Subject: [PATCH 025/296] Add hotkeys to organize experiments in dashboard Signed-off-by: jfniedermeyer --- RELEASE_NOTES.rst | 3 +++ artiq/frontend/artiq_dashboard.py | 12 ++++++++++++ doc/manual/faq.rst | 10 ++++++++++ 3 files changed, 25 insertions(+) diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index 81edc5100..49006b8bf 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -38,6 +38,9 @@ Highlights: - The "Close all applets" command (shortcut: Ctrl-Alt-W) now ignores docked applets, making it a convenient way to clean up after 
exploratory work without destroying a carefully arranged default workspace. + - Hotkeys now organize experiment windows in the order they were last interacted with: + + CTRL+SHIFT+T tiles experiment windows + + CTRL+SHIFT+C cascades experiment windows * Persistent datasets are now stored in a LMDB database for improved performance. PYON databases can be converted with the script below. diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index 8bcf9546c..b1a7e39b0 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ -85,6 +85,18 @@ class MdiArea(QtWidgets.QMdiArea): self.pixmap = QtGui.QPixmap(os.path.join( artiq_dir, "gui", "logo_ver.svg")) + self.setActivationOrder(self.ActivationHistoryOrder) + + self.tile = QtWidgets.QShortcut( + QtGui.QKeySequence('Ctrl+Shift+T'), self) + self.tile.activated.connect( + lambda: self.tileSubWindows()) + + self.cascade = QtWidgets.QShortcut( + QtGui.QKeySequence('Ctrl+Shift+C'), self) + self.cascade.activated.connect( + lambda: self.cascadeSubWindows()) + def paintEvent(self, event): QtWidgets.QMdiArea.paintEvent(self, event) painter = QtGui.QPainter(self.viewport()) diff --git a/doc/manual/faq.rst b/doc/manual/faq.rst index bd5d4ac89..2e48f9e5c 100644 --- a/doc/manual/faq.rst +++ b/doc/manual/faq.rst @@ -27,6 +27,16 @@ organize datasets in folders? Use the dot (".") in dataset names to separate folders. The GUI will automatically create and delete folders in the dataset tree display. +organize experiment windows in the dashboard? +--------------------------------------------- + +Experiment windows can be organized by using the following hotkeys: + +* CTRL+SHIFT+T to tile experiment windows +* CTRL+SHIFT+C to cascade experiment windows + +The windows will be organized in the order they were last interacted with. + write a generator feeding a kernel feeding an analyze function? 
--------------------------------------------------------------- From 378dd0e5caf3a2afcb5ffe85f91fcdb68b9359f2 Mon Sep 17 00:00:00 2001 From: Florian Agbuya Date: Mon, 30 Oct 2023 14:09:31 +0800 Subject: [PATCH 026/296] flake: fix and upgrade wavedrom (closes #2266) Signed-off-by: Florian Agbuya --- flake.nix | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/flake.nix b/flake.nix index ae95ee250..0f3fc0a45 100644 --- a/flake.nix +++ b/flake.nix @@ -341,12 +341,13 @@ sphinxcontrib-wavedrom = pkgs.python3Packages.buildPythonPackage rec { pname = "sphinxcontrib-wavedrom"; - version = "3.0.2"; + version = "3.0.4"; + format = "pyproject"; src = pkgs.python3Packages.fetchPypi { inherit pname version; - sha256 = "sha256-ukZd3ajt0Sx3LByof4R80S31F5t1yo+L8QUADrMMm2A="; + sha256 = "sha256-0zTHVBr9kXwMEo4VRTFsxdX2HI31DxdHfLUHCQmw1Ko="; }; - buildInputs = [ pkgs.python3Packages.setuptools_scm ]; + nativeBuildInputs = [ pkgs.python3Packages.setuptools-scm ]; propagatedBuildInputs = (with pkgs.python3Packages; [ wavedrom sphinx xcffib cairosvg ]); }; latex-artiq-manual = pkgs.texlive.combine { From ce80bf57174b9d354801885dfba57dcf06243605 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Tue, 7 Nov 2023 13:40:17 +0800 Subject: [PATCH 027/296] flake: update dependencies --- flake.lock | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/flake.lock b/flake.lock index 284ffa1ae..031fde710 100644 --- a/flake.lock +++ b/flake.lock @@ -60,11 +60,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1697226376, - "narHash": "sha256-cumLLb1QOUtWieUnLGqo+ylNt3+fU8Lcv5Zl+tYbRUE=", + "lastModified": 1699169573, + "narHash": "sha256-cvUb1xZkvOp3W2SzylStrTirhVd9zCeo5utJl9nSIhw=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "898cb2064b6e98b8c5499f37e81adbdf2925f7c5", + "rev": "aeefe2054617cae501809b82b44a8e8f7be7cc4b", "type": "github" }, "original": { @@ -108,11 +108,11 @@ "src-migen": { "flake": false, "locked": { - "lastModified": 1697013661, - "narHash": "sha256-qNCqgWyE4vTDmyjE2XMJqW1djuBxT25A36AzQfZqluU=", + "lastModified": 1699335478, + "narHash": "sha256-BsubN4Mfdj02QPK6ZCrl+YOaSg7DaLQdSCVP49ztWik=", "owner": "m-labs", "repo": "migen", - "rev": "aadc19df93b7aa9ca761aaebbb98a11e0cf2d7c7", + "rev": "fd0bf5855a1367eab14b0d6f7f8266178e25d78e", "type": "github" }, "original": { @@ -124,11 +124,11 @@ "src-misoc": { "flake": false, "locked": { - "lastModified": 1693709836, - "narHash": "sha256-YiCk05RYLzZu1CYkQ2r7XtjwVEqkUGTQn388uOls9tI=", + "lastModified": 1699334718, + "narHash": "sha256-ccJnbIJ9si2QXvdW0wGvEK8kaaencfPbYaO7rME1UBY=", "ref": "refs/heads/master", - "rev": "58dc4ee60d165ce9145cf3d904241fc154b6407f", - "revCount": 2448, + "rev": "3cbc746cbd2a6125b8e48a2dc1810e17ba39f885", + "revCount": 2450, "submodules": true, "type": "git", "url": "https://github.com/m-labs/misoc.git" From bb0b8a6c002d00f1d727bdbe02279b2c4152ec92 Mon Sep 17 00:00:00 2001 From: linuswck Date: Fri, 3 Nov 2023 10:24:54 +0800 Subject: [PATCH 028/296] kasli: Correct the GTP TX clock path during init - TXOUT must be fed back into TXUSRCLK during initialization - Now, MMCM Clock Input is switched before GTP TX Init is started instead of after GTP TX Init is done - Reset in Sys Clock domain is kept asserted when clock is switched and GTP TX Init is NOT done --- artiq/firmware/runtime/rtio_clocking.rs | 2 +- artiq/firmware/satman/main.rs | 2 +- artiq/gateware/drtio/transceiver/gtp_7series.py | 8 ++++---- artiq/gateware/drtio/transceiver/gtp_7series_init.py | 4 ++-- 
artiq/gateware/targets/kasli.py | 6 ++++-- 5 files changed, 12 insertions(+), 10 deletions(-) diff --git a/artiq/firmware/runtime/rtio_clocking.rs b/artiq/firmware/runtime/rtio_clocking.rs index edb1b74e8..3f11da950 100644 --- a/artiq/firmware/runtime/rtio_clocking.rs +++ b/artiq/firmware/runtime/rtio_clocking.rs @@ -255,7 +255,7 @@ pub fn init() { }; if switched == 0 { info!("Switching sys clock, rebooting..."); - clock::spin_us(500); // delay for clean UART log + clock::spin_us(3000); // delay for clean UART log unsafe { // clock switch and reboot will begin after TX is initialized // and TX will be initialized after this diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index 4ca8c308c..f844abd1e 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -571,7 +571,7 @@ fn sysclk_setup() { si5324::setup(&SI5324_SETTINGS, si5324::Input::Ckin1).expect("cannot initialize Si5324"); info!("Switching sys clock, rebooting..."); // delay for clean UART log, wait until UART FIFO is empty - clock::spin_us(1300); + clock::spin_us(3000); unsafe { csr::gt_drtio::stable_clkin_write(1); } diff --git a/artiq/gateware/drtio/transceiver/gtp_7series.py b/artiq/gateware/drtio/transceiver/gtp_7series.py index 75c31a486..a5c0d0c00 100644 --- a/artiq/gateware/drtio/transceiver/gtp_7series.py +++ b/artiq/gateware/drtio/transceiver/gtp_7series.py @@ -18,7 +18,7 @@ class GTPSingle(Module): # # # - self.stable_clkin = Signal() + self.clk_path_ready = Signal() self.txenable = Signal() self.submodules.encoder = encoder = Encoder(2, True) self.submodules.decoders = decoders = [ClockDomainsRenamer("rtio_rx")( @@ -40,7 +40,7 @@ class GTPSingle(Module): self.submodules += rx_init self.comb += [ - tx_init.stable_clkin.eq(self.stable_clkin), + tx_init.clk_path_ready.eq(self.clk_path_ready), qpll_channel.reset.eq(tx_init.pllreset), tx_init.plllock.eq(qpll_channel.lock) ] @@ -715,7 +715,7 @@ class GTP(Module, TransceiverInterface): def __init__(self, qpll_channel, data_pads, sys_clk_freq, rtio_clk_freq, master=0): self.nchannels = nchannels = len(data_pads) self.gtps = [] - + self.clk_path_ready = Signal() # # # channel_interfaces = [] @@ -736,7 +736,7 @@ class GTP(Module, TransceiverInterface): TransceiverInterface.__init__(self, channel_interfaces) for n, gtp in enumerate(self.gtps): self.comb += [ - gtp.stable_clkin.eq(self.stable_clkin.storage), + gtp.clk_path_ready.eq(self.clk_path_ready), gtp.txenable.eq(self.txenable.storage[n]) ] diff --git a/artiq/gateware/drtio/transceiver/gtp_7series_init.py b/artiq/gateware/drtio/transceiver/gtp_7series_init.py index 8916c2c36..d7d86c459 100644 --- a/artiq/gateware/drtio/transceiver/gtp_7series_init.py +++ b/artiq/gateware/drtio/transceiver/gtp_7series_init.py @@ -10,7 +10,7 @@ __all__ = ["GTPTXInit", "GTPRXInit"] class GTPTXInit(Module): def __init__(self, sys_clk_freq, mode="single"): - self.stable_clkin = Signal() + self.clk_path_ready = Signal() self.done = Signal() self.restart = Signal() @@ -87,7 +87,7 @@ class GTPTXInit(Module): startup_fsm.act("PLL_RESET", self.pllreset.eq(1), pll_reset_timer.wait.eq(1), - If(pll_reset_timer.done & self.stable_clkin, + If(pll_reset_timer.done & self.clk_path_ready, NextState("GTP_RESET") ) ) diff --git a/artiq/gateware/targets/kasli.py b/artiq/gateware/targets/kasli.py index ca3364398..baf75b6bf 100755 --- a/artiq/gateware/targets/kasli.py +++ b/artiq/gateware/targets/kasli.py @@ -326,7 +326,8 @@ class MasterBase(MiniSoC, AMPSoC): txout_buf = Signal() self.specials += Instance("BUFG", 
i_I=gtp.txoutclk, o_O=txout_buf) - self.crg.configure(txout_buf, clk_sw=gtp.tx_init.done) + self.crg.configure(txout_buf, clk_sw=self.gt_drtio.stable_clkin.storage, ext_async_rst=self.crg.clk_sw_fsm.o_clk_sw & ~gtp.tx_init.done) + self.specials += MultiReg(self.crg.clk_sw_fsm.o_clk_sw & self.crg.mmcm_locked, self.gt_drtio.clk_path_ready, odomain="bootstrap") platform.add_period_constraint(gtp.txoutclk, rtio_clk_period) platform.add_period_constraint(gtp.rxoutclk, rtio_clk_period) @@ -596,7 +597,8 @@ class SatelliteBase(BaseSoC, AMPSoC): gtp = self.gt_drtio.gtps[0] txout_buf = Signal() self.specials += Instance("BUFG", i_I=gtp.txoutclk, o_O=txout_buf) - self.crg.configure(txout_buf, clk_sw=gtp.tx_init.done) + self.crg.configure(txout_buf, clk_sw=self.gt_drtio.stable_clkin.storage, ext_async_rst=self.crg.clk_sw_fsm.o_clk_sw & ~gtp.tx_init.done) + self.specials += MultiReg(self.crg.clk_sw_fsm.o_clk_sw & self.crg.mmcm_locked, self.gt_drtio.clk_path_ready, odomain="bootstrap") platform.add_period_constraint(gtp.txoutclk, rtio_clk_period) platform.add_period_constraint(gtp.rxoutclk, rtio_clk_period) From b3c0d084d40ba6bd2cde0bcd42f33b16a8fb6bae Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 26 Oct 2023 16:43:11 +0800 Subject: [PATCH 029/296] drtio: better control state of bigger payloads --- .../firmware/libproto_artiq/drtioaux_proto.rs | 70 +++++++++++++++---- artiq/firmware/runtime/kernel.rs | 6 +- artiq/firmware/runtime/rtio_mgt.rs | 25 ++++--- artiq/firmware/satman/dma.rs | 8 ++- artiq/firmware/satman/kernel.rs | 40 +++++++---- artiq/firmware/satman/main.rs | 18 ++--- 6 files changed, 112 insertions(+), 55 deletions(-) diff --git a/artiq/firmware/libproto_artiq/drtioaux_proto.rs b/artiq/firmware/libproto_artiq/drtioaux_proto.rs index 6ca4230eb..c0333b18c 100644 --- a/artiq/firmware/libproto_artiq/drtioaux_proto.rs +++ b/artiq/firmware/libproto_artiq/drtioaux_proto.rs @@ -20,6 +20,46 @@ pub const SAT_PAYLOAD_MAX_SIZE: usize = /*max size*/512 - /*CRC*/4 - /*packet I // used by DDMA, subkernel program data (need to provide extra ID and destination) pub const MASTER_PAYLOAD_MAX_SIZE: usize = SAT_PAYLOAD_MAX_SIZE - /*destination*/1 - /*ID*/4; +#[derive(PartialEq, Clone, Copy, Debug)] +#[repr(u8)] +pub enum PayloadStatus { + Middle = 0, + First = 1, + Last = 2, + FirstAndLast = 3, +} + +impl From for PayloadStatus { + fn from(value: u8) -> PayloadStatus { + match value { + 0 => PayloadStatus::Middle, + 1 => PayloadStatus::First, + 2 => PayloadStatus::Last, + 3 => PayloadStatus::FirstAndLast, + _ => unreachable!(), + } + } +} + +impl PayloadStatus { + pub fn is_first(self) -> bool { + self == PayloadStatus::First || self == PayloadStatus::FirstAndLast + } + + pub fn is_last(self) -> bool { + self == PayloadStatus::Last || self == PayloadStatus::FirstAndLast + } + + pub fn from_status(first: bool, last: bool) -> PayloadStatus { + match (first, last) { + (true, true) => PayloadStatus::FirstAndLast, + (true, false) => PayloadStatus::First, + (false, true) => PayloadStatus::Last, + (false, false) => PayloadStatus::Middle + } + } +} + #[derive(PartialEq, Debug)] pub enum Packet { EchoRequest, @@ -66,7 +106,7 @@ pub enum Packet { AnalyzerDataRequest { destination: u8 }, AnalyzerData { last: bool, length: u16, data: [u8; SAT_PAYLOAD_MAX_SIZE]}, - DmaAddTraceRequest { destination: u8, id: u32, last: bool, length: u16, trace: [u8; MASTER_PAYLOAD_MAX_SIZE] }, + DmaAddTraceRequest { destination: u8, id: u32, status: PayloadStatus, length: u16, trace: [u8; MASTER_PAYLOAD_MAX_SIZE] }, DmaAddTraceReply { 
succeeded: bool }, DmaRemoveTraceRequest { destination: u8, id: u32 }, DmaRemoveTraceReply { succeeded: bool }, @@ -74,14 +114,14 @@ pub enum Packet { DmaPlaybackReply { succeeded: bool }, DmaPlaybackStatus { destination: u8, id: u32, error: u8, channel: u32, timestamp: u64 }, - SubkernelAddDataRequest { destination: u8, id: u32, last: bool, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] }, + SubkernelAddDataRequest { destination: u8, id: u32, status: PayloadStatus, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] }, SubkernelAddDataReply { succeeded: bool }, SubkernelLoadRunRequest { destination: u8, id: u32, run: bool }, SubkernelLoadRunReply { succeeded: bool }, SubkernelFinished { id: u32, with_exception: bool }, SubkernelExceptionRequest { destination: u8 }, SubkernelException { last: bool, length: u16, data: [u8; SAT_PAYLOAD_MAX_SIZE] }, - SubkernelMessage { destination: u8, id: u32, last: bool, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] }, + SubkernelMessage { destination: u8, id: u32, status: PayloadStatus, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] }, SubkernelMessageAck { destination: u8 }, } @@ -240,14 +280,14 @@ impl Packet { 0xb0 => { let destination = reader.read_u8()?; let id = reader.read_u32()?; - let last = reader.read_bool()?; + let status = reader.read_u8()?; let length = reader.read_u16()?; let mut trace: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; reader.read_exact(&mut trace[0..length as usize])?; Packet::DmaAddTraceRequest { destination: destination, id: id, - last: last, + status: PayloadStatus::from(status), length: length as u16, trace: trace, } @@ -281,14 +321,14 @@ impl Packet { 0xc0 => { let destination = reader.read_u8()?; let id = reader.read_u32()?; - let last = reader.read_bool()?; + let status = reader.read_u8()?; let length = reader.read_u16()?; let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; reader.read_exact(&mut data[0..length as usize])?; Packet::SubkernelAddDataRequest { destination: destination, id: id, - last: last, + status: PayloadStatus::from(status), length: length as u16, data: data, } @@ -325,14 +365,14 @@ impl Packet { 0xcb => { let destination = reader.read_u8()?; let id = reader.read_u32()?; - let last = reader.read_bool()?; + let status = reader.read_u8()?; let length = reader.read_u16()?; let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; reader.read_exact(&mut data[0..length as usize])?; Packet::SubkernelMessage { destination: destination, id: id, - last: last, + status: PayloadStatus::from(status), length: length as u16, data: data, } @@ -521,11 +561,11 @@ impl Packet { writer.write_all(&data[0..length as usize])?; }, - Packet::DmaAddTraceRequest { destination, id, last, trace, length } => { + Packet::DmaAddTraceRequest { destination, id, status, trace, length } => { writer.write_u8(0xb0)?; writer.write_u8(destination)?; writer.write_u32(id)?; - writer.write_bool(last)?; + writer.write_u8(status as u8)?; // trace may be broken down to fit within drtio aux memory limit // will be reconstructed by satellite writer.write_u16(length)?; @@ -563,11 +603,11 @@ impl Packet { writer.write_u64(timestamp)?; }, - Packet::SubkernelAddDataRequest { destination, id, last, data, length } => { + Packet::SubkernelAddDataRequest { destination, id, status, data, length } => { writer.write_u8(0xc0)?; writer.write_u8(destination)?; writer.write_u32(id)?; - writer.write_bool(last)?; + writer.write_u8(status as u8)?; writer.write_u16(length)?; 
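+                // descriptive note (not in the original patch): wire format for this packet is
+                // type byte 0xc0, destination (u8), id (u32), status (u8, see PayloadStatus),
+                // length (u16), then `length` payload bytes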
writer.write_all(&data[0..length as usize])?; }, @@ -600,11 +640,11 @@ impl Packet { writer.write_u16(length)?; writer.write_all(&data[0..length as usize])?; }, - Packet::SubkernelMessage { destination, id, last, data, length } => { + Packet::SubkernelMessage { destination, id, status, data, length } => { writer.write_u8(0xcb)?; writer.write_u8(destination)?; writer.write_u32(id)?; - writer.write_bool(last)?; + writer.write_u8(status as u8)?; writer.write_u16(length)?; writer.write_all(&data[0..length as usize])?; }, diff --git a/artiq/firmware/runtime/kernel.rs b/artiq/firmware/runtime/kernel.rs index 7196d2ffc..8bf46451f 100644 --- a/artiq/firmware/runtime/kernel.rs +++ b/artiq/firmware/runtime/kernel.rs @@ -95,7 +95,7 @@ pub mod subkernel { use core::str; use board_artiq::drtio_routing::RoutingTable; use board_misoc::clock; - use proto_artiq::{drtioaux_proto::MASTER_PAYLOAD_MAX_SIZE, rpc_proto as rpc}; + use proto_artiq::{drtioaux_proto::{PayloadStatus, MASTER_PAYLOAD_MAX_SIZE}, rpc_proto as rpc}; use io::Cursor; use rtio_mgt::drtio; use sched::{Io, Mutex, Error as SchedError}; @@ -317,7 +317,7 @@ pub mod subkernel { static mut CURRENT_MESSAGES: BTreeMap = BTreeMap::new(); pub fn message_handle_incoming(io: &Io, subkernel_mutex: &Mutex, - id: u32, last: bool, length: usize, data: &[u8; MASTER_PAYLOAD_MAX_SIZE]) { + id: u32, status: PayloadStatus, length: usize, data: &[u8; MASTER_PAYLOAD_MAX_SIZE]) { // called when receiving a message from satellite let _lock = match subkernel_mutex.lock(io) { Ok(lock) => lock, @@ -338,7 +338,7 @@ pub mod subkernel { }); } }; - if last { + if status.is_last() { unsafe { // when done, remove from working queue MESSAGE_QUEUE.push(CURRENT_MESSAGES.remove(&id).unwrap()); diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index ba6ba3865..a03737178 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -17,7 +17,7 @@ pub mod drtio { use super::*; use alloc::vec::Vec; use drtioaux; - use proto_artiq::drtioaux_proto::MASTER_PAYLOAD_MAX_SIZE; + use proto_artiq::drtioaux_proto::{MASTER_PAYLOAD_MAX_SIZE, PayloadStatus}; use rtio_dma::remote_dma; #[cfg(has_rtio_analyzer)] use analyzer::remote_analyzer::RemoteBuffer; @@ -75,8 +75,8 @@ pub mod drtio { subkernel::subkernel_finished(io, subkernel_mutex, id, with_exception); None }, - drtioaux::Packet::SubkernelMessage { id, destination: from, last, length, data } => { - subkernel::message_handle_incoming(io, subkernel_mutex, id, last, length as usize, &data); + drtioaux::Packet::SubkernelMessage { id, destination: from, status, length, data } => { + subkernel::message_handle_incoming(io, subkernel_mutex, id, status, length as usize, &data); // acknowledge receiving part of the message drtioaux::send(linkno, &drtioaux::Packet::SubkernelMessageAck { destination: from } @@ -391,15 +391,18 @@ pub mod drtio { } fn partition_data(data: &[u8], send_f: F) -> Result<(), &'static str> - where F: Fn(&[u8; MASTER_PAYLOAD_MAX_SIZE], bool, usize) -> Result<(), &'static str> { + where F: Fn(&[u8; MASTER_PAYLOAD_MAX_SIZE], PayloadStatus, usize) -> Result<(), &'static str> { let mut i = 0; + let mut first = true; while i < data.len() { let mut slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; let len: usize = if i + MASTER_PAYLOAD_MAX_SIZE < data.len() { MASTER_PAYLOAD_MAX_SIZE } else { data.len() - i } as usize; let last = i + len == data.len(); + let status = PayloadStatus::from_status(first, last); 
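+            // descriptive note (not in the original patch): PayloadStatus tells the receiver
+            // where this slice sits in the stream - First starts a fresh buffer on the
+            // satellite, Last marks the payload complete, FirstAndLast covers a single-slice
+            // payload, and Middle simply appends.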
slice[..len].clone_from_slice(&data[i..i+len]); i += len; - send_f(&slice, last, len)?; + send_f(&slice, status, len)?; + first = false; } Ok(()) } @@ -408,10 +411,10 @@ pub mod drtio { routing_table: &drtio_routing::RoutingTable, id: u32, destination: u8, trace: &[u8]) -> Result<(), &'static str> { let linkno = routing_table.0[destination as usize][0] - 1; - partition_data(trace, |slice, last, len: usize| { + partition_data(trace, |slice, status, len: usize| { let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::DmaAddTraceRequest { - id: id, destination: destination, last: last, length: len as u16, trace: *slice}); + id: id, destination: destination, status: status, length: len as u16, trace: *slice}); match reply { Ok(drtioaux::Packet::DmaAddTraceReply { succeeded: true }) => Ok(()), Ok(drtioaux::Packet::DmaAddTraceReply { succeeded: false }) => @@ -504,10 +507,10 @@ pub mod drtio { pub fn subkernel_upload(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, id: u32, destination: u8, data: &Vec) -> Result<(), &'static str> { let linkno = routing_table.0[destination as usize][0] - 1; - partition_data(data, |slice, last, len: usize| { + partition_data(data, |slice, status, len: usize| { let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::SubkernelAddDataRequest { - id: id, destination: destination, last: last, length: len as u16, data: *slice}); + id: id, destination: destination, status: status, length: len as u16, data: *slice}); match reply { Ok(drtioaux::Packet::SubkernelAddDataReply { succeeded: true }) => Ok(()), Ok(drtioaux::Packet::SubkernelAddDataReply { succeeded: false }) => @@ -557,10 +560,10 @@ pub mod drtio { routing_table: &drtio_routing::RoutingTable, id: u32, destination: u8, message: &[u8] ) -> Result<(), &'static str> { let linkno = routing_table.0[destination as usize][0] - 1; - partition_data(message, |slice, last, len: usize| { + partition_data(message, |slice, status, len: usize| { let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::SubkernelMessage { - destination: destination, id: id, last: last, length: len as u16, data: *slice}); + destination: destination, id: id, status: status, length: len as u16, data: *slice}); match reply { Ok(drtioaux::Packet::SubkernelMessageAck { .. 
}) => Ok(()), Ok(_) => Err("sending message to subkernel failed, unexpected aux packet"), diff --git a/artiq/firmware/satman/dma.rs b/artiq/firmware/satman/dma.rs index 34dcaf475..277e27927 100644 --- a/artiq/firmware/satman/dma.rs +++ b/artiq/firmware/satman/dma.rs @@ -1,4 +1,5 @@ use board_misoc::{csr, cache::flush_l2_cache}; +use proto_artiq::drtioaux_proto::PayloadStatus; use alloc::{vec::Vec, collections::btree_map::BTreeMap}; use ::{cricon_select, RtioMaster}; @@ -51,7 +52,10 @@ impl Manager { } } - pub fn add(&mut self, id: u32, last: bool, trace: &[u8], trace_len: usize) -> Result<(), Error> { + pub fn add(&mut self, id: u32, status: PayloadStatus, trace: &[u8], trace_len: usize) -> Result<(), Error> { + if status.is_first() { + self.entries.remove(&id); + } let entry = match self.entries.get_mut(&id) { Some(entry) => { if entry.complete { @@ -76,7 +80,7 @@ impl Manager { }; entry.trace.extend(&trace[0..trace_len]); - if last { + if status.is_last() { entry.trace.push(0); let data_len = entry.trace.len(); diff --git a/artiq/firmware/satman/kernel.rs b/artiq/firmware/satman/kernel.rs index 822a17b25..850a126a0 100644 --- a/artiq/firmware/satman/kernel.rs +++ b/artiq/firmware/satman/kernel.rs @@ -4,7 +4,11 @@ use cslice::AsCSlice; use board_artiq::{mailbox, spi}; use board_misoc::{csr, clock, i2c}; -use proto_artiq::{kernel_proto as kern, session_proto::Reply::KernelException as HostKernelException, rpc_proto as rpc}; +use proto_artiq::{ + drtioaux_proto::PayloadStatus, + kernel_proto as kern, + session_proto::Reply::KernelException as HostKernelException, + rpc_proto as rpc}; use eh::eh_artiq; use io::Cursor; use kernel::eh_artiq::StackPointerBacktrace; @@ -148,24 +152,22 @@ pub struct SubkernelFinished { pub struct SliceMeta { pub len: u16, - pub last: bool + pub status: PayloadStatus } macro_rules! 
get_slice_fn { ( $name:tt, $size:expr ) => { pub fn $name(&mut self, data_slice: &mut [u8; $size]) -> SliceMeta { - if self.data.len() == 0 { - return SliceMeta { len: 0, last: true }; - } + let first = self.it == 0; let len = min($size, self.data.len() - self.it); let last = self.it + len == self.data.len(); - + let status = PayloadStatus::from_status(first, last); data_slice[..len].clone_from_slice(&self.data[self.it..self.it+len]); self.it += len; SliceMeta { len: len as u16, - last: last + status: status } } }; @@ -193,8 +195,12 @@ impl MessageManager { } } - pub fn handle_incoming(&mut self, last: bool, length: usize, data: &[u8; MASTER_PAYLOAD_MAX_SIZE]) { + pub fn handle_incoming(&mut self, status: PayloadStatus, length: usize, data: &[u8; MASTER_PAYLOAD_MAX_SIZE]) { // called when receiving a message from master + if status.is_first() { + // clear the buffer for first message + self.in_buffer = None; + } match self.in_buffer.as_mut() { Some(message) => message.data.extend(&data[..length]), None => { @@ -204,7 +210,7 @@ impl MessageManager { }); } }; - if last { + if status.is_last() { // when done, remove from working queue self.in_queue.push_back(self.in_buffer.take().unwrap()); } @@ -236,7 +242,7 @@ impl MessageManager { return None; } let meta = self.out_message.as_mut()?.get_slice_master(data_slice); - if meta.last { + if meta.status.is_last() { // clear the message slot self.out_message = None; // notify kernel with a flag that message is sent @@ -315,7 +321,11 @@ impl Manager { } } - pub fn add(&mut self, id: u32, last: bool, data: &[u8], data_len: usize) -> Result<(), Error> { + pub fn add(&mut self, id: u32, status: PayloadStatus, data: &[u8], data_len: usize) -> Result<(), Error> { + if status.is_first() { + // in case master is interrupted, and subkernel is sent again, clean the state + self.kernels.remove(&id); + } let kernel = match self.kernels.get_mut(&id) { Some(kernel) => { if kernel.complete { @@ -338,7 +348,7 @@ impl Manager { }; kernel.library.extend(&data[0..data_len]); - kernel.complete = last; + kernel.complete = status.is_last(); Ok(()) } @@ -371,11 +381,11 @@ impl Manager { kern_acknowledge() } - pub fn message_handle_incoming(&mut self, last: bool, length: usize, slice: &[u8; MASTER_PAYLOAD_MAX_SIZE]) { + pub fn message_handle_incoming(&mut self, status: PayloadStatus, length: usize, slice: &[u8; MASTER_PAYLOAD_MAX_SIZE]) { if !self.is_running() { return; } - self.session.messages.handle_incoming(last, length, slice); + self.session.messages.handle_incoming(status, length, slice); } pub fn message_get_slice(&mut self, slice: &mut [u8; MASTER_PAYLOAD_MAX_SIZE]) -> Option { @@ -437,7 +447,7 @@ impl Manager { pub fn exception_get_slice(&mut self, data_slice: &mut [u8; SAT_PAYLOAD_MAX_SIZE]) -> SliceMeta { match self.session.last_exception.as_mut() { Some(exception) => exception.get_slice_sat(data_slice), - None => SliceMeta { len: 0, last: true } + None => SliceMeta { len: 0, status: PayloadStatus::FirstAndLast } } } diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index f844abd1e..1856c56b2 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -145,7 +145,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg let meta = kernelmgr.message_get_slice(&mut data_slice).unwrap(); drtioaux::send(0, &drtioaux::Packet::SubkernelMessage { destination: destination, id: kernelmgr.get_current_id().unwrap(), - last: meta.last, length: meta.len as u16, data: data_slice + status: 
meta.status, length: meta.len as u16, data: data_slice })?; } else { let errors; @@ -370,9 +370,9 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg }) } - drtioaux::Packet::DmaAddTraceRequest { destination: _destination, id, last, length, trace } => { + drtioaux::Packet::DmaAddTraceRequest { destination: _destination, id, status, length, trace } => { forward!(_routing_table, _destination, *_rank, _repeaters, &packet); - let succeeded = dmamgr.add(id, last, &trace, length as usize).is_ok(); + let succeeded = dmamgr.add(id, status, &trace, length as usize).is_ok(); drtioaux::send(0, &drtioaux::Packet::DmaAddTraceReply { succeeded: succeeded }) } @@ -390,9 +390,9 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg &drtioaux::Packet::DmaPlaybackReply { succeeded: succeeded }) } - drtioaux::Packet::SubkernelAddDataRequest { destination: _destination, id, last, length, data } => { + drtioaux::Packet::SubkernelAddDataRequest { destination: _destination, id, status, length, data } => { forward!(_routing_table, _destination, *_rank, _repeaters, &packet); - let succeeded = kernelmgr.add(id, last, &data, length as usize).is_ok(); + let succeeded = kernelmgr.add(id, status, &data, length as usize).is_ok(); drtioaux::send(0, &drtioaux::Packet::SubkernelAddDataReply { succeeded: succeeded }) } @@ -416,14 +416,14 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE]; let meta = kernelmgr.exception_get_slice(&mut data_slice); drtioaux::send(0, &drtioaux::Packet::SubkernelException { - last: meta.last, + last: meta.status.is_last(), length: meta.len, data: data_slice, }) } - drtioaux::Packet::SubkernelMessage { destination, id: _id, last, length, data } => { + drtioaux::Packet::SubkernelMessage { destination, id: _id, status, length, data } => { forward!(_routing_table, destination, *_rank, _repeaters, &packet); - kernelmgr.message_handle_incoming(last, length as usize, &data); + kernelmgr.message_handle_incoming(status, length as usize, &data); drtioaux::send(0, &drtioaux::Packet::SubkernelMessageAck { destination: destination }) @@ -435,7 +435,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg if let Some(meta) = kernelmgr.message_get_slice(&mut data_slice) { drtioaux::send(0, &drtioaux::Packet::SubkernelMessage { destination: *_rank, id: kernelmgr.get_current_id().unwrap(), - last: meta.last, length: meta.len as u16, data: data_slice + status: meta.status, length: meta.len as u16, data: data_slice })? 
} else { error!("Error receiving message slice"); From 6640bf0e829f72d719a4740644e9c389052d7b46 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 26 Oct 2023 17:05:11 +0800 Subject: [PATCH 030/296] drtioaux/subkernel/ddma: introduce proper errors, more robust --- artiq/firmware/runtime/analyzer.rs | 9 +- artiq/firmware/runtime/kern_hwreq.rs | 12 +- artiq/firmware/runtime/kernel.rs | 71 ++++++---- artiq/firmware/runtime/rtio_dma.rs | 88 ++++++++----- artiq/firmware/runtime/rtio_mgt.rs | 189 ++++++++++++++++----------- artiq/firmware/runtime/session.rs | 82 +++++++++--- 6 files changed, 281 insertions(+), 170 deletions(-) diff --git a/artiq/firmware/runtime/analyzer.rs b/artiq/firmware/runtime/analyzer.rs index fa62a535c..41cca6e46 100644 --- a/artiq/firmware/runtime/analyzer.rs +++ b/artiq/firmware/runtime/analyzer.rs @@ -54,19 +54,16 @@ pub mod remote_analyzer { pub fn get_data(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, up_destinations: &Urc> - ) -> Result { + ) -> Result { // gets data from satellites and returns consolidated data let mut remote_data: Vec = Vec::new(); let mut remote_overflow = false; let mut remote_sent_bytes = 0; let mut remote_total_bytes = 0; - let data_vec = match drtio::analyzer_query( + let data_vec = drtio::analyzer_query( io, aux_mutex, routing_table, up_destinations - ) { - Ok(data_vec) => data_vec, - Err(e) => return Err(e) - }; + )?; for data in data_vec { remote_total_bytes += data.total_byte_count; remote_sent_bytes += data.sent_bytes; diff --git a/artiq/firmware/runtime/kern_hwreq.rs b/artiq/firmware/runtime/kern_hwreq.rs index 7fd0b379c..49aa2d8af 100644 --- a/artiq/firmware/runtime/kern_hwreq.rs +++ b/artiq/firmware/runtime/kern_hwreq.rs @@ -32,7 +32,7 @@ mod remote_i2c { } Err(e) => { error!("aux packet error ({})", e); - Err(e) + Err("aux packet error") } } } @@ -55,7 +55,7 @@ mod remote_i2c { } Err(e) => { error!("aux packet error ({})", e); - Err(e) + Err("aux packet error") } } } @@ -78,7 +78,7 @@ mod remote_i2c { } Err(e) => { error!("aux packet error ({})", e); - Err(e) + Err("aux packet error") } } } @@ -102,7 +102,7 @@ mod remote_i2c { } Err(e) => { error!("aux packet error ({})", e); - Err(e) + Err("aux packet error") } } } @@ -126,7 +126,7 @@ mod remote_i2c { } Err(e) => { error!("aux packet error ({})", e); - Err(e) + Err("aux packet error") } } } @@ -151,7 +151,7 @@ mod remote_i2c { } Err(e) => { error!("aux packet error ({})", e); - Err(e) + Err("aux packet error") } } } diff --git a/artiq/firmware/runtime/kernel.rs b/artiq/firmware/runtime/kernel.rs index 8bf46451f..a308e6c1c 100644 --- a/artiq/firmware/runtime/kernel.rs +++ b/artiq/firmware/runtime/kernel.rs @@ -91,8 +91,7 @@ pub fn validate(ptr: usize) -> bool { #[cfg(has_drtio)] pub mod subkernel { - use alloc::{vec::Vec, collections::btree_map::BTreeMap, string::String, string::ToString}; - use core::str; + use alloc::{vec::Vec, collections::btree_map::BTreeMap}; use board_artiq::drtio_routing::RoutingTable; use board_misoc::clock; use proto_artiq::{drtioaux_proto::{PayloadStatus, MASTER_PAYLOAD_MAX_SIZE}, rpc_proto as rpc}; @@ -119,32 +118,30 @@ pub mod subkernel { pub enum Error { #[fail(display = "Timed out waiting for subkernel")] Timeout, - #[fail(display = "Session killed while waiting for subkernel")] - SessionKilled, #[fail(display = "Subkernel is in incorrect state for the given operation")] IncorrectState, #[fail(display = "DRTIO error: {}", _0)] - DrtioError(String), - #[fail(display = "scheduler error")] - SchedError(SchedError), + 
DrtioError(#[cause] drtio::Error), + #[fail(display = "scheduler error: {}", _0)] + SchedError(#[cause] SchedError), #[fail(display = "rpc io error")] RpcIoError, #[fail(display = "subkernel finished prematurely")] SubkernelFinished, } - impl From<&str> for Error { - fn from(value: &str) -> Error { - Error::DrtioError(value.to_string()) + impl From for Error { + fn from(value: drtio::Error) -> Error { + match value { + drtio::Error::SchedError(x) => Error::SchedError(x), + x => Error::DrtioError(x), + } } } impl From for Error { fn from(value: SchedError) -> Error { - match value { - SchedError::Interrupted => Error::SessionKilled, - x => Error::SchedError(x) - } + Error::SchedError(value) } } @@ -178,14 +175,15 @@ pub mod subkernel { static mut SUBKERNELS: BTreeMap = BTreeMap::new(); - pub fn add_subkernel(io: &Io, subkernel_mutex: &Mutex, id: u32, destination: u8, kernel: Vec) { - let _lock = subkernel_mutex.lock(io).unwrap(); + pub fn add_subkernel(io: &Io, subkernel_mutex: &Mutex, id: u32, destination: u8, kernel: Vec) -> Result<(), Error> { + let _lock = subkernel_mutex.lock(io)?; unsafe { SUBKERNELS.insert(id, Subkernel::new(destination, kernel)); } + Ok(()) } pub fn upload(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable, id: u32) -> Result<(), Error> { - let _lock = subkernel_mutex.lock(io).unwrap(); + let _lock = subkernel_mutex.lock(io)?; let subkernel = unsafe { SUBKERNELS.get_mut(&id).unwrap() }; drtio::subkernel_upload(io, aux_mutex, routing_table, id, subkernel.destination, &subkernel.data)?; @@ -195,9 +193,10 @@ pub mod subkernel { pub fn load(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable, id: u32, run: bool) -> Result<(), Error> { - let _lock = subkernel_mutex.lock(io).unwrap(); + let _lock = subkernel_mutex.lock(io)?; let subkernel = unsafe { SUBKERNELS.get_mut(&id).unwrap() }; if subkernel.state != SubkernelState::Uploaded { + error!("for id: {} expected Uploaded, got: {:?}", id, subkernel.state); return Err(Error::IncorrectState); } drtio::subkernel_load(io, aux_mutex, routing_table, id, subkernel.destination, run)?; @@ -207,13 +206,14 @@ pub mod subkernel { Ok(()) } - pub fn clear_subkernels(io: &Io, subkernel_mutex: &Mutex) { - let _lock = subkernel_mutex.lock(io).unwrap(); + pub fn clear_subkernels(io: &Io, subkernel_mutex: &Mutex) -> Result<(), Error> { + let _lock = subkernel_mutex.lock(io)?; unsafe { SUBKERNELS = BTreeMap::new(); MESSAGE_QUEUE = Vec::new(); CURRENT_MESSAGES = BTreeMap::new(); } + Ok(()) } pub fn subkernel_finished(io: &Io, subkernel_mutex: &Mutex, id: u32, with_exception: bool) { @@ -222,10 +222,13 @@ pub mod subkernel { let subkernel = unsafe { SUBKERNELS.get_mut(&id) }; // may be None if session ends and is cleared if let Some(subkernel) = subkernel { - subkernel.state = SubkernelState::Finished { - status: match with_exception { - true => FinishStatus::Exception, - false => FinishStatus::Ok, + // ignore other messages, could be a late finish reported + if subkernel.state == SubkernelState::Running { + subkernel.state = SubkernelState::Finished { + status: match with_exception { + true => FinishStatus::Exception, + false => FinishStatus::Ok, + } } } } @@ -269,7 +272,9 @@ pub mod subkernel { } else { None } }) }, - _ => Err(Error::IncorrectState) + _ => { + Err(Error::IncorrectState) + } } } @@ -279,7 +284,9 @@ pub mod subkernel { let _lock = subkernel_mutex.lock(io)?; match unsafe { SUBKERNELS.get(&id).unwrap().state } { SubkernelState::Running | SubkernelState::Finished { .. 
} => (), - _ => return Err(Error::IncorrectState) + _ => { + return Err(Error::IncorrectState); + } } } let max_time = clock::get_ms() + timeout as u64; @@ -324,10 +331,16 @@ pub mod subkernel { // may get interrupted, when session is cancelled or main kernel finishes without await Err(_) => return, }; - if unsafe { SUBKERNELS.get(&id).is_none() } { - // do not add messages for non-existing or deleted subkernels + let subkernel = unsafe { SUBKERNELS.get(&id) }; + if subkernel.is_none() || subkernel.unwrap().state != SubkernelState::Running { + // do not add messages for non-existing, non-running or deleted subkernels return } + if status.is_first() { + unsafe { + CURRENT_MESSAGES.remove(&id); + } + } match unsafe { CURRENT_MESSAGES.get_mut(&id) } { Some(message) => message.data.extend(&data[..length]), None => unsafe { @@ -398,7 +411,7 @@ pub mod subkernel { routing_table: &RoutingTable, id: u32, count: u8, tag: &'a [u8], message: *const *const () ) -> Result<(), Error> { let mut writer = Cursor::new(Vec::new()); - let _lock = subkernel_mutex.lock(io).unwrap(); + let _lock = subkernel_mutex.lock(io)?; let destination = unsafe { SUBKERNELS.get(&id).unwrap().destination }; // reuse rpc code for sending arbitrary data diff --git a/artiq/firmware/runtime/rtio_dma.rs b/artiq/firmware/runtime/rtio_dma.rs index ce0fd4088..63bf563a6 100644 --- a/artiq/firmware/runtime/rtio_dma.rs +++ b/artiq/firmware/runtime/rtio_dma.rs @@ -1,6 +1,6 @@ use core::mem; use alloc::{vec::Vec, string::String, collections::btree_map::BTreeMap}; -use sched::{Io, Mutex}; +use sched::{Io, Mutex, Error as SchedError}; const ALIGNMENT: usize = 64; @@ -39,19 +39,48 @@ pub mod remote_dma { } } + #[derive(Fail, Debug)] + pub enum Error { + #[fail(display = "Timed out waiting for DMA results")] + Timeout, + #[fail(display = "DDMA trace is in incorrect state for the given operation")] + IncorrectState, + #[fail(display = "scheduler error: {}", _0)] + SchedError(#[cause] SchedError), + #[fail(display = "DRTIO error: {}", _0)] + DrtioError(#[cause] drtio::Error), + } + + impl From for Error { + fn from(value: drtio::Error) -> Error { + match value { + drtio::Error::SchedError(x) => Error::SchedError(x), + x => Error::DrtioError(x), + } + } + } + + impl From for Error { + fn from(value: SchedError) -> Error { + Error::SchedError(value) + } + } + // remote traces map. 
ID -> destination, trace pair static mut TRACES: BTreeMap> = BTreeMap::new(); - pub fn add_traces(io: &Io, ddma_mutex: &Mutex, id: u32, traces: BTreeMap>) { - let _lock = ddma_mutex.lock(io); + pub fn add_traces(io: &Io, ddma_mutex: &Mutex, id: u32, traces: BTreeMap> + ) -> Result<(), SchedError> { + let _lock = ddma_mutex.lock(io)?; let mut trace_map: BTreeMap = BTreeMap::new(); for (destination, trace) in traces { trace_map.insert(destination, trace.into()); } unsafe { TRACES.insert(id, trace_map); } + Ok(()) } - pub fn await_done(io: &Io, ddma_mutex: &Mutex, id: u32, timeout: u64) -> Result { + pub fn await_done(io: &Io, ddma_mutex: &Mutex, id: u32, timeout: u64) -> Result { let max_time = clock::get_ms() + timeout as u64; io.until(|| { if clock::get_ms() > max_time { @@ -70,15 +99,15 @@ pub mod remote_dma { } } true - }).unwrap(); + })?; if clock::get_ms() > max_time { error!("Remote DMA await done timed out"); - return Err("Timed out waiting for results."); + return Err(Error::Timeout); } // clear the internal state, and if there have been any errors, return one of them let mut playback_state: RemoteState = RemoteState::PlaybackEnded { error: 0, channel: 0, timestamp: 0 }; { - let _lock = ddma_mutex.lock(io).unwrap(); + let _lock = ddma_mutex.lock(io)?; let traces = unsafe { TRACES.get_mut(&id).unwrap() }; for (_dest, trace) in traces { match trace.state { @@ -92,8 +121,8 @@ pub mod remote_dma { } pub fn erase(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, - routing_table: &RoutingTable, id: u32) { - let _lock = ddma_mutex.lock(io).unwrap(); + routing_table: &RoutingTable, id: u32) -> Result<(), Error> { + let _lock = ddma_mutex.lock(io)?; let destinations = unsafe { TRACES.get(&id).unwrap() }; for destination in destinations.keys() { match drtio::ddma_send_erase(io, aux_mutex, routing_table, id, *destination) { @@ -102,42 +131,39 @@ pub mod remote_dma { } } unsafe { TRACES.remove(&id); } + Ok(()) } pub fn upload_traces(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, - routing_table: &RoutingTable, id: u32) { - let _lock = ddma_mutex.lock(io); + routing_table: &RoutingTable, id: u32) -> Result<(), Error> { + let _lock = ddma_mutex.lock(io)?; let traces = unsafe { TRACES.get_mut(&id).unwrap() }; for (destination, mut trace) in traces { - match drtio::ddma_upload_trace(io, aux_mutex, routing_table, id, *destination, trace.get_trace()) - { - Ok(_) => trace.state = RemoteState::Loaded, - Err(e) => error!("Error adding DMA trace on destination {}: {}", destination, e) - } + drtio::ddma_upload_trace(io, aux_mutex, routing_table, id, *destination, trace.get_trace())?; + trace.state = RemoteState::Loaded; } + Ok(()) } pub fn playback(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, - routing_table: &RoutingTable, id: u32, timestamp: u64) { + routing_table: &RoutingTable, id: u32, timestamp: u64) -> Result<(), Error>{ // triggers playback on satellites let destinations = unsafe { - let _lock = ddma_mutex.lock(io).unwrap(); + let _lock = ddma_mutex.lock(io)?; TRACES.get(&id).unwrap() }; for (destination, trace) in destinations { { // need to drop the lock before sending the playback request to avoid a deadlock // if a PlaybackStatus is returned from another satellite in the meanwhile. 
- let _lock = ddma_mutex.lock(io).unwrap(); + let _lock = ddma_mutex.lock(io)?; if trace.state != RemoteState::Loaded { error!("Destination {} not ready for DMA, state: {:?}", *destination, trace.state); - continue; + return Err(Error::IncorrectState); } } - match drtio::ddma_send_playback(io, aux_mutex, routing_table, id, *destination, timestamp) { - Ok(_) => (), - Err(e) => error!("Error during remote DMA playback: {}", e) - } + drtio::ddma_send_playback(io, aux_mutex, routing_table, id, *destination, timestamp)?; } + Ok(()) } pub fn playback_done(io: &Io, ddma_mutex: &Mutex, @@ -172,10 +198,10 @@ pub mod remote_dma { } } - pub fn has_remote_traces(io: &Io, ddma_mutex: &Mutex, id: u32) -> bool { - let _lock = ddma_mutex.lock(io).unwrap(); + pub fn has_remote_traces(io: &Io, ddma_mutex: &Mutex, id: u32) -> Result { + let _lock = ddma_mutex.lock(io)?; let trace_list = unsafe { TRACES.get(&id).unwrap() }; - !trace_list.is_empty() + Ok(!trace_list.is_empty()) } } @@ -225,11 +251,11 @@ impl Manager { } pub fn record_stop(&mut self, duration: u64, _enable_ddma: bool, - _io: &Io, _ddma_mutex: &Mutex) -> u32 { + _io: &Io, _ddma_mutex: &Mutex) -> Result { let mut local_trace = Vec::new(); let mut _remote_traces: BTreeMap> = BTreeMap::new(); - if _enable_ddma & cfg!(has_drtio) { + if _enable_ddma && cfg!(has_drtio) { let mut trace = Vec::new(); mem::swap(&mut self.recording_trace, &mut trace); trace.push(0); @@ -284,9 +310,9 @@ impl Manager { self.name_map.insert(name, id); #[cfg(has_drtio)] - remote_dma::add_traces(_io, _ddma_mutex, id, _remote_traces); + remote_dma::add_traces(_io, _ddma_mutex, id, _remote_traces)?; - id + Ok(id) } pub fn erase(&mut self, name: &str) { diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index a03737178..44661e822 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -22,6 +22,39 @@ pub mod drtio { #[cfg(has_rtio_analyzer)] use analyzer::remote_analyzer::RemoteBuffer; use kernel::subkernel; + use sched::Error as SchedError; + + #[derive(Fail, Debug)] + pub enum Error { + #[fail(display = "timed out")] + Timeout, + #[fail(display = "unexpected packet: {:?}", _0)] + UnexpectedPacket(drtioaux::Packet), + #[fail(display = "aux packet error")] + AuxError, + #[fail(display = "link down")] + LinkDown, + #[fail(display = "unexpected reply")] + UnexpectedReply, + #[fail(display = "error adding DMA trace on satellite #{}", _0)] + DmaAddTraceFail(u8), + #[fail(display = "error erasing DMA trace on satellite #{}", _0)] + DmaEraseFail(u8), + #[fail(display = "error playing back DMA trace on satellite #{}", _0)] + DmaPlaybackFail(u8), + #[fail(display = "error adding subkernel on satellite #{}", _0)] + SubkernelAddFail(u8), + #[fail(display = "error on subkernel run request on satellite #{}", _0)] + SubkernelRunFail(u8), + #[fail(display = "sched error: {}", _0)] + SchedError(#[cause] SchedError), + } + + impl From for Error { + fn from(value: SchedError) -> Error { + Error::SchedError(value) + } + } pub fn startup(io: &Io, aux_mutex: &Mutex, routing_table: &Urc>, @@ -45,21 +78,21 @@ pub mod drtio { } } - fn recv_aux_timeout(io: &Io, linkno: u8, timeout: u32) -> Result { + fn recv_aux_timeout(io: &Io, linkno: u8, timeout: u32) -> Result { let max_time = clock::get_ms() + timeout as u64; loop { if !link_rx_up(linkno) { - return Err("link went down"); + return Err(Error::LinkDown); } if clock::get_ms() > max_time { - return Err("timeout"); + return Err(Error::Timeout); } match drtioaux::recv(linkno) { 
Ok(Some(packet)) => return Ok(packet), Ok(None) => (), - Err(_) => return Err("aux packet error") + Err(_) => return Err(Error::AuxError) } - io.relinquish().unwrap(); + io.relinquish()?; } } @@ -88,13 +121,23 @@ pub mod drtio { } pub fn aux_transact(io: &Io, aux_mutex: &Mutex, linkno: u8, request: &drtioaux::Packet - ) -> Result { - let _lock = aux_mutex.lock(io).unwrap(); + ) -> Result { + let _lock = aux_mutex.lock(io)?; drtioaux::send(linkno, request).unwrap(); let reply = recv_aux_timeout(io, linkno, 200)?; Ok(reply) } + pub fn clear_buffers(io: &Io, aux_mutex: &Mutex) { + let _lock = aux_mutex.lock(io).unwrap(); + for linkno in 0..(csr::DRTIO.len() as u8) { + if !link_rx_up(linkno) { + continue; + } + let _ = recv_aux_timeout(io, linkno, 200); + } + } + fn ping_remote(io: &Io, aux_mutex: &Mutex, linkno: u8) -> u32 { let mut count = 0; loop { @@ -124,7 +167,7 @@ pub mod drtio { } } - fn sync_tsc(io: &Io, aux_mutex: &Mutex, linkno: u8) -> Result<(), &'static str> { + fn sync_tsc(io: &Io, aux_mutex: &Mutex, linkno: u8) -> Result<(), Error> { let _lock = aux_mutex.lock(io).unwrap(); unsafe { @@ -137,32 +180,32 @@ pub mod drtio { if reply == drtioaux::Packet::TSCAck { return Ok(()); } else { - return Err("unexpected reply"); + return Err(Error::UnexpectedReply); } } fn load_routing_table(io: &Io, aux_mutex: &Mutex, - linkno: u8, routing_table: &drtio_routing::RoutingTable) -> Result<(), &'static str> { + linkno: u8, routing_table: &drtio_routing::RoutingTable) -> Result<(), Error> { for i in 0..drtio_routing::DEST_COUNT { let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::RoutingSetPath { destination: i as u8, hops: routing_table.0[i] })?; if reply != drtioaux::Packet::RoutingAck { - return Err("unexpected reply"); + return Err(Error::UnexpectedReply); } } Ok(()) } fn set_rank(io: &Io, aux_mutex: &Mutex, - linkno: u8, rank: u8) -> Result<(), &'static str> { + linkno: u8, rank: u8) -> Result<(), Error> { let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::RoutingSetRank { rank: rank })?; if reply != drtioaux::Packet::RoutingAck { - return Err("unexpected reply"); + return Err(Error::UnexpectedReply); } Ok(()) } @@ -285,7 +328,7 @@ pub mod drtio { } } } else { - error!("[DEST#{}] communication failed ({})", destination, reply.unwrap_err()); + error!("[DEST#{}] communication failed ({:?})", destination, reply.unwrap_err()); } break; } @@ -309,7 +352,7 @@ pub mod drtio { subkernel::destination_changed(io, aux_mutex, subkernel_mutex, routing_table, destination, true); }, Ok(packet) => error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet), - Err(e) => error!("[DEST#{}] communication failed ({})", destination, e) + Err(e) => error!("[DEST#{}] communication failed ({:?})", destination, e) } } } @@ -343,13 +386,13 @@ pub mod drtio { info!("[LINK#{}] remote replied after {} packets", linkno, ping_count); up_links[linkno as usize] = true; if let Err(e) = sync_tsc(&io, aux_mutex, linkno) { - error!("[LINK#{}] failed to sync TSC ({})", linkno, e); + error!("[LINK#{}] failed to sync TSC ({:?})", linkno, e); } if let Err(e) = load_routing_table(&io, aux_mutex, linkno, routing_table) { - error!("[LINK#{}] failed to load routing table ({})", linkno, e); + error!("[LINK#{}] failed to load routing table ({:?})", linkno, e); } if let Err(e) = set_rank(&io, aux_mutex, linkno, 1) { - error!("[LINK#{}] failed to set rank ({})", linkno, e); + error!("[LINK#{}] failed to set rank ({:?})", linkno, e); } info!("[LINK#{}] link initialization completed", linkno); 
} else { @@ -384,86 +427,79 @@ pub mod drtio { match reply { Ok(drtioaux::Packet::ResetAck) => (), Ok(_) => error!("[LINK#{}] reset failed, received unexpected aux packet", linkno), - Err(e) => error!("[LINK#{}] reset failed, aux packet error ({})", linkno, e) + Err(e) => error!("[LINK#{}] reset failed, aux packet error ({:?})", linkno, e) } } } } - fn partition_data(data: &[u8], send_f: F) -> Result<(), &'static str> - where F: Fn(&[u8; MASTER_PAYLOAD_MAX_SIZE], PayloadStatus, usize) -> Result<(), &'static str> { + fn partition_data(data: &[u8], send_f: F) -> Result<(), Error> + where F: Fn(&[u8; MASTER_PAYLOAD_MAX_SIZE], PayloadStatus, usize) -> Result<(), Error> { let mut i = 0; - let mut first = true; while i < data.len() { let mut slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; let len: usize = if i + MASTER_PAYLOAD_MAX_SIZE < data.len() { MASTER_PAYLOAD_MAX_SIZE } else { data.len() - i } as usize; + let first = i == 0; let last = i + len == data.len(); let status = PayloadStatus::from_status(first, last); slice[..len].clone_from_slice(&data[i..i+len]); i += len; send_f(&slice, status, len)?; - first = false; } Ok(()) } pub fn ddma_upload_trace(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, - id: u32, destination: u8, trace: &[u8]) -> Result<(), &'static str> { + id: u32, destination: u8, trace: &[u8]) -> Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; partition_data(trace, |slice, status, len: usize| { let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::DmaAddTraceRequest { - id: id, destination: destination, status: status, length: len as u16, trace: *slice}); + id: id, destination: destination, status: status, length: len as u16, trace: *slice})?; match reply { - Ok(drtioaux::Packet::DmaAddTraceReply { succeeded: true }) => Ok(()), - Ok(drtioaux::Packet::DmaAddTraceReply { succeeded: false }) => - Err("error adding trace on satellite"), - Ok(_) => Err("adding DMA trace failed, unexpected aux packet"), - Err(_) => Err("adding DMA trace failed, aux error") + drtioaux::Packet::DmaAddTraceReply { succeeded: true } => Ok(()), + drtioaux::Packet::DmaAddTraceReply { succeeded: false } => Err(Error::DmaAddTraceFail(destination)), + packet => Err(Error::UnexpectedPacket(packet)), } }) } pub fn ddma_send_erase(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, - id: u32, destination: u8) -> Result<(), &'static str> { + id: u32, destination: u8) -> Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; let reply = aux_transact(io, aux_mutex, linkno, - &drtioaux::Packet::DmaRemoveTraceRequest { id: id, destination: destination }); + &drtioaux::Packet::DmaRemoveTraceRequest { id: id, destination: destination })?; match reply { - Ok(drtioaux::Packet::DmaRemoveTraceReply { succeeded: true }) => Ok(()), - Ok(drtioaux::Packet::DmaRemoveTraceReply { succeeded: false }) => Err("satellite DMA erase error"), - Ok(_) => Err("erasing trace failed, unexpected aux packet"), - Err(_) => Err("erasing trace failed, aux error") + drtioaux::Packet::DmaRemoveTraceReply { succeeded: true } => Ok(()), + drtioaux::Packet::DmaRemoveTraceReply { succeeded: false } => Err(Error::DmaEraseFail(destination)), + packet => Err(Error::UnexpectedPacket(packet)), } } pub fn ddma_send_playback(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, - id: u32, destination: u8, timestamp: u64) -> Result<(), &'static str> { + id: u32, destination: u8, timestamp: u64) -> 
Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; let reply = aux_transact(io, aux_mutex, linkno, - &drtioaux::Packet::DmaPlaybackRequest{ id: id, destination: destination, timestamp: timestamp }); + &drtioaux::Packet::DmaPlaybackRequest{ id: id, destination: destination, timestamp: timestamp })?; match reply { - Ok(drtioaux::Packet::DmaPlaybackReply { succeeded: true }) => return Ok(()), - Ok(drtioaux::Packet::DmaPlaybackReply { succeeded: false }) => - return Err("error on DMA playback request"), - Ok(_) => return Err("received unexpected aux packet during DMA playback"), - Err(_) => return Err("aux error on DMA playback") + drtioaux::Packet::DmaPlaybackReply { succeeded: true } => Ok(()), + drtioaux::Packet::DmaPlaybackReply { succeeded: false } => + Err(Error::DmaPlaybackFail(destination)), + packet => Err(Error::UnexpectedPacket(packet)), } } #[cfg(has_rtio_analyzer)] fn analyzer_get_data(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, - destination: u8) -> Result { + destination: u8) -> Result { let linkno = routing_table.0[destination as usize][0] - 1; let reply = aux_transact(io, aux_mutex, linkno, - &drtioaux::Packet::AnalyzerHeaderRequest { destination: destination }); + &drtioaux::Packet::AnalyzerHeaderRequest { destination: destination })?; let (sent, total, overflow) = match reply { - Ok(drtioaux::Packet::AnalyzerHeader { - sent_bytes, total_byte_count, overflow_occurred } - ) => (sent_bytes, total_byte_count, overflow_occurred), - Ok(_) => return Err("received unexpected aux packet during remote analyzer header request"), - Err(e) => return Err(e) + drtioaux::Packet::AnalyzerHeader { sent_bytes, total_byte_count, overflow_occurred } => + (sent_bytes, total_byte_count, overflow_occurred), + packet => return Err(Error::UnexpectedPacket(packet)), }; let mut remote_data: Vec = Vec::new(); @@ -471,14 +507,13 @@ pub mod drtio { let mut last_packet = false; while !last_packet { let reply = aux_transact(io, aux_mutex, linkno, - &drtioaux::Packet::AnalyzerDataRequest { destination: destination }); + &drtioaux::Packet::AnalyzerDataRequest { destination: destination })?; match reply { - Ok(drtioaux::Packet::AnalyzerData { last, length, data }) => { + drtioaux::Packet::AnalyzerData { last, length, data } => { last_packet = last; remote_data.extend(&data[0..length as usize]); }, - Ok(_) => return Err("received unexpected aux packet during remote analyzer data request"), - Err(e) => return Err(e) + packet => return Err(Error::UnexpectedPacket(packet)), } } } @@ -494,7 +529,7 @@ pub mod drtio { #[cfg(has_rtio_analyzer)] pub fn analyzer_query(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, up_destinations: &Urc> - ) -> Result, &'static str> { + ) -> Result, Error> { let mut remote_buffers: Vec = Vec::new(); for i in 1..drtio_routing::DEST_COUNT { if destination_up(up_destinations, i as u8) { @@ -505,69 +540,65 @@ pub mod drtio { } pub fn subkernel_upload(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, - id: u32, destination: u8, data: &Vec) -> Result<(), &'static str> { + id: u32, destination: u8, data: &Vec) -> Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; partition_data(data, |slice, status, len: usize| { let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::SubkernelAddDataRequest { - id: id, destination: destination, status: status, length: len as u16, data: *slice}); + id: id, destination: destination, status: status, length: len as u16, 
data: *slice})?; match reply { - Ok(drtioaux::Packet::SubkernelAddDataReply { succeeded: true }) => Ok(()), - Ok(drtioaux::Packet::SubkernelAddDataReply { succeeded: false }) => - Err("error adding subkernel on satellite"), - Ok(_) => Err("adding subkernel failed, unexpected aux packet"), - Err(_) => Err("adding subkernel failed, aux error") + drtioaux::Packet::SubkernelAddDataReply { succeeded: true } => Ok(()), + drtioaux::Packet::SubkernelAddDataReply { succeeded: false } => + Err(Error::SubkernelAddFail(destination)), + packet => Err(Error::UnexpectedPacket(packet)), } }) } pub fn subkernel_load(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, - id: u32, destination: u8, run: bool) -> Result<(), &'static str> { + id: u32, destination: u8, run: bool) -> Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; let reply = aux_transact(io, aux_mutex, linkno, - &drtioaux::Packet::SubkernelLoadRunRequest{ id: id, destination: destination, run: run }); + &drtioaux::Packet::SubkernelLoadRunRequest{ id: id, destination: destination, run: run })?; match reply { - Ok(drtioaux::Packet::SubkernelLoadRunReply { succeeded: true }) => return Ok(()), - Ok(drtioaux::Packet::SubkernelLoadRunReply { succeeded: false }) => - return Err("error on subkernel run request"), - Ok(_) => return Err("received unexpected aux packet during subkernel run"), - Err(_) => return Err("aux error on subkernel run") + drtioaux::Packet::SubkernelLoadRunReply { succeeded: true } => Ok(()), + drtioaux::Packet::SubkernelLoadRunReply { succeeded: false } => + Err(Error::SubkernelRunFail(destination)), + packet => Err(Error::UnexpectedPacket(packet)), } } pub fn subkernel_retrieve_exception(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, destination: u8 - ) -> Result, &'static str> { + ) -> Result, Error> { let linkno = routing_table.0[destination as usize][0] - 1; let mut remote_data: Vec = Vec::new(); loop { let reply = aux_transact(io, aux_mutex, linkno, - &drtioaux::Packet::SubkernelExceptionRequest { destination: destination }); + &drtioaux::Packet::SubkernelExceptionRequest { destination: destination })?; match reply { - Ok(drtioaux::Packet::SubkernelException { last, length, data }) => { + drtioaux::Packet::SubkernelException { last, length, data } => { remote_data.extend(&data[0..length as usize]); if last { return Ok(remote_data); } }, - Ok(_) => return Err("received unexpected aux packet during subkernel exception request"), - Err(e) => return Err(e) + packet => return Err(Error::UnexpectedPacket(packet)), } } } pub fn subkernel_send_message(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, id: u32, destination: u8, message: &[u8] - ) -> Result<(), &'static str> { + ) -> Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; partition_data(message, |slice, status, len: usize| { let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::SubkernelMessage { - destination: destination, id: id, status: status, length: len as u16, data: *slice}); + destination: destination, id: id, status: status, length: len as u16, data: *slice})?; match reply { - Ok(drtioaux::Packet::SubkernelMessageAck { .. }) => Ok(()), - Ok(_) => Err("sending message to subkernel failed, unexpected aux packet"), - Err(_) => Err("sending message to subkernel, aux error") + drtioaux::Packet::SubkernelMessageAck { .. 
} => Ok(()), + packet => Err(Error::UnexpectedPacket(packet)), } }) } diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index 471cc20c9..d08e42986 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -5,7 +5,7 @@ use cslice::CSlice; use io::{Read, Write, Error as IoError}; #[cfg(has_drtio)] -use io::{Cursor, ProtoRead}; +use io::Cursor; use board_misoc::{ident, cache, config}; use {mailbox, rpc_queue, kernel}; use urc::Urc; @@ -16,6 +16,8 @@ use rtio_dma::Manager as DmaManager; use rtio_dma::remote_dma; #[cfg(has_drtio)] use kernel::{subkernel, subkernel::Error as SubkernelError}; +#[cfg(has_drtio)] +use rtio_mgt::drtio; use rtio_mgt::get_async_errors; use cache::Cache; use kern_hwreq; @@ -40,8 +42,14 @@ pub enum Error { #[fail(display = "subkernel io error")] SubkernelIoError, #[cfg(has_drtio)] + #[fail(display = "DDMA error: {}", _0)] + Ddma(#[cause] remote_dma::Error), + #[cfg(has_drtio)] #[fail(display = "subkernel error: {}", _0)] Subkernel(#[cause] SubkernelError), + #[cfg(has_drtio)] + #[fail(display = "drtio aux error: {}", _0)] + DrtioAux(#[cause] drtio::Error), #[fail(display = "{}", _0)] Unexpected(String), } @@ -52,6 +60,16 @@ impl From> for Error { } } +#[cfg(has_drtio)] +impl From for Error { + fn from(value: drtio::Error) -> Error { + match value { + drtio::Error::SchedError(x) => Error::from(x), + x => Error::DrtioAux(x), + } + } +} + impl From for Error { fn from(value: SchedError) -> Error { Error::Protocol(host::Error::Io(IoError::Other(value))) @@ -79,7 +97,22 @@ impl From> for Error { #[cfg(has_drtio)] impl From for Error { fn from(value: SubkernelError) -> Error { - Error::Subkernel(value) + match value { + SubkernelError::SchedError(x) => Error::from(x), + SubkernelError::DrtioError(x) => Error::from(x), + x => Error::Subkernel(x), + } + } +} + +#[cfg(has_drtio)] +impl From for Error { + fn from(value: remote_dma::Error) -> Error { + match value { + remote_dma::Error::SchedError(x) => Error::from(x), + remote_dma::Error::DrtioError(x) => Error::from(x), + x => Error::Ddma(x), + } } } @@ -371,7 +404,7 @@ fn process_host_message(io: &Io, _aux_mutex: &Mutex, _ddma_mutex: &Mutex, _subke host::Request::UploadSubkernel { id: _id, destination: _dest, kernel: _kernel } => { #[cfg(has_drtio)] { - subkernel::add_subkernel(io, _subkernel_mutex, _id, _dest, _kernel); + subkernel::add_subkernel(io, _subkernel_mutex, _id, _dest, _kernel)?; match subkernel::upload(io, _aux_mutex, _subkernel_mutex, _routing_table, _id) { Ok(_) => host_write(stream, host::Reply::LoadCompleted)?, Err(error) => { @@ -434,7 +467,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, if let Some(_id) = session.congress.dma_manager.record_start(name) { // replace the record #[cfg(has_drtio)] - remote_dma::erase(io, aux_mutex, ddma_mutex, routing_table, _id); + remote_dma::erase(io, aux_mutex, ddma_mutex, routing_table, _id)?; } kern_acknowledge() } @@ -443,10 +476,10 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, kern_acknowledge() } &kern::DmaRecordStop { duration, enable_ddma } => { - let _id = session.congress.dma_manager.record_stop(duration, enable_ddma, io, ddma_mutex); + let _id = session.congress.dma_manager.record_stop(duration, enable_ddma, io, ddma_mutex)?; #[cfg(has_drtio)] if enable_ddma { - remote_dma::upload_traces(io, aux_mutex, ddma_mutex, routing_table, _id); + remote_dma::upload_traces(io, aux_mutex, ddma_mutex, routing_table, _id)?; } cache::flush_l2_cache(); kern_acknowledge() @@ -454,7 +487,7 @@ fn 
process_kern_message(io: &Io, aux_mutex: &Mutex, &kern::DmaEraseRequest { name } => { #[cfg(has_drtio)] if let Some(id) = session.congress.dma_manager.get_id(name) { - remote_dma::erase(io, aux_mutex, ddma_mutex, routing_table, *id); + remote_dma::erase(io, aux_mutex, ddma_mutex, routing_table, *id)?; } session.congress.dma_manager.erase(name); kern_acknowledge() @@ -463,7 +496,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, session.congress.dma_manager.with_trace(name, |trace, duration| { #[cfg(has_drtio)] let uses_ddma = match trace { - Some(trace) => remote_dma::has_remote_traces(io, aux_mutex, trace.as_ptr() as u32), + Some(trace) => remote_dma::has_remote_traces(io, aux_mutex, trace.as_ptr() as u32)?, None => false }; #[cfg(not(has_drtio))] @@ -477,7 +510,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, } &kern::DmaStartRemoteRequest { id: _id, timestamp: _timestamp } => { #[cfg(has_drtio)] - remote_dma::playback(io, aux_mutex, ddma_mutex, routing_table, _id as u32, _timestamp as u64); + remote_dma::playback(io, aux_mutex, ddma_mutex, routing_table, _id as u32, _timestamp as u64)?; kern_acknowledge() } &kern::DmaAwaitRemoteRequest { id: _id } => { @@ -703,7 +736,7 @@ fn host_kernel_worker(io: &Io, aux_mutex: &Mutex, congress: &mut Congress) -> Result<(), Error> { let mut session = Session::new(congress); #[cfg(has_drtio)] - subkernel::clear_subkernels(&io, &subkernel_mutex); + subkernel::clear_subkernels(&io, &subkernel_mutex)?; loop { if stream.can_recv() { @@ -785,7 +818,7 @@ fn respawn(io: &Io, handle: &mut Option, f: F) } } - *handle = Some(io.spawn(16384, f)) + *handle = Some(io.spawn(24576, f)) } pub fn thread(io: Io, aux_mutex: &Mutex, @@ -857,16 +890,19 @@ pub fn thread(io: Io, aux_mutex: &Mutex, Err(Error::Protocol(host::Error::Io(IoError::UnexpectedEnd))) => info!("connection closed"), Err(Error::Protocol(host::Error::Io( - IoError::Other(SchedError::Interrupted)))) => - info!("kernel interrupted"), + IoError::Other(SchedError::Interrupted)))) => { + info!("kernel interrupted"); + #[cfg(has_drtio)] + drtio::clear_buffers(&io, &aux_mutex); + } Err(err) => { congress.finished_cleanly.set(false); error!("session aborted: {}", err); + #[cfg(has_drtio)] + drtio::clear_buffers(&io, &aux_mutex); } } stream.close().expect("session: close socket"); - #[cfg(has_drtio)] - subkernel::clear_subkernels(&io, &subkernel_mutex); }); } @@ -887,15 +923,23 @@ pub fn thread(io: Io, aux_mutex: &Mutex, Ok(()) => info!("idle kernel finished, standing by"), Err(Error::Protocol(host::Error::Io( - IoError::Other(SchedError::Interrupted)))) => - info!("idle kernel interrupted"), + IoError::Other(SchedError::Interrupted)))) => { + info!("idle kernel interrupted"); + // clear state for regular kernel + #[cfg(has_drtio)] + drtio::clear_buffers(&io, &aux_mutex); + } Err(Error::KernelNotFound) => { info!("no idle kernel found"); while io.relinquish().is_ok() {} } - Err(err) => - error!("idle kernel aborted: {}", err) + Err(err) => { + error!("idle kernel aborted: {}", err); + #[cfg(has_drtio)] + drtio::clear_buffers(&io, &aux_mutex); + } } + }) } From c0a6252e772197d18658dca0a7c87698af2f0011 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Tue, 7 Nov 2023 14:06:31 +0800 Subject: [PATCH 031/296] afws_client: improve compatibility with older versions of prettytable. 
Closes #2264 --- artiq/frontend/afws_client.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/artiq/frontend/afws_client.py b/artiq/frontend/afws_client.py index 49ce122c8..2faaf560b 100755 --- a/artiq/frontend/afws_client.py +++ b/artiq/frontend/afws_client.py @@ -146,7 +146,8 @@ class Client: print(error_msg) table = PrettyTable() table.field_names = ["Variant", "Expiry date"] - table.add_rows(variants) + for variant in variants: + table.add_row(variant) print(table) sys.exit(1) return variants[0][0] @@ -244,10 +245,11 @@ def main(): sys.exit(1) zip_unarchive(contents, args.directory) elif args.action == "get_variants": - data = client.get_variants() + variants = client.get_variants() table = PrettyTable() table.field_names = ["Variant", "Expiry date"] - table.add_rows(data) + for variant in variants: + table.add_row(variant) print(table) elif args.action == "get_json": if args.variant: From bbe6ff8cac1465237f57cf3c13e18609794e1368 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Tue, 7 Nov 2023 18:36:11 +0800 Subject: [PATCH 032/296] flake: update dependencies --- flake.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flake.lock b/flake.lock index 031fde710..15be8436a 100644 --- a/flake.lock +++ b/flake.lock @@ -124,11 +124,11 @@ "src-misoc": { "flake": false, "locked": { - "lastModified": 1699334718, - "narHash": "sha256-ccJnbIJ9si2QXvdW0wGvEK8kaaencfPbYaO7rME1UBY=", + "lastModified": 1699352904, + "narHash": "sha256-SglyTmXOPv8jJOjwAjJrj/WhAkItQfUbvKfUqrynwRg=", "ref": "refs/heads/master", - "rev": "3cbc746cbd2a6125b8e48a2dc1810e17ba39f885", - "revCount": 2450, + "rev": "a53859f2167c31ab5225b6c09f30cf05527b94f4", + "revCount": 2452, "submodules": true, "type": "git", "url": "https://github.com/m-labs/misoc.git" From 8f7d138dbdd818099533318be4550930be316189 Mon Sep 17 00:00:00 2001 From: linuswck Date: Mon, 6 Nov 2023 16:19:04 +0800 Subject: [PATCH 033/296] gtx: Always enable IBUFDS_GTE2, add clk_path_ready - Set clk_path_ready to High to start Initialization of GTP TX and RX --- artiq/gateware/drtio/transceiver/gtx_7series.py | 13 ++++--------- .../gateware/drtio/transceiver/gtx_7series_init.py | 4 ++-- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/artiq/gateware/drtio/transceiver/gtx_7series.py b/artiq/gateware/drtio/transceiver/gtx_7series.py index f62663895..693e91ea4 100644 --- a/artiq/gateware/drtio/transceiver/gtx_7series.py +++ b/artiq/gateware/drtio/transceiver/gtx_7series.py @@ -279,14 +279,13 @@ class GTX(Module, TransceiverInterface): self.nchannels = nchannels = len(pads) self.gtxs = [] self.rtio_clk_freq = clk_freq - + self.clk_path_ready = Signal() # # # refclk = Signal() - clk_enable = Signal() self.specials += Instance("IBUFDS_GTE2", - i_CEB=~clk_enable, + i_CEB=0, i_I=clock_pads.p, i_IB=clock_pads.n, o_O=refclk, @@ -315,14 +314,10 @@ class GTX(Module, TransceiverInterface): for n, gtx in enumerate(self.gtxs): self.comb += [ gtx.txenable.eq(self.txenable.storage[n]), - gtx.tx_init.stable_clkin.eq(clk_enable) + gtx.tx_init.clk_path_ready.eq(self.clk_path_ready) ] # rx_init is in SYS domain, rather than bootstrap - self.specials += MultiReg(clk_enable, gtx.rx_init.stable_clkin) - - # stable_clkin resets after reboot since it's in SYS domain - # still need to keep clk_enable high after this - self.sync.bootstrap += clk_enable.eq(self.stable_clkin.storage | self.gtxs[0].tx_init.cplllock) + self.specials += MultiReg(self.clk_path_ready, gtx.rx_init.clk_path_ready) # Connect slave i's 
`rtio_rx` clock to `rtio_rxi` clock for i in range(nchannels): diff --git a/artiq/gateware/drtio/transceiver/gtx_7series_init.py b/artiq/gateware/drtio/transceiver/gtx_7series_init.py index 6f1bff15e..e5b67f125 100644 --- a/artiq/gateware/drtio/transceiver/gtx_7series_init.py +++ b/artiq/gateware/drtio/transceiver/gtx_7series_init.py @@ -16,7 +16,7 @@ class GTXInit(Module): assert mode in ["single", "master", "slave"] self.mode = mode - self.stable_clkin = Signal() + self.clk_path_ready = Signal() self.done = Signal() self.restart = Signal() @@ -110,7 +110,7 @@ class GTXInit(Module): startup_fsm.act("INITIAL", startup_timer.wait.eq(1), - If(startup_timer.done & self.stable_clkin, NextState("RESET_PLL")) + If(startup_timer.done & self.clk_path_ready, NextState("RESET_PLL")) ) startup_fsm.act("RESET_PLL", gtXxreset.eq(1), From ec2b86b08d497a41e75c228385b754c6f3a4a8bf Mon Sep 17 00:00:00 2001 From: linuswck Date: Tue, 7 Nov 2023 12:04:24 +0800 Subject: [PATCH 034/296] kc705: fix gtx clock path durnig init --- artiq/gateware/targets/kc705.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/artiq/gateware/targets/kc705.py b/artiq/gateware/targets/kc705.py index 1c62476e5..b3b5a4af7 100755 --- a/artiq/gateware/targets/kc705.py +++ b/artiq/gateware/targets/kc705.py @@ -273,7 +273,8 @@ class _MasterBase(MiniSoC, AMPSoC): txout_buf = Signal() self.specials += Instance("BUFG", i_I=gtx0.txoutclk, o_O=txout_buf) - self.crg.configure(txout_buf, clk_sw=gtx0.tx_init.done) + self.crg.configure(txout_buf, clk_sw=self.gt_drtio.stable_clkin.storage, ext_async_rst=self.crg.clk_sw_fsm.o_clk_sw & ~gtx0.tx_init.done) + self.specials += MultiReg(self.crg.clk_sw_fsm.o_clk_sw & self.crg.mmcm_locked, self.gt_drtio.clk_path_ready, odomain="bootstrap") self.comb += [ platform.request("user_sma_clock_p").eq(ClockSignal("rtio_rx0")), @@ -440,7 +441,8 @@ class _SatelliteBase(BaseSoC, AMPSoC): txout_buf = Signal() self.specials += Instance("BUFG", i_I=gtx0.txoutclk, o_O=txout_buf) - self.crg.configure(txout_buf, clk_sw=gtx0.tx_init.done) + self.crg.configure(txout_buf, clk_sw=self.gt_drtio.stable_clkin.storage, ext_async_rst=self.crg.clk_sw_fsm.o_clk_sw & ~gtx0.tx_init.done) + self.specials += MultiReg(self.crg.clk_sw_fsm.o_clk_sw & self.crg.mmcm_locked, self.gt_drtio.clk_path_ready, odomain="bootstrap") self.comb += [ platform.request("user_sma_clock_p").eq(ClockSignal("rtio_rx0")), From e7af2195056324f3e55f24bd6668d14746ee40e3 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 18 Oct 2023 15:11:07 +0800 Subject: [PATCH 035/296] kasli_generic: add support for user LEDs Add additional LED RTIO devices. 
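
A rough usage sketch for the new channels (the device name led0 is an
assumption taken from a generated device database; adjust it to the actual
entry names):

    from artiq.experiment import *

    class BlinkUserLED(EnvExperiment):
        def build(self):
            self.setattr_device("core")
            self.setattr_device("led0")  # assumed name of one of the new user LED channels

        @kernel
        def run(self):
            self.core.reset()
            for i in range(5):
                self.led0.pulse(100*ms)
                delay(100*ms)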
--- artiq/gateware/targets/kasli_generic.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/artiq/gateware/targets/kasli_generic.py b/artiq/gateware/targets/kasli_generic.py index 2f91de1b1..9756093df 100755 --- a/artiq/gateware/targets/kasli_generic.py +++ b/artiq/gateware/targets/kasli_generic.py @@ -44,8 +44,8 @@ class GenericStandalone(StandaloneBase): phy = ttl_simple.Output(sfp_ctl.led) self.submodules += phy self.rtio_channels.append(rtio.Channel.from_phy(phy)) - if hw_rev == "v2.0": - for i in (1, 2): + if hw_rev in ("v1.1", "v2.0"): + for i in range(3): print("USER LED at RTIO channel 0x{:06x}".format(len(self.rtio_channels))) phy = ttl_simple.Output(self.platform.request("user_led", i)) self.submodules += phy @@ -93,6 +93,13 @@ class GenericMaster(MasterBase): self.rtio_channels = [] eem_7series.add_peripherals(self, description["peripherals"]) + if hw_rev in ("v1.1", "v2.0"): + for i in range(3): + print("USER LED at RTIO channel 0x{:06x}".format(len(self.rtio_channels))) + phy = ttl_simple.Output(self.platform.request("user_led", i)) + self.submodules += phy + self.rtio_channels.append(rtio.Channel.from_phy(phy)) + self.config["HAS_RTIO_LOG"] = None self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) self.rtio_channels.append(rtio.LogChannel()) @@ -131,6 +138,13 @@ class GenericSatellite(SatelliteBase): self.rtio_channels = [] eem_7series.add_peripherals(self, description["peripherals"]) + if hw_rev in ("v1.1", "v2.0"): + for i in range(3): + print("USER LED at RTIO channel 0x{:06x}".format(len(self.rtio_channels))) + phy = ttl_simple.Output(self.platform.request("user_led", i)) + self.submodules += phy + self.rtio_channels.append(rtio.Channel.from_phy(phy)) + self.config["HAS_RTIO_LOG"] = None self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) self.rtio_channels.append(rtio.LogChannel()) From 363afb5fc9a8b60f521a5fafbaba4b9958dee05c Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 18 Oct 2023 15:12:56 +0800 Subject: [PATCH 036/296] artiq_ddb_template: add support for user LEDs Add support for additional user LEDs. 
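
For reference, each LED entry emitted by the template is expected to look
roughly like the sketch below (the name and channel number are illustrative;
the real values come from the JSON description and the RTIO channel layout):

    device_db["led0"] = {
        "type": "local",
        "module": "artiq.coredevice.ttl",
        "class": "TTLOut",
        "arguments": {"channel": 0x000010},
    }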
--- artiq/frontend/artiq_ddb_template.py | 40 +++++++++++++++++++++++----- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/artiq/frontend/artiq_ddb_template.py b/artiq/frontend/artiq_ddb_template.py index 5038563d6..eeca02c8e 100755 --- a/artiq/frontend/artiq_ddb_template.py +++ b/artiq/frontend/artiq_ddb_template.py @@ -24,6 +24,27 @@ def get_cpu_target(description): else: raise NotImplementedError + +def get_num_leds(description): + drtio_role = description["drtio_role"] + target = description["target"] + hw_rev = description["hw_rev"] + kasli_board_leds = { + "v1.0": 4, + "v1.1": 6, + "v2.0": 3 + } + if target == "kasli": + if hw_rev in ("v1.0", "v1.1") and drtio_role != "standalone": + # LEDs are used for DRTIO status on v1.0 and v1.1 + return kasli_board_leds[hw_rev] - 3 + return kasli_board_leds[hw_rev] + elif target == "kasli_soc": + return 2 + else: + raise ValueError + + def process_header(output, description): print(textwrap.dedent(""" # Autogenerated for the {variant} variant @@ -701,8 +722,8 @@ class PeripheralManager: processor = getattr(self, "process_"+str(peripheral["type"])) return processor(rtio_offset, peripheral) - def add_board_leds(self, rtio_offset, board_name=None): - for i in range(2): + def add_board_leds(self, rtio_offset, board_name=None, num_leds=2): + for i in range(num_leds): if board_name is None: led_name = self.get_name("led") else: @@ -716,7 +737,7 @@ class PeripheralManager: }}""", name=led_name, channel=rtio_offset+i) - return 2 + return num_leds def split_drtio_eem(peripherals): @@ -745,9 +766,10 @@ def process(output, primary_description, satellites): for peripheral in local_peripherals: n_channels = pm.process(rtio_offset, peripheral) rtio_offset += n_channels - if drtio_role == "standalone": - n_channels = pm.add_board_leds(rtio_offset) - rtio_offset += n_channels + + num_leds = get_num_leds(primary_description) + pm.add_board_leds(rtio_offset, num_leds=num_leds) + rtio_offset += num_leds for destination, description in satellites: if description["drtio_role"] != "satellite": @@ -766,7 +788,11 @@ def process(output, primary_description, satellites): for peripheral in peripherals: n_channels = pm.process(rtio_offset, peripheral) rtio_offset += n_channels - + + num_leds = get_num_leds(description) + pm.add_board_leds(rtio_offset, num_leds=num_leds) + rtio_offset += num_leds + for i, peripheral in enumerate(drtio_peripherals): if not("drtio_destination" in peripheral): if primary_description["target"] == "kasli": From 49afa116b3d5b561ed5dc35ef0a05740cf347c62 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 7 Nov 2023 16:40:42 +0800 Subject: [PATCH 037/296] RELEASE_NOTES: artiq_ddb_template needs gateware --- RELEASE_NOTES.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index 49006b8bf..b23a60e38 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -84,6 +84,8 @@ Old syntax should be replaced with the form shown on the right. * The ``ndecimals`` parameter in ``NumberValue`` and ``Scannable`` has been renamed to ``precision``. Parameters after and including ``scale`` in both constructors are now keyword-only. Refer to the updated ``no_hardware/arguments_demo.py`` example for current usage. +* ``artiq_ddb_template`` requires latest generic Kasli gateware to function properly + due to RTIO channel renumbering. 
ARTIQ-7 From 3ad3fac828823bff50db403151b8e22453c183b0 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Wed, 8 Nov 2023 11:15:32 +0800 Subject: [PATCH 038/296] update ARTIQ-8 release notes --- RELEASE_NOTES.rst | 110 ++++++++++++++++++++++++---------------------- 1 file changed, 57 insertions(+), 53 deletions(-) diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index b23a60e38..642f5012d 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -8,41 +8,76 @@ ARTIQ-8 (Unreleased) Highlights: -* Hardware support: +* New hardware support: + - Support for Shuttler, a 16-channel 125MSPS DAC card intended for ion transport. + Waveform generator and user API are similar to the NIST PDQ. - Implemented Phaser-servo. This requires recent gateware on Phaser. - - Implemented Phaser-MIQRO support. This requires the Phaser MIQRO gateware - variant. + - Almazny v1.2 with finer RF switch control. + - Metlino and Sayma support has been dropped due to complications with synchronous RTIO clocking. + - More user LEDs are exposed to RTIO on Kasli. + - Implemented Phaser-MIQRO support. This requires the proprietary Phaser MIQRO gateware + variant from QUARTIQ. - Sampler: fixed ADC MU to Volt conversion factor for Sampler v2.2+. For earlier hardware versions, specify the hardware version in the device database file (e.g. ``"hw_rev": "v2.1"``) to use the correct conversion factor. - - Almazny v1.2. It is incompatible with the legacy versions and is the default. To use legacy - versions, specify ``almazny_hw_rev`` in the JSON description. - - Metlino and Sayma support has been dropped due to complications with synchronous RTIO clocking. +* Support for distributed DMA, where DMA is run directly on satellites for corresponding + RTIO events, increasing bandwidth in scenarios with heavy satellite usage. +* Support for subkernels, where kernels are run on satellite device CPUs to offload some + of the processing and RTIO operations. * CPU (on softcore platforms) and AXI bus (on Zynq) are now clocked synchronously with the RTIO clock, to facilitate implementation of local processing on DRTIO satellites, and to slightly reduce RTIO latency. -* MSYS2 packaging for Windows, which replaces Conda. Conda packages are still available to - support legacy installations, but may be removed in a future release. -* Added channel names to RTIO errors. -* Full Python 3.10 support. -* Python's built-in types (such as `float`, or `List[...]`) can now be used in type annotations on - kernel functions. -* Distributed DMA is now supported, allowing DMA to be run directly on satellites for corresponding - RTIO events, increasing bandwidth in scenarios with heavy satellite usage. -* Subkernels are now supported, allowing running kernels on satellite devices, offloading some - of the processing, and RTIO events. -* Applet Request Interfaces have been implemented, enabling applets to directly modify datasets - and temporarily set arguments in the dashboard. -* EntryArea widget has been implemented, allowing argument entry widgets to be used in applets. -* Dashboard: +* Support for DRTIO-over-EEM, used with Shuttler. +* Added channel names to RTIO error messages. +* GUI: + - Implemented Applet Request Interfaces which allow applets to modify datasets and set the + current values of widgets in the dashboard's experiment windows. + - Implemented a new EntryArea widget which allows argument entry widgets to be used in applets. 
- The "Close all applets" command (shortcut: Ctrl-Alt-W) now ignores docked applets, making it a convenient way to clean up after exploratory work without destroying a carefully arranged default workspace. - Hotkeys now organize experiment windows in the order they were last interacted with: + CTRL+SHIFT+T tiles experiment windows + CTRL+SHIFT+C cascades experiment windows -* Persistent datasets are now stored in a LMDB database for improved performance. PYON databases can - be converted with the script below. +* Persistent datasets are now stored in a LMDB database for improved performance. +* Python's built-in types (such as ``float``, or ``List[...]``) can now be used in type annotations on + kernel functions. +* Full Python 3.10 support. +* MSYS2 packaging for Windows, which replaces Conda. Conda packages are still available to + support legacy installations, but may be removed in a future release. + +Breaking changes: + +* ``SimpleApplet`` now calls widget constructors with an additional ``ctl`` parameter for control + operations, which includes dataset operations. It can be ignored if not needed. For an example usage, + refer to the ``big_number.py`` applet. +* ``SimpleApplet`` and ``TitleApplet`` now call ``data_changed`` with additional parameters. Derived applets + should change the function signature as below: + +:: + + # SimpleApplet + def data_changed(self, value, metadata, persist, mods) + # SimpleApplet (old version) + def data_changed(self, data, mods) + # TitleApplet + def data_changed(self, value, metadata, persist, mods, title) + # TitleApplet (old version) + def data_changed(self, data, mods, title) + +Accesses to the data argument should be replaced as below: + +:: + + data[key][0] ==> persist[key] + data[key][1] ==> value[key] + +* The ``ndecimals`` parameter in ``NumberValue`` and ``Scannable`` has been renamed to ``precision``. + Parameters after and including ``scale`` in both constructors are now keyword-only. + Refer to the updated ``no_hardware/arguments_demo.py`` example for current usage. +* Almazny v1.2 is incompatible with the legacy versions and is the default. + To use legacy versions, specify ``almazny_hw_rev`` in the JSON description. +* Legacy PYON databases should be converted to LMDB with the script below: :: @@ -56,37 +91,6 @@ Highlights: txn.put(key.encode(), pyon.encode((value, {})).encode()) new.close() -Breaking changes: - -* ``SimpleApplet`` now calls widget constructors with an additional ``ctl`` parameter for control - operations, which includes dataset operations. It can be ignored if not needed. For an example usage, - refer to the ``big_number.py`` applet. -* ``SimpleApplet`` and ``TitleApplet`` now call ``data_changed`` with additional parameters. Wrapped widgets - should refactor the function signature as seen below: -:: - - # SimpleApplet - def data_changed(self, value, metadata, persist, mods) - # SimpleApplet (old version) - def data_changed(self, data, mods) - # TitleApplet - def data_changed(self, value, metadata, persist, mods, title) - # TitleApplet (old version) - def data_changed(self, data, mods, title) - -Old syntax should be replaced with the form shown on the right. -:: - - data[key][0] ==> persist[key] - data[key][1] ==> value[key] - data[key][2] ==> metadata[key] - -* The ``ndecimals`` parameter in ``NumberValue`` and ``Scannable`` has been renamed to ``precision``. - Parameters after and including ``scale`` in both constructors are now keyword-only. 
- Refer to the updated ``no_hardware/arguments_demo.py`` example for current usage. -* ``artiq_ddb_template`` requires latest generic Kasli gateware to function properly - due to RTIO channel renumbering. - ARTIQ-7 ------- From 875666f3ecd7185ab1062b0484b1b8f5fbe763f4 Mon Sep 17 00:00:00 2001 From: Florian Agbuya Date: Fri, 10 Nov 2023 15:33:24 +0800 Subject: [PATCH 039/296] doc: add section on new nix flakes config (closes #2232) Signed-off-by: Florian Agbuya --- doc/manual/installing.rst | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/doc/manual/installing.rst b/doc/manual/installing.rst index a231e48ec..c1615eda3 100644 --- a/doc/manual/installing.rst +++ b/doc/manual/installing.rst @@ -90,6 +90,35 @@ If your favorite package is not available with Nix, contact us using the helpdes Troubleshooting ^^^^^^^^^^^^^^^ +"Do you want to allow configuration setting... (y/N)?" +"""""""""""""""""""""""""""""""""""""""""""""""""""""" + +When installing and initializing ARTIQ using commands like ``nix shell``, ``nix develop``, or ``nix profile install``, you may encounter prompts to modify certain configuration settings. These settings correspond to the ``nixConfig`` flag within the ARTIQ flake: + +:: + + do you want to allow configuration setting 'extra-sandbox-paths' to be set to '/opt' (y/N)? + do you want to allow configuration setting 'extra-substituters' to be set to 'https://nixbld.m-labs.hk' (y/N)? + do you want to allow configuration setting 'extra-trusted-public-keys' to be set to 'nixbld.m-labs.hk-1:5aSRVA5b320xbNvu30tqxVPXpld73bhtOeH6uAjRyHc=' (y/N)? + +We recommend accepting these settings by responding with ``y``. If asked to permanently mark these values as trusted, choose ``y`` again. This action saves the configuration to ``~/.local/share/nix/trusted-settings.json``, allowing future prompts to be bypassed. + +Alternatively, you can also use the option `accept-flake-config `_ by appending ``--accept-flake-config`` to your nix command: + +:: + + nix develop --accept-flake-config + +Or add the option to ``~/.config/nix/nix.conf`` to make the setting more permanent: + +:: + + extra-experimental-features = flakes + accept-flake-config = true + +.. note:: + Should you wish to revert to the default settings, you can do so by editing the appropriate options in the aforementioned configuration files. 
+ "Ignoring untrusted substituter, you are not a trusted user" """""""""""""""""""""""""""""""""""""""""""""""""""""""""""" From de10e584f63c53b4224cdcbbb1d07e5bdc6a2703 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Mon, 30 Oct 2023 17:15:19 +0800 Subject: [PATCH 040/296] support .tar flashed idle/startup kernels --- artiq/firmware/Cargo.lock | 17 +++++++ artiq/firmware/runtime/Cargo.toml | 4 ++ artiq/firmware/runtime/main.rs | 2 + artiq/firmware/runtime/session.rs | 79 +++++++++++++++++++++++++++++-- flake.nix | 1 + 5 files changed, 98 insertions(+), 5 deletions(-) diff --git a/artiq/firmware/Cargo.lock b/artiq/firmware/Cargo.lock index 80933a416..71ea74d8b 100644 --- a/artiq/firmware/Cargo.lock +++ b/artiq/firmware/Cargo.lock @@ -13,6 +13,12 @@ dependencies = [ name = "alloc_list" version = "0.0.0" +[[package]] +name = "arrayvec" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" + [[package]] name = "bare-metal" version = "0.2.5" @@ -332,6 +338,7 @@ dependencies = [ "proto_artiq", "riscv", "smoltcp", + "tar-no-std", "unwind_backtrace", ] @@ -416,6 +423,16 @@ dependencies = [ "syn", ] +[[package]] +name = "tar-no-std" +version = "0.1.8" +source = "git+https://git.m-labs.hk/M-Labs/tar-no-std?rev=2ab6dc5#2ab6dc58e5249c59c4eb03eaf3a119bcdd678d32" +dependencies = [ + "arrayvec", + "bitflags", + "log", +] + [[package]] name = "unicode-xid" version = "0.0.4" diff --git a/artiq/firmware/runtime/Cargo.toml b/artiq/firmware/runtime/Cargo.toml index 16477707b..af09ca646 100644 --- a/artiq/firmware/runtime/Cargo.toml +++ b/artiq/firmware/runtime/Cargo.toml @@ -40,3 +40,7 @@ git = "https://git.m-labs.hk/M-Labs/libfringe.git" rev = "3ecbe5" default-features = false features = ["alloc"] + +[dependencies.tar-no-std] +git = "https://git.m-labs.hk/M-Labs/tar-no-std" +rev = "2ab6dc5" diff --git a/artiq/firmware/runtime/main.rs b/artiq/firmware/runtime/main.rs index f0970a73f..a27c337ee 100644 --- a/artiq/firmware/runtime/main.rs +++ b/artiq/firmware/runtime/main.rs @@ -25,6 +25,8 @@ extern crate board_artiq; extern crate logger_artiq; extern crate proto_artiq; extern crate riscv; +#[cfg(has_drtio)] +extern crate tar_no_std; use alloc::collections::BTreeMap; use core::cell::RefCell; diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index d08e42986..e930b2b1b 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -2,7 +2,10 @@ use core::{mem, str, cell::{Cell, RefCell}, fmt::Write as FmtWrite}; use alloc::{vec::Vec, string::{String, ToString}}; use byteorder::{ByteOrder, NativeEndian}; use cslice::CSlice; +#[cfg(has_drtio)] +use tar_no_std::TarArchiveRef; +use dyld::elf; use io::{Read, Write, Error as IoError}; #[cfg(has_drtio)] use io::Cursor; @@ -45,6 +48,9 @@ pub enum Error { #[fail(display = "DDMA error: {}", _0)] Ddma(#[cause] remote_dma::Error), #[cfg(has_drtio)] + #[fail(display = "subkernel destination is down")] + DestinationDown, + #[cfg(has_drtio)] #[fail(display = "subkernel error: {}", _0)] Subkernel(#[cause] SubkernelError), #[cfg(has_drtio)] @@ -309,6 +315,63 @@ fn kern_run(session: &mut Session) -> Result<(), Error> { kern_acknowledge() } + +fn process_flash_kernel(io: &Io, _aux_mutex: &Mutex, _subkernel_mutex: &Mutex, + _routing_table: &drtio_routing::RoutingTable, + _up_destinations: &Urc>, + session: &mut Session, kernel: &[u8] +) -> Result<(), Error> { + // handle ELF and TAR files + if kernel[0] == 
elf::ELFMAG0 && kernel[1] == elf::ELFMAG1 && + kernel[2] == elf::ELFMAG2 && kernel[3] == elf::ELFMAG3 { + // assume ELF file, proceed as before + unsafe { + // make a copy as kernel CPU cannot read SPI directly + kern_load(io, session, Vec::from(kernel).as_ref()) + } + } else { + #[cfg(has_drtio)] + { + let archive = TarArchiveRef::new(kernel); + let entries = archive.entries(); + let mut main_lib: Option<&[u8]> = None; + for entry in entries { + if entry.filename().as_str() == "main.elf" { + main_lib = Some(entry.data()); + } else { + // subkernel filename must be in format: + // " .elf" + let filename = entry.filename(); + let mut iter = filename.as_str().split_whitespace(); + let sid: u32 = iter.next().unwrap() + .parse().unwrap(); + let dest: u8 = iter.next().unwrap() + .strip_suffix(".elf").unwrap() + .parse().unwrap(); + let up = { + let up_destinations = _up_destinations.borrow(); + up_destinations[dest as usize] + }; + if up { + let subkernel_lib = entry.data().to_vec(); + subkernel::add_subkernel(io, _subkernel_mutex, sid, dest, subkernel_lib)?; + subkernel::upload(io, _aux_mutex, _subkernel_mutex, _routing_table, sid)?; + } else { + return Err(Error::DestinationDown); + } + } + } + unsafe { + kern_load(io, session, Vec::from(main_lib.unwrap()).as_ref()) + } + } + #[cfg(not(has_drtio))] + { + unexpected!("multi-kernel libraries are not supported in standalone systems") + } + } +} + fn process_host_message(io: &Io, _aux_mutex: &Mutex, _ddma_mutex: &Mutex, _subkernel_mutex: &Mutex, _routing_table: &drtio_routing::RoutingTable, stream: &mut TcpStream, session: &mut Session) -> Result<(), Error> { @@ -777,11 +840,17 @@ fn flash_kernel_worker(io: &Io, aux_mutex: &Mutex, config::read(config_key, |result| { match result { - Ok(kernel) => unsafe { - // kernel CPU cannot access the SPI flash address space directly, - // so make a copy. - kern_load(io, &mut session, Vec::from(kernel).as_ref()) - }, + Ok(kernel) => { + // process .ELF or .TAR kernels + let res = process_flash_kernel(io, aux_mutex, subkernel_mutex, routing_table, up_destinations, &mut session, kernel); + #[cfg(has_drtio)] + match res { + // wait to establish the DRTIO connection + Err(Error::DestinationDown) => io.sleep(500)?, + _ => () + } + res + } _ => Err(Error::KernelNotFound) } })?; diff --git a/flake.nix b/flake.nix index 0f3fc0a45..47c599b20 100644 --- a/flake.nix +++ b/flake.nix @@ -249,6 +249,7 @@ lockFile = ./artiq/firmware/Cargo.lock; outputHashes = { "fringe-1.2.1" = "sha256-m4rzttWXRlwx53LWYpaKuU5AZe4GSkbjHS6oINt5d3Y="; + "tar-no-std-0.1.8" = "sha256-xm17108v4smXOqxdLvHl9CxTCJslmeogjm4Y87IXFuM="; }; }; nativeBuildInputs = [ From e81e8f28cf1c35c0d916f4a3e169afa7ae7e342a Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Tue, 14 Nov 2023 14:01:17 +0800 Subject: [PATCH 041/296] gateware: merge kasli_generic into kasli. Closes #2279 --- RELEASE_NOTES.rst | 3 + artiq/gateware/targets/kasli.py | 282 +++++++++++++----------- artiq/gateware/targets/kasli_generic.py | 201 ----------------- doc/manual/developing.rst | 2 +- 4 files changed, 160 insertions(+), 328 deletions(-) delete mode 100755 artiq/gateware/targets/kasli_generic.py diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index 642f5012d..c80d85a18 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -77,6 +77,9 @@ Accesses to the data argument should be replaced as below: Refer to the updated ``no_hardware/arguments_demo.py`` example for current usage. * Almazny v1.2 is incompatible with the legacy versions and is the default. 
To use legacy versions, specify ``almazny_hw_rev`` in the JSON description. +* kasli_generic.py has been merged into kasli.py, and the demonstration designs without JSON descriptions + have been removed. The base classes remain present in kasli.py to support third-party flows without + JSON descriptions. * Legacy PYON databases should be converted to LMDB with the script below: :: diff --git a/artiq/gateware/targets/kasli.py b/artiq/gateware/targets/kasli.py index baf75b6bf..d9ed6f446 100755 --- a/artiq/gateware/targets/kasli.py +++ b/artiq/gateware/targets/kasli.py @@ -1,6 +1,8 @@ #!/usr/bin/env python3 import argparse +import logging +from packaging.version import Version from migen import * from migen.genlib.resetsync import AsyncResetSynchronizer @@ -14,16 +16,20 @@ from misoc.targets.kasli import ( BaseSoC, MiniSoC, soc_kasli_args, soc_kasli_argdict) from misoc.integration.builder import builder_args, builder_argdict +from artiq import __version__ as artiq_version from artiq.gateware.amp import AMPSoC from artiq.gateware import rtio from artiq.gateware.rtio.phy import ttl_simple, ttl_serdes_7series, edge_counter from artiq.gateware.rtio.xilinx_clocking import fix_serdes_timing_path -from artiq.gateware import eem +from artiq.gateware import rtio, eem, eem_7series from artiq.gateware.drtio.transceiver import gtp_7series, eem_serdes from artiq.gateware.drtio.siphaser import SiPhaser7Series from artiq.gateware.drtio.rx_synchronizer import XilinxRXSynchronizer from artiq.gateware.drtio import * from artiq.build_soc import * +from artiq.coredevice import jsondesc + +logger = logging.getLogger(__name__) class SMAClkinForward(Module): @@ -130,89 +136,6 @@ class StandaloneBase(MiniSoC, AMPSoC): self.csr_devices.append("rtio_analyzer") -class Tester(StandaloneBase): - """ - Configuration for CI tests. Contains the maximum number of different EEMs. 
- """ - def __init__(self, hw_rev=None, dds=None, **kwargs): - if hw_rev is None: - hw_rev = "v2.0" - if dds is None: - dds = "ad9910" - StandaloneBase.__init__(self, hw_rev=hw_rev, **kwargs) - - # self.config["SI5324_EXT_REF"] = None - self.config["RTIO_FREQUENCY"] = "125.0" - if hw_rev == "v1.0": - # EEM clock fan-out from Si5324, not MMCX - self.comb += self.platform.request("clk_sel").eq(1) - - self.rtio_channels = [] - eem.DIO.add_std(self, 5, - ttl_serdes_7series.InOut_8X, ttl_serdes_7series.Output_8X, - edge_counter_cls=edge_counter.SimpleEdgeCounter) - eem.Urukul.add_std(self, 0, 1, ttl_serdes_7series.Output_8X, dds, - ttl_simple.ClockGen) - eem.Sampler.add_std(self, 3, 2, ttl_serdes_7series.Output_8X) - eem.Zotino.add_std(self, 4, ttl_serdes_7series.Output_8X) - - if hw_rev in ("v1.0", "v1.1"): - for i in (1, 2): - sfp_ctl = self.platform.request("sfp_ctl", i) - phy = ttl_simple.Output(sfp_ctl.led) - self.submodules += phy - self.rtio_channels.append(rtio.Channel.from_phy(phy)) - - self.config["HAS_RTIO_LOG"] = None - self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) - self.rtio_channels.append(rtio.LogChannel()) - self.add_rtio(self.rtio_channels) - - -class SUServo(StandaloneBase): - """ - SUServo (Sampler-Urukul-Servo) extension variant configuration - """ - def __init__(self, hw_rev=None, **kwargs): - if hw_rev is None: - hw_rev = "v2.0" - StandaloneBase.__init__(self, hw_rev=hw_rev, **kwargs) - - # self.config["SI5324_EXT_REF"] = None - self.config["RTIO_FREQUENCY"] = "125.0" - if hw_rev == "v1.0": - # EEM clock fan-out from Si5324, not MMCX - self.comb += self.platform.request("clk_sel").eq(1) - - self.rtio_channels = [] - # EEM0, EEM1: DIO - eem.DIO.add_std(self, 0, - ttl_serdes_7series.InOut_8X, ttl_serdes_7series.Output_8X) - eem.DIO.add_std(self, 1, - ttl_serdes_7series.Output_8X, ttl_serdes_7series.Output_8X) - - # EEM3/2: Sampler, EEM5/4: Urukul, EEM7/6: Urukul - eem.SUServo.add_std(self, - eems_sampler=(3, 2), - eems_urukul=[[5, 4], [7, 6]]) - - for i in (1, 2): - sfp_ctl = self.platform.request("sfp_ctl", i) - phy = ttl_simple.Output(sfp_ctl.led) - self.submodules += phy - self.rtio_channels.append(rtio.Channel.from_phy(phy)) - - self.config["HAS_RTIO_LOG"] = None - self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) - self.rtio_channels.append(rtio.LogChannel()) - - self.add_rtio(self.rtio_channels) - - pads = self.platform.lookup_request("sampler3_adc_data_p") - self.platform.add_false_path_constraints( - pads.clkout, self.crg.cd_sys.clk) - - class MasterBase(MiniSoC, AMPSoC): mem_map = { "cri_con": 0x10000000, @@ -640,77 +563,184 @@ class SatelliteBase(BaseSoC, AMPSoC): self.get_native_sdram_if(), cpu_dw=self.cpu_dw) self.csr_devices.append("rtio_analyzer") - -class Master(MasterBase): - def __init__(self, hw_rev=None, **kwargs): +class GenericStandalone(StandaloneBase): + def __init__(self, description, hw_rev=None,**kwargs): if hw_rev is None: - hw_rev = "v2.0" - MasterBase.__init__(self, hw_rev=hw_rev, **kwargs) + hw_rev = description["hw_rev"] + self.class_name_override = description["variant"] + StandaloneBase.__init__(self, hw_rev=hw_rev, **kwargs) + self.config["RTIO_FREQUENCY"] = "{:.1f}".format(description["rtio_frequency"]/1e6) + if "ext_ref_frequency" in description: + self.config["SI5324_EXT_REF"] = None + self.config["EXT_REF_FREQUENCY"] = "{:.1f}".format( + description["ext_ref_frequency"]/1e6) + if hw_rev == "v1.0": + # EEM clock fan-out from Si5324, not MMCX + self.comb += self.platform.request("clk_sel").eq(1) + + has_grabber = 
any(peripheral["type"] == "grabber" for peripheral in description["peripherals"]) + if has_grabber: + self.grabber_csr_group = [] self.rtio_channels = [] - - phy = ttl_simple.Output(self.platform.request("user_led", 0)) - self.submodules += phy - self.rtio_channels.append(rtio.Channel.from_phy(phy)) - # matches Tester EEM numbers - eem.DIO.add_std(self, 5, - ttl_serdes_7series.InOut_8X, ttl_serdes_7series.Output_8X) - eem.Urukul.add_std(self, 0, 1, ttl_serdes_7series.Output_8X) + eem_7series.add_peripherals(self, description["peripherals"]) + if hw_rev in ("v1.0", "v1.1"): + for i in (1, 2): + print("SFP LED at RTIO channel 0x{:06x}".format(len(self.rtio_channels))) + sfp_ctl = self.platform.request("sfp_ctl", i) + phy = ttl_simple.Output(sfp_ctl.led) + self.submodules += phy + self.rtio_channels.append(rtio.Channel.from_phy(phy)) + if hw_rev in ("v1.1", "v2.0"): + for i in range(3): + print("USER LED at RTIO channel 0x{:06x}".format(len(self.rtio_channels))) + phy = ttl_simple.Output(self.platform.request("user_led", i)) + self.submodules += phy + self.rtio_channels.append(rtio.Channel.from_phy(phy)) self.config["HAS_RTIO_LOG"] = None self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) self.rtio_channels.append(rtio.LogChannel()) - self.add_rtio(self.rtio_channels) + self.add_rtio(self.rtio_channels, sed_lanes=description["sed_lanes"]) + + if has_grabber: + self.config["HAS_GRABBER"] = None + self.add_csr_group("grabber", self.grabber_csr_group) + for grabber in self.grabber_csr_group: + self.platform.add_false_path_constraints( + self.crg.cd_sys.clk, getattr(self, grabber).deserializer.cd_cl.clk) -class Satellite(SatelliteBase): - def __init__(self, hw_rev=None, **kwargs): +class GenericMaster(MasterBase): + def __init__(self, description, hw_rev=None, **kwargs): if hw_rev is None: - hw_rev = "v2.0" - SatelliteBase.__init__(self, hw_rev=hw_rev, **kwargs) + hw_rev = description["hw_rev"] + self.class_name_override = description["variant"] + has_drtio_over_eem = any(peripheral["type"] == "shuttler" for peripheral in description["peripherals"]) + MasterBase.__init__(self, + hw_rev=hw_rev, + rtio_clk_freq=description["rtio_frequency"], + enable_sata=description["enable_sata_drtio"], + enable_sys5x=has_drtio_over_eem, + **kwargs) + if "ext_ref_frequency" in description: + self.config["SI5324_EXT_REF"] = None + self.config["EXT_REF_FREQUENCY"] = "{:.1f}".format( + description["ext_ref_frequency"]/1e6) + if hw_rev == "v1.0": + # EEM clock fan-out from Si5324, not MMCX + self.comb += self.platform.request("clk_sel").eq(1) + + if has_drtio_over_eem: + self.eem_drtio_channels = [] + has_grabber = any(peripheral["type"] == "grabber" for peripheral in description["peripherals"]) + if has_grabber: + self.grabber_csr_group = [] self.rtio_channels = [] - phy = ttl_simple.Output(self.platform.request("user_led", 0)) - self.submodules += phy - self.rtio_channels.append(rtio.Channel.from_phy(phy)) - # matches Tester EEM numbers - eem.DIO.add_std(self, 5, - ttl_serdes_7series.InOut_8X, ttl_serdes_7series.Output_8X) + eem_7series.add_peripherals(self, description["peripherals"]) + if hw_rev in ("v1.1", "v2.0"): + for i in range(3): + print("USER LED at RTIO channel 0x{:06x}".format(len(self.rtio_channels))) + phy = ttl_simple.Output(self.platform.request("user_led", i)) + self.submodules += phy + self.rtio_channels.append(rtio.Channel.from_phy(phy)) - self.add_rtio(self.rtio_channels) + self.config["HAS_RTIO_LOG"] = None + self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) + 
self.rtio_channels.append(rtio.LogChannel()) + + if has_drtio_over_eem: + self.add_eem_drtio(self.eem_drtio_channels) + self.add_drtio_cpuif_groups() + + self.add_rtio(self.rtio_channels, sed_lanes=description["sed_lanes"]) + + if has_grabber: + self.config["HAS_GRABBER"] = None + self.add_csr_group("grabber", self.grabber_csr_group) + for grabber in self.grabber_csr_group: + self.platform.add_false_path_constraints( + self.gt_drtio.gtps[0].txoutclk, getattr(self, grabber).deserializer.cd_cl.clk) -VARIANTS = {cls.__name__.lower(): cls for cls in [Tester, SUServo, Master, Satellite]} +class GenericSatellite(SatelliteBase): + def __init__(self, description, hw_rev=None, **kwargs): + if hw_rev is None: + hw_rev = description["hw_rev"] + self.class_name_override = description["variant"] + SatelliteBase.__init__(self, + hw_rev=hw_rev, + rtio_clk_freq=description["rtio_frequency"], + enable_sata=description["enable_sata_drtio"], + **kwargs) + if hw_rev == "v1.0": + # EEM clock fan-out from Si5324, not MMCX + self.comb += self.platform.request("clk_sel").eq(1) + + has_grabber = any(peripheral["type"] == "grabber" for peripheral in description["peripherals"]) + if has_grabber: + self.grabber_csr_group = [] + + self.rtio_channels = [] + eem_7series.add_peripherals(self, description["peripherals"]) + if hw_rev in ("v1.1", "v2.0"): + for i in range(3): + print("USER LED at RTIO channel 0x{:06x}".format(len(self.rtio_channels))) + phy = ttl_simple.Output(self.platform.request("user_led", i)) + self.submodules += phy + self.rtio_channels.append(rtio.Channel.from_phy(phy)) + + self.config["HAS_RTIO_LOG"] = None + self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) + self.rtio_channels.append(rtio.LogChannel()) + + self.add_rtio(self.rtio_channels, sed_lanes=description["sed_lanes"]) + if has_grabber: + self.config["HAS_GRABBER"] = None + self.add_csr_group("grabber", self.grabber_csr_group) + for grabber in self.grabber_csr_group: + self.platform.add_false_path_constraints( + self.gt_drtio.gtps[0].txoutclk, getattr(self, grabber).deserializer.cd_cl.clk) def main(): parser = argparse.ArgumentParser( - description="ARTIQ device binary builder for Kasli systems") + description="ARTIQ device binary builder for generic Kasli systems") builder_args(parser) soc_kasli_args(parser) parser.set_defaults(output_dir="artiq_kasli") - parser.add_argument("-V", "--variant", default="tester", - help="variant: {} (default: %(default)s)".format( - "/".join(sorted(VARIANTS.keys())))) - parser.add_argument("--tester-dds", default=None, - help="Tester variant DDS type: ad9910/ad9912 " - "(default: ad9910)") + parser.add_argument("description", metavar="DESCRIPTION", + help="JSON system description file") parser.add_argument("--gateware-identifier-str", default=None, help="Override ROM identifier") args = parser.parse_args() + description = jsondesc.load(args.description) - argdict = dict() - argdict["gateware_identifier_str"] = args.gateware_identifier_str - argdict["dds"] = args.tester_dds + min_artiq_version = description["min_artiq_version"] + if Version(artiq_version) < Version(min_artiq_version): + logger.warning("ARTIQ version mismatch: current %s < %s minimum", + artiq_version, min_artiq_version) - variant = args.variant.lower() - try: - cls = VARIANTS[variant] - except KeyError: - raise SystemExit("Invalid variant (-V/--variant)") + if description["target"] != "kasli": + raise ValueError("Description is for a different target") - soc = cls(**soc_kasli_argdict(args), **argdict) + if description["drtio_role"] == 
"standalone": + cls = GenericStandalone + elif description["drtio_role"] == "master": + cls = GenericMaster + elif description["drtio_role"] == "satellite": + cls = GenericSatellite + else: + raise ValueError("Invalid DRTIO role") + + has_shuttler = any(peripheral["type"] == "shuttler" for peripheral in description["peripherals"]) + if has_shuttler and (description["drtio_role"] == "standalone"): + raise ValueError("Shuttler requires DRTIO, please switch role to master") + + soc = cls(description, gateware_identifier_str=args.gateware_identifier_str, **soc_kasli_argdict(args)) + args.variant = description["variant"] build_artiq_soc(soc, builder_argdict(args)) diff --git a/artiq/gateware/targets/kasli_generic.py b/artiq/gateware/targets/kasli_generic.py deleted file mode 100755 index 9756093df..000000000 --- a/artiq/gateware/targets/kasli_generic.py +++ /dev/null @@ -1,201 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import logging -from packaging.version import Version - -from misoc.integration.builder import builder_args, builder_argdict -from misoc.targets.kasli import soc_kasli_args, soc_kasli_argdict - -from artiq import __version__ as artiq_version -from artiq.coredevice import jsondesc -from artiq.gateware import rtio, eem_7series -from artiq.gateware.rtio.phy import ttl_simple -from artiq.gateware.targets.kasli import StandaloneBase, MasterBase, SatelliteBase -from artiq.build_soc import * - -logger = logging.getLogger(__name__) - -class GenericStandalone(StandaloneBase): - def __init__(self, description, hw_rev=None,**kwargs): - if hw_rev is None: - hw_rev = description["hw_rev"] - self.class_name_override = description["variant"] - StandaloneBase.__init__(self, hw_rev=hw_rev, **kwargs) - self.config["RTIO_FREQUENCY"] = "{:.1f}".format(description["rtio_frequency"]/1e6) - if "ext_ref_frequency" in description: - self.config["SI5324_EXT_REF"] = None - self.config["EXT_REF_FREQUENCY"] = "{:.1f}".format( - description["ext_ref_frequency"]/1e6) - if hw_rev == "v1.0": - # EEM clock fan-out from Si5324, not MMCX - self.comb += self.platform.request("clk_sel").eq(1) - - has_grabber = any(peripheral["type"] == "grabber" for peripheral in description["peripherals"]) - if has_grabber: - self.grabber_csr_group = [] - - self.rtio_channels = [] - eem_7series.add_peripherals(self, description["peripherals"]) - if hw_rev in ("v1.0", "v1.1"): - for i in (1, 2): - print("SFP LED at RTIO channel 0x{:06x}".format(len(self.rtio_channels))) - sfp_ctl = self.platform.request("sfp_ctl", i) - phy = ttl_simple.Output(sfp_ctl.led) - self.submodules += phy - self.rtio_channels.append(rtio.Channel.from_phy(phy)) - if hw_rev in ("v1.1", "v2.0"): - for i in range(3): - print("USER LED at RTIO channel 0x{:06x}".format(len(self.rtio_channels))) - phy = ttl_simple.Output(self.platform.request("user_led", i)) - self.submodules += phy - self.rtio_channels.append(rtio.Channel.from_phy(phy)) - - self.config["HAS_RTIO_LOG"] = None - self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) - self.rtio_channels.append(rtio.LogChannel()) - - self.add_rtio(self.rtio_channels, sed_lanes=description["sed_lanes"]) - - if has_grabber: - self.config["HAS_GRABBER"] = None - self.add_csr_group("grabber", self.grabber_csr_group) - for grabber in self.grabber_csr_group: - self.platform.add_false_path_constraints( - self.crg.cd_sys.clk, getattr(self, grabber).deserializer.cd_cl.clk) - - -class GenericMaster(MasterBase): - def __init__(self, description, hw_rev=None, **kwargs): - if hw_rev is None: - hw_rev = 
description["hw_rev"] - self.class_name_override = description["variant"] - has_drtio_over_eem = any(peripheral["type"] == "shuttler" for peripheral in description["peripherals"]) - MasterBase.__init__(self, - hw_rev=hw_rev, - rtio_clk_freq=description["rtio_frequency"], - enable_sata=description["enable_sata_drtio"], - enable_sys5x=has_drtio_over_eem, - **kwargs) - if "ext_ref_frequency" in description: - self.config["SI5324_EXT_REF"] = None - self.config["EXT_REF_FREQUENCY"] = "{:.1f}".format( - description["ext_ref_frequency"]/1e6) - if hw_rev == "v1.0": - # EEM clock fan-out from Si5324, not MMCX - self.comb += self.platform.request("clk_sel").eq(1) - - if has_drtio_over_eem: - self.eem_drtio_channels = [] - has_grabber = any(peripheral["type"] == "grabber" for peripheral in description["peripherals"]) - if has_grabber: - self.grabber_csr_group = [] - - self.rtio_channels = [] - eem_7series.add_peripherals(self, description["peripherals"]) - if hw_rev in ("v1.1", "v2.0"): - for i in range(3): - print("USER LED at RTIO channel 0x{:06x}".format(len(self.rtio_channels))) - phy = ttl_simple.Output(self.platform.request("user_led", i)) - self.submodules += phy - self.rtio_channels.append(rtio.Channel.from_phy(phy)) - - self.config["HAS_RTIO_LOG"] = None - self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) - self.rtio_channels.append(rtio.LogChannel()) - - if has_drtio_over_eem: - self.add_eem_drtio(self.eem_drtio_channels) - self.add_drtio_cpuif_groups() - - self.add_rtio(self.rtio_channels, sed_lanes=description["sed_lanes"]) - - if has_grabber: - self.config["HAS_GRABBER"] = None - self.add_csr_group("grabber", self.grabber_csr_group) - for grabber in self.grabber_csr_group: - self.platform.add_false_path_constraints( - self.gt_drtio.gtps[0].txoutclk, getattr(self, grabber).deserializer.cd_cl.clk) - - -class GenericSatellite(SatelliteBase): - def __init__(self, description, hw_rev=None, **kwargs): - if hw_rev is None: - hw_rev = description["hw_rev"] - self.class_name_override = description["variant"] - SatelliteBase.__init__(self, - hw_rev=hw_rev, - rtio_clk_freq=description["rtio_frequency"], - enable_sata=description["enable_sata_drtio"], - **kwargs) - if hw_rev == "v1.0": - # EEM clock fan-out from Si5324, not MMCX - self.comb += self.platform.request("clk_sel").eq(1) - - has_grabber = any(peripheral["type"] == "grabber" for peripheral in description["peripherals"]) - if has_grabber: - self.grabber_csr_group = [] - - self.rtio_channels = [] - eem_7series.add_peripherals(self, description["peripherals"]) - if hw_rev in ("v1.1", "v2.0"): - for i in range(3): - print("USER LED at RTIO channel 0x{:06x}".format(len(self.rtio_channels))) - phy = ttl_simple.Output(self.platform.request("user_led", i)) - self.submodules += phy - self.rtio_channels.append(rtio.Channel.from_phy(phy)) - - self.config["HAS_RTIO_LOG"] = None - self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) - self.rtio_channels.append(rtio.LogChannel()) - - self.add_rtio(self.rtio_channels, sed_lanes=description["sed_lanes"]) - if has_grabber: - self.config["HAS_GRABBER"] = None - self.add_csr_group("grabber", self.grabber_csr_group) - for grabber in self.grabber_csr_group: - self.platform.add_false_path_constraints( - self.gt_drtio.gtps[0].txoutclk, getattr(self, grabber).deserializer.cd_cl.clk) - - -def main(): - parser = argparse.ArgumentParser( - description="ARTIQ device binary builder for generic Kasli systems") - builder_args(parser) - soc_kasli_args(parser) - parser.set_defaults(output_dir="artiq_kasli") - 
parser.add_argument("description", metavar="DESCRIPTION", - help="JSON system description file") - parser.add_argument("--gateware-identifier-str", default=None, - help="Override ROM identifier") - args = parser.parse_args() - description = jsondesc.load(args.description) - - min_artiq_version = description["min_artiq_version"] - if Version(artiq_version) < Version(min_artiq_version): - logger.warning("ARTIQ version mismatch: current %s < %s minimum", - artiq_version, min_artiq_version) - - if description["target"] != "kasli": - raise ValueError("Description is for a different target") - - if description["drtio_role"] == "standalone": - cls = GenericStandalone - elif description["drtio_role"] == "master": - cls = GenericMaster - elif description["drtio_role"] == "satellite": - cls = GenericSatellite - else: - raise ValueError("Invalid DRTIO role") - - has_shuttler = any(peripheral["type"] == "shuttler" for peripheral in description["peripherals"]) - if has_shuttler and (description["drtio_role"] == "standalone"): - raise ValueError("Shuttler requires DRTIO, please switch role to master") - - soc = cls(description, gateware_identifier_str=args.gateware_identifier_str, **soc_kasli_argdict(args)) - args.variant = description["variant"] - build_artiq_soc(soc, builder_argdict(args)) - - -if __name__ == "__main__": - main() diff --git a/doc/manual/developing.rst b/doc/manual/developing.rst index cea0c3e9b..481920993 100644 --- a/doc/manual/developing.rst +++ b/doc/manual/developing.rst @@ -16,7 +16,7 @@ ARTIQ itself does not depend on Nix, and it is also possible to compile everythi * Enable flakes in Nix by e.g. adding ``experimental-features = nix-command flakes`` to ``nix.conf`` (for example ``~/.config/nix/nix.conf``). * Clone the ARTIQ Git repository and run ``nix develop`` at the root (where ``flake.nix`` is). * Make the current source code of ARTIQ available to the Python interpreter by running ``export PYTHONPATH=`pwd`:$PYTHONPATH``. -* You can then build the firmware and gateware with a command such as ``$ python -m artiq.gateware.targets.kasli``. If you are using a JSON system description file, use ``$ python -m artiq.gateware.targets.kasli_generic file.json``. +* You can then build the firmware and gateware with a command such as ``$ python -m artiq.gateware.targets.kasli file.json``. * Flash the binaries into the FPGA board with a command such as ``$ artiq_flash --srcbuild -d artiq_kasli/``. You need to configure OpenOCD as explained :ref:`in the user section `. OpenOCD is already part of the flake's development environment. * Check that the board boots and examine the UART messages by running a serial terminal program, e.g. ``$ flterm /dev/ttyUSB1`` (``flterm`` is part of MiSoC and installed in the flake's development environment). Leave the terminal running while you are flashing the board, so that you see the startup messages when the board boots immediately after flashing. You can also restart the board (without reflashing it) with ``$ artiq_flash start``. * The communication parameters are 115200 8-N-1. Ensure that your user has access to the serial device (e.g. by adding the user account to the ``dialout`` group). 
From 77c6553725d809944bd16a1ab41bc9820c296cf2 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Tue, 14 Nov 2023 14:14:47 +0800 Subject: [PATCH 042/296] always provide artiq._version.get_rev --- artiq/_version.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/artiq/_version.py b/artiq/_version.py index bf9797c9d..36de51339 100644 --- a/artiq/_version.py +++ b/artiq/_version.py @@ -1,4 +1,7 @@ import os +def get_rev(): + return "unknown" + def get_version(): return os.getenv("VERSIONEER_OVERRIDE", default="8.0+unknown.beta") From 56418e342e2779b3e5a191c96d5d96c162ddcb4e Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Wed, 22 Nov 2023 20:51:02 +0800 Subject: [PATCH 043/296] take into account VERSIONEER_REV in artiq._version.get_rev --- artiq/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/artiq/_version.py b/artiq/_version.py index 36de51339..56f8614c9 100644 --- a/artiq/_version.py +++ b/artiq/_version.py @@ -1,7 +1,7 @@ import os def get_rev(): - return "unknown" + return os.getenv("VERSIONEER_REV", default="unknown") def get_version(): return os.getenv("VERSIONEER_OVERRIDE", default="8.0+unknown.beta") From 1a28069aa2d4b5395f0f34b9e9368b48e409d7d8 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 19 Oct 2023 17:16:26 +0800 Subject: [PATCH 044/296] support for pre-compiling subkernels --- artiq/compiler/embedding.py | 7 +++++ artiq/coredevice/core.py | 42 ++++++++++++++++------------- artiq/frontend/artiq_compile.py | 48 ++++++++++++++++++++++++++------- artiq/frontend/artiq_run.py | 20 +++++++++++++- 4 files changed, 89 insertions(+), 28 deletions(-) diff --git a/artiq/compiler/embedding.py b/artiq/compiler/embedding.py index 9c2f270d8..3b6f6ae6c 100644 --- a/artiq/compiler/embedding.py +++ b/artiq/compiler/embedding.py @@ -193,6 +193,13 @@ class EmbeddingMap: subkernels[k] = v return subkernels + def has_rpc(self): + return any(filter( + lambda x: (inspect.isfunction(x) or inspect.ismethod(x)) and \ + (not hasattr(x, "artiq_embedded") or x.artiq_embedded.destination is None), + self.object_forward_map.values() + )) + def has_rpc_or_subkernel(self): return any(filter(lambda x: inspect.isfunction(x) or inspect.ismethod(x), self.object_forward_map.values())) diff --git a/artiq/coredevice/core.py b/artiq/coredevice/core.py index ca9123bee..4ad85b780 100644 --- a/artiq/coredevice/core.py +++ b/artiq/coredevice/core.py @@ -147,28 +147,34 @@ class Core: result = new_result embedding_map, kernel_library, symbolizer, demangler, subkernel_arg_types = \ self.compile(function, args, kwargs, set_result) - self.compile_subkernels(embedding_map, args, subkernel_arg_types) + self.compile_and_upload_subkernels(embedding_map, args, subkernel_arg_types) self._run_compiled(kernel_library, embedding_map, symbolizer, demangler) return result - def compile_subkernels(self, embedding_map, args, subkernel_arg_types): + def compile_subkernel(self, sid, subkernel_fn, embedding_map, args, subkernel_arg_types): + # pass self to subkernels (if applicable) + # assuming the first argument is self + subkernel_args = getfullargspec(subkernel_fn.artiq_embedded.function) + self_arg = [] + if len(subkernel_args[0]) > 0: + if subkernel_args[0][0] == 'self': + self_arg = args[:1] + destination = subkernel_fn.artiq_embedded.destination + destination_tgt = self.satellite_cpu_targets[destination] + target = get_target_cls(destination_tgt)(subkernel_id=sid) + object_map, kernel_library, _, _, _ = \ + self.compile(subkernel_fn, self_arg, {}, attribute_writeback=False, + 
print_as_rpc=False, target=target, destination=destination, + subkernel_arg_types=subkernel_arg_types.get(sid, [])) + if object_map.has_rpc_or_subkernel(): + raise ValueError("Subkernel must not use RPC or subkernels in other destinations") + return destination, kernel_library + + def compile_and_upload_subkernels(self, embedding_map, args, subkernel_arg_types): for sid, subkernel_fn in embedding_map.subkernels().items(): - # pass self to subkernels (if applicable) - # assuming the first argument is self - subkernel_args = getfullargspec(subkernel_fn.artiq_embedded.function) - self_arg = [] - if len(subkernel_args[0]) > 0: - if subkernel_args[0][0] == 'self': - self_arg = args[:1] - destination = subkernel_fn.artiq_embedded.destination - destination_tgt = self.satellite_cpu_targets[destination] - target = get_target_cls(destination_tgt)(subkernel_id=sid) - object_map, kernel_library, _, _, _ = \ - self.compile(subkernel_fn, self_arg, {}, attribute_writeback=False, - print_as_rpc=False, target=target, destination=destination, - subkernel_arg_types=subkernel_arg_types.get(sid, [])) - if object_map.has_rpc_or_subkernel(): - raise ValueError("Subkernel must not use RPC or subkernels in other destinations") + destination, kernel_library = \ + self.compile_subkernel(sid, subkernel_fn, embedding_map, + args, subkernel_arg_types) self.comm.upload_subkernel(kernel_library, sid, destination) def precompile(self, function, *args, **kwargs): diff --git a/artiq/frontend/artiq_compile.py b/artiq/frontend/artiq_compile.py index 9aeceb6d9..04a466563 100755 --- a/artiq/frontend/artiq_compile.py +++ b/artiq/frontend/artiq_compile.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -import os, sys, logging, argparse +import os, sys, io, tarfile, logging, argparse from sipyco import common_args @@ -63,9 +63,16 @@ def main(): core_name = exp.run.artiq_embedded.core_name core = getattr(exp_inst, core_name) - object_map, kernel_library, _, _, _ = \ + object_map, main_kernel_library, _, _, subkernel_arg_types = \ core.compile(exp.run, [exp_inst], {}, attribute_writeback=False, print_as_rpc=False) + + subkernels = {} + for sid, subkernel_fn in object_map.subkernels().items(): + destination, subkernel_library = core.compile_subkernel( + sid, subkernel_fn, object_map, + [exp_inst], subkernel_arg_types) + subkernels[sid] = (destination, subkernel_library) except CompileError as error: return finally: @@ -73,16 +80,39 @@ def main(): finally: dataset_db.close_db() - if object_map.has_rpc_or_subkernel(): - raise ValueError("Experiment must not use RPC or subkernels") + if object_map.has_rpc(): + raise ValueError("Experiment must not use RPC") output = args.output - if output is None: - basename, ext = os.path.splitext(args.file) - output = "{}.elf".format(basename) - with open(output, "wb") as f: - f.write(kernel_library) + if not subkernels: + # just write the ELF file + if output is None: + basename, ext = os.path.splitext(args.file) + output = "{}.elf".format(basename) + + with open(output, "wb") as f: + f.write(main_kernel_library) + else: + # combine them in a tar archive + if output is None: + basename, ext = os.path.splitext(args.file) + output = "{}.tar".format(basename) + + with tarfile.open(output, "w:") as tar: + # write the main lib as "main.elf" + main_kernel_fileobj = io.BytesIO(main_kernel_library) + main_kernel_info = tarfile.TarInfo(name="main.elf") + main_kernel_info.size = len(main_kernel_library) + tar.addfile(main_kernel_info, fileobj=main_kernel_fileobj) + + # subkernels as " .elf" + for sid, (destination, 
subkernel_library) in subkernels.items(): + subkernel_fileobj = io.BytesIO(subkernel_library) + subkernel_info = tarfile.TarInfo(name="{} {}.elf".format(sid, destination)) + subkernel_info.size = len(subkernel_library) + tar.addfile(subkernel_info, fileobj=subkernel_fileobj) + if __name__ == "__main__": main() diff --git a/artiq/frontend/artiq_run.py b/artiq/frontend/artiq_run.py index 948c34475..f0a28e91c 100755 --- a/artiq/frontend/artiq_run.py +++ b/artiq/frontend/artiq_run.py @@ -4,6 +4,7 @@ import argparse import sys +import tarfile from operator import itemgetter import logging from collections import defaultdict @@ -86,6 +87,20 @@ class LLVMBitcodeRunner(FileRunner): return self.target.link([self.target.assemble(llmodule)]) +class TARRunner(FileRunner): + def compile(self): + with tarfile.open(self.file, "r:") as tar: + for entry in tar: + if entry.name == 'main.elf': + main_lib = tar.extractfile(entry).read() + else: + subkernel_name = entry.name.removesuffix(".elf") + sid, dest = tuple(map(lambda x: int(x), subkernel_name.split(" "))) + subkernel_lib = tar.extractfile(entry).read() + self.core.comm.upload_subkernel(subkernel_lib, sid, dest) + return main_lib + + class DummyScheduler: def __init__(self): self.rid = 0 @@ -156,6 +171,7 @@ def _build_experiment(device_mgr, dataset_mgr, args): argument_mgr = ProcessArgumentManager(arguments) managers = (device_mgr, dataset_mgr, argument_mgr, {}) if hasattr(args, "file"): + is_tar = tarfile.is_tarfile(args.file) is_elf = args.file.endswith(".elf") is_ll = args.file.endswith(".ll") is_bc = args.file.endswith(".bc") @@ -165,7 +181,9 @@ def _build_experiment(device_mgr, dataset_mgr, args): if args.class_name: raise ValueError("class-name not supported " "for precompiled kernels") - if is_elf: + if is_tar: + return TARRunner(managers, file=args.file) + elif is_elf: return ELFRunner(managers, file=args.file) elif is_ll: return LLVMIRRunner(managers, file=args.file) From cf7cbd0c3b275fedfd52da8cf48737bb621bd851 Mon Sep 17 00:00:00 2001 From: Florian Agbuya Date: Tue, 28 Nov 2023 16:52:07 +0800 Subject: [PATCH 045/296] flake: update nixpkgs Signed-off-by: Florian Agbuya --- flake.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index 15be8436a..6a5c60471 100644 --- a/flake.lock +++ b/flake.lock @@ -60,11 +60,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1699169573, - "narHash": "sha256-cvUb1xZkvOp3W2SzylStrTirhVd9zCeo5utJl9nSIhw=", + "lastModified": 1700501263, + "narHash": "sha256-M0U063Ba2DKL4lMYI7XW13Rsk5tfUXnIYiAVa39AV/0=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "aeefe2054617cae501809b82b44a8e8f7be7cc4b", + "rev": "f741f8a839912e272d7e87ccf4b9dbc6012cdaf9", "type": "github" }, "original": { From 1108cebd7580efc39773918e55ae831ed0a33f52 Mon Sep 17 00:00:00 2001 From: Florian Agbuya Date: Tue, 28 Nov 2023 16:54:13 +0800 Subject: [PATCH 046/296] flake: fix ncurses on vivado Signed-off-by: Florian Agbuya --- flake.nix | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/flake.nix b/flake.nix index 47c599b20..704ec3f71 100644 --- a/flake.nix +++ b/flake.nix @@ -43,9 +43,17 @@ cargo = rust; }); - vivadoDeps = pkgs: with pkgs; [ + vivadoDeps = pkgs: with pkgs; let + # Apply patch from https://github.com/nix-community/nix-environments/pull/54 + # to fix ncurses libtinfo.so's soname issue + ncurses' = ncurses5.overrideAttrs (old: { + configureFlags = old.configureFlags ++ [ "--with-termlib" ]; + postFixup = ""; + }); + in + [ libxcrypt-legacy - ncurses5 + 
(ncurses'.override { unicodeSupport = false; }) zlib libuuid xorg.libSM From 9f4b8db2de7edc85caa7e564330835cf8898261c Mon Sep 17 00:00:00 2001 From: mwojcik Date: Fri, 1 Dec 2023 16:34:33 +0800 Subject: [PATCH 047/296] repeater: fix setting tsc --- artiq/gateware/drtio/rt_controller_repeater.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/artiq/gateware/drtio/rt_controller_repeater.py b/artiq/gateware/drtio/rt_controller_repeater.py index 53b2b1b07..79b9559eb 100644 --- a/artiq/gateware/drtio/rt_controller_repeater.py +++ b/artiq/gateware/drtio/rt_controller_repeater.py @@ -17,12 +17,11 @@ class RTController(Module, AutoCSR): self.sync += rt_packet.reset.eq(self.reset.storage) - set_time_stb = Signal() self.sync += [ - If(rt_packet.set_time_stb, set_time_stb.eq(0)), - If(self.set_time.re, set_time_stb.eq(1)) + If(rt_packet.set_time_ack, rt_packet.set_time_stb.eq(0)), + If(self.set_time.re, rt_packet.set_time_stb.eq(1)) ] - self.comb += self.set_time.w.eq(set_time_stb) + self.comb += self.set_time.w.eq(rt_packet.set_time_stb) errors = [ (rt_packet.err_unknown_packet_type, "rtio_rx", None, None), From d458fc27bf627b5196149be49fb350857fa91f70 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Sun, 3 Dec 2023 11:18:25 +0800 Subject: [PATCH 048/296] switch to new nixpkgs release --- flake.lock | 14 +++++++------- flake.nix | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/flake.lock b/flake.lock index 6a5c60471..85fe4ae93 100644 --- a/flake.lock +++ b/flake.lock @@ -60,16 +60,16 @@ }, "nixpkgs": { "locked": { - "lastModified": 1700501263, - "narHash": "sha256-M0U063Ba2DKL4lMYI7XW13Rsk5tfUXnIYiAVa39AV/0=", + "lastModified": 1701389149, + "narHash": "sha256-rU1suTIEd5DGCaAXKW6yHoCfR1mnYjOXQFOaH7M23js=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "f741f8a839912e272d7e87ccf4b9dbc6012cdaf9", + "rev": "5de0b32be6e85dc1a9404c75131316e4ffbc634c", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-23.05", + "ref": "nixos-23.11", "repo": "nixpkgs", "type": "github" } @@ -92,11 +92,11 @@ ] }, "locked": { - "lastModified": 1697528004, - "narHash": "sha256-FFa2MbhAJEjwY58uOs0swvgymfjubHyWba6Q0X6CbB0=", + "lastModified": 1701572254, + "narHash": "sha256-ixq8dlpyOytDr+d/OmW8v1Ioy9V2G2ibOlNj8GFDSq4=", "owner": "m-labs", "repo": "sipyco", - "rev": "c0a7ed350ccfb85474217057fc47b3f258ca8d99", + "rev": "cceac0df537887135f99aa6b1bdd82853f16b4d6", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 704ec3f71..6ddc56de2 100644 --- a/flake.nix +++ b/flake.nix @@ -1,7 +1,7 @@ { description = "A leading-edge control system for quantum information experiments"; - inputs.nixpkgs.url = github:NixOS/nixpkgs/nixos-23.05; + inputs.nixpkgs.url = github:NixOS/nixpkgs/nixos-23.11; inputs.mozilla-overlay = { url = github:mozilla/nixpkgs-mozilla; flake = false; }; inputs.sipyco.url = github:m-labs/sipyco; inputs.sipyco.inputs.nixpkgs.follows = "nixpkgs"; From 8381b34a79aa7af355522478ccbf68dda8d6d659 Mon Sep 17 00:00:00 2001 From: Florian Agbuya Date: Tue, 28 Nov 2023 10:01:03 +0800 Subject: [PATCH 049/296] flake: add new booktabs dependency for artiq-manual-pdf --- flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index 6ddc56de2..565211391 100644 --- a/flake.nix +++ b/flake.nix @@ -363,7 +363,7 @@ inherit (pkgs.texlive) scheme-basic latexmk cmap collection-fontsrecommended fncychap titlesec tabulary varwidth framed fancyvrb float wrapfig parskip - upquote capt-of needspace etoolbox; + 
upquote capt-of needspace etoolbox booktabs; }; in rec { packages.x86_64-linux = { From 8ca75a3fb91f2de5416568f3fb14a09f75219c60 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Thu, 14 Sep 2023 10:57:37 +0800 Subject: [PATCH 050/296] firmware: deal with rust nonsense Fixes "error: edition 2021 is unstable and only available with -Z unstable-options. error: could not compile `alloc`" --- flake.nix | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/flake.nix b/flake.nix index 565211391..42964e9ee 100644 --- a/flake.nix +++ b/flake.nix @@ -43,15 +43,18 @@ cargo = rust; }); + cargo-xbuild = pkgs.cargo-xbuild.overrideAttrs(oa: { + postPatch = "substituteInPlace src/sysroot.rs --replace 2021 2018"; + }); + vivadoDeps = pkgs: with pkgs; let - # Apply patch from https://github.com/nix-community/nix-environments/pull/54 - # to fix ncurses libtinfo.so's soname issue + # Apply patch from https://github.com/nix-community/nix-environments/pull/54 + # to fix ncurses libtinfo.so's soname issue ncurses' = ncurses5.overrideAttrs (old: { configureFlags = old.configureFlags ++ [ "--with-termlib" ]; postFixup = ""; }); - in - [ + in [ libxcrypt-legacy (ncurses'.override { unicodeSupport = false; }) zlib @@ -263,7 +266,7 @@ nativeBuildInputs = [ (pkgs.python3.withPackages(ps: [ migen misoc (artiq.withExperimentalFeatures experimentalFeatures) ps.packaging ])) rust - pkgs.cargo-xbuild + cargo-xbuild pkgs.llvmPackages_14.clang-unwrapped pkgs.llvm_14 pkgs.lld_14 @@ -435,7 +438,7 @@ buildInputs = [ (pkgs.python3.withPackages(ps: with packages.x86_64-linux; [ migen misoc ps.paramiko microscope ps.packaging ] ++ artiq.propagatedBuildInputs )) rust - pkgs.cargo-xbuild + cargo-xbuild pkgs.llvmPackages_14.clang-unwrapped pkgs.llvm_14 pkgs.lld_14 @@ -463,7 +466,7 @@ buildInputs = [ (pkgs.python3.withPackages(ps: with packages.x86_64-linux; [ migen misoc artiq ps.packaging ])) rust - pkgs.cargo-xbuild + cargo-xbuild pkgs.llvmPackages_14.clang-unwrapped pkgs.llvm_14 pkgs.lld_14 From 49267671f99455cd396a845f4655a380df769c0d Mon Sep 17 00:00:00 2001 From: mwojcik Date: Mon, 4 Dec 2023 12:10:11 +0800 Subject: [PATCH 051/296] core: fix precompile --- artiq/coredevice/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/artiq/coredevice/core.py b/artiq/coredevice/core.py index 4ad85b780..7fc853bed 100644 --- a/artiq/coredevice/core.py +++ b/artiq/coredevice/core.py @@ -212,7 +212,7 @@ class Core: embedding_map, kernel_library, symbolizer, demangler, subkernel_arg_types = \ self.compile(function, args, kwargs, set_result, attribute_writeback=False) - self.compile_subkernels(embedding_map, args, subkernel_arg_types) + self.compile_and_upload_subkernels(embedding_map, args, subkernel_arg_types) @wraps(function) def run_precompiled(): From b09a39c82ed551da35d0db0a86034b72c570bdc3 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Fri, 8 Dec 2023 18:55:07 +0800 Subject: [PATCH 052/296] add aqctl_coreanalyzer_proxy --- artiq/examples/kasli/device_db.py | 7 ++ .../kasli_drtioswitching/device_db.py | 7 ++ artiq/examples/kasli_suservo/device_db.py | 7 ++ artiq/examples/kc705_nist_clock/device_db.py | 7 ++ artiq/frontend/aqctl_coreanalyzer_proxy.py | 113 ++++++++++++++++++ artiq/frontend/artiq_ddb_template.py | 7 ++ doc/manual/default_network_ports.rst | 4 + doc/manual/utilities.rst | 9 ++ setup.py | 1 + 9 files changed, 162 insertions(+) create mode 100644 artiq/frontend/aqctl_coreanalyzer_proxy.py diff --git a/artiq/examples/kasli/device_db.py 
b/artiq/examples/kasli/device_db.py index a0263ee24..34da8282f 100644 --- a/artiq/examples/kasli/device_db.py +++ b/artiq/examples/kasli/device_db.py @@ -22,6 +22,13 @@ device_db = { "port": 1384, "command": "aqctl_moninj_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr }, + "core_analyzer": { + "type": "controller", + "host": "::1", + "port_proxy": 1385, + "port": 1386, + "command": "aqctl_coreanalyzer_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr + }, "core_cache": { "type": "local", "module": "artiq.coredevice.cache", diff --git a/artiq/examples/kasli_drtioswitching/device_db.py b/artiq/examples/kasli_drtioswitching/device_db.py index f9486393d..1605e1988 100644 --- a/artiq/examples/kasli_drtioswitching/device_db.py +++ b/artiq/examples/kasli_drtioswitching/device_db.py @@ -20,6 +20,13 @@ device_db = { "port": 1384, "command": "aqctl_moninj_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr }, + "core_analyzer": { + "type": "controller", + "host": "::1", + "port_proxy": 1385, + "port": 1386, + "command": "aqctl_coreanalyzer_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr + }, "core_cache": { "type": "local", "module": "artiq.coredevice.cache", diff --git a/artiq/examples/kasli_suservo/device_db.py b/artiq/examples/kasli_suservo/device_db.py index c52b82a94..2b3bb8a08 100644 --- a/artiq/examples/kasli_suservo/device_db.py +++ b/artiq/examples/kasli_suservo/device_db.py @@ -20,6 +20,13 @@ device_db = { "port": 1384, "command": "aqctl_moninj_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr }, + "core_analyzer": { + "type": "controller", + "host": "::1", + "port_proxy": 1385, + "port": 1386, + "command": "aqctl_coreanalyzer_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr + }, "core_cache": { "type": "local", "module": "artiq.coredevice.cache", diff --git a/artiq/examples/kc705_nist_clock/device_db.py b/artiq/examples/kc705_nist_clock/device_db.py index 1930f584b..804d8a163 100644 --- a/artiq/examples/kc705_nist_clock/device_db.py +++ b/artiq/examples/kc705_nist_clock/device_db.py @@ -24,6 +24,13 @@ device_db = { "port": 1384, "command": "aqctl_moninj_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr }, + "core_analyzer": { + "type": "controller", + "host": "::1", + "port_proxy": 1385, + "port": 1386, + "command": "aqctl_coreanalyzer_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr + }, "core_cache": { "type": "local", "module": "artiq.coredevice.cache", diff --git a/artiq/frontend/aqctl_coreanalyzer_proxy.py b/artiq/frontend/aqctl_coreanalyzer_proxy.py new file mode 100644 index 000000000..fa92bca1d --- /dev/null +++ b/artiq/frontend/aqctl_coreanalyzer_proxy.py @@ -0,0 +1,113 @@ +import argparse +import asyncio +import atexit +import logging +import struct +from sipyco.asyncio_tools import AsyncioServer, SignalHandler, atexit_register_coroutine +from sipyco.pc_rpc import Server +from sipyco import common_args + +from artiq.coredevice.comm_analyzer import get_analyzer_dump + +logger = logging.getLogger(__name__) + + +# simplified version of sipyco Broadcaster +class ProxyServer(AsyncioServer): + def __init__(self, queue_limit=1024): + AsyncioServer.__init__(self) + self._recipients = set() + self._queue_limit = queue_limit + + async def _handle_connection_cr(self, reader, writer): + try: + queue = asyncio.Queue(self._queue_limit) 
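+                # each connected client gets its own bounded queue; request_dump_cb
+                # fans the length-prefixed dump out to every queue registered here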
+ self._recipients.add(queue) + try: + while True: + dump = await queue.get() + writer.write(dump) + # raise exception on connection error + await writer.drain() + finally: + self._recipients.remove(queue) + except (ConnectionResetError, ConnectionAbortedError, BrokenPipeError): + # receivers disconnecting are a normal occurence + pass + finally: + writer.close() + + def request_dump_cb(self, dump): + encoded_dump = struct.pack(">L", len(dump)) + dump + for recipient in self._recipients: + recipient.put_nowait(encoded_dump) + + +class ProxyControl: + def __init__(self, request_dump_cb, core_addr, core_port=1382): + self.request_dump_cb = request_dump_cb + self.core_addr = core_addr + self.core_port = core_port + + def ping(self): + return True + + def request_dump(self): + try: + dump = get_analyzer_dump(self.core_addr, self.core_port) + self.request_dump_cb(dump) + except: + logger.warning("Failed to get analyzer dump:", exc_info=1) + return False + else: + return True + + +def get_argparser(): + parser = argparse.ArgumentParser( + description="ARTIQ core analyzer proxy") + common_args.verbosity_args(parser) + common_args.simple_network_args(parser, [ + ("proxy", "proxying", 1385), + ("control", "control", 1386) + ]) + parser.add_argument("core_addr", metavar="CORE_ADDR", + help="hostname or IP address of the core device") + return parser + + +def main(): + args = get_argparser().parse_args() + common_args.init_logger_from_args(args) + + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + atexit.register(loop.close) + + signal_handler = SignalHandler() + signal_handler.setup() + atexit.register(signal_handler.teardown) + + bind_address = common_args.bind_address_from_args(args) + + proxy_server = ProxyServer() + loop.run_until_complete(proxy_server.start(bind_address, args.port_proxy)) + atexit_register_coroutine(proxy_server.stop, loop=loop) + + controller = ProxyControl(proxy_server.request_dump_cb, args.core_addr) + server = Server({"coreanalyzer_proxy_control": controller}, None, True) + loop.run_until_complete(server.start(bind_address, args.port_control)) + atexit_register_coroutine(server.stop, loop=loop) + + logger.info("ARTIQ core analyzer proxy is ready.") + + _, pending = loop.run_until_complete(asyncio.wait( + [loop.create_task(signal_handler.wait_terminate()), + loop.create_task(server.wait_terminate())], + return_when=asyncio.FIRST_COMPLETED)) + for task in pending: + task.cancel() + + +if __name__ == "__main__": + main() diff --git a/artiq/frontend/artiq_ddb_template.py b/artiq/frontend/artiq_ddb_template.py index eeca02c8e..c795c9073 100755 --- a/artiq/frontend/artiq_ddb_template.py +++ b/artiq/frontend/artiq_ddb_template.py @@ -70,6 +70,13 @@ def process_header(output, description): "port": 1384, "command": "aqctl_moninj_proxy --port-proxy {{port_proxy}} --port-control {{port}} --bind {{bind}} " + core_addr }}, + "core_analyzer": {{ + "type": "controller", + "host": "::1", + "port_proxy": 1385, + "port": 1386, + "command": "aqctl_coreanalyzer_proxy --port-proxy {{port_proxy}} --port-control {{port}} --bind {{bind}} " + core_addr + }}, "core_cache": {{ "type": "local", "module": "artiq.coredevice.cache", diff --git a/doc/manual/default_network_ports.rst b/doc/manual/default_network_ports.rst index 6ed9b4fbf..ddd8f0712 100644 --- a/doc/manual/default_network_ports.rst +++ b/doc/manual/default_network_ports.rst @@ -14,6 +14,10 @@ Default network ports +---------------------------------+--------------+ | Moninj (proxy control) | 1384 | 
+---------------------------------+--------------+ +| Core analyzer proxy (proxy) | 1385 | ++---------------------------------+--------------+ +| Core analyzer proxy (control) | 1386 | ++---------------------------------+--------------+ | Master (logging input) | 1066 | +---------------------------------+--------------+ | Master (broadcasts) | 1067 | diff --git a/doc/manual/utilities.rst b/doc/manual/utilities.rst index d7642113d..187ad39e4 100644 --- a/doc/manual/utilities.rst +++ b/doc/manual/utilities.rst @@ -139,6 +139,15 @@ Core device RTIO analyzer tool .. _routing-table-tool: +Core device RTIO analyzer proxy +------------------------------- + +:mod:`~artiq.frontend.aqctl_coreanalyzer_proxy` is a tool to distribute the core analyzer dump to several clients such as the dashboard. + +.. argparse:: + :ref: artiq.frontend.aqctl_coreanalyzer_proxy.get_argparser + :prog: aqctl_coreanalyzer_proxy + DRTIO routing table manipulation tool ------------------------------------- diff --git a/setup.py b/setup.py index a38815b22..f7436f582 100755 --- a/setup.py +++ b/setup.py @@ -33,6 +33,7 @@ console_scripts = [ "artiq_route = artiq.frontend.artiq_route:main", "artiq_run = artiq.frontend.artiq_run:main", "artiq_flash = artiq.frontend.artiq_flash:main", + "aqctl_coreanalyzer_proxy = artiq.frontend.aqctl_coreanalyzer_proxy:main", "aqctl_corelog = artiq.frontend.aqctl_corelog:main", "aqctl_moninj_proxy = artiq.frontend.aqctl_moninj_proxy:main", "afws_client = artiq.frontend.afws_client:main", From 05a9422e67e0cda4f2a6ecd875e6aa9f77fd683f Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Fri, 8 Dec 2023 18:56:10 +0800 Subject: [PATCH 053/296] aqctl_coreanalyzer_proxy: cleanup --- artiq/frontend/aqctl_coreanalyzer_proxy.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/artiq/frontend/aqctl_coreanalyzer_proxy.py b/artiq/frontend/aqctl_coreanalyzer_proxy.py index fa92bca1d..75de2da8c 100644 --- a/artiq/frontend/aqctl_coreanalyzer_proxy.py +++ b/artiq/frontend/aqctl_coreanalyzer_proxy.py @@ -2,13 +2,14 @@ import argparse import asyncio import atexit import logging -import struct + from sipyco.asyncio_tools import AsyncioServer, SignalHandler, atexit_register_coroutine from sipyco.pc_rpc import Server from sipyco import common_args from artiq.coredevice.comm_analyzer import get_analyzer_dump + logger = logging.getLogger(__name__) @@ -38,9 +39,8 @@ class ProxyServer(AsyncioServer): writer.close() def request_dump_cb(self, dump): - encoded_dump = struct.pack(">L", len(dump)) + dump for recipient in self._recipients: - recipient.put_nowait(encoded_dump) + recipient.put_nowait(dump) class ProxyControl: @@ -99,8 +99,6 @@ def main(): loop.run_until_complete(server.start(bind_address, args.port_control)) atexit_register_coroutine(server.stop, loop=loop) - logger.info("ARTIQ core analyzer proxy is ready.") - _, pending = loop.run_until_complete(asyncio.wait( [loop.create_task(signal_handler.wait_terminate()), loop.create_task(server.wait_terminate())], From be088626063f83238517ee1422ba8427d477e1e2 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Fri, 8 Dec 2023 19:34:47 +0800 Subject: [PATCH 054/296] logo: text to path --- artiq/gui/logo_ver.svg | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/artiq/gui/logo_ver.svg b/artiq/gui/logo_ver.svg index 4ebd46eed..b98176a18 100644 --- a/artiq/gui/logo_ver.svg +++ b/artiq/gui/logo_ver.svg @@ -1,7 +1,7 @@ 8 - + aria-label="7"> From a26cee6ca7386f1bde5fbba96115cff6a022b701 Mon Sep 17 
00:00:00 2001 From: Sebastien Bourdeauducq Date: Wed, 13 Dec 2023 13:07:35 +0800 Subject: [PATCH 055/296] coreanalyzer_proxy: cleanups/renames --- artiq/frontend/aqctl_coreanalyzer_proxy.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/artiq/frontend/aqctl_coreanalyzer_proxy.py b/artiq/frontend/aqctl_coreanalyzer_proxy.py index 75de2da8c..d2b09dd2a 100644 --- a/artiq/frontend/aqctl_coreanalyzer_proxy.py +++ b/artiq/frontend/aqctl_coreanalyzer_proxy.py @@ -15,7 +15,7 @@ logger = logging.getLogger(__name__) # simplified version of sipyco Broadcaster class ProxyServer(AsyncioServer): - def __init__(self, queue_limit=1024): + def __init__(self, queue_limit=8): AsyncioServer.__init__(self) self._recipients = set() self._queue_limit = queue_limit @@ -38,26 +38,26 @@ class ProxyServer(AsyncioServer): finally: writer.close() - def request_dump_cb(self, dump): + def distribute(self, dump): for recipient in self._recipients: recipient.put_nowait(dump) class ProxyControl: - def __init__(self, request_dump_cb, core_addr, core_port=1382): - self.request_dump_cb = request_dump_cb + def __init__(self, distribute_cb, core_addr, core_port=1382): + self.distribute_cb = distribute_cb self.core_addr = core_addr self.core_port = core_port def ping(self): return True - def request_dump(self): + def trigger(self): try: dump = get_analyzer_dump(self.core_addr, self.core_port) - self.request_dump_cb(dump) + self.distribute_cb(dump) except: - logger.warning("Failed to get analyzer dump:", exc_info=1) + logger.warning("Trigger failed:", exc_info=True) return False else: return True @@ -94,7 +94,7 @@ def main(): loop.run_until_complete(proxy_server.start(bind_address, args.port_proxy)) atexit_register_coroutine(proxy_server.stop, loop=loop) - controller = ProxyControl(proxy_server.request_dump_cb, args.core_addr) + controller = ProxyControl(proxy_server.distribute, args.core_addr) server = Server({"coreanalyzer_proxy_control": controller}, None, True) loop.run_until_complete(server.start(bind_address, args.port_control)) atexit_register_coroutine(server.stop, loop=loop) From 7a863b4f5e113aa31155045c7d1e66f37fa3c1d5 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Wed, 13 Dec 2023 13:08:54 +0800 Subject: [PATCH 056/296] core: add trigger_analyzer_proxy API --- artiq/coredevice/core.py | 26 ++++++++++++++++++- artiq/examples/kasli/device_db.py | 6 ++++- .../kasli_drtioswitching/device_db.py | 6 ++++- artiq/examples/kasli_suservo/device_db.py | 6 ++++- artiq/examples/kc705_nist_clock/device_db.py | 6 ++++- artiq/frontend/artiq_ddb_template.py | 8 +++++- 6 files changed, 52 insertions(+), 6 deletions(-) diff --git a/artiq/coredevice/core.py b/artiq/coredevice/core.py index 7fc853bed..9377af682 100644 --- a/artiq/coredevice/core.py +++ b/artiq/coredevice/core.py @@ -84,7 +84,10 @@ class Core: "core", "ref_period", "coarse_ref_period", "ref_multiplier", } - def __init__(self, dmgr, host, ref_period, ref_multiplier=8, + def __init__(self, dmgr, + host, ref_period, + analyzer_proxy=None, + ref_multiplier=8, target="rv32g", satellite_cpu_targets={}): self.ref_period = ref_period self.ref_multiplier = ref_multiplier @@ -95,6 +98,10 @@ class Core: self.comm = CommKernelDummy() else: self.comm = CommKernel(host) + if analyzer_proxy is None: + self.analyzer_proxy = None + else: + self.analyzer_proxy = dmgr.get(analyzer_proxy) self.first_run = True self.dmgr = dmgr @@ -288,3 +295,20 @@ class Core: min_now = rtio_get_counter() + 125000 if now_mu() < min_now: at_mu(min_now) + + def 
trigger_analyzer_proxy(self): + """Causes the core analyzer proxy to retrieve a dump from the device, + and distribute it to all connected clients (typically dashboards). + + Returns only after the dump has been retrieved from the device. + + Raises IOError if no analyzer proxy has been configured, or if the + analyzer proxy fails. In the latter case, more details would be + available in the proxy log. + """ + if self.analyzer_proxy is None: + raise IOError("No analyzer proxy configured") + else: + success = self.analyzer_proxy.trigger() + if not success: + raise IOError("Analyzer proxy reported failure") diff --git a/artiq/examples/kasli/device_db.py b/artiq/examples/kasli/device_db.py index 34da8282f..274fed61e 100644 --- a/artiq/examples/kasli/device_db.py +++ b/artiq/examples/kasli/device_db.py @@ -7,7 +7,11 @@ device_db = { "type": "local", "module": "artiq.coredevice.core", "class": "Core", - "arguments": {"host": core_addr, "ref_period": 1e-9} + "arguments": { + "host": core_addr, + "ref_period": 1e-9, + "analyzer_proxy": "core_analyzer" + } }, "core_log": { "type": "controller", diff --git a/artiq/examples/kasli_drtioswitching/device_db.py b/artiq/examples/kasli_drtioswitching/device_db.py index 1605e1988..43dc22a0f 100644 --- a/artiq/examples/kasli_drtioswitching/device_db.py +++ b/artiq/examples/kasli_drtioswitching/device_db.py @@ -5,7 +5,11 @@ device_db = { "type": "local", "module": "artiq.coredevice.core", "class": "Core", - "arguments": {"host": core_addr, "ref_period": 1/(8*150e6)} + "arguments": { + "host": core_addr, + "ref_period": 1e-9, + "analyzer_proxy": "core_analyzer" + } }, "core_log": { "type": "controller", diff --git a/artiq/examples/kasli_suservo/device_db.py b/artiq/examples/kasli_suservo/device_db.py index 2b3bb8a08..ded4ada96 100644 --- a/artiq/examples/kasli_suservo/device_db.py +++ b/artiq/examples/kasli_suservo/device_db.py @@ -5,7 +5,11 @@ device_db = { "type": "local", "module": "artiq.coredevice.core", "class": "Core", - "arguments": {"host": core_addr, "ref_period": 1e-9} + "arguments": { + "host": core_addr, + "ref_period": 1e-9, + "analyzer_proxy": "core_analyzer" + } }, "core_log": { "type": "controller", diff --git a/artiq/examples/kc705_nist_clock/device_db.py b/artiq/examples/kc705_nist_clock/device_db.py index 804d8a163..f21d1c5ef 100644 --- a/artiq/examples/kc705_nist_clock/device_db.py +++ b/artiq/examples/kc705_nist_clock/device_db.py @@ -9,7 +9,11 @@ device_db = { "type": "local", "module": "artiq.coredevice.core", "class": "Core", - "arguments": {"host": core_addr, "ref_period": 1e-9} + "arguments": { + "host": core_addr, + "ref_period": 1e-9, + "analyzer_proxy": "core_analyzer" + } }, "core_log": { "type": "controller", diff --git a/artiq/frontend/artiq_ddb_template.py b/artiq/frontend/artiq_ddb_template.py index c795c9073..e55637d37 100755 --- a/artiq/frontend/artiq_ddb_template.py +++ b/artiq/frontend/artiq_ddb_template.py @@ -55,7 +55,13 @@ def process_header(output, description): "type": "local", "module": "artiq.coredevice.core", "class": "Core", - "arguments": {{"host": core_addr, "ref_period": {ref_period}, "target": "{cpu_target}", "satellite_cpu_targets": {{}} }}, + "arguments": {{ + "host": core_addr, + "ref_period": {ref_period}, + "analyzer_proxy": "core_analyzer" + "target": "{cpu_target}", + "satellite_cpu_targets": {{}} + }}, }}, "core_log": {{ "type": "controller", From 85850ad9e8e76d4f623bad656460490b0ebfe4b6 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Wed, 13 Dec 2023 13:36:21 +0800 Subject: [PATCH 
057/296] wavesynth: remove --- RELEASE_NOTES.rst | 1 + artiq/test/test_wavesynth.py | 127 ---------------- artiq/wavesynth/__init__.py | 0 artiq/wavesynth/coefficients.py | 234 ----------------------------- artiq/wavesynth/compute_samples.py | 133 ---------------- 5 files changed, 1 insertion(+), 494 deletions(-) delete mode 100644 artiq/test/test_wavesynth.py delete mode 100644 artiq/wavesynth/__init__.py delete mode 100644 artiq/wavesynth/coefficients.py delete mode 100644 artiq/wavesynth/compute_samples.py diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index c80d85a18..15ce87197 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -94,6 +94,7 @@ Accesses to the data argument should be replaced as below: txn.put(key.encode(), pyon.encode((value, {})).encode()) new.close() +* ``artiq.wavesynth`` has been removed. ARTIQ-7 ------- diff --git a/artiq/test/test_wavesynth.py b/artiq/test/test_wavesynth.py deleted file mode 100644 index c8323ba5b..000000000 --- a/artiq/test/test_wavesynth.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (C) 2014, 2015 Robert Jordens - -import unittest - -from artiq.wavesynth import compute_samples - - -class TestSynthesizer(unittest.TestCase): - program = [ - [ - # frame 0 - { - # frame 0, segment 0, line 0 - "dac_divider": 1, - "duration": 100, - "channel_data": [ - { - # channel 0 - "dds": {"amplitude": [0.0, 0.0, 0.01], - "phase": [0.0, 0.0, 0.0005], - "clear": False} - } - ], - "trigger": True - }, - { - # frame 0, segment 0, line 1 - "dac_divider": 1, - "duration": 100, - "channel_data": [ - { - # channel 0 - "dds": {"amplitude": [49.5, 1.0, -0.01], - "phase": [0.0, 0.05, 0.0005], - "clear": False} - } - ], - "trigger": False - }, - ], - [ - # frame 1 - { - # frame 1, segment 0, line 0 - "dac_divider": 1, - "duration": 100, - "channel_data": [ - { - # channel 0 - "dds": {"amplitude": [100.0, 0.0, -0.01], - "phase": [0.0, 0.1, -0.0005], - "clear": False} - } - ], - "trigger": True - }, - { - # frame 1, segment 0, line 1 - "dac_divider": 1, - "duration": 100, - "channel_data": [ - { - # channel 0 - "dds": {"amplitude": [50.5, -1.0, 0.01], - "phase": [0.0, 0.05, -0.0005], - "clear": False} - } - ], - "trigger": False - } - ], - [ - # frame 2 - { - # frame 2, segment 0, line 0 - "dac_divider": 1, - "duration": 84, - "channel_data": [ - { - # channel 0 - "dds": {"amplitude": [100.0], - "phase": [0.0, 0.05], - "clear": False} - } - ], - "trigger": True - }, - { - # frame 2, segment 1, line 0 - "dac_divider": 1, - "duration": 116, - "channel_data": [ - { - # channel 0 - "dds": {"amplitude": [100.0], - "phase": [0.0, 0.05], - "clear": True} - } - ], - "trigger": True - } - ] - ] - - def setUp(self): - self.dev = compute_samples.Synthesizer(1, self.program) - self.t = list(range(600)) - - def drive(self): - s = self.dev - y = [] - for f in 0, 2, None, 1: - if f is not None: - s.select(f) - y += s.trigger()[0] - x = list(range(600)) - return x, y - - def test_run(self): - x, y = self.drive() - - @unittest.skip("manual/visual test") - def test_plot(self): - from matplotlib import pyplot as plt - x, y = self.drive() - plt.plot(x, y) - plt.show() diff --git a/artiq/wavesynth/__init__.py b/artiq/wavesynth/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/artiq/wavesynth/coefficients.py b/artiq/wavesynth/coefficients.py deleted file mode 100644 index e70a4080e..000000000 --- a/artiq/wavesynth/coefficients.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright (C) 2014, 2015 Robert Jordens - -import numpy as np -from scipy.interpolate import 
splrep, splev, spalde - - -class UnivariateMultiSpline: - """Multidimensional wrapper around `scipy.interpolate.sp*` functions. - `scipy.inteprolate.splprep` is limited to 12 dimensions. - """ - def __init__(self, x, y, *, x0=None, order=4, **kwargs): - self.order = order - self.x = x - self.s = [] - for i, yi in enumerate(y): - if x0 is not None: - yi = self.upsample_knots(x0[i], yi, x) - self.s.append(splrep(x, yi, k=order - 1, **kwargs)) - - def upsample_knots(self, x0, y0, x): - return splev(x, splrep(x0, y0, k=self.order - 1)) - - def lev(self, x, *a, **k): - return np.array([splev(x, si) for si in self.s]) - - def alde(self, x): - u = np.array([spalde(x, si) for si in self.s]) - if len(x) == 1: - u = u[:, None, :] - return u - - def __call__(self, x, use_alde=True): - if use_alde: - u = self.alde(x)[:, :, :self.order] - s = (len(self.s), len(x), self.order) - assert u.shape == s, (u.shape, s) - return u.transpose(2, 0, 1) - else: - return np.array([self.lev(x, der=i) for i in range(self.order)]) - - -def pad_const(x, n, axis=0): - """Prefix and postfix the array `x` by `n` repetitions of the first and - last value along `axis`. - """ - a = np.repeat(x.take([0], axis), n, axis) - b = np.repeat(x.take([-1], axis), n, axis) - xp = np.concatenate([a, x, b], axis) - s = list(x.shape) - s[axis] += 2*n - assert xp.shape == tuple(s), (x.shape, s, xp.shape) - return xp - - -def build_segment(durations, coefficients, target="bias", - variable="amplitude", compress=True): - """Build a wavesynth-style segment from homogeneous duration and - coefficient data. - - :param durations: 1D sequence of line durations. - :param coefficients: 3D array with shape `(n, m, len(durations))`, - with `n` being the interpolation order + 1 and `m` the number of - channels. - :param target: The target component of the channel to affect. - :param variable: The variable within the target component. - :param compress: If `True`, skip zero high order coefficients. - """ - for dxi, yi in zip(durations, coefficients.transpose()): - cd = [] - for yij in yi: - cdj = [] - for yijk in reversed(yij): - if cdj or abs(yijk) or not compress: - cdj.append(float(yijk)) - cdj.reverse() - if not cdj: - cdj.append(float(yij[0])) - cd.append({target: {variable: cdj}}) - yield {"duration": int(dxi), "channel_data": cd} - - -class CoefficientSource: - def crop_x(self, start, stop, num=2): - """Return an array of valid sample positions. - - This method needs to be overloaded if this `CoefficientSource` - does not support sampling at arbitrary positions or at arbitrary - density. - - :param start: First sample position. - :param stop: Last sample position. - :param num: Number of samples between `start` and `stop`. - :return: Array of sample positions. `start` and `stop` should be - returned as the first and last value in the array respectively. - """ - return np.linspace(start, stop, num) - - def scale_x(self, x, scale): - # TODO: This could be moved to the the Driver/Mediator code as it is - # device-specific. - """Scale and round sample positions. - - The sample times may need to be changed and/or decimated if - incompatible with hardware requirements. - - :param x: Input sample positions in data space. - :param scale: Data space position to cycles conversion scale, - in units of x-units per clock cycle. - :return: `x_sample`, the rounded sample positions and `durations`, the - integer durations of the individual samples in cycles. 
- """ - t = np.rint(x/scale) - x_sample = t*scale - durations = np.diff(t).astype(int) - return x_sample, durations - - def __call__(self, x, **kwargs): - """Perform sampling and return coefficients. - - :param x: Sample positions. - :return: `y` the array of coefficients. `y.shape == (order, n, len(x))` - with `n` being the number of channels.""" - raise NotImplementedError - - def get_segment(self, start, stop, scale, *, cutoff=1e-12, - target="bias", variable="amplitude"): - """Build wavesynth segment. - - :param start: see `crop_x()`. - :param stop: see `crop_x()`. - :param scale: see `scale_x()`. - :param cutoff: coefficient cutoff towards zero to compress data. - """ - x = self.crop_x(start, stop) - x_sample, durations = self.scale_x(x, scale) - coefficients = self(x_sample) - if len(x_sample) == 1 and start == stop: - coefficients = coefficients[:1] - # rescale coefficients accordingly - coefficients *= (scale*np.sign(durations))**np.arange( - coefficients.shape[0])[:, None, None] - if cutoff: - coefficients[np.fabs(coefficients) < cutoff] = 0 - return build_segment(np.fabs(durations), coefficients, target=target, - variable=variable) - - def extend_segment(self, segment, *args, **kwargs): - """Extend a wavesynth segment. - - See `get_segment()` for arguments. - """ - for line in self.get_segment(*args, **kwargs): - segment.add_line(**line) - - -class SplineSource(CoefficientSource): - def __init__(self, x, y, order=4, pad_dx=1.): - """ - :param x: 1D sample positions. - :param y: 2D sample values. - """ - self.x = np.asanyarray(x) - assert self.x.ndim == 1 - self.y = np.asanyarray(y) - assert self.y.ndim == 2 - - if pad_dx is not None: - a = np.arange(-order, 0)*pad_dx + self.x[0] - b = self.x[-1] + np.arange(1, order + 1)*pad_dx - self.x = np.r_[a, self.x, b] - self.y = pad_const(self.y, order, axis=1) - - assert self.y.shape[1] == self.x.shape[0] - self.spline = UnivariateMultiSpline(self.x, self.y, order=order) - - def crop_x(self, start, stop): - ia, ib = np.searchsorted(self.x, (start, stop)) - if start > stop: - x = self.x[ia - 1:ib - 1:-1] - else: - x = self.x[ia:ib] - return np.r_[start, x, stop] - - def scale_x(self, x, scale, min_duration=1, min_length=20): - """Enforce, round, and scale x to device-dependent values. - - Due to minimum duration and/or minimum segment length constraints - this method may drop samples from `x_sample` to comply. - - :param min_duration: Minimum duration of a line. - :param min_length: Minimum segment length to space triggers. - """ - # We want to only sample a spline at t_knot + epsilon - # where the highest order derivative has just jumped - # and is valid at least up to the next knot after t_knot. - # - # To ensure that we are on the correct side of a knot: - # * only ever increase t when rounding (for increasing t) - # * or only ever decrease it (for decreasing t) - t = x/scale - inc = np.diff(t) >= 0 - inc = np.r_[inc, inc[-1]] - t = np.where(inc, np.ceil(t), np.floor(t)) - dt = np.diff(t.astype(int)) - - valid = np.absolute(dt) >= min_duration - if not np.any(valid): - valid[0] = True - dt[0] = max(dt[0], min_length) - dt = dt[valid] - x_sample = t[:-1][valid]*scale - return x_sample, dt - - def __call__(self, x): - return self.spline(x) - - -def discrete_compensate(c): - """Compensate spline coefficients for discrete accumulators - - Given continuous-time b-spline coefficients, this function - compensates for the effect of discrete time steps in the - target devices. - - The compensation is performed in-place. 
- """ - l = len(c) - if l > 2: - c[1] += c[2]/2. - if l > 3: - c[1] += c[3]/6. - c[2] += c[3] - if l > 4: - raise ValueError("only third-order splines supported") diff --git a/artiq/wavesynth/compute_samples.py b/artiq/wavesynth/compute_samples.py deleted file mode 100644 index 9b80f8298..000000000 --- a/artiq/wavesynth/compute_samples.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (C) 2014, 2015 M-Labs Limited -# Copyright (C) 2014, 2015 Robert Jordens - -from copy import copy -from math import cos, pi - -from artiq.wavesynth.coefficients import discrete_compensate - - -class Spline: - def __init__(self): - self.c = [0.0] - - def set_coefficients(self, c): - if not c: - c = [0.] - self.c = copy(c) - discrete_compensate(self.c) - - def next(self): - r = self.c[0] - for i in range(len(self.c) - 1): - self.c[i] += self.c[i + 1] - return r - - -class SplinePhase: - def __init__(self): - self.c = [0.0] - self.c0 = 0.0 - - def set_coefficients(self, c): - if not c: - c = [0.] - self.c0 = c[0] - c1p = c[1:] - discrete_compensate(c1p) - self.c[1:] = c1p - - def clear(self): - self.c[0] = 0.0 - - def next(self): - r = self.c[0] - for i in range(len(self.c) - 1): - self.c[i] += self.c[i + 1] - self.c[i] %= 1.0 - return r + self.c0 - - -class DDS: - def __init__(self): - self.amplitude = Spline() - self.phase = SplinePhase() - - def next(self): - return self.amplitude.next()*cos(2*pi*self.phase.next()) - - -class Channel: - def __init__(self): - self.bias = Spline() - self.dds = DDS() - self.v = 0. - self.silence = False - - def next(self): - v = self.bias.next() + self.dds.next() - if not self.silence: - self.v = v - return self.v - - def set_silence(self, s): - self.silence = s - - -class TriggerError(Exception): - pass - - -class Synthesizer: - def __init__(self, nchannels, program): - self.channels = [Channel() for _ in range(nchannels)] - self.program = program - # line_iter is None: "wait for segment selection" state - # otherwise: iterator on the current position in the frame - self.line_iter = None - - def select(self, selection): - if self.line_iter is not None: - raise TriggerError("a frame is already selected") - self.line_iter = iter(self.program[selection]) - self.line = next(self.line_iter) - - def trigger(self): - if self.line_iter is None: - raise TriggerError("no frame selected") - - line = self.line - if not line.get("trigger", False): - raise TriggerError("segment is not triggered") - - r = [[] for _ in self.channels] - while True: - for channel, channel_data in zip(self.channels, - line["channel_data"]): - channel.set_silence(channel_data.get("silence", False)) - if "bias" in channel_data: - channel.bias.set_coefficients( - channel_data["bias"]["amplitude"]) - if "dds" in channel_data: - channel.dds.amplitude.set_coefficients( - channel_data["dds"]["amplitude"]) - if "phase" in channel_data["dds"]: - channel.dds.phase.set_coefficients( - channel_data["dds"]["phase"]) - if channel_data["dds"].get("clear", False): - channel.dds.phase.clear() - - if line.get("dac_divider", 1) != 1: - raise NotImplementedError - - for channel, rc in zip(self.channels, r): - for i in range(line["duration"]): - rc.append(channel.next()) - - try: - self.line = line = next(self.line_iter) - if line.get("trigger", False): - return r - except StopIteration: - self.line_iter = None - return r From 402a5d3376a8479e284b02af507fde5c9ae74cab Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Wed, 13 Dec 2023 13:46:45 +0800 Subject: [PATCH 058/296] core: connect lazily to analyzer proxy Otherwise 
artiq_compile and other uses of Core that does not access hardware/network may fail. --- artiq/coredevice/core.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/artiq/coredevice/core.py b/artiq/coredevice/core.py index 9377af682..10af8e3dd 100644 --- a/artiq/coredevice/core.py +++ b/artiq/coredevice/core.py @@ -98,15 +98,13 @@ class Core: self.comm = CommKernelDummy() else: self.comm = CommKernel(host) - if analyzer_proxy is None: - self.analyzer_proxy = None - else: - self.analyzer_proxy = dmgr.get(analyzer_proxy) + self.analyzer_proxy_name = analyzer_proxy self.first_run = True self.dmgr = dmgr self.core = self self.comm.core = self + self.analyzer_proxy = None def close(self): self.comm.close() @@ -306,6 +304,9 @@ class Core: analyzer proxy fails. In the latter case, more details would be available in the proxy log. """ + if self.analyzer_proxy is None: + if self.analyzer_proxy_name is not None: + self.analyzer_proxy = self.dmgr.get(self.analyzer_proxy_name) if self.analyzer_proxy is None: raise IOError("No analyzer proxy configured") else: From 795c4372fa528dd34885dc1ad7e25bd8d4d495f3 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Wed, 13 Dec 2023 14:06:53 +0800 Subject: [PATCH 059/296] DeviceManager: fix close exception error message --- artiq/master/worker_db.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/artiq/master/worker_db.py b/artiq/master/worker_db.py index 5da202b2f..18e51f726 100644 --- a/artiq/master/worker_db.py +++ b/artiq/master/worker_db.py @@ -101,8 +101,9 @@ class DeviceManager: dev.close_rpc() elif hasattr(dev, "close"): dev.close() - except Exception as e: - logger.warning("Exception %r when closing device %r", e, dev) + except: + logger.warning("Exception raised when closing device %r:", + dev, exc_info=True) self.active_devices.clear() From ede0b37c6e0753aa8ca9ade9261b5fd5723d8825 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Wed, 13 Dec 2023 14:27:04 +0800 Subject: [PATCH 060/296] devices: introduce notify_run_end API --- artiq/frontend/artiq_run.py | 1 + artiq/frontend/artiq_sinara_tester.py | 1 + artiq/master/worker_db.py | 7 +++++++ artiq/master/worker_impl.py | 2 ++ artiq/test/hardware_testbench.py | 1 + 5 files changed, 12 insertions(+) diff --git a/artiq/frontend/artiq_run.py b/artiq/frontend/artiq_run.py index f0a28e91c..ae3e5f1c7 100755 --- a/artiq/frontend/artiq_run.py +++ b/artiq/frontend/artiq_run.py @@ -222,6 +222,7 @@ def run(with_file=False): exp_inst = _build_experiment(device_mgr, dataset_mgr, args) exp_inst.prepare() exp_inst.run() + device_mgr.notify_run_end() exp_inst.analyze() except CompileError as error: return diff --git a/artiq/frontend/artiq_sinara_tester.py b/artiq/frontend/artiq_sinara_tester.py index eaee07441..e2d48b903 100755 --- a/artiq/frontend/artiq_sinara_tester.py +++ b/artiq/frontend/artiq_sinara_tester.py @@ -804,6 +804,7 @@ def main(): experiment = SinaraTester((device_mgr, None, None, None)) experiment.prepare() experiment.run(tests) + device_mgr.notify_run_end() experiment.analyze() finally: device_mgr.close_devices() diff --git a/artiq/master/worker_db.py b/artiq/master/worker_db.py index 18e51f726..a52b63a1f 100644 --- a/artiq/master/worker_db.py +++ b/artiq/master/worker_db.py @@ -92,6 +92,13 @@ class DeviceManager: self.active_devices.append((desc, dev)) return dev + def notify_run_end(self): + """Sends a "end of Experiment run stage" notification to + all active devices.""" + for _desc, dev in self.active_devices: + if hasattr(dev, 
"notify_run_end"): + dev.notify_run_end() + def close_devices(self): """Closes all active devices, in the opposite order as they were requested.""" diff --git a/artiq/master/worker_impl.py b/artiq/master/worker_impl.py index 7c5ad2c2f..7ecb8da10 100644 --- a/artiq/master/worker_impl.py +++ b/artiq/master/worker_impl.py @@ -343,6 +343,8 @@ def main(): # for end of analyze stage. write_results() raise + finally: + device_mgr.notify_run_end() put_completed() elif action == "analyze": try: diff --git a/artiq/test/hardware_testbench.py b/artiq/test/hardware_testbench.py index 05e925d0c..55a4d33cb 100644 --- a/artiq/test/hardware_testbench.py +++ b/artiq/test/hardware_testbench.py @@ -55,6 +55,7 @@ class ExperimentCase(unittest.TestCase): try: exp = self.create(cls, *args, **kwargs) exp.run() + self.device_mgr.notify_run_end() exp.analyze() return exp except CompileError as error: From c2b53ecb4328805e9d0da02f04a99004dfeeb02d Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Wed, 13 Dec 2023 14:27:48 +0800 Subject: [PATCH 061/296] core: add option to trigger analyzer proxy at run end --- artiq/coredevice/core.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/artiq/coredevice/core.py b/artiq/coredevice/core.py index 10af8e3dd..a29e6018b 100644 --- a/artiq/coredevice/core.py +++ b/artiq/coredevice/core.py @@ -86,7 +86,7 @@ class Core: def __init__(self, dmgr, host, ref_period, - analyzer_proxy=None, + analyzer_proxy=None, analyze_at_run_end=False, ref_multiplier=8, target="rv32g", satellite_cpu_targets={}): self.ref_period = ref_period @@ -99,6 +99,7 @@ class Core: else: self.comm = CommKernel(host) self.analyzer_proxy_name = analyzer_proxy + self.analyze_at_run_end = analyze_at_run_end self.first_run = True self.dmgr = dmgr @@ -106,6 +107,10 @@ class Core: self.comm.core = self self.analyzer_proxy = None + def notify_run_end(self): + if self.analyze_at_run_end: + self.trigger_analyzer_proxy() + def close(self): self.comm.close() From 413d33c3d11bf6b59175ea3c1fa8783341659465 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Wed, 13 Dec 2023 14:29:33 +0800 Subject: [PATCH 062/296] core: document analyzer proxy options --- artiq/coredevice/core.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/artiq/coredevice/core.py b/artiq/coredevice/core.py index a29e6018b..acc1e9c8d 100644 --- a/artiq/coredevice/core.py +++ b/artiq/coredevice/core.py @@ -78,6 +78,10 @@ class Core: :param ref_multiplier: ratio between the RTIO fine timestamp frequency and the RTIO coarse timestamp frequency (e.g. SERDES multiplication factor). + :param analyzer_proxy: name of the core device analyzer proxy to trigger + (optional). + :param analyze_at_run_end: automatically trigger the core device analyzer + proxy after the Experiment's run stage finishes. 
""" kernel_invariants = { From d96213dbbcb157c0d094437d471341b8ef0138e0 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Sun, 17 Dec 2023 12:55:36 +0800 Subject: [PATCH 063/296] flake: update dependencies --- flake.lock | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/flake.lock b/flake.lock index 85fe4ae93..0e322b965 100644 --- a/flake.lock +++ b/flake.lock @@ -11,11 +11,11 @@ ] }, "locked": { - "lastModified": 1693473687, - "narHash": "sha256-BdLddCWbvoEyakcGwhph9b5dIU1iA0hCQV7KYgU8nos=", + "lastModified": 1701573753, + "narHash": "sha256-vhEtXjb9AM6/HnsgfVmhJQeqQ9JqysUm7iWNzTIbexs=", "owner": "m-labs", "repo": "artiq-comtools", - "rev": "f522ef3dbc65961f17b2d3d41e927409d970fd79", + "rev": "199bdabf4de49cb7ada8a4ac7133008e0f8434b7", "type": "github" }, "original": { @@ -29,11 +29,11 @@ "systems": "systems" }, "locked": { - "lastModified": 1692799911, - "narHash": "sha256-3eihraek4qL744EvQXsK1Ha6C3CR7nnT8X2qWap4RNk=", + "lastModified": 1694529238, + "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", "owner": "numtide", "repo": "flake-utils", - "rev": "f9e7cf818399d17d347f847525c5a5a8032e4e44", + "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", "type": "github" }, "original": { @@ -60,11 +60,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1701389149, - "narHash": "sha256-rU1suTIEd5DGCaAXKW6yHoCfR1mnYjOXQFOaH7M23js=", + "lastModified": 1702346276, + "narHash": "sha256-eAQgwIWApFQ40ipeOjVSoK4TEHVd6nbSd9fApiHIw5A=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "5de0b32be6e85dc1a9404c75131316e4ffbc634c", + "rev": "cf28ee258fd5f9a52de6b9865cdb93a1f96d09b7", "type": "github" }, "original": { From 133b26b6ce640b90f1962ef738b597542578cd9f Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Sun, 17 Dec 2023 13:05:16 +0800 Subject: [PATCH 064/296] flake: add ARTIQ sources to PYTHONPATH in devshell --- flake.nix | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/flake.nix b/flake.nix index 42964e9ee..4219c44f5 100644 --- a/flake.nix +++ b/flake.nix @@ -431,8 +431,7 @@ defaultPackage.x86_64-linux = pkgs.python3.withPackages(ps: [ packages.x86_64-linux.artiq ]); # Main development shell with everything you need to develop ARTIQ on Linux. - # ARTIQ itself is not included in the environment, you can make Python use the current sources using e.g. - # export PYTHONPATH=`pwd`:$PYTHONPATH + # The current copy of the ARTIQ sources is added to PYTHONPATH so changes can be tested instantly. 
devShells.x86_64-linux.default = pkgs.mkShell { name = "artiq-dev-shell"; buildInputs = [ @@ -442,6 +441,7 @@ pkgs.llvmPackages_14.clang-unwrapped pkgs.llvm_14 pkgs.lld_14 + pkgs.git # To manually run compiler tests: pkgs.lit outputcheck @@ -457,6 +457,7 @@ export LIBARTIQ_SUPPORT=`libartiq-support` export QT_PLUGIN_PATH=${pkgs.qt5.qtbase}/${pkgs.qt5.qtbase.dev.qtPluginPrefix}:${pkgs.qt5.qtsvg.bin}/${pkgs.qt5.qtbase.dev.qtPluginPrefix} export QML2_IMPORT_PATH=${pkgs.qt5.qtbase}/${pkgs.qt5.qtbase.dev.qtQmlPrefix} + export PYTHONPATH=`git rev-parse --show-toplevel`:$PYTHONPATH ''; }; From 858f0479baa2d362e7576a069413e6451e084892 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Sun, 17 Dec 2023 13:27:38 +0800 Subject: [PATCH 065/296] aqctl_coreanalyzer_proxy: permissions and shebang --- artiq/frontend/aqctl_coreanalyzer_proxy.py | 2 ++ 1 file changed, 2 insertions(+) mode change 100644 => 100755 artiq/frontend/aqctl_coreanalyzer_proxy.py diff --git a/artiq/frontend/aqctl_coreanalyzer_proxy.py b/artiq/frontend/aqctl_coreanalyzer_proxy.py old mode 100644 new mode 100755 index d2b09dd2a..ec9891423 --- a/artiq/frontend/aqctl_coreanalyzer_proxy.py +++ b/artiq/frontend/aqctl_coreanalyzer_proxy.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import argparse import asyncio import atexit From 645b9b8c5fa1555eddec001d8845e2e36e9fa165 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Sun, 17 Dec 2023 13:41:49 +0800 Subject: [PATCH 066/296] flake: add executable wrappers for frontends to devshell --- flake.nix | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/flake.nix b/flake.nix index 4219c44f5..f05bddb82 100644 --- a/flake.nix +++ b/flake.nix @@ -368,6 +368,20 @@ titlesec tabulary varwidth framed fancyvrb float wrapfig parskip upquote capt-of needspace etoolbox booktabs; }; + + artiq-frontend-dev-wrappers = pkgs.runCommandNoCC "artiq-frontend-dev-wrappers" {} + '' + mkdir -p $out/bin + for program in ${self}/artiq/frontend/*.py; do + if [ -x $program ]; then + progname=`basename -s .py $program` + outname=$out/bin/$progname + echo "#!${pkgs.bash}/bin/bash" >> $outname + echo "exec python3 -m artiq.frontend.$progname" >> $outname + chmod 755 $outname + fi + done + ''; in rec { packages.x86_64-linux = { inherit pythonparser qasync artiq; @@ -432,6 +446,8 @@ # Main development shell with everything you need to develop ARTIQ on Linux. # The current copy of the ARTIQ sources is added to PYTHONPATH so changes can be tested instantly. + # Additionally, executable wrappers that import the current ARTIQ sources for the ARTIQ frontends + # are added to PATH. 
devShells.x86_64-linux.default = pkgs.mkShell { name = "artiq-dev-shell"; buildInputs = [ @@ -442,6 +458,7 @@ pkgs.llvm_14 pkgs.lld_14 pkgs.git + artiq-frontend-dev-wrappers # To manually run compiler tests: pkgs.lit outputcheck From 44a95b5ddaded40c25043702411d068d9dbd4ab8 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Sun, 17 Dec 2023 16:37:02 +0800 Subject: [PATCH 067/296] dashboard: add repository revision clear button --- artiq/dashboard/experiments.py | 1 + 1 file changed, 1 insertion(+) diff --git a/artiq/dashboard/experiments.py b/artiq/dashboard/experiments.py index 74c3496a9..762a85df2 100644 --- a/artiq/dashboard/experiments.py +++ b/artiq/dashboard/experiments.py @@ -333,6 +333,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): if "repo_rev" in options: repo_rev = QtWidgets.QLineEdit() repo_rev.setPlaceholderText("current") + repo_rev.setClearButtonEnabled(True) repo_rev_label = QtWidgets.QLabel("Revision:") repo_rev_label.setToolTip("Experiment repository revision " "(commit ID) to use") From 63260510529166f875b5e01b8c7959987806d9d4 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Sun, 17 Dec 2023 19:42:56 +0800 Subject: [PATCH 068/296] flake: forward cmdline arguments in devshell wrappers --- flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index f05bddb82..19e5ab65d 100644 --- a/flake.nix +++ b/flake.nix @@ -377,7 +377,7 @@ progname=`basename -s .py $program` outname=$out/bin/$progname echo "#!${pkgs.bash}/bin/bash" >> $outname - echo "exec python3 -m artiq.frontend.$progname" >> $outname + echo "exec python3 -m artiq.frontend.$progname \"\$@\"" >> $outname chmod 755 $outname fi done From 5df07218117819352cb8b35c10056858d66ad065 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Sun, 17 Dec 2023 19:43:41 +0800 Subject: [PATCH 069/296] dashboard,client: add device argument overrides to expid --- artiq/dashboard/experiments.py | 30 ++++++++++++++++++++++++++++-- artiq/frontend/artiq_client.py | 6 +++++- artiq/tools.py | 27 ++++++++++++++++++++++++++- 3 files changed, 59 insertions(+), 4 deletions(-) diff --git a/artiq/dashboard/experiments.py b/artiq/dashboard/experiments.py index 762a85df2..8526fdf32 100644 --- a/artiq/dashboard/experiments.py +++ b/artiq/dashboard/experiments.py @@ -13,6 +13,7 @@ from artiq.gui.entries import procdesc_to_entry, ScanEntry from artiq.gui.fuzzy_select import FuzzySelectWidget from artiq.gui.tools import (LayoutWidget, WheelFilter, log_level_to_name, get_open_file_name) +from artiq.tools import parse_devarg_override, unparse_devarg_override logger = logging.getLogger(__name__) @@ -305,7 +306,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): flush = self.flush flush.setToolTip("Flush the pipeline (of current- and higher-priority " "experiments) before starting the experiment") - self.layout.addWidget(flush, 2, 2, 1, 2) + self.layout.addWidget(flush, 2, 2) flush.setChecked(scheduling["flush"]) @@ -313,6 +314,20 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): scheduling["flush"] = bool(checked) flush.stateChanged.connect(update_flush) + devarg_override = QtWidgets.QComboBox() + devarg_override.setEditable(True) + devarg_override.lineEdit().setPlaceholderText("Override device arguments") + devarg_override.lineEdit().setClearButtonEnabled(True) + devarg_override.insertItem(0, "core:analyze_at_run_end=True") + self.layout.addWidget(devarg_override, 2, 3) + + devarg_override.setCurrentText(options["devarg_override"]) + + def update_devarg_override(text): + 
options["devarg_override"] = text + devarg_override.editTextChanged.connect(update_devarg_override) + self.devarg_override = devarg_override + log_level = QtWidgets.QComboBox() log_level.addItems(log_levels) log_level.setCurrentIndex(1) @@ -466,6 +481,9 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): return try: + if "devarg_override" in expid: + self.devarg_override.setCurrentText( + unparse_devarg_override(expid["devarg_override"])) self.log_level.setCurrentIndex(log_levels.index( log_level_to_name(expid["log_level"]))) if ("repo_rev" in expid and @@ -639,7 +657,8 @@ class ExperimentManager: else: # mutated by _ExperimentDock options = { - "log_level": logging.WARNING + "log_level": logging.WARNING, + "devarg_override": "" } if expurl[:5] == "repo:": options["repo_rev"] = None @@ -734,7 +753,14 @@ class ExperimentManager: entry_cls = procdesc_to_entry(argument["desc"]) argument_values[name] = entry_cls.state_to_value(argument["state"]) + try: + devarg_override = parse_devarg_override(options["devarg_override"]) + except: + logger.error("Failed to parse device argument overrides for %s", expurl) + return + expid = { + "devarg_override": devarg_override, "log_level": options["log_level"], "file": file, "class_name": class_name, diff --git a/artiq/frontend/artiq_client.py b/artiq/frontend/artiq_client.py index 4e7667465..d945f6853 100755 --- a/artiq/frontend/artiq_client.py +++ b/artiq/frontend/artiq_client.py @@ -23,7 +23,8 @@ from sipyco.sync_struct import Subscriber from sipyco.broadcast import Receiver from sipyco import common_args, pyon -from artiq.tools import scale_from_metadata, short_format, parse_arguments +from artiq.tools import (scale_from_metadata, short_format, parse_arguments, + parse_devarg_override) from artiq import __version__ as artiq_version @@ -68,6 +69,8 @@ def get_argparser(): parser_add.add_argument("-r", "--revision", default=None, help="use a specific repository revision " "(defaults to head, ignored without -R)") + parser_add.add_argument("--devarg-override", default="", + help="specify device arguments to override") parser_add.add_argument("--content", default=False, action="store_true", help="submit by content") @@ -146,6 +149,7 @@ def _action_submit(remote, args): raise ValueError("Failed to parse run arguments") from err expid = { + "devarg_override": parse_devarg_override(args.devarg_override), "log_level": logging.WARNING + args.quiet*10 - args.verbose*10, "class_name": args.class_name, "arguments": arguments, diff --git a/artiq/tools.py b/artiq/tools.py index dec615b51..46c0ce2f7 100644 --- a/artiq/tools.py +++ b/artiq/tools.py @@ -18,7 +18,9 @@ from artiq.language.environment import is_public_experiment from artiq.language import units -__all__ = ["parse_arguments", "elide", "scale_from_metadata", +__all__ = ["parse_arguments", + "parse_devarg_override", "unparse_devarg_override", + "elide", "scale_from_metadata", "short_format", "file_import", "get_experiment", "exc_to_warning", "asyncio_wait_or_cancel", @@ -36,6 +38,29 @@ def parse_arguments(arguments): return d +def parse_devarg_override(devarg_override): + devarg_override_dict = {} + for item in devarg_override.split(): + device, _, override = item.partition(":") + if not override: + raise ValueError + if device not in devarg_override_dict: + devarg_override_dict[device] = {} + argument, _, value = override.partition("=") + if not value: + raise ValueError + devarg_override_dict[device][argument] = pyon.decode(value) + return devarg_override_dict + + +def unparse_devarg_override(devarg_override): 
+ devarg_override_strs = [ + "{}:{}={}".format(device, argument, pyon.encode(value)) + for device, overrides in devarg_override.items() + for argument, value in overrides.items()] + return " ".join(devarg_override_strs) + + def elide(s, maxlen): elided = False if len(s) > maxlen: From 8dd8cfa6b0b91d18e3bcdbc0712cf6c7949532a2 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Mon, 18 Dec 2023 12:11:40 +0800 Subject: [PATCH 070/296] master: implement devarg_override --- artiq/master/worker_db.py | 8 +++++--- artiq/master/worker_impl.py | 2 ++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/artiq/master/worker_db.py b/artiq/master/worker_db.py index a52b63a1f..af316e4cf 100644 --- a/artiq/master/worker_db.py +++ b/artiq/master/worker_db.py @@ -19,12 +19,13 @@ class DummyDevice: pass -def _create_device(desc, device_mgr): +def _create_device(desc, device_mgr, argument_overrides): ty = desc["type"] if ty == "local": module = importlib.import_module(desc["module"]) device_class = getattr(module, desc["class"]) - return device_class(device_mgr, **desc.get("arguments", {})) + arguments = desc.get("arguments", {}) | argument_overrides + return device_class(device_mgr, **arguments) elif ty == "controller": if desc.get("best_effort", False): cls = BestEffortClient @@ -60,6 +61,7 @@ class DeviceManager: self.ddb = ddb self.virtual_devices = virtual_devices self.active_devices = [] + self.devarg_override = {} def get_device_db(self): """Returns the full contents of the device database.""" @@ -85,7 +87,7 @@ class DeviceManager: return existing_dev try: - dev = _create_device(desc, self) + dev = _create_device(desc, self, self.devarg_override.get(name, {})) except Exception as e: raise DeviceError("Failed to create device '{}'" .format(name)) from e diff --git a/artiq/master/worker_impl.py b/artiq/master/worker_impl.py index 7ecb8da10..ef6e5ab16 100644 --- a/artiq/master/worker_impl.py +++ b/artiq/master/worker_impl.py @@ -306,6 +306,8 @@ def main(): start_time = time.time() rid = obj["rid"] expid = obj["expid"] + if "devarg_override" in expid: + device_mgr.devarg_override = expid["devarg_override"] if "file" in expid: if obj["wd"] is not None: # Using repository From 76fba538b18166f770113140f4e3d35ce8cca834 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 18 Dec 2023 13:15:41 +0800 Subject: [PATCH 071/296] artiq_ddb_template: fixed missing separator --- artiq/frontend/artiq_ddb_template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/artiq/frontend/artiq_ddb_template.py b/artiq/frontend/artiq_ddb_template.py index e55637d37..376467d75 100755 --- a/artiq/frontend/artiq_ddb_template.py +++ b/artiq/frontend/artiq_ddb_template.py @@ -58,7 +58,7 @@ def process_header(output, description): "arguments": {{ "host": core_addr, "ref_period": {ref_period}, - "analyzer_proxy": "core_analyzer" + "analyzer_proxy": "core_analyzer", "target": "{cpu_target}", "satellite_cpu_targets": {{}} }}, From e556c29b402bae9deda38745152a6493803eb0f7 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 16 Nov 2023 17:00:37 +0800 Subject: [PATCH 072/296] drtioaux: add source to relevant drtio packets --- .../firmware/libproto_artiq/drtioaux_proto.rs | 72 +++++++++++++------ 1 file changed, 50 insertions(+), 22 deletions(-) diff --git a/artiq/firmware/libproto_artiq/drtioaux_proto.rs b/artiq/firmware/libproto_artiq/drtioaux_proto.rs index c0333b18c..803c5fd69 100644 --- a/artiq/firmware/libproto_artiq/drtioaux_proto.rs +++ b/artiq/firmware/libproto_artiq/drtioaux_proto.rs @@ -18,7 +18,7 
@@ impl From> for Error { // used by satellite -> master analyzer, subkernel exceptions pub const SAT_PAYLOAD_MAX_SIZE: usize = /*max size*/512 - /*CRC*/4 - /*packet ID*/1 - /*last*/1 - /*length*/2; // used by DDMA, subkernel program data (need to provide extra ID and destination) -pub const MASTER_PAYLOAD_MAX_SIZE: usize = SAT_PAYLOAD_MAX_SIZE - /*destination*/1 - /*ID*/4; +pub const MASTER_PAYLOAD_MAX_SIZE: usize = SAT_PAYLOAD_MAX_SIZE - /*source*/1 - /*destination*/1 - /*ID*/4; #[derive(PartialEq, Clone, Copy, Debug)] #[repr(u8)] @@ -106,22 +106,26 @@ pub enum Packet { AnalyzerDataRequest { destination: u8 }, AnalyzerData { last: bool, length: u16, data: [u8; SAT_PAYLOAD_MAX_SIZE]}, - DmaAddTraceRequest { destination: u8, id: u32, status: PayloadStatus, length: u16, trace: [u8; MASTER_PAYLOAD_MAX_SIZE] }, - DmaAddTraceReply { succeeded: bool }, - DmaRemoveTraceRequest { destination: u8, id: u32 }, - DmaRemoveTraceReply { succeeded: bool }, - DmaPlaybackRequest { destination: u8, id: u32, timestamp: u64 }, - DmaPlaybackReply { succeeded: bool }, + DmaAddTraceRequest { + source: u8, destination: u8, + id: u32, status: PayloadStatus, + length: u16, trace: [u8; MASTER_PAYLOAD_MAX_SIZE] + }, + DmaAddTraceReply { destination: u8, succeeded: bool }, + DmaRemoveTraceRequest { source: u8, destination: u8, id: u32 }, + DmaRemoveTraceReply { destination: u8, succeeded: bool }, + DmaPlaybackRequest { source: u8, destination: u8, id: u32, timestamp: u64 }, + DmaPlaybackReply { destination: u8, succeeded: bool }, DmaPlaybackStatus { destination: u8, id: u32, error: u8, channel: u32, timestamp: u64 }, SubkernelAddDataRequest { destination: u8, id: u32, status: PayloadStatus, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] }, SubkernelAddDataReply { succeeded: bool }, - SubkernelLoadRunRequest { destination: u8, id: u32, run: bool }, - SubkernelLoadRunReply { succeeded: bool }, - SubkernelFinished { id: u32, with_exception: bool }, + SubkernelLoadRunRequest { source: u8, destination: u8, id: u32, run: bool }, + SubkernelLoadRunReply { destination: u8, succeeded: bool }, + SubkernelFinished { destination: u8, id: u32, with_exception: bool, exception_src: u8 }, SubkernelExceptionRequest { destination: u8 }, SubkernelException { last: bool, length: u16, data: [u8; SAT_PAYLOAD_MAX_SIZE] }, - SubkernelMessage { destination: u8, id: u32, status: PayloadStatus, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] }, + SubkernelMessage { source: u8, destination: u8, id: u32, status: PayloadStatus, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] }, SubkernelMessageAck { destination: u8 }, } @@ -277,7 +281,8 @@ impl Packet { } }, - 0xb0 => { + 0xb0 => { + let source = reader.read_u8()?; let destination = reader.read_u8()?; let id = reader.read_u32()?; let status = reader.read_u8()?; @@ -285,6 +290,7 @@ impl Packet { let mut trace: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; reader.read_exact(&mut trace[0..length as usize])?; Packet::DmaAddTraceRequest { + source: source, destination: destination, id: id, status: PayloadStatus::from(status), @@ -293,21 +299,26 @@ impl Packet { } }, 0xb1 => Packet::DmaAddTraceReply { + destination: reader.read_u8()?, succeeded: reader.read_bool()? }, 0xb2 => Packet::DmaRemoveTraceRequest { + source: reader.read_u8()?, destination: reader.read_u8()?, id: reader.read_u32()? }, 0xb3 => Packet::DmaRemoveTraceReply { + destination: reader.read_u8()?, succeeded: reader.read_bool()? 
}, 0xb4 => Packet::DmaPlaybackRequest { + source: reader.read_u8()?, destination: reader.read_u8()?, id: reader.read_u32()?, timestamp: reader.read_u64()? }, 0xb5 => Packet::DmaPlaybackReply { + destination: reader.read_u8()?, succeeded: reader.read_bool()? }, 0xb6 => Packet::DmaPlaybackStatus { @@ -337,16 +348,20 @@ impl Packet { succeeded: reader.read_bool()? }, 0xc4 => Packet::SubkernelLoadRunRequest { + source: reader.read_u8()?, destination: reader.read_u8()?, id: reader.read_u32()?, run: reader.read_bool()? }, 0xc5 => Packet::SubkernelLoadRunReply { + destination: reader.read_u8()?, succeeded: reader.read_bool()? }, 0xc8 => Packet::SubkernelFinished { + destination: reader.read_u8()?, id: reader.read_u32()?, with_exception: reader.read_bool()?, + exception_src: reader.read_u8()? }, 0xc9 => Packet::SubkernelExceptionRequest { destination: reader.read_u8()? @@ -363,6 +378,7 @@ impl Packet { } }, 0xcb => { + let source = reader.read_u8()?; let destination = reader.read_u8()?; let id = reader.read_u32()?; let status = reader.read_u8()?; @@ -370,6 +386,7 @@ impl Packet { let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; reader.read_exact(&mut data[0..length as usize])?; Packet::SubkernelMessage { + source: source, destination: destination, id: id, status: PayloadStatus::from(status), @@ -561,8 +578,9 @@ impl Packet { writer.write_all(&data[0..length as usize])?; }, - Packet::DmaAddTraceRequest { destination, id, status, trace, length } => { + Packet::DmaAddTraceRequest { source, destination, id, status, trace, length } => { writer.write_u8(0xb0)?; + writer.write_u8(source)?; writer.write_u8(destination)?; writer.write_u32(id)?; writer.write_u8(status as u8)?; @@ -571,27 +589,32 @@ impl Packet { writer.write_u16(length)?; writer.write_all(&trace[0..length as usize])?; }, - Packet::DmaAddTraceReply { succeeded } => { + Packet::DmaAddTraceReply { destination, succeeded } => { writer.write_u8(0xb1)?; + writer.write_u8(destination)?; writer.write_bool(succeeded)?; }, - Packet::DmaRemoveTraceRequest { destination, id } => { + Packet::DmaRemoveTraceRequest { source, destination, id } => { writer.write_u8(0xb2)?; + writer.write_u8(source)?; writer.write_u8(destination)?; writer.write_u32(id)?; }, - Packet::DmaRemoveTraceReply { succeeded } => { + Packet::DmaRemoveTraceReply { destination, succeeded } => { writer.write_u8(0xb3)?; + writer.write_u8(destination)?; writer.write_bool(succeeded)?; }, - Packet::DmaPlaybackRequest { destination, id, timestamp } => { + Packet::DmaPlaybackRequest { source, destination, id, timestamp } => { writer.write_u8(0xb4)?; + writer.write_u8(source)?; writer.write_u8(destination)?; writer.write_u32(id)?; writer.write_u64(timestamp)?; }, - Packet::DmaPlaybackReply { succeeded } => { + Packet::DmaPlaybackReply { destination, succeeded } => { writer.write_u8(0xb5)?; + writer.write_u8(destination)?; writer.write_bool(succeeded)?; }, Packet::DmaPlaybackStatus { destination, id, error, channel, timestamp } => { @@ -615,20 +638,24 @@ impl Packet { writer.write_u8(0xc1)?; writer.write_bool(succeeded)?; }, - Packet::SubkernelLoadRunRequest { destination, id, run } => { + Packet::SubkernelLoadRunRequest { source, destination, id, run } => { writer.write_u8(0xc4)?; + writer.write_u8(source)?; writer.write_u8(destination)?; writer.write_u32(id)?; writer.write_bool(run)?; }, - Packet::SubkernelLoadRunReply { succeeded } => { + Packet::SubkernelLoadRunReply { destination, succeeded } => { writer.write_u8(0xc5)?; + writer.write_u8(destination)?; 
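                // resulting wire layout (sketch): type byte 0xc5, then the newly added
                // destination byte, then the succeeded flag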
writer.write_bool(succeeded)?; }, - Packet::SubkernelFinished { id, with_exception } => { + Packet::SubkernelFinished { destination, id, with_exception, exception_src } => { writer.write_u8(0xc8)?; + writer.write_u8(destination)?; writer.write_u32(id)?; writer.write_bool(with_exception)?; + writer.write_u8(exception_src)?; }, Packet::SubkernelExceptionRequest { destination } => { writer.write_u8(0xc9)?; @@ -640,8 +667,9 @@ impl Packet { writer.write_u16(length)?; writer.write_all(&data[0..length as usize])?; }, - Packet::SubkernelMessage { destination, id, status, data, length } => { + Packet::SubkernelMessage { source, destination, id, status, data, length } => { writer.write_u8(0xcb)?; + writer.write_u8(source)?; writer.write_u8(destination)?; writer.write_u32(id)?; writer.write_u8(status as u8)?; From 4495f6035e244c70b10cf04da9c9a8a0fc6b3309 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Fri, 17 Nov 2023 10:52:31 +0800 Subject: [PATCH 073/296] master: support source parameters --- artiq/firmware/runtime/kernel.rs | 10 +++--- artiq/firmware/runtime/rtio_mgt.rs | 33 ++++++++--------- artiq/firmware/satman/dma.rs | 26 +++++++------- artiq/firmware/satman/kernel.rs | 37 ++++++++++++------- artiq/firmware/satman/main.rs | 57 ++++++++++++++++-------------- 5 files changed, 91 insertions(+), 72 deletions(-) diff --git a/artiq/firmware/runtime/kernel.rs b/artiq/firmware/runtime/kernel.rs index a308e6c1c..124b17f19 100644 --- a/artiq/firmware/runtime/kernel.rs +++ b/artiq/firmware/runtime/kernel.rs @@ -103,7 +103,7 @@ pub mod subkernel { pub enum FinishStatus { Ok, CommLost, - Exception + Exception(u8) // exception source } #[derive(Debug, PartialEq, Clone, Copy)] @@ -216,7 +216,7 @@ pub mod subkernel { Ok(()) } - pub fn subkernel_finished(io: &Io, subkernel_mutex: &Mutex, id: u32, with_exception: bool) { + pub fn subkernel_finished(io: &Io, subkernel_mutex: &Mutex, id: u32, with_exception: bool, exception_src: u8) { // called upon receiving DRTIO SubkernelRunDone let _lock = subkernel_mutex.lock(io).unwrap(); let subkernel = unsafe { SUBKERNELS.get_mut(&id) }; @@ -226,7 +226,7 @@ pub mod subkernel { if subkernel.state == SubkernelState::Running { subkernel.state = SubkernelState::Finished { status: match with_exception { - true => FinishStatus::Exception, + true => FinishStatus::Exception(exception_src), false => FinishStatus::Ok, } } @@ -266,9 +266,9 @@ pub mod subkernel { Ok(SubkernelFinished { id: id, comm_lost: status == FinishStatus::CommLost, - exception: if status == FinishStatus::Exception { + exception: if let FinishStatus::Exception(dest) = status { Some(drtio::subkernel_retrieve_exception(io, aux_mutex, - routing_table, subkernel.destination)?) + routing_table, dest)?) 
} else { None } }) }, diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index 44661e822..1e17ab63d 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -104,11 +104,11 @@ pub mod drtio { remote_dma::playback_done(io, ddma_mutex, id, destination, error, channel, timestamp); None }, - drtioaux::Packet::SubkernelFinished { id, with_exception } => { - subkernel::subkernel_finished(io, subkernel_mutex, id, with_exception); + drtioaux::Packet::SubkernelFinished { id, destination: 0, with_exception, exception_src } => { + subkernel::subkernel_finished(io, subkernel_mutex, id, with_exception, exception_src); None }, - drtioaux::Packet::SubkernelMessage { id, destination: from, status, length, data } => { + drtioaux::Packet::SubkernelMessage { id, source: 0, destination: from, status, length, data } => { subkernel::message_handle_incoming(io, subkernel_mutex, id, status, length as usize, &data); // acknowledge receiving part of the message drtioaux::send(linkno, @@ -456,10 +456,10 @@ pub mod drtio { partition_data(trace, |slice, status, len: usize| { let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::DmaAddTraceRequest { - id: id, destination: destination, status: status, length: len as u16, trace: *slice})?; + id: id, source: 0, destination: destination, status: status, length: len as u16, trace: *slice})?; match reply { - drtioaux::Packet::DmaAddTraceReply { succeeded: true } => Ok(()), - drtioaux::Packet::DmaAddTraceReply { succeeded: false } => Err(Error::DmaAddTraceFail(destination)), + drtioaux::Packet::DmaAddTraceReply { destination: 0, succeeded: true } => Ok(()), + drtioaux::Packet::DmaAddTraceReply { destination: 0, succeeded: false } => Err(Error::DmaAddTraceFail(destination)), packet => Err(Error::UnexpectedPacket(packet)), } }) @@ -469,10 +469,10 @@ pub mod drtio { id: u32, destination: u8) -> Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; let reply = aux_transact(io, aux_mutex, linkno, - &drtioaux::Packet::DmaRemoveTraceRequest { id: id, destination: destination })?; + &drtioaux::Packet::DmaRemoveTraceRequest { id: id, source: 0, destination: destination })?; match reply { - drtioaux::Packet::DmaRemoveTraceReply { succeeded: true } => Ok(()), - drtioaux::Packet::DmaRemoveTraceReply { succeeded: false } => Err(Error::DmaEraseFail(destination)), + drtioaux::Packet::DmaRemoveTraceReply { destination: 0, succeeded: true } => Ok(()), + drtioaux::Packet::DmaRemoveTraceReply { destination: 0, succeeded: false } => Err(Error::DmaEraseFail(destination)), packet => Err(Error::UnexpectedPacket(packet)), } } @@ -481,10 +481,10 @@ pub mod drtio { id: u32, destination: u8, timestamp: u64) -> Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; let reply = aux_transact(io, aux_mutex, linkno, - &drtioaux::Packet::DmaPlaybackRequest{ id: id, destination: destination, timestamp: timestamp })?; + &drtioaux::Packet::DmaPlaybackRequest{ id: id, source: 0, destination: destination, timestamp: timestamp })?; match reply { - drtioaux::Packet::DmaPlaybackReply { succeeded: true } => Ok(()), - drtioaux::Packet::DmaPlaybackReply { succeeded: false } => + drtioaux::Packet::DmaPlaybackReply { destination: 0, succeeded: true } => Ok(()), + drtioaux::Packet::DmaPlaybackReply { destination: 0, succeeded: false } => Err(Error::DmaPlaybackFail(destination)), packet => Err(Error::UnexpectedPacket(packet)), } @@ -559,10 +559,10 @@ pub mod drtio { id: u32, destination: u8, 
run: bool) -> Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; let reply = aux_transact(io, aux_mutex, linkno, - &drtioaux::Packet::SubkernelLoadRunRequest{ id: id, destination: destination, run: run })?; + &drtioaux::Packet::SubkernelLoadRunRequest{ id: id, source: 0, destination: destination, run: run })?; match reply { - drtioaux::Packet::SubkernelLoadRunReply { succeeded: true } => Ok(()), - drtioaux::Packet::SubkernelLoadRunReply { succeeded: false } => + drtioaux::Packet::SubkernelLoadRunReply { destination: 0, succeeded: true } => Ok(()), + drtioaux::Packet::SubkernelLoadRunReply { destination: 0, succeeded: false } => Err(Error::SubkernelRunFail(destination)), packet => Err(Error::UnexpectedPacket(packet)), } @@ -595,7 +595,8 @@ pub mod drtio { partition_data(message, |slice, status, len: usize| { let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::SubkernelMessage { - destination: destination, id: id, status: status, length: len as u16, data: *slice})?; + source: 0, destination: destination, + id: id, status: status, length: len as u16, data: *slice})?; match reply { drtioaux::Packet::SubkernelMessageAck { .. } => Ok(()), packet => Err(Error::UnexpectedPacket(packet)), diff --git a/artiq/firmware/satman/dma.rs b/artiq/firmware/satman/dma.rs index 277e27927..6b9ea3f70 100644 --- a/artiq/firmware/satman/dma.rs +++ b/artiq/firmware/satman/dma.rs @@ -33,7 +33,7 @@ struct Entry { #[derive(Debug)] pub struct Manager { - entries: BTreeMap, + entries: BTreeMap<(u8, u32), Entry>, state: ManagerState, currentid: u32 } @@ -52,30 +52,30 @@ impl Manager { } } - pub fn add(&mut self, id: u32, status: PayloadStatus, trace: &[u8], trace_len: usize) -> Result<(), Error> { + pub fn add(&mut self, source: u8, id: u32, status: PayloadStatus, trace: &[u8], trace_len: usize) -> Result<(), Error> { if status.is_first() { - self.entries.remove(&id); + self.entries.remove(&(source, id)); } - let entry = match self.entries.get_mut(&id) { + let entry = match self.entries.get_mut(&(source, id)) { Some(entry) => { if entry.complete { // replace entry - self.entries.remove(&id); - self.entries.insert(id, Entry { + self.entries.remove(&(source, id)); + self.entries.insert((source, id), Entry { trace: Vec::new(), padding_len: 0, complete: false }); - self.entries.get_mut(&id).unwrap() + self.entries.get_mut(&(source, id)).unwrap() } else { entry } }, None => { - self.entries.insert(id, Entry { + self.entries.insert((source, id), Entry { trace: Vec::new(), padding_len: 0, complete: false }); - self.entries.get_mut(&id).unwrap() + self.entries.get_mut(&(source, id)).unwrap() }, }; entry.trace.extend(&trace[0..trace_len]); @@ -102,19 +102,19 @@ impl Manager { Ok(()) } - pub fn erase(&mut self, id: u32) -> Result<(), Error> { - match self.entries.remove(&id) { + pub fn erase(&mut self, source: u8, id: u32) -> Result<(), Error> { + match self.entries.remove(&(source, id)) { Some(_) => Ok(()), None => Err(Error::IdNotFound) } } - pub fn playback(&mut self, id: u32, timestamp: u64) -> Result<(), Error> { + pub fn playback(&mut self, source: u8, id: u32, timestamp: u64) -> Result<(), Error> { if self.state != ManagerState::Idle { return Err(Error::PlaybackInProgress); } - let entry = match self.entries.get(&id){ + let entry = match self.entries.get(&(source, id)){ Some(entry) => entry, None => { return Err(Error::IdNotFound); } }; diff --git a/artiq/firmware/satman/kernel.rs b/artiq/firmware/satman/kernel.rs index 850a126a0..33b5ed0bb 100644 --- 
a/artiq/firmware/satman/kernel.rs +++ b/artiq/firmware/satman/kernel.rs @@ -128,6 +128,7 @@ struct Session { kernel_state: KernelState, log_buffer: String, last_exception: Option, + source: u8, // which destination requested running the kernel messages: MessageManager } @@ -147,7 +148,9 @@ pub struct Manager { pub struct SubkernelFinished { pub id: u32, - pub with_exception: bool + pub with_exception: bool, + pub exception_source: u8, + pub source: u8 } pub struct SliceMeta { @@ -288,6 +291,7 @@ impl Session { kernel_state: KernelState::Absent, log_buffer: String::new(), last_exception: None, + source: 0, messages: MessageManager::new() } } @@ -369,12 +373,13 @@ impl Manager { unsafe { self.cache.unborrow() } } - pub fn run(&mut self, id: u32) -> Result<(), Error> { + pub fn run(&mut self, source: u8, id: u32) -> Result<(), Error> { info!("starting subkernel #{}", id); if self.session.kernel_state != KernelState::Loaded || self.current_id != id { self.load(id)?; } + self.session.source = source; self.session.kernel_state = KernelState::Running; cricon_select(RtioMaster::Kernel); @@ -477,7 +482,7 @@ impl Manager { } } - pub fn process_kern_requests(&mut self, rank: u8) { + pub fn process_kern_requests(&mut self, destination: u8) { if !self.is_running() { return; } @@ -490,26 +495,34 @@ impl Manager { self.session.kernel_state = KernelState::Absent; unsafe { self.cache.unborrow() } self.session.last_exception = Some(exception); - self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: true }) + self.last_finished = Some(SubkernelFinished { + source: self.session.source, id: self.current_id, with_exception: true, exception_source: destination + }) }, Err(e) => { error!("Error while running processing external messages: {:?}", e); self.stop(); self.runtime_exception(e); - self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: true }) + self.last_finished = Some(SubkernelFinished { + source: self.session.source, id: self.current_id, with_exception: true, exception_source: destination + }) } } - match self.process_kern_message(rank) { + match self.process_kern_message(destination) { Ok(Some(with_exception)) => { - self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: with_exception }) + self.last_finished = Some(SubkernelFinished { + source: self.session.source, id: self.current_id, with_exception: with_exception, exception_source: destination + }) }, Ok(None) | Err(Error::NoMessage) => (), Err(e) => { error!("Error while running kernel: {:?}", e); self.stop(); self.runtime_exception(e); - self.last_finished = Some(SubkernelFinished { id: self.current_id, with_exception: true }) + self.last_finished = Some(SubkernelFinished { + source: self.session.source, id: self.current_id, with_exception: true, exception_source: destination + }) } } } @@ -543,7 +556,7 @@ impl Manager { } } - fn process_kern_message(&mut self, rank: u8) -> Result, Error> { + fn process_kern_message(&mut self, destination: u8) -> Result, Error> { // returns Ok(with_exception) on finish // None if the kernel is still running kern_recv(|request| { @@ -559,7 +572,7 @@ impl Manager { }, } - if process_kern_hwreq(request, rank)? { + if process_kern_hwreq(request, destination)? 
{ return Ok(None) } @@ -759,7 +772,7 @@ fn pass_message_to_kernel(message: &Message, tags: &[u8]) -> Result<(), Error> { Ok(()) } -fn process_kern_hwreq(request: &kern::Message, rank: u8) -> Result { +fn process_kern_hwreq(request: &kern::Message, self_destination: u8) -> Result { match request { &kern::RtioInitRequest => { unsafe { @@ -774,7 +787,7 @@ fn process_kern_hwreq(request: &kern::Message, rank: u8) -> Result // only local destination is considered "up" // no access to other DRTIO destinations kern_send(&kern::RtioDestinationStatusReply { - up: destination == rank }) + up: destination == self_destination }) } &kern::I2cStartRequest { busno } => { diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index 1856c56b2..dbe4b08d4 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -104,7 +104,7 @@ macro_rules! forward { fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmgr: &mut KernelManager, _repeaters: &mut [repeater::Repeater], _routing_table: &mut drtio_routing::RoutingTable, _rank: &mut u8, - packet: drtioaux::Packet) -> Result<(), drtioaux::Error> { + self_destination: &mut u8, packet: drtioaux::Packet) -> Result<(), drtioaux::Error> { // In the code below, *_chan_sel_write takes an u8 if there are fewer than 256 channels, // and u16 otherwise; hence the `as _` conversion. match packet { @@ -138,13 +138,14 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg } else if let Some(subkernel_finished) = kernelmgr.get_last_finished() { info!("subkernel {} finished, with exception: {}", subkernel_finished.id, subkernel_finished.with_exception); drtioaux::send(0, &drtioaux::Packet::SubkernelFinished { - id: subkernel_finished.id, with_exception: subkernel_finished.with_exception + destination: subkernel_finished.source, id: subkernel_finished.id, + with_exception: subkernel_finished.with_exception, exception_src: *self_destination })?; } else if kernelmgr.message_is_ready() { let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; let meta = kernelmgr.message_get_slice(&mut data_slice).unwrap(); drtioaux::send(0, &drtioaux::Packet::SubkernelMessage { - destination: destination, id: kernelmgr.get_current_id().unwrap(), + source: *self_destination, destination: 0, id: kernelmgr.get_current_id().unwrap(), status: meta.status, length: meta.len as u16, data: data_slice })?; } else { @@ -370,33 +371,35 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg }) } - drtioaux::Packet::DmaAddTraceRequest { destination: _destination, id, status, length, trace } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); - let succeeded = dmamgr.add(id, status, &trace, length as usize).is_ok(); + drtioaux::Packet::DmaAddTraceRequest { source, destination, id, status, length, trace } => { + forward!(_routing_table, destination, *_rank, _repeaters, &packet); + *self_destination = destination; + let succeeded = dmamgr.add(source, id, status, &trace, length as usize).is_ok(); drtioaux::send(0, - &drtioaux::Packet::DmaAddTraceReply { succeeded: succeeded }) + &drtioaux::Packet::DmaAddTraceReply { destination: source, succeeded: succeeded }) } - drtioaux::Packet::DmaRemoveTraceRequest { destination: _destination, id } => { + drtioaux::Packet::DmaRemoveTraceRequest { source, destination: _destination, id } => { forward!(_routing_table, _destination, *_rank, _repeaters, &packet); - let succeeded = dmamgr.erase(id).is_ok(); + let 
succeeded = dmamgr.erase(source, id).is_ok(); drtioaux::send(0, - &drtioaux::Packet::DmaRemoveTraceReply { succeeded: succeeded }) + &drtioaux::Packet::DmaRemoveTraceReply { destination: source, succeeded: succeeded }) } - drtioaux::Packet::DmaPlaybackRequest { destination: _destination, id, timestamp } => { + drtioaux::Packet::DmaPlaybackRequest { source, destination: _destination, id, timestamp } => { forward!(_routing_table, _destination, *_rank, _repeaters, &packet); // no DMA with a running kernel - let succeeded = !kernelmgr.is_running() && dmamgr.playback(id, timestamp).is_ok(); + let succeeded = !kernelmgr.is_running() && dmamgr.playback(source, id, timestamp).is_ok(); drtioaux::send(0, - &drtioaux::Packet::DmaPlaybackReply { succeeded: succeeded }) + &drtioaux::Packet::DmaPlaybackReply { destination: source, succeeded: succeeded }) } - drtioaux::Packet::SubkernelAddDataRequest { destination: _destination, id, status, length, data } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + drtioaux::Packet::SubkernelAddDataRequest { destination, id, status, length, data } => { + forward!(_routing_table, destination, *_rank, _repeaters, &packet); + *self_destination = destination; let succeeded = kernelmgr.add(id, status, &data, length as usize).is_ok(); drtioaux::send(0, &drtioaux::Packet::SubkernelAddDataReply { succeeded: succeeded }) } - drtioaux::Packet::SubkernelLoadRunRequest { destination: _destination, id, run } => { + drtioaux::Packet::SubkernelLoadRunRequest { source, destination: _destination, id, run } => { forward!(_routing_table, _destination, *_rank, _repeaters, &packet); let mut succeeded = kernelmgr.load(id).is_ok(); // allow preloading a kernel with delayed run @@ -405,11 +408,11 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg // cannot run kernel while DDMA is running succeeded = false; } else { - succeeded |= kernelmgr.run(id).is_ok(); + succeeded |= kernelmgr.run(source, id).is_ok(); } } drtioaux::send(0, - &drtioaux::Packet::SubkernelLoadRunReply { succeeded: succeeded }) + &drtioaux::Packet::SubkernelLoadRunReply { destination: source, succeeded: succeeded }) } drtioaux::Packet::SubkernelExceptionRequest { destination: _destination } => { forward!(_routing_table, _destination, *_rank, _repeaters, &packet); @@ -421,11 +424,11 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg data: data_slice, }) } - drtioaux::Packet::SubkernelMessage { destination, id: _id, status, length, data } => { - forward!(_routing_table, destination, *_rank, _repeaters, &packet); + drtioaux::Packet::SubkernelMessage { source, destination: _destination, id: _id, status, length, data } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); kernelmgr.message_handle_incoming(status, length as usize, &data); drtioaux::send(0, &drtioaux::Packet::SubkernelMessageAck { - destination: destination + destination: source }) } drtioaux::Packet::SubkernelMessageAck { destination: _destination } => { @@ -434,7 +437,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; if let Some(meta) = kernelmgr.message_get_slice(&mut data_slice) { drtioaux::send(0, &drtioaux::Packet::SubkernelMessage { - destination: *_rank, id: kernelmgr.get_current_id().unwrap(), + source: *self_destination, destination: 0, id: kernelmgr.get_current_id().unwrap(), status: meta.status, length: meta.len as u16, 
data: data_slice })? } else { @@ -453,11 +456,12 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg fn process_aux_packets(dma_manager: &mut DmaManager, analyzer: &mut Analyzer, kernelmgr: &mut KernelManager, repeaters: &mut [repeater::Repeater], - routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8) { + routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8, + destination: &mut u8) { let result = drtioaux::recv(0).and_then(|packet| { if let Some(packet) = packet { - process_aux_packet(dma_manager, analyzer, kernelmgr, repeaters, routing_table, rank, packet) + process_aux_packet(dma_manager, analyzer, kernelmgr, repeaters, routing_table, rank, destination, packet) } else { Ok(()) } @@ -670,6 +674,7 @@ pub extern fn main() -> i32 { } let mut routing_table = drtio_routing::RoutingTable::default_empty(); let mut rank = 1; + let mut destination = 1; let mut hardware_tick_ts = 0; @@ -715,7 +720,7 @@ pub extern fn main() -> i32 { drtiosat_process_errors(); process_aux_packets(&mut dma_manager, &mut analyzer, &mut kernelmgr, &mut repeaters, - &mut routing_table, &mut rank); + &mut routing_table, &mut rank, &mut destination); for rep in repeaters.iter_mut() { rep.service(&routing_table, rank); } @@ -738,7 +743,7 @@ pub extern fn main() -> i32 { error!("aux packet error: {}", e); } } - kernelmgr.process_kern_requests(rank); + kernelmgr.process_kern_requests(destination); } drtiosat_reset_phy(true); From 9bc66e5c1451ecd297d1e88e4d9701aeedc22932 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Wed, 22 Nov 2023 14:09:48 +0800 Subject: [PATCH 074/296] support routing packets between satellites and master --- artiq/firmware/runtime/rtio_mgt.rs | 55 ++++++--- artiq/firmware/satman/dma.rs | 17 ++- artiq/firmware/satman/main.rs | 174 ++++++++++++++++++----------- artiq/firmware/satman/repeater.rs | 21 +++- artiq/firmware/satman/routing.rs | 122 ++++++++++++++++++++ 5 files changed, 297 insertions(+), 92 deletions(-) create mode 100644 artiq/firmware/satman/routing.rs diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index 1e17ab63d..e469e1ea9 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -96,26 +96,55 @@ pub mod drtio { } } - fn process_async_packets(io: &Io, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, linkno: u8, - packet: drtioaux::Packet) -> Option { - // returns None if an async packet has been consumed + fn process_async_packets(io: &Io, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8, packet: drtioaux::Packet + ) -> Option { + // returns None if a packet has been consumed or re-routed + macro_rules! 
route_packet { + ($dest:ident) => {{ + let dest_link = routing_table.0[$dest as usize][0] - 1; + if dest_link == linkno { + warn!("[LINK#{}] Re-routed packet would return to the same link, dropping: {:?}", linkno, packet); + } else if $dest == 0 { + warn!("[LINK#{}] Received invalid routable packet: {:?}", linkno, packet) + } + else { + drtioaux::send(dest_link, &packet).unwrap(); + } + None + }} + } match packet { - drtioaux::Packet::DmaPlaybackStatus { id, destination, error, channel, timestamp } => { - remote_dma::playback_done(io, ddma_mutex, id, destination, error, channel, timestamp); + // packets to be consumed locally + drtioaux::Packet::DmaPlaybackStatus { id, destination: 0, error, channel, timestamp } => { + remote_dma::playback_done(io, ddma_mutex, id, 0, error, channel, timestamp); None }, drtioaux::Packet::SubkernelFinished { id, destination: 0, with_exception, exception_src } => { subkernel::subkernel_finished(io, subkernel_mutex, id, with_exception, exception_src); None }, - drtioaux::Packet::SubkernelMessage { id, source: 0, destination: from, status, length, data } => { + drtioaux::Packet::SubkernelMessage { id, source: from, destination: 0, status, length, data } => { subkernel::message_handle_incoming(io, subkernel_mutex, id, status, length as usize, &data); // acknowledge receiving part of the message drtioaux::send(linkno, &drtioaux::Packet::SubkernelMessageAck { destination: from } ).unwrap(); None - } + }, + // routable packets + drtioaux::Packet::DmaAddTraceRequest { destination, .. } => route_packet!(destination), + drtioaux::Packet::DmaAddTraceReply { destination, .. } => route_packet!(destination), + drtioaux::Packet::DmaRemoveTraceRequest { destination, .. } => route_packet!(destination), + drtioaux::Packet::DmaRemoveTraceReply { destination, .. } => route_packet!(destination), + drtioaux::Packet::DmaPlaybackRequest { destination, .. } => route_packet!(destination), + drtioaux::Packet::DmaPlaybackReply { destination, .. } => route_packet!(destination), + drtioaux::Packet::SubkernelLoadRunRequest { destination, .. } => route_packet!(destination), + drtioaux::Packet::SubkernelLoadRunReply { destination, .. } => route_packet!(destination), + drtioaux::Packet::SubkernelMessage { destination, .. } => route_packet!(destination), + drtioaux::Packet::SubkernelMessageAck { destination, .. } => route_packet!(destination), + drtioaux::Packet::DmaPlaybackStatus { destination, .. } => route_packet!(destination), + drtioaux::Packet::SubkernelFinished { destination, .. 
} => route_packet!(destination), other => Some(other) } } @@ -223,14 +252,10 @@ pub mod drtio { } } - fn process_unsolicited_aux(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, linkno: u8) { + fn process_unsolicited_aux(io: &Io, aux_mutex: &Mutex, linkno: u8) { let _lock = aux_mutex.lock(io).unwrap(); match drtioaux::recv(linkno) { - Ok(Some(packet)) => { - if let Some(packet) = process_async_packets(io, ddma_mutex, subkernel_mutex, linkno, packet) { - warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet); - } - } + Ok(Some(packet)) => warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet), Ok(None) => (), Err(_) => warn!("[LINK#{}] aux packet error", linkno) } @@ -299,7 +324,7 @@ pub mod drtio { destination: destination }); if let Ok(reply) = reply { - let reply = process_async_packets(io, ddma_mutex, subkernel_mutex, linkno, reply); + let reply = process_async_packets(io, ddma_mutex, subkernel_mutex, routing_table, linkno, reply); match reply { Some(drtioaux::Packet::DestinationDownReply) => { destination_set_up(routing_table, up_destinations, destination, false); @@ -371,7 +396,7 @@ pub mod drtio { if up_links[linkno as usize] { /* link was previously up */ if link_rx_up(linkno) { - process_unsolicited_aux(&io, aux_mutex, ddma_mutex, subkernel_mutex, linkno); + process_unsolicited_aux(&io, aux_mutex, linkno); process_local_errors(linkno); } else { info!("[LINK#{}] link is down", linkno); diff --git a/artiq/firmware/satman/dma.rs b/artiq/firmware/satman/dma.rs index 6b9ea3f70..b22be573a 100644 --- a/artiq/firmware/satman/dma.rs +++ b/artiq/firmware/satman/dma.rs @@ -12,8 +12,9 @@ enum ManagerState { } pub struct RtioStatus { + pub source: u8, pub id: u32, - pub error: u8, + pub error: u8, pub channel: u32, pub timestamp: u64 } @@ -35,7 +36,8 @@ struct Entry { pub struct Manager { entries: BTreeMap<(u8, u32), Entry>, state: ManagerState, - currentid: u32 + current_id: u32, + current_source: u8 } impl Manager { @@ -47,7 +49,8 @@ impl Manager { } Manager { entries: BTreeMap::new(), - currentid: 0, + current_id: 0, + current_source: 0, state: ManagerState::Idle, } } @@ -125,7 +128,8 @@ impl Manager { assert!(ptr as u32 % 64 == 0); self.state = ManagerState::Playback; - self.currentid = id; + self.current_id = id; + self.current_source = source; unsafe { csr::rtio_dma::base_address_write(ptr as u64); @@ -156,8 +160,9 @@ impl Manager { if error != 0 { csr::rtio_dma::error_write(1); } - return Some(RtioStatus { - id: self.currentid, + return Some(RtioStatus { + source: self.current_source, + id: self.current_id, error: error, channel: channel, timestamp: timestamp }); diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index dbe4b08d4..2747f16c6 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -32,6 +32,7 @@ use analyzer::Analyzer; static mut ALLOC: alloc_list::ListAlloc = alloc_list::EMPTY; mod repeater; +mod routing; mod dma; mod analyzer; mod kernel; @@ -103,8 +104,9 @@ macro_rules! 
forward { } fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmgr: &mut KernelManager, - _repeaters: &mut [repeater::Repeater], _routing_table: &mut drtio_routing::RoutingTable, _rank: &mut u8, - self_destination: &mut u8, packet: drtioaux::Packet) -> Result<(), drtioaux::Error> { + _repeaters: &mut [repeater::Repeater], _routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8, + router: &mut routing::Router, self_destination: &mut u8, packet: drtioaux::Packet +) -> Result<(), drtioaux::Error> { // In the code below, *_chan_sel_write takes an u8 if there are fewer than 256 channels, // and u16 otherwise; hence the `as _` conversion. match packet { @@ -125,29 +127,18 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg drtioaux::Packet::DestinationStatusRequest { destination } => { #[cfg(has_drtio_routing)] - let hop = _routing_table.0[destination as usize][*_rank as usize]; + let hop = _routing_table.0[destination as usize][*rank as usize]; #[cfg(not(has_drtio_routing))] let hop = 0; if hop == 0 { // async messages - if let Some(status) = dmamgr.get_status() { - info!("playback done, error: {}, channel: {}, timestamp: {}", status.error, status.channel, status.timestamp); - drtioaux::send(0, &drtioaux::Packet::DmaPlaybackStatus { - destination: destination, id: status.id, error: status.error, channel: status.channel, timestamp: status.timestamp })?; - } else if let Some(subkernel_finished) = kernelmgr.get_last_finished() { - info!("subkernel {} finished, with exception: {}", subkernel_finished.id, subkernel_finished.with_exception); - drtioaux::send(0, &drtioaux::Packet::SubkernelFinished { - destination: subkernel_finished.source, id: subkernel_finished.id, - with_exception: subkernel_finished.with_exception, exception_src: *self_destination - })?; - } else if kernelmgr.message_is_ready() { - let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; - let meta = kernelmgr.message_get_slice(&mut data_slice).unwrap(); - drtioaux::send(0, &drtioaux::Packet::SubkernelMessage { - source: *self_destination, destination: 0, id: kernelmgr.get_current_id().unwrap(), - status: meta.status, length: meta.len as u16, data: data_slice - })?; + if *rank == 1 { + if let Some(packet) = router.get_upstream_packet(*rank) { + // pass any async or routed packets to master + // this does mean that DDMA/SK packets to master will "trickle down" to higher rank + drtioaux::send(0, &packet)?; + } } else { let errors; unsafe { @@ -220,18 +211,18 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg drtioaux::send(0, &drtioaux::Packet::RoutingAck) } #[cfg(has_drtio_routing)] - drtioaux::Packet::RoutingSetRank { rank } => { - *_rank = rank; - drtio_routing::interconnect_enable_all(_routing_table, rank); + drtioaux::Packet::RoutingSetRank { rank: new_rank } => { + *rank = new_rank; + drtio_routing::interconnect_enable_all(_routing_table, new_rank); - let rep_rank = rank + 1; + let rep_rank = new_rank + 1; for rep in _repeaters.iter() { if let Err(e) = rep.set_rank(rep_rank) { error!("failed to set rank ({})", e); } } - info!("rank: {}", rank); + info!("rank: {}", new_rank); info!("routing table: {}", _routing_table); drtioaux::send(0, &drtioaux::Packet::RoutingAck) @@ -246,8 +237,18 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg drtioaux::send(0, &drtioaux::Packet::RoutingAck) } + #[cfg(has_drtio_routing)] + drtioaux::Packet::RoutingAck => { + if *rank > 1 { + 
router.routing_ack_received(); + } else { + warn!("received unexpected RoutingAck"); + } + Ok(()) + } + drtioaux::Packet::MonitorRequest { destination: _destination, channel, probe } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); let value; #[cfg(has_rtio_moninj)] unsafe { @@ -264,7 +265,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg drtioaux::send(0, &reply) }, drtioaux::Packet::InjectionRequest { destination: _destination, channel, overrd, value } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); #[cfg(has_rtio_moninj)] unsafe { csr::rtio_moninj::inj_chan_sel_write(channel as _); @@ -274,7 +275,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg Ok(()) }, drtioaux::Packet::InjectionStatusRequest { destination: _destination, channel, overrd } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); let value; #[cfg(has_rtio_moninj)] unsafe { @@ -290,22 +291,22 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg }, drtioaux::Packet::I2cStartRequest { destination: _destination, busno } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); let succeeded = i2c::start(busno).is_ok(); drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded }) } drtioaux::Packet::I2cRestartRequest { destination: _destination, busno } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); let succeeded = i2c::restart(busno).is_ok(); drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded }) } drtioaux::Packet::I2cStopRequest { destination: _destination, busno } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); let succeeded = i2c::stop(busno).is_ok(); drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded }) } drtioaux::Packet::I2cWriteRequest { destination: _destination, busno, data } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); match i2c::write(busno, data) { Ok(ack) => drtioaux::send(0, &drtioaux::Packet::I2cWriteReply { succeeded: true, ack: ack }), @@ -314,7 +315,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg } } drtioaux::Packet::I2cReadRequest { destination: _destination, busno, ack } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); match i2c::read(busno, ack) { Ok(data) => drtioaux::send(0, &drtioaux::Packet::I2cReadReply { succeeded: true, data: data }), @@ -323,25 +324,25 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg } } drtioaux::Packet::I2cSwitchSelectRequest { destination: _destination, busno, address, mask } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); let succeeded = i2c::switch_select(busno, address, mask).is_ok(); 
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded }) } drtioaux::Packet::SpiSetConfigRequest { destination: _destination, busno, flags, length, div, cs } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); let succeeded = spi::set_config(busno, flags, length, div, cs).is_ok(); drtioaux::send(0, &drtioaux::Packet::SpiBasicReply { succeeded: succeeded }) }, drtioaux::Packet::SpiWriteRequest { destination: _destination, busno, data } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); let succeeded = spi::write(busno, data).is_ok(); drtioaux::send(0, &drtioaux::Packet::SpiBasicReply { succeeded: succeeded }) } drtioaux::Packet::SpiReadRequest { destination: _destination, busno } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); match spi::read(busno) { Ok(data) => drtioaux::send(0, &drtioaux::Packet::SpiReadReply { succeeded: true, data: data }), @@ -351,7 +352,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg } drtioaux::Packet::AnalyzerHeaderRequest { destination: _destination } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); let header = analyzer.get_header(); drtioaux::send(0, &drtioaux::Packet::AnalyzerHeader { total_byte_count: header.total_byte_count, @@ -361,7 +362,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg } drtioaux::Packet::AnalyzerDataRequest { destination: _destination } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE]; let meta = analyzer.get_data(&mut data_slice); drtioaux::send(0, &drtioaux::Packet::AnalyzerData { @@ -372,35 +373,38 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg } drtioaux::Packet::DmaAddTraceRequest { source, destination, id, status, length, trace } => { - forward!(_routing_table, destination, *_rank, _repeaters, &packet); + forward!(_routing_table, destination, *rank, _repeaters, &packet); *self_destination = destination; let succeeded = dmamgr.add(source, id, status, &trace, length as usize).is_ok(); - drtioaux::send(0, - &drtioaux::Packet::DmaAddTraceReply { destination: source, succeeded: succeeded }) + router.send(drtioaux::Packet::DmaAddTraceReply { + destination: source, succeeded: succeeded + }, _routing_table, *rank) } drtioaux::Packet::DmaRemoveTraceRequest { source, destination: _destination, id } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); let succeeded = dmamgr.erase(source, id).is_ok(); - drtioaux::send(0, - &drtioaux::Packet::DmaRemoveTraceReply { destination: source, succeeded: succeeded }) + router.send(drtioaux::Packet::DmaRemoveTraceReply { + destination: source, succeeded: succeeded + }, _routing_table, *rank) } drtioaux::Packet::DmaPlaybackRequest { source, destination: _destination, id, timestamp } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); // no DMA with 
a running kernel let succeeded = !kernelmgr.is_running() && dmamgr.playback(source, id, timestamp).is_ok(); - drtioaux::send(0, - &drtioaux::Packet::DmaPlaybackReply { destination: source, succeeded: succeeded }) + router.send(drtioaux::Packet::DmaPlaybackReply { + destination: source, succeeded: succeeded + }, _routing_table, *rank) } drtioaux::Packet::SubkernelAddDataRequest { destination, id, status, length, data } => { - forward!(_routing_table, destination, *_rank, _repeaters, &packet); + forward!(_routing_table, destination, *rank, _repeaters, &packet); *self_destination = destination; let succeeded = kernelmgr.add(id, status, &data, length as usize).is_ok(); drtioaux::send(0, &drtioaux::Packet::SubkernelAddDataReply { succeeded: succeeded }) } drtioaux::Packet::SubkernelLoadRunRequest { source, destination: _destination, id, run } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); let mut succeeded = kernelmgr.load(id).is_ok(); // allow preloading a kernel with delayed run if run { @@ -411,11 +415,13 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg succeeded |= kernelmgr.run(source, id).is_ok(); } } - drtioaux::send(0, - &drtioaux::Packet::SubkernelLoadRunReply { destination: source, succeeded: succeeded }) + router.send(drtioaux::Packet::SubkernelLoadRunReply { + destination: source, succeeded: succeeded + }, + _routing_table, *rank) } drtioaux::Packet::SubkernelExceptionRequest { destination: _destination } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE]; let meta = kernelmgr.exception_get_slice(&mut data_slice); drtioaux::send(0, &drtioaux::Packet::SubkernelException { @@ -425,21 +431,22 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg }) } drtioaux::Packet::SubkernelMessage { source, destination: _destination, id: _id, status, length, data } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); kernelmgr.message_handle_incoming(status, length as usize, &data); - drtioaux::send(0, &drtioaux::Packet::SubkernelMessageAck { - destination: source - }) + router.send(drtioaux::Packet::SubkernelMessageAck { + destination: source + }, _routing_table, *rank) } drtioaux::Packet::SubkernelMessageAck { destination: _destination } => { - forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + forward!(_routing_table, _destination, *rank, _repeaters, &packet); if kernelmgr.message_ack_slice() { let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; if let Some(meta) = kernelmgr.message_get_slice(&mut data_slice) { - drtioaux::send(0, &drtioaux::Packet::SubkernelMessage { + router.send(drtioaux::Packet::SubkernelMessage { source: *self_destination, destination: 0, id: kernelmgr.get_current_id().unwrap(), status: meta.status, length: meta.len as u16, data: data_slice - })? 
+ }, + _routing_table, *rank)?; } else { error!("Error receiving message slice"); } @@ -456,12 +463,12 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg fn process_aux_packets(dma_manager: &mut DmaManager, analyzer: &mut Analyzer, kernelmgr: &mut KernelManager, repeaters: &mut [repeater::Repeater], - routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8, + routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8, router: &mut routing::Router, destination: &mut u8) { let result = - drtioaux::recv(0).and_then(|packet| { + drtioaux::recv(0).or_else(|_| Ok(router.get_local_packet())).and_then(|packet| { if let Some(packet) = packet { - process_aux_packet(dma_manager, analyzer, kernelmgr, repeaters, routing_table, rank, destination, packet) + process_aux_packet(dma_manager, analyzer, kernelmgr, repeaters, routing_table, rank, router, destination, packet) } else { Ok(()) } @@ -682,10 +689,12 @@ pub extern fn main() -> i32 { ad9117::init().expect("AD9117 initialization failed"); loop { + let mut router = routing::Router::new(); + while !drtiosat_link_rx_up() { drtiosat_process_errors(); for rep in repeaters.iter_mut() { - rep.service(&routing_table, rank); + rep.service(&routing_table, rank, &mut router); } #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] { @@ -719,10 +728,10 @@ pub extern fn main() -> i32 { while drtiosat_link_rx_up() { drtiosat_process_errors(); process_aux_packets(&mut dma_manager, &mut analyzer, - &mut kernelmgr, &mut repeaters, - &mut routing_table, &mut rank, &mut destination); + &mut kernelmgr, &mut repeaters, &mut routing_table, + &mut rank, &mut router, &mut destination); for rep in repeaters.iter_mut() { - rep.service(&routing_table, rank); + rep.service(&routing_table, rank, &mut router); } #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] { @@ -743,7 +752,38 @@ pub extern fn main() -> i32 { error!("aux packet error: {}", e); } } + if let Some(status) = dma_manager.get_status() { + info!("playback done, error: {}, channel: {}, timestamp: {}", status.error, status.channel, status.timestamp); + let res = router.route(drtioaux::Packet::DmaPlaybackStatus { + destination: status.source, id: status.id, error: status.error, + channel: status.channel, timestamp: status.timestamp + }, &routing_table, rank); + if let Err(e) = res { + warn!("error sending DmaPlaybackStatus: {}", e); + } + } kernelmgr.process_kern_requests(destination); + if let Some(subkernel_finished) = kernelmgr.get_last_finished() { + info!("subkernel {} finished, with exception: {}", subkernel_finished.id, subkernel_finished.with_exception); + let res = router.route(drtioaux::Packet::SubkernelFinished { + destination: subkernel_finished.source, id: subkernel_finished.id, + with_exception: subkernel_finished.with_exception, exception_src: destination + }, &routing_table, rank); + if let Err(e) = res { + warn!("error sending SubkernelFinished: {}", e); + } + } + if kernelmgr.message_is_ready() { + let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; + let meta = kernelmgr.message_get_slice(&mut data_slice).unwrap(); + let res = router.route(drtioaux::Packet::SubkernelMessage { + source: destination, destination: 0, id: kernelmgr.get_current_id().unwrap(), + status: meta.status, length: meta.len as u16, data: data_slice + }, &routing_table, rank); + if let Err(e) = res { + warn!("error sending SubkernelMessage: {}", e); + } + } } drtiosat_reset_phy(true); diff --git a/artiq/firmware/satman/repeater.rs 
b/artiq/firmware/satman/repeater.rs index 9969d5099..96af6ba84 100644 --- a/artiq/firmware/satman/repeater.rs +++ b/artiq/firmware/satman/repeater.rs @@ -1,6 +1,7 @@ use board_artiq::{drtioaux, drtio_routing}; #[cfg(has_drtio_routing)] use board_misoc::{csr, clock}; +use routing::{Router, get_routable_packet_destination}; #[cfg(has_drtio_routing)] fn rep_link_rx_up(repno: u8) -> bool { @@ -48,7 +49,7 @@ impl Repeater { self.state == RepeaterState::Up } - pub fn service(&mut self, routing_table: &drtio_routing::RoutingTable, rank: u8) { + pub fn service(&mut self, routing_table: &drtio_routing::RoutingTable, rank: u8, router: &mut Router) { self.process_local_errors(); match self.state { @@ -106,7 +107,7 @@ impl Repeater { } } RepeaterState::Up => { - self.process_unsolicited_aux(); + self.process_unsolicited_aux(routing_table, rank, router); if !rep_link_rx_up(self.repno) { info!("[REP#{}] link is down", self.repno); self.state = RepeaterState::Down; @@ -121,9 +122,21 @@ impl Repeater { } } - fn process_unsolicited_aux(&self) { + fn process_unsolicited_aux(&self, routing_table: &drtio_routing::RoutingTable, rank: u8, router: &mut Router) { match drtioaux::recv(self.auxno) { - Ok(Some(packet)) => warn!("[REP#{}] unsolicited aux packet: {:?}", self.repno, packet), + Ok(Some(packet)) => { + let destination = get_routable_packet_destination(&packet); + if destination.is_none() { + warn!("[REP#{}] unsolicited aux packet: {:?}", self.repno, packet); + } else { + // routable packet + let res = router.route(packet, routing_table, rank); + match res { + Ok(()) => drtioaux::send(self.auxno, &drtioaux::Packet::RoutingAck).unwrap(), + Err(e) => warn!("[REP#{}] Error routing packet: {:?}", self.repno, e), + } + } + } Ok(None) => (), Err(_) => warn!("[REP#{}] aux packet error", self.repno) } diff --git a/artiq/firmware/satman/routing.rs b/artiq/firmware/satman/routing.rs new file mode 100644 index 000000000..e27ec53c9 --- /dev/null +++ b/artiq/firmware/satman/routing.rs @@ -0,0 +1,122 @@ +use alloc::collections::vec_deque::VecDeque; +use board_artiq::{drtioaux, drtio_routing}; +use board_misoc::csr; + +// Packets from downstream (further satellites) are received and routed appropriately. +// They are passed on immediately if possible (within the subtree), or sent upstream. +// For rank 1 (connected to master) satellites, these packets are passed as an answer to DestinationStatusRequest; +// for higher ranks they go straight upstream, awaiting an ACK to make sure the upstream is not overwhelmed. + +// The forward! macro is not deprecated, as routable packets are only those that can originate +// from both master and satellite, e.g. DDMA and Subkernel. + +pub fn get_routable_packet_destination(packet: &drtioaux::Packet) -> Option<u8> { + let destination = match packet { + // received from downstream + drtioaux::Packet::DmaAddTraceRequest { destination, .. } => destination, + drtioaux::Packet::DmaAddTraceReply { destination, .. } => destination, + drtioaux::Packet::DmaRemoveTraceRequest { destination, .. } => destination, + drtioaux::Packet::DmaRemoveTraceReply { destination, .. } => destination, + drtioaux::Packet::DmaPlaybackRequest { destination, .. } => destination, + drtioaux::Packet::DmaPlaybackReply { destination, .. } => destination, + drtioaux::Packet::SubkernelLoadRunRequest { destination, .. } => destination, + drtioaux::Packet::SubkernelLoadRunReply { destination, .. } => destination, + // received from downstream or produced locally + drtioaux::Packet::SubkernelMessage { destination, ..
} => destination, + drtioaux::Packet::SubkernelMessageAck { destination, .. } => destination, + // "async" - master gets them by deststatreq, satellites get them through the router + drtioaux::Packet::DmaPlaybackStatus { destination, .. } => destination, + drtioaux::Packet::SubkernelFinished { destination, .. } => destination, + _ => return None + }; + Some(*destination) +} + +pub struct Router { + out_messages: VecDeque<drtioaux::Packet>, + local_messages: VecDeque<drtioaux::Packet>, + upstream_ready: bool +} + +impl Router { + pub fn new() -> Router { + Router { + out_messages: VecDeque::new(), + local_messages: VecDeque::new(), + upstream_ready: true + } + } + + + // called by local sources (DDMA, kernel) and by repeaters on receiving unsolicited data + // messages are always buffered for upstream, or passed downstream directly + pub fn route(&mut self, packet: drtioaux::Packet, + _routing_table: &drtio_routing::RoutingTable, _rank: u8 + ) -> Result<(), drtioaux::Error> { + #[cfg(has_drtio_routing)] + { + let destination = get_routable_packet_destination(&packet); + if let Some(destination) = destination { + let hop = _routing_table.0[destination as usize][_rank as usize]; + let auxno = if destination == 0 { 0 } else { hop }; + if hop != 0 { + if hop as usize <= csr::DRTIOREP.len() { + drtioaux::send(auxno, &packet)?; + } else { + self.out_messages.push_back(packet); + } + } else { + self.local_messages.push_back(packet); + } + } else { + return Err(drtioaux::Error::RoutingError); + } + } + #[cfg(not(has_drtio_routing))] + { + self.out_messages.push_back(packet); + } + Ok(()) + } + + // Sends a packet to the required destination, routing if necessary + pub fn send(&mut self, packet: drtioaux::Packet, + _routing_table: &drtio_routing::RoutingTable, _rank: u8) -> Result<(), drtioaux::Error> { + #[cfg(has_drtio_routing)] + { + let destination = get_routable_packet_destination(&packet); + if destination.is_none() || destination == Some(0) { + // send upstream directly (response to master) + drtioaux::send(0, &packet) + } else { + self.route(packet, _routing_table, _rank) + } + } + #[cfg(not(has_drtio_routing))] + { + drtioaux::send(0, &packet) + } + } + + pub fn get_upstream_packet(&mut self, rank: u8) -> Option<drtioaux::Packet> { + // called on DestinationStatusRequest on rank 1, in loop in others + if self.upstream_ready { + let packet = self.out_messages.pop_front(); + if rank > 1 && packet.is_some() { + // packet will be sent out, awaiting ACK + self.upstream_ready = false; + } + packet + } else { + None + } + } + + pub fn routing_ack_received(&mut self) { + self.upstream_ready = true; + } + + pub fn get_local_packet(&mut self) -> Option<drtioaux::Packet> { + self.local_messages.pop_front() + } +} \ No newline at end of file From 4956fac86108621d3bee29638a79739fcd34ef04 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 23 Nov 2023 18:25:07 +0800 Subject: [PATCH 075/296] satman: allow subkernels to start subkernels --- artiq/compiler/embedding.py | 22 +- .../compiler/transforms/artiq_ir_generator.py | 3 +- .../compiler/transforms/llvm_ir_generator.py | 20 +- artiq/coredevice/core.py | 39 ++-- artiq/firmware/ksupport/lib.rs | 8 +- .../firmware/libproto_artiq/drtioaux_proto.rs | 6 +- artiq/firmware/libproto_artiq/kernel_proto.rs | 4 +- artiq/firmware/runtime/rtio_mgt.rs | 64 ++++-- artiq/firmware/runtime/session.rs | 6 +- artiq/firmware/satman/kernel.rs | 197 +++++++++++++----- artiq/firmware/satman/main.rs | 133 ++++++------ artiq/firmware/satman/repeater.rs | 15 +- artiq/firmware/satman/routing.rs | 20 +- artiq/frontend/artiq_compile.py | 23 +-
artiq/test/lit/embedding/subkernel_no_arg.py | 6 +- artiq/test/lit/embedding/subkernel_return.py | 6 +- .../lit/embedding/subkernel_return_none.py | 6 +- artiq/test/lit/embedding/subkernel_self.py | 6 +- .../test/lit/embedding/subkernel_self_args.py | 8 +- .../test/lit/embedding/subkernel_with_arg.py | 8 +- .../lit/embedding/subkernel_with_opt_arg.py | 12 +- 21 files changed, 392 insertions(+), 220 deletions(-) diff --git a/artiq/compiler/embedding.py b/artiq/compiler/embedding.py index 3b6f6ae6c..928e9e75d 100644 --- a/artiq/compiler/embedding.py +++ b/artiq/compiler/embedding.py @@ -48,7 +48,7 @@ class SpecializedFunction: class EmbeddingMap: - def __init__(self): + def __init__(self, subkernels={}): self.object_current_key = 0 self.object_forward_map = {} self.object_reverse_map = {} @@ -64,6 +64,13 @@ class EmbeddingMap: self.function_map = {} self.str_forward_map = {} self.str_reverse_map = {} + + # subkernels: dict of ID: function, just like object_forward_map + # allow the embedding map to be aware of subkernels from other kernels + for key, obj_ref in subkernels.items(): + self.object_forward_map[key] = obj_ref + obj_id = id(obj_ref) + self.object_reverse_map[obj_id] = key self.preallocate_runtime_exception_names(["RuntimeError", "RTIOUnderflow", @@ -165,6 +172,11 @@ class EmbeddingMap: return self.object_reverse_map[obj_id] self.object_current_key += 1 + while self.object_forward_map.get(self.object_current_key): + # make sure there's no collisions with previously inserted subkernels + # their identifiers must be consistent between kernels/subkernels + self.object_current_key += 1 + self.object_forward_map[self.object_current_key] = obj_ref self.object_reverse_map[obj_id] = self.object_current_key return self.object_current_key @@ -200,10 +212,6 @@ class EmbeddingMap: self.object_forward_map.values() )) - def has_rpc_or_subkernel(self): - return any(filter(lambda x: inspect.isfunction(x) or inspect.ismethod(x), - self.object_forward_map.values())) - class ASTSynthesizer: def __init__(self, embedding_map, value_map, quote_function=None, expanded_from=None): @@ -794,7 +802,7 @@ class TypedtreeHasher(algorithm.Visitor): return hash(tuple(freeze(getattr(node, field_name)) for field_name in fields)) class Stitcher: - def __init__(self, core, dmgr, engine=None, print_as_rpc=True, destination=0, subkernel_arg_types=[]): + def __init__(self, core, dmgr, engine=None, print_as_rpc=True, destination=0, subkernel_arg_types=[], subkernels={}): self.core = core self.dmgr = dmgr if engine is None: @@ -816,7 +824,7 @@ class Stitcher: self.functions = {} - self.embedding_map = EmbeddingMap() + self.embedding_map = EmbeddingMap(subkernels) self.value_map = defaultdict(lambda: []) self.definitely_changed = False diff --git a/artiq/compiler/transforms/artiq_ir_generator.py b/artiq/compiler/transforms/artiq_ir_generator.py index fb9560a5d..6998e0ddc 100644 --- a/artiq/compiler/transforms/artiq_ir_generator.py +++ b/artiq/compiler/transforms/artiq_ir_generator.py @@ -2557,7 +2557,8 @@ class ARTIQIRGenerator(algorithm.Visitor): if types.is_method(fn): fn = types.get_method_function(fn) sid = ir.Constant(fn.sid, builtins.TInt32()) - return self.append(ir.Builtin("subkernel_preload", [sid], builtins.TNone())) + dest = ir.Constant(fn.destination, builtins.TInt32()) + return self.append(ir.Builtin("subkernel_preload", [sid, dest], builtins.TNone())) elif types.is_exn_constructor(typ): return self.alloc_exn(node.type, *[self.visit(arg_node) for arg_node in node.args]) elif types.is_constructor(typ): diff 
--git a/artiq/compiler/transforms/llvm_ir_generator.py b/artiq/compiler/transforms/llvm_ir_generator.py index ebde5b53f..3b4e165f3 100644 --- a/artiq/compiler/transforms/llvm_ir_generator.py +++ b/artiq/compiler/transforms/llvm_ir_generator.py @@ -399,9 +399,9 @@ class LLVMIRGenerator: llty = ll.FunctionType(lli32, [llptr]) elif name == "subkernel_send_message": - llty = ll.FunctionType(llvoid, [lli32, lli8, llsliceptr, llptrptr]) + llty = ll.FunctionType(llvoid, [lli32, lli1, lli8, lli8, llsliceptr, llptrptr]) elif name == "subkernel_load_run": - llty = ll.FunctionType(llvoid, [lli32, lli1]) + llty = ll.FunctionType(llvoid, [lli32, lli8, lli1]) elif name == "subkernel_await_finish": llty = ll.FunctionType(llvoid, [lli32, lli64]) elif name == "subkernel_await_message": @@ -1417,7 +1417,8 @@ class LLVMIRGenerator: return self._build_rpc_recv(insn.type, llstackptr) elif insn.op == "subkernel_preload": llsid = self.map(insn.operands[0]) - return self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, ll.Constant(lli1, 0)], + lldest = ll.Constant(lli8, insn.operands[1].value) + return self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, lldest, ll.Constant(lli1, 0)], name="subkernel.preload") else: assert False @@ -1660,6 +1661,7 @@ class LLVMIRGenerator: def _build_subkernel_call(self, fun_loc, fun_type, args): llsid = ll.Constant(lli32, fun_type.sid) + lldest = ll.Constant(lli8, fun_type.destination) tag = b"" for arg in args: @@ -1678,7 +1680,7 @@ class LLVMIRGenerator: tag += b":" # run the kernel first - self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, ll.Constant(lli1, 1)]) + self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, lldest, ll.Constant(lli1, 1)]) # arg sent in the same vein as RPC llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [], @@ -1708,8 +1710,10 @@ class LLVMIRGenerator: llargcount = ll.Constant(lli8, len(args)) + llisreturn = ll.Constant(lli1, False) + self.llbuilder.call(self.llbuiltin("subkernel_send_message"), - [llsid, llargcount, lltagptr, llargs]) + [llsid, llisreturn, lldest, llargcount, lltagptr, llargs]) self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr]) return llsid @@ -1746,10 +1750,12 @@ class LLVMIRGenerator: llretslot = self.llbuilder.bitcast(llretslot, llptr) self.llbuilder.store(llretslot, llrets) - llsid = ll.Constant(lli32, 0) # return goes back to master, sid is ignored + llsid = ll.Constant(lli32, 0) # return goes back to the caller, sid is ignored lltagcount = ll.Constant(lli8, 1) # only one thing is returned + llisreturn = ll.Constant(lli1, True) # it's a return, so destination is ignored + lldest = ll.Constant(lli8, 0) self.llbuilder.call(self.llbuiltin("subkernel_send_message"), - [llsid, lltagcount, lltagptr, llrets]) + [llsid, llisreturn, lldest, lltagcount, lltagptr, llrets]) def process_Call(self, insn): functiontyp = insn.target_function().type diff --git a/artiq/coredevice/core.py b/artiq/coredevice/core.py index acc1e9c8d..26d60e92e 100644 --- a/artiq/coredevice/core.py +++ b/artiq/coredevice/core.py @@ -120,13 +120,15 @@ class Core: def compile(self, function, args, kwargs, set_result=None, attribute_writeback=True, print_as_rpc=True, - target=None, destination=0, subkernel_arg_types=[]): + target=None, destination=0, subkernel_arg_types=[], + subkernels={}): try: engine = _DiagnosticEngine(all_errors_are_fatal=True) stitcher = Stitcher(engine=engine, core=self, dmgr=self.dmgr, print_as_rpc=print_as_rpc, - destination=destination, 
subkernel_arg_types=subkernel_arg_types) + destination=destination, subkernel_arg_types=subkernel_arg_types, + subkernels=subkernels) stitcher.stitch_call(function, args, kwargs, set_result) stitcher.finalize() @@ -165,7 +167,7 @@ class Core: self._run_compiled(kernel_library, embedding_map, symbolizer, demangler) return result - def compile_subkernel(self, sid, subkernel_fn, embedding_map, args, subkernel_arg_types): + def compile_subkernel(self, sid, subkernel_fn, embedding_map, args, subkernel_arg_types, subkernels): # pass self to subkernels (if applicable) # assuming the first argument is self subkernel_args = getfullargspec(subkernel_fn.artiq_embedded.function) @@ -179,17 +181,30 @@ class Core: object_map, kernel_library, _, _, _ = \ self.compile(subkernel_fn, self_arg, {}, attribute_writeback=False, print_as_rpc=False, target=target, destination=destination, - subkernel_arg_types=subkernel_arg_types.get(sid, [])) - if object_map.has_rpc_or_subkernel(): - raise ValueError("Subkernel must not use RPC or subkernels in other destinations") - return destination, kernel_library + subkernel_arg_types=subkernel_arg_types.get(sid, []), + subkernels=subkernels) + if object_map.has_rpc(): + raise ValueError("Subkernel must not use RPC") + return destination, kernel_library, object_map def compile_and_upload_subkernels(self, embedding_map, args, subkernel_arg_types): - for sid, subkernel_fn in embedding_map.subkernels().items(): - destination, kernel_library = \ - self.compile_subkernel(sid, subkernel_fn, embedding_map, - args, subkernel_arg_types) - self.comm.upload_subkernel(kernel_library, sid, destination) + subkernels = embedding_map.subkernels() + subkernels_compiled = [] + while True: + new_subkernels = {} + for sid, subkernel_fn in subkernels.items(): + if sid in subkernels_compiled: + continue + destination, kernel_library, sub_embedding_map = \ + self.compile_subkernel(sid, subkernel_fn, embedding_map, + args, subkernel_arg_types, subkernels) + self.comm.upload_subkernel(kernel_library, sid, destination) + new_subkernels.update(sub_embedding_map.subkernels()) + subkernels_compiled.append(sid) + if new_subkernels == subkernels: + break + subkernels.update(new_subkernels) + def precompile(self, function, *args, **kwargs): """Precompile a kernel and return a callable that executes it on the core device diff --git a/artiq/firmware/ksupport/lib.rs b/artiq/firmware/ksupport/lib.rs index 04153d6d6..6f313dd6b 100644 --- a/artiq/firmware/ksupport/lib.rs +++ b/artiq/firmware/ksupport/lib.rs @@ -460,8 +460,8 @@ extern fn dma_playback(_timestamp: i64, _ptr: i32, _uses_ddma: bool) { } #[unwind(allowed)] -extern fn subkernel_load_run(id: u32, run: bool) { - send(&SubkernelLoadRunRequest { id: id, run: run }); +extern fn subkernel_load_run(id: u32, destination: u8, run: bool) { + send(&SubkernelLoadRunRequest { id: id, destination: destination, run: run }); recv!(&SubkernelLoadRunReply { succeeded } => { if !succeeded { raise!("SubkernelError", @@ -489,9 +489,11 @@ extern fn subkernel_await_finish(id: u32, timeout: u64) { } #[unwind(aborts)] -extern fn subkernel_send_message(id: u32, count: u8, tag: &CSlice, data: *const *const ()) { +extern fn subkernel_send_message(id: u32, is_return: bool, destination: u8, + count: u8, tag: &CSlice, data: *const *const ()) { send(&SubkernelMsgSend { id: id, + destination: if is_return { None } else { Some(destination) }, count: count, tag: tag.as_ref(), data: data diff --git a/artiq/firmware/libproto_artiq/drtioaux_proto.rs 
b/artiq/firmware/libproto_artiq/drtioaux_proto.rs index 803c5fd69..ca38e6b71 100644 --- a/artiq/firmware/libproto_artiq/drtioaux_proto.rs +++ b/artiq/firmware/libproto_artiq/drtioaux_proto.rs @@ -116,7 +116,7 @@ pub enum Packet { DmaRemoveTraceReply { destination: u8, succeeded: bool }, DmaPlaybackRequest { source: u8, destination: u8, id: u32, timestamp: u64 }, DmaPlaybackReply { destination: u8, succeeded: bool }, - DmaPlaybackStatus { destination: u8, id: u32, error: u8, channel: u32, timestamp: u64 }, + DmaPlaybackStatus { source: u8, destination: u8, id: u32, error: u8, channel: u32, timestamp: u64 }, SubkernelAddDataRequest { destination: u8, id: u32, status: PayloadStatus, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] }, SubkernelAddDataReply { succeeded: bool }, @@ -322,6 +322,7 @@ impl Packet { succeeded: reader.read_bool()? }, 0xb6 => Packet::DmaPlaybackStatus { + source: reader.read_u8()?, destination: reader.read_u8()?, id: reader.read_u32()?, error: reader.read_u8()?, @@ -617,8 +618,9 @@ impl Packet { writer.write_u8(destination)?; writer.write_bool(succeeded)?; }, - Packet::DmaPlaybackStatus { destination, id, error, channel, timestamp } => { + Packet::DmaPlaybackStatus { source, destination, id, error, channel, timestamp } => { writer.write_u8(0xb6)?; + writer.write_u8(source)?; writer.write_u8(destination)?; writer.write_u32(id)?; writer.write_u8(error)?; diff --git a/artiq/firmware/libproto_artiq/kernel_proto.rs b/artiq/firmware/libproto_artiq/kernel_proto.rs index 5f7795375..108c83401 100644 --- a/artiq/firmware/libproto_artiq/kernel_proto.rs +++ b/artiq/firmware/libproto_artiq/kernel_proto.rs @@ -103,11 +103,11 @@ pub enum Message<'a> { SpiReadReply { succeeded: bool, data: u32 }, SpiBasicReply { succeeded: bool }, - SubkernelLoadRunRequest { id: u32, run: bool }, + SubkernelLoadRunRequest { id: u32, destination: u8, run: bool }, SubkernelLoadRunReply { succeeded: bool }, SubkernelAwaitFinishRequest { id: u32, timeout: u64 }, SubkernelAwaitFinishReply { status: SubkernelStatus }, - SubkernelMsgSend { id: u32, count: u8, tag: &'a [u8], data: *const *const () }, + SubkernelMsgSend { id: u32, destination: Option, count: u8, tag: &'a [u8], data: *const *const () }, SubkernelMsgRecvRequest { id: u32, timeout: u64, tags: &'a [u8] }, SubkernelMsgRecvReply { status: SubkernelStatus, count: u8 }, diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index e469e1ea9..fcb61da26 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -65,7 +65,7 @@ pub mod drtio { let up_destinations = up_destinations.clone(); let ddma_mutex = ddma_mutex.clone(); let subkernel_mutex = subkernel_mutex.clone(); - io.spawn(8192, move |io| { + io.spawn(10240, move |io| { let routing_table = routing_table.borrow(); link_thread(io, &aux_mutex, &routing_table, &up_destinations, &ddma_mutex, &subkernel_mutex); }); @@ -96,17 +96,57 @@ pub mod drtio { } } - fn process_async_packets(io: &Io, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, - routing_table: &drtio_routing::RoutingTable, linkno: u8, packet: drtioaux::Packet - ) -> Option { - // returns None if a packet has been consumed or re-routed - macro_rules! 
route_packet { - ($dest:ident) => {{ - let dest_link = routing_table.0[$dest as usize][0] - 1; - if dest_link == linkno { - warn!("[LINK#{}] Re-routed packet would return to the same link, dropping: {:?}", linkno, packet); - } else if $dest == 0 { - warn!("[LINK#{}] Received invalid routable packet: {:?}", linkno, packet) + fn process_async_packets(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8) + { + if link_has_async_ready(linkno) { + loop { + let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::RoutingRetrievePackets); + if let Ok(packet) = reply { + match packet { + // packets to be consumed locally + drtioaux::Packet::DmaPlaybackStatus { id, source, destination: 0, error, channel, timestamp } => { + remote_dma::playback_done(io, ddma_mutex, id, source, error, channel, timestamp); + }, + drtioaux::Packet::SubkernelFinished { id, destination: 0, with_exception, exception_src } => { + subkernel::subkernel_finished(io, subkernel_mutex, id, with_exception, exception_src); + }, + drtioaux::Packet::SubkernelMessage { id, source: from, destination: 0, status, length, data } => { + subkernel::message_handle_incoming(io, subkernel_mutex, id, status, length as usize, &data); + // acknowledge receiving part of the message + drtioaux::send(linkno, + &drtioaux::Packet::SubkernelMessageAck { destination: from } + ).unwrap(); + }, + // routable packets + drtioaux::Packet::DmaAddTraceRequest { destination, .. } | + drtioaux::Packet::DmaAddTraceReply { destination, .. } | + drtioaux::Packet::DmaRemoveTraceRequest { destination, .. } | + drtioaux::Packet::DmaRemoveTraceReply { destination, .. } | + drtioaux::Packet::DmaPlaybackRequest { destination, .. } | + drtioaux::Packet::DmaPlaybackReply { destination, .. } | + drtioaux::Packet::SubkernelLoadRunRequest { destination, .. } | + drtioaux::Packet::SubkernelLoadRunReply { destination, .. } | + drtioaux::Packet::SubkernelMessage { destination, .. } | + drtioaux::Packet::SubkernelMessageAck { destination, .. } | + drtioaux::Packet::DmaPlaybackStatus { destination, .. } | + drtioaux::Packet::SubkernelFinished { destination, .. 
} => { + let dest_link = routing_table.0[destination as usize][0] - 1; + if dest_link == linkno { + warn!("[LINK#{}] Re-routed packet would return to the same link, dropping: {:?}", linkno, packet); + } else if destination == 0 { + warn!("[LINK#{}] Received invalid routable packet: {:?}", linkno, packet) + } else { + drtioaux::send(dest_link, &packet).unwrap(); + } + } + + drtioaux::Packet::RoutingNoPackets => break, + + other => warn!("[LINK#{}] Received an unroutable packet: {:?}", linkno, other) + } + } else { + warn!("[LINK#{}] Error handling async packets ({})", linkno, reply.unwrap_err()); } else { drtioaux::send(dest_link, &packet).unwrap(); diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index e930b2b1b..129d5013f 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -631,6 +631,8 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, unsafe { kernel::stop() } session.kernel_state = KernelState::Absent; unsafe { session.congress.cache.unborrow() } + #[cfg(has_drtio)] + subkernel::clear_subkernels(io, _subkernel_mutex)?; match stream { None => return Ok(true), @@ -668,7 +670,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, } } #[cfg(has_drtio)] - &kern::SubkernelLoadRunRequest { id, run } => { + &kern::SubkernelLoadRunRequest { id, destination: _, run } => { let succeeded = match subkernel::load( io, aux_mutex, _subkernel_mutex, routing_table, id, run) { Ok(()) => true, @@ -699,7 +701,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, kern_send(io, &kern::SubkernelAwaitFinishReply { status: status }) } #[cfg(has_drtio)] - &kern::SubkernelMsgSend { id, count, tag, data } => { + &kern::SubkernelMsgSend { id, destination: _, count, tag, data } => { subkernel::message_send(io, aux_mutex, _subkernel_mutex, routing_table, id, count, tag, data)?; kern_acknowledge() } diff --git a/artiq/firmware/satman/kernel.rs b/artiq/firmware/satman/kernel.rs index 33b5ed0bb..07fed8094 100644 --- a/artiq/firmware/satman/kernel.rs +++ b/artiq/firmware/satman/kernel.rs @@ -2,10 +2,10 @@ use core::{mem, option::NoneError, cmp::min}; use alloc::{string::String, format, vec::Vec, collections::{btree_map::BTreeMap, vec_deque::VecDeque}}; use cslice::AsCSlice; -use board_artiq::{mailbox, spi}; +use board_artiq::{drtioaux, drtio_routing::RoutingTable, mailbox, spi}; use board_misoc::{csr, clock, i2c}; use proto_artiq::{ - drtioaux_proto::PayloadStatus, + drtioaux_proto::PayloadStatus, kernel_proto as kern, session_proto::Reply::KernelException as HostKernelException, rpc_proto as rpc}; @@ -15,6 +15,7 @@ use kernel::eh_artiq::StackPointerBacktrace; use ::{cricon_select, RtioMaster}; use cache::Cache; +use routing::Router; use SAT_PAYLOAD_MAX_SIZE; use MASTER_PAYLOAD_MAX_SIZE; @@ -62,7 +63,9 @@ enum KernelState { Loaded, Running, MsgAwait { max_time: u64, tags: Vec }, - MsgSending + MsgSending, + SubkernelAwaitLoad, + SubkernelAwaitFinish { max_time: u64, id: u32 } } #[derive(Debug)] @@ -74,6 +77,7 @@ pub enum Error { NoMessage, AwaitingMessage, SubkernelIoError, + DrtioError, KernelException(Sliceable) } @@ -89,6 +93,12 @@ impl From> for Error { } } +impl From> for Error { + fn from(_value: drtioaux::Error) -> Error { + Error::DrtioError + } +} + macro_rules! unexpected { ($($arg:tt)*) => (return Err(Error::Unexpected(format!($($arg)*)))); } @@ -97,7 +107,8 @@ macro_rules! 
unexpected { #[derive(Debug)] pub struct Sliceable { it: usize, - data: Vec + data: Vec, + destination: u8 } /* represents interkernel messages */ @@ -109,7 +120,6 @@ struct Message { #[derive(PartialEq)] enum OutMessageState { NoMessage, - MessageReady, MessageBeingSent, MessageSent, MessageAcknowledged @@ -129,7 +139,8 @@ struct Session { log_buffer: String, last_exception: Option, source: u8, // which destination requested running the kernel - messages: MessageManager + messages: MessageManager, + subkernels_finished: VecDeque<(u32, bool, u8)> // tuple of id, with_exception, exception_source } #[derive(Debug)] @@ -154,6 +165,7 @@ pub struct SubkernelFinished { } pub struct SliceMeta { + pub destination: u8, pub len: u16, pub status: PayloadStatus } @@ -169,6 +181,7 @@ macro_rules! get_slice_fn { self.it += len; SliceMeta { + destination: self.destination, len: len as u16, status: status } @@ -177,10 +190,11 @@ macro_rules! get_slice_fn { } impl Sliceable { - pub fn new(data: Vec) -> Sliceable { + pub fn new(destination: u8, data: Vec) -> Sliceable { Sliceable { it: 0, - data: data + data: data, + destination: destination } } @@ -219,17 +233,6 @@ impl MessageManager { } } - pub fn is_outgoing_ready(&mut self) -> bool { - // called by main loop, to see if there's anything to send, will send it afterwards - match self.out_state { - OutMessageState::MessageReady => { - self.out_state = OutMessageState::MessageBeingSent; - true - }, - _ => false - } - } - pub fn was_message_acknowledged(&mut self) -> bool { match self.out_state { OutMessageState::MessageAcknowledged => { @@ -269,14 +272,27 @@ impl MessageManager { } } - pub fn accept_outgoing(&mut self, count: u8, tag: &[u8], data: *const *const ()) -> Result<(), Error> { + pub fn accept_outgoing(&mut self, id: u32, self_destination: u8, destination: u8, + count: u8, tag: &[u8], data: *const *const (), + routing_table: &RoutingTable, rank: u8, router: &mut Router + ) -> Result<(), Error> { let mut writer = Cursor::new(Vec::new()); rpc::send_args(&mut writer, 0, tag, data, false)?; // skip service tag, but write the count let mut data = writer.into_inner().split_off(3); data[0] = count; - self.out_message = Some(Sliceable::new(data)); - self.out_state = OutMessageState::MessageReady; + self.out_message = Some(Sliceable::new(destination, data)); + + let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; + self.out_state = OutMessageState::MessageBeingSent; + let meta = self.get_outgoing_slice(&mut data_slice).unwrap(); + let res = router.route(drtioaux::Packet::SubkernelMessage { + source: self_destination, destination: destination, id: id, + status: meta.status, length: meta.len as u16, data: data_slice + }, routing_table, rank, self_destination); + if let Err(e) = res { + warn!("error sending SubkernelMessage: {}", e); + } Ok(()) } @@ -292,7 +308,8 @@ impl Session { log_buffer: String::new(), last_exception: None, source: 0, - messages: MessageManager::new() + messages: MessageManager::new(), + subkernels_finished: VecDeque::new() } } @@ -300,7 +317,8 @@ impl Session { match self.kernel_state { KernelState::Absent | KernelState::Loaded => false, KernelState::Running | KernelState::MsgAwait { .. } | - KernelState::MsgSending => true + KernelState::MsgSending | KernelState::SubkernelAwaitLoad | + KernelState::SubkernelAwaitFinish { .. 
} => true } } @@ -408,14 +426,6 @@ impl Manager { self.session.messages.ack_slice() } - pub fn message_is_ready(&mut self) -> bool { - self.session.messages.is_outgoing_ready() - } - - pub fn get_last_finished(&mut self) -> Option { - self.last_finished.take() - } - pub fn load(&mut self, id: u32) -> Result<(), Error> { if self.current_id == id && self.session.kernel_state == KernelState::Loaded { return Ok(()) @@ -439,6 +449,7 @@ impl Manager { } kern::LoadReply(Err(error)) => { kernel_cpu::stop(); + error!("load error: {:?}", error); Err(Error::Load(format!("{}", error))) } other => { @@ -452,7 +463,7 @@ impl Manager { pub fn exception_get_slice(&mut self, data_slice: &mut [u8; SAT_PAYLOAD_MAX_SIZE]) -> SliceMeta { match self.session.last_exception.as_mut() { Some(exception) => exception.get_slice_sat(data_slice), - None => SliceMeta { len: 0, status: PayloadStatus::FirstAndLast } + None => SliceMeta { destination: 0, len: 0, status: PayloadStatus::FirstAndLast } } } @@ -477,12 +488,18 @@ impl Manager { backtrace: &[], async_errors: 0 }).write_to(&mut writer) { - Ok(_) => self.session.last_exception = Some(Sliceable::new(writer.into_inner())), + Ok(_) => self.session.last_exception = Some(Sliceable::new(0, writer.into_inner())), Err(_) => error!("Error writing exception data") } } - pub fn process_kern_requests(&mut self, destination: u8) { + pub fn process_kern_requests(&mut self, router: &mut Router, routing_table: &RoutingTable, rank: u8, destination: u8) { + macro_rules! finished { + ($with_exception:expr) => {{ Some(SubkernelFinished { + source: self.session.source, id: self.current_id, + with_exception: $with_exception, exception_source: destination + }) }} + } if !self.is_running() { return; } @@ -495,34 +512,37 @@ impl Manager { self.session.kernel_state = KernelState::Absent; unsafe { self.cache.unborrow() } self.session.last_exception = Some(exception); - self.last_finished = Some(SubkernelFinished { - source: self.session.source, id: self.current_id, with_exception: true, exception_source: destination - }) + self.last_finished = finished!(true); }, Err(e) => { error!("Error while running processing external messages: {:?}", e); self.stop(); self.runtime_exception(e); - self.last_finished = Some(SubkernelFinished { - source: self.session.source, id: self.current_id, with_exception: true, exception_source: destination - }) + self.last_finished = finished!(true); } } - match self.process_kern_message(destination) { + match self.process_kern_message(router, routing_table, rank, destination) { Ok(Some(with_exception)) => { - self.last_finished = Some(SubkernelFinished { - source: self.session.source, id: self.current_id, with_exception: with_exception, exception_source: destination - }) + self.last_finished = finished!(with_exception) }, Ok(None) | Err(Error::NoMessage) => (), Err(e) => { error!("Error while running kernel: {:?}", e); self.stop(); self.runtime_exception(e); - self.last_finished = Some(SubkernelFinished { - source: self.session.source, id: self.current_id, with_exception: true, exception_source: destination - }) + self.last_finished = finished!(true); + } + } + + if let Some(subkernel_finished) = self.last_finished.take() { + info!("subkernel {} finished, with exception: {}", subkernel_finished.id, subkernel_finished.with_exception); + let res = router.route(drtioaux::Packet::SubkernelFinished { + destination: subkernel_finished.source, id: subkernel_finished.id, + with_exception: subkernel_finished.with_exception, exception_src: destination + }, &routing_table, 
rank, destination); + if let Err(e) = res { + warn!("error sending SubkernelFinished: {}", e); } } } @@ -552,11 +572,64 @@ impl Manager { Err(Error::AwaitingMessage) } }, + KernelState::SubkernelAwaitFinish { max_time, id } => { + if clock::get_ms() > *max_time { + kern_send(&kern::SubkernelAwaitFinishReply { status: kern::SubkernelStatus::Timeout })?; + self.session.kernel_state = KernelState::Running; + } else { + let mut i = 0; + for status in self.session.subkernels_finished.iter() { + if status.0 == *id { + break; + } + i += 1; + } + if let Some(finish_status) = self.session.subkernels_finished.remove(i) { + if finish_status.1 { + unsafe { kernel_cpu::stop() } + self.session.kernel_state = KernelState::Absent; + unsafe { self.cache.unborrow() } + self.last_finished = Some(SubkernelFinished { + source: self.session.source, id: self.current_id, + with_exception: true, exception_source: finish_status.2 + }) + } else { + kern_send(&kern::SubkernelAwaitFinishReply { status: kern::SubkernelStatus::NoError })?; + self.session.kernel_state = KernelState::Running; + } + } + } + Ok(()) + } _ => Ok(()) } } - fn process_kern_message(&mut self, destination: u8) -> Result, Error> { + pub fn subkernel_load_run_reply(&mut self, succeeded: bool, self_destination: u8) { + if self.session.kernel_state == KernelState::SubkernelAwaitLoad { + if let Err(e) = kern_send(&kern::SubkernelLoadRunReply { succeeded: succeeded }) { + self.stop(); + self.runtime_exception(e); + self.last_finished = Some(SubkernelFinished { + source: self.session.source, id: self.current_id, + with_exception: true, exception_source: self_destination + }) + } else { + self.session.kernel_state = KernelState::Running; + } + } else { + warn!("received unsolicited SubkernelLoadRunReply"); + } + } + + pub fn remote_subkernel_finished(&mut self, id: u32, with_exception: bool, exception_source: u8) { + self.session.subkernels_finished.push_back((id, with_exception, exception_source)); + } + + fn process_kern_message(&mut self, router: &mut Router, + routing_table: &RoutingTable, + rank: u8, destination: u8 + ) -> Result, Error> { // returns Ok(with_exception) on finish // None if the kernel is still running kern_recv(|request| { @@ -626,8 +699,14 @@ impl Manager { return Ok(Some(true)) } - &kern::SubkernelMsgSend { id: _, count, tag, data } => { - self.session.messages.accept_outgoing(count, tag, data)?; + &kern::SubkernelMsgSend { id: _, destination: msg_dest, count, tag, data } => { + let dest = match msg_dest { + Some(dest) => dest, + None => self.session.source + }; + self.session.messages.accept_outgoing(self.current_id, destination, + dest, count, tag, data, + routing_table, rank, router)?; // acknowledge after the message is sent self.session.kernel_state = KernelState::MsgSending; Ok(()) @@ -639,6 +718,20 @@ impl Manager { Ok(()) }, + &kern::SubkernelLoadRunRequest { id, destination: sk_destination, run } => { + self.session.kernel_state = KernelState::SubkernelAwaitLoad; + router.route(drtioaux::Packet::SubkernelLoadRunRequest { + source: destination, destination: sk_destination, id: id, run: run + }, routing_table, rank, destination).map_err(|_| Error::DrtioError)?; + kern_acknowledge() + } + + &kern::SubkernelAwaitFinishRequest{ id, timeout } => { + let max_time = clock::get_ms() + timeout as u64; + self.session.kernel_state = KernelState::SubkernelAwaitFinish { max_time: max_time, id: id }; + kern_acknowledge() + } + request => unexpected!("unexpected request {:?} from kernel CPU", request) }.and(Ok(None)) }) @@ -712,7 +805,7 
@@ fn slice_kernel_exception(exceptions: &[Option], async_errors: 0 }).write_to(&mut writer) { // save last exception data to be received by master - Ok(_) => Ok(Sliceable::new(writer.into_inner())), + Ok(_) => Ok(Sliceable::new(0, writer.into_inner())), Err(_) => Err(Error::SubkernelIoError) } } diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index 2747f16c6..64eab6e77 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -132,46 +132,46 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg let hop = 0; if hop == 0 { + *self_destination = destination; // async messages if *rank == 1 { if let Some(packet) = router.get_upstream_packet(*rank) { // pass any async or routed packets to master // this does mean that DDMA/SK packets to master will "trickle down" to higher rank - drtioaux::send(0, &packet)?; + return drtioaux::send(0, &packet) } - } else { - let errors; + } + let errors; + unsafe { + errors = csr::drtiosat::rtio_error_read(); + } + if errors & 1 != 0 { + let channel; unsafe { - errors = csr::drtiosat::rtio_error_read(); + channel = csr::drtiosat::sequence_error_channel_read(); + csr::drtiosat::rtio_error_write(1); } - if errors & 1 != 0 { - let channel; - unsafe { - channel = csr::drtiosat::sequence_error_channel_read(); - csr::drtiosat::rtio_error_write(1); - } - drtioaux::send(0, - &drtioaux::Packet::DestinationSequenceErrorReply { channel })?; - } else if errors & 2 != 0 { - let channel; - unsafe { - channel = csr::drtiosat::collision_channel_read(); - csr::drtiosat::rtio_error_write(2); - } - drtioaux::send(0, - &drtioaux::Packet::DestinationCollisionReply { channel })?; - } else if errors & 4 != 0 { - let channel; - unsafe { - channel = csr::drtiosat::busy_channel_read(); - csr::drtiosat::rtio_error_write(4); - } - drtioaux::send(0, - &drtioaux::Packet::DestinationBusyReply { channel })?; + drtioaux::send(0, + &drtioaux::Packet::DestinationSequenceErrorReply { channel })?; + } else if errors & 2 != 0 { + let channel; + unsafe { + channel = csr::drtiosat::collision_channel_read(); + csr::drtiosat::rtio_error_write(2); } - else { - drtioaux::send(0, &drtioaux::Packet::DestinationOkReply)?; + drtioaux::send(0, + &drtioaux::Packet::DestinationCollisionReply { channel })?; + } else if errors & 4 != 0 { + let channel; + unsafe { + channel = csr::drtiosat::busy_channel_read(); + csr::drtiosat::rtio_error_write(4); } + drtioaux::send(0, + &drtioaux::Packet::DestinationBusyReply { channel })?; + } + else { + drtioaux::send(0, &drtioaux::Packet::DestinationOkReply)?; } } @@ -196,7 +196,6 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg } } } - Ok(()) } @@ -378,14 +377,14 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg let succeeded = dmamgr.add(source, id, status, &trace, length as usize).is_ok(); router.send(drtioaux::Packet::DmaAddTraceReply { destination: source, succeeded: succeeded - }, _routing_table, *rank) + }, _routing_table, *rank, *self_destination) } drtioaux::Packet::DmaRemoveTraceRequest { source, destination: _destination, id } => { forward!(_routing_table, _destination, *rank, _repeaters, &packet); let succeeded = dmamgr.erase(source, id).is_ok(); router.send(drtioaux::Packet::DmaRemoveTraceReply { destination: source, succeeded: succeeded - }, _routing_table, *rank) + }, _routing_table, *rank, *self_destination) } drtioaux::Packet::DmaPlaybackRequest { source, destination: _destination, id, timestamp } => { 
forward!(_routing_table, _destination, *rank, _repeaters, &packet); @@ -393,7 +392,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg let succeeded = !kernelmgr.is_running() && dmamgr.playback(source, id, timestamp).is_ok(); router.send(drtioaux::Packet::DmaPlaybackReply { destination: source, succeeded: succeeded - }, _routing_table, *rank) + }, _routing_table, *rank, *self_destination) } drtioaux::Packet::SubkernelAddDataRequest { destination, id, status, length, data } => { @@ -418,7 +417,19 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg router.send(drtioaux::Packet::SubkernelLoadRunReply { destination: source, succeeded: succeeded }, - _routing_table, *rank) + _routing_table, *rank, *self_destination) + } + drtioaux::Packet::SubkernelLoadRunReply { destination: _destination, succeeded } => { + forward!(_routing_table, _destination, *rank, _repeaters, &packet); + // received if local subkernel started another, remote subkernel + kernelmgr.subkernel_load_run_reply(succeeded, *self_destination); + Ok(()) + } + // { destination: u8, id: u32, with_exception: bool, exception_src: u8 }, + drtioaux::Packet::SubkernelFinished { destination: _destination, id, with_exception, exception_src } => { + forward!(_routing_table, _destination, *rank, _repeaters, &packet); + kernelmgr.remote_subkernel_finished(id, with_exception, exception_src); + Ok(()) } drtioaux::Packet::SubkernelExceptionRequest { destination: _destination } => { forward!(_routing_table, _destination, *rank, _repeaters, &packet); @@ -435,7 +446,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg kernelmgr.message_handle_incoming(status, length as usize, &data); router.send(drtioaux::Packet::SubkernelMessageAck { destination: source - }, _routing_table, *rank) + }, _routing_table, *rank, *self_destination) } drtioaux::Packet::SubkernelMessageAck { destination: _destination } => { forward!(_routing_table, _destination, *rank, _repeaters, &packet); @@ -443,10 +454,9 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; if let Some(meta) = kernelmgr.message_get_slice(&mut data_slice) { router.send(drtioaux::Packet::SubkernelMessage { - source: *self_destination, destination: 0, id: kernelmgr.get_current_id().unwrap(), + source: *self_destination, destination: meta.destination, id: kernelmgr.get_current_id().unwrap(), status: meta.status, length: meta.len as u16, data: data_slice - }, - _routing_table, *rank)?; + }, _routing_table, *rank, *self_destination)?; } else { error!("Error receiving message slice"); } @@ -694,7 +704,7 @@ pub extern fn main() -> i32 { while !drtiosat_link_rx_up() { drtiosat_process_errors(); for rep in repeaters.iter_mut() { - rep.service(&routing_table, rank, &mut router); + rep.service(&routing_table, rank, destination, &mut router); } #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] { @@ -731,7 +741,7 @@ pub extern fn main() -> i32 { &mut kernelmgr, &mut repeaters, &mut routing_table, &mut rank, &mut router, &mut destination); for rep in repeaters.iter_mut() { - rep.service(&routing_table, rank, &mut router); + rep.service(&routing_table, rank, destination, &mut router); } #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] { @@ -754,34 +764,21 @@ pub extern fn main() -> i32 { } if let Some(status) = dma_manager.get_status() { info!("playback done, error: {}, channel: {}, timestamp: {}", 
status.error, status.channel, status.timestamp); - let res = router.route(drtioaux::Packet::DmaPlaybackStatus { - destination: status.source, id: status.id, error: status.error, - channel: status.channel, timestamp: status.timestamp - }, &routing_table, rank); - if let Err(e) = res { - warn!("error sending DmaPlaybackStatus: {}", e); - } + router.route(drtioaux::Packet::DmaPlaybackStatus { + source: destination, destination: status.source, id: status.id, + error: status.error, channel: status.channel, timestamp: status.timestamp + }, &routing_table, rank, destination); } - kernelmgr.process_kern_requests(destination); - if let Some(subkernel_finished) = kernelmgr.get_last_finished() { - info!("subkernel {} finished, with exception: {}", subkernel_finished.id, subkernel_finished.with_exception); - let res = router.route(drtioaux::Packet::SubkernelFinished { - destination: subkernel_finished.source, id: subkernel_finished.id, - with_exception: subkernel_finished.with_exception, exception_src: destination - }, &routing_table, rank); - if let Err(e) = res { - warn!("error sending SubkernelFinished: {}", e); - } - } - if kernelmgr.message_is_ready() { - let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; - let meta = kernelmgr.message_get_slice(&mut data_slice).unwrap(); - let res = router.route(drtioaux::Packet::SubkernelMessage { - source: destination, destination: 0, id: kernelmgr.get_current_id().unwrap(), - status: meta.status, length: meta.len as u16, data: data_slice - }, &routing_table, rank); - if let Err(e) = res { - warn!("error sending SubkernelMessage: {}", e); + + kernelmgr.process_kern_requests(&mut router, &routing_table, rank, destination); + + if rank > 1 { + if let Some(packet) = router.get_upstream_packet(rank) { + // in sat-sat communications, it can be async + let res = drtioaux::send(0, &packet); + if let Err(e) = res { + warn!("error routing packet: {}", e); + } } } } diff --git a/artiq/firmware/satman/repeater.rs b/artiq/firmware/satman/repeater.rs index 96af6ba84..15542488b 100644 --- a/artiq/firmware/satman/repeater.rs +++ b/artiq/firmware/satman/repeater.rs @@ -49,7 +49,7 @@ impl Repeater { self.state == RepeaterState::Up } - pub fn service(&mut self, routing_table: &drtio_routing::RoutingTable, rank: u8, router: &mut Router) { + pub fn service(&mut self, routing_table: &drtio_routing::RoutingTable, rank: u8, destination: u8, router: &mut Router) { self.process_local_errors(); match self.state { @@ -107,7 +107,7 @@ impl Repeater { } } RepeaterState::Up => { - self.process_unsolicited_aux(routing_table, rank, router); + self.process_unsolicited_aux(routing_table, rank, destination, router); if !rep_link_rx_up(self.repno) { info!("[REP#{}] link is down", self.repno); self.state = RepeaterState::Down; @@ -122,7 +122,7 @@ impl Repeater { } } - fn process_unsolicited_aux(&self, routing_table: &drtio_routing::RoutingTable, rank: u8, router: &mut Router) { + fn process_unsolicited_aux(&self, routing_table: &drtio_routing::RoutingTable, rank: u8, self_destination: u8, router: &mut Router) { match drtioaux::recv(self.auxno) { Ok(Some(packet)) => { let destination = get_routable_packet_destination(&packet); @@ -130,7 +130,7 @@ impl Repeater { warn!("[REP#{}] unsolicited aux packet: {:?}", self.repno, packet); } else { // routable packet - let res = router.route(packet, routing_table, rank); + let res = router.route(packet, routing_table, rank, self_destination); match res { Ok(()) => drtioaux::send(self.auxno, 
&drtioaux::Packet::RoutingAck).unwrap(), Err(e) => warn!("[REP#{}] Error routing packet: {:?}", self.repno, e), @@ -212,9 +212,8 @@ impl Repeater { (csr::DRTIOREP[repno].set_time_write)(1); while (csr::DRTIOREP[repno].set_time_read)() == 1 {} } - - // TSCAck is the only aux packet that is sent spontaneously - // by the satellite, in response to a TSC set on the RT link. + // TSCAck is sent spontaneously by the satellite, + // in response to a TSC set on the RT link. let reply = self.recv_aux_timeout(10000)?; if reply == drtioaux::Packet::TSCAck { return Ok(()); @@ -288,7 +287,7 @@ pub struct Repeater { impl Repeater { pub fn new(_repno: u8) -> Repeater { Repeater::default() } - pub fn service(&self, _routing_table: &drtio_routing::RoutingTable, _rank: u8) { } + pub fn service(&self, _routing_table: &drtio_routing::RoutingTable, _rank: u8, _destination: u8, _router: &mut Router) { } pub fn sync_tsc(&self) -> Result<(), drtioaux::Error> { Ok(()) } diff --git a/artiq/firmware/satman/routing.rs b/artiq/firmware/satman/routing.rs index e27ec53c9..ba82d95cb 100644 --- a/artiq/firmware/satman/routing.rs +++ b/artiq/firmware/satman/routing.rs @@ -1,6 +1,5 @@ use alloc::collections::vec_deque::VecDeque; use board_artiq::{drtioaux, drtio_routing}; -use board_misoc::csr; // Packets from downstream (further satellites) are received and routed appropriately. // they're passed immediately if it's possible (within the subtree), or sent upstream. @@ -51,7 +50,8 @@ impl Router { // called by local sources (DDMA, kernel) and by repeaters on receiving unsolicited data // messages are always buffered for upstream, or passed downstream directly pub fn route(&mut self, packet: drtioaux::Packet, - _routing_table: &drtio_routing::RoutingTable, _rank: u8 + _routing_table: &drtio_routing::RoutingTable, _rank: u8, + _self_destination: u8 ) -> Result<(), drtioaux::Error> { #[cfg(has_drtio_routing)] { @@ -59,14 +59,12 @@ impl Router { if let Some(destination) = destination { let hop = _routing_table.0[destination as usize][_rank as usize]; let auxno = if destination == 0 { 0 } else { hop }; - if hop != 0 { - if hop as usize <= csr::DRTIOREP.len() { - drtioaux::send(auxno, &packet)?; - } else { - self.out_messages.push_back(packet); - } - } else { + if destination == _self_destination { self.local_messages.push_back(packet); + } else if _rank > 1 { + drtioaux::send(auxno, &packet)?; + } else { + self.out_messages.push_back(packet); } } else { return Err(drtioaux::Error::RoutingError); @@ -81,7 +79,7 @@ impl Router { // Sends a packet to a required destination, routing if it's necessary pub fn send(&mut self, packet: drtioaux::Packet, - _routing_table: &drtio_routing::RoutingTable, _rank: u8) -> Result<(), drtioaux::Error> { + _routing_table: &drtio_routing::RoutingTable, _rank: u8, _destination: u8) -> Result<(), drtioaux::Error> { #[cfg(has_drtio_routing)] { let destination = get_routable_packet_destination(&packet); @@ -89,7 +87,7 @@ impl Router { // send upstream directly (response to master) drtioaux::send(0, &packet) } else { - self.route(packet, _routing_table, _rank) + self.route(packet, _routing_table, _rank, _destination) } } #[cfg(not(has_drtio_routing))] diff --git a/artiq/frontend/artiq_compile.py b/artiq/frontend/artiq_compile.py index 04a466563..d492cb35e 100755 --- a/artiq/frontend/artiq_compile.py +++ b/artiq/frontend/artiq_compile.py @@ -67,12 +67,21 @@ def main(): core.compile(exp.run, [exp_inst], {}, attribute_writeback=False, print_as_rpc=False) - subkernels = {} - for sid, subkernel_fn in 
object_map.subkernels().items(): - destination, subkernel_library = core.compile_subkernel( - sid, subkernel_fn, object_map, - [exp_inst], subkernel_arg_types) - subkernels[sid] = (destination, subkernel_library) + subkernels = object_map.subkernels() + compiled_subkernels = {} + while True: + new_subkernels = {} + for sid, subkernel_fn in subkernels.items(): + if sid in compiled_subkernels.keys(): + continue + destination, subkernel_library, embedding_map = core.compile_subkernel( + sid, subkernel_fn, object_map, + [exp_inst], subkernel_arg_types, subkernels) + compiled_subkernels[sid] = (destination, subkernel_library) + new_subkernels.update(embedding_map.subkernels()) + if new_subkernels == subkernels: + break + subkernels.update(new_subkernels) except CompileError as error: return finally: @@ -107,7 +116,7 @@ def main(): tar.addfile(main_kernel_info, fileobj=main_kernel_fileobj) # subkernels as " .elf" - for sid, (destination, subkernel_library) in subkernels.items(): + for sid, (destination, subkernel_library) in compiled_subkernels.items(): subkernel_fileobj = io.BytesIO(subkernel_library) subkernel_info = tarfile.TarInfo(name="{} {}.elf".format(sid, destination)) subkernel_info.size = len(subkernel_library) diff --git a/artiq/test/lit/embedding/subkernel_no_arg.py b/artiq/test/lit/embedding/subkernel_no_arg.py index 11d9c793d..605fbbfaa 100644 --- a/artiq/test/lit/embedding/subkernel_no_arg.py +++ b/artiq/test/lit/embedding/subkernel_no_arg.py @@ -6,13 +6,13 @@ from artiq.language.types import * @kernel def entrypoint(): - # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. + # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !. # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !. no_arg() -# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr -# CHECK-NOT-L: declare void @subkernel_send_message(i32, { i8*, i32 }*, i8**) local_unnamed_addr +# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr +# CHECK-NOT-L: declare void @subkernel_send_message(i32, i1, i8, { i8*, i32 }*, i8**) local_unnamed_addr @subkernel(destination=1) def no_arg() -> TStr: pass diff --git a/artiq/test/lit/embedding/subkernel_return.py b/artiq/test/lit/embedding/subkernel_return.py index 2f498f75e..8fcf023d6 100644 --- a/artiq/test/lit/embedding/subkernel_return.py +++ b/artiq/test/lit/embedding/subkernel_return.py @@ -6,15 +6,15 @@ from artiq.language.types import * @kernel def entrypoint(): - # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. + # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !. # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !. returning() # CHECK: call i8 @subkernel_await_message\(i32 1, i64 10000, { i8\*, i32 }\* nonnull .*, i8 1, i8 1\), !dbg !. # CHECK: call void @subkernel_await_finish\(i32 1, i64 10000\), !dbg !. 
subkernel_await(returning) -# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr -# CHECK-NOT-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr +# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr +# CHECK-NOT-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr # CHECK-L: declare i8 @subkernel_await_message(i32, i64, { i8*, i32 }*, i8, i8) local_unnamed_addr # CHECK-L: declare void @subkernel_await_finish(i32, i64) local_unnamed_addr @subkernel(destination=1) diff --git a/artiq/test/lit/embedding/subkernel_return_none.py b/artiq/test/lit/embedding/subkernel_return_none.py index f4e8a4508..d1636220c 100644 --- a/artiq/test/lit/embedding/subkernel_return_none.py +++ b/artiq/test/lit/embedding/subkernel_return_none.py @@ -6,15 +6,15 @@ from artiq.language.types import * @kernel def entrypoint(): - # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. + # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !. # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !. returning_none() # CHECK: call void @subkernel_await_finish\(i32 1, i64 10000\), !dbg !. # CHECK-NOT: call i8 @subkernel_await_message\(i32 1, i64 10000\, .*\), !dbg !. subkernel_await(returning_none) -# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr -# CHECK-NOT-L: declare void @subkernel_send_message(i32, { i8*, i32 }*, i8**) local_unnamed_addr +# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr +# CHECK-NOT-L: declare void @subkernel_send_message(i32, i1, i8, { i8*, i32 }*, i8**) local_unnamed_addr # CHECK-L: declare void @subkernel_await_finish(i32, i64) local_unnamed_addr # CHECK-NOT-L: declare i8 @subkernel_await_message(i32, i64, { i8*, i32 }*, i8, i8) local_unnamed_addr @subkernel(destination=1) diff --git a/artiq/test/lit/embedding/subkernel_self.py b/artiq/test/lit/embedding/subkernel_self.py index 8e702bc02..7bf9cbafd 100644 --- a/artiq/test/lit/embedding/subkernel_self.py +++ b/artiq/test/lit/embedding/subkernel_self.py @@ -11,7 +11,7 @@ class A: @kernel def kernel_entrypoint(self): - # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. + # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !. # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !. self.sk() @@ -21,5 +21,5 @@ a = A() def entrypoint(): a.kernel_entrypoint() -# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr -# CHECK-NOT-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr +# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr +# CHECK-NOT-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr diff --git a/artiq/test/lit/embedding/subkernel_self_args.py b/artiq/test/lit/embedding/subkernel_self_args.py index 57969398c..5aebed2e9 100644 --- a/artiq/test/lit/embedding/subkernel_self_args.py +++ b/artiq/test/lit/embedding/subkernel_self_args.py @@ -11,8 +11,8 @@ class A: @kernel def kernel_entrypoint(self): - # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. - # CHECK: call void @subkernel_send_message\(i32 1, i8 1, .*\), !dbg !. + # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !. + # CHECK: call void @subkernel_send_message\(i32 1, i1 false, i8 1, i8 1, .*\), !dbg !. 
self.sk(1) a = A() @@ -21,5 +21,5 @@ a = A() def entrypoint(): a.kernel_entrypoint() -# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr -# CHECK-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr +# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr +# CHECK-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr diff --git a/artiq/test/lit/embedding/subkernel_with_arg.py b/artiq/test/lit/embedding/subkernel_with_arg.py index 17d80dce7..114516586 100644 --- a/artiq/test/lit/embedding/subkernel_with_arg.py +++ b/artiq/test/lit/embedding/subkernel_with_arg.py @@ -6,13 +6,13 @@ from artiq.language.types import * @kernel def entrypoint(): - # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. - # CHECK: call void @subkernel_send_message\(i32 ., i8 1, .*\), !dbg !. + # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !. + # CHECK: call void @subkernel_send_message\(i32 ., i1 false, i8 1, i8 1, .*\), !dbg !. accept_arg(1) -# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr -# CHECK-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr +# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr +# CHECK-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr @subkernel(destination=1) def accept_arg(arg: TInt32) -> TNone: pass diff --git a/artiq/test/lit/embedding/subkernel_with_opt_arg.py b/artiq/test/lit/embedding/subkernel_with_opt_arg.py index 1821fec01..fb5cc3df1 100644 --- a/artiq/test/lit/embedding/subkernel_with_opt_arg.py +++ b/artiq/test/lit/embedding/subkernel_with_opt_arg.py @@ -6,16 +6,16 @@ from artiq.language.types import * @kernel def entrypoint(): - # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. - # CHECK: call void @subkernel_send_message\(i32 ., i8 1, .*\), !dbg !. + # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !. + # CHECK: call void @subkernel_send_message\(i32 ., i1 false, i8 1, i8 1, .*\), !dbg !. accept_arg(1) - # CHECK: call void @subkernel_load_run\(i32 1, i1 true\), !dbg !. - # CHECK: call void @subkernel_send_message\(i32 ., i8 2, .*\), !dbg !. + # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !. + # CHECK: call void @subkernel_send_message\(i32 ., i1 false, i8 1, i8 2, .*\), !dbg !. 
accept_arg(1, 2) -# CHECK-L: declare void @subkernel_load_run(i32, i1) local_unnamed_addr -# CHECK-L: declare void @subkernel_send_message(i32, i8, { i8*, i32 }*, i8**) local_unnamed_addr +# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr +# CHECK-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr @subkernel(destination=1) def accept_arg(arg_a, arg_b=5) -> TNone: pass From 1cc7398bc02659adc5232cb36dc98d723be908a0 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Wed, 6 Dec 2023 17:23:04 +0800 Subject: [PATCH 076/296] drtio: add sat -> mst async notif packet --- artiq/gateware/drtio/core.py | 8 ++++++-- artiq/gateware/drtio/rt_controller_master.py | 6 ++++++ artiq/gateware/drtio/rt_controller_repeater.py | 7 +++++++ artiq/gateware/drtio/rt_packet_master.py | 7 +++++++ artiq/gateware/drtio/rt_packet_repeater.py | 4 +++- artiq/gateware/drtio/rt_packet_satellite.py | 12 ++++++++++++ artiq/gateware/drtio/rt_serializer.py | 1 + 7 files changed, 42 insertions(+), 3 deletions(-) diff --git a/artiq/gateware/drtio/core.py b/artiq/gateware/drtio/core.py index b2a108237..b344b9c95 100644 --- a/artiq/gateware/drtio/core.py +++ b/artiq/gateware/drtio/core.py @@ -78,6 +78,7 @@ class DRTIOSatellite(Module): self.reset = CSRStorage(reset=1) self.reset_phy = CSRStorage(reset=1) self.tsc_loaded = CSR() + self.async_messages_ready = CSR() # master interface in the sys domain self.cri = cri.Interface() self.async_errors = Record(async_errors_layout) @@ -129,6 +130,9 @@ class DRTIOSatellite(Module): link_layer_sync, interface=self.cri) self.comb += self.rt_packet.reset.eq(self.cd_rio.rst) + self.sync += If(self.async_messages_ready.re, self.rt_packet.async_msg_stb.eq(1)) + self.comb += self.async_messages_ready.w.eq(self.rt_packet.async_msg_ack) + self.comb += [ tsc.load.eq(self.rt_packet.tsc_load), tsc.load_value.eq(self.rt_packet.tsc_load_value) @@ -136,14 +140,14 @@ class DRTIOSatellite(Module): self.sync += [ If(self.tsc_loaded.re, self.tsc_loaded.w.eq(0)), - If(self.rt_packet.tsc_load, self.tsc_loaded.w.eq(1)) + If(self.rt_packet.tsc_load, self.tsc_loaded.w.eq(1)), ] self.submodules.rt_errors = rt_errors_satellite.RTErrorsSatellite( self.rt_packet, tsc, self.async_errors) def get_csrs(self): - return ([self.reset, self.reset_phy, self.tsc_loaded] + + return ([self.reset, self.reset_phy, self.tsc_loaded, self.async_messages_ready] + self.link_layer.get_csrs() + self.link_stats.get_csrs() + self.rt_errors.get_csrs()) diff --git a/artiq/gateware/drtio/rt_controller_master.py b/artiq/gateware/drtio/rt_controller_master.py index 3ed22dbe7..aa630254f 100644 --- a/artiq/gateware/drtio/rt_controller_master.py +++ b/artiq/gateware/drtio/rt_controller_master.py @@ -17,6 +17,7 @@ class _CSRs(AutoCSR): self.set_time = CSR() self.underflow_margin = CSRStorage(16, reset=300) + self.async_messages_ready = CSR() self.force_destination = CSRStorage() self.destination = CSRStorage(8) @@ -60,6 +61,11 @@ class RTController(Module): If(self.csrs.set_time.re, rt_packet.set_time_stb.eq(1)) ] + self.sync += [ + If(rt_packet.async_messages_ready, self.csrs.async_messages_ready.w.eq(1)), + If(self.csrs.async_messages_ready.re, self.csrs.async_messages_ready.w.eq(0)) + ] + # chan_sel forcing chan_sel = Signal(24) self.comb += chan_sel.eq(Mux(self.csrs.force_destination.storage, diff --git a/artiq/gateware/drtio/rt_controller_repeater.py b/artiq/gateware/drtio/rt_controller_repeater.py index 79b9559eb..bdc96fe38 100644 --- 
a/artiq/gateware/drtio/rt_controller_repeater.py +++ b/artiq/gateware/drtio/rt_controller_repeater.py @@ -14,6 +14,7 @@ class RTController(Module, AutoCSR): self.command_missed_cmd = CSRStatus(2) self.command_missed_chan_sel = CSRStatus(24) self.buffer_space_timeout_dest = CSRStatus(8) + self.async_messages_ready = CSR() self.sync += rt_packet.reset.eq(self.reset.storage) @@ -23,6 +24,12 @@ class RTController(Module, AutoCSR): ] self.comb += self.set_time.w.eq(rt_packet.set_time_stb) + self.sync += [ + If(rt_packet.async_messages_ready, self.async_messages_ready.w.eq(1)), + If(self.async_messages_ready.re, self.async_messages_ready.w.eq(0)) + ] + + errors = [ (rt_packet.err_unknown_packet_type, "rtio_rx", None, None), (rt_packet.err_packet_truncated, "rtio_rx", None, None), diff --git a/artiq/gateware/drtio/rt_packet_master.py b/artiq/gateware/drtio/rt_packet_master.py index 70d44ecaf..32d3a39a7 100644 --- a/artiq/gateware/drtio/rt_packet_master.py +++ b/artiq/gateware/drtio/rt_packet_master.py @@ -61,6 +61,9 @@ class RTPacketMaster(Module): # a set_time request pending self.tsc_value = Signal(64) + # async aux messages interface, only received + self.async_messages_ready = Signal() + # rx errors self.err_unknown_packet_type = Signal() self.err_packet_truncated = Signal() @@ -283,12 +286,16 @@ class RTPacketMaster(Module): echo_received_now = Signal() self.sync.rtio_rx += self.echo_received_now.eq(echo_received_now) + async_messages_ready = Signal() + self.sync.rtio_rx += self.async_messages_ready.eq(async_messages_ready) + rx_fsm.act("INPUT", If(rx_dp.frame_r, rx_dp.packet_buffer_load.eq(1), If(rx_dp.packet_last, Case(rx_dp.packet_type, { rx_plm.types["echo_reply"]: echo_received_now.eq(1), + rx_plm.types["async_messages_ready"]: async_messages_ready.eq(1), rx_plm.types["buffer_space_reply"]: NextState("BUFFER_SPACE"), rx_plm.types["read_reply"]: NextState("READ_REPLY"), rx_plm.types["read_reply_noevent"]: NextState("READ_REPLY_NOEVENT"), diff --git a/artiq/gateware/drtio/rt_packet_repeater.py b/artiq/gateware/drtio/rt_packet_repeater.py index 728c24ae8..62abeeee1 100644 --- a/artiq/gateware/drtio/rt_packet_repeater.py +++ b/artiq/gateware/drtio/rt_packet_repeater.py @@ -19,6 +19,7 @@ class RTPacketRepeater(Module): # in rtio_rx domain self.err_unknown_packet_type = Signal() self.err_packet_truncated = Signal() + self.async_messages_ready = Signal() # in rtio domain self.err_command_missed = Signal() @@ -304,6 +305,7 @@ class RTPacketRepeater(Module): rx_dp.packet_buffer_load.eq(1), If(rx_dp.packet_last, Case(rx_dp.packet_type, { + rx_plm.types["async_messages_ready"]: self.async_messages_ready.eq(1), rx_plm.types["buffer_space_reply"]: NextState("BUFFER_SPACE"), rx_plm.types["read_reply"]: NextState("READ_REPLY"), rx_plm.types["read_reply_noevent"]: NextState("READ_REPLY_NOEVENT"), @@ -331,4 +333,4 @@ class RTPacketRepeater(Module): read_not.eq(1), read_no_event.eq(1), NextState("INPUT") - ) + ) \ No newline at end of file diff --git a/artiq/gateware/drtio/rt_packet_satellite.py b/artiq/gateware/drtio/rt_packet_satellite.py index 79a48f493..a4094d9db 100644 --- a/artiq/gateware/drtio/rt_packet_satellite.py +++ b/artiq/gateware/drtio/rt_packet_satellite.py @@ -19,6 +19,9 @@ class RTPacketSatellite(Module): self.tsc_load = Signal() self.tsc_load_value = Signal(64) + self.async_msg_stb = Signal() + self.async_msg_ack = Signal() + if interface is None: interface = cri.Interface() self.cri = interface @@ -78,6 +81,8 @@ class RTPacketSatellite(Module): ) ] + self.sync += If(self.async_msg_ack, 
self.async_msg_stb.eq(0)) + # RX FSM cri_read = Signal() cri_buffer_space = Signal() @@ -197,6 +202,7 @@ class RTPacketSatellite(Module): tx_fsm.act("IDLE", If(echo_req, NextState("ECHO")), + If(self.async_msg_stb, NextState("ASYNC_MESSAGES_READY")), If(buffer_space_req, NextState("BUFFER_SPACE")), If(read_request_pending & ~self.cri.i_status[2], NextState("READ"), @@ -210,6 +216,12 @@ class RTPacketSatellite(Module): If(tx_dp.packet_last, NextState("IDLE")) ) + tx_fsm.act("ASYNC_MESSAGES_READY", + self.async_msg_ack.eq(1), + tx_dp.send("async_messages_ready"), + If(tx_dp.packet_last, NextState("IDLE")) + ) + tx_fsm.act("BUFFER_SPACE", buffer_space_ack.eq(1), tx_dp.send("buffer_space_reply", space=buffer_space), diff --git a/artiq/gateware/drtio/rt_serializer.py b/artiq/gateware/drtio/rt_serializer.py index 01e5cf19e..9a77263a4 100644 --- a/artiq/gateware/drtio/rt_serializer.py +++ b/artiq/gateware/drtio/rt_serializer.py @@ -69,6 +69,7 @@ def get_s2m_layouts(alignment): plm.add_type("read_reply", ("timestamp", 64), ("data", 32)) plm.add_type("read_reply_noevent", ("overflow", 1)) # overflow=0→timeout + plm.add_type("async_messages_ready") return plm From 95b92a178b9aa1b87e6de69aeabe2ec538cb379e Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 7 Dec 2023 16:42:39 +0800 Subject: [PATCH 077/296] satman: make use of the async flag --- .../firmware/libproto_artiq/drtioaux_proto.rs | 27 ++++ artiq/firmware/runtime/rtio_mgt.rs | 2 +- artiq/firmware/runtime/session.rs | 2 + artiq/firmware/satman/kernel.rs | 64 +++++---- artiq/firmware/satman/main.rs | 67 ++++++---- artiq/firmware/satman/repeater.rs | 63 +++++---- artiq/firmware/satman/routing.rs | 121 +++++++++--------- 7 files changed, 197 insertions(+), 149 deletions(-) diff --git a/artiq/firmware/libproto_artiq/drtioaux_proto.rs b/artiq/firmware/libproto_artiq/drtioaux_proto.rs index ca38e6b71..0013c8171 100644 --- a/artiq/firmware/libproto_artiq/drtioaux_proto.rs +++ b/artiq/firmware/libproto_artiq/drtioaux_proto.rs @@ -77,6 +77,8 @@ pub enum Packet { RoutingSetPath { destination: u8, hops: [u8; 32] }, RoutingSetRank { rank: u8 }, + RoutingRetrievePackets, + RoutingNoPackets, RoutingAck, MonitorRequest { destination: u8, channel: u16, probe: u8 }, @@ -168,6 +170,8 @@ impl Packet { rank: reader.read_u8()? }, 0x32 => Packet::RoutingAck, + 0x33 => Packet::RoutingRetrievePackets, + 0x34 => Packet::RoutingNoPackets, 0x40 => Packet::MonitorRequest { destination: reader.read_u8()?, @@ -450,6 +454,10 @@ impl Packet { }, Packet::RoutingAck => writer.write_u8(0x32)?, + Packet::RoutingRetrievePackets => + writer.write_u8(0x33)?, + Packet::RoutingNoPackets => + writer.write_u8(0x34)?, Packet::MonitorRequest { destination, channel, probe } => { writer.write_u8(0x40)?; @@ -685,4 +693,23 @@ impl Packet { } Ok(()) } + + pub fn routable_destination(&self) -> Option { + // only for packets that could be re-routed, not only forwarded + match self { + Packet::DmaAddTraceRequest { destination, .. } => Some(*destination), + Packet::DmaAddTraceReply { destination, .. } => Some(*destination), + Packet::DmaRemoveTraceRequest { destination, .. } => Some(*destination), + Packet::DmaRemoveTraceReply { destination, .. } => Some(*destination), + Packet::DmaPlaybackRequest { destination, .. } => Some(*destination), + Packet::DmaPlaybackReply { destination, .. } => Some(*destination), + Packet::SubkernelLoadRunRequest { destination, .. } => Some(*destination), + Packet::SubkernelLoadRunReply { destination, .. 
} => Some(*destination), + Packet::SubkernelMessage { destination, .. } => Some(*destination), + Packet::SubkernelMessageAck { destination, .. } => Some(*destination), + Packet::DmaPlaybackStatus { destination, .. } => Some(*destination), + Packet::SubkernelFinished { destination, .. } => Some(*destination), + _ => None + } + } } diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index fcb61da26..75347da68 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -65,7 +65,7 @@ pub mod drtio { let up_destinations = up_destinations.clone(); let ddma_mutex = ddma_mutex.clone(); let subkernel_mutex = subkernel_mutex.clone(); - io.spawn(10240, move |io| { + io.spawn(8192, move |io| { let routing_table = routing_table.borrow(); link_thread(io, &aux_mutex, &routing_table, &up_destinations, &ddma_mutex, &subkernel_mutex); }); diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index 129d5013f..44b4bd0be 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -650,6 +650,8 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, unsafe { kernel::stop() } session.kernel_state = KernelState::Absent; unsafe { session.congress.cache.unborrow() } + #[cfg(has_drtio)] + subkernel::clear_subkernels(io, _subkernel_mutex)?; match stream { None => { diff --git a/artiq/firmware/satman/kernel.rs b/artiq/firmware/satman/kernel.rs index 07fed8094..0b9762385 100644 --- a/artiq/firmware/satman/kernel.rs +++ b/artiq/firmware/satman/kernel.rs @@ -140,7 +140,7 @@ struct Session { last_exception: Option, source: u8, // which destination requested running the kernel messages: MessageManager, - subkernels_finished: VecDeque<(u32, bool, u8)> // tuple of id, with_exception, exception_source + subkernels_finished: Vec // ids of subkernels finished } #[derive(Debug)] @@ -286,13 +286,10 @@ impl MessageManager { let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; self.out_state = OutMessageState::MessageBeingSent; let meta = self.get_outgoing_slice(&mut data_slice).unwrap(); - let res = router.route(drtioaux::Packet::SubkernelMessage { + router.route(drtioaux::Packet::SubkernelMessage { source: self_destination, destination: destination, id: id, status: meta.status, length: meta.len as u16, data: data_slice }, routing_table, rank, self_destination); - if let Err(e) = res { - warn!("error sending SubkernelMessage: {}", e); - } Ok(()) } @@ -309,7 +306,7 @@ impl Session { last_exception: None, source: 0, messages: MessageManager::new(), - subkernels_finished: VecDeque::new() + subkernels_finished: Vec::new() } } @@ -500,6 +497,15 @@ impl Manager { with_exception: $with_exception, exception_source: destination }) }} } + + if let Some(subkernel_finished) = self.last_finished.take() { + info!("subkernel {} finished, with exception: {}", subkernel_finished.id, subkernel_finished.with_exception); + router.route(drtioaux::Packet::SubkernelFinished { + destination: subkernel_finished.source, id: subkernel_finished.id, + with_exception: subkernel_finished.with_exception, exception_src: subkernel_finished.exception_source + }, &routing_table, rank, destination); + } + if !self.is_running() { return; } @@ -534,17 +540,6 @@ impl Manager { self.last_finished = finished!(true); } } - - if let Some(subkernel_finished) = self.last_finished.take() { - info!("subkernel {} finished, with exception: {}", subkernel_finished.id, subkernel_finished.with_exception); - let res = 
router.route(drtioaux::Packet::SubkernelFinished { - destination: subkernel_finished.source, id: subkernel_finished.id, - with_exception: subkernel_finished.with_exception, exception_src: destination - }, &routing_table, rank, destination); - if let Err(e) = res { - warn!("error sending SubkernelFinished: {}", e); - } - } } fn process_external_messages(&mut self) -> Result<(), Error> { @@ -578,26 +573,15 @@ impl Manager { self.session.kernel_state = KernelState::Running; } else { let mut i = 0; - for status in self.session.subkernels_finished.iter() { - if status.0 == *id { + for status in &self.session.subkernels_finished { + if *status == *id { + kern_send(&kern::SubkernelAwaitFinishReply { status: kern::SubkernelStatus::NoError })?; + self.session.kernel_state = KernelState::Running; + self.session.subkernels_finished.swap_remove(i); break; } i += 1; } - if let Some(finish_status) = self.session.subkernels_finished.remove(i) { - if finish_status.1 { - unsafe { kernel_cpu::stop() } - self.session.kernel_state = KernelState::Absent; - unsafe { self.cache.unborrow() } - self.last_finished = Some(SubkernelFinished { - source: self.session.source, id: self.current_id, - with_exception: true, exception_source: finish_status.2 - }) - } else { - kern_send(&kern::SubkernelAwaitFinishReply { status: kern::SubkernelStatus::NoError })?; - self.session.kernel_state = KernelState::Running; - } - } } Ok(()) } @@ -623,7 +607,17 @@ impl Manager { } pub fn remote_subkernel_finished(&mut self, id: u32, with_exception: bool, exception_source: u8) { - self.session.subkernels_finished.push_back((id, with_exception, exception_source)); + if with_exception { + unsafe { kernel_cpu::stop() } + self.session.kernel_state = KernelState::Absent; + unsafe { self.cache.unborrow() } + self.last_finished = Some(SubkernelFinished { + source: self.session.source, id: self.current_id, + with_exception: true, exception_source: exception_source + }) + } else { + self.session.subkernels_finished.push(id); + } } fn process_kern_message(&mut self, router: &mut Router, @@ -722,7 +716,7 @@ impl Manager { self.session.kernel_state = KernelState::SubkernelAwaitLoad; router.route(drtioaux::Packet::SubkernelLoadRunRequest { source: destination, destination: sk_destination, id: id, run: run - }, routing_table, rank, destination).map_err(|_| Error::DrtioError)?; + }, routing_table, rank, destination); kern_acknowledge() } diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index 64eab6e77..1a8e794a3 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -66,6 +66,12 @@ fn drtiosat_tsc_loaded() -> bool { } } +fn drtiosat_async_ready() { + unsafe { + csr::drtiosat::async_messages_ready_write(1); + } +} + pub enum RtioMaster { Drtio, Dma, @@ -90,7 +96,14 @@ macro_rules! 
forward { if hop != 0 { let repno = (hop - 1) as usize; if repno < $repeaters.len() { - return $repeaters[repno].aux_forward($packet); + if $packet.expects_response() { + return $repeaters[repno].aux_forward($packet); + } else { + let res = $repeaters[repno].aux_send($packet); + // allow the satellite to parse the packet before next + clock::spin_us(10_000); + return res; + } } else { return Err(drtioaux::Error::RoutingError); } @@ -135,7 +148,8 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg *self_destination = destination; // async messages if *rank == 1 { - if let Some(packet) = router.get_upstream_packet(*rank) { + // for now, master ignores the async_messages_ready packet + if let Some(packet) = router.get_upstream_packet() { // pass any async or routed packets to master // this does mean that DDMA/SK packets to master will "trickle down" to higher rank return drtioaux::send(0, &packet) @@ -236,14 +250,10 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg drtioaux::send(0, &drtioaux::Packet::RoutingAck) } - #[cfg(has_drtio_routing)] - drtioaux::Packet::RoutingAck => { - if *rank > 1 { - router.routing_ack_received(); - } else { - warn!("received unexpected RoutingAck"); - } - Ok(()) + drtioaux::Packet::RoutingRetrievePackets => { + let packet = router.get_upstream_packet().or( + Some(drtioaux::Packet::RoutingNoPackets)).unwrap(); + drtioaux::send(0, &packet) } drtioaux::Packet::MonitorRequest { destination: _destination, channel, probe } => { @@ -453,10 +463,11 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg if kernelmgr.message_ack_slice() { let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; if let Some(meta) = kernelmgr.message_get_slice(&mut data_slice) { - router.send(drtioaux::Packet::SubkernelMessage { + // route and not send immediately as ACKs are not a beginning of a transaction + router.route(drtioaux::Packet::SubkernelMessage { source: *self_destination, destination: meta.destination, id: kernelmgr.get_current_id().unwrap(), status: meta.status, length: meta.len as u16, data: data_slice - }, _routing_table, *rank, *self_destination)?; + }, _routing_table, *rank, *self_destination); } else { error!("Error receiving message slice"); } @@ -476,16 +487,16 @@ fn process_aux_packets(dma_manager: &mut DmaManager, analyzer: &mut Analyzer, routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8, router: &mut routing::Router, destination: &mut u8) { let result = - drtioaux::recv(0).or_else(|_| Ok(router.get_local_packet())).and_then(|packet| { - if let Some(packet) = packet { - process_aux_packet(dma_manager, analyzer, kernelmgr, repeaters, routing_table, rank, router, destination, packet) + drtioaux::recv(0).and_then(|packet| { + if let Some(packet) = packet.or_else(|| router.get_local_packet()) { + process_aux_packet(dma_manager, analyzer, kernelmgr, + repeaters, routing_table, rank, router, destination, packet) } else { Ok(()) } }); - match result { - Ok(()) => (), - Err(e) => warn!("aux packet error ({})", e) + if let Err(e) = result { + warn!("aux packet error ({})", e); } } @@ -765,22 +776,22 @@ pub extern fn main() -> i32 { if let Some(status) = dma_manager.get_status() { info!("playback done, error: {}, channel: {}, timestamp: {}", status.error, status.channel, status.timestamp); router.route(drtioaux::Packet::DmaPlaybackStatus { - source: destination, destination: status.source, id: status.id, - error: status.error, channel: 
status.channel, timestamp: status.timestamp - }, &routing_table, rank, destination); + destination: status.source, id: status.id, error: status.error, + channel: status.channel, timestamp: status.timestamp + }, &routing_table, rank, destination) } kernelmgr.process_kern_requests(&mut router, &routing_table, rank, destination); - if rank > 1 { - if let Some(packet) = router.get_upstream_packet(rank) { - // in sat-sat communications, it can be async - let res = drtioaux::send(0, &packet); - if let Err(e) = res { - warn!("error routing packet: {}", e); - } + if let Some((repno, packet)) = router.get_downstream_packet() { + if let Err(e) = repeaters[repno].aux_send(&packet) { + warn!("[REP#{}] Error when sending packet to satellite ({:?})", repno, e) } } + + if router.any_upstream_waiting() { + drtiosat_async_ready(); + } } drtiosat_reset_phy(true); diff --git a/artiq/firmware/satman/repeater.rs b/artiq/firmware/satman/repeater.rs index 15542488b..544527cda 100644 --- a/artiq/firmware/satman/repeater.rs +++ b/artiq/firmware/satman/repeater.rs @@ -1,7 +1,7 @@ use board_artiq::{drtioaux, drtio_routing}; #[cfg(has_drtio_routing)] use board_misoc::{csr, clock}; -use routing::{Router, get_routable_packet_destination}; +use routing::Router; #[cfg(has_drtio_routing)] fn rep_link_rx_up(repno: u8) -> bool { @@ -107,11 +107,16 @@ impl Repeater { } } RepeaterState::Up => { - self.process_unsolicited_aux(routing_table, rank, destination, router); + self.process_unsolicited_aux(); if !rep_link_rx_up(self.repno) { info!("[REP#{}] link is down", self.repno); self.state = RepeaterState::Down; } + if self.async_messages_ready() { + if let Err(e) = self.handle_async(routing_table, rank, destination, router) { + warn!("[REP#{}] Error handling async messages ({})", self.repno, e); + } + } } RepeaterState::Failed => { if !rep_link_rx_up(self.repno) { @@ -122,21 +127,9 @@ impl Repeater { } } - fn process_unsolicited_aux(&self, routing_table: &drtio_routing::RoutingTable, rank: u8, self_destination: u8, router: &mut Router) { + fn process_unsolicited_aux(&self) { match drtioaux::recv(self.auxno) { - Ok(Some(packet)) => { - let destination = get_routable_packet_destination(&packet); - if destination.is_none() { - warn!("[REP#{}] unsolicited aux packet: {:?}", self.repno, packet); - } else { - // routable packet - let res = router.route(packet, routing_table, rank, self_destination); - match res { - Ok(()) => drtioaux::send(self.auxno, &drtioaux::Packet::RoutingAck).unwrap(), - Err(e) => warn!("[REP#{}] Error routing packet: {:?}", self.repno, e), - } - } - } + Ok(Some(packet)) => warn!("[REP#{}] unsolicited aux packet: {:?}", self.repno, packet), Ok(None) => (), Err(_) => warn!("[REP#{}] aux packet error", self.repno) } @@ -192,16 +185,42 @@ impl Repeater { } } - pub fn aux_forward(&self, request: &drtioaux::Packet) -> Result<(), drtioaux::Error> { - if self.state != RepeaterState::Up { - return Err(drtioaux::Error::LinkDown); + fn async_messages_ready(&self) -> bool { + let async_rdy; + unsafe { + async_rdy = (csr::DRTIOREP[self.repno as usize].async_messages_ready_read)(); + (csr::DRTIOREP[self.repno as usize].async_messages_ready_write)(0); } - drtioaux::send(self.auxno, request).unwrap(); + async_rdy == 1 + } + + fn handle_async(&self, routing_table: &drtio_routing::RoutingTable, rank: u8, self_destination: u8, router: &mut Router + ) -> Result<(), drtioaux::Error> { + loop { + drtioaux::send(self.auxno, &drtioaux::Packet::RoutingRetrievePackets).unwrap(); + let reply = self.recv_aux_timeout(200)?; + match reply 
{
+                    drtioaux::Packet::RoutingNoPackets => break,
+                    packet => router.route(packet, routing_table, rank, self_destination)
+            }
+        }
+        Ok(())
+    }
+
+    pub fn aux_forward(&self, request: &drtioaux::Packet) -> Result<(), drtioaux::Error> {
+        self.aux_send(request)?;
         let reply = self.recv_aux_timeout(200)?;
         drtioaux::send(0, &reply).unwrap();
         Ok(())
     }
 
+    pub fn aux_send(&self, request: &drtioaux::Packet) -> Result<(), drtioaux::Error> {
+        if self.state != RepeaterState::Up {
+            return Err(drtioaux::Error::LinkDown);
+        }
+        drtioaux::send(self.auxno, request)
+    }
+
     pub fn sync_tsc(&self) -> Result<(), drtioaux::Error> {
         if self.state != RepeaterState::Up {
             return Ok(());
         }
@@ -212,8 +231,8 @@ impl Repeater {
             (csr::DRTIOREP[repno].set_time_write)(1);
             while (csr::DRTIOREP[repno].set_time_read)() == 1 {}
         }
-        // TSCAck is sent spontaneously by the satellite,
-        // in response to a TSC set on the RT link.
+        // TSCAck is the only aux packet that is sent spontaneously
+        // by the satellite, in response to a TSC set on the RT link.
         let reply = self.recv_aux_timeout(10000)?;
         if reply == drtioaux::Packet::TSCAck {
             return Ok(());
diff --git a/artiq/firmware/satman/routing.rs b/artiq/firmware/satman/routing.rs
index ba82d95cb..e129e4a54 100644
--- a/artiq/firmware/satman/routing.rs
+++ b/artiq/firmware/satman/routing.rs
@@ -1,93 +1,87 @@
 use alloc::collections::vec_deque::VecDeque;
 use board_artiq::{drtioaux, drtio_routing};
+use board_misoc::csr;
 
 // Packets from downstream (further satellites) are received and routed appropriately.
-// they're passed immediately if it's possible (within the subtree), or sent upstream.
+// they're passed as soon as possible downstream (within the subtree), or sent upstream,
+// which is notified about pending packets.
 // for rank 1 (connected to master) satellites, these packets are passed as an answer to DestinationStatusRequest;
-// for higher ranks, straight upstream, but awaiting for an ACK to make sure the upstream is not overwhelmed.
+// for higher ranks, after getting a notification, the upstream satellite will transact with downstream to get the pending packets.
 // forward! macro is not deprecated, as routable packets are only those that can originate
 // from both master and satellite, e.g. DDMA and Subkernel.
 
-pub fn get_routable_packet_destination(packet: &drtioaux::Packet) -> Option {
-    let destination = match packet {
-        // received from downstream
-        drtioaux::Packet::DmaAddTraceRequest { destination, .. } => destination,
-        drtioaux::Packet::DmaAddTraceReply { destination, .. } => destination,
-        drtioaux::Packet::DmaRemoveTraceRequest { destination, .. } => destination,
-        drtioaux::Packet::DmaRemoveTraceReply { destination, .. } => destination,
-        drtioaux::Packet::DmaPlaybackRequest { destination, .. } => destination,
-        drtioaux::Packet::DmaPlaybackReply { destination, .. } => destination,
-        drtioaux::Packet::SubkernelLoadRunRequest { destination, .. } => destination,
-        drtioaux::Packet::SubkernelLoadRunReply { destination, .. } => destination,
-        // received from downstream or produced locally
-        drtioaux::Packet::SubkernelMessage { destination, .. } => destination,
-        drtioaux::Packet::SubkernelMessageAck { destination, .. } => destination,
-        // "async" - master gets them by deststatreq, satellites would get it through the router
-        drtioaux::Packet::DmaPlaybackStatus { destination, .. } => destination,
-        drtioaux::Packet::SubkernelFinished { destination, .. 
} => destination, - _ => return None - }; - Some(*destination) -} - pub struct Router { - out_messages: VecDeque, - local_messages: VecDeque, - upstream_ready: bool + upstream_queue: VecDeque, + local_queue: VecDeque, + downstream_queue: VecDeque<(usize, drtioaux::Packet)>, + upstream_notified: bool, } impl Router { pub fn new() -> Router { Router { - out_messages: VecDeque::new(), - local_messages: VecDeque::new(), - upstream_ready: true + upstream_queue: VecDeque::new(), + local_queue: VecDeque::new(), + downstream_queue: VecDeque::new(), + upstream_notified: false, } } - - // called by local sources (DDMA, kernel) and by repeaters on receiving unsolicited data - // messages are always buffered for upstream, or passed downstream directly + // called by local sources (DDMA, kernel) and by repeaters on receiving async data + // messages are always buffered for both upstream and downstream pub fn route(&mut self, packet: drtioaux::Packet, _routing_table: &drtio_routing::RoutingTable, _rank: u8, _self_destination: u8 - ) -> Result<(), drtioaux::Error> { + ) { #[cfg(has_drtio_routing)] { - let destination = get_routable_packet_destination(&packet); + let destination = packet.routable_destination(); if let Some(destination) = destination { - let hop = _routing_table.0[destination as usize][_rank as usize]; - let auxno = if destination == 0 { 0 } else { hop }; + let hop = _routing_table.0[destination as usize][_rank as usize] as usize; if destination == _self_destination { - self.local_messages.push_back(packet); - } else if _rank > 1 { - drtioaux::send(auxno, &packet)?; + self.local_queue.push_back(packet); + } else if hop > 0 && hop < csr::DRTIOREP.len() { + let repno = (hop - 1) as usize; + self.downstream_queue.push_back((repno, packet)); } else { - self.out_messages.push_back(packet); + self.upstream_queue.push_back(packet); } } else { - return Err(drtioaux::Error::RoutingError); + error!("Received an unroutable packet: {:?}", packet); } } #[cfg(not(has_drtio_routing))] { - self.out_messages.push_back(packet); + self.upstream_queue.push_back(packet); } - Ok(()) } // Sends a packet to a required destination, routing if it's necessary pub fn send(&mut self, packet: drtioaux::Packet, - _routing_table: &drtio_routing::RoutingTable, _rank: u8, _destination: u8) -> Result<(), drtioaux::Error> { + _routing_table: &drtio_routing::RoutingTable, + _rank: u8, _destination: u8 + ) -> Result<(), drtioaux::Error> { #[cfg(has_drtio_routing)] { - let destination = get_routable_packet_destination(&packet); - if destination.is_none() || destination == Some(0) { - // send upstream directly (response to master) - drtioaux::send(0, &packet) + let destination = packet.routable_destination(); + if let Some(destination) = destination { + let hop = _routing_table.0[destination as usize][_rank as usize] as usize; + if destination == 0 { + // response is needed immediately if master required it + drtioaux::send(0, &packet)?; + } else if !(hop > 0 && hop < csr::DRTIOREP.len()) { + // higher rank can wait + self.upstream_queue.push_back(packet); + } else { + let repno = (hop - 1) as usize; + // transaction will occur at closest possible opportunity + self.downstream_queue.push_back((repno, packet)); + } + Ok(()) } else { - self.route(packet, _routing_table, _rank, _destination) + // packet not supported in routing, fallback - sent directly + drtioaux::send(0, &packet) } } #[cfg(not(has_drtio_routing))] @@ -96,25 +90,26 @@ impl Router { } } - pub fn get_upstream_packet(&mut self, rank: u8) -> Option { - // called on 
DestinationStatusRequest on rank 1, in loop in others - if self.upstream_ready { - let packet = self.out_messages.pop_front(); - if rank > 1 && packet.is_some() { - // packet will be sent out, awaiting ACK - self.upstream_ready = false; - } - packet + pub fn any_upstream_waiting(&mut self) -> bool { + let empty = self.upstream_queue.is_empty(); + if !empty && !self.upstream_notified { + self.upstream_notified = true; // so upstream will not get spammed with notifications + true } else { - None + false } } - pub fn routing_ack_received(&mut self) { - self.upstream_ready = true; + pub fn get_upstream_packet(&mut self) -> Option { + self.upstream_notified = false; + self.upstream_queue.pop_front() + } + + pub fn get_downstream_packet(&mut self) -> Option<(usize, drtioaux::Packet)> { + self.downstream_queue.pop_front() } pub fn get_local_packet(&mut self) -> Option { - self.local_messages.pop_front() + self.local_queue.pop_front() } } \ No newline at end of file From 4363cdf9fa13fe4575ce3ee5722af2d15d2335f1 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Fri, 8 Dec 2023 15:48:31 +0800 Subject: [PATCH 078/296] master: make use of the async message ready flag --- .../firmware/libproto_artiq/drtioaux_proto.rs | 13 ++ artiq/firmware/runtime/kernel.rs | 8 +- artiq/firmware/runtime/rtio_mgt.rs | 116 +++++++----------- artiq/firmware/runtime/session.rs | 1 + artiq/firmware/satman/main.rs | 15 +-- artiq/firmware/satman/routing.rs | 7 +- 6 files changed, 70 insertions(+), 90 deletions(-) diff --git a/artiq/firmware/libproto_artiq/drtioaux_proto.rs b/artiq/firmware/libproto_artiq/drtioaux_proto.rs index 0013c8171..1157e864c 100644 --- a/artiq/firmware/libproto_artiq/drtioaux_proto.rs +++ b/artiq/firmware/libproto_artiq/drtioaux_proto.rs @@ -712,4 +712,17 @@ impl Packet { _ => None } } + + pub fn expects_response(&self) -> bool { + // returns true if the routable packet should elicit a response + // e.g. reply, ACK packets end a conversation, + // and firmware should not wait for response + match self { + Packet::DmaAddTraceReply { .. } | Packet::DmaRemoveTraceReply { .. } | + Packet::DmaPlaybackReply { .. } | Packet::SubkernelLoadRunReply { .. } | + Packet::SubkernelMessageAck { .. } | Packet::DmaPlaybackStatus { .. } | + Packet::SubkernelFinished { .. } => false, + _ => true + } + } } diff --git a/artiq/firmware/runtime/kernel.rs b/artiq/firmware/runtime/kernel.rs index 124b17f19..855648845 100644 --- a/artiq/firmware/runtime/kernel.rs +++ b/artiq/firmware/runtime/kernel.rs @@ -364,8 +364,11 @@ pub mod subkernel { { let _lock = subkernel_mutex.lock(io)?; match unsafe { SUBKERNELS.get(&id).unwrap().state } { - SubkernelState::Finished { .. } => return Err(Error::SubkernelFinished), + SubkernelState::Finished { status: FinishStatus::Ok } | SubkernelState::Running => (), + SubkernelState::Finished { + status: FinishStatus::CommLost, + } => return Err(Error::SubkernelFinished), _ => return Err(Error::IncorrectState) } } @@ -385,7 +388,8 @@ pub mod subkernel { } } match unsafe { SUBKERNELS.get(&id).unwrap().state } { - SubkernelState::Finished { .. 
} => return Ok(None), + SubkernelState::Finished { status: FinishStatus::CommLost } | + SubkernelState::Finished { status: FinishStatus::Exception(_) } => return Ok(None), _ => () } Err(()) diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index 75347da68..5a9263e55 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -78,6 +78,16 @@ pub mod drtio { } } + fn link_has_async_ready(linkno: u8) -> bool { + let linkno = linkno as usize; + let async_ready; + unsafe { + async_ready = (csr::DRTIO[linkno].async_messages_ready_read)() == 1; + (csr::DRTIO[linkno].async_messages_ready_write)(1); + } + async_ready + } + fn recv_aux_timeout(io: &Io, linkno: u8, timeout: u32) -> Result { let max_time = clock::get_ms() + timeout as u64; loop { @@ -117,6 +127,8 @@ pub mod drtio { drtioaux::send(linkno, &drtioaux::Packet::SubkernelMessageAck { destination: from } ).unwrap(); + // give the satellite some time to process the message + io.sleep(10).unwrap(); }, // routable packets drtioaux::Packet::DmaAddTraceRequest { destination, .. } | @@ -147,45 +159,9 @@ pub mod drtio { } } else { warn!("[LINK#{}] Error handling async packets ({})", linkno, reply.unwrap_err()); + return; } - else { - drtioaux::send(dest_link, &packet).unwrap(); - } - None - }} - } - match packet { - // packets to be consumed locally - drtioaux::Packet::DmaPlaybackStatus { id, destination: 0, error, channel, timestamp } => { - remote_dma::playback_done(io, ddma_mutex, id, 0, error, channel, timestamp); - None - }, - drtioaux::Packet::SubkernelFinished { id, destination: 0, with_exception, exception_src } => { - subkernel::subkernel_finished(io, subkernel_mutex, id, with_exception, exception_src); - None - }, - drtioaux::Packet::SubkernelMessage { id, source: from, destination: 0, status, length, data } => { - subkernel::message_handle_incoming(io, subkernel_mutex, id, status, length as usize, &data); - // acknowledge receiving part of the message - drtioaux::send(linkno, - &drtioaux::Packet::SubkernelMessageAck { destination: from } - ).unwrap(); - None - }, - // routable packets - drtioaux::Packet::DmaAddTraceRequest { destination, .. } => route_packet!(destination), - drtioaux::Packet::DmaAddTraceReply { destination, .. } => route_packet!(destination), - drtioaux::Packet::DmaRemoveTraceRequest { destination, .. } => route_packet!(destination), - drtioaux::Packet::DmaRemoveTraceReply { destination, .. } => route_packet!(destination), - drtioaux::Packet::DmaPlaybackRequest { destination, .. } => route_packet!(destination), - drtioaux::Packet::DmaPlaybackReply { destination, .. } => route_packet!(destination), - drtioaux::Packet::SubkernelLoadRunRequest { destination, .. } => route_packet!(destination), - drtioaux::Packet::SubkernelLoadRunReply { destination, .. } => route_packet!(destination), - drtioaux::Packet::SubkernelMessage { destination, .. } => route_packet!(destination), - drtioaux::Packet::SubkernelMessageAck { destination, .. } => route_packet!(destination), - drtioaux::Packet::DmaPlaybackStatus { destination, .. } => route_packet!(destination), - drtioaux::Packet::SubkernelFinished { destination, .. 
} => route_packet!(destination), - other => Some(other) + } } } @@ -358,44 +334,35 @@ pub mod drtio { let linkno = hop - 1; if destination_up(up_destinations, destination) { if up_links[linkno as usize] { - loop { - let reply = aux_transact(io, aux_mutex, linkno, - &drtioaux::Packet::DestinationStatusRequest { - destination: destination - }); - if let Ok(reply) = reply { - let reply = process_async_packets(io, ddma_mutex, subkernel_mutex, routing_table, linkno, reply); - match reply { - Some(drtioaux::Packet::DestinationDownReply) => { - destination_set_up(routing_table, up_destinations, destination, false); - remote_dma::destination_changed(io, aux_mutex, ddma_mutex, routing_table, destination, false); - subkernel::destination_changed(io, aux_mutex, subkernel_mutex, routing_table, destination, false); - } - Some(drtioaux::Packet::DestinationOkReply) => (), - Some(drtioaux::Packet::DestinationSequenceErrorReply { channel }) => { - error!("[DEST#{}] RTIO sequence error involving channel 0x{:04x}:{}", destination, channel, resolve_channel_name(channel as u32)); - unsafe { SEEN_ASYNC_ERRORS |= ASYNC_ERROR_SEQUENCE_ERROR }; - } - Some(drtioaux::Packet::DestinationCollisionReply { channel }) => { - error!("[DEST#{}] RTIO collision involving channel 0x{:04x}:{}", destination, channel, resolve_channel_name(channel as u32)); - unsafe { SEEN_ASYNC_ERRORS |= ASYNC_ERROR_COLLISION }; - } - Some(drtioaux::Packet::DestinationBusyReply { channel }) => { - error!("[DEST#{}] RTIO busy error involving channel 0x{:04x}:{}", destination, channel, resolve_channel_name(channel as u32)); - unsafe { SEEN_ASYNC_ERRORS |= ASYNC_ERROR_BUSY }; - } - Some(packet) => error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet), - None => { - // continue asking until we get Destination...Reply or error out - // wait a bit not to overwhelm the receiver causing gateway errors - io.sleep(10).unwrap(); - continue; - } + let reply = aux_transact(io, aux_mutex, linkno, + &drtioaux::Packet::DestinationStatusRequest { + destination: destination + }); + if let Ok(reply) = reply { + match reply { + drtioaux::Packet::DestinationDownReply => { + destination_set_up(routing_table, up_destinations, destination, false); + remote_dma::destination_changed(io, aux_mutex, ddma_mutex, routing_table, destination, false); + subkernel::destination_changed(io, aux_mutex, subkernel_mutex, routing_table, destination, false); } - } else { - error!("[DEST#{}] communication failed ({:?})", destination, reply.unwrap_err()); + drtioaux::Packet::DestinationOkReply => (), + drtioaux::Packet::DestinationSequenceErrorReply { channel } => { + error!("[DEST#{}] RTIO sequence error involving channel 0x{:04x}:{}", destination, channel, resolve_channel_name(channel as u32)); + unsafe { SEEN_ASYNC_ERRORS |= ASYNC_ERROR_SEQUENCE_ERROR }; + } + drtioaux::Packet::DestinationCollisionReply { channel } => { + error!("[DEST#{}] RTIO collision involving channel 0x{:04x}:{}", destination, channel, resolve_channel_name(channel as u32)); + unsafe { SEEN_ASYNC_ERRORS |= ASYNC_ERROR_COLLISION }; + } + drtioaux::Packet::DestinationBusyReply { channel } => { + error!("[DEST#{}] RTIO busy error involving channel 0x{:04x}:{}", destination, channel, resolve_channel_name(channel as u32)); + unsafe { SEEN_ASYNC_ERRORS |= ASYNC_ERROR_BUSY }; + } + packet => error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet), + } - break; + } else { + error!("[DEST#{}] communication failed ({:?})", destination, reply.unwrap_err()); } } else { 
destination_set_up(routing_table, up_destinations, destination, false); @@ -436,6 +403,7 @@ pub mod drtio { if up_links[linkno as usize] { /* link was previously up */ if link_rx_up(linkno) { + process_async_packets(&io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno); process_unsolicited_aux(&io, aux_mutex, linkno); process_local_errors(linkno); } else { diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index 44b4bd0be..800c6d040 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -471,6 +471,7 @@ fn process_host_message(io: &Io, _aux_mutex: &Mutex, _ddma_mutex: &Mutex, _subke match subkernel::upload(io, _aux_mutex, _subkernel_mutex, _routing_table, _id) { Ok(_) => host_write(stream, host::Reply::LoadCompleted)?, Err(error) => { + subkernel::clear_subkernels(io, _subkernel_mutex)?; let mut description = String::new(); write!(&mut description, "{}", error).unwrap(); host_write(stream, host::Reply::LoadFailed(&description))? diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index 1a8e794a3..eec0bab8f 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -146,15 +146,6 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg if hop == 0 { *self_destination = destination; - // async messages - if *rank == 1 { - // for now, master ignores the async_messages_ready packet - if let Some(packet) = router.get_upstream_packet() { - // pass any async or routed packets to master - // this does mean that DDMA/SK packets to master will "trickle down" to higher rank - return drtioaux::send(0, &packet) - } - } let errors; unsafe { errors = csr::drtiosat::rtio_error_read(); @@ -776,9 +767,9 @@ pub extern fn main() -> i32 { if let Some(status) = dma_manager.get_status() { info!("playback done, error: {}, channel: {}, timestamp: {}", status.error, status.channel, status.timestamp); router.route(drtioaux::Packet::DmaPlaybackStatus { - destination: status.source, id: status.id, error: status.error, - channel: status.channel, timestamp: status.timestamp - }, &routing_table, rank, destination) + source: destination, destination: status.source, id: status.id, + error: status.error, channel: status.channel, timestamp: status.timestamp + }, &routing_table, rank, destination); } kernelmgr.process_kern_requests(&mut router, &routing_table, rank, destination); diff --git a/artiq/firmware/satman/routing.rs b/artiq/firmware/satman/routing.rs index e129e4a54..118ad11af 100644 --- a/artiq/firmware/satman/routing.rs +++ b/artiq/firmware/satman/routing.rs @@ -101,8 +101,11 @@ impl Router { } pub fn get_upstream_packet(&mut self) -> Option { - self.upstream_notified = false; - self.upstream_queue.pop_front() + let packet = self.upstream_queue.pop_front(); + if packet.is_none() { + self.upstream_notified = false; + } + packet } pub fn get_downstream_packet(&mut self) -> Option<(usize, drtioaux::Packet)> { From c876acd5a53ef04d611712c49a60a7b7decbadf9 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Fri, 8 Dec 2023 17:22:44 +0800 Subject: [PATCH 079/296] docs: subkernels can call other subkernels now --- artiq/language/core.py | 4 ++-- doc/manual/getting_started_core.rst | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/artiq/language/core.py b/artiq/language/core.py index 2aff914a9..6c4e242a1 100644 --- a/artiq/language/core.py +++ b/artiq/language/core.py @@ -72,8 +72,8 @@ def subkernel(arg=None, destination=0, flags={}): Subkernels behave 
similarly to kernels, with a few key differences:
 
     - they are started from main kernels, 
-    - they do not support RPCs, or running subsequent subkernels on other devices,
-    - but they can call other kernels or subkernels with the same destination.
+    - they do not support RPCs,
+    - but they can call other kernels or subkernels.
 
     Subkernels can accept arguments and return values. However, they must be fully
     annotated with ARTIQ types.
diff --git a/doc/manual/getting_started_core.rst b/doc/manual/getting_started_core.rst
index fe4ba7bd7..ad86a74f5 100644
--- a/doc/manual/getting_started_core.rst
+++ b/doc/manual/getting_started_core.rst
@@ -275,7 +275,7 @@ Subkernels refer to kernels running on a satellite device. This allows you to of
 Subkernels behave for the most part like regular kernels; they accept arguments and can return values. However, there are a few caveats:
 
-    - they do not support RPCs or calling subsequent subkernels on other devices,
+    - they do not support RPCs,
     - they do not support DRTIO,
     - their return value must be fully annotated with an ARTIQ type,
     - their arguments should be annotated, and only basic ARTIQ types are supported,
@@ -310,7 +310,7 @@ Subkernels are compiled after the main kernel, and then immediately uploaded to
 While ``self`` is accepted as an argument for subkernels, it is embedded into the compiled data. Any changes made by the main kernel or other subkernels will not be available.
 
-Subkernels can call other kernels and subkernels, if they're within the same destination. For a more complex example: ::
+Subkernels can call other kernels and subkernels. For a more complex example: ::
 
     from artiq.experiment import *
 

From c9e3771cd5990aafef4480cd11a96b50de7d22d5 Mon Sep 17 00:00:00 2001
From: mwojcik
Date: Fri, 29 Dec 2023 15:10:04 +0800
Subject: [PATCH 080/296] subkernels: add support for (d)dma

---
 artiq/firmware/ksupport/lib.rs                |  29 +-
 .../firmware/libproto_artiq/drtioaux_proto.rs |   8 +-
 artiq/firmware/runtime/rtio_dma.rs            |   4 +-
 artiq/firmware/runtime/rtio_mgt.rs            |   4 +-
 artiq/firmware/satman/dma.rs                  | 317 ++++++++++++++++--
 artiq/firmware/satman/kernel.rs               | 179 ++++++----
 artiq/firmware/satman/main.rs                 |  22 +-
 artiq/firmware/satman/routing.rs              |  60 +++-
 8 files changed, 529 insertions(+), 94 deletions(-)

diff --git a/artiq/firmware/ksupport/lib.rs b/artiq/firmware/ksupport/lib.rs
index 6f313dd6b..3cd2052ec 100644
--- a/artiq/firmware/ksupport/lib.rs
+++ b/artiq/firmware/ksupport/lib.rs
@@ -453,12 +453,39 @@ extern fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) {
     }
 }
 
-#[cfg(not(kernel_has_rtio_dma))]
+#[cfg(all(not(kernel_has_rtio_dma), not(has_rtio_dma)))]
 #[unwind(allowed)]
 extern fn dma_playback(_timestamp: i64, _ptr: i32, _uses_ddma: bool) {
     unimplemented!("not(kernel_has_rtio_dma)")
 }
 
+// for satellite (has_rtio_dma but not in kernel)
+#[cfg(all(not(kernel_has_rtio_dma), has_rtio_dma))]
+#[unwind(allowed)]
+extern fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) {
+    // DDMA is always used on satellites, so the `uses_ddma` setting is ignored
+    // StartRemoteRequest reused as "normal" start request
+    send(&DmaStartRemoteRequest { id: ptr as i32, timestamp: timestamp });
+    // skip awaitremoterequest - it's a given
+    recv!(&DmaAwaitRemoteReply { timeout, error, channel, timestamp } => {
+        if timeout {
+            raise!("DMAError",
+                "Error running DMA on satellite device, timed out waiting for results");
+        }
+        if error & 1 != 0 {
+            raise!("RTIOUnderflow",
+                "RTIO underflow at channel {rtio_channel_info:0}, {1} mu",
+                channel as i64, timestamp as i64, 0); 
} + if error & 2 != 0 { + raise!("RTIODestinationUnreachable", + "RTIO destination unreachable, output, at channel {rtio_channel_info:0}, {1} mu", + channel as i64, timestamp as i64, 0); + } + }); +} + + #[unwind(allowed)] extern fn subkernel_load_run(id: u32, destination: u8, run: bool) { send(&SubkernelLoadRunRequest { id: id, destination: destination, run: run }); diff --git a/artiq/firmware/libproto_artiq/drtioaux_proto.rs b/artiq/firmware/libproto_artiq/drtioaux_proto.rs index 1157e864c..f58217f9c 100644 --- a/artiq/firmware/libproto_artiq/drtioaux_proto.rs +++ b/artiq/firmware/libproto_artiq/drtioaux_proto.rs @@ -113,7 +113,7 @@ pub enum Packet { id: u32, status: PayloadStatus, length: u16, trace: [u8; MASTER_PAYLOAD_MAX_SIZE] }, - DmaAddTraceReply { destination: u8, succeeded: bool }, + DmaAddTraceReply { source: u8, destination: u8, id: u32, succeeded: bool }, DmaRemoveTraceRequest { source: u8, destination: u8, id: u32 }, DmaRemoveTraceReply { destination: u8, succeeded: bool }, DmaPlaybackRequest { source: u8, destination: u8, id: u32, timestamp: u64 }, @@ -303,7 +303,9 @@ impl Packet { } }, 0xb1 => Packet::DmaAddTraceReply { + source: reader.read_u8()?, destination: reader.read_u8()?, + id: reader.read_u32()?, succeeded: reader.read_bool()? }, 0xb2 => Packet::DmaRemoveTraceRequest { @@ -598,9 +600,11 @@ impl Packet { writer.write_u16(length)?; writer.write_all(&trace[0..length as usize])?; }, - Packet::DmaAddTraceReply { destination, succeeded } => { + Packet::DmaAddTraceReply { source, destination, id, succeeded } => { writer.write_u8(0xb1)?; + writer.write_u8(source)?; writer.write_u8(destination)?; + writer.write_u32(id)?; writer.write_bool(succeeded)?; }, Packet::DmaRemoveTraceRequest { source, destination, id } => { diff --git a/artiq/firmware/runtime/rtio_dma.rs b/artiq/firmware/runtime/rtio_dma.rs index 63bf563a6..666986919 100644 --- a/artiq/firmware/runtime/rtio_dma.rs +++ b/artiq/firmware/runtime/rtio_dma.rs @@ -167,10 +167,10 @@ pub mod remote_dma { } pub fn playback_done(io: &Io, ddma_mutex: &Mutex, - id: u32, destination: u8, error: u8, channel: u32, timestamp: u64) { + id: u32, source: u8, error: u8, channel: u32, timestamp: u64) { // called upon receiving PlaybackDone aux packet let _lock = ddma_mutex.lock(io).unwrap(); - let mut trace = unsafe { TRACES.get_mut(&id).unwrap().get_mut(&destination).unwrap() }; + let mut trace = unsafe { TRACES.get_mut(&id).unwrap().get_mut(&source).unwrap() }; trace.state = RemoteState::PlaybackEnded { error: error, channel: channel, diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index 5a9263e55..55d97d532 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -491,8 +491,8 @@ pub mod drtio { &drtioaux::Packet::DmaAddTraceRequest { id: id, source: 0, destination: destination, status: status, length: len as u16, trace: *slice})?; match reply { - drtioaux::Packet::DmaAddTraceReply { destination: 0, succeeded: true } => Ok(()), - drtioaux::Packet::DmaAddTraceReply { destination: 0, succeeded: false } => Err(Error::DmaAddTraceFail(destination)), + drtioaux::Packet::DmaAddTraceReply { destination: 0, succeeded: true, .. } => Ok(()), + drtioaux::Packet::DmaAddTraceReply { destination: 0, succeeded: false, .. 
} => Err(Error::DmaAddTraceFail(destination)), packet => Err(Error::UnexpectedPacket(packet)), } }) diff --git a/artiq/firmware/satman/dma.rs b/artiq/firmware/satman/dma.rs index b22be573a..4fc7a1393 100644 --- a/artiq/firmware/satman/dma.rs +++ b/artiq/firmware/satman/dma.rs @@ -1,7 +1,11 @@ +use alloc::{vec::Vec, collections::btree_map::BTreeMap, string::String}; +use core::mem; +use board_artiq::{drtioaux, drtio_routing::RoutingTable}; use board_misoc::{csr, cache::flush_l2_cache}; use proto_artiq::drtioaux_proto::PayloadStatus; -use alloc::{vec::Vec, collections::btree_map::BTreeMap}; -use ::{cricon_select, RtioMaster}; +use routing::{Router, Sliceable}; +use kernel::Manager as KernelManager; +use ::{cricon_select, RtioMaster, MASTER_PAYLOAD_MAX_SIZE}; const ALIGNMENT: usize = 64; @@ -19,17 +23,158 @@ pub struct RtioStatus { pub timestamp: u64 } +#[derive(Debug)] pub enum Error { IdNotFound, PlaybackInProgress, - EntryNotComplete + EntryNotComplete, + MasterDmaFound, + UploadFail, } #[derive(Debug)] struct Entry { trace: Vec, padding_len: usize, - complete: bool + complete: bool, + duration: u64, // relevant for locally ran DMA +} + +impl Entry { + pub fn from_vec(data: Vec, duration: u64) -> Entry { + let mut entry = Entry { + trace: data, + padding_len: 0, + complete: true, + duration: duration, + }; + entry.realign(); + entry + } + + pub fn id(&self) -> u32 { + self.trace[self.padding_len..].as_ptr() as u32 + } + + pub fn realign(&mut self) { + self.trace.push(0); + let data_len = self.trace.len(); + + self.trace.reserve(ALIGNMENT - 1); + let padding = ALIGNMENT - self.trace.as_ptr() as usize % ALIGNMENT; + let padding = if padding == ALIGNMENT { 0 } else { padding }; + for _ in 0..padding { + // Vec guarantees that this will not reallocate + self.trace.push(0) + } + for i in 1..data_len + 1 { + self.trace[data_len + padding - i] = self.trace[data_len - i] + } + self.complete = true; + self.padding_len = padding; + } +} + +#[derive(Debug)] +enum RemoteTraceState { + Unsent, + Sending(usize), + Ready, + Running(usize), +} + +#[derive(Debug)] +struct RemoteTraces { + remote_traces: BTreeMap, + state: RemoteTraceState, +} + +impl RemoteTraces { + pub fn new(traces: BTreeMap) -> RemoteTraces { + RemoteTraces { + remote_traces: traces, + state: RemoteTraceState::Unsent + } + } + + // on subkernel request + pub fn upload_traces(&mut self, id: u32, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) -> usize { + let len = self.remote_traces.len(); + if len > 0 { + self.state = RemoteTraceState::Sending(self.remote_traces.len()); + for (dest, trace) in self.remote_traces.iter_mut() { + // queue up the first packet for all destinations, rest will be sent after first ACK + let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; + let meta = trace.get_slice_master(&mut data_slice); + router.route(drtioaux::Packet::DmaAddTraceRequest { + source: self_destination, destination: *dest, id: id, + status: meta.status, length: meta.len, trace: data_slice + }, routing_table, rank, self_destination); + } + } + len + } + + // on incoming Packet::DmaAddTraceReply + pub fn ack_upload(&mut self, kernel_manager: &mut KernelManager, source: u8, id: u32, succeeded: bool, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) { + if let RemoteTraceState::Sending(count) = self.state { + if let Some(trace) = self.remote_traces.get_mut(&source) { + if trace.at_end() { + if count - 1 == 0 { + self.state = RemoteTraceState::Ready; 
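+                        // last destination has acknowledged its final slice;
+                        // unblock the kernel waiting in the DmaUploading state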
+ kernel_manager.ddma_remote_uploaded(succeeded); + } else { + self.state = RemoteTraceState::Sending(count - 1); + } + } else { + // send next slice + let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE]; + let meta = trace.get_slice_master(&mut data_slice); + router.route(drtioaux::Packet::DmaAddTraceRequest { + source: self_destination, destination: meta.destination, id: id, + status: meta.status, length: meta.len, trace: data_slice + }, routing_table, rank, self_destination); + } + } + } + + } + + // on subkernel request + pub fn playback(&mut self, id: u32, timestamp: u64, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) { + // route all the playback requests + // remote traces + local trace + self.state = RemoteTraceState::Running(self.remote_traces.len() + 1); + for (dest, _) in self.remote_traces.iter() { + router.route(drtioaux::Packet::DmaPlaybackRequest { + source: self_destination, destination: *dest, id: id, timestamp: timestamp + }, routing_table, rank, self_destination); + // response will be ignored (succeeded = false handled by the main thread) + } + } + + // on incoming Packet::DmaPlaybackDone + pub fn remote_finished(&mut self, kernel_manager: &mut KernelManager, error: u8, channel: u32, timestamp: u64) { + if let RemoteTraceState::Running(count) = self.state { + if error != 0 || count - 1 == 0 { + // notify the kernel about a DDMA error or finish + kernel_manager.ddma_finished(error, channel, timestamp); + self.state = RemoteTraceState::Ready; + // further messages will be ignored (if there was an error) + } else { // no error and not the last one awaited + self.state = RemoteTraceState::Running(count - 1); + } + } + } + + pub fn erase(&mut self, id: u32, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) { + for (dest, _) in self.remote_traces.iter() { + router.route(drtioaux::Packet::DmaRemoveTraceRequest { + source: self_destination, destination: *dest, id: id + }, routing_table, rank, self_destination); + // response will be ignored as this object will stop existing too + } + } } #[derive(Debug)] @@ -37,7 +182,12 @@ pub struct Manager { entries: BTreeMap<(u8, u32), Entry>, state: ManagerState, current_id: u32, - current_source: u8 + current_source: u8, + + remote_entries: BTreeMap, + name_map: BTreeMap, + recording_trace: Vec, + recording_name: String } impl Manager { @@ -52,6 +202,10 @@ impl Manager { current_id: 0, current_source: 0, state: ManagerState::Idle, + remote_entries: BTreeMap::new(), + name_map: BTreeMap::new(), + recording_trace: Vec::new(), + recording_name: String::new(), } } @@ -67,7 +221,9 @@ impl Manager { self.entries.insert((source, id), Entry { trace: Vec::new(), padding_len: 0, - complete: false }); + complete: false, + duration: 0 + }); self.entries.get_mut(&(source, id)).unwrap() } else { entry @@ -77,34 +233,122 @@ impl Manager { self.entries.insert((source, id), Entry { trace: Vec::new(), padding_len: 0, - complete: false }); + complete: false, + duration: 0, + }); self.entries.get_mut(&(source, id)).unwrap() }, }; entry.trace.extend(&trace[0..trace_len]); if status.is_last() { - entry.trace.push(0); - let data_len = entry.trace.len(); - - // Realign. 
- entry.trace.reserve(ALIGNMENT - 1); - let padding = ALIGNMENT - entry.trace.as_ptr() as usize % ALIGNMENT; - let padding = if padding == ALIGNMENT { 0 } else { padding }; - for _ in 0..padding { - // Vec guarantees that this will not reallocate - entry.trace.push(0) - } - for i in 1..data_len + 1 { - entry.trace[data_len + padding - i] = entry.trace[data_len - i] - } - entry.complete = true; - entry.padding_len = padding; + entry.realign(); flush_l2_cache(); } Ok(()) } + // API for subkernel + pub fn record_start(&mut self, name: &str) { + self.recording_name = String::from(name); + self.recording_trace = Vec::new(); + } + // API for subkernel + pub fn record_append(&mut self, data: &[u8]) { + self.recording_trace.extend_from_slice(data); + } + + // API for subkernel + pub fn record_stop(&mut self, duration: u64, self_destination: u8) -> Result { + let mut trace = Vec::new(); + mem::swap(&mut self.recording_trace, &mut trace); + trace.push(0); + let mut local_trace = Vec::new(); + let mut remote_traces: BTreeMap = BTreeMap::new(); + // analyze each entry and put in proper buckets, as the kernel core + // sends whole chunks, to limit comms/kernel CPU communication, + // and as only comms core has access to varios DMA buffers. + let mut ptr = 0; + while trace[ptr] != 0 { + // ptr + 3 = tgt >> 24 (destination) + let len = trace[ptr] as usize; + let destination = trace[ptr+3]; + if destination == 0 { + return Err(Error::MasterDmaFound); + } else if destination == self_destination { + local_trace.extend(&trace[ptr..ptr+len]); + } + else { + if let Some(remote_trace) = remote_traces.get_mut(&destination) { + remote_trace.extend(&trace[ptr..ptr+len]); + } else { + remote_traces.insert(destination, Sliceable::new(destination, trace[ptr..ptr+len].to_vec())); + } + } + // and jump to the next event + ptr += len; + } + let local_entry = Entry::from_vec(local_trace, duration); + let id = local_entry.id(); + + self.entries.insert((self_destination, id), local_entry); + self.remote_entries.insert(id, RemoteTraces::new(remote_traces)); + let mut name = String::new(); + mem::swap(&mut self.recording_name, &mut name); + self.name_map.insert(name, id); + + flush_l2_cache(); + + Ok(id) + } + + pub fn upload_traces(&mut self, id: u32, router: &mut Router, rank: u8, self_destination: u8, + routing_table: &RoutingTable) -> Result { + let remote_traces = self.remote_entries.get_mut(&id); + let mut len = 0; + if let Some(traces) = remote_traces { + len = traces.upload_traces(id, router, rank, self_destination, routing_table); + } + Ok(len) + } + + pub fn with_trace(&self, self_destination: u8, name: &str, f: F) -> R + where F: FnOnce(Option<&[u8]>, u64) -> R { + if let Some(ptr) = self.name_map.get(name) { + match self.entries.get(&(self_destination, *ptr)) { + Some(entry) => f(Some(&entry.trace[entry.padding_len..]), entry.duration), + None => f(None, 0) + } + } else { + f(None, 0) + } + } + + // API for subkernel + pub fn playback_remote(&mut self, id: u32, timestamp: u64, + router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable + ) -> Result<(), Error> { + if let Some(traces) = self.remote_entries.get_mut(&id) { + traces.playback(id, timestamp, router, rank, self_destination, routing_table); + Ok(()) + } else { + Err(Error::IdNotFound) + } + } + + // API for subkernel + pub fn erase_name(&mut self, name: &str, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) { + if let Some(id) = self.name_map.get(name) { + if let Some(traces) = 
self.remote_entries.get_mut(&id) { + traces.erase(*id, router, rank, self_destination, routing_table); + self.remote_entries.remove(&id); + } + self.entries.remove(&(self_destination, *id)); + self.name_map.remove(name); + } + } + + // API for incoming DDMA (drtio) pub fn erase(&mut self, source: u8, id: u32) -> Result<(), Error> { match self.entries.remove(&(source, id)) { Some(_) => Ok(()), @@ -112,6 +356,33 @@ impl Manager { } } + pub fn remote_finished(&mut self, kernel_manager: &mut KernelManager, + id: u32, error: u8, channel: u32, timestamp: u64) { + if let Some(entry) = self.remote_entries.get_mut(&id) { + entry.remote_finished(kernel_manager, error, channel, timestamp); + } + } + + pub fn ack_upload(&mut self, kernel_manager: &mut KernelManager, source: u8, id: u32, succeeded: bool, + router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) { + if let Some(entry) = self.remote_entries.get_mut(&id) { + entry.ack_upload(kernel_manager, source, id, succeeded, router, rank, self_destination, routing_table); + } + } + + pub fn cleanup(&mut self, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) { + // after subkernel ends, remove all self-generated traces + for (_, id) in self.name_map.iter_mut() { + if let Some(traces) = self.remote_entries.get_mut(&id) { + traces.erase(*id, router, rank, self_destination, routing_table); + self.remote_entries.remove(&id); + } + self.entries.remove(&(self_destination, *id)); + } + self.name_map.clear(); + } + + // API for both incoming DDMA (drtio) and subkernel pub fn playback(&mut self, source: u8, id: u32, timestamp: u64) -> Result<(), Error> { if self.state != ManagerState::Idle { return Err(Error::PlaybackInProgress); diff --git a/artiq/firmware/satman/kernel.rs b/artiq/firmware/satman/kernel.rs index 0b9762385..d4cc226cb 100644 --- a/artiq/firmware/satman/kernel.rs +++ b/artiq/firmware/satman/kernel.rs @@ -1,4 +1,4 @@ -use core::{mem, option::NoneError, cmp::min}; +use core::{mem, option::NoneError}; use alloc::{string::String, format, vec::Vec, collections::{btree_map::BTreeMap, vec_deque::VecDeque}}; use cslice::AsCSlice; @@ -15,7 +15,8 @@ use kernel::eh_artiq::StackPointerBacktrace; use ::{cricon_select, RtioMaster}; use cache::Cache; -use routing::Router; +use dma::{Manager as DmaManager, Error as DmaError}; +use routing::{Router, Sliceable, SliceMeta}; use SAT_PAYLOAD_MAX_SIZE; use MASTER_PAYLOAD_MAX_SIZE; @@ -65,7 +66,9 @@ enum KernelState { MsgAwait { max_time: u64, tags: Vec }, MsgSending, SubkernelAwaitLoad, - SubkernelAwaitFinish { max_time: u64, id: u32 } + SubkernelAwaitFinish { max_time: u64, id: u32 }, + DmaUploading { max_time: u64 }, + DmaAwait { max_time: u64 }, } #[derive(Debug)] @@ -78,7 +81,8 @@ pub enum Error { AwaitingMessage, SubkernelIoError, DrtioError, - KernelException(Sliceable) + KernelException(Sliceable), + DmaError(DmaError), } impl From for Error { @@ -99,16 +103,14 @@ impl From> for Error { } } -macro_rules! unexpected { - ($($arg:tt)*) => (return Err(Error::Unexpected(format!($($arg)*)))); +impl From for Error { + fn from(value: DmaError) -> Error { + Error::DmaError(value) + } } -/* represents data that has to be sent to Master */ -#[derive(Debug)] -pub struct Sliceable { - it: usize, - data: Vec, - destination: u8 +macro_rules! 
unexpected { + ($($arg:tt)*) => (return Err(Error::Unexpected(format!($($arg)*)))); } /* represents interkernel messages */ @@ -164,44 +166,6 @@ pub struct SubkernelFinished { pub source: u8 } -pub struct SliceMeta { - pub destination: u8, - pub len: u16, - pub status: PayloadStatus -} - -macro_rules! get_slice_fn { - ( $name:tt, $size:expr ) => { - pub fn $name(&mut self, data_slice: &mut [u8; $size]) -> SliceMeta { - let first = self.it == 0; - let len = min($size, self.data.len() - self.it); - let last = self.it + len == self.data.len(); - let status = PayloadStatus::from_status(first, last); - data_slice[..len].clone_from_slice(&self.data[self.it..self.it+len]); - self.it += len; - - SliceMeta { - destination: self.destination, - len: len as u16, - status: status - } - } - }; -} - -impl Sliceable { - pub fn new(destination: u8, data: Vec) -> Sliceable { - Sliceable { - it: 0, - data: data, - destination: destination - } - } - - get_slice_fn!(get_slice_sat, SAT_PAYLOAD_MAX_SIZE); - get_slice_fn!(get_slice_master, MASTER_PAYLOAD_MAX_SIZE); -} - impl MessageManager { pub fn new() -> MessageManager { MessageManager { @@ -312,10 +276,8 @@ impl Session { fn running(&self) -> bool { match self.kernel_state { - KernelState::Absent | KernelState::Loaded => false, - KernelState::Running | KernelState::MsgAwait { .. } | - KernelState::MsgSending | KernelState::SubkernelAwaitLoad | - KernelState::SubkernelAwaitFinish { .. } => true + KernelState::Absent | KernelState::Loaded => false, + _ => true } } @@ -490,7 +452,39 @@ impl Manager { } } - pub fn process_kern_requests(&mut self, router: &mut Router, routing_table: &RoutingTable, rank: u8, destination: u8) { + pub fn ddma_finished(&mut self, error: u8, channel: u32, timestamp: u64) { + if let KernelState::DmaAwait { .. } = self.session.kernel_state { + kern_send(&kern::DmaAwaitRemoteReply { + timeout: false, error: error, channel: channel, timestamp: timestamp + }).unwrap(); + self.session.kernel_state = KernelState::Running; + } + } + + pub fn ddma_nack(&mut self) { + // for simplicity treat it as a timeout for now... + if let KernelState::DmaAwait { .. } = self.session.kernel_state { + kern_send(&kern::DmaAwaitRemoteReply { + timeout: true, error: 0, channel: 0, timestamp: 0 + }).unwrap(); + self.session.kernel_state = KernelState::Running; + } + } + + pub fn ddma_remote_uploaded(&mut self, succeeded: bool) { + if let KernelState::DmaUploading { .. } = self.session.kernel_state { + if succeeded { + self.session.kernel_state = KernelState::Running; + kern_acknowledge().unwrap(); + } else { + self.stop(); + self.runtime_exception(Error::DmaError(DmaError::UploadFail)); + } + + } + } + + pub fn process_kern_requests(&mut self, router: &mut Router, routing_table: &RoutingTable, rank: u8, destination: u8, dma_manager: &mut DmaManager) { macro_rules! 
finished { ($with_exception:expr) => {{ Some(SubkernelFinished { source: self.session.source, id: self.current_id, @@ -504,6 +498,7 @@ impl Manager { destination: subkernel_finished.source, id: subkernel_finished.id, with_exception: subkernel_finished.with_exception, exception_src: subkernel_finished.exception_source }, &routing_table, rank, destination); + dma_manager.cleanup(router, rank, destination, routing_table); } if !self.is_running() { @@ -528,7 +523,7 @@ impl Manager { } } - match self.process_kern_message(router, routing_table, rank, destination) { + match self.process_kern_message(router, routing_table, rank, destination, dma_manager) { Ok(Some(with_exception)) => { self.last_finished = finished!(with_exception) }, @@ -585,6 +580,20 @@ impl Manager { } Ok(()) } + KernelState::DmaAwait { max_time } => { + if clock::get_ms() > *max_time { + kern_send(&kern::DmaAwaitRemoteReply { timeout: true, error: 0, channel: 0, timestamp: 0 })?; + self.session.kernel_state = KernelState::Running; + } + // ddma_finished() and nack() covers the other case + Ok(()) + } + KernelState::DmaUploading { max_time } => { + if clock::get_ms() > *max_time { + unexpected!("DMAError: Timed out sending traces to remote"); + } + Ok(()) + } _ => Ok(()) } } @@ -622,13 +631,19 @@ impl Manager { fn process_kern_message(&mut self, router: &mut Router, routing_table: &RoutingTable, - rank: u8, destination: u8 + rank: u8, destination: u8, + dma_manager: &mut DmaManager ) -> Result, Error> { // returns Ok(with_exception) on finish // None if the kernel is still running kern_recv(|request| { match (request, &self.session.kernel_state) { - (&kern::LoadReply(_), KernelState::Loaded) => { + (&kern::LoadReply(_), KernelState::Loaded) | + (_, KernelState::DmaUploading { .. }) | + (_, KernelState::DmaAwait { .. }) | + (_, KernelState::MsgSending) | + (_, KernelState::SubkernelAwaitLoad) | + (_, KernelState::SubkernelAwaitFinish { .. }) => { // We're standing by; ignore the message. 
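+                    // (these states are advanced by aux packets or timeouts handled
+                    //  elsewhere, not by requests from the kernel CPU)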
return Ok(None) } @@ -693,6 +708,50 @@ impl Manager { return Ok(Some(true)) } + &kern::DmaRecordStart(name) => { + dma_manager.record_start(name); + kern_acknowledge() + } + &kern::DmaRecordAppend(data) => { + dma_manager.record_append(data); + kern_acknowledge() + } + &kern::DmaRecordStop { duration, enable_ddma: _ } => { + // ddma is always used on satellites + if let Ok(id) = dma_manager.record_stop(duration, destination) { + let remote_count = dma_manager.upload_traces(id, router, rank, destination, routing_table)?; + if remote_count > 0 { + let max_time = clock::get_ms() + 10_000 as u64; + self.session.kernel_state = KernelState::DmaUploading { max_time: max_time }; + Ok(()) + } else { + kern_acknowledge() + } + } else { + unexpected!("DMAError: found an unsupported call to RTIO devices on master") + } + } + &kern::DmaEraseRequest { name } => { + dma_manager.erase_name(name, router, rank, destination, routing_table); + kern_acknowledge() + } + &kern::DmaRetrieveRequest { name } => { + dma_manager.with_trace(destination, name, |trace, duration| { + kern_send(&kern::DmaRetrieveReply { + trace: trace, + duration: duration, + uses_ddma: true, + }) + }) + } + &kern::DmaStartRemoteRequest { id, timestamp } => { + let max_time = clock::get_ms() + 10_000 as u64; + self.session.kernel_state = KernelState::DmaAwait { max_time: max_time }; + dma_manager.playback_remote(id as u32, timestamp as u64, router, rank, destination, routing_table)?; + dma_manager.playback(destination, id as u32, timestamp as u64)?; + Ok(()) + } + &kern::SubkernelMsgSend { id: _, destination: msg_dest, count, tag, data } => { let dest = match msg_dest { Some(dest) => dest, @@ -717,13 +776,13 @@ impl Manager { router.route(drtioaux::Packet::SubkernelLoadRunRequest { source: destination, destination: sk_destination, id: id, run: run }, routing_table, rank, destination); - kern_acknowledge() + Ok(()) } &kern::SubkernelAwaitFinishRequest{ id, timeout } => { let max_time = clock::get_ms() + timeout as u64; self.session.kernel_state = KernelState::SubkernelAwaitFinish { max_time: max_time, id: id }; - kern_acknowledge() + Ok(()) } request => unexpected!("unexpected request {:?} from kernel CPU", request) diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index eec0bab8f..a496afe3e 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -377,9 +377,14 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg *self_destination = destination; let succeeded = dmamgr.add(source, id, status, &trace, length as usize).is_ok(); router.send(drtioaux::Packet::DmaAddTraceReply { - destination: source, succeeded: succeeded + source: *self_destination, destination: source, id: id, succeeded: succeeded }, _routing_table, *rank, *self_destination) } + drtioaux::Packet::DmaAddTraceReply { source, destination: _destination, id, succeeded } => { + forward!(_routing_table, _destination, *rank, _repeaters, &packet); + dmamgr.ack_upload(kernelmgr, source, id, succeeded, router, *rank, *self_destination, _routing_table); + Ok(()) + } drtioaux::Packet::DmaRemoveTraceRequest { source, destination: _destination, id } => { forward!(_routing_table, _destination, *rank, _repeaters, &packet); let succeeded = dmamgr.erase(source, id).is_ok(); @@ -395,6 +400,18 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg destination: source, succeeded: succeeded }, _routing_table, *rank, *self_destination) } + drtioaux::Packet::DmaPlaybackReply { destination: 
_destination, succeeded } => { + forward!(_routing_table, _destination, *rank, _repeaters, &packet); + if !succeeded { + kernelmgr.ddma_nack(); + } + Ok(()) + } + drtioaux::Packet::DmaPlaybackStatus { source: _, destination: _destination, id, error, channel, timestamp } => { + forward!(_routing_table, _destination, *rank, _repeaters, &packet); + dmamgr.remote_finished(kernelmgr, id, error, channel, timestamp); + Ok(()) + } drtioaux::Packet::SubkernelAddDataRequest { destination, id, status, length, data } => { forward!(_routing_table, destination, *rank, _repeaters, &packet); @@ -426,7 +443,6 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg kernelmgr.subkernel_load_run_reply(succeeded, *self_destination); Ok(()) } - // { destination: u8, id: u32, with_exception: bool, exception_src: u8 }, drtioaux::Packet::SubkernelFinished { destination: _destination, id, with_exception, exception_src } => { forward!(_routing_table, _destination, *rank, _repeaters, &packet); kernelmgr.remote_subkernel_finished(id, with_exception, exception_src); @@ -772,7 +788,7 @@ pub extern fn main() -> i32 { }, &routing_table, rank, destination); } - kernelmgr.process_kern_requests(&mut router, &routing_table, rank, destination); + kernelmgr.process_kern_requests(&mut router, &routing_table, rank, destination, &mut dma_manager); if let Some((repno, packet)) = router.get_downstream_packet() { if let Err(e) = repeaters[repno].aux_send(&packet) { diff --git a/artiq/firmware/satman/routing.rs b/artiq/firmware/satman/routing.rs index 118ad11af..6867575ea 100644 --- a/artiq/firmware/satman/routing.rs +++ b/artiq/firmware/satman/routing.rs @@ -1,6 +1,64 @@ -use alloc::collections::vec_deque::VecDeque; +use alloc::{vec::Vec, collections::vec_deque::VecDeque}; use board_artiq::{drtioaux, drtio_routing}; use board_misoc::csr; +use core::cmp::min; +use proto_artiq::drtioaux_proto::PayloadStatus; +use SAT_PAYLOAD_MAX_SIZE; +use MASTER_PAYLOAD_MAX_SIZE; + +/* represents data that has to be sent with the aux protocol */ +#[derive(Debug)] +pub struct Sliceable { + it: usize, + data: Vec, + destination: u8 +} + +pub struct SliceMeta { + pub destination: u8, + pub len: u16, + pub status: PayloadStatus +} + +macro_rules! get_slice_fn { + ( $name:tt, $size:expr ) => { + pub fn $name(&mut self, data_slice: &mut [u8; $size]) -> SliceMeta { + let first = self.it == 0; + let len = min($size, self.data.len() - self.it); + let last = self.it + len == self.data.len(); + let status = PayloadStatus::from_status(first, last); + data_slice[..len].clone_from_slice(&self.data[self.it..self.it+len]); + self.it += len; + + SliceMeta { + destination: self.destination, + len: len as u16, + status: status + } + } + }; +} + +impl Sliceable { + pub fn new(destination: u8, data: Vec) -> Sliceable { + Sliceable { + it: 0, + data: data, + destination: destination + } + } + + pub fn at_end(&self) -> bool { + self.it == self.data.len() + } + + pub fn extend(&mut self, data: &[u8]) { + self.data.extend(data); + } + + get_slice_fn!(get_slice_sat, SAT_PAYLOAD_MAX_SIZE); + get_slice_fn!(get_slice_master, MASTER_PAYLOAD_MAX_SIZE); +} // Packets from downstream (further satellites) are received and routed appropriately. 
// they're passed as soon as possible downstream (within the subtree), or sent upstream, From 6c0ff9a912518bb5126733773edb252144f5d108 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Tue, 9 Jan 2024 10:41:22 +0800 Subject: [PATCH 081/296] satman: fix targets without drtio routing --- artiq/firmware/satman/main.rs | 3 ++- artiq/firmware/satman/routing.rs | 16 ++++++++++++---- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index a496afe3e..78c84103a 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -789,7 +789,8 @@ pub extern fn main() -> i32 { } kernelmgr.process_kern_requests(&mut router, &routing_table, rank, destination, &mut dma_manager); - + + #[cfg(has_drtio_routing)] if let Some((repno, packet)) = router.get_downstream_packet() { if let Err(e) = repeaters[repno].aux_send(&packet) { warn!("[REP#{}] Error when sending packet to satellite ({:?})", repno, e) diff --git a/artiq/firmware/satman/routing.rs b/artiq/firmware/satman/routing.rs index 6867575ea..169630dbd 100644 --- a/artiq/firmware/satman/routing.rs +++ b/artiq/firmware/satman/routing.rs @@ -1,5 +1,6 @@ use alloc::{vec::Vec, collections::vec_deque::VecDeque}; use board_artiq::{drtioaux, drtio_routing}; +#[cfg(has_drtio_routing)] use board_misoc::csr; use core::cmp::min; use proto_artiq::drtioaux_proto::PayloadStatus; @@ -72,6 +73,7 @@ impl Sliceable { pub struct Router { upstream_queue: VecDeque, local_queue: VecDeque, + #[cfg(has_drtio_routing)] downstream_queue: VecDeque<(usize, drtioaux::Packet)>, upstream_notified: bool, } @@ -81,6 +83,7 @@ impl Router { Router { upstream_queue: VecDeque::new(), local_queue: VecDeque::new(), + #[cfg(has_drtio_routing)] downstream_queue: VecDeque::new(), upstream_notified: false, } @@ -90,14 +93,14 @@ impl Router { // messages are always buffered for both upstream and downstream pub fn route(&mut self, packet: drtioaux::Packet, _routing_table: &drtio_routing::RoutingTable, _rank: u8, - _self_destination: u8 + self_destination: u8 ) { + let destination = packet.routable_destination(); #[cfg(has_drtio_routing)] { - let destination = packet.routable_destination(); if let Some(destination) = destination { let hop = _routing_table.0[destination as usize][_rank as usize] as usize; - if destination == _self_destination { + if destination == self_destination { self.local_queue.push_back(packet); } else if hop > 0 && hop < csr::DRTIOREP.len() { let repno = (hop - 1) as usize; @@ -111,7 +114,11 @@ impl Router { } #[cfg(not(has_drtio_routing))] { - self.upstream_queue.push_back(packet); + if destination == Some(self_destination) { + self.local_queue.push_back(packet); + } else { + self.upstream_queue.push_back(packet); + } } } @@ -166,6 +173,7 @@ impl Router { packet } + #[cfg(has_drtio_routing)] pub fn get_downstream_packet(&mut self) -> Option<(usize, drtioaux::Packet)> { self.downstream_queue.pop_front() } From b215df2d25b69082802f596d28271e4f0ee0d99d Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 8 Jan 2024 14:42:28 +0800 Subject: [PATCH 082/296] comm_analyzer: add WaveformManager, WaveformChannel --- artiq/coredevice/comm_analyzer.py | 192 +++++++++++++++++++++--------- 1 file changed, 136 insertions(+), 56 deletions(-) diff --git a/artiq/coredevice/comm_analyzer.py b/artiq/coredevice/comm_analyzer.py index 09ad3618f..3e3fcafba 100644 --- a/artiq/coredevice/comm_analyzer.py +++ b/artiq/coredevice/comm_analyzer.py @@ -34,6 +34,13 @@ class ExceptionType(Enum): i_overflow = 0b100001 
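+# Describes how a channel's values are displayed: analog trace, single bit,
+# bit vector, or log messages.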
+class WaveformType(Enum): + ANALOG = 0 + BIT = 1 + VECTOR = 2 + LOG = 3 + + def get_analyzer_dump(host, port=1382): sock = socket.create_connection((host, port)) try: @@ -150,6 +157,12 @@ class VCDChannel: integer_cast = struct.unpack(">Q", struct.pack(">d", x))[0] self.set_value("{:064b}".format(integer_cast)) + def set_log(self, log_message): + value = "" + for c in log_message: + value += "{:08b}".format(ord(c)) + self.set_value(value) + class VCDManager: def __init__(self, fileobj): @@ -160,15 +173,15 @@ class VCDManager: def set_timescale_ps(self, timescale): self.out.write("$timescale {}ps $end\n".format(round(timescale))) - def get_channel(self, name, width): + def get_channel(self, name, width, ty): code = next(self.codes) self.out.write("$var wire {width} {code} {name} $end\n" .format(name=name, code=code, width=width)) return VCDChannel(self.out, code) @contextmanager - def scope(self, name): - self.out.write("$scope module {} $end\n".format(name)) + def scope(self, scope, name): + self.out.write("$scope module {}/{} $end\n".format(scope, name)) yield self.out.write("$upscope $end\n") @@ -177,11 +190,66 @@ class VCDManager: self.out.write("#{}\n".format(time)) self.current_time = time + def set_end_time(self, time): + pass + + +class WaveformManager: + def __init__(self): + self.current_time = 0 + self.channels = list() + self.current_scope = "" + self.trace = {"timescale": 1, "stopped_x": None, "logs": dict(), "data": dict()} + + def set_timescale_ps(self, timescale): + self.trace["timescale"] = int(timescale) + + def get_channel(self, name, width, ty): + if ty == WaveformType.LOG: + data = self.trace["logs"][self.current_scope + name] = list() + else: + data = self.trace["data"][self.current_scope + name] = list() + channel = WaveformChannel(data, self.current_time) + self.channels.append(channel) + return channel + + @contextmanager + def scope(self, scope, name): + old_scope = self.current_scope + self.current_scope = scope + "/" + yield + self.current_scope = old_scope + + def set_time(self, time): + for channel in self.channels: + channel.set_time(time) + + def set_end_time(self, time): + self.trace["stopped_x"] = time + + +class WaveformChannel: + def __init__(self, data, current_time): + self.data = data + self.current_time = current_time + + def set_value(self, value): + self.data.append((self.current_time, value)) + + def set_value_double(self, x): + self.data.append((self.current_time, x)) + + def set_time(self, time): + self.current_time = time + + def set_log(self, log_message): + self.data.append((self.current_time, log_message)) + class TTLHandler: - def __init__(self, vcd_manager, name): + def __init__(self, manager, name): self.name = name - self.channel_value = vcd_manager.get_channel("ttl/" + name, 1) + self.channel_value = manager.get_channel("ttl/" + name, 1, ty=WaveformType.BIT) self.last_value = "X" self.oe = True @@ -206,11 +274,11 @@ class TTLHandler: class TTLClockGenHandler: - def __init__(self, vcd_manager, name, ref_period): + def __init__(self, manager, name, ref_period): self.name = name self.ref_period = ref_period - self.channel_frequency = vcd_manager.get_channel( - "ttl_clkgen/" + name, 64) + self.channel_frequency = manager.get_channel( + "ttl_clkgen/" + name, 64, ty=WaveformType.ANALOG) def process_message(self, message): if isinstance(message, OutputMessage): @@ -221,8 +289,8 @@ class TTLClockGenHandler: class DDSHandler: - def __init__(self, vcd_manager, onehot_sel, sysclk): - self.vcd_manager = vcd_manager + def __init__(self, manager, 
onehot_sel, sysclk): + self.manager = manager self.onehot_sel = onehot_sel self.sysclk = sysclk @@ -231,11 +299,11 @@ class DDSHandler: def add_dds_channel(self, name, dds_channel_nr): dds_channel = dict() - with self.vcd_manager.scope("dds/{}".format(name)): + with self.manager.scope("dds", name): dds_channel["vcd_frequency"] = \ - self.vcd_manager.get_channel(name + "/frequency", 64) + self.manager.get_channel(name + "/frequency", 64, ty=WaveformType.ANALOG) dds_channel["vcd_phase"] = \ - self.vcd_manager.get_channel(name + "/phase", 64) + self.manager.get_channel(name + "/phase", 64, ty=WaveformType.ANALOG) dds_channel["ftw"] = [None, None] dds_channel["pow"] = None self.dds_channels[dds_channel_nr] = dds_channel @@ -285,10 +353,10 @@ class DDSHandler: class WishboneHandler: - def __init__(self, vcd_manager, name, read_bit): + def __init__(self, manager, name, read_bit): self._reads = [] self._read_bit = read_bit - self.stb = vcd_manager.get_channel("{}/{}".format(name, "stb"), 1) + self.stb = manager.get_channel(name + "/stb", 1, ty=WaveformType.BIT) def process_message(self, message): self.stb.set_value("1") @@ -318,16 +386,17 @@ class WishboneHandler: class SPIMasterHandler(WishboneHandler): - def __init__(self, vcd_manager, name): + def __init__(self, manager, name): self.channels = {} - with vcd_manager.scope("spi/{}".format(name)): - super().__init__(vcd_manager, name, read_bit=0b100) + self.scope = "spi" + with manager.scope("spi", name): + super().__init__(manager, name, read_bit=0b100) for reg_name, reg_width in [ ("config", 32), ("chip_select", 16), ("write_length", 8), ("read_length", 8), ("write", 32), ("read", 32)]: - self.channels[reg_name] = vcd_manager.get_channel( - "{}/{}".format(name, reg_name), reg_width) + self.channels[reg_name] = manager.get_channel( + "{}/{}".format(name, reg_name), reg_width, ty=WaveformType.VECTOR) def process_write(self, address, data): if address == 0: @@ -352,11 +421,12 @@ class SPIMasterHandler(WishboneHandler): class SPIMaster2Handler(WishboneHandler): - def __init__(self, vcd_manager, name): + def __init__(self, manager, name): self._reads = [] self.channels = {} - with vcd_manager.scope("spi2/{}".format(name)): - self.stb = vcd_manager.get_channel("{}/{}".format(name, "stb"), 1) + self.scope = "spi2" + with manager.scope("spi2", name): + self.stb = manager.get_channel(name + "/stb", 1, ty=WaveformType.BIT) for reg_name, reg_width in [ ("flags", 8), ("length", 5), @@ -364,8 +434,8 @@ class SPIMaster2Handler(WishboneHandler): ("chip_select", 8), ("write", 32), ("read", 32)]: - self.channels[reg_name] = vcd_manager.get_channel( - "{}/{}".format(name, reg_name), reg_width) + self.channels[reg_name] = manager.get_channel( + "{}/{}".format(name, reg_name), reg_width, ty=WaveformType.VECTOR) def process_message(self, message): self.stb.set_value("1") @@ -413,11 +483,12 @@ def _extract_log_chars(data): class LogHandler: - def __init__(self, vcd_manager, vcd_log_channels): - self.vcd_channels = dict() - for name, maxlength in vcd_log_channels.items(): - self.vcd_channels[name] = vcd_manager.get_channel("log/" + name, - maxlength*8) + def __init__(self, manager, log_channels): + self.channels = dict() + for name, maxlength in log_channels.items(): + self.channels[name] = manager.get_channel("logs/" + name, + maxlength * 8, + ty=WaveformType.LOG) self.current_entry = "" def process_message(self, message): @@ -425,15 +496,12 @@ class LogHandler: self.current_entry += _extract_log_chars(message.data) if len(self.current_entry) > 1 and 
self.current_entry[-1] == "\x1D": channel_name, log_message = self.current_entry[:-1].split("\x1E", maxsplit=1) - vcd_value = "" - for c in log_message: - vcd_value += "{:08b}".format(ord(c)) - self.vcd_channels[channel_name].set_value(vcd_value) + self.channels[channel_name].set_log(log_message) self.current_entry = "" -def get_vcd_log_channels(log_channel, messages): - vcd_log_channels = dict() +def get_log_channels(log_channel, messages): + log_channels = dict() log_entry = "" for message in messages: if (isinstance(message, OutputMessage) @@ -442,13 +510,13 @@ def get_vcd_log_channels(log_channel, messages): if len(log_entry) > 1 and log_entry[-1] == "\x1D": channel_name, log_message = log_entry[:-1].split("\x1E", maxsplit=1) l = len(log_message) - if channel_name in vcd_log_channels: - if vcd_log_channels[channel_name] < l: - vcd_log_channels[channel_name] = l + if channel_name in log_channels: + if log_channels[channel_name] < l: + log_channels[channel_name] = l else: - vcd_log_channels[channel_name] = l + log_channels[channel_name] = l log_entry = "" - return vcd_log_channels + return log_channels def get_single_device_argument(devices, module, cls, argument): @@ -475,7 +543,7 @@ def get_dds_sysclk(devices): ("AD9914",), "sysclk") -def create_channel_handlers(vcd_manager, devices, ref_period, +def create_channel_handlers(manager, devices, ref_period, dds_sysclk, dds_onehot_sel): channel_handlers = dict() for name, desc in sorted(devices.items(), key=itemgetter(0)): @@ -483,11 +551,11 @@ def create_channel_handlers(vcd_manager, devices, ref_period, if (desc["module"] == "artiq.coredevice.ttl" and desc["class"] in {"TTLOut", "TTLInOut"}): channel = desc["arguments"]["channel"] - channel_handlers[channel] = TTLHandler(vcd_manager, name) + channel_handlers[channel] = TTLHandler(manager, name) if (desc["module"] == "artiq.coredevice.ttl" and desc["class"] == "TTLClockGen"): channel = desc["arguments"]["channel"] - channel_handlers[channel] = TTLClockGenHandler(vcd_manager, name, ref_period) + channel_handlers[channel] = TTLClockGenHandler(manager, name, ref_period) if (desc["module"] == "artiq.coredevice.ad9914" and desc["class"] == "AD9914"): dds_bus_channel = desc["arguments"]["bus_channel"] @@ -495,14 +563,14 @@ def create_channel_handlers(vcd_manager, devices, ref_period, if dds_bus_channel in channel_handlers: dds_handler = channel_handlers[dds_bus_channel] else: - dds_handler = DDSHandler(vcd_manager, dds_onehot_sel, dds_sysclk) + dds_handler = DDSHandler(manager, dds_onehot_sel, dds_sysclk) channel_handlers[dds_bus_channel] = dds_handler dds_handler.add_dds_channel(name, dds_channel) if (desc["module"] == "artiq.coredevice.spi2" and desc["class"] == "SPIMaster"): channel = desc["arguments"]["channel"] channel_handlers[channel] = SPIMaster2Handler( - vcd_manager, name) + manager, name) return channel_handlers @@ -512,11 +580,21 @@ def get_message_time(message): def decoded_dump_to_vcd(fileobj, devices, dump, uniform_interval=False): vcd_manager = VCDManager(fileobj) + decoded_dump_to_target(vcd_manager, devices, dump, uniform_interval) + + +def decoded_dump_to_waveform_data(devices, dump, uniform_interval=False): + manager = WaveformManager() + decoded_dump_to_target(manager, devices, dump, uniform_interval) + return manager.trace + + +def decoded_dump_to_target(manager, devices, dump, uniform_interval): ref_period = get_ref_period(devices) if ref_period is not None: if not uniform_interval: - vcd_manager.set_timescale_ps(ref_period*1e12) + manager.set_timescale_ps(ref_period*1e12) 
else: logger.warning("unable to determine core device ref_period") ref_period = 1e-9 # guess @@ -526,6 +604,8 @@ def decoded_dump_to_vcd(fileobj, devices, dump, uniform_interval=False): dds_sysclk = 3e9 # guess if isinstance(dump.messages[-1], StoppedMessage): + m = dump.messages[-1] + end_time = get_message_time(m) messages = dump.messages[:-1] else: logger.warning("StoppedMessage missing") @@ -533,20 +613,20 @@ def decoded_dump_to_vcd(fileobj, devices, dump, uniform_interval=False): messages = sorted(messages, key=get_message_time) channel_handlers = create_channel_handlers( - vcd_manager, devices, ref_period, + manager, devices, ref_period, dds_sysclk, dump.dds_onehot_sel) - vcd_log_channels = get_vcd_log_channels(dump.log_channel, messages) + log_channels = get_log_channels(dump.log_channel, messages) channel_handlers[dump.log_channel] = LogHandler( - vcd_manager, vcd_log_channels) + manager, log_channels) if uniform_interval: # RTIO event timestamp in machine units - timestamp = vcd_manager.get_channel("timestamp", 64) + timestamp = manager.get_channel("timestamp", 64, ty=WaveformType.VECTOR) # RTIO time interval between this and the next timed event # in SI seconds - interval = vcd_manager.get_channel("interval", 64) - slack = vcd_manager.get_channel("rtio_slack", 64) + interval = manager.get_channel("interval", 64, ty=WaveformType.ANALOG) + slack = manager.get_channel("rtio_slack", 64, ty=WaveformType.ANALOG) - vcd_manager.set_time(0) + manager.set_time(0) start_time = 0 for m in messages: start_time = get_message_time(m) @@ -560,11 +640,11 @@ def decoded_dump_to_vcd(fileobj, devices, dump, uniform_interval=False): if t >= 0: if uniform_interval: interval.set_value_double((t - t0)*ref_period) - vcd_manager.set_time(i) + manager.set_time(i) timestamp.set_value("{:064b}".format(t)) t0 = t else: - vcd_manager.set_time(t) + manager.set_time(t) channel_handlers[message.channel].process_message(message) if isinstance(message, OutputMessage): slack.set_value_double( From 669edf17c5a60ee76d0555600dab80889bfcad79 Mon Sep 17 00:00:00 2001 From: Charles Baynham Date: Wed, 10 Jan 2024 08:05:26 +0000 Subject: [PATCH 083/296] scheduler: resolve git references into revisions on submission (#2296) Signed-off-by: Charles Baynham --- RELEASE_NOTES.rst | 1 + artiq/dashboard/experiments.py | 14 ++++++++++---- artiq/master/experiments.py | 20 ++++++++++++++++---- artiq/master/scheduler.py | 16 ++++++++++++---- 4 files changed, 39 insertions(+), 12 deletions(-) diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index 15ce87197..40fec5f56 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -45,6 +45,7 @@ Highlights: * Full Python 3.10 support. * MSYS2 packaging for Windows, which replaces Conda. Conda packages are still available to support legacy installations, but may be removed in a future release. +* Experiments can now be submitted with revisions set to a branch / tag name instead of only git hashes. 
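+  For example, assuming the usual ``-R`` (use repository) and ``-r``/``--revision``
+  options of ``artiq_client``, a submission pinned to a hypothetical branch could look like::
+
+    artiq_client submit -R -r my-feature-branch experiment.py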
Breaking changes: diff --git a/artiq/dashboard/experiments.py b/artiq/dashboard/experiments.py index 8526fdf32..a7db9047b 100644 --- a/artiq/dashboard/experiments.py +++ b/artiq/dashboard/experiments.py @@ -349,9 +349,10 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): repo_rev = QtWidgets.QLineEdit() repo_rev.setPlaceholderText("current") repo_rev.setClearButtonEnabled(True) - repo_rev_label = QtWidgets.QLabel("Revision:") + repo_rev_label = QtWidgets.QLabel("Rev / ref:") repo_rev_label.setToolTip("Experiment repository revision " - "(commit ID) to use") + "(commit ID) or reference (branch " + "or tag) to use") self.layout.addWidget(repo_rev_label, 3, 2) self.layout.addWidget(repo_rev, 3, 3) @@ -739,8 +740,13 @@ class ExperimentManager: del self.open_experiments[expurl] async def _submit_task(self, expurl, *args): - rid = await self.schedule_ctl.submit(*args) - logger.info("Submitted '%s', RID is %d", expurl, rid) + try: + rid = await self.schedule_ctl.submit(*args) + except KeyError: + expid = args[1] + logger.error("Submission failed - revision \"%s\" was not found", expid["repo_rev"]) + else: + logger.info("Submitted '%s', RID is %d", expurl, rid) def submit(self, expurl): file, class_name, _ = self.resolve_expurl(expurl) diff --git a/artiq/master/experiments.py b/artiq/master/experiments.py index df29f2a6b..935872fb0 100644 --- a/artiq/master/experiments.py +++ b/artiq/master/experiments.py @@ -111,7 +111,7 @@ class ExperimentDB: try: if new_cur_rev is None: new_cur_rev = self.repo_backend.get_head_rev() - wd, _ = self.repo_backend.request_rev(new_cur_rev) + wd, _, _ = self.repo_backend.request_rev(new_cur_rev) self.repo_backend.release_rev(self.cur_rev) self.cur_rev = new_cur_rev self.status["cur_rev"] = new_cur_rev @@ -132,7 +132,7 @@ class ExperimentDB: if use_repository: if revision is None: revision = self.cur_rev - wd, _ = self.repo_backend.request_rev(revision) + wd, _, revision = self.repo_backend.request_rev(revision) filename = os.path.join(wd, filename) worker = Worker(self.worker_handlers) try: @@ -169,7 +169,7 @@ class FilesystemBackend: return "N/A" def request_rev(self, rev): - return self.root, None + return self.root, None, "N/A" def release_rev(self, rev): pass @@ -200,14 +200,26 @@ class GitBackend: def get_head_rev(self): return str(self.git.head.target) + def _get_pinned_rev(self, rev): + """ + Resolve a git reference (e.g. "HEAD", "master", "abcdef123456...") into + a git hash + """ + commit, _ = self.git.resolve_refish(rev) + + logger.debug('Resolved git ref "%s" into "%s"', rev, commit.hex) + + return commit.hex + def request_rev(self, rev): + rev = self._get_pinned_rev(rev) if rev in self.checkouts: co = self.checkouts[rev] co.ref_count += 1 else: co = _GitCheckout(self.git, rev) self.checkouts[rev] = co - return co.path, co.message + return co.path, co.message, rev def release_rev(self, rev): co = self.checkouts[rev] diff --git a/artiq/master/scheduler.py b/artiq/master/scheduler.py index bc264964e..451e5c84c 100644 --- a/artiq/master/scheduler.py +++ b/artiq/master/scheduler.py @@ -132,15 +132,23 @@ class RunPool: writer.writerow([rid, start_time, expid["file"]]) def submit(self, expid, priority, due_date, flush, pipeline_name): + """ + Submits an experiment to be run by this pool + + If expid has the attribute `repo_rev`, treat it as a git revision or + reference and resolve into a unique git hash before submission + """ # mutates expid to insert head repository revision if None and # replaces relative path with the absolute one. 
# called through scheduler. rid = self.ridc.get() if "repo_rev" in expid: - if expid["repo_rev"] is None: - expid["repo_rev"] = self.experiment_db.cur_rev - wd, repo_msg = self.experiment_db.repo_backend.request_rev( - expid["repo_rev"]) + repo_rev_or_ref = expid["repo_rev"] or self.experiment_db.cur_rev + wd, repo_msg, repo_rev = self.experiment_db.repo_backend.request_rev(repo_rev_or_ref) + + # Mutate expid's repo_rev to that returned from request_rev, in case + # a branch was passed instead of a hash + expid["repo_rev"] = repo_rev else: if "file" in expid: expid["file"] = os.path.abspath(expid["file"]) From da15e94c22c2979771156695da19314b63f78e85 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Thu, 11 Jan 2024 12:30:37 +0800 Subject: [PATCH 084/296] flake: update dependencies --- flake.lock | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/flake.lock b/flake.lock index 0e322b965..9a3245079 100644 --- a/flake.lock +++ b/flake.lock @@ -45,11 +45,11 @@ "mozilla-overlay": { "flake": false, "locked": { - "lastModified": 1695805681, - "narHash": "sha256-1ElPLD8eFfnuIk0G52HGGpRtQZ4QPCjChRlEOfkZ5ro=", + "lastModified": 1704373101, + "narHash": "sha256-+gi59LRWRQmwROrmE1E2b3mtocwueCQqZ60CwLG+gbg=", "owner": "mozilla", "repo": "nixpkgs-mozilla", - "rev": "6eabade97bc28d707a8b9d82ad13ef143836736e", + "rev": "9b11a87c0cc54e308fa83aac5b4ee1816d5418a2", "type": "github" }, "original": { @@ -60,11 +60,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1702346276, - "narHash": "sha256-eAQgwIWApFQ40ipeOjVSoK4TEHVd6nbSd9fApiHIw5A=", + "lastModified": 1704874635, + "narHash": "sha256-YWuCrtsty5vVZvu+7BchAxmcYzTMfolSPP5io8+WYCg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "cf28ee258fd5f9a52de6b9865cdb93a1f96d09b7", + "rev": "3dc440faeee9e889fe2d1b4d25ad0f430d449356", "type": "github" }, "original": { @@ -108,11 +108,11 @@ "src-migen": { "flake": false, "locked": { - "lastModified": 1699335478, - "narHash": "sha256-BsubN4Mfdj02QPK6ZCrl+YOaSg7DaLQdSCVP49ztWik=", + "lastModified": 1702942348, + "narHash": "sha256-gKIfHZxsv+jcgDFRW9mPqmwqbZXuRvXefkZcSFjOGHw=", "owner": "m-labs", "repo": "migen", - "rev": "fd0bf5855a1367eab14b0d6f7f8266178e25d78e", + "rev": "50934ad10a87ade47219b796535978b9bdf24023", "type": "github" }, "original": { From 64567bc26fff55dc6a9b31bf4cfaa779818bf6e4 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 11 Dec 2023 11:43:27 +0800 Subject: [PATCH 085/296] comm_analyzer: add AnalyzerProxyReceiver --- artiq/coredevice/comm_analyzer.py | 54 +++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/artiq/coredevice/comm_analyzer.py b/artiq/coredevice/comm_analyzer.py index 3e3fcafba..84fe7cbfd 100644 --- a/artiq/coredevice/comm_analyzer.py +++ b/artiq/coredevice/comm_analyzer.py @@ -2,6 +2,8 @@ from operator import itemgetter from collections import namedtuple from itertools import count from contextlib import contextmanager +from sipyco import keepalive +import asyncio from enum import Enum import struct import logging @@ -131,6 +133,58 @@ def decode_dump(data): return DecodedDump(log_channel, bool(dds_onehot_sel), messages) +# simplified from sipyco broadcast Receiver +class AnalyzerProxyReceiver: + def __init__(self, receive_cb): + self.receive_cb = receive_cb + + async def connect(self, host, port): + self.reader, self.writer = \ + await keepalive.async_open_connection(host, port) + try: + self.receive_task = asyncio.ensure_future(self._receive_cr()) + except: + self.writer.close() + del self.reader + del 
self.writer + raise + + async def close(self): + try: + self.receive_task.cancel() + try: + await asyncio.wait_for(self.receive_task, None) + except asyncio.CancelledError: + pass + finally: + self.writer.close() + del self.reader + del self.writer + + async def _receive_cr(self): + try: + while True: + endian_byte = await self.reader.readexactly(1) + if endian_byte == b"E": + endian = '>' + elif endian_byte == b"e": + endian = '<' + else: + raise ValueError + payload_length_word = await self.reader.readexactly(4) + payload_length = struct.unpack(endian + "I", payload_length_word)[0] + if payload_length > 10 * 512 * 1024: + # 10x buffer size of firmware + raise ValueError + + # The remaining header length is 11 bytes. + remaining_data = await self.reader.readexactly(payload_length + 11) + data = endian_byte + payload_length_word + remaining_data + self.receive_cb(data) + finally: + pass + + def vcd_codes(): codechars = [chr(i) for i in range(33, 127)] for n in count(): From 3af4c9d51771ef4b430e9b1455161249743eb9d1 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 8 Jan 2024 16:21:12 +0800 Subject: [PATCH 086/296] comm_analyzer: add get_channel_list --- artiq/coredevice/comm_analyzer.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/artiq/coredevice/comm_analyzer.py b/artiq/coredevice/comm_analyzer.py index 84fe7cbfd..5c3520fd9 100644 --- a/artiq/coredevice/comm_analyzer.py +++ b/artiq/coredevice/comm_analyzer.py @@ -300,6 +300,23 @@ class WaveformChannel: self.data.append((self.current_time, log_message)) +class ChannelSignatureManager: + def __init__(self): + self.current_scope = "" + self.channels = dict() + + def get_channel(self, name, width, ty): + self.channels[self.current_scope + name] = (width, ty) + return None + + @contextmanager + def scope(self, scope, name): + old_scope = self.current_scope + self.current_scope = scope + "/" + yield + self.current_scope = old_scope + + class TTLHandler: def __init__(self, manager, name): self.name = name @@ -628,6 +645,15 @@ def create_channel_handlers(manager, devices, ref_period, return channel_handlers +def get_channel_list(devices): + manager = ChannelSignatureManager() + create_channel_handlers(manager, devices, 1e-9, 3e9, False) + manager.get_channel("timestamp", 64, ty=WaveformType.VECTOR) + manager.get_channel("interval", 64, ty=WaveformType.ANALOG) + manager.get_channel("rtio_slack", 64, ty=WaveformType.ANALOG) + return manager.channels + + def get_message_time(message): return getattr(message, "timestamp", message.rtio_counter) From e393b3ab37bb17fff7332105d36e739c1e3ca432 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 15 Jan 2024 11:00:24 +0800 Subject: [PATCH 087/296] comm_analyzer: add set_end_time call --- artiq/coredevice/comm_analyzer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/artiq/coredevice/comm_analyzer.py b/artiq/coredevice/comm_analyzer.py index 5c3520fd9..e912890ce 100644 --- a/artiq/coredevice/comm_analyzer.py +++ b/artiq/coredevice/comm_analyzer.py @@ -686,6 +686,7 @@ def decoded_dump_to_target(manager, devices, dump, uniform_interval): if isinstance(dump.messages[-1], StoppedMessage): m = dump.messages[-1] end_time = get_message_time(m) + manager.set_end_time(end_time) messages = dump.messages[:-1] else: logger.warning("StoppedMessage missing") From d44f55c6d958d2b94060db8545e51aae6b1ae3ad Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 11 Jan 2024 11:19:07 +0800 Subject: [PATCH 088/296] waveform: add WaveformDock --- artiq/dashboard/waveform.py | 132 
++++++++++++++++++++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100644 artiq/dashboard/waveform.py diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py new file mode 100644 index 000000000..3bdf439ba --- /dev/null +++ b/artiq/dashboard/waveform.py @@ -0,0 +1,132 @@ +import os +import asyncio +import logging + +from PyQt5 import QtCore, QtWidgets, QtGui +from PyQt5.QtCore import Qt + +from sipyco.sync_struct import Subscriber + +from artiq.tools import exc_to_warning +from artiq.coredevice import comm_analyzer +from artiq.coredevice.comm_analyzer import WaveformType +from artiq.gui.tools import LayoutWidget, get_open_file_name +from artiq.gui.models import DictSyncTreeSepModel, LocalModelManager + + +logger = logging.getLogger(__name__) + + +class Model(DictSyncTreeSepModel): + def __init__(self, init): + DictSyncTreeSepModel.__init__(self, "/", ["Channels"], init) + + def clear(self): + for k in self.backing_store: + self._del_item(self, k.split(self.separator)) + self.backing_store.clear() + + def update(self, d): + for k, v in d.items(): + self[k] = v + + +class WaveformDock(QtWidgets.QDockWidget): + def __init__(self): + QtWidgets.QDockWidget.__init__(self, "Waveform") + self.setObjectName("Waveform") + self.setFeatures( + QtWidgets.QDockWidget.DockWidgetMovable | QtWidgets.QDockWidget.DockWidgetFloatable) + + self._channel_model = Model({}) + + self._ddb = None + + self._waveform_data = { + "timescale": 1, + "stopped_x": None, + "logs": dict(), + "data": dict(), + } + + self._current_dir = os.getcwd() + + devices_sub = Subscriber("devices", self.init_ddb, self.update_ddb) + + proxy_receiver = comm_analyzer.AnalyzerProxyReceiver( + self.on_dump_receive) + + grid = LayoutWidget() + self.setWidget(grid) + + self._menu_btn = QtWidgets.QPushButton() + self._menu_btn.setIcon( + QtWidgets.QApplication.style().standardIcon( + QtWidgets.QStyle.SP_FileDialogStart)) + grid.addWidget(self._menu_btn, 0, 0) + + self._request_dump_btn = QtWidgets.QToolButton() + self._request_dump_btn.setToolTip("Fetch analyzer data from device") + self._request_dump_btn.setIcon( + QtWidgets.QApplication.style().standardIcon( + QtWidgets.QStyle.SP_BrowserReload)) + grid.addWidget(self._request_dump_btn, 0, 1) + + self._add_btn = QtWidgets.QToolButton() + self._add_btn.setToolTip("Add channels...") + self._add_btn.setIcon( + QtWidgets.QApplication.style().standardIcon( + QtWidgets.QStyle.SP_FileDialogListView)) + grid.addWidget(self._add_btn, 0, 2) + + self._file_menu = QtWidgets.QMenu() + self._add_async_action("Open trace...", self.load_trace) + self._menu_btn.setMenu(self._file_menu) + + def _add_async_action(self, label, coro): + action = QtWidgets.QAction(label, self) + action.triggered.connect( + lambda: asyncio.ensure_future(exc_to_warning(coro()))) + self._file_menu.addAction(action) + + def on_dump_receive(self, dump): + decoded_dump = comm_analyzer.decode_dump(dump) + waveform_data = comm_analyzer.decoded_dump_to_waveform_data(self._ddb, decoded_dump) + self._waveform_data.update(waveform_data) + for log in self._waveform_data['logs']: + self._channel_model[log] = (0, WaveformType.LOG) + + async def load_trace(self): + try: + filename = await get_open_file_name( + self, + "Load Analyzer Trace", + self._current_dir, + "All files (*.*)") + except asyncio.CancelledError: + return + self._current_dir = os.path.dirname(filename) + try: + with open(filename, 'rb') as f: + dump = f.read() + self.on_dump_receive(dump) + except: + logger.error("Failed to open analyzer trace.", 
exc_info=True) + + def _process_ddb(self): + channel_list = comm_analyzer.get_channel_list(self._ddb) + self._channel_model.clear() + self._channel_model.update(channel_list) + desc = self._ddb.get("core_analyzer") + if desc is not None: + addr = desc["host"] + port = desc.get("port_proxy", 1385) + port_control = desc.get("port_proxy_control", 1386) + + def init_ddb(self, ddb): + self._ddb = ddb + self._process_ddb() + return ddb + + def update_ddb(self, mod): + self._process_ddb() From 9088ffa2ca87491b4bf7f5c61023ddb21d48de88 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 11 Jan 2024 11:27:44 +0800 Subject: [PATCH 089/296] artiq_dashboard: add WaveformDock --- artiq/frontend/artiq_dashboard.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index b1a7e39b0..83bb6e8e4 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ -21,7 +21,8 @@ from artiq.tools import get_user_config_dir from artiq.gui.models import ModelSubscriber from artiq.gui import state, log from artiq.dashboard import (experiments, shortcuts, explorer, - moninj, datasets, schedule, applets_ccb) + moninj, datasets, schedule, applets_ccb, + waveform) def get_argparser(): @@ -219,6 +220,8 @@ def main(): loop.run_until_complete(d_ttl_dds.start(args.server, args.port_notify)) atexit_register_coroutine(d_ttl_dds.stop, loop=loop) + d_waveform = waveform.WaveformDock() + d_schedule = schedule.ScheduleDock( rpc_clients["schedule"], sub_clients["schedule"]) smgr.register(d_schedule) @@ -232,7 +235,7 @@ def main(): right_docks = [ d_explorer, d_shortcuts, d_ttl_dds.ttl_dock, d_ttl_dds.dds_dock, d_ttl_dds.dac_dock, - d_datasets, d_applets + d_datasets, d_applets, d_waveform ] main_window.addDockWidget(QtCore.Qt.RightDockWidgetArea, right_docks[0]) for d1, d2 in zip(right_docks, right_docks[1:]): From 73be2257d3d6968c59541f54538963ccd9c0ffeb Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 17 Jan 2024 15:55:55 +0800 Subject: [PATCH 090/296] waveform: add proxy clients --- artiq/dashboard/waveform.py | 97 +++++++++++++++++++++++++++++++++++-- 1 file changed, 92 insertions(+), 5 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 3bdf439ba..48012b3bc 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -6,6 +6,7 @@ from PyQt5 import QtCore, QtWidgets, QtGui from PyQt5.QtCore import Qt from sipyco.sync_struct import Subscriber +from sipyco.pc_rpc import AsyncioClient from artiq.tools import exc_to_warning from artiq.coredevice import comm_analyzer @@ -17,6 +18,87 @@ from artiq.gui.models import DictSyncTreeSepModel, LocalModelManager logger = logging.getLogger(__name__) +class _BaseProxyClient: + def __init__(self): + self.addr = None + self.port = None + self._reconnect_event = asyncio.Event() + self._reconnect_task = None + + async def start(self): + self._reconnect_task = asyncio.ensure_future( + exc_to_warning(self._reconnect())) + + def update_address(self, addr, port): + self.addr = addr + self.port = port + self._reconnect_event.set() + + async def _reconnect(self): + try: + while True: + await self._reconnect_event.wait() + self._reconnect_event.clear() + try: + await self.disconnect_cr() + except: + logger.error("Error caught when disconnecting proxy client.", exc_info=True) + try: + await self.reconnect_cr() + except Exception: + logger.error( + "Error caught when reconnecting proxy client. 
Retrying...", exc_info=True) + await asyncio.sleep(5) + self._reconnect_event.set() + except asyncio.CancelledError: + pass + + async def close(self): + try: + self._reconnect_task.cancel() + await asyncio.wait_for(self._reconnect_task, None) + await self.disconnect_cr() + except: + logger.error("Error caught while closing proxy client.", exc_info=True) + + async def reconnect_cr(self): + raise NotImplementedError + + async def disconnect_cr(self): + raise NotImplementedError + + +class RPCProxyClient(_BaseProxyClient): + def __init__(self): + _BaseProxyClient.__init__(self) + self.client = AsyncioClient() + + async def trigger_proxy_task(self): + if self.client.get_rpc_id()[0] is None: + raise AttributeError("Unable to identify RPC target. Is analyzer proxy connected?") + await self.client.trigger() + + async def reconnect_cr(self): + await self.client.connect_rpc(self.addr, + self.port, + "coreanalyzer_proxy_control") + + async def disconnect_cr(self): + self.client.close_rpc() + + +class ReceiverProxyClient(_BaseProxyClient): + def __init__(self, receiver): + _BaseProxyClient.__init__(self) + self.receiver = receiver + + async def reconnect_cr(self): + await self.receiver.connect(self.addr, self.port) + + async def disconnect_cr(self): + await self.receiver.close() + + class Model(DictSyncTreeSepModel): def __init__(self, init): DictSyncTreeSepModel.__init__(self, "/", ["Channels"], init) @@ -51,10 +133,11 @@ class WaveformDock(QtWidgets.QDockWidget): self._current_dir = os.getcwd() - devices_sub = Subscriber("devices", self.init_ddb, self.update_ddb) - - proxy_receiver = comm_analyzer.AnalyzerProxyReceiver( + self.devices_sub = Subscriber("devices", self.init_ddb, self.update_ddb) + self.rpc_client = RPCProxyClient() + receiver = comm_analyzer.AnalyzerProxyReceiver( self.on_dump_receive) + self.receiver_client = ReceiverProxyClient(receiver) grid = LayoutWidget() self.setWidget(grid) @@ -70,6 +153,8 @@ class WaveformDock(QtWidgets.QDockWidget): self._request_dump_btn.setIcon( QtWidgets.QApplication.style().standardIcon( QtWidgets.QStyle.SP_BrowserReload)) + self._request_dump_btn.clicked.connect( + lambda: asyncio.ensure_future(exc_to_warning(self.rpc_client.trigger_proxy_task()))) grid.addWidget(self._request_dump_btn, 0, 1) self._add_btn = QtWidgets.QToolButton() @@ -120,8 +205,10 @@ class WaveformDock(QtWidgets.QDockWidget): desc = self._ddb.get("core_analyzer") if desc is not None: addr = desc["host"] - port = desc.get("port_proxy", 1385) - port_control = desc.get("port_proxy_control", 1386) + port_proxy = desc.get("port_proxy", 1385) + port = desc.get("port", 1386) + self.receiver_client.update_address(addr, port_proxy) + self.rpc_client.update_address(addr, port) def init_ddb(self, ddb): self._ddb = ddb From 8a9b6a449bc3177065b7f3bfc5c48fbd5a4d33c3 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 17 Jan 2024 16:00:47 +0800 Subject: [PATCH 091/296] artiq_dashboard: start proxy clients, device_sub --- artiq/frontend/artiq_dashboard.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index 83bb6e8e4..17ec0527d 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ -221,6 +221,12 @@ def main(): atexit_register_coroutine(d_ttl_dds.stop, loop=loop) d_waveform = waveform.WaveformDock() + loop.run_until_complete(d_waveform.devices_sub.connect(args.server, args.port_notify)) + atexit_register_coroutine(d_waveform.devices_sub.close, loop=loop) + for name in ["rpc_client", 
"receiver_client"]: + client = getattr(d_waveform, name) + loop.run_until_complete(client.start()) + atexit_register_coroutine(client.close, loop=loop) d_schedule = schedule.ScheduleDock( rpc_clients["schedule"], sub_clients["schedule"]) From 096664c1ba8c28873aa71d75d7953fc92472839c Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 22 Nov 2023 17:31:30 +0800 Subject: [PATCH 092/296] dndwidgets: add drag drop helper widgets --- artiq/gui/dndwidgets.py | 98 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 artiq/gui/dndwidgets.py diff --git a/artiq/gui/dndwidgets.py b/artiq/gui/dndwidgets.py new file mode 100644 index 000000000..b93f4d330 --- /dev/null +++ b/artiq/gui/dndwidgets.py @@ -0,0 +1,98 @@ +from PyQt5 import QtCore, QtWidgets + + +class VDragDropSplitter(QtWidgets.QSplitter): + def __init__(self, parent): + QtWidgets.QSplitter.__init__(self, parent=parent) + self.setAcceptDrops(True) + self.setContentsMargins(0, 0, 0, 0) + self.setOrientation(QtCore.Qt.Vertical) + self.setChildrenCollapsible(False) + + def resetSizes(self): + self.setSizes(self.count() * [1]) + + def dragEnterEvent(self, e): + e.accept() + + def dragLeaveEvent(self, e): + self.setRubberBand(-1) + e.accept() + + def dragMoveEvent(self, e): + pos = e.pos() + src = e.source() + src_i = self.indexOf(src) + self.setRubberBand(self.height()) + # case 0: smaller than source widget + if pos.y() < src.y(): + for n in range(src_i): + w = self.widget(n) + if pos.y() < w.y() + w.size().height(): + self.setRubberBand(w.y()) + break + # case 2: greater than source widget + elif pos.y() > src.y() + src.size().height(): + for n in range(src_i + 1, self.count()): + w = self.widget(n) + if pos.y() < w.y(): + self.setRubberBand(w.y()) + break + else: + self.setRubberBand(-1) + e.accept() + + def dropEvent(self, e): + self.setRubberBand(-1) + pos = e.pos() + src = e.source() + src_i = self.indexOf(src) + for n in range(self.count()): + w = self.widget(n) + if pos.y() < w.y() + w.size().height(): + self.insertWidget(n, src) + break + e.accept() + + +# Scroll area with auto-scroll on vertical drag +class VDragScrollArea(QtWidgets.QScrollArea): + def __init__(self, parent): + QtWidgets.QScrollArea.__init__(self, parent) + self.installEventFilter(self) + self._margin = 40 + self._timer = QtCore.QTimer(self) + self._timer.setInterval(20) + self._timer.timeout.connect(self._on_auto_scroll) + self._direction = 0 + self._speed = 10 + + def setAutoScrollMargin(self, margin): + self._margin = margin + + def setAutoScrollSpeed(self, speed): + self._speed = speed + + def eventFilter(self, obj, e): + if e.type() == QtCore.QEvent.DragMove: + val = self.verticalScrollBar().value() + height = self.viewport().height() + y = e.pos().y() + self._direction = 0 + if y < val + self._margin: + self._direction = -1 + elif y > height + val - self._margin: + self._direction = 1 + if not self._timer.isActive(): + self._timer.start() + elif e.type() in (QtCore.QEvent.Drop, QtCore.QEvent.DragLeave): + self._timer.stop() + return False + + def _on_auto_scroll(self): + val = self.verticalScrollBar().value() + min_ = self.verticalScrollBar().minimum() + max_ = self.verticalScrollBar().maximum() + dy = self._direction * self._speed + new_val = min(max_, max(min_, val + dy)) + self.verticalScrollBar().setValue(new_val) From 12a44fad3c88e4886a924521c7e99aa051357374 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 17 Jan 2024 16:46:15 +0800 Subject: [PATCH 093/296] comm_analyzer: change usage of logs field --- 
artiq/coredevice/comm_analyzer.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/artiq/coredevice/comm_analyzer.py b/artiq/coredevice/comm_analyzer.py index e912890ce..8b6d7c526 100644 --- a/artiq/coredevice/comm_analyzer.py +++ b/artiq/coredevice/comm_analyzer.py @@ -260,9 +260,8 @@ class WaveformManager: def get_channel(self, name, width, ty): if ty == WaveformType.LOG: - data = self.trace["logs"][self.current_scope + name] = list() - else: - data = self.trace["data"][self.current_scope + name] = list() + self.trace["logs"][self.current_scope + name] = (width, ty) + data = self.trace["data"][self.current_scope + name] = list() channel = WaveformChannel(data, self.current_time) self.channels.append(channel) return channel From 5036230ff3bc2fdc87cc974fb043a315f899e2ba Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 22 Jan 2024 14:40:51 +0800 Subject: [PATCH 094/296] waveform: change log channel update --- artiq/dashboard/waveform.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 48012b3bc..e4c9e89ee 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -178,8 +178,7 @@ class WaveformDock(QtWidgets.QDockWidget): decoded_dump = comm_analyzer.decode_dump(dump) waveform_data = comm_analyzer.decoded_dump_to_waveform_data(self._ddb, decoded_dump) self._waveform_data.update(waveform_data) - for log in self._waveform_data['logs']: - self._channel_model[log] = (0, WaveformType.LOG) + self._channel_model.update(self._waveform_data['logs']) async def load_trace(self): try: From 466d865e586595b79e202864e21d67ce11884cee Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 22 Jan 2024 15:07:18 +0800 Subject: [PATCH 095/296] waveform: add _AddChannelDialog --- artiq/dashboard/waveform.py | 59 +++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index e4c9e89ee..5fdf4a5d7 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -113,6 +113,51 @@ class Model(DictSyncTreeSepModel): self[k] = v +class _AddChannelDialog(QtWidgets.QDialog): + accepted = QtCore.pyqtSignal(list) + + def __init__(self, parent, model): + QtWidgets.QDialog.__init__(self, parent=parent) + self.setContextMenuPolicy(Qt.ActionsContextMenu) + self.setWindowTitle("Add channels") + + grid = QtWidgets.QGridLayout() + self.setLayout(grid) + + self._model = model + self._tree_view = QtWidgets.QTreeView() + self._tree_view.setHeaderHidden(True) + self._tree_view.setSelectionBehavior( + QtWidgets.QAbstractItemView.SelectItems) + self._tree_view.setSelectionMode( + QtWidgets.QAbstractItemView.ExtendedSelection) + self._tree_view.setModel(self._model) + grid.addWidget(self._tree_view, 0, 0, 1, 2) + cancel_btn = QtWidgets.QPushButton("Cancel") + cancel_btn.clicked.connect(self.close) + cancel_btn.setIcon( + QtWidgets.QApplication.style().standardIcon( + QtWidgets.QStyle.SP_DialogCancelButton)) + grid.addWidget(cancel_btn, 1, 0) + confirm_btn = QtWidgets.QPushButton("Confirm") + confirm_btn.clicked.connect(self.add_channels) + confirm_btn.setIcon( + QtWidgets.QApplication.style().standardIcon( + QtWidgets.QStyle.SP_DialogApplyButton)) + grid.addWidget(confirm_btn, 1, 1) + + def add_channels(self): + selection = self._tree_view.selectedIndexes() + channels = [] + for select in selection: + key = self._model.index_to_key(select) + if key is not None: + width, ty = self._model[key].ref + 
channels.append((key, width, ty, [])) + self.accepted.emit(channels) + self.close() + + class WaveformDock(QtWidgets.QDockWidget): def __init__(self): QtWidgets.QDockWidget.__init__(self, "Waveform") @@ -162,6 +207,7 @@ class WaveformDock(QtWidgets.QDockWidget): self._add_btn.setIcon( QtWidgets.QApplication.style().standardIcon( QtWidgets.QStyle.SP_FileDialogListView)) + self._add_btn.clicked.connect(self.on_add_channel_click) grid.addWidget(self._add_btn, 0, 2) self._file_menu = QtWidgets.QMenu() @@ -174,6 +220,19 @@ class WaveformDock(QtWidgets.QDockWidget): lambda: asyncio.ensure_future(exc_to_warning(coro()))) self._file_menu.addAction(action) + async def _add_channel_task(self): + dialog = _AddChannelDialog(self, self._channel_model) + fut = asyncio.Future() + + def on_accept(s): + fut.set_result(s) + dialog.accepted.connect(on_accept) + dialog.open() + channels = await fut + + def on_add_channel_click(self): + asyncio.ensure_future(self._add_channel_task()) + def on_dump_receive(self, dump): decoded_dump = comm_analyzer.decode_dump(dump) waveform_data = comm_analyzer.decoded_dump_to_waveform_data(self._ddb, decoded_dump) From fcaf4a8af08b09d014d116a6d0bda7b1f5da9b15 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 30 Aug 2023 16:36:16 +0800 Subject: [PATCH 096/296] gui.tools: add get_save_file_name helper --- artiq/gui/tools.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/artiq/gui/tools.py b/artiq/gui/tools.py index 441da9b81..2171ab915 100644 --- a/artiq/gui/tools.py +++ b/artiq/gui/tools.py @@ -69,6 +69,23 @@ async def get_open_file_name(parent, caption, dir, filter): return await fut +async def get_save_file_name(parent, caption, dir, filter, suffix=None): + """like QtWidgets.QFileDialog.getSaveFileName(), but a coroutine""" + dialog = QtWidgets.QFileDialog(parent, caption, dir, filter) + dialog.setFileMode(dialog.AnyFile) + dialog.setAcceptMode(dialog.AcceptSave) + if suffix is not None: + dialog.setDefaultSuffix(suffix) + fut = asyncio.Future() + + def on_accept(): + fut.set_result(dialog.selectedFiles()[0]) + dialog.accepted.connect(on_accept) + dialog.rejected.connect(fut.cancel) + dialog.open() + return await fut + + # Based on: # http://stackoverflow.com/questions/250890/using-qsortfilterproxymodel-with-a-tree-model class QRecursiveFilterProxyModel(QtCore.QSortFilterProxyModel): From 863daca2daafaedee3f2e8f10540bc6198944023 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 25 Jan 2024 13:54:51 +0800 Subject: [PATCH 097/296] waveform: remove punctuation in log msgs --- artiq/dashboard/waveform.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 5fdf4a5d7..887cdb954 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -42,12 +42,12 @@ class _BaseProxyClient: try: await self.disconnect_cr() except: - logger.error("Error caught when disconnecting proxy client.", exc_info=True) + logger.error("Error caught when disconnecting proxy client", exc_info=True) try: await self.reconnect_cr() except Exception: logger.error( - "Error caught when reconnecting proxy client. 
Retrying...", exc_info=True) + "Error caught when reconnecting proxy client, retrying...", exc_info=True) await asyncio.sleep(5) self._reconnect_event.set() except asyncio.CancelledError: @@ -59,7 +59,7 @@ class _BaseProxyClient: await asyncio.wait_for(self._reconnect_task, None) await self.disconnect_cr() except: - logger.error("Error caught while closing proxy client.", exc_info=True) + logger.error("Error caught while closing proxy client", exc_info=True) async def reconnect_cr(self): raise NotImplementedError @@ -254,7 +254,7 @@ class WaveformDock(QtWidgets.QDockWidget): dump = f.read() self.on_dump_receive(dump) except: - logger.error("Failed to open analyzer trace.", exc_info=True) + logger.error("Failed to open analyzer trace", exc_info=True) def _process_ddb(self): channel_list = comm_analyzer.get_channel_list(self._ddb) From 847b4ee2a3df58eaafa2711ce80b42f003bbe7e8 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 22 Jan 2024 15:59:35 +0800 Subject: [PATCH 098/296] waveform: add save_trace --- artiq/dashboard/waveform.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 887cdb954..4ba021218 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -168,6 +168,7 @@ class WaveformDock(QtWidgets.QDockWidget): self._channel_model = Model({}) self._ddb = None + self._dump = None self._waveform_data = { "timescale": 1, @@ -212,6 +213,7 @@ class WaveformDock(QtWidgets.QDockWidget): self._file_menu = QtWidgets.QMenu() self._add_async_action("Open trace...", self.load_trace) + self._add_async_action("Save trace...", self.save_trace) self._menu_btn.setMenu(self._file_menu) def _add_async_action(self, label, coro): @@ -234,6 +236,7 @@ class WaveformDock(QtWidgets.QDockWidget): asyncio.ensure_future(self._add_channel_task()) def on_dump_receive(self, dump): + self._dump = dump decoded_dump = comm_analyzer.decode_dump(dump) waveform_data = comm_analyzer.decoded_dump_to_waveform_data(self._ddb, decoded_dump) self._waveform_data.update(waveform_data) @@ -256,6 +259,26 @@ class WaveformDock(QtWidgets.QDockWidget): except: logger.error("Failed to open analyzer trace", exc_info=True) + async def save_trace(self): + if self._dump is None: + logger.error("No analyzer trace stored in dashboard, " + "try loading from file or fetching from device") + return + try: + filename = await get_save_file_name( + self, + "Save Analyzer Trace", + self._current_dir, + "All files (*.*)") + except asyncio.CancelledError: + return + self._current_dir = os.path.dirname(filename) + try: + with open(filename, 'wb') as f: + f.write(self._dump) + except: + logger.error("Failed to save analyzer trace", exc_info=True) + def _process_ddb(self): channel_list = comm_analyzer.get_channel_list(self._ddb) self._channel_model.clear() From e72f37eb4ee749842fa6682f1399aa38cb4b45a2 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 23 Jan 2024 15:26:03 +0800 Subject: [PATCH 099/296] waveform: add _WaveformModel --- artiq/dashboard/waveform.py | 59 +++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 4ba021218..5793bdb1e 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -99,6 +99,58 @@ class ReceiverProxyClient(_BaseProxyClient): await self.receiver.close() +class _WaveformModel(QtCore.QAbstractTableModel): + def __init__(self): + self.backing_struct = [] + self.headers = ["name", "type", 
"width", "data"] + QtCore.QAbstractTableModel.__init__(self) + + def rowCount(self, parent=QtCore.QModelIndex()): + return len(self.backing_struct) + + def columnCount(self, parent=QtCore.QModelIndex()): + return len(self.headers) + + def data(self, index, role=QtCore.Qt.DisplayRole): + if index.isValid(): + return self.backing_struct[index.row()][index.column()] + return None + + def extend(self, data): + length = len(self.backing_struct) + len_data = len(data) + self.beginInsertRows(QtCore.QModelIndex(), length, length + len_data - 1) + self.backing_struct.extend(data) + self.endInsertRows() + + def pop(self, row): + self.beginRemoveRows(QtCore.QModelIndex(), row, row) + self.backing_struct.pop(row) + self.endRemoveRows() + + def move(self, src, dest): + if src == dest: + return + if src < dest: + dest, src = src, dest + self.beginMoveRows(QtCore.QModelIndex(), src, src, QtCore.QModelIndex(), dest) + self.backing_struct.insert(dest, self.backing_struct.pop(src)) + self.endMoveRows() + + def update_data(self, waveform_data, top, bottom): + name_col = self.headers.index("name") + data_col = self.headers.index("data") + for i in range(top, bottom): + name = self.data(self.index(i, name_col)) + if name in waveform_data: + self.backing_struct[i][data_col] = waveform_data[name] + self.dataChanged.emit(self.index(i, data_col), + self.index(i, data_col)) + + def update_all(self, waveform_data): + self.update_data(waveform_data, 0, self.rowCount()) + + class Model(DictSyncTreeSepModel): def __init__(self, init): DictSyncTreeSepModel.__init__(self, "/", ["Channels"], init) @@ -166,6 +218,7 @@ class WaveformDock(QtWidgets.QDockWidget): QtWidgets.QDockWidget.DockWidgetMovable | QtWidgets.QDockWidget.DockWidgetFloatable) self._channel_model = Model({}) + self._waveform_model = _WaveformModel() self._ddb = None self._dump = None @@ -231,6 +284,11 @@ class WaveformDock(QtWidgets.QDockWidget): dialog.accepted.connect(on_accept) dialog.open() channels = await fut + count = self._waveform_model.rowCount() + self._waveform_model.extend(channels) + self._waveform_model.update_data(self._waveform_data['data'], + count, + count + len(channels)) def on_add_channel_click(self): asyncio.ensure_future(self._add_channel_task()) @@ -241,6 +299,7 @@ class WaveformDock(QtWidgets.QDockWidget): waveform_data = comm_analyzer.decoded_dump_to_waveform_data(self._ddb, decoded_dump) self._waveform_data.update(waveform_data) self._channel_model.update(self._waveform_data['logs']) + self._waveform_model.update_all(self._waveform_data['data']) async def load_trace(self): try: From 06b908fd18c6d9f6353cc99e867a28f685c51bb1 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 23 Jan 2024 15:53:11 +0800 Subject: [PATCH 100/296] waveform: fix in _AddChannelDialog --- artiq/dashboard/waveform.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 5793bdb1e..d8d150311 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -205,7 +205,7 @@ class _AddChannelDialog(QtWidgets.QDialog): key = self._model.index_to_key(select) if key is not None: width, ty = self._model[key].ref - channels.append((key, width, ty, [])) + channels.append([key, width, ty, []]) self.accepted.emit(channels) self.close() From 6c9f1cbf7c9ef058152bf6656a9bd5c5e33165ba Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 25 Jan 2024 15:02:13 +0800 Subject: [PATCH 101/296] waveform: add save_vcd --- artiq/dashboard/waveform.py | 24 +++++++++++++++++++++++- 1 
file changed, 23 insertions(+), 1 deletion(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index d8d150311..0d0c8ca37 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -11,7 +11,7 @@ from sipyco.pc_rpc import AsyncioClient from artiq.tools import exc_to_warning from artiq.coredevice import comm_analyzer from artiq.coredevice.comm_analyzer import WaveformType -from artiq.gui.tools import LayoutWidget, get_open_file_name +from artiq.gui.tools import LayoutWidget, get_open_file_name, get_save_file_name from artiq.gui.models import DictSyncTreeSepModel, LocalModelManager @@ -267,6 +267,7 @@ class WaveformDock(QtWidgets.QDockWidget): self._file_menu = QtWidgets.QMenu() self._add_async_action("Open trace...", self.load_trace) self._add_async_action("Save trace...", self.save_trace) + self._add_async_action("Save trace as VCD...", self.save_vcd) self._menu_btn.setMenu(self._file_menu) def _add_async_action(self, label, coro): @@ -338,6 +339,27 @@ class WaveformDock(QtWidgets.QDockWidget): except: logger.error("Failed to save analyzer trace", exc_info=True) + async def save_vcd(self): + if self._dump is None: + logger.error("No analyzer trace stored in dashboard, " + "try loading from file or fetching from device") + return + try: + filename = await get_save_file_name( + self, + "Save VCD", + self._current_dir, + "All files (*.*)") + except asyncio.CancelledError: + return + self._current_dir = os.path.dirname(filename) + try: + decoded_dump = comm_analyzer.decode_dump(self._dump) + with open(filename, 'w') as f: + comm_analyzer.decoded_dump_to_vcd(f, self._ddb, decoded_dump) + except: + logger.error("Failed to save trace as VCD", exc_info=True) + def _process_ddb(self): channel_list = comm_analyzer.get_channel_list(self._ddb) self._channel_model.clear() From 3861d587494356e322d053903a2b39d4dfa31c28 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 24 Jan 2024 10:37:54 +0800 Subject: [PATCH 102/296] dndwidgets: change splitter to use signal --- artiq/gui/dndwidgets.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/artiq/gui/dndwidgets.py b/artiq/gui/dndwidgets.py index b93f4d330..03e8a5e63 100644 --- a/artiq/gui/dndwidgets.py +++ b/artiq/gui/dndwidgets.py @@ -2,6 +2,8 @@ from PyQt5 import QtCore, QtWidgets class VDragDropSplitter(QtWidgets.QSplitter): + dropped = QtCore.pyqtSignal(int, int) + def __init__(self, parent): QtWidgets.QSplitter.__init__(self, parent=parent) self.setAcceptDrops(True) @@ -50,7 +52,7 @@ class VDragDropSplitter(QtWidgets.QSplitter): for n in range(self.count()): w = self.widget(n) if pos.y() < w.y() + w.size().height(): - self.insertWidget(n, src) + self.dropped.emit(src_i, n) break e.accept() From 28dfe1f9c650c81e8fcc799fc76440157625bfbd Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 24 Jan 2024 10:25:39 +0800 Subject: [PATCH 103/296] waveform: add _WaveformView --- artiq/dashboard/waveform.py | 99 +++++++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 0d0c8ca37..82b4de5a1 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -5,6 +5,8 @@ import logging from PyQt5 import QtCore, QtWidgets, QtGui from PyQt5.QtCore import Qt +import pyqtgraph as pg + from sipyco.sync_struct import Subscriber from sipyco.pc_rpc import AsyncioClient @@ -13,10 +15,14 @@ from artiq.coredevice import comm_analyzer from artiq.coredevice.comm_analyzer import WaveformType from artiq.gui.tools 
import LayoutWidget, get_open_file_name, get_save_file_name from artiq.gui.models import DictSyncTreeSepModel, LocalModelManager +from artiq.gui.dndwidgets import VDragScrollArea, VDragDropSplitter logger = logging.getLogger(__name__) +WAVEFORM_MIN_HEIGHT = 50 +WAVEFORM_MAX_HEIGHT = 200 + class _BaseProxyClient: def __init__(self): @@ -99,6 +105,93 @@ class ReceiverProxyClient(_BaseProxyClient): await self.receiver.close() +class _WaveformView(QtWidgets.QWidget): + def __init__(self, parent): + QtWidgets.QWidget.__init__(self, parent=parent) + + layout = QtWidgets.QVBoxLayout() + layout.setContentsMargins(0, 0, 0, 0) + layout.setSpacing(0) + self.setLayout(layout) + + self._ref_axis = pg.PlotWidget() + self._ref_axis.hideAxis("bottom") + self._ref_axis.hideAxis("left") + self._ref_axis.hideButtons() + self._ref_axis.setFixedHeight(45) + self._ref_axis.setMenuEnabled(False) + self._top = pg.AxisItem("top") + self._top.setScale(1e-12) + self._top.setLabel(units="s") + self._ref_axis.setAxisItems({"top": self._top}) + layout.addWidget(self._ref_axis) + + self._ref_vb = self._ref_axis.getPlotItem().getViewBox() + self._ref_vb.setFixedHeight(0) + self._ref_vb.setMouseEnabled(x=True, y=False) + self._ref_vb.setLimits(xMin=0) + + scroll_area = VDragScrollArea(self) + scroll_area.setWidgetResizable(True) + scroll_area.setContentsMargins(0, 0, 0, 0) + scroll_area.setFrameShape(QtWidgets.QFrame.NoFrame) + layout.addWidget(scroll_area) + + self._splitter = VDragDropSplitter(parent=scroll_area) + self._splitter.setHandleWidth(1) + scroll_area.setWidget(self._splitter) + + def setModel(self, model): + self._model = model + self._model.dataChanged.connect(self.onDataChange) + self._model.rowsInserted.connect(self.onInsert) + self._model.rowsRemoved.connect(self.onRemove) + self._model.rowsMoved.connect(self.onMove) + self._splitter.dropped.connect(self._model.move) + + def setTimescale(self, timescale): + self._timescale = timescale + self._top.setScale(1e-12 * timescale) + for i in range(self._model.rowCount()): + self._splitter.widget(i).setTimescale(timescale) + + def setStoppedX(self, stopped_x): + self._stopped_x = stopped_x + for i in range(self._model.rowCount()): + self._splitter.widget(i).setStoppedX(stopped_x) + + def onDataChange(self, top, bottom, roles): + first = top.row() + last = bottom.row() + for i in range(first, last + 1): + data = self._model.data(self._model.index(i, 3)) + self._splitter.widget(i).onDataChange(data) + + def onInsert(self, parent, first, last): + for i in range(first, last + 1): + w = self._create_waveform(i) + self._splitter.insertWidget(i, w) + self._resize() + + def onRemove(self, parent, first, last): + for i in reversed(range(first, last + 1)): + w = self._splitter.widget(i) + w.deleteLater() + self._splitter.refresh() + self._resize() + + def onMove(self, src_parent, src_start, src_end, dest_parent, dest_row): + w = self._splitter.widget(src_start) + self._splitter.insertWidget(dest_row, w) + + def _create_waveform(self, row): + raise NotImplementedError + + def _resize(self): + self._splitter.setFixedHeight( + int((WAVEFORM_MIN_HEIGHT + WAVEFORM_MAX_HEIGHT) * self._model.rowCount() / 2)) + + class _WaveformModel(QtCore.QAbstractTableModel): def __init__(self): self.backing_struct = [] @@ -270,6 +363,10 @@ class WaveformDock(QtWidgets.QDockWidget): self._add_async_action("Save trace as VCD...", self.save_vcd) self._menu_btn.setMenu(self._file_menu) + self._waveform_view = _WaveformView(self) + self._waveform_view.setModel(self._waveform_model) + 
grid.addWidget(self._waveform_view, 1, 0, colspan=12) + def _add_async_action(self, label, coro): action = QtWidgets.QAction(label, self) action.triggered.connect( @@ -301,6 +398,8 @@ class WaveformDock(QtWidgets.QDockWidget): self._waveform_data.update(waveform_data) self._channel_model.update(self._waveform_data['logs']) self._waveform_model.update_all(self._waveform_data['data']) + self._waveform_view.setStoppedX(self._waveform_data['stopped_x']) + self._waveform_view.setTimescale(self._waveform_data['timescale']) async def load_trace(self): try: From c087a47e45280cda6a8622c829612e1dfaad2785 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 24 Jan 2024 16:18:31 +0800 Subject: [PATCH 104/296] waveform: add _BaseWaveform --- artiq/dashboard/waveform.py | 81 +++++++++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 82b4de5a1..53e250f34 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -105,6 +105,87 @@ class ReceiverProxyClient(_BaseProxyClient): await self.receiver.close() +class _BackgroundItem(pg.GraphicsWidgetAnchor, pg.GraphicsWidget): + def __init__(self, parent, rect): + pg.GraphicsWidget.__init__(self, parent) + pg.GraphicsWidgetAnchor.__init__(self) + self.item = QtWidgets.QGraphicsRectItem(rect, self) + brush = QtGui.QBrush(QtGui.QColor(10, 10, 10, 140)) + self.item.setBrush(brush) + + +class _BaseWaveform(pg.PlotWidget): + def __init__(self, name, width, parent=None, pen="r", stepMode="right", connect="finite"): + pg.PlotWidget.__init__(self, + parent=parent, + x=None, + y=None, + pen=pen, + stepMode=stepMode, + connect=connect) + + self.setMinimumHeight(WAVEFORM_MIN_HEIGHT) + self.setMaximumHeight(WAVEFORM_MAX_HEIGHT) + self.setMenuEnabled(False) + self.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu) + + self.name = name + self.width = width + + self.plot_item = self.getPlotItem() + self.plot_item.hideButtons() + self.plot_item.hideAxis("top") + self.plot_item.getAxis("bottom").setStyle(showValues=False, tickLength=0) + self.plot_item.getAxis("left").setStyle(showValues=False, tickLength=0) + self.plot_item.setRange(yRange=(0, 1), padding=0.1) + self.plot_item.showGrid(x=True, y=True) + + self.plot_data_item = self.plot_item.listDataItems()[0] + self.plot_data_item.setClipToView(True) + + self.view_box = self.plot_item.getViewBox() + self.view_box.setMouseEnabled(x=True, y=False) + self.view_box.disableAutoRange(axis=pg.ViewBox.YAxis) + self.view_box.setLimits(xMin=0, minXRange=20) + + self.title_label = pg.LabelItem(self.name, parent=self.plot_item) + self.title_label.anchor(itemPos=(0, 0), parentPos=(0, 0), offset=(0, 0)) + self.title_label.setAttr('justify', 'left') + self.title_label.setZValue(10) + + rect = self.title_label.boundingRect() + rect.setHeight(rect.height() * 2) + self.label_bg = _BackgroundItem(parent=self.plot_item, rect=rect) + self.label_bg.anchor(itemPos=(0, 0), parentPos=(0, 0), offset=(0, 0)) + + def setStoppedX(self, stopped_x): + self.stopped_x = stopped_x + self.view_box.setLimits(xMax=stopped_x) + + def setTimescale(self, timescale): + self.timescale = timescale + + def onDataChange(self, data): + raise NotImplementedError + + def mouseMoveEvent(self, e): + if e.buttons() == QtCore.Qt.LeftButton \ + and e.modifiers() == QtCore.Qt.ShiftModifier: + drag = QtGui.QDrag(self) + mime = QtCore.QMimeData() + drag.setMimeData(mime) + pixmapi = QtWidgets.QApplication.style().standardIcon( + QtWidgets.QStyle.SP_FileIcon) + 
drag.setPixmap(pixmapi.pixmap(32)) + drag.exec_(QtCore.Qt.MoveAction) + else: + super().mouseMoveEvent(e) + + def wheelEvent(self, e): + if e.modifiers() & QtCore.Qt.ControlModifier: + super().wheelEvent(e) + + class _WaveformView(QtWidgets.QWidget): def __init__(self, parent): QtWidgets.QWidget.__init__(self, parent=parent) From 171c7a6e11fb59e6920a8b43db5648e3530e0a8a Mon Sep 17 00:00:00 2001 From: mwojcik Date: Wed, 24 Jan 2024 11:51:05 +0800 Subject: [PATCH 105/296] runtime: use the destination passed by kernel --- artiq/firmware/runtime/kernel.rs | 23 ++++++++++++++++------- artiq/firmware/runtime/session.rs | 4 ++-- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/artiq/firmware/runtime/kernel.rs b/artiq/firmware/runtime/kernel.rs index 855648845..b14a64d34 100644 --- a/artiq/firmware/runtime/kernel.rs +++ b/artiq/firmware/runtime/kernel.rs @@ -332,8 +332,9 @@ pub mod subkernel { Err(_) => return, }; let subkernel = unsafe { SUBKERNELS.get(&id) }; - if subkernel.is_none() || subkernel.unwrap().state != SubkernelState::Running { - // do not add messages for non-existing, non-running or deleted subkernels + if subkernel.is_some() && subkernel.unwrap().state != SubkernelState::Running { + warn!("received a message for a non-running subkernel #{}", id); + // do not add messages for non-running or deleted subkernels return } if status.is_first() { @@ -361,8 +362,10 @@ pub mod subkernel { pub fn message_await(io: &Io, subkernel_mutex: &Mutex, id: u32, timeout: u64 ) -> Result { - { + let is_subkernel = { let _lock = subkernel_mutex.lock(io)?; + let is_subkernel = unsafe { SUBKERNELS.get(&id).is_some() }; + if is_subkernel { match unsafe { SUBKERNELS.get(&id).unwrap().state } { SubkernelState::Finished { status: FinishStatus::Ok } | SubkernelState::Running => (), @@ -372,6 +375,8 @@ pub mod subkernel { _ => return Err(Error::IncorrectState) } } + is_subkernel + }; let max_time = clock::get_ms() + timeout as u64; let message = io.until_ok(|| { if clock::get_ms() > max_time { @@ -387,10 +392,12 @@ pub mod subkernel { return Ok(Some(unsafe { MESSAGE_QUEUE.remove(i) })); } } + if is_subkernel { match unsafe { SUBKERNELS.get(&id).unwrap().state } { SubkernelState::Finished { status: FinishStatus::CommLost } | SubkernelState::Finished { status: FinishStatus::Exception(_) } => return Ok(None), _ => () + } } Err(()) }); @@ -412,15 +419,17 @@ pub mod subkernel { } pub fn message_send<'a>(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, - routing_table: &RoutingTable, id: u32, count: u8, tag: &'a [u8], message: *const *const () + routing_table: &RoutingTable, id: u32, destination: Option, count: u8, tag: &'a [u8], message: *const *const () ) -> Result<(), Error> { let mut writer = Cursor::new(Vec::new()); - let _lock = subkernel_mutex.lock(io)?; - let destination = unsafe { SUBKERNELS.get(&id).unwrap().destination }; - // reuse rpc code for sending arbitrary data rpc::send_args(&mut writer, 0, tag, message, false)?; // skip service tag, but overwrite first byte with tag count + let destination = destination.unwrap_or_else(|| { + let _lock = subkernel_mutex.lock(io).unwrap(); + unsafe { SUBKERNELS.get(&id).unwrap().destination } + } + ); let data = &mut writer.into_inner()[3..]; data[0] = count; Ok(drtio::subkernel_send_message( diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index 800c6d040..28c6a92c7 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -704,8 +704,8 @@ fn process_kern_message(io: &Io, 
aux_mutex: &Mutex, kern_send(io, &kern::SubkernelAwaitFinishReply { status: status }) } #[cfg(has_drtio)] - &kern::SubkernelMsgSend { id, destination: _, count, tag, data } => { - subkernel::message_send(io, aux_mutex, _subkernel_mutex, routing_table, id, count, tag, data)?; + &kern::SubkernelMsgSend { id, destination, count, tag, data } => { + subkernel::message_send(io, aux_mutex, _subkernel_mutex, routing_table, id, destination, count, tag, data)?; kern_acknowledge() } #[cfg(has_drtio)] From 7d3bcc7cacd2cb005d4f70e30f04a887163b2a43 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Fri, 26 Jan 2024 15:48:00 +0800 Subject: [PATCH 106/296] satman: support free subkernel message passing --- artiq/firmware/satman/kernel.rs | 56 ++++++++++++++++++++------------- artiq/firmware/satman/main.rs | 4 +-- 2 files changed, 37 insertions(+), 23 deletions(-) diff --git a/artiq/firmware/satman/kernel.rs b/artiq/firmware/satman/kernel.rs index d4cc226cb..fdf8792f8 100644 --- a/artiq/firmware/satman/kernel.rs +++ b/artiq/firmware/satman/kernel.rs @@ -1,5 +1,5 @@ use core::{mem, option::NoneError}; -use alloc::{string::String, format, vec::Vec, collections::{btree_map::BTreeMap, vec_deque::VecDeque}}; +use alloc::{string::String, format, vec::Vec, collections::btree_map::BTreeMap}; use cslice::AsCSlice; use board_artiq::{drtioaux, drtio_routing::RoutingTable, mailbox, spi}; @@ -63,7 +63,7 @@ enum KernelState { Absent, Loaded, Running, - MsgAwait { max_time: u64, tags: Vec }, + MsgAwait { id: u32, max_time: u64, tags: Vec }, MsgSending, SubkernelAwaitLoad, SubkernelAwaitFinish { max_time: u64, id: u32 }, @@ -115,6 +115,7 @@ macro_rules! unexpected { /* represents interkernel messages */ struct Message { + id: u32, count: u8, data: Vec } @@ -131,7 +132,7 @@ enum OutMessageState { struct MessageManager { out_message: Option, out_state: OutMessageState, - in_queue: VecDeque, + in_queue: Vec, in_buffer: Option, } @@ -171,12 +172,12 @@ impl MessageManager { MessageManager { out_message: None, out_state: OutMessageState::NoMessage, - in_queue: VecDeque::new(), + in_queue: Vec::new(), in_buffer: None } } - pub fn handle_incoming(&mut self, status: PayloadStatus, length: usize, data: &[u8; MASTER_PAYLOAD_MAX_SIZE]) { + pub fn handle_incoming(&mut self, status: PayloadStatus, length: usize, id: u32, data: &[u8; MASTER_PAYLOAD_MAX_SIZE]) { // called when receiving a message from master if status.is_first() { // clear the buffer for first message @@ -186,6 +187,7 @@ impl MessageManager { Some(message) => message.data.extend(&data[..length]), None => { self.in_buffer = Some(Message { + id: id, count: data[0], data: data[1..length].to_vec() }); @@ -193,7 +195,7 @@ impl MessageManager { }; if status.is_last() { // when done, remove from working queue - self.in_queue.push_back(self.in_buffer.take().unwrap()); + self.in_queue.push(self.in_buffer.take().unwrap()); } } @@ -257,8 +259,13 @@ impl MessageManager { Ok(()) } - pub fn get_incoming(&mut self) -> Option { - self.in_queue.pop_front() + pub fn get_incoming(&mut self, id: u32) -> Option { + for i in 0..self.in_queue.len() { + if self.in_queue[i].id == id { + return Some(self.in_queue.remove(i)); + } + } + None } } @@ -363,11 +370,11 @@ impl Manager { kern_acknowledge() } - pub fn message_handle_incoming(&mut self, status: PayloadStatus, length: usize, slice: &[u8; MASTER_PAYLOAD_MAX_SIZE]) { + pub fn message_handle_incoming(&mut self, status: PayloadStatus, length: usize, id: u32, slice: &[u8; MASTER_PAYLOAD_MAX_SIZE]) { if !self.is_running() { return; } - 
self.session.messages.handle_incoming(status, length, slice); + self.session.messages.handle_incoming(status, length, id, slice); } pub fn message_get_slice(&mut self, slice: &mut [u8; MASTER_PAYLOAD_MAX_SIZE]) -> Option { @@ -539,13 +546,13 @@ impl Manager { fn process_external_messages(&mut self) -> Result<(), Error> { match &self.session.kernel_state { - KernelState::MsgAwait { max_time, tags } => { + KernelState::MsgAwait { id, max_time, tags } => { if clock::get_ms() > *max_time { kern_send(&kern::SubkernelMsgRecvReply { status: kern::SubkernelStatus::Timeout, count: 0 })?; self.session.kernel_state = KernelState::Running; return Ok(()) } - if let Some(message) = self.session.messages.get_incoming() { + if let Some(message) = self.session.messages.get_incoming(*id) { kern_send(&kern::SubkernelMsgRecvReply { status: kern::SubkernelStatus::NoError, count: message.count })?; let tags = tags.clone(); self.session.kernel_state = KernelState::Running; @@ -752,22 +759,29 @@ impl Manager { Ok(()) } - &kern::SubkernelMsgSend { id: _, destination: msg_dest, count, tag, data } => { - let dest = match msg_dest { - Some(dest) => dest, - None => self.session.source - }; - self.session.messages.accept_outgoing(self.current_id, destination, - dest, count, tag, data, + &kern::SubkernelMsgSend { id, destination: msg_dest, count, tag, data } => { + let message_destination; + let message_id; + if let Some(dest) = msg_dest { + message_destination = dest; + message_id = id; + } else { + // return message, return to source + message_destination = self.session.source; + message_id = self.current_id; + } + self.session.messages.accept_outgoing(message_id, destination, + message_destination, count, tag, data, routing_table, rank, router)?; // acknowledge after the message is sent self.session.kernel_state = KernelState::MsgSending; Ok(()) } - &kern::SubkernelMsgRecvRequest { id: _, timeout, tags } => { + &kern::SubkernelMsgRecvRequest { id, timeout, tags } => { let max_time = clock::get_ms() + timeout as u64; - self.session.kernel_state = KernelState::MsgAwait { max_time: max_time, tags: tags.to_vec() }; + self.session.kernel_state = KernelState::MsgAwait { + id: id, max_time: max_time, tags: tags.to_vec() }; Ok(()) }, diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index 78c84103a..2105b4c26 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -458,9 +458,9 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg data: data_slice, }) } - drtioaux::Packet::SubkernelMessage { source, destination: _destination, id: _id, status, length, data } => { + drtioaux::Packet::SubkernelMessage { source, destination: _destination, id, status, length, data } => { forward!(_routing_table, _destination, *rank, _repeaters, &packet); - kernelmgr.message_handle_incoming(status, length as usize, &data); + kernelmgr.message_handle_incoming(status, length as usize, id, &data); router.send(drtioaux::Packet::SubkernelMessageAck { destination: source }, _routing_table, *rank, *self_destination) From 0ba0330b53ce13f2c724286b36c9c9ecd7f51d52 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Fri, 26 Jan 2024 16:02:28 +0800 Subject: [PATCH 107/296] compiler: support free subkernel message passing --- artiq/compiler/builtins.py | 6 + artiq/compiler/embedding.py | 48 ++++++-- artiq/compiler/prelude.py | 2 + .../compiler/transforms/artiq_ir_generator.py | 36 ++++++ artiq/compiler/transforms/inferencer.py | 51 +++++++++ .../compiler/transforms/llvm_ir_generator.py | 
103 +++++++++--------- artiq/coredevice/core.py | 6 +- 7 files changed, 189 insertions(+), 63 deletions(-) diff --git a/artiq/compiler/builtins.py b/artiq/compiler/builtins.py index 64e9b3690..cb0834f71 100644 --- a/artiq/compiler/builtins.py +++ b/artiq/compiler/builtins.py @@ -253,6 +253,12 @@ def fn_subkernel_await(): def fn_subkernel_preload(): return types.TBuiltinFunction("subkernel_preload") +def fn_subkernel_send(): + return types.TBuiltinFunction("subkernel_send") + +def fn_subkernel_recv(): + return types.TBuiltinFunction("subkernel_recv") + # Accessors def is_none(typ): diff --git a/artiq/compiler/embedding.py b/artiq/compiler/embedding.py index 928e9e75d..c46c69da3 100644 --- a/artiq/compiler/embedding.py +++ b/artiq/compiler/embedding.py @@ -47,8 +47,13 @@ class SpecializedFunction: return hash((self.instance_type, self.host_function)) +class SubkernelMessageType: + def __init__(self, name, value_type): + self.name = name + self.value_type = value_type + class EmbeddingMap: - def __init__(self, subkernels={}): + def __init__(self, old_embedding_map=None): self.object_current_key = 0 self.object_forward_map = {} self.object_reverse_map = {} @@ -64,13 +69,22 @@ class EmbeddingMap: self.function_map = {} self.str_forward_map = {} self.str_reverse_map = {} - + + # mapping `name` to object ID + self.subkernel_message_map = {} + # subkernels: dict of ID: function, just like object_forward_map # allow the embedding map to be aware of subkernels from other kernels - for key, obj_ref in subkernels.items(): - self.object_forward_map[key] = obj_ref - obj_id = id(obj_ref) - self.object_reverse_map[obj_id] = key + if not old_embedding_map is None: + for key, obj_ref in old_embedding_map.subkernels().items(): + self.object_forward_map[key] = obj_ref + obj_id = id(obj_ref) + self.object_reverse_map[obj_id] = key + for msg_id, msg_type in old_embedding_map.subkernel_messages().items(): + self.object_forward_map[msg_id] = msg_type + obj_id = id(msg_type) + self.subkernel_message_map[msg_type.name] = msg_id + self.object_reverse_map[obj_id] = msg_id self.preallocate_runtime_exception_names(["RuntimeError", "RTIOUnderflow", @@ -174,7 +188,7 @@ class EmbeddingMap: self.object_current_key += 1 while self.object_forward_map.get(self.object_current_key): # make sure there's no collisions with previously inserted subkernels - # their identifiers must be consistent between kernels/subkernels + # their identifiers must be consistent across all kernels/subkernels self.object_current_key += 1 self.object_forward_map[self.object_current_key] = obj_ref @@ -189,7 +203,7 @@ class EmbeddingMap: obj_ref = self.object_forward_map[obj_id] if isinstance(obj_ref, (pytypes.FunctionType, pytypes.MethodType, pytypes.BuiltinFunctionType, pytypes.ModuleType, - SpecializedFunction)): + SpecializedFunction, SubkernelMessageType)): continue elif isinstance(obj_ref, type): _, obj_typ = self.type_map[obj_ref] @@ -205,6 +219,20 @@ class EmbeddingMap: subkernels[k] = v return subkernels + def store_subkernel_message(self, name, value_type): + if name in self.subkernel_message_map: + msg_id = self.subkernel_message_map[name] + else: + msg_id = self.store_object(SubkernelMessageType(name, value_type)) + self.subkernel_message_map[name] = msg_id + return msg_id, self.retrieve_object(msg_id) + + def subkernel_messages(self): + messages = {} + for name, msg_id in self.subkernel_message_map.items(): + messages[msg_id] = self.retrieve_object(msg_id) + return messages + def has_rpc(self): return any(filter( lambda x: 
(inspect.isfunction(x) or inspect.ismethod(x)) and \ @@ -802,7 +830,7 @@ class TypedtreeHasher(algorithm.Visitor): return hash(tuple(freeze(getattr(node, field_name)) for field_name in fields)) class Stitcher: - def __init__(self, core, dmgr, engine=None, print_as_rpc=True, destination=0, subkernel_arg_types=[], subkernels={}): + def __init__(self, core, dmgr, engine=None, print_as_rpc=True, destination=0, subkernel_arg_types=[], old_embedding_map=None): self.core = core self.dmgr = dmgr if engine is None: @@ -824,7 +852,7 @@ class Stitcher: self.functions = {} - self.embedding_map = EmbeddingMap(subkernels) + self.embedding_map = EmbeddingMap(old_embedding_map) self.value_map = defaultdict(lambda: []) self.definitely_changed = False diff --git a/artiq/compiler/prelude.py b/artiq/compiler/prelude.py index effbca87c..f96b4d0d7 100644 --- a/artiq/compiler/prelude.py +++ b/artiq/compiler/prelude.py @@ -59,4 +59,6 @@ def globals(): # ARTIQ subkernel utility functions "subkernel_await": builtins.fn_subkernel_await(), "subkernel_preload": builtins.fn_subkernel_preload(), + "subkernel_send": builtins.fn_subkernel_send(), + "subkernel_recv": builtins.fn_subkernel_recv(), } diff --git a/artiq/compiler/transforms/artiq_ir_generator.py b/artiq/compiler/transforms/artiq_ir_generator.py index 6998e0ddc..fe084caab 100644 --- a/artiq/compiler/transforms/artiq_ir_generator.py +++ b/artiq/compiler/transforms/artiq_ir_generator.py @@ -2559,6 +2559,42 @@ class ARTIQIRGenerator(algorithm.Visitor): sid = ir.Constant(fn.sid, builtins.TInt32()) dest = ir.Constant(fn.destination, builtins.TInt32()) return self.append(ir.Builtin("subkernel_preload", [sid, dest], builtins.TNone())) + elif types.is_builtin(typ, "subkernel_send"): + if len(node.args) == 3 and len(node.keywords) == 0: + dest = self.visit(node.args[0]) + name = node.args[1].s + value = self.visit(node.args[2]) + else: + assert False + msg_id, msg = self.embedding_map.store_subkernel_message(name, value.type) + msg_id = ir.Constant(msg_id, builtins.TInt32()) + if value.type != msg.value_type: + diag = diagnostic.Diagnostic("error", + "type mismatch for subkernel message '{name}', receiver expects {recv} while sending {send}", + {"name": name, "recv": msg.value_type, "send": value.type}, + node.loc) + self.engine.process(diag) + return self.append(ir.Builtin("subkernel_send", [msg_id, dest, value], builtins.TNone())) + elif types.is_builtin(typ, "subkernel_recv"): + if len(node.args) == 2 and len(node.keywords) == 0: + name = node.args[0].s + vartype = node.args[1].value + timeout = ir.Constant(10_000, builtins.TInt64()) + elif len(node.args) == 3 and len(node.keywords) == 0: + name = node.args[0].s + vartype = node.args[1].value + timeout = self.visit(node.args[2]) + else: + assert False + msg_id, msg = self.embedding_map.store_subkernel_message(name, vartype) + msg_id = ir.Constant(msg_id, builtins.TInt32()) + if vartype != msg.value_type: + diag = diagnostic.Diagnostic("error", + "type mismatch for subkernel message '{name}', receiver expects {recv} while sending {send}", + {"name": name, "recv": vartype, "send": msg.value_type}, + node.loc) + self.engine.process(diag) + return self.append(ir.Builtin("subkernel_recv", [msg_id, timeout], vartype)) elif types.is_exn_constructor(typ): return self.alloc_exn(node.type, *[self.visit(arg_node) for arg_node in node.args]) elif types.is_constructor(typ): diff --git a/artiq/compiler/transforms/inferencer.py b/artiq/compiler/transforms/inferencer.py index 0b95a60e5..b94985463 100644 --- 
a/artiq/compiler/transforms/inferencer.py +++ b/artiq/compiler/transforms/inferencer.py @@ -1343,6 +1343,57 @@ class Inferencer(algorithm.Visitor): node.loc, None) else: diagnose(valid_forms()) + elif types.is_builtin(typ, "subkernel_send"): + valid_forms = lambda: [ + valid_form("subkernel_send(dest: numpy.int?, name: str, value: V) -> None"), + ] + self._unify(node.type, builtins.TNone(), + node.loc, None) + if len(node.args) == 3: + arg0 = node.args[0] + if types.is_var(arg0.type): + pass # undetermined yet + else: + if builtins.is_int(arg0.type): + self._unify(arg0.type, builtins.TInt8(), + arg0.loc, None) + else: + diagnose(valid_forms()) + arg1 = node.args[1] + self._unify(arg1.type, builtins.TStr(), + arg1.loc, None) + else: + diagnose(valid_forms()) + elif types.is_builtin(typ, "subkernel_recv"): + valid_forms = lambda: [ + valid_form("subkernel_recv(name: str, value_type: type) -> value_type"), + valid_form("subkernel_recv(name: str, value_type: type, timeout: numpy.int64) -> value_type"), + ] + if 2 <= len(node.args) <= 3: + arg0 = node.args[0] + if types.is_var(arg0.type): + pass + else: + self._unify(arg0.type, builtins.TStr(), + arg0.loc, None) + arg1 = node.args[1] + if types.is_var(arg1.type): + pass + else: + self._unify(node.type, arg1.value, + node.loc, None) + if len(node.args) == 3: + arg2 = node.args[2] + if types.is_var(arg2.type): + pass + elif builtins.is_int(arg2.type): + # promote to TInt64 + self._unify(arg2.type, builtins.TInt64(), + arg2.loc, None) + else: + diagnose(valid_forms()) + else: + diagnose(valid_forms()) else: assert False diff --git a/artiq/compiler/transforms/llvm_ir_generator.py b/artiq/compiler/transforms/llvm_ir_generator.py index 3b4e165f3..4f68d27a2 100644 --- a/artiq/compiler/transforms/llvm_ir_generator.py +++ b/artiq/compiler/transforms/llvm_ir_generator.py @@ -1420,6 +1420,20 @@ class LLVMIRGenerator: lldest = ll.Constant(lli8, insn.operands[1].value) return self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, lldest, ll.Constant(lli1, 0)], name="subkernel.preload") + elif insn.op == "subkernel_send": + llmsgid = self.map(insn.operands[0]) + lldest = self.map(insn.operands[1]) + return self._build_subkernel_message(llmsgid, lldest, [insn.operands[2]]) + elif insn.op == "subkernel_recv": + llmsgid = self.map(insn.operands[0]) + lltimeout = self.map(insn.operands[1]) + lltagptr = self._build_subkernel_tags([insn.type]) + self.llbuilder.call(self.llbuiltin("subkernel_await_message"), + [llmsgid, lltimeout, lltagptr, ll.Constant(lli8, 1), ll.Constant(lli8, 1)], + name="subkernel.await.message") + llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [], + name="subkernel.arg.stack") + return self._build_rpc_recv(insn.type, llstackptr) else: assert False @@ -1580,11 +1594,8 @@ class LLVMIRGenerator: self.llbuilder.branch(llnormalblock) return llret - def _build_rpc(self, fun_loc, fun_type, args, llnormalblock, llunwindblock): - llservice = ll.Constant(lli32, fun_type.service) - + def _build_arg_tag(self, args, call_type): tag = b"" - for arg in args: def arg_error_handler(typ): printer = types.TypePrinter() @@ -1593,12 +1604,18 @@ class LLVMIRGenerator: {"type": printer.name(typ)}, arg.loc) diag = diagnostic.Diagnostic("error", - "type {type} is not supported in remote procedure calls", - {"type": printer.name(arg.type)}, + "type {type} is not supported in {call_type} calls", + {"type": printer.name(arg.type), "call_type": call_type}, arg.loc, notes=[note]) self.engine.process(diag) tag += ir.rpc_tag(arg.type, 
arg_error_handler) tag += b":" + return tag + + def _build_rpc(self, fun_loc, fun_type, args, llnormalblock, llunwindblock): + llservice = ll.Constant(lli32, fun_type.service) + + tag = self._build_arg_tag(args, call_type="remote procedure") def ret_error_handler(typ): printer = types.TypePrinter() @@ -1662,61 +1679,47 @@ class LLVMIRGenerator: def _build_subkernel_call(self, fun_loc, fun_type, args): llsid = ll.Constant(lli32, fun_type.sid) lldest = ll.Constant(lli8, fun_type.destination) - tag = b"" - - for arg in args: - def arg_error_handler(typ): - printer = types.TypePrinter() - note = diagnostic.Diagnostic("note", - "value of type {type}", - {"type": printer.name(typ)}, - arg.loc) - diag = diagnostic.Diagnostic("error", - "type {type} is not supported in subkernel calls", - {"type": printer.name(arg.type)}, - arg.loc, notes=[note]) - self.engine.process(diag) - tag += ir.rpc_tag(arg.type, arg_error_handler) - tag += b":" - # run the kernel first self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, lldest, ll.Constant(lli1, 1)]) - # arg sent in the same vein as RPC - llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [], - name="subkernel.stack") + if args: + # only send args if there's anything to send, 'self' is excluded + self._build_subkernel_message(llsid, lldest, args) + return llsid + + def _build_subkernel_message(self, llid, lldest, args): + # args (or messages) are sent in the same vein as RPC + tag = self._build_arg_tag(args, call_type="subkernel") + + llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [], + name="subkernel.stack") lltag = self.llconst_of_const(ir.Constant(tag, builtins.TStr())) lltagptr = self.llbuilder.alloca(lltag.type) self.llbuilder.store(lltag, lltagptr) - if args: - # only send args if there's anything to send, 'self' is excluded - llargs = self.llbuilder.alloca(llptr, ll.Constant(lli32, len(args)), - name="subkernel.args") - for index, arg in enumerate(args): - if builtins.is_none(arg.type): - llargslot = self.llbuilder.alloca(llunit, - name="subkernel.arg{}".format(index)) - else: - llarg = self.map(arg) - llargslot = self.llbuilder.alloca(llarg.type, - name="subkernel.arg{}".format(index)) - self.llbuilder.store(llarg, llargslot) - llargslot = self.llbuilder.bitcast(llargslot, llptr) + llargs = self.llbuilder.alloca(llptr, ll.Constant(lli32, len(args)), + name="subkernel.args") + for index, arg in enumerate(args): + if builtins.is_none(arg.type): + llargslot = self.llbuilder.alloca(llunit, + name="subkernel.arg{}".format(index)) + else: + llarg = self.map(arg) + llargslot = self.llbuilder.alloca(llarg.type, + name="subkernel.arg{}".format(index)) + self.llbuilder.store(llarg, llargslot) + llargslot = self.llbuilder.bitcast(llargslot, llptr) - llargptr = self.llbuilder.gep(llargs, [ll.Constant(lli32, index)]) - self.llbuilder.store(llargslot, llargptr) + llargptr = self.llbuilder.gep(llargs, [ll.Constant(lli32, index)]) + self.llbuilder.store(llargslot, llargptr) - llargcount = ll.Constant(lli8, len(args)) + llargcount = ll.Constant(lli8, len(args)) - llisreturn = ll.Constant(lli1, False) - - self.llbuilder.call(self.llbuiltin("subkernel_send_message"), - [llsid, llisreturn, lldest, llargcount, lltagptr, llargs]) - self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr]) - - return llsid + llisreturn = ll.Constant(lli1, False) + self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr]) + return self.llbuilder.call(self.llbuiltin("subkernel_send_message"), + [llid, llisreturn, 
lldest, llargcount, lltagptr, llargs]) def _build_subkernel_return(self, insn): # builds a remote return. diff --git a/artiq/coredevice/core.py b/artiq/coredevice/core.py index 26d60e92e..4d3ed36b5 100644 --- a/artiq/coredevice/core.py +++ b/artiq/coredevice/core.py @@ -121,14 +121,14 @@ class Core: def compile(self, function, args, kwargs, set_result=None, attribute_writeback=True, print_as_rpc=True, target=None, destination=0, subkernel_arg_types=[], - subkernels={}): + old_embedding_map=None): try: engine = _DiagnosticEngine(all_errors_are_fatal=True) stitcher = Stitcher(engine=engine, core=self, dmgr=self.dmgr, print_as_rpc=print_as_rpc, destination=destination, subkernel_arg_types=subkernel_arg_types, - subkernels=subkernels) + old_embedding_map=old_embedding_map) stitcher.stitch_call(function, args, kwargs, set_result) stitcher.finalize() @@ -182,7 +182,7 @@ class Core: self.compile(subkernel_fn, self_arg, {}, attribute_writeback=False, print_as_rpc=False, target=target, destination=destination, subkernel_arg_types=subkernel_arg_types.get(sid, []), - subkernels=subkernels) + old_embedding_map=embedding_map) if object_map.has_rpc(): raise ValueError("Subkernel must not use RPC") return destination, kernel_library, object_map From fbbc8d3dd17b139e856037917513b97ea4ad2a03 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Fri, 26 Jan 2024 16:35:16 +0800 Subject: [PATCH 108/296] docs: add a section for subkernel message passing --- doc/manual/getting_started_core.rst | 31 +++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/doc/manual/getting_started_core.rst b/doc/manual/getting_started_core.rst index ad86a74f5..66002a766 100644 --- a/doc/manual/getting_started_core.rst +++ b/doc/manual/getting_started_core.rst @@ -304,7 +304,7 @@ For example, a subkernel performing integer addition: :: result = subkernel_await(subkernel_add) assert result == 4 -Sometimes the subkernel execution may take more time - and the await has a default timeout of 10000 milliseconds (10 seconds). It can be adjusted, as ``subkernel_await()`` accepts an optional timeout argument. +Sometimes the subkernel execution may take more time - and the await has a default timeout of 10000 milliseconds (10 seconds). It can be adjusted, as ``subkernel_await()`` accepts an optional timeout argument. If the given value is negative, timeout is disabled. Subkernels are compiled after the main kernel, and then immediately uploaded to satellites. When called, master instructs the appropriate satellite to load the subkernel into their kernel core and to run it. If the subkernel is complex, and its binary relatively big, the delay between the call and actually running the subkernel may be substantial; if that delay has to be minimized, ``subkernel_preload(function)`` should be used before the call. @@ -346,4 +346,31 @@ Without the preload, the delay after the core reset would need to be longer. It' In general, subkernels do not have to be awaited, but awaiting is required to retrieve returned values and exceptions. .. note:: - When a subkernel is running, regardless of devices used by it, RTIO devices on that satellite are not available to the master. Control is returned to master after the subkernel finishes - to be sure that you can use the device, the subkernel should be awaited before any RTIO operations on the affected satellite are performed. \ No newline at end of file + When a subkernel is running, regardless of devices used by it, RTIO devices on that satellite are not available to the master. 
Control is returned to master after the subkernel finishes - to be sure that you can use the device, the subkernel should be awaited before any RTIO operations on the affected satellite are performed. + +Message passing +^^^^^^^^^^^^^^^ + +Subkernels besides arguments and returns, can also pass messages between each other or the master with built-in ``subkernel_send`` and ``subkernel_recv`` functions. This can be used for communication between subkernels, passing additional data, or partially computed data. Consider the following example: :: + + from artiq.experiment import * + + class MessagePassing(EnvExperiment): + def build(self): + self.setattr_device("core") + + @subkernel(destination=1) + def simple_self(self) -> TInt32: + data = subkernel_recv("message", TInt32) + return data + 20 + + @kernel + def run(self): + self.simple_self() + subkernel_send(1, "message", 150) + result = subkernel_await(self.simple_self) + assert result == 170 + +The ``subkernel_send`` function accepts three arguments: destination, name of the message that will be linked with the ``subkernel_recv``, and the value. + +The ``subkernel_recv`` function accepts two obligatory arguments: message name (matching the name provided in ``subkernel_send``) and expected type; and optionally, a third argument - timeout for the operation in milliseconds. If the value is negative, timeout is disabled. The default value is -1 (no timeout). The type between the two functions with the same name must match. \ No newline at end of file From 726cb092cadd367ee3784a603de2374ac77e8920 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Fri, 26 Jan 2024 16:48:04 +0800 Subject: [PATCH 109/296] tests: add message passing tests --- .../lit/embedding/subkernel_message_recv.py | 22 +++++++++++++++++++ .../lit/embedding/subkernel_message_send.py | 20 +++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 artiq/test/lit/embedding/subkernel_message_recv.py create mode 100644 artiq/test/lit/embedding/subkernel_message_send.py diff --git a/artiq/test/lit/embedding/subkernel_message_recv.py b/artiq/test/lit/embedding/subkernel_message_recv.py new file mode 100644 index 000000000..2331001e7 --- /dev/null +++ b/artiq/test/lit/embedding/subkernel_message_recv.py @@ -0,0 +1,22 @@ +# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding +compile %s +# RUN: OutputCheck %s --file-to-check=%t.ll + +from artiq.language.core import * +from artiq.language.types import * + +@kernel +def entrypoint(): + # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !. + message_pass() + # CHECK-NOT: call void @subkernel_send_message\(i32 ., i1 false, i8 1, i8 1, .*\), !dbg !. + # CHECK: call i8 @subkernel_await_message\(i32 2, i64 10000, { i8\*, i32 }\* nonnull .*, i8 1, i8 1\), !dbg !. 
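+    # the two trailing i8 arguments give the expected minimum and maximum number of values to receive (a single TInt32 here)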
+ subkernel_recv("message", TInt32) + + +# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr +# CHECK-NOT-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr +# CHECK-L: declare i8 @subkernel_await_message(i32, i64, { i8*, i32 }*, i8, i8) local_unnamed_addr +# CHECK-NOT-L: declare void @subkernel_await_finish(i32, i64) local_unnamed_addr +@subkernel(destination=1) +def message_pass() -> TNone: + subkernel_send(0, "message", 15) diff --git a/artiq/test/lit/embedding/subkernel_message_send.py b/artiq/test/lit/embedding/subkernel_message_send.py new file mode 100644 index 000000000..3f0f77e35 --- /dev/null +++ b/artiq/test/lit/embedding/subkernel_message_send.py @@ -0,0 +1,20 @@ +# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding +compile %s +# RUN: OutputCheck %s --file-to-check=%t.ll + +from artiq.language.core import * +from artiq.language.types import * + +@kernel +def entrypoint(): + # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !. + message_pass() + # CHECK: call void @subkernel_send_message\(i32 2, i1 false, i8 1, i8 1, .*\), !dbg !. + # CHECK-NOT: call i8 @subkernel_await_message\(i32 1, i64 10000, { i8\*, i32 }\* nonnull .*, i8 1, i8 1\), !dbg !. + subkernel_send(1, "message", 15) + + +# CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr +# CHECK-L: declare void @subkernel_send_message(i32, i1, i8, i8, { i8*, i32 }*, i8**) local_unnamed_addr +@subkernel(destination=1) +def message_pass() -> TNone: + subkernel_recv("message", TInt32) From 09462442f7e575c463fb8dc701066677a54a2132 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Mon, 29 Jan 2024 15:04:49 +0800 Subject: [PATCH 110/296] subkernel: allow negative timeouts for no timeout --- .../compiler/transforms/artiq_ir_generator.py | 4 ++-- artiq/firmware/ksupport/lib.rs | 4 ++-- artiq/firmware/libproto_artiq/kernel_proto.rs | 4 ++-- artiq/firmware/runtime/kernel.rs | 10 ++++----- artiq/firmware/satman/kernel.rs | 13 ++++++----- doc/manual/getting_started_core.rst | 22 +++++++++---------- 6 files changed, 29 insertions(+), 28 deletions(-) diff --git a/artiq/compiler/transforms/artiq_ir_generator.py b/artiq/compiler/transforms/artiq_ir_generator.py index fe084caab..6a929702f 100644 --- a/artiq/compiler/transforms/artiq_ir_generator.py +++ b/artiq/compiler/transforms/artiq_ir_generator.py @@ -2537,7 +2537,7 @@ class ARTIQIRGenerator(algorithm.Visitor): timeout = self.visit(node.args[1]) elif len(node.args) == 1 and len(node.keywords) == 0: fn = node.args[0].type - timeout = ir.Constant(10_000, builtins.TInt64()) + timeout = ir.Constant(-1, builtins.TInt64()) else: assert False if types.is_method(fn): @@ -2579,7 +2579,7 @@ class ARTIQIRGenerator(algorithm.Visitor): if len(node.args) == 2 and len(node.keywords) == 0: name = node.args[0].s vartype = node.args[1].value - timeout = ir.Constant(10_000, builtins.TInt64()) + timeout = ir.Constant(-1, builtins.TInt64()) elif len(node.args) == 3 and len(node.keywords) == 0: name = node.args[0].s vartype = node.args[1].value diff --git a/artiq/firmware/ksupport/lib.rs b/artiq/firmware/ksupport/lib.rs index 3cd2052ec..5c3946494 100644 --- a/artiq/firmware/ksupport/lib.rs +++ b/artiq/firmware/ksupport/lib.rs @@ -498,7 +498,7 @@ extern fn subkernel_load_run(id: u32, destination: u8, run: bool) { } #[unwind(allowed)] -extern fn subkernel_await_finish(id: u32, timeout: u64) { +extern fn subkernel_await_finish(id: u32, timeout: i64) { send(&SubkernelAwaitFinishRequest { 
id: id, timeout: timeout }); recv!(SubkernelAwaitFinishReply { status } => { match status { @@ -528,7 +528,7 @@ extern fn subkernel_send_message(id: u32, is_return: bool, destination: u8, } #[unwind(allowed)] -extern fn subkernel_await_message(id: u32, timeout: u64, tags: &CSlice, min: u8, max: u8) -> u8 { +extern fn subkernel_await_message(id: u32, timeout: i64, tags: &CSlice, min: u8, max: u8) -> u8 { send(&SubkernelMsgRecvRequest { id: id, timeout: timeout, tags: tags.as_ref() }); recv!(SubkernelMsgRecvReply { status, count } => { match status { diff --git a/artiq/firmware/libproto_artiq/kernel_proto.rs b/artiq/firmware/libproto_artiq/kernel_proto.rs index 108c83401..e50178f46 100644 --- a/artiq/firmware/libproto_artiq/kernel_proto.rs +++ b/artiq/firmware/libproto_artiq/kernel_proto.rs @@ -105,10 +105,10 @@ pub enum Message<'a> { SubkernelLoadRunRequest { id: u32, destination: u8, run: bool }, SubkernelLoadRunReply { succeeded: bool }, - SubkernelAwaitFinishRequest { id: u32, timeout: u64 }, + SubkernelAwaitFinishRequest { id: u32, timeout: i64 }, SubkernelAwaitFinishReply { status: SubkernelStatus }, SubkernelMsgSend { id: u32, destination: Option, count: u8, tag: &'a [u8], data: *const *const () }, - SubkernelMsgRecvRequest { id: u32, timeout: u64, tags: &'a [u8] }, + SubkernelMsgRecvRequest { id: u32, timeout: i64, tags: &'a [u8] }, SubkernelMsgRecvReply { status: SubkernelStatus, count: u8 }, Log(fmt::Arguments<'a>), diff --git a/artiq/firmware/runtime/kernel.rs b/artiq/firmware/runtime/kernel.rs index b14a64d34..acbce47d7 100644 --- a/artiq/firmware/runtime/kernel.rs +++ b/artiq/firmware/runtime/kernel.rs @@ -279,7 +279,7 @@ pub mod subkernel { } pub fn await_finish(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, - routing_table: &RoutingTable, id: u32, timeout: u64) -> Result { + routing_table: &RoutingTable, id: u32, timeout: i64) -> Result { { let _lock = subkernel_mutex.lock(io)?; match unsafe { SUBKERNELS.get(&id).unwrap().state } { @@ -291,7 +291,7 @@ pub mod subkernel { } let max_time = clock::get_ms() + timeout as u64; let _res = io.until(|| { - if clock::get_ms() > max_time { + if timeout > 0 && clock::get_ms() > max_time { return true; } if subkernel_mutex.test_lock() { @@ -305,7 +305,7 @@ pub mod subkernel { _ => false } })?; - if clock::get_ms() > max_time { + if timeout > 0 && clock::get_ms() > max_time { error!("Remote subkernel finish await timed out"); return Err(Error::Timeout); } @@ -360,7 +360,7 @@ pub mod subkernel { } } - pub fn message_await(io: &Io, subkernel_mutex: &Mutex, id: u32, timeout: u64 + pub fn message_await(io: &Io, subkernel_mutex: &Mutex, id: u32, timeout: i64 ) -> Result { let is_subkernel = { let _lock = subkernel_mutex.lock(io)?; @@ -379,7 +379,7 @@ pub mod subkernel { }; let max_time = clock::get_ms() + timeout as u64; let message = io.until_ok(|| { - if clock::get_ms() > max_time { + if timeout > 0 && clock::get_ms() > max_time { return Ok(None); } if subkernel_mutex.test_lock() { diff --git a/artiq/firmware/satman/kernel.rs b/artiq/firmware/satman/kernel.rs index fdf8792f8..e942aac7a 100644 --- a/artiq/firmware/satman/kernel.rs +++ b/artiq/firmware/satman/kernel.rs @@ -63,10 +63,10 @@ enum KernelState { Absent, Loaded, Running, - MsgAwait { id: u32, max_time: u64, tags: Vec }, + MsgAwait { id: u32, max_time: i64, tags: Vec }, MsgSending, SubkernelAwaitLoad, - SubkernelAwaitFinish { max_time: u64, id: u32 }, + SubkernelAwaitFinish { max_time: i64, id: u32 }, DmaUploading { max_time: u64 }, DmaAwait { max_time: u64 }, } @@ -547,7 +547,7 
@@ impl Manager { fn process_external_messages(&mut self) -> Result<(), Error> { match &self.session.kernel_state { KernelState::MsgAwait { id, max_time, tags } => { - if clock::get_ms() > *max_time { + if *max_time > 0 && clock::get_ms() > *max_time as u64 { kern_send(&kern::SubkernelMsgRecvReply { status: kern::SubkernelStatus::Timeout, count: 0 })?; self.session.kernel_state = KernelState::Running; return Ok(()) @@ -570,7 +570,7 @@ impl Manager { } }, KernelState::SubkernelAwaitFinish { max_time, id } => { - if clock::get_ms() > *max_time { + if *max_time > 0 && clock::get_ms() > *max_time as u64 { kern_send(&kern::SubkernelAwaitFinishReply { status: kern::SubkernelStatus::Timeout })?; self.session.kernel_state = KernelState::Running; } else { @@ -779,7 +779,8 @@ impl Manager { } &kern::SubkernelMsgRecvRequest { id, timeout, tags } => { - let max_time = clock::get_ms() + timeout as u64; + // negative timeout value means no timeout + let max_time = if timeout > 0 { clock::get_ms() as i64 + timeout } else { timeout }; self.session.kernel_state = KernelState::MsgAwait { id: id, max_time: max_time, tags: tags.to_vec() }; Ok(()) @@ -794,7 +795,7 @@ impl Manager { } &kern::SubkernelAwaitFinishRequest{ id, timeout } => { - let max_time = clock::get_ms() + timeout as u64; + let max_time = if timeout > 0 { clock::get_ms() as i64 + timeout } else { timeout }; self.session.kernel_state = KernelState::SubkernelAwaitFinish { max_time: max_time, id: id }; Ok(()) } diff --git a/doc/manual/getting_started_core.rst b/doc/manual/getting_started_core.rst index 66002a766..a0a9e762c 100644 --- a/doc/manual/getting_started_core.rst +++ b/doc/manual/getting_started_core.rst @@ -304,7 +304,7 @@ For example, a subkernel performing integer addition: :: result = subkernel_await(subkernel_add) assert result == 4 -Sometimes the subkernel execution may take more time - and the await has a default timeout of 10000 milliseconds (10 seconds). It can be adjusted, as ``subkernel_await()`` accepts an optional timeout argument. If the given value is negative, timeout is disabled. +Sometimes the subkernel execution may take more time. By default, the await function will wait forever. However, if timeout is needed it can be set, as ``subkernel_await()`` accepts an optional argument. The value is interpreted in milliseconds and if it is negative, timeout is disabled. Subkernels are compiled after the main kernel, and then immediately uploaded to satellites. When called, master instructs the appropriate satellite to load the subkernel into their kernel core and to run it. If the subkernel is complex, and its binary relatively big, the delay between the call and actually running the subkernel may be substantial; if that delay has to be minimized, ``subkernel_preload(function)`` should be used before the call. @@ -351,26 +351,26 @@ In general, subkernels do not have to be awaited, but awaiting is required to re Message passing ^^^^^^^^^^^^^^^ -Subkernels besides arguments and returns, can also pass messages between each other or the master with built-in ``subkernel_send`` and ``subkernel_recv`` functions. This can be used for communication between subkernels, passing additional data, or partially computed data. Consider the following example: :: +Subkernels besides arguments and returns, can also pass messages between each other or the master with built-in ``subkernel_send()`` and ``subkernel_recv()`` functions. This can be used for communication between subkernels, passing additional data, or partially computed data. 
Consider the following example: ::

     from artiq.experiment import *

+    @subkernel(destination=1)
+    def simple_message() -> TInt32:
+        data = subkernel_recv("message", TInt32)
+        return data + 20
+
     class MessagePassing(EnvExperiment):
         def build(self):
             self.setattr_device("core")
 
-        @subkernel(destination=1)
-        def simple_self(self) -> TInt32:
-            data = subkernel_recv("message", TInt32)
-            return data + 20
-
         @kernel
         def run(self):
-            self.simple_self()
+            simple_message()
             subkernel_send(1, "message", 150)
-            result = subkernel_await(self.simple_self)
+            result = subkernel_await(simple_message)
             assert result == 170
 
-The ``subkernel_send`` function accepts three arguments: destination, name of the message that will be linked with the ``subkernel_recv``, and the value.
+The ``subkernel_send(destination, name, value)`` function requires three arguments: destination, name of the message that will be linked with the ``subkernel_recv()``, and the passed value.
 
-The ``subkernel_recv`` function accepts two obligatory arguments: message name (matching the name provided in ``subkernel_send``) and expected type; and optionally, a third argument - timeout for the operation in milliseconds. If the value is negative, timeout is disabled. The default value is -1 (no timeout). The type between the two functions with the same name must match.
\ No newline at end of file
+The ``subkernel_recv(name, type, [timeout])`` function requires two arguments: message name (matching the name provided in ``subkernel_send``) and expected type. Optionally, it accepts a third argument - timeout for the operation in milliseconds. If the value is negative, timeout is disabled. The default value is no timeout. The value and declared types between the function pair with the same name must match.
\ No newline at end of file

From 5f3126f39369121b67f97e029d22c1374a5c7762 Mon Sep 17 00:00:00 2001
From: Simon Renblad
Date: Fri, 26 Jan 2024 13:19:40 +0800
Subject: [PATCH 111/296] waveform: add BitWaveform

---
 artiq/dashboard/waveform.py | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py
index 53e250f34..9897c41a2 100644
--- a/artiq/dashboard/waveform.py
+++ b/artiq/dashboard/waveform.py
@@ -186,6 +186,41 @@ class _BaseWaveform(pg.PlotWidget):
             super().wheelEvent(e)
 
 
+class BitWaveform(_BaseWaveform):
+    def __init__(self, name, width, parent=None):
+        _BaseWaveform.__init__(self, name, width, parent)
+        self._arrows = []
+
+    def onDataChange(self, data):
+        try:
+            l = len(data)
+            display_y = np.empty(l)
+            display_x = np.empty(l)
+            display_map = {
+                "X": 0.5,
+                "1": 1,
+                "0": 0
+            }
+            previous_y = None
+            for i, coord in enumerate(data):
+                x, y = coord
+                dis_y = display_map[y]
+                if previous_y == y:
+                    arw = pg.ArrowItem(pxMode=True, angle=90)
+                    self.addItem(arw)
+                    self._arrows.append(arw)
+                    arw.setPos(x, dis_y)
+                display_y[i] = dis_y
+                display_x[i] = x
+                previous_y = y
+            self.plot_data_item.setData(x=display_x, y=display_y)
+        except:
+            logger.error('Error when displaying waveform: {}'.format(self.name), exc_info=True)
+            for arw in self._arrows:
+                self.removeItem(arw)
+            self.plot_data_item.setData(x=[], y=[])
+
+
 class _WaveformView(QtWidgets.QWidget):
     def __init__(self, parent):
         QtWidgets.QWidget.__init__(self, parent=parent)

From 2d8de3ed937d4a7278f866352f86ab30a64d47a3 Mon Sep 17 00:00:00 2001
From: Simon Renblad
Date: Fri, 26 Jan 2024 13:36:20 +0800
Subject: [PATCH 112/296] waveform: add BitVectorWaveform

---
 artiq/dashboard/waveform.py | 48
+++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 9897c41a2..91fed3856 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -221,6 +221,54 @@ class BitWaveform(_BaseWaveform): self.plot_data_item.setData(x=[], y=[]) +class BitVectorWaveform(_BaseWaveform): + def __init__(self, name, width, parent=None): + _BaseWaveform.__init__(self, name, width, parent) + self._labels = [] + self.x_data = [] + self.view_box.sigTransformChanged.connect(self._update_labels) + + def _update_labels(self): + for label in self._labels: + self.removeItem(label) + xmin, xmax = self.view_box.viewRange()[0] + left_label_i = bisect.bisect_left(self.x_data, xmin) + right_label_i = bisect.bisect_right(self.x_data, xmax) + 1 + for i, j in itertools.pairwise(range(left_label_i, right_label_i)): + x1 = self.x_data[i] + x2 = self.x_data[j] if j < len(self.x_data) else self._stopped_x + lbl = self._labels[i] + bounds = lbl.boundingRect() + bounds_view = self.view_box.mapSceneToView(bounds) + if bounds_view.boundingRect().width() < x2 - x1: + self.addItem(lbl) + + def onDataChange(self, data): + try: + self.x_data = zip(*data)[0] + l = len(data) + display_x = np.array(l * 2) + display_y = np.array(l * 2) + for i, coord in enumerate(data): + x, y = coord + display_x[i * 2] = x + display_x[i * 2 + 1] = x + display_y[i * 2] = 0 + display_y[i * 2 + 1] = int(int(y) != 0) + lbl = pg.TextItem( + self._format_string.format(y), anchor=(0, 0.5)) + lbl.setPos(x, 0.5) + lbl.setTextWidth(100) + self._labels.append(lbl) + self.plot_data_item.setData(x=display_x, y=display_y) + except: + logger.error( + "Error when displaying waveform: {}".format(self.name), exc_info=True) + for lbl in self._labels: + self.plot_item.removeItem(lbl) + self.plot_data_item.setData(x=[], y=[]) + + class _WaveformView(QtWidgets.QWidget): def __init__(self, parent): QtWidgets.QWidget.__init__(self, parent=parent) From cbe7ac1cfd61ce550d5223362d1caa9492b0e51c Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Fri, 26 Jan 2024 15:08:49 +0800 Subject: [PATCH 113/296] waveform: add AnalogWaveform --- artiq/dashboard/waveform.py | 47 +++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 91fed3856..bc9fe255b 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -221,6 +221,23 @@ class BitWaveform(_BaseWaveform): self.plot_data_item.setData(x=[], y=[]) +class AnalogWaveform(_BaseWaveform): + def __init__(self, name, width, parent=None): + _BaseWaveform.__init__(self, name, width, parent) + + def onDataChange(self, data): + try: + x_data, y_data = zip(*data) + self.plot_data_item.setData(x=x_data, y=y_data) + max_y = max(y_data) + min_y = min(y_data) + self.plot_item.setRange(yRange=(min_y, max_y), padding=0.1) + except: + logger.error( + 'Error when displaying waveform: {}'.format(self.name), exc_info=True) + self.plot_data_item.setData(x=[], y=[]) + + class BitVectorWaveform(_BaseWaveform): def __init__(self, name, width, parent=None): _BaseWaveform.__init__(self, name, width, parent) @@ -269,6 +286,36 @@ class BitVectorWaveform(_BaseWaveform): self.plot_data_item.setData(x=[], y=[]) +class LogWaveform(_BaseWaveform): + def __init__(self, name, width, parent=None): + _BaseWaveform.__init__(self, name, width, parent) + self.plot_data_item.opts['pen'] = None + self.plot_data_item.opts['symbol'] = 'x' + + def onDataChange(self, 
data): + try: + x_data = zip(*data)[0] + self.plot_data_item.setData( + x=x_data, y=np.ones(len(x_data))) + old_msg = "" + old_x = 0 + for x, msg in data: + if x == old_x: + old_msg += "\n" + msg + else: + lbl = pg.TextItem(old_msg) + self.addItem(lbl) + lbl.setPos(old_x, 1) + old_msg = msg + old_x = x + lbl = pg.TextItem(old_msg) + self.addItem(lbl) + lbl.setPos(old_x, 1) + except: + logger.error('Error when displaying waveform: {}'.format(self.name), exc_info=True) + self.plot_data_item.setData(x=[], y=[]) + + class _WaveformView(QtWidgets.QWidget): def __init__(self, parent): QtWidgets.QWidget.__init__(self, parent=parent) From d1ee0ffb83cb02713cfebbc23dc90537b3cb9e2f Mon Sep 17 00:00:00 2001 From: mwojcik Date: Wed, 31 Jan 2024 16:48:48 +0800 Subject: [PATCH 114/296] subkernel: fix passing arguments --- artiq/compiler/transforms/llvm_ir_generator.py | 2 +- artiq/firmware/ksupport/lib.rs | 2 +- artiq/firmware/libproto_artiq/kernel_proto.rs | 2 +- artiq/firmware/runtime/session.rs | 4 ++-- artiq/firmware/satman/kernel.rs | 2 ++ 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/artiq/compiler/transforms/llvm_ir_generator.py b/artiq/compiler/transforms/llvm_ir_generator.py index 4f68d27a2..a22991720 100644 --- a/artiq/compiler/transforms/llvm_ir_generator.py +++ b/artiq/compiler/transforms/llvm_ir_generator.py @@ -1442,7 +1442,7 @@ class LLVMIRGenerator: llmax = self.map(insn.operands[1]) lltagptr = self._build_subkernel_tags(insn.arg_types) return self.llbuilder.call(self.llbuiltin("subkernel_await_message"), - [ll.Constant(lli32, 0), ll.Constant(lli64, 10_000), lltagptr, llmin, llmax], + [ll.Constant(lli32, -1), ll.Constant(lli64, 10_000), lltagptr, llmin, llmax], name="subkernel.await.args") def process_Closure(self, insn): diff --git a/artiq/firmware/ksupport/lib.rs b/artiq/firmware/ksupport/lib.rs index 5c3946494..0bbb40df1 100644 --- a/artiq/firmware/ksupport/lib.rs +++ b/artiq/firmware/ksupport/lib.rs @@ -528,7 +528,7 @@ extern fn subkernel_send_message(id: u32, is_return: bool, destination: u8, } #[unwind(allowed)] -extern fn subkernel_await_message(id: u32, timeout: i64, tags: &CSlice, min: u8, max: u8) -> u8 { +extern fn subkernel_await_message(id: i32, timeout: i64, tags: &CSlice, min: u8, max: u8) -> u8 { send(&SubkernelMsgRecvRequest { id: id, timeout: timeout, tags: tags.as_ref() }); recv!(SubkernelMsgRecvReply { status, count } => { match status { diff --git a/artiq/firmware/libproto_artiq/kernel_proto.rs b/artiq/firmware/libproto_artiq/kernel_proto.rs index e50178f46..1a2f057b7 100644 --- a/artiq/firmware/libproto_artiq/kernel_proto.rs +++ b/artiq/firmware/libproto_artiq/kernel_proto.rs @@ -108,7 +108,7 @@ pub enum Message<'a> { SubkernelAwaitFinishRequest { id: u32, timeout: i64 }, SubkernelAwaitFinishReply { status: SubkernelStatus }, SubkernelMsgSend { id: u32, destination: Option, count: u8, tag: &'a [u8], data: *const *const () }, - SubkernelMsgRecvRequest { id: u32, timeout: i64, tags: &'a [u8] }, + SubkernelMsgRecvRequest { id: i32, timeout: i64, tags: &'a [u8] }, SubkernelMsgRecvReply { status: SubkernelStatus, count: u8 }, Log(fmt::Arguments<'a>), diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index 28c6a92c7..8c1a7aaee 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -710,14 +710,14 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, } #[cfg(has_drtio)] &kern::SubkernelMsgRecvRequest { id, timeout, tags } => { - let message_received = subkernel::message_await(io, 
_subkernel_mutex, id, timeout); + let message_received = subkernel::message_await(io, _subkernel_mutex, id as u32, timeout); let (status, count) = match message_received { Ok(ref message) => (kern::SubkernelStatus::NoError, message.count), Err(SubkernelError::Timeout) => (kern::SubkernelStatus::Timeout, 0), Err(SubkernelError::IncorrectState) => (kern::SubkernelStatus::IncorrectState, 0), Err(SubkernelError::SubkernelFinished) => { let res = subkernel::retrieve_finish_status(io, aux_mutex, _subkernel_mutex, - routing_table, id)?; + routing_table, id as u32)?; if res.comm_lost { (kern::SubkernelStatus::CommLost, 0) } else if let Some(exception) = &res.exception { diff --git a/artiq/firmware/satman/kernel.rs b/artiq/firmware/satman/kernel.rs index e942aac7a..755b80d16 100644 --- a/artiq/firmware/satman/kernel.rs +++ b/artiq/firmware/satman/kernel.rs @@ -781,6 +781,8 @@ impl Manager { &kern::SubkernelMsgRecvRequest { id, timeout, tags } => { // negative timeout value means no timeout let max_time = if timeout > 0 { clock::get_ms() as i64 + timeout } else { timeout }; + // ID equal to -1 indicates wildcard for receiving arguments + let id = if id == -1 { self.current_id } else { id as u32 }; self.session.kernel_state = KernelState::MsgAwait { id: id, max_time: max_time, tags: tags.to_vec() }; Ok(()) From 502204cab22b9331af0b18e6c7fd2eb95e3aef45 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Wed, 31 Jan 2024 16:49:43 +0800 Subject: [PATCH 115/296] subkernel: fix DMA return control to wrong master --- artiq/firmware/satman/dma.rs | 13 ++++++------- artiq/firmware/satman/main.rs | 11 +++++++++++ 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/artiq/firmware/satman/dma.rs b/artiq/firmware/satman/dma.rs index 4fc7a1393..97aea721d 100644 --- a/artiq/firmware/satman/dma.rs +++ b/artiq/firmware/satman/dma.rs @@ -5,11 +5,11 @@ use board_misoc::{csr, cache::flush_l2_cache}; use proto_artiq::drtioaux_proto::PayloadStatus; use routing::{Router, Sliceable}; use kernel::Manager as KernelManager; -use ::{cricon_select, RtioMaster, MASTER_PAYLOAD_MAX_SIZE}; +use ::{cricon_select, cricon_read, RtioMaster, MASTER_PAYLOAD_MAX_SIZE}; const ALIGNMENT: usize = 64; -#[derive(Debug, PartialEq)] +#[derive(PartialEq)] enum ManagerState { Idle, Playback @@ -32,7 +32,6 @@ pub enum Error { UploadFail, } -#[derive(Debug)] struct Entry { trace: Vec, padding_len: usize, @@ -75,7 +74,6 @@ impl Entry { } } -#[derive(Debug)] enum RemoteTraceState { Unsent, Sending(usize), @@ -83,7 +81,6 @@ enum RemoteTraceState { Running(usize), } -#[derive(Debug)] struct RemoteTraces { remote_traces: BTreeMap, state: RemoteTraceState, @@ -177,12 +174,12 @@ impl RemoteTraces { } } -#[derive(Debug)] pub struct Manager { entries: BTreeMap<(u8, u32), Entry>, state: ManagerState, current_id: u32, current_source: u8, + previous_cri_master: RtioMaster, remote_entries: BTreeMap, name_map: BTreeMap, @@ -201,6 +198,7 @@ impl Manager { entries: BTreeMap::new(), current_id: 0, current_source: 0, + previous_cri_master: RtioMaster::Drtio, state: ManagerState::Idle, remote_entries: BTreeMap::new(), name_map: BTreeMap::new(), @@ -401,6 +399,7 @@ impl Manager { self.state = ManagerState::Playback; self.current_id = id; self.current_source = source; + self.previous_cri_master = cricon_read(); unsafe { csr::rtio_dma::base_address_write(ptr as u64); @@ -424,7 +423,7 @@ impl Manager { } else { self.state = ManagerState::Idle; unsafe { - cricon_select(RtioMaster::Drtio); + cricon_select(self.previous_cri_master); let error = 
csr::rtio_dma::error_read(); let channel = csr::rtio_dma::error_channel_read(); let timestamp = csr::rtio_dma::error_timestamp_read(); diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index 2105b4c26..1b9a4c53f 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -72,6 +72,7 @@ fn drtiosat_async_ready() { } } +#[derive(Clone, Copy)] pub enum RtioMaster { Drtio, Dma, @@ -89,6 +90,16 @@ pub fn cricon_select(master: RtioMaster) { } } +pub fn cricon_read() -> RtioMaster { + let val = unsafe { csr::cri_con::selected_read() }; + match val { + 0 => RtioMaster::Drtio, + 1 => RtioMaster::Dma, + 2 => RtioMaster::Kernel, + _ => unreachable!() + } +} + #[cfg(has_drtio_routing)] macro_rules! forward { ($routing_table:expr, $destination:expr, $rank:expr, $repeaters:expr, $packet:expr) => {{ From 849b77fbf2f1bf1fa353b8be30c0a925f9fc8ea2 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Wed, 31 Jan 2024 17:03:03 +0800 Subject: [PATCH 116/296] compiler: fix send_message after stackrestore --- artiq/compiler/transforms/llvm_ir_generator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/artiq/compiler/transforms/llvm_ir_generator.py b/artiq/compiler/transforms/llvm_ir_generator.py index a22991720..08b71021c 100644 --- a/artiq/compiler/transforms/llvm_ir_generator.py +++ b/artiq/compiler/transforms/llvm_ir_generator.py @@ -1717,9 +1717,9 @@ class LLVMIRGenerator: llargcount = ll.Constant(lli8, len(args)) llisreturn = ll.Constant(lli1, False) - self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr]) - return self.llbuilder.call(self.llbuiltin("subkernel_send_message"), + self.llbuilder.call(self.llbuiltin("subkernel_send_message"), [llid, llisreturn, lldest, llargcount, lltagptr, llargs]) + return self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr]) def _build_subkernel_return(self, insn): # builds a remote return. 
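
The next patch adds a compile-time check that every subkernel message name has both a sender and a receiver. As a minimal sketch, assuming the ``subkernel_send``/``subkernel_recv`` API documented earlier (the destination number and message names here are purely illustrative), an experiment like the following would be rejected by that check: ::

    from artiq.experiment import *

    @subkernel(destination=1)
    def consumer() -> TNone:
        # expects a message named "data", which nothing ever sends
        subkernel_recv("data", TInt32)

    class MismatchedNames(EnvExperiment):
        def build(self):
            self.setattr_device("core")

        @kernel
        def run(self):
            consumer()
            # typo in the name: "dta" has a sender but no receiver
            subkernel_send(1, "dta", 150)

Compilation then emits one diagnostic per unpaired message name and raises ``ValueError("Found subkernel message(s) without a full send/recv pair")``.
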
From 7fee68ede06553a13c3a4aa1f674020b5960681f Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 1 Feb 2024 11:09:59 +0800 Subject: [PATCH 117/296] subkernel messages: check for send/recv pairs --- artiq/compiler/embedding.py | 23 ++++++++++++++++--- .../compiler/transforms/artiq_ir_generator.py | 4 ++-- artiq/coredevice/core.py | 23 +++++++++++++++++-- 3 files changed, 43 insertions(+), 7 deletions(-) diff --git a/artiq/compiler/embedding.py b/artiq/compiler/embedding.py index c46c69da3..380656e44 100644 --- a/artiq/compiler/embedding.py +++ b/artiq/compiler/embedding.py @@ -51,6 +51,8 @@ class SubkernelMessageType: def __init__(self, name, value_type): self.name = name self.value_type = value_type + self.send_loc = None + self.recv_loc = None class EmbeddingMap: def __init__(self, old_embedding_map=None): @@ -219,20 +221,35 @@ class EmbeddingMap: subkernels[k] = v return subkernels - def store_subkernel_message(self, name, value_type): + def store_subkernel_message(self, name, value_type, function_type, function_loc): if name in self.subkernel_message_map: msg_id = self.subkernel_message_map[name] else: msg_id = self.store_object(SubkernelMessageType(name, value_type)) self.subkernel_message_map[name] = msg_id - return msg_id, self.retrieve_object(msg_id) + subkernel_msg = self.retrieve_object(msg_id) + if function_type == "send": + subkernel_msg.send_loc = function_loc + elif function_type == "recv": + subkernel_msg.recv_loc = function_loc + else: + assert False + return msg_id, subkernel_msg def subkernel_messages(self): messages = {} - for name, msg_id in self.subkernel_message_map.items(): + for msg_id in self.subkernel_message_map.values(): messages[msg_id] = self.retrieve_object(msg_id) return messages + def subkernel_messages_unpaired(self): + unpaired = [] + for msg_id in self.subkernel_message_map.values(): + msg_obj = self.retrieve_object(msg_id) + if msg_obj.send_loc is None or msg_obj.recv_loc is None: + unpaired.append(msg_obj) + return unpaired + def has_rpc(self): return any(filter( lambda x: (inspect.isfunction(x) or inspect.ismethod(x)) and \ diff --git a/artiq/compiler/transforms/artiq_ir_generator.py b/artiq/compiler/transforms/artiq_ir_generator.py index 6a929702f..92345caae 100644 --- a/artiq/compiler/transforms/artiq_ir_generator.py +++ b/artiq/compiler/transforms/artiq_ir_generator.py @@ -2566,7 +2566,7 @@ class ARTIQIRGenerator(algorithm.Visitor): value = self.visit(node.args[2]) else: assert False - msg_id, msg = self.embedding_map.store_subkernel_message(name, value.type) + msg_id, msg = self.embedding_map.store_subkernel_message(name, value.type, "send", node.loc) msg_id = ir.Constant(msg_id, builtins.TInt32()) if value.type != msg.value_type: diag = diagnostic.Diagnostic("error", @@ -2586,7 +2586,7 @@ class ARTIQIRGenerator(algorithm.Visitor): timeout = self.visit(node.args[2]) else: assert False - msg_id, msg = self.embedding_map.store_subkernel_message(name, vartype) + msg_id, msg = self.embedding_map.store_subkernel_message(name, vartype, "recv", node.loc) msg_id = ir.Constant(msg_id, builtins.TInt32()) if vartype != msg.value_type: diag = diagnostic.Diagnostic("error", diff --git a/artiq/coredevice/core.py b/artiq/coredevice/core.py index 4d3ed36b5..d92351d57 100644 --- a/artiq/coredevice/core.py +++ b/artiq/coredevice/core.py @@ -195,15 +195,34 @@ class Core: for sid, subkernel_fn in subkernels.items(): if sid in subkernels_compiled: continue - destination, kernel_library, sub_embedding_map = \ + destination, kernel_library, embedding_map = \ 
self.compile_subkernel(sid, subkernel_fn, embedding_map, args,
                                        subkernel_arg_types, subkernels)
             self.comm.upload_subkernel(kernel_library, sid, destination)
-            new_subkernels.update(sub_embedding_map.subkernels())
+            new_subkernels.update(embedding_map.subkernels())
             subkernels_compiled.append(sid)
         if new_subkernels == subkernels:
             break
         subkernels.update(new_subkernels)
+        # check for messages without a send/recv pair
+        unpaired_messages = embedding_map.subkernel_messages_unpaired()
+        if unpaired_messages:
+            for unpaired_message in unpaired_messages:
+                engine = _DiagnosticEngine(all_errors_are_fatal=False)
+                # errors are non-fatal in order to display
+                # all unpaired message errors before raising an exception
+                if unpaired_message.send_loc is None:
+                    diag = diagnostic.Diagnostic("error",
+                        "subkernel message '{name}' only has a receiver but no sender",
+                        {"name": unpaired_message.name},
+                        unpaired_message.recv_loc)
+                else:
+                    diag = diagnostic.Diagnostic("error",
+                        "subkernel message '{name}' only has a sender but no receiver",
+                        {"name": unpaired_message.name},
+                        unpaired_message.send_loc)
+                engine.process(diag)
+            raise ValueError("Found subkernel message(s) without a full send/recv pair")
 
     def precompile(self, function, *args, **kwargs):

From 392533f8eefdce44deae08f6e2401838b53dc5a5 Mon Sep 17 00:00:00 2001
From: mwojcik
Date: Thu, 1 Feb 2024 11:12:11 +0800
Subject: [PATCH 118/296] subkernel lit tests: fix timeouts to no-timeouts

---
 artiq/test/lit/embedding/subkernel_message_recv.py | 2 +-
 artiq/test/lit/embedding/subkernel_return.py       | 4 ++--
 artiq/test/lit/embedding/subkernel_return_none.py  | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/artiq/test/lit/embedding/subkernel_message_recv.py b/artiq/test/lit/embedding/subkernel_message_recv.py
index 2331001e7..35e094aa6 100644
--- a/artiq/test/lit/embedding/subkernel_message_recv.py
+++ b/artiq/test/lit/embedding/subkernel_message_recv.py
@@ -9,7 +9,7 @@ def entrypoint():
     # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
     message_pass()
     # CHECK-NOT: call void @subkernel_send_message\(i32 ., i1 false, i8 1, i8 1, .*\), !dbg !.
-    # CHECK: call i8 @subkernel_await_message\(i32 2, i64 10000, { i8\*, i32 }\* nonnull .*, i8 1, i8 1\), !dbg !.
+    # CHECK: call i8 @subkernel_await_message\(i32 2, i64 -1, { i8\*, i32 }\* nonnull .*, i8 1, i8 1\), !dbg !.
     subkernel_recv("message", TInt32)
 
 
diff --git a/artiq/test/lit/embedding/subkernel_return.py b/artiq/test/lit/embedding/subkernel_return.py
index 8fcf023d6..3c9d1169a 100644
--- a/artiq/test/lit/embedding/subkernel_return.py
+++ b/artiq/test/lit/embedding/subkernel_return.py
@@ -9,8 +9,8 @@ def entrypoint():
     # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !.
     # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !.
     returning()
-    # CHECK: call i8 @subkernel_await_message\(i32 1, i64 10000, { i8\*, i32 }\* nonnull .*, i8 1, i8 1\), !dbg !.
-    # CHECK: call void @subkernel_await_finish\(i32 1, i64 10000\), !dbg !.
+    # CHECK: call i8 @subkernel_await_message\(i32 1, i64 -1, { i8\*, i32 }\* nonnull .*, i8 1, i8 1\), !dbg !.
+    # CHECK: call void @subkernel_await_finish\(i32 1, i64 -1\), !dbg !.
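+    # a timeout of -1 disables the timeout: the await blocks until the subkernel finishes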
subkernel_await(returning) # CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr diff --git a/artiq/test/lit/embedding/subkernel_return_none.py b/artiq/test/lit/embedding/subkernel_return_none.py index d1636220c..a7795f785 100644 --- a/artiq/test/lit/embedding/subkernel_return_none.py +++ b/artiq/test/lit/embedding/subkernel_return_none.py @@ -9,8 +9,8 @@ def entrypoint(): # CHECK: call void @subkernel_load_run\(i32 1, i8 1, i1 true\), !dbg !. # CHECK-NOT: call void @subkernel_send_message\(.*\), !dbg !. returning_none() - # CHECK: call void @subkernel_await_finish\(i32 1, i64 10000\), !dbg !. - # CHECK-NOT: call i8 @subkernel_await_message\(i32 1, i64 10000\, .*\), !dbg !. + # CHECK: call void @subkernel_await_finish\(i32 1, i64 -1\), !dbg !. + # CHECK-NOT: call i8 @subkernel_await_message\(i32 1, i64 -1\, .*\), !dbg !. subkernel_await(returning_none) # CHECK-L: declare void @subkernel_load_run(i32, i8, i1) local_unnamed_addr From b64c75fd71af13c80f133d653bc847a199b326fe Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 1 Feb 2024 11:27:41 +0800 Subject: [PATCH 119/296] subkernel: warn on kernel finish w/ pending msgs --- artiq/firmware/satman/kernel.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/artiq/firmware/satman/kernel.rs b/artiq/firmware/satman/kernel.rs index 755b80d16..250f6a263 100644 --- a/artiq/firmware/satman/kernel.rs +++ b/artiq/firmware/satman/kernel.rs @@ -267,6 +267,14 @@ impl MessageManager { } None } + + pub fn pending_ids(&self) -> Vec { + let mut pending_ids: Vec = Vec::new(); + for msg in self.in_queue.iter() { + pending_ids.push(msg.id); + } + pending_ids + } } impl Session { @@ -487,7 +495,6 @@ impl Manager { self.stop(); self.runtime_exception(Error::DmaError(DmaError::UploadFail)); } - } } @@ -501,6 +508,10 @@ impl Manager { if let Some(subkernel_finished) = self.last_finished.take() { info!("subkernel {} finished, with exception: {}", subkernel_finished.id, subkernel_finished.with_exception); + let pending = self.session.messages.pending_ids(); + if pending.len() > 0 { + warn!("subkernel terminated with messages still pending: {:?}", pending); + } router.route(drtioaux::Packet::SubkernelFinished { destination: subkernel_finished.source, id: subkernel_finished.id, with_exception: subkernel_finished.with_exception, exception_src: subkernel_finished.exception_source From b648a2930bbb9515b2b9f9563f2b03d4cd9dd4d9 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 1 Feb 2024 12:08:41 +0800 Subject: [PATCH 120/296] docs: elaborate on subkernel message names --- doc/manual/getting_started_core.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/manual/getting_started_core.rst b/doc/manual/getting_started_core.rst index a0a9e762c..1ffaccb62 100644 --- a/doc/manual/getting_started_core.rst +++ b/doc/manual/getting_started_core.rst @@ -373,4 +373,8 @@ Subkernels besides arguments and returns, can also pass messages between each ot The ``subkernel_send(destination, name, value)`` function requires three arguments: destination, name of the message that will be linked with the ``subkernel_recv()``, and the passed value. -The ``subkernel_recv(name, type, [timeout])`` function requires two arguments: message name (matching the name provided in ``subkernel_send``) and expected type. Optionally, it accepts a third argument - timeout for the operation in milliseconds. If the value is negative, timeout is disabled. The default value is no timeout. 
The value and declared types between the function pair with the same name must match. \ No newline at end of file +The ``subkernel_recv(name, type, [timeout])`` function requires two arguments: message name (matching the name provided in ``subkernel_send``) and expected type. Optionally, it accepts a third argument - timeout for the operation in milliseconds. If the value is negative, timeout is disabled. The default value is no timeout. + +The "name" argument in both ``send`` and ``recv`` functions acts as a link, and must match exactly between the two for a successful message transaction. The type of the value sent by ``subkernel_send`` is checked against the type declared in ``subkernel_recv`` with the same name, to avoid misinterpretation of the data. The compiler also checks if all subkernel message names have both a sending and receiving functions to help with typos. However, it cannot help if wrong names are used - the receiver will wait only for a matching message for the duration of the timeout. + +A message can be received only when a subkernel is running, and is put into a buffer to be taken when required - thus whatever sending order will not cause a deadlock. However, a subkernel may timeout or wait forever, if destination or names do not match (e.g. message sent to wrong destination, or under different than expected name even if types match). \ No newline at end of file From 3aaa7e04f26a495e8847e47424bfc16d76d82bf8 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Thu, 1 Feb 2024 18:58:27 +0800 Subject: [PATCH 121/296] flake: update dependencies --- flake.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index 9a3245079..8487a63cb 100644 --- a/flake.lock +++ b/flake.lock @@ -60,11 +60,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1704874635, - "narHash": "sha256-YWuCrtsty5vVZvu+7BchAxmcYzTMfolSPP5io8+WYCg=", + "lastModified": 1706515015, + "narHash": "sha256-eFfY5A7wlYy3jD/75lx6IJRueg4noE+jowl0a8lIlVo=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "3dc440faeee9e889fe2d1b4d25ad0f430d449356", + "rev": "f4a8d6d5324c327dcc2d863eb7f3cc06ad630df4", "type": "github" }, "original": { From a7b045a4786423c5b30ce0fb3fc0133fd393f0ea Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 1 Feb 2024 13:47:01 +0800 Subject: [PATCH 122/296] waveform: misc fixes --- artiq/dashboard/waveform.py | 46 ++++++++++++++++++++++++++++--------- 1 file changed, 35 insertions(+), 11 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index bc9fe255b..a9c80b2a8 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -1,11 +1,15 @@ import os import asyncio import logging +import bisect +import itertools +import math from PyQt5 import QtCore, QtWidgets, QtGui from PyQt5.QtCore import Qt import pyqtgraph as pg +import numpy as np from sipyco.sync_struct import Subscriber from sipyco.pc_rpc import AsyncioClient @@ -193,6 +197,9 @@ class BitWaveform(_BaseWaveform): def onDataChange(self, data): try: + for arw in self._arrows: + self.removeItem(arw) + self._arrows = [] l = len(data) display_y = np.empty(l) display_x = np.empty(l) @@ -243,6 +250,7 @@ class BitVectorWaveform(_BaseWaveform): _BaseWaveform.__init__(self, name, width, parent) self._labels = [] self.x_data = [] + self._format_string = "{:0=" + str(math.ceil(width / 4)) + "X}" self.view_box.sigTransformChanged.connect(self._update_labels) def _update_labels(self): @@ -253,7 +261,7 @@ class BitVectorWaveform(_BaseWaveform): 
right_label_i = bisect.bisect_right(self.x_data, xmax) + 1 for i, j in itertools.pairwise(range(left_label_i, right_label_i)): x1 = self.x_data[i] - x2 = self.x_data[j] if j < len(self.x_data) else self._stopped_x + x2 = self.x_data[j] if j < len(self.x_data) else self.stopped_x lbl = self._labels[i] bounds = lbl.boundingRect() bounds_view = self.view_box.mapSceneToView(bounds) @@ -262,10 +270,13 @@ class BitVectorWaveform(_BaseWaveform): def onDataChange(self, data): try: - self.x_data = zip(*data)[0] + for lbl in self._labels: + self.plot_item.removeItem(lbl) + self._labels = [] + self.x_data, _ = zip(*data) l = len(data) - display_x = np.array(l * 2) - display_y = np.array(l * 2) + display_x = np.empty(l * 2) + display_y = np.empty(l * 2) for i, coord in enumerate(data): x, y = coord display_x[i * 2] = x @@ -273,7 +284,7 @@ class BitVectorWaveform(_BaseWaveform): display_y[i * 2] = 0 display_y[i * 2 + 1] = int(int(y) != 0) lbl = pg.TextItem( - self._format_string.format(y), anchor=(0, 0.5)) + self._format_string.format(int(y, 2)), anchor=(0, 0.5)) lbl.setPos(x, 0.5) lbl.setTextWidth(100) self._labels.append(lbl) @@ -291,10 +302,14 @@ class LogWaveform(_BaseWaveform): _BaseWaveform.__init__(self, name, width, parent) self.plot_data_item.opts['pen'] = None self.plot_data_item.opts['symbol'] = 'x' + self._labels = [] def onDataChange(self, data): try: - x_data = zip(*data)[0] + for lbl in self._labels: + self.plot_item.removeItem(lbl) + self._labels = [] + x_data, _ = zip(*data) self.plot_data_item.setData( x=x_data, y=np.ones(len(x_data))) old_msg = "" @@ -305,14 +320,18 @@ class LogWaveform(_BaseWaveform): else: lbl = pg.TextItem(old_msg) self.addItem(lbl) + self._labels.append(lbl) lbl.setPos(old_x, 1) old_msg = msg old_x = x lbl = pg.TextItem(old_msg) self.addItem(lbl) + self._labels.append(lbl) lbl.setPos(old_x, 1) except: logger.error('Error when displaying waveform: {}'.format(self.name), exc_info=True) + for lbl in self._labels: + self.plot_item.removeItem(lbl) self.plot_data_item.setData(x=[], y=[]) @@ -320,6 +339,9 @@ class _WaveformView(QtWidgets.QWidget): def __init__(self, parent): QtWidgets.QWidget.__init__(self, parent=parent) + self._stopped_x = None + self._timescale = 1 + layout = QtWidgets.QVBoxLayout() layout.setContentsMargins(0, 0, 0, 0) layout.setSpacing(0) @@ -346,6 +368,7 @@ class _WaveformView(QtWidgets.QWidget): scroll_area.setWidgetResizable(True) scroll_area.setContentsMargins(0, 0, 0, 0) scroll_area.setFrameShape(QtWidgets.QFrame.NoFrame) + scroll_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) layout.addWidget(scroll_area) self._splitter = VDragDropSplitter(parent=scroll_area) @@ -368,6 +391,8 @@ class _WaveformView(QtWidgets.QWidget): def setStoppedX(self, stopped_x): self._stopped_x = stopped_x + self._ref_vb.setLimits(xMax=stopped_x) + self._ref_vb.setRange(xRange=(0, stopped_x)) for i in range(self._model.rowCount()): self._splitter.widget(i).setStoppedX(stopped_x) @@ -446,10 +471,9 @@ class _WaveformModel(QtCore.QAbstractTableModel): data_col = self.headers.index("data") for i in range(top, bottom): name = self.data(self.index(i, name_col)) - if name in waveform_data: - self.backing_struct[i][data_col] = waveform_data[name] - self.dataChanged.emit(self.index(i, data_col), - self.index(i, data_col)) + self.backing_struct[i][data_col] = waveform_data.get(name, []) + self.dataChanged.emit(self.index(i, data_col), + self.index(i, data_col)) def update_all(self, waveform_data): self.update_data(waveform_data, 0, self.rowCount()) @@ -509,7 +533,7 
@@ class _AddChannelDialog(QtWidgets.QDialog): key = self._model.index_to_key(select) if key is not None: width, ty = self._model[key].ref - channels.append([key, width, ty, []]) + channels.append([key, ty, width, []]) self.accepted.emit(channels) self.close() From dcf1bba8c63c701b2a7b52f95107cd140a67de6f Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 31 Jan 2024 15:00:54 +0800 Subject: [PATCH 123/296] waveform: implement _create_waveform --- artiq/dashboard/waveform.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index a9c80b2a8..256de1504 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -421,7 +421,20 @@ class _WaveformView(QtWidgets.QWidget): self._splitter.insertWidget(dest_row, w) def _create_waveform(self, row): - raise NotImplementedError + name = self._model.data(self._model.index(row, 0)) + ty = self._model.data(self._model.index(row, 1)) + width = self._model.data(self._model.index(row, 2)) + waveform_cls = { + WaveformType.BIT: BitWaveform, + WaveformType.VECTOR: BitVectorWaveform, + WaveformType.ANALOG: AnalogWaveform, + WaveformType.LOG: LogWaveform + }[ty] + w = waveform_cls(name, width, parent=self._splitter) + w.setXLink(self._ref_vb) + w.setStoppedX(self._stopped_x) + w.setTimescale(self._timescale) + return w def _resize(self): self._splitter.setFixedHeight( From 1e9070a2af8d551d88ade36e4aee04975ba8ed2e Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 1 Feb 2024 17:43:47 +0800 Subject: [PATCH 124/296] testing: add coreanalyzer_proxy smoke test --- artiq/test/test_frontends.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/artiq/test/test_frontends.py b/artiq/test/test_frontends.py index caef4839c..3dc1a6b87 100644 --- a/artiq/test/test_frontends.py +++ b/artiq/test/test_frontends.py @@ -9,7 +9,7 @@ class TestFrontends(unittest.TestCase): """Test --help as a simple smoke test against catastrophic breakage.""" commands = { "aqctl": [ - "corelog", "moninj_proxy" + "corelog", "moninj_proxy", "coreanalyzer_proxy" ], "artiq": [ "client", "compile", "coreanalyzer", "coremgmt", From 8b503c3b4f1ed9adff03c1eab6de495a3c5f39a1 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 1 Feb 2024 17:15:23 +0800 Subject: [PATCH 125/296] waveform: add remove, clear waveform actions --- artiq/dashboard/waveform.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 256de1504..b309ee229 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -434,8 +434,18 @@ class _WaveformView(QtWidgets.QWidget): w.setXLink(self._ref_vb) w.setStoppedX(self._stopped_x) w.setTimescale(self._timescale) + action = QtWidgets.QAction("Delete waveform", w) + action.triggered.connect(lambda: self._delete_waveform(w)) + w.addAction(action) + action = QtWidgets.QAction("Clear waveforms", w) + action.triggered.connect(self._model.clear) + w.addAction(action) return w + def _delete_waveform(self, waveform): + row = self._splitter.indexOf(waveform) + self._model.pop(row) + def _resize(self): self._splitter.setFixedHeight( int((WAVEFORM_MIN_HEIGHT + WAVEFORM_MAX_HEIGHT) * self._model.rowCount() / 2)) @@ -479,6 +489,11 @@ class _WaveformModel(QtCore.QAbstractTableModel): self.backing_struct.insert(dest, self.backing_struct.pop(src)) self.endMoveRows() + def clear(self): + self.beginRemoveRows(QtCore.QModelIndex(), 0, len(self.backing_struct) - 1) + 
self.backing_struct.clear() + self.endRemoveRows() + def update_data(self, waveform_data, top, bottom): name_col = self.headers.index("name") data_col = self.headers.index("data") From 40cea3028567ed3dce82fad4ff5b4824f6723e3e Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Fri, 2 Feb 2024 11:57:38 +0800 Subject: [PATCH 126/296] waveform: add open, save channel list --- artiq/dashboard/waveform.py | 44 +++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index b309ee229..8fef900c0 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -13,6 +13,7 @@ import numpy as np from sipyco.sync_struct import Subscriber from sipyco.pc_rpc import AsyncioClient +from sipyco import pyon from artiq.tools import exc_to_warning from artiq.coredevice import comm_analyzer @@ -494,6 +495,14 @@ class _WaveformModel(QtCore.QAbstractTableModel): self.backing_struct.clear() self.endRemoveRows() + def export_list(self): + return [[row[0], row[1].value, row[2]] for row in self.backing_struct] + + def import_list(self, channel_list): + self.clear() + data = [[row[0], WaveformType(row[1]), row[2], []] for row in channel_list] + self.extend(data) + def update_data(self, waveform_data, top, bottom): name_col = self.headers.index("name") data_col = self.headers.index("data") @@ -624,6 +633,8 @@ class WaveformDock(QtWidgets.QDockWidget): self._add_async_action("Open trace...", self.load_trace) self._add_async_action("Save trace...", self.save_trace) self._add_async_action("Save trace as VCD...", self.save_vcd) + self._add_async_action("Open channel list...", self.load_channels) + self._add_async_action("Save channel list...", self.save_channels) self._menu_btn.setMenu(self._file_menu) self._waveform_view = _WaveformView(self) @@ -722,6 +733,39 @@ class WaveformDock(QtWidgets.QDockWidget): except: logger.error("Failed to save trace as VCD", exc_info=True) + async def load_channels(self): + try: + filename = await get_open_file_name( + self, + "Open channel list", + self._current_dir, + "PYON files (*.pyon);;All files (*.*)") + except asyncio.CancelledError: + return + self._current_dir = os.path.dirname(filename) + try: + channel_list = pyon.load_file(filename) + self._waveform_model.import_list(channel_list) + self._waveform_model.update_all(self._waveform_data['data']) + except: + logger.error("Failed to open channel list", exc_info=True) + + async def save_channels(self): + try: + filename = await get_save_file_name( + self, + "Save channel list", + self._current_dir, + "PYON files (*.pyon);;All files (*.*)") + except asyncio.CancelledError: + return + self._current_dir = os.path.dirname(filename) + try: + channel_list = self._waveform_model.export_list() + pyon.store_file(filename, channel_list) + except: + logger.error("Failed to save channel list", exc_info=True) + def _process_ddb(self): channel_list = comm_analyzer.get_channel_list(self._ddb) self._channel_model.clear() From 7f43c5c31a27c32b633c0d7f9088ae364be4b48f Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 5 Feb 2024 11:43:32 +0800 Subject: [PATCH 127/296] waveform: add cursor --- artiq/dashboard/waveform.py | 124 +++++++++++++++++++++++++++++++++--- 1 file changed, 115 insertions(+), 9 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 8fef900c0..831efbac6 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -120,6 +120,8 @@ class _BackgroundItem(pg.GraphicsWidgetAnchor, 
pg.GraphicsWidget): class _BaseWaveform(pg.PlotWidget): + cursorMove = QtCore.pyqtSignal(float) + def __init__(self, name, width, parent=None, pen="r", stepMode="right", connect="finite"): pg.PlotWidget.__init__(self, parent=parent, @@ -136,6 +138,8 @@ class _BaseWaveform(pg.PlotWidget): self.name = name self.width = width + self.x_data = [] + self.y_data = [] self.plot_item = self.getPlotItem() self.plot_item.hideButtons() @@ -160,9 +164,19 @@ class _BaseWaveform(pg.PlotWidget): rect = self.title_label.boundingRect() rect.setHeight(rect.height() * 2) + rect.setWidth(225) self.label_bg = _BackgroundItem(parent=self.plot_item, rect=rect) self.label_bg.anchor(itemPos=(0, 0), parentPos=(0, 0), offset=(0, 0)) + self.cursor = pg.InfiniteLine() + self.cursor_y = None + self.addItem(self.cursor) + + self.cursor_label = pg.LabelItem('', parent=self.plot_item) + self.cursor_label.anchor(itemPos=(0, 0), parentPos=(0, 0), offset=(0, 20)) + self.cursor_label.setAttr('justify', 'left') + self.cursor_label.setZValue(10) + def setStoppedX(self, stopped_x): self.stopped_x = stopped_x self.view_box.setLimits(xMax=stopped_x) @@ -171,7 +185,20 @@ class _BaseWaveform(pg.PlotWidget): self.timescale = timescale def onDataChange(self, data): - raise NotImplementedError + try: + self.x_data, self.y_data = zip(*data) + except: + logger.error("Error getting data for waveform: {}".format(self.name), exc_info=True) + + def onCursorMove(self, x): + self.cursor.setValue(x) + if len(self.x_data) < 1: + return + ind = bisect.bisect_left(self.x_data, x) - 1 + dr = self.plot_data_item.dataRect() + self.cursor_y = None + if dr is not None and 0 <= ind < len(self.y_data): + self.cursor_y = self.y_data[ind] def mouseMoveEvent(self, e): if e.buttons() == QtCore.Qt.LeftButton \ @@ -190,6 +217,10 @@ class _BaseWaveform(pg.PlotWidget): if e.modifiers() & QtCore.Qt.ControlModifier: super().wheelEvent(e) + def mouseDoubleClickEvent(self, e): + pos = self.view_box.mapSceneToView(e.pos()) + self.cursorMove.emit(pos.x()) + class BitWaveform(_BaseWaveform): def __init__(self, name, width, parent=None): @@ -197,6 +228,7 @@ class BitWaveform(_BaseWaveform): self._arrows = [] def onDataChange(self, data): + _BaseWaveform.onDataChange(self, data) try: for arw in self._arrows: self.removeItem(arw) @@ -228,29 +260,36 @@ class BitWaveform(_BaseWaveform): self.removeItem(arw) self.plot_data_item.setData(x=[], y=[]) + def onCursorMove(self, x): + _BaseWaveform.onCursorMove(self, x) + self.cursor_label.setText(self.cursor_y) + class AnalogWaveform(_BaseWaveform): def __init__(self, name, width, parent=None): _BaseWaveform.__init__(self, name, width, parent) def onDataChange(self, data): + _BaseWaveform.onDataChange(self, data) try: - x_data, y_data = zip(*data) - self.plot_data_item.setData(x=x_data, y=y_data) - max_y = max(y_data) - min_y = min(y_data) + self.plot_data_item.setData(x=self.x_data, y=self.y_data) + max_y = max(self.y_data) + min_y = min(self.y_data) self.plot_item.setRange(yRange=(min_y, max_y), padding=0.1) except: logger.error( 'Error when displaying waveform: {}'.format(self.name), exc_info=True) self.plot_data_item.setData(x=[], y=[]) + def onCursorMove(self, x): + _BaseWaveform.onCursorMove(self, x) + self.cursor_label.setText(self.cursor_y) + class BitVectorWaveform(_BaseWaveform): def __init__(self, name, width, parent=None): _BaseWaveform.__init__(self, name, width, parent) self._labels = [] - self.x_data = [] self._format_string = "{:0=" + str(math.ceil(width / 4)) + "X}" 
self.view_box.sigTransformChanged.connect(self._update_labels) @@ -270,11 +309,11 @@ class BitVectorWaveform(_BaseWaveform): self.addItem(lbl) def onDataChange(self, data): + _BaseWaveform.onDataChange(self, data) try: for lbl in self._labels: self.plot_item.removeItem(lbl) self._labels = [] - self.x_data, _ = zip(*data) l = len(data) display_x = np.empty(l * 2) display_y = np.empty(l * 2) @@ -297,6 +336,13 @@ class BitVectorWaveform(_BaseWaveform): self.plot_item.removeItem(lbl) self.plot_data_item.setData(x=[], y=[]) + def onCursorMove(self, x): + _BaseWaveform.onCursorMove(self, x) + t = None + if self.cursor_y is not None: + t = self._format_string.format(int(self.cursor_y, 2)) + self.cursor_label.setText(t) + class LogWaveform(_BaseWaveform): def __init__(self, name, width, parent=None): @@ -306,13 +352,13 @@ class LogWaveform(_BaseWaveform): self._labels = [] def onDataChange(self, data): + _BaseWaveform.onDataChange(self, data) try: for lbl in self._labels: self.plot_item.removeItem(lbl) self._labels = [] - x_data, _ = zip(*data) self.plot_data_item.setData( - x=x_data, y=np.ones(len(x_data))) + x=self.x_data, y=np.ones(len(self.x_data))) old_msg = "" old_x = 0 for x, msg in data: @@ -335,13 +381,19 @@ class LogWaveform(_BaseWaveform): self.plot_item.removeItem(lbl) self.plot_data_item.setData(x=[], y=[]) + def onCursorMove(self, x): + _BaseWaveform.onCursorMove(self, x) + class _WaveformView(QtWidgets.QWidget): + cursorMove = QtCore.pyqtSignal(float) + def __init__(self, parent): QtWidgets.QWidget.__init__(self, parent=parent) self._stopped_x = None self._timescale = 1 + self._cursor_x = 0 layout = QtWidgets.QVBoxLayout() layout.setContentsMargins(0, 0, 0, 0) @@ -376,6 +428,8 @@ class _WaveformView(QtWidgets.QWidget): self._splitter.setHandleWidth(1) scroll_area.setWidget(self._splitter) + self.cursorMove.connect(self.onCursorMove) + def setModel(self, model): self._model = model self._model.dataChanged.connect(self.onDataChange) @@ -421,6 +475,11 @@ class _WaveformView(QtWidgets.QWidget): w = self._splitter.widget(src_start) self._splitter.insertWidget(dest_row, w) + def onCursorMove(self, x): + self._cursor_x = x + for i in range(self._model.rowCount()): + self._splitter.widget(i).onCursorMove(x) + def _create_waveform(self, row): name = self._model.data(self._model.index(row, 0)) ty = self._model.data(self._model.index(row, 1)) @@ -435,6 +494,8 @@ class _WaveformView(QtWidgets.QWidget): w.setXLink(self._ref_vb) w.setStoppedX(self._stopped_x) w.setTimescale(self._timescale) + w.cursorMove.connect(self.cursorMove) + w.onCursorMove(self._cursor_x) action = QtWidgets.QAction("Delete waveform", w) action.triggered.connect(lambda: self._delete_waveform(w)) w.addAction(action) @@ -516,6 +577,45 @@ class _WaveformModel(QtCore.QAbstractTableModel): self.update_data(waveform_data, 0, self.rowCount()) +class _CursorTimeControl(QtWidgets.QLineEdit): + submit = QtCore.pyqtSignal(float) + + def __init__(self, parent): + QtWidgets.QLineEdit.__init__(self, parent=parent) + self._text = "" + self._value = 0 + self._timescale = 1 + self.setDisplayValue(0) + self.textChanged.connect(self._onTextChange) + self.returnPressed.connect(self._onReturnPress) + + def setTimescale(self, timescale): + self._timescale = timescale + + def _onTextChange(self, text): + self._text = text + + def setDisplayValue(self, value): + self._value = value + self._text = pg.siFormat(value * 1e-12 * self._timescale, + suffix="s", + allowUnicode=False, + precision=15) + self.setText(self._text) + + def 
_setValueFromText(self, text): + try: + self._value = pg.siEval(text) * (1e12 / self._timescale) + except: + logger.error("Error when parsing cursor time input", exc_info=True) + + def _onReturnPress(self): + self._setValueFromText(self._text) + self.setDisplayValue(self._value) + self.submit.emit(self._value) + self.clearFocus() + + class Model(DictSyncTreeSepModel): def __init__(self, init): DictSyncTreeSepModel.__init__(self, "/", ["Channels"], init) @@ -641,6 +741,11 @@ class WaveformDock(QtWidgets.QDockWidget): self._waveform_view.setModel(self._waveform_model) grid.addWidget(self._waveform_view, 1, 0, colspan=12) + self._cursor_control = _CursorTimeControl(self) + self._waveform_view.cursorMove.connect(self._cursor_control.setDisplayValue) + self._cursor_control.submit.connect(self._waveform_view.onCursorMove) + grid.addWidget(self._cursor_control, 0, 3, colspan=6) + def _add_async_action(self, label, coro): action = QtWidgets.QAction(label, self) action.triggered.connect( @@ -674,6 +779,7 @@ class WaveformDock(QtWidgets.QDockWidget): self._waveform_model.update_all(self._waveform_data['data']) self._waveform_view.setStoppedX(self._waveform_data['stopped_x']) self._waveform_view.setTimescale(self._waveform_data['timescale']) + self._cursor_control.setTimescale(self._waveform_data['timescale']) async def load_trace(self): try: From 7c583b9c04fce80c695774a307e7ac6797426b98 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Fri, 9 Feb 2024 14:04:16 +0800 Subject: [PATCH 128/296] flake: update dependencies --- flake.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flake.lock b/flake.lock index 8487a63cb..d42b650b2 100644 --- a/flake.lock +++ b/flake.lock @@ -11,11 +11,11 @@ ] }, "locked": { - "lastModified": 1701573753, - "narHash": "sha256-vhEtXjb9AM6/HnsgfVmhJQeqQ9JqysUm7iWNzTIbexs=", + "lastModified": 1707216368, + "narHash": "sha256-ZXoqzG2QsVsybALLYXs473avXcyKSZNh2kIgcPo60XQ=", "owner": "m-labs", "repo": "artiq-comtools", - "rev": "199bdabf4de49cb7ada8a4ac7133008e0f8434b7", + "rev": "e5d0204490bccc07ef9141b0d7c405ab01cb8273", "type": "github" }, "original": { @@ -60,11 +60,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1706515015, - "narHash": "sha256-eFfY5A7wlYy3jD/75lx6IJRueg4noE+jowl0a8lIlVo=", + "lastModified": 1707347730, + "narHash": "sha256-0etC/exQIaqC9vliKhc3eZE2Mm2wgLa0tj93ZF/egvM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "f4a8d6d5324c327dcc2d863eb7f3cc06ad630df4", + "rev": "6832d0d99649db3d65a0e15fa51471537b2c56a6", "type": "github" }, "original": { From efb8aaf9f9d7415802338a736c22039d6fcc0e18 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 8 Feb 2024 15:44:09 +0800 Subject: [PATCH 129/296] comm_analyzer: fix stopped_x --- artiq/coredevice/comm_analyzer.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/artiq/coredevice/comm_analyzer.py b/artiq/coredevice/comm_analyzer.py index 8b6d7c526..61c13348a 100644 --- a/artiq/coredevice/comm_analyzer.py +++ b/artiq/coredevice/comm_analyzer.py @@ -223,6 +223,7 @@ class VCDManager: self.out = fileobj self.codes = vcd_codes() self.current_time = None + self.start_time = 0 def set_timescale_ps(self, timescale): self.out.write("$timescale {}ps $end\n".format(round(timescale))) @@ -240,10 +241,14 @@ class VCDManager: self.out.write("$upscope $end\n") def set_time(self, time): + time -= self.start_time if time != self.current_time: self.out.write("#{}\n".format(time)) self.current_time = time + def set_start_time(self, time): + 
self.start_time = time + def set_end_time(self, time): pass @@ -251,6 +256,8 @@ class VCDManager: class WaveformManager: def __init__(self): self.current_time = 0 + self.start_time = 0 + self.end_time = 0 self.channels = list() self.current_scope = "" self.trace = {"timescale": 1, "stopped_x": None, "logs": dict(), "data": dict()} @@ -274,11 +281,18 @@ class WaveformManager: self.current_scope = old_scope def set_time(self, time): + time -= self.start_time for channel in self.channels: channel.set_time(time) + def set_start_time(self, time): + self.start_time = time + if self.trace["stopped_x"] is not None: + self.trace["stopped_x"] = self.end_time - self.start_time + def set_end_time(self, time): - self.trace["stopped_x"] = time + self.end_time = time + self.trace["stopped_x"] = self.end_time - self.start_time class WaveformChannel: @@ -712,11 +726,12 @@ def decoded_dump_to_target(manager, devices, dump, uniform_interval): start_time = get_message_time(m) if start_time: break - - t0 = 0 + if not uniform_interval: + manager.set_start_time(start_time) + t0 = start_time for i, message in enumerate(messages): if message.channel in channel_handlers: - t = get_message_time(message) - start_time + t = get_message_time(message) if t >= 0: if uniform_interval: interval.set_value_double((t - t0)*ref_period) From 720cbb44905b8f23179dc41b6bbdc5c839a38ade Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 7 Feb 2024 16:44:53 +0800 Subject: [PATCH 130/296] comm_analyzer, waveform add ndecimals --- artiq/coredevice/comm_analyzer.py | 44 +++++++++++++++++++---------- artiq/dashboard/waveform.py | 47 ++++++++++++++++++------------- 2 files changed, 56 insertions(+), 35 deletions(-) diff --git a/artiq/coredevice/comm_analyzer.py b/artiq/coredevice/comm_analyzer.py index 61c13348a..be6993bdd 100644 --- a/artiq/coredevice/comm_analyzer.py +++ b/artiq/coredevice/comm_analyzer.py @@ -8,11 +8,15 @@ from enum import Enum import struct import logging import socket +import math logger = logging.getLogger(__name__) +DEFAULT_REF_PERIOD = 1e-9 + + class MessageType(Enum): output = 0b00 input = 0b01 @@ -228,7 +232,7 @@ class VCDManager: def set_timescale_ps(self, timescale): self.out.write("$timescale {}ps $end\n".format(round(timescale))) - def get_channel(self, name, width, ty): + def get_channel(self, name, width, ty, ndecimals=0): code = next(self.codes) self.out.write("$var wire {width} {code} {name} $end\n" .format(name=name, code=code, width=width)) @@ -265,9 +269,9 @@ class WaveformManager: def set_timescale_ps(self, timescale): self.trace["timescale"] = int(timescale) - def get_channel(self, name, width, ty): + def get_channel(self, name, width, ty, ndecimals=0): if ty == WaveformType.LOG: - self.trace["logs"][self.current_scope + name] = (width, ty) + self.trace["logs"][self.current_scope + name] = (width, ty, ndecimals) data = self.trace["data"][self.current_scope + name] = list() channel = WaveformChannel(data, self.current_time) self.channels.append(channel) @@ -318,8 +322,8 @@ class ChannelSignatureManager: self.current_scope = "" self.channels = dict() - def get_channel(self, name, width, ty): - self.channels[self.current_scope + name] = (width, ty) + def get_channel(self, name, width, ty, ndecimals=0): + self.channels[self.current_scope + name] = (width, ty, ndecimals) return None @contextmanager @@ -361,8 +365,9 @@ class TTLClockGenHandler: def __init__(self, manager, name, ref_period): self.name = name self.ref_period = ref_period + ndecimals = max(0, math.ceil(math.log10(2**24 * ref_period))) 
self.channel_frequency = manager.get_channel( - "ttl_clkgen/" + name, 64, ty=WaveformType.ANALOG) + "ttl_clkgen/" + name, 64, ty=WaveformType.ANALOG, ndecimals=ndecimals) def process_message(self, message): if isinstance(message, OutputMessage): @@ -383,11 +388,17 @@ class DDSHandler: def add_dds_channel(self, name, dds_channel_nr): dds_channel = dict() + frequency_decimals = max(0, math.ceil(math.log10(2**32 / self.sysclk))) + phase_decimals = max(0, math.ceil(math.log10(2**16))) with self.manager.scope("dds", name): dds_channel["vcd_frequency"] = \ - self.manager.get_channel(name + "/frequency", 64, ty=WaveformType.ANALOG) + self.manager.get_channel(name + "/frequency", 64, + ty=WaveformType.ANALOG, + ndecimals=frequency_decimals) dds_channel["vcd_phase"] = \ - self.manager.get_channel(name + "/phase", 64, ty=WaveformType.ANALOG) + self.manager.get_channel(name + "/phase", 64, + ty=WaveformType.ANALOG, + ndecimals=phase_decimals) dds_channel["ftw"] = [None, None] dds_channel["pow"] = None self.dds_channels[dds_channel_nr] = dds_channel @@ -662,8 +673,12 @@ def get_channel_list(devices): manager = ChannelSignatureManager() create_channel_handlers(manager, devices, 1e-9, 3e9, False) manager.get_channel("timestamp", 64, ty=WaveformType.VECTOR) - manager.get_channel("interval", 64, ty=WaveformType.ANALOG) - manager.get_channel("rtio_slack", 64, ty=WaveformType.ANALOG) + ref_period = get_ref_period(devices) + if ref_period is None: + ref_period = DEFAULT_REF_PERIOD + ndecimals = max(0, math.ceil(math.log10(1 / ref_period))) + manager.get_channel("interval", 64, ty=WaveformType.ANALOG, ndecimals=ndecimals) + manager.get_channel("rtio_slack", 64, ty=WaveformType.ANALOG, ndecimals=ndecimals) return manager.channels @@ -685,12 +700,11 @@ def decoded_dump_to_waveform_data(devices, dump, uniform_interval=False): def decoded_dump_to_target(manager, devices, dump, uniform_interval): ref_period = get_ref_period(devices) - if ref_period is not None: - if not uniform_interval: - manager.set_timescale_ps(ref_period*1e12) - else: + if ref_period is None: logger.warning("unable to determine core device ref_period") - ref_period = 1e-9 # guess + ref_period = DEFAULT_REF_PERIOD + if not uniform_interval: + manager.set_timescale_ps(ref_period*1e12) dds_sysclk = get_dds_sysclk(devices) if dds_sysclk is None: logger.warning("unable to determine DDS sysclk") diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 831efbac6..a20d65a32 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -122,7 +122,8 @@ class _BackgroundItem(pg.GraphicsWidgetAnchor, pg.GraphicsWidget): class _BaseWaveform(pg.PlotWidget): cursorMove = QtCore.pyqtSignal(float) - def __init__(self, name, width, parent=None, pen="r", stepMode="right", connect="finite"): + def __init__(self, name, width, ndecimals, + parent=None, pen="r", stepMode="right", connect="finite"): pg.PlotWidget.__init__(self, parent=parent, x=None, @@ -138,6 +139,8 @@ class _BaseWaveform(pg.PlotWidget): self.name = name self.width = width + self.ndecimals = ndecimals + self.x_data = [] self.y_data = [] @@ -223,8 +226,8 @@ class _BaseWaveform(pg.PlotWidget): class BitWaveform(_BaseWaveform): - def __init__(self, name, width, parent=None): - _BaseWaveform.__init__(self, name, width, parent) + def __init__(self, name, width, ndecimals, parent=None): + _BaseWaveform.__init__(self, name, width, ndecimals, parent) self._arrows = [] def onDataChange(self, data): @@ -266,8 +269,9 @@ class BitWaveform(_BaseWaveform): class 
AnalogWaveform(_BaseWaveform): - def __init__(self, name, width, parent=None): - _BaseWaveform.__init__(self, name, width, parent) + def __init__(self, name, width, ndecimals, parent=None): + _BaseWaveform.__init__(self, name, width, ndecimals, parent) + self._format_string = "{:." + str(ndecimals) + "f}" def onDataChange(self, data): _BaseWaveform.onDataChange(self, data) @@ -283,12 +287,15 @@ class AnalogWaveform(_BaseWaveform): def onCursorMove(self, x): _BaseWaveform.onCursorMove(self, x) - self.cursor_label.setText(self.cursor_y) + t = None + if self.cursor_y is not None: + t = self._format_string.format(self.cursor_y) + self.cursor_label.setText(t) class BitVectorWaveform(_BaseWaveform): - def __init__(self, name, width, parent=None): - _BaseWaveform.__init__(self, name, width, parent) + def __init__(self, name, width, ndecimals, parent=None): + _BaseWaveform.__init__(self, name, width, ndecimals, parent) self._labels = [] self._format_string = "{:0=" + str(math.ceil(width / 4)) + "X}" self.view_box.sigTransformChanged.connect(self._update_labels) @@ -345,8 +352,8 @@ class BitVectorWaveform(_BaseWaveform): class LogWaveform(_BaseWaveform): - def __init__(self, name, width, parent=None): - _BaseWaveform.__init__(self, name, width, parent) + def __init__(self, name, width, ndecimals, parent=None): + _BaseWaveform.__init__(self, name, width, ndecimals, parent) self.plot_data_item.opts['pen'] = None self.plot_data_item.opts['symbol'] = 'x' self._labels = [] @@ -454,8 +461,9 @@ class _WaveformView(QtWidgets.QWidget): def onDataChange(self, top, bottom, roles): first = top.row() last = bottom.row() + data_row = self._model.headers.index("data") for i in range(first, last + 1): - data = self._model.data(self._model.index(i, 3)) + data = self._model.data(self._model.index(i, data_row)) self._splitter.widget(i).onDataChange(data) def onInsert(self, parent, first, last): @@ -481,16 +489,15 @@ class _WaveformView(QtWidgets.QWidget): self._splitter.widget(i).onCursorMove(x) def _create_waveform(self, row): - name = self._model.data(self._model.index(row, 0)) - ty = self._model.data(self._model.index(row, 1)) - width = self._model.data(self._model.index(row, 2)) + name, ty, width, ndecimals = ( + self._model.data(self._model.index(row, i)) for i in range(4)) waveform_cls = { WaveformType.BIT: BitWaveform, WaveformType.VECTOR: BitVectorWaveform, WaveformType.ANALOG: AnalogWaveform, WaveformType.LOG: LogWaveform }[ty] - w = waveform_cls(name, width, parent=self._splitter) + w = waveform_cls(name, width, ndecimals, parent=self._splitter) w.setXLink(self._ref_vb) w.setStoppedX(self._stopped_x) w.setTimescale(self._timescale) @@ -516,7 +523,7 @@ class _WaveformView(QtWidgets.QWidget): class _WaveformModel(QtCore.QAbstractTableModel): def __init__(self): self.backing_struct = [] - self.headers = ["name", "type", "width", "data"] + self.headers = ["name", "type", "width", "ndecimals", "data"] QtCore.QAbstractTableModel.__init__(self) def rowCount(self, parent=QtCore.QModelIndex()): @@ -557,11 +564,11 @@ class _WaveformModel(QtCore.QAbstractTableModel): self.endRemoveRows() def export_list(self): - return [[row[0], row[1].value, row[2]] for row in self.backing_struct] + return [[row[0], row[1].value, row[2], row[3]] for row in self.backing_struct] def import_list(self, channel_list): self.clear() - data = [[row[0], WaveformType(row[1]), row[2], []] for row in channel_list] + data = [[row[0], WaveformType(row[1]), row[2], row[3], []] for row in channel_list] self.extend(data) def update_data(self, 
waveform_data, top, bottom): @@ -669,8 +676,8 @@ class _AddChannelDialog(QtWidgets.QDialog): for select in selection: key = self._model.index_to_key(select) if key is not None: - width, ty = self._model[key].ref - channels.append([key, ty, width, []]) + width, ty, ndecimals = self._model[key].ref + channels.append([key, ty, width, ndecimals, []]) self.accepted.emit(channels) self.close() From d085c1e4a4e93b87d75839e3bce2bd6da5938b9b Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Fri, 9 Feb 2024 16:03:55 +0800 Subject: [PATCH 131/296] waveform, analyzer proxy fix connect errors --- artiq/coredevice/comm_analyzer.py | 40 +++++++++---- artiq/coredevice/core.py | 4 +- artiq/dashboard/waveform.py | 69 +++++++++++++--------- artiq/frontend/aqctl_coreanalyzer_proxy.py | 4 +- 4 files changed, 70 insertions(+), 47 deletions(-) diff --git a/artiq/coredevice/comm_analyzer.py b/artiq/coredevice/comm_analyzer.py index be6993bdd..9a175fc8c 100644 --- a/artiq/coredevice/comm_analyzer.py +++ b/artiq/coredevice/comm_analyzer.py @@ -117,6 +117,8 @@ def decode_dump(data): (sent_bytes, total_byte_count, error_occurred, log_channel, dds_onehot_sel) = parts + logger.debug("analyzer dump has length %d", sent_bytes) + expected_len = sent_bytes + 15 if expected_len != len(data): raise ValueError("analyzer dump has incorrect length " @@ -128,39 +130,47 @@ def decode_dump(data): if total_byte_count > sent_bytes: logger.info("analyzer ring buffer has wrapped %d times", total_byte_count//sent_bytes) + if sent_bytes == 0: + logger.warning("analyzer dump is empty") position = 15 messages = [] for _ in range(sent_bytes//32): messages.append(decode_message(data[position:position+32])) position += 32 + + if len(messages) == 1 and isinstance(messages[0], StoppedMessage): + logger.warning("analyzer dump is empty aside from stop message") + return DecodedDump(log_channel, bool(dds_onehot_sel), messages) # simplified from sipyco broadcast Receiver class AnalyzerProxyReceiver: - def __init__(self, receive_cb): + def __init__(self, receive_cb, disconnect_cb=None): self.receive_cb = receive_cb + self.disconnect_cb = disconnect_cb + self.receive_task = None + self.writer = None async def connect(self, host, port): self.reader, self.writer = \ await keepalive.async_open_connection(host, port) + magic = get_analyzer_magic() try: - self.receive_task = asyncio.ensure_future(self._receive_cr()) + self.receive_task = asyncio.create_task(self._receive_cr()) except: - self.writer.close() - del self.reader - del self.writer + if self.writer is not None: + self.writer.close() + del self.reader + del self.writer raise async def close(self): - try: + self.disconnect_cb = None + if self.receive_task is not None: self.receive_task.cancel() - try: - await asyncio.wait_for(self.receive_task, None) - except asyncio.CancelledError: - pass - finally: + if self.writer is not None: self.writer.close() del self.reader del self.writer @@ -168,11 +178,14 @@ class AnalyzerProxyReceiver: async def _receive_cr(self): try: while True: - endian_byte = await self.reader.readexactly(1) + endian_byte = await self.reader.read(1) if endian_byte == b"E": endian = '>' elif endian_byte == b"e": endian = '<' + elif endian_byte == b"": + # EOF reached, connection lost + return else: raise ValueError payload_length_word = await self.reader.readexactly(4) @@ -186,7 +199,8 @@ class AnalyzerProxyReceiver: data = endian_byte + payload_length_word + remaining_data self.receive_cb(data) finally: - pass + if self.disconnect_cb is not None: + self.disconnect_cb() def 
vcd_codes(): diff --git a/artiq/coredevice/core.py b/artiq/coredevice/core.py index d92351d57..ae28f98ab 100644 --- a/artiq/coredevice/core.py +++ b/artiq/coredevice/core.py @@ -353,6 +353,4 @@ class Core: if self.analyzer_proxy is None: raise IOError("No analyzer proxy configured") else: - success = self.analyzer_proxy.trigger() - if not success: - raise IOError("Analyzer proxy reported failure") + self.analyzer_proxy.trigger() diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index a20d65a32..f03a12e05 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -37,8 +37,7 @@ class _BaseProxyClient: self._reconnect_task = None async def start(self): - self._reconnect_task = asyncio.ensure_future( - exc_to_warning(self._reconnect())) + self._reconnect_task = asyncio.create_task(self._reconnect()) def update_address(self, addr, port): self.addr = addr @@ -50,32 +49,25 @@ class _BaseProxyClient: while True: await self._reconnect_event.wait() self._reconnect_event.clear() - try: - await self.disconnect_cr() - except: - logger.error("Error caught when disconnecting proxy client", exc_info=True) try: await self.reconnect_cr() - except Exception: - logger.error( - "Error caught when reconnecting proxy client, retrying...", exc_info=True) + except: await asyncio.sleep(5) self._reconnect_event.set() except asyncio.CancelledError: pass async def close(self): - try: - self._reconnect_task.cancel() - await asyncio.wait_for(self._reconnect_task, None) - await self.disconnect_cr() - except: - logger.error("Error caught while closing proxy client", exc_info=True) + self._reconnect_task.cancel() + await self.close_cr() + + def reconnect(self): + self._reconnect_event.set() async def reconnect_cr(self): raise NotImplementedError - async def disconnect_cr(self): + async def close_cr(self): raise NotImplementedError @@ -85,17 +77,26 @@ class RPCProxyClient(_BaseProxyClient): self.client = AsyncioClient() async def trigger_proxy_task(self): - if self.client.get_rpc_id()[0] is None: - raise AttributeError("Unable to identify RPC target. 
Is analyzer proxy connected?") - await self.client.trigger() + try: + await self.client.trigger() + except: + logger.error("analyzer proxy reported failure", exc_info=True) async def reconnect_cr(self): - await self.client.connect_rpc(self.addr, - self.port, - "coreanalyzer_proxy_control") + try: + await self.client.connect_rpc(self.addr, + self.port, + "coreanalyzer_proxy_control") + logger.info("connected to analyzer proxy control %s:%d", self.addr, self.port) + except: + logger.error("error connecting to analyzer proxy control", exc_info=True) + raise - async def disconnect_cr(self): - self.client.close_rpc() + async def close_cr(self): + try: + self.client.close_rpc() + except: + logger.error("error closing connection with analyzer proxy control", exc_info=True) class ReceiverProxyClient(_BaseProxyClient): @@ -104,10 +105,18 @@ class ReceiverProxyClient(_BaseProxyClient): self.receiver = receiver async def reconnect_cr(self): - await self.receiver.connect(self.addr, self.port) + try: + await self.receiver.connect(self.addr, self.port) + logger.info("listening to analyzer proxy %s:%d", self.addr, self.port) + except: + logger.error("error connecting to analyzer proxy", exc_info=True) + raise - async def disconnect_cr(self): - await self.receiver.close() + async def close_cr(self): + try: + await self.receiver.close() + except: + logger.error("error closing connection to analyzer proxy", exc_info=True) class _BackgroundItem(pg.GraphicsWidgetAnchor, pg.GraphicsWidget): @@ -707,7 +716,7 @@ class WaveformDock(QtWidgets.QDockWidget): self.devices_sub = Subscriber("devices", self.init_ddb, self.update_ddb) self.rpc_client = RPCProxyClient() receiver = comm_analyzer.AnalyzerProxyReceiver( - self.on_dump_receive) + self.on_dump_receive, self.on_proxy_disconnect) self.receiver_client = ReceiverProxyClient(receiver) grid = LayoutWidget() @@ -788,6 +797,10 @@ class WaveformDock(QtWidgets.QDockWidget): self._waveform_view.setTimescale(self._waveform_data['timescale']) self._cursor_control.setTimescale(self._waveform_data['timescale']) + def on_proxy_disconnect(self): + self.receiver_client.reconnect() + self.rpc_client.reconnect() + async def load_trace(self): try: filename = await get_open_file_name( diff --git a/artiq/frontend/aqctl_coreanalyzer_proxy.py b/artiq/frontend/aqctl_coreanalyzer_proxy.py index ec9891423..8a2bee56e 100755 --- a/artiq/frontend/aqctl_coreanalyzer_proxy.py +++ b/artiq/frontend/aqctl_coreanalyzer_proxy.py @@ -60,9 +60,7 @@ class ProxyControl: self.distribute_cb(dump) except: logger.warning("Trigger failed:", exc_info=True) - return False - else: - return True + raise def get_argparser(): From 21b77567f255dbbcaf86e5f19feb5eeca0c078bc Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 14 Feb 2024 17:12:27 +0800 Subject: [PATCH 132/296] waveform: add timeout to reconnect_cr --- artiq/dashboard/waveform.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index f03a12e05..97d07576e 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -50,7 +50,9 @@ class _BaseProxyClient: await self._reconnect_event.wait() self._reconnect_event.clear() try: - await self.reconnect_cr() + await asyncio.wait_for(self.reconnect_cr(), timeout=5) + except asyncio.CancelledError: + raise except: await asyncio.sleep(5) self._reconnect_event.set() From 795b8ae4c67d6b25db885c03ddf19d9d556ccaa7 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 14 Feb 2024 17:39:14 +0800 Subject: [PATCH 133/296] 
add analyzer proxy magic --- artiq/coredevice/comm_analyzer.py | 4 +++- artiq/frontend/aqctl_coreanalyzer_proxy.py | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/artiq/coredevice/comm_analyzer.py b/artiq/coredevice/comm_analyzer.py index 9a175fc8c..9fd592162 100644 --- a/artiq/coredevice/comm_analyzer.py +++ b/artiq/coredevice/comm_analyzer.py @@ -15,6 +15,7 @@ logger = logging.getLogger(__name__) DEFAULT_REF_PERIOD = 1e-9 +ANALYZER_MAGIC = b"ARTIQ Analyzer Proxy\n" class MessageType(Enum): @@ -156,8 +157,9 @@ class AnalyzerProxyReceiver: async def connect(self, host, port): self.reader, self.writer = \ await keepalive.async_open_connection(host, port) - magic = get_analyzer_magic() try: + line = await self.reader.readline() + assert line == ANALYZER_MAGIC self.receive_task = asyncio.create_task(self._receive_cr()) except: if self.writer is not None: diff --git a/artiq/frontend/aqctl_coreanalyzer_proxy.py b/artiq/frontend/aqctl_coreanalyzer_proxy.py index 8a2bee56e..34726e47a 100755 --- a/artiq/frontend/aqctl_coreanalyzer_proxy.py +++ b/artiq/frontend/aqctl_coreanalyzer_proxy.py @@ -9,7 +9,7 @@ from sipyco.asyncio_tools import AsyncioServer, SignalHandler, atexit_register_c from sipyco.pc_rpc import Server from sipyco import common_args -from artiq.coredevice.comm_analyzer import get_analyzer_dump +from artiq.coredevice.comm_analyzer import get_analyzer_dump, ANALYZER_MAGIC logger = logging.getLogger(__name__) @@ -24,6 +24,7 @@ class ProxyServer(AsyncioServer): async def _handle_connection_cr(self, reader, writer): try: + writer.write(ANALYZER_MAGIC) queue = asyncio.Queue(self._queue_limit) self._recipients.add(queue) try: From fc282d4e17649d8cff51ce4279dac3cf50fe171f Mon Sep 17 00:00:00 2001 From: morgan Date: Fri, 16 Feb 2024 15:14:01 +0800 Subject: [PATCH 134/296] artiq_ddb_template: fix clk_div config remove clk_div default in jsonschema set CLK IN divided by 1 as default when bypassing PLL --- artiq/coredevice/coredevice_generic.schema.json | 3 +-- artiq/frontend/artiq_ddb_template.py | 11 ++++++++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/artiq/coredevice/coredevice_generic.schema.json b/artiq/coredevice/coredevice_generic.schema.json index bf79bb976..ce7385ea4 100644 --- a/artiq/coredevice/coredevice_generic.schema.json +++ b/artiq/coredevice/coredevice_generic.schema.json @@ -308,8 +308,7 @@ "clk_div": { "type": "integer", "minimum": 0, - "maximum": 3, - "default": 0 + "maximum": 3 }, "pll_n": { "type": "integer" diff --git a/artiq/frontend/artiq_ddb_template.py b/artiq/frontend/artiq_ddb_template.py index 376467d75..1bb6d8b9c 100755 --- a/artiq/frontend/artiq_ddb_template.py +++ b/artiq/frontend/artiq_ddb_template.py @@ -211,6 +211,11 @@ class PeripheralManager: urukul_name = self.get_name("urukul") synchronization = peripheral["synchronization"] channel = count(0) + pll_en = peripheral["pll_en"] + clk_div = peripheral.get("clk_div") + if clk_div is None: + clk_div = 0 if pll_en else 1 + self.gen(""" device_db["eeprom_{name}"] = {{ "type": "local", @@ -277,7 +282,7 @@ class PeripheralManager: sync_device="\"ttl_{name}_sync\"".format(name=urukul_name) if synchronization else "None", refclk=peripheral.get("refclk", self.primary_description["rtio_frequency"]), clk_sel=peripheral["clk_sel"], - clk_div=peripheral["clk_div"]) + clk_div=clk_div) dds = peripheral["dds"] pll_vco = peripheral.get("pll_vco") for i in range(4): @@ -299,7 +304,7 @@ class PeripheralManager: uchn=i, sw=",\n \"sw_device\": 
\"ttl_{name}_sw{uchn}\"".format(name=urukul_name, uchn=i) if len(peripheral["ports"]) > 1 else "", pll_vco=",\n \"pll_vco\": {}".format(pll_vco) if pll_vco is not None else "", - pll_n=peripheral.get("pll_n", 32), pll_en=peripheral["pll_en"], + pll_n=peripheral.get("pll_n", 32), pll_en=pll_en, sync_delay_seed=",\n \"sync_delay_seed\": \"eeprom_{}:{}\"".format(urukul_name, 64 + 4*i) if synchronization else "", io_update_delay=",\n \"io_update_delay\": \"eeprom_{}:{}\"".format(urukul_name, 64 + 4*i) if synchronization else "") elif dds == "ad9912": @@ -320,7 +325,7 @@ class PeripheralManager: uchn=i, sw=",\n \"sw_device\": \"ttl_{name}_sw{uchn}\"".format(name=urukul_name, uchn=i) if len(peripheral["ports"]) > 1 else "", pll_vco=",\n \"pll_vco\": {}".format(pll_vco) if pll_vco is not None else "", - pll_n=peripheral.get("pll_n", 8), pll_en=peripheral["pll_en"]) + pll_n=peripheral.get("pll_n", 8), pll_en=pll_en) else: raise ValueError return next(channel) From 6ed6fb0bce1a9a52cd7426e7e1239143f4ed785a Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 15 Feb 2024 17:41:25 +0800 Subject: [PATCH 135/296] waveform: fix log messages --- artiq/dashboard/waveform.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 97d07576e..ef15b41b9 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -202,7 +202,7 @@ class _BaseWaveform(pg.PlotWidget): try: self.x_data, self.y_data = zip(*data) except: - logger.error("Error getting data for waveform: {}".format(self.name), exc_info=True) + logger.error("Error getting data for waveform: %s", self.name, exc_info=True) def onCursorMove(self, x): self.cursor.setValue(x) @@ -269,7 +269,7 @@ class BitWaveform(_BaseWaveform): previous_y = y self.plot_data_item.setData(x=display_x, y=display_y) except: - logger.error('Error when displaying waveform: {}'.format(self.name), exc_info=True) + logger.error("Error when displaying waveform: %s", self.name, exc_info=True) for arw in self._arrows: self.removeItem(arw) self.plot_data_item.setData(x=[], y=[]) @@ -292,8 +292,7 @@ class AnalogWaveform(_BaseWaveform): min_y = min(self.y_data) self.plot_item.setRange(yRange=(min_y, max_y), padding=0.1) except: - logger.error( - 'Error when displaying waveform: {}'.format(self.name), exc_info=True) + logger.error("Error when displaying waveform: %s", self.name, exc_info=True) self.plot_data_item.setData(x=[], y=[]) def onCursorMove(self, x): @@ -348,8 +347,7 @@ class BitVectorWaveform(_BaseWaveform): self._labels.append(lbl) self.plot_data_item.setData(x=display_x, y=display_y) except: - logger.error( - "Error when displaying waveform: {}".format(self.name), exc_info=True) + logger.error("Error when displaying waveform: %s", self.name, exc_info=True) for lbl in self._labels: self.plot_item.removeItem(lbl) self.plot_data_item.setData(x=[], y=[]) @@ -394,7 +392,7 @@ class LogWaveform(_BaseWaveform): self._labels.append(lbl) lbl.setPos(old_x, 1) except: - logger.error('Error when displaying waveform: {}'.format(self.name), exc_info=True) + logger.error("Error when displaying waveform: %s", self.name, exc_info=True) for lbl in self._labels: self.plot_item.removeItem(lbl) self.plot_data_item.setData(x=[], y=[]) From 1749fa661f56643f7810b642ca9f0c2a61386a73 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Fri, 16 Feb 2024 15:38:28 +0800 Subject: [PATCH 136/296] waveform: reset cursor onDataChange --- artiq/dashboard/waveform.py | 24 ++++++++++++------------ 1 file 
changed, 12 insertions(+), 12 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index ef15b41b9..b34dae6d0 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -133,7 +133,7 @@ class _BackgroundItem(pg.GraphicsWidgetAnchor, pg.GraphicsWidget): class _BaseWaveform(pg.PlotWidget): cursorMove = QtCore.pyqtSignal(float) - def __init__(self, name, width, ndecimals, + def __init__(self, name, width, ndecimals, parent=None, pen="r", stepMode="right", connect="finite"): pg.PlotWidget.__init__(self, parent=parent, @@ -198,11 +198,13 @@ class _BaseWaveform(pg.PlotWidget): def setTimescale(self, timescale): self.timescale = timescale + def setData(self, data): + if len(data) == 0: + raise ValueError("no timeseries data to display for this channel") + self.x_data, self.y_data = zip(*data) + def onDataChange(self, data): - try: - self.x_data, self.y_data = zip(*data) - except: - logger.error("Error getting data for waveform: %s", self.name, exc_info=True) + raise NotImplementedError def onCursorMove(self, x): self.cursor.setValue(x) @@ -242,8 +244,8 @@ class BitWaveform(_BaseWaveform): self._arrows = [] def onDataChange(self, data): - _BaseWaveform.onDataChange(self, data) try: + self.setData(data) for arw in self._arrows: self.removeItem(arw) self._arrows = [] @@ -285,8 +287,8 @@ class AnalogWaveform(_BaseWaveform): self._format_string = "{:." + str(ndecimals) + "f}" def onDataChange(self, data): - _BaseWaveform.onDataChange(self, data) try: + self.setData(data) self.plot_data_item.setData(x=self.x_data, y=self.y_data) max_y = max(self.y_data) min_y = min(self.y_data) @@ -326,8 +328,8 @@ class BitVectorWaveform(_BaseWaveform): self.addItem(lbl) def onDataChange(self, data): - _BaseWaveform.onDataChange(self, data) try: + self.setData(data) for lbl in self._labels: self.plot_item.removeItem(lbl) self._labels = [] @@ -368,8 +370,8 @@ class LogWaveform(_BaseWaveform): self._labels = [] def onDataChange(self, data): - _BaseWaveform.onDataChange(self, data) try: + self.setData(data) for lbl in self._labels: self.plot_item.removeItem(lbl) self._labels = [] @@ -397,9 +399,6 @@ class LogWaveform(_BaseWaveform): self.plot_item.removeItem(lbl) self.plot_data_item.setData(x=[], y=[]) - def onCursorMove(self, x): - _BaseWaveform.onCursorMove(self, x) - class _WaveformView(QtWidgets.QWidget): cursorMove = QtCore.pyqtSignal(float) @@ -468,6 +467,7 @@ class _WaveformView(QtWidgets.QWidget): self._splitter.widget(i).setStoppedX(stopped_x) def onDataChange(self, top, bottom, roles): + self.cursorMove.emit(0) first = top.row() last = bottom.row() data_row = self._model.headers.index("data") From de539a4d33d79bd4164d54994af8a81dd90779af Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 19 Feb 2024 10:33:02 +0800 Subject: [PATCH 137/296] waveform: remove None cursor label --- artiq/dashboard/waveform.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index b34dae6d0..6b9f1e99d 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -278,7 +278,10 @@ class BitWaveform(_BaseWaveform): def onCursorMove(self, x): _BaseWaveform.onCursorMove(self, x) - self.cursor_label.setText(self.cursor_y) + if self.cursor_y is not None: + self.cursor_label.setText(self.cursor_y) + else: + self.cursor_label.setText("") class AnalogWaveform(_BaseWaveform): @@ -299,9 +302,10 @@ class AnalogWaveform(_BaseWaveform): def onCursorMove(self, x): 
_BaseWaveform.onCursorMove(self, x) - t = None if self.cursor_y is not None: t = self._format_string.format(self.cursor_y) + else: + t = "" self.cursor_label.setText(t) @@ -356,9 +360,10 @@ class BitVectorWaveform(_BaseWaveform): def onCursorMove(self, x): _BaseWaveform.onCursorMove(self, x) - t = None if self.cursor_y is not None: t = self._format_string.format(int(self.cursor_y, 2)) + else: + t = "" self.cursor_label.setText(t) From 652bcc22c66d36e03794fdc956cec43d532c9744 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 19 Feb 2024 10:35:07 +0800 Subject: [PATCH 138/296] waveform: remove empty waveform error msg --- artiq/dashboard/waveform.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 6b9f1e99d..030b2247a 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -200,8 +200,9 @@ class _BaseWaveform(pg.PlotWidget): def setData(self, data): if len(data) == 0: - raise ValueError("no timeseries data to display for this channel") - self.x_data, self.y_data = zip(*data) + self.x_data, self.y_data = [], [] + else: + self.x_data, self.y_data = zip(*data) def onDataChange(self, data): raise NotImplementedError @@ -293,9 +294,10 @@ class AnalogWaveform(_BaseWaveform): try: self.setData(data) self.plot_data_item.setData(x=self.x_data, y=self.y_data) - max_y = max(self.y_data) - min_y = min(self.y_data) - self.plot_item.setRange(yRange=(min_y, max_y), padding=0.1) + if len(data) > 0: + max_y = max(self.y_data) + min_y = min(self.y_data) + self.plot_item.setRange(yRange=(min_y, max_y), padding=0.1) except: logger.error("Error when displaying waveform: %s", self.name, exc_info=True) self.plot_data_item.setData(x=[], y=[]) From 1b0fd2e2d3142e476db0c804d4aa69788d13dcc5 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 19 Feb 2024 10:44:50 +0800 Subject: [PATCH 139/296] comm_analyzer: remove interval, timestamp --- artiq/coredevice/comm_analyzer.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/artiq/coredevice/comm_analyzer.py b/artiq/coredevice/comm_analyzer.py index 9fd592162..904a26c8c 100644 --- a/artiq/coredevice/comm_analyzer.py +++ b/artiq/coredevice/comm_analyzer.py @@ -688,12 +688,10 @@ def create_channel_handlers(manager, devices, ref_period, def get_channel_list(devices): manager = ChannelSignatureManager() create_channel_handlers(manager, devices, 1e-9, 3e9, False) - manager.get_channel("timestamp", 64, ty=WaveformType.VECTOR) ref_period = get_ref_period(devices) if ref_period is None: ref_period = DEFAULT_REF_PERIOD ndecimals = max(0, math.ceil(math.log10(1 / ref_period))) - manager.get_channel("interval", 64, ty=WaveformType.ANALOG, ndecimals=ndecimals) manager.get_channel("rtio_slack", 64, ty=WaveformType.ANALOG, ndecimals=ndecimals) return manager.channels From f460af3a6aac130653bd0b17c67dcb51dd247a9c Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 19 Feb 2024 10:53:27 +0800 Subject: [PATCH 140/296] waveform: remove vertical grids --- artiq/dashboard/waveform.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 030b2247a..cb3299136 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -242,6 +242,7 @@ class _BaseWaveform(pg.PlotWidget): class BitWaveform(_BaseWaveform): def __init__(self, name, width, ndecimals, parent=None): _BaseWaveform.__init__(self, name, width, ndecimals, parent) + self.plot_item.showGrid(x=True, y=False) self._arrows = [] def 
onDataChange(self, data): @@ -317,6 +318,7 @@ class BitVectorWaveform(_BaseWaveform): self._labels = [] self._format_string = "{:0=" + str(math.ceil(width / 4)) + "X}" self.view_box.sigTransformChanged.connect(self._update_labels) + self.plot_item.showGrid(x=True, y=False) def _update_labels(self): for label in self._labels: @@ -375,6 +377,7 @@ class LogWaveform(_BaseWaveform): self.plot_data_item.opts['pen'] = None self.plot_data_item.opts['symbol'] = 'x' self._labels = [] + self.plot_item.showGrid(x=True, y=False) def onDataChange(self, data): try: From edd23977f83e3e2e80790e08543fe9fdc29207dc Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 19 Feb 2024 11:37:46 +0800 Subject: [PATCH 141/296] waveform: delete all waveforms confirm dialog --- artiq/dashboard/waveform.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index cb3299136..4191e172a 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -455,6 +455,18 @@ class _WaveformView(QtWidgets.QWidget): self.cursorMove.connect(self.onCursorMove) + self.confirm_delete_dialog = QtWidgets.QMessageBox(self) + self.confirm_delete_dialog.setIcon( + QtWidgets.QMessageBox.Icon.Warning + ) + self.confirm_delete_dialog.setText("Delete all waveforms?") + self.confirm_delete_dialog.setStandardButtons( + QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel + ) + self.confirm_delete_dialog.setDefaultButton( + QtWidgets.QMessageBox.Ok + ) + def setModel(self, model): self._model = model self._model.dataChanged.connect(self.onDataChange) @@ -462,6 +474,7 @@ class _WaveformView(QtWidgets.QWidget): self._model.rowsRemoved.connect(self.onRemove) self._model.rowsMoved.connect(self.onMove) self._splitter.dropped.connect(self._model.move) + self.confirm_delete_dialog.accepted.connect(self._model.clear) def setTimescale(self, timescale): self._timescale = timescale @@ -525,8 +538,8 @@ class _WaveformView(QtWidgets.QWidget): action = QtWidgets.QAction("Delete waveform", w) action.triggered.connect(lambda: self._delete_waveform(w)) w.addAction(action) - action = QtWidgets.QAction("Clear waveforms", w) - action.triggered.connect(self._model.clear) + action = QtWidgets.QAction("Delete all waveforms", w) + action.triggered.connect(self.confirm_delete_dialog.open) w.addAction(action) return w From 779b7704ed8af48ad0bfa12bb3ce4cb1a95e1022 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 20 Feb 2024 11:07:10 +0800 Subject: [PATCH 142/296] waveform, comm_analyzer add cursor label unit --- artiq/coredevice/comm_analyzer.py | 27 ++++++++++---------- artiq/dashboard/waveform.py | 41 +++++++++++++++---------------- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/artiq/coredevice/comm_analyzer.py b/artiq/coredevice/comm_analyzer.py index 904a26c8c..9ac0c8010 100644 --- a/artiq/coredevice/comm_analyzer.py +++ b/artiq/coredevice/comm_analyzer.py @@ -248,7 +248,7 @@ class VCDManager: def set_timescale_ps(self, timescale): self.out.write("$timescale {}ps $end\n".format(round(timescale))) - def get_channel(self, name, width, ty, ndecimals=0): + def get_channel(self, name, width, ty, precision=0, unit=""): code = next(self.codes) self.out.write("$var wire {width} {code} {name} $end\n" .format(name=name, code=code, width=width)) @@ -285,9 +285,9 @@ class WaveformManager: def set_timescale_ps(self, timescale): self.trace["timescale"] = int(timescale) - def get_channel(self, name, width, ty, ndecimals=0): + def get_channel(self, name, 
width, ty, precision=0, unit=""): if ty == WaveformType.LOG: - self.trace["logs"][self.current_scope + name] = (width, ty, ndecimals) + self.trace["logs"][self.current_scope + name] = (ty, width, precision, unit) data = self.trace["data"][self.current_scope + name] = list() channel = WaveformChannel(data, self.current_time) self.channels.append(channel) @@ -338,8 +338,8 @@ class ChannelSignatureManager: self.current_scope = "" self.channels = dict() - def get_channel(self, name, width, ty, ndecimals=0): - self.channels[self.current_scope + name] = (width, ty, ndecimals) + def get_channel(self, name, width, ty, precision=0, unit=""): + self.channels[self.current_scope + name] = (ty, width, precision, unit) return None @contextmanager @@ -381,9 +381,9 @@ class TTLClockGenHandler: def __init__(self, manager, name, ref_period): self.name = name self.ref_period = ref_period - ndecimals = max(0, math.ceil(math.log10(2**24 * ref_period))) + precision = max(0, math.ceil(math.log10(2**24 * ref_period) + 6)) self.channel_frequency = manager.get_channel( - "ttl_clkgen/" + name, 64, ty=WaveformType.ANALOG, ndecimals=ndecimals) + "ttl_clkgen/" + name, 64, ty=WaveformType.ANALOG, precision=precision, unit="MHz") def process_message(self, message): if isinstance(message, OutputMessage): @@ -404,17 +404,18 @@ class DDSHandler: def add_dds_channel(self, name, dds_channel_nr): dds_channel = dict() - frequency_decimals = max(0, math.ceil(math.log10(2**32 / self.sysclk))) - phase_decimals = max(0, math.ceil(math.log10(2**16))) + frequency_precision = max(0, math.ceil(math.log10(2**32 / self.sysclk) + 6)) + phase_precision = max(0, math.ceil(math.log10(2**16))) with self.manager.scope("dds", name): dds_channel["vcd_frequency"] = \ self.manager.get_channel(name + "/frequency", 64, ty=WaveformType.ANALOG, - ndecimals=frequency_decimals) + precision=frequency_precision, + unit="MHz") dds_channel["vcd_phase"] = \ self.manager.get_channel(name + "/phase", 64, ty=WaveformType.ANALOG, - ndecimals=phase_decimals) + precision=phase_precision) dds_channel["ftw"] = [None, None] dds_channel["pow"] = None self.dds_channels[dds_channel_nr] = dds_channel @@ -691,8 +692,8 @@ def get_channel_list(devices): ref_period = get_ref_period(devices) if ref_period is None: ref_period = DEFAULT_REF_PERIOD - ndecimals = max(0, math.ceil(math.log10(1 / ref_period))) - manager.get_channel("rtio_slack", 64, ty=WaveformType.ANALOG, ndecimals=ndecimals) + precision = max(0, math.ceil(math.log10(1 / ref_period) - 6)) + manager.get_channel("rtio_slack", 64, ty=WaveformType.ANALOG, precision=precision, unit="us") return manager.channels diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 4191e172a..5edb82eb9 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -15,7 +15,7 @@ from sipyco.sync_struct import Subscriber from sipyco.pc_rpc import AsyncioClient from sipyco import pyon -from artiq.tools import exc_to_warning +from artiq.tools import exc_to_warning, short_format from artiq.coredevice import comm_analyzer from artiq.coredevice.comm_analyzer import WaveformType from artiq.gui.tools import LayoutWidget, get_open_file_name, get_save_file_name @@ -133,7 +133,7 @@ class _BackgroundItem(pg.GraphicsWidgetAnchor, pg.GraphicsWidget): class _BaseWaveform(pg.PlotWidget): cursorMove = QtCore.pyqtSignal(float) - def __init__(self, name, width, ndecimals, + def __init__(self, name, width, precision, unit, parent=None, pen="r", stepMode="right", connect="finite"): pg.PlotWidget.__init__(self, 
parent=parent, @@ -150,7 +150,8 @@ class _BaseWaveform(pg.PlotWidget): self.name = name self.width = width - self.ndecimals = ndecimals + self.precision = precision + self.unit = unit self.x_data = [] self.y_data = [] @@ -240,8 +241,8 @@ class _BaseWaveform(pg.PlotWidget): class BitWaveform(_BaseWaveform): - def __init__(self, name, width, ndecimals, parent=None): - _BaseWaveform.__init__(self, name, width, ndecimals, parent) + def __init__(self, name, width, precision, unit, parent=None): + _BaseWaveform.__init__(self, name, width, precision, unit, parent) self.plot_item.showGrid(x=True, y=False) self._arrows = [] @@ -287,9 +288,8 @@ class BitWaveform(_BaseWaveform): class AnalogWaveform(_BaseWaveform): - def __init__(self, name, width, ndecimals, parent=None): - _BaseWaveform.__init__(self, name, width, ndecimals, parent) - self._format_string = "{:." + str(ndecimals) + "f}" + def __init__(self, name, width, precision, unit, parent=None): + _BaseWaveform.__init__(self, name, width, precision, unit, parent) def onDataChange(self, data): try: @@ -306,15 +306,15 @@ class AnalogWaveform(_BaseWaveform): def onCursorMove(self, x): _BaseWaveform.onCursorMove(self, x) if self.cursor_y is not None: - t = self._format_string.format(self.cursor_y) + t = short_format(self.cursor_y, {"precision": self.precision, "unit": self.unit}) else: t = "" self.cursor_label.setText(t) class BitVectorWaveform(_BaseWaveform): - def __init__(self, name, width, ndecimals, parent=None): - _BaseWaveform.__init__(self, name, width, ndecimals, parent) + def __init__(self, name, width, precision, unit, parent=None): + _BaseWaveform.__init__(self, name, width, precision, parent) self._labels = [] self._format_string = "{:0=" + str(math.ceil(width / 4)) + "X}" self.view_box.sigTransformChanged.connect(self._update_labels) @@ -372,8 +372,8 @@ class BitVectorWaveform(_BaseWaveform): class LogWaveform(_BaseWaveform): - def __init__(self, name, width, ndecimals, parent=None): - _BaseWaveform.__init__(self, name, width, ndecimals, parent) + def __init__(self, name, width, precision, unit, parent=None): + _BaseWaveform.__init__(self, name, width, precision, parent) self.plot_data_item.opts['pen'] = None self.plot_data_item.opts['symbol'] = 'x' self._labels = [] @@ -521,15 +521,15 @@ class _WaveformView(QtWidgets.QWidget): self._splitter.widget(i).onCursorMove(x) def _create_waveform(self, row): - name, ty, width, ndecimals = ( - self._model.data(self._model.index(row, i)) for i in range(4)) + name, ty, width, precision, unit = ( + self._model.data(self._model.index(row, i)) for i in range(5)) waveform_cls = { WaveformType.BIT: BitWaveform, WaveformType.VECTOR: BitVectorWaveform, WaveformType.ANALOG: AnalogWaveform, WaveformType.LOG: LogWaveform }[ty] - w = waveform_cls(name, width, ndecimals, parent=self._splitter) + w = waveform_cls(name, width, precision, unit, parent=self._splitter) w.setXLink(self._ref_vb) w.setStoppedX(self._stopped_x) w.setTimescale(self._timescale) @@ -555,7 +555,7 @@ class _WaveformView(QtWidgets.QWidget): class _WaveformModel(QtCore.QAbstractTableModel): def __init__(self): self.backing_struct = [] - self.headers = ["name", "type", "width", "ndecimals", "data"] + self.headers = ["name", "type", "width", "precision", "unit", "data"] QtCore.QAbstractTableModel.__init__(self) def rowCount(self, parent=QtCore.QModelIndex()): @@ -596,11 +596,11 @@ class _WaveformModel(QtCore.QAbstractTableModel): self.endRemoveRows() def export_list(self): - return [[row[0], row[1].value, row[2], row[3]] for row in 
self.backing_struct] + return [[row[0], row[1].value, *row[2:5]] for row in self.backing_struct] def import_list(self, channel_list): self.clear() - data = [[row[0], WaveformType(row[1]), row[2], row[3], []] for row in channel_list] + data = [[row[0], WaveformType(row[1]), *row[2:5], []] for row in channel_list] self.extend(data) def update_data(self, waveform_data, top, bottom): @@ -708,8 +708,7 @@ class _AddChannelDialog(QtWidgets.QDialog): for select in selection: key = self._model.index_to_key(select) if key is not None: - width, ty, ndecimals = self._model[key].ref - channels.append([key, ty, width, ndecimals, []]) + channels.append([key, *self._model[key].ref, []]) self.accepted.emit(channels) self.close() From bd9e8b397764b11d02c46fa1d3036bd1c1d425d0 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 19 Feb 2024 16:58:00 +0800 Subject: [PATCH 143/296] waveform: simplify AddChannelDialog --- artiq/dashboard/waveform.py | 51 ++++++++++++++----------------------- 1 file changed, 19 insertions(+), 32 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 5edb82eb9..cd8529e39 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -670,15 +670,13 @@ class Model(DictSyncTreeSepModel): class _AddChannelDialog(QtWidgets.QDialog): - accepted = QtCore.pyqtSignal(list) - def __init__(self, parent, model): QtWidgets.QDialog.__init__(self, parent=parent) self.setContextMenuPolicy(Qt.ActionsContextMenu) self.setWindowTitle("Add channels") - grid = QtWidgets.QGridLayout() - self.setLayout(grid) + layout = QtWidgets.QVBoxLayout() + self.setLayout(layout) self._model = model self._tree_view = QtWidgets.QTreeView() @@ -688,19 +686,15 @@ class _AddChannelDialog(QtWidgets.QDialog): self._tree_view.setSelectionMode( QtWidgets.QAbstractItemView.ExtendedSelection) self._tree_view.setModel(self._model) - grid.addWidget(self._tree_view, 0, 0, 1, 2) - cancel_btn = QtWidgets.QPushButton("Cancel") - cancel_btn.clicked.connect(self.close) - cancel_btn.setIcon( - QtWidgets.QApplication.style().standardIcon( - QtWidgets.QStyle.SP_DialogCancelButton)) - grid.addWidget(cancel_btn, 1, 0) - confirm_btn = QtWidgets.QPushButton("Confirm") - confirm_btn.clicked.connect(self.add_channels) - confirm_btn.setIcon( - QtWidgets.QApplication.style().standardIcon( - QtWidgets.QStyle.SP_DialogApplyButton)) - grid.addWidget(confirm_btn, 1, 1) + layout.addWidget(self._tree_view) + + self._button_box = QtWidgets.QDialogButtonBox( + QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel + ) + self._button_box.setCenterButtons(True) + self._button_box.accepted.connect(self.add_channels) + self._button_box.rejected.connect(self.reject) + layout.addWidget(self._button_box) def add_channels(self): selection = self._tree_view.selectedIndexes() @@ -709,8 +703,8 @@ class _AddChannelDialog(QtWidgets.QDialog): key = self._model.index_to_key(select) if key is not None: channels.append([key, *self._model[key].ref, []]) - self.accepted.emit(channels) - self.close() + self.channels = channels + self.accept() class WaveformDock(QtWidgets.QDockWidget): @@ -759,12 +753,15 @@ class WaveformDock(QtWidgets.QDockWidget): lambda: asyncio.ensure_future(exc_to_warning(self.rpc_client.trigger_proxy_task()))) grid.addWidget(self._request_dump_btn, 0, 1) + self._add_channel_dialog = _AddChannelDialog(self, self._channel_model) + self._add_channel_dialog.accepted.connect(self._add_channels) + self._add_btn = QtWidgets.QToolButton() self._add_btn.setToolTip("Add channels...") 
self._add_btn.setIcon( QtWidgets.QApplication.style().standardIcon( QtWidgets.QStyle.SP_FileDialogListView)) - self._add_btn.clicked.connect(self.on_add_channel_click) + self._add_btn.clicked.connect(self._add_channel_dialog.open) grid.addWidget(self._add_btn, 0, 2) self._file_menu = QtWidgets.QMenu() @@ -790,24 +787,14 @@ class WaveformDock(QtWidgets.QDockWidget): lambda: asyncio.ensure_future(exc_to_warning(coro()))) self._file_menu.addAction(action) - async def _add_channel_task(self): - dialog = _AddChannelDialog(self, self._channel_model) - fut = asyncio.Future() - - def on_accept(s): - fut.set_result(s) - dialog.accepted.connect(on_accept) - dialog.open() - channels = await fut + def _add_channels(self): + channels = self._add_channel_dialog.channels count = self._waveform_model.rowCount() self._waveform_model.extend(channels) self._waveform_model.update_data(self._waveform_data['data'], count, count + len(channels)) - def on_add_channel_click(self): - asyncio.ensure_future(self._add_channel_task()) - def on_dump_receive(self, dump): self._dump = dump decoded_dump = comm_analyzer.decode_dump(dump) From cda758ef53e2777d16ffd5b461f0367b5d0352d1 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 22 Feb 2024 16:55:39 +0800 Subject: [PATCH 144/296] docs: fix waveform imports --- artiq/dashboard/waveform.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index cd8529e39..45d04d9cc 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -6,7 +6,6 @@ import itertools import math from PyQt5 import QtCore, QtWidgets, QtGui -from PyQt5.QtCore import Qt import pyqtgraph as pg import numpy as np @@ -672,7 +671,7 @@ class Model(DictSyncTreeSepModel): class _AddChannelDialog(QtWidgets.QDialog): def __init__(self, parent, model): QtWidgets.QDialog.__init__(self, parent=parent) - self.setContextMenuPolicy(Qt.ActionsContextMenu) + self.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu) self.setWindowTitle("Add channels") layout = QtWidgets.QVBoxLayout() From c49600a2fcd86e77244e2f9eb6f2b225bb0a71a7 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Fri, 23 Feb 2024 10:18:11 +0800 Subject: [PATCH 145/296] docs: fix sampler, waveform --- artiq/coredevice/sampler.py | 2 +- doc/manual/conf.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/artiq/coredevice/sampler.py b/artiq/coredevice/sampler.py index 8f9997e20..f12cc16ac 100644 --- a/artiq/coredevice/sampler.py +++ b/artiq/coredevice/sampler.py @@ -21,7 +21,7 @@ def adc_mu_to_volt(data, gain=0, corrected_fs=True): :param data: 16 bit signed ADC word :param gain: PGIA gain setting (0: 1, ..., 3: 1000) :param corrected_fs: use corrected ADC FS reference. - Should be True for Samplers' revisions after v2.1. False for v2.1 and earlier. + Should be True for Samplers' revisions after v2.1. False for v2.1 and earlier. 
:return: Voltage in Volts """ if gain == 0: diff --git a/doc/manual/conf.py b/doc/manual/conf.py index 7e06ea680..af1f56e82 100644 --- a/doc/manual/conf.py +++ b/doc/manual/conf.py @@ -34,6 +34,7 @@ mock_modules = ["artiq.gui.waitingspinnerwidget", "artiq.gui.models", "artiq.compiler.module", "artiq.compiler.embedding", + "artiq.dashboard.waveform", "qasync", "pyqtgraph", "matplotlib", "numpy", "dateutil", "dateutil.parser", "prettytable", "PyQt5", "h5py", "serial", "scipy", "scipy.interpolate", From 4c142ec3f164bf5cae6ee609256796d1db01783e Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 19 Feb 2024 13:37:24 +0800 Subject: [PATCH 146/296] waveform: add reset zoom button --- artiq/dashboard/waveform.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 45d04d9cc..f97601501 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -488,6 +488,10 @@ class _WaveformView(QtWidgets.QWidget): for i in range(self._model.rowCount()): self._splitter.widget(i).setStoppedX(stopped_x) + def resetZoom(self): + if self._stopped_x is not None: + self._ref_vb.setRange(xRange=(0, self._stopped_x)) + def onDataChange(self, top, bottom, roles): self.cursorMove.emit(0) first = top.row() @@ -775,10 +779,18 @@ class WaveformDock(QtWidgets.QDockWidget): self._waveform_view.setModel(self._waveform_model) grid.addWidget(self._waveform_view, 1, 0, colspan=12) + self._reset_zoom_btn = QtWidgets.QToolButton() + self._reset_zoom_btn.setToolTip("Reset zoom") + self._reset_zoom_btn.setIcon( + QtWidgets.QApplication.style().standardIcon( + QtWidgets.QStyle.SP_TitleBarMaxButton)) + self._reset_zoom_btn.clicked.connect(self._waveform_view.resetZoom) + grid.addWidget(self._reset_zoom_btn, 0, 3) + self._cursor_control = _CursorTimeControl(self) self._waveform_view.cursorMove.connect(self._cursor_control.setDisplayValue) self._cursor_control.submit.connect(self._waveform_view.onCursorMove) - grid.addWidget(self._cursor_control, 0, 3, colspan=6) + grid.addWidget(self._cursor_control, 0, 4, colspan=6) def _add_async_action(self, label, coro): action = QtWidgets.QAction(label, self) From 20d7604f873e307c226efbecb400181b357f8c43 Mon Sep 17 00:00:00 2001 From: Charles Baynham Date: Thu, 25 Jan 2024 13:54:25 +0000 Subject: [PATCH 147/296] grabber: Add optional timeout for getting grabber data Signed-off-by: Charles Baynham --- RELEASE_NOTES.rst | 1 + artiq/coredevice/grabber.py | 25 ++++++++++++++++++++----- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index 40fec5f56..ed9ca7ca5 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -46,6 +46,7 @@ Highlights: * MSYS2 packaging for Windows, which replaces Conda. Conda packages are still available to support legacy installations, but may be removed in a future release. * Experiments can now be submitted with revisions set to a branch / tag name instead of only git hashes. 
+* Grabber image input now has an optional timeout Breaking changes: diff --git a/artiq/coredevice/grabber.py b/artiq/coredevice/grabber.py index 518a5b12a..c17486514 100644 --- a/artiq/coredevice/grabber.py +++ b/artiq/coredevice/grabber.py @@ -2,7 +2,7 @@ from numpy import int32, int64 from artiq.language.core import * from artiq.language.types import * -from artiq.coredevice.rtio import rtio_output, rtio_input_data +from artiq.coredevice.rtio import rtio_output, rtio_input_timestamped_data class OutOfSyncException(Exception): @@ -11,6 +11,11 @@ class OutOfSyncException(Exception): pass +class GrabberTimeoutException(Exception): + """Raised when a timeout occurs while attempting to read Grabber RTIO input events.""" + pass + + class Grabber: """Driver for the Grabber camera interface.""" kernel_invariants = {"core", "channel_base", "sentinel"} @@ -82,10 +87,10 @@ class Grabber: self.gate_roi(0) @kernel - def input_mu(self, data): + def input_mu(self, data, timeout_mu=-1): """ Retrieves the accumulated values for one frame from the ROI engines. - Blocks until values are available. + Blocks until values are available or timeout is reached. The input list must be a list of integers of the same length as there are enabled ROI engines. This method replaces the elements of the @@ -95,15 +100,25 @@ class Grabber: If the number of elements in the list does not match the number of ROI engines that produced output, an exception will be raised during this call or the next. + + If the timeout is reached before data is available, a RuntimeError is + raised. + + :param timeout_mu: Timestamp at which a timeout will occur. Set to -1 + (default) to disable timeout. """ channel = self.channel_base + 1 - sentinel = rtio_input_data(channel) + timestamp, sentinel = rtio_input_timestamped_data(timeout_mu, channel) + if timestamp == -1: + raise GrabberTimeoutException("Timeout before Grabber frame available") if sentinel != self.sentinel: raise OutOfSyncException for i in range(len(data)): - roi_output = rtio_input_data(channel) + timestamp, roi_output = rtio_input_timestamped_data(timeout_mu, channel) if roi_output == self.sentinel: raise OutOfSyncException + if timestamp == -1: + raise GrabberTimeoutException("Timeout retrieving ROIs") data[i] = roi_output From 716d0f556d0f5231cf35af961e1317819c9d8e60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Mon, 26 Feb 2024 11:03:59 +0800 Subject: [PATCH 148/296] grabber: timeout fixes --- RELEASE_NOTES.rst | 2 +- artiq/coredevice/grabber.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index ed9ca7ca5..04bb5567d 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -46,7 +46,7 @@ Highlights: * MSYS2 packaging for Windows, which replaces Conda. Conda packages are still available to support legacy installations, but may be removed in a future release. * Experiments can now be submitted with revisions set to a branch / tag name instead of only git hashes. -* Grabber image input now has an optional timeout +* Grabber image input now has an optional timeout. Breaking changes: diff --git a/artiq/coredevice/grabber.py b/artiq/coredevice/grabber.py index c17486514..509e84b9a 100644 --- a/artiq/coredevice/grabber.py +++ b/artiq/coredevice/grabber.py @@ -101,8 +101,8 @@ class Grabber: ROI engines that produced output, an exception will be raised during this call or the next. - If the timeout is reached before data is available, a RuntimeError is - raised. 
+ If the timeout is reached before data is available, the exception + GrabberTimeoutException is raised. :param timeout_mu: Timestamp at which a timeout will occur. Set to -1 (default) to disable timeout. @@ -120,5 +120,6 @@ class Grabber: if roi_output == self.sentinel: raise OutOfSyncException if timestamp == -1: - raise GrabberTimeoutException("Timeout retrieving ROIs") + raise GrabberTimeoutException( + "Timeout retrieving ROIs (attempting to read more ROIs than enabled?)") data[i] = roi_output From 7c1274f2542359c4dcd902e7cd4bd237bfb2875a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Mon, 26 Feb 2024 16:30:22 +0800 Subject: [PATCH 149/296] RELEASE_NOTES: Python 3.10 -> 3.11 --- RELEASE_NOTES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index 04bb5567d..77399000c 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -42,7 +42,7 @@ Highlights: * Persistent datasets are now stored in a LMDB database for improved performance. * Python's built-in types (such as ``float``, or ``List[...]``) can now be used in type annotations on kernel functions. -* Full Python 3.10 support. +* Full Python 3.11 support. * MSYS2 packaging for Windows, which replaces Conda. Conda packages are still available to support legacy installations, but may be removed in a future release. * Experiments can now be submitted with revisions set to a branch / tag name instead of only git hashes. From 0a24d72b9fd2e73ef1620547b143fd08901e8f52 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 21 Feb 2024 13:37:47 +0800 Subject: [PATCH 150/296] dashboard: change analyzer proxy client --- artiq/dashboard/waveform.py | 124 +++++++++++++----------------- artiq/frontend/artiq_dashboard.py | 19 ++++- 2 files changed, 67 insertions(+), 76 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index f97601501..ff796fed4 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -28,97 +28,82 @@ WAVEFORM_MIN_HEIGHT = 50 WAVEFORM_MAX_HEIGHT = 200 -class _BaseProxyClient: - def __init__(self): +class ProxyClient(): + def __init__(self, receive_cb, disconnect_cb): + self.receive_cb = receive_cb + self.disconnect_cb = disconnect_cb + self.receiver = None self.addr = None + self.port_proxy = None self.port = None self._reconnect_event = asyncio.Event() self._reconnect_task = None - async def start(self): + async def start(self, timeout=5, timer=5, timer_backoff=1.1): + self.timeout = timeout + self.timer = timer + self.timer_cur = timer + self.timer_backoff = timer_backoff self._reconnect_task = asyncio.create_task(self._reconnect()) - def update_address(self, addr, port): + def update_address(self, addr, port, port_proxy): self.addr = addr self.port = port - self._reconnect_event.set() + self.port_proxy = port_proxy + self.reconnect() + + async def trigger_proxy_task(self): + remote = AsyncioClient() + try: + try: + await remote.connect_rpc(self.addr, self.port, "coreanalyzer_proxy_control") + except: + logger.error("error connecting to analyzer proxy control", exc_info=True) + return + await remote.trigger() + except: + logger.error("analyzer proxy reported failure", exc_info=True) + finally: + remote.close_rpc() async def _reconnect(self): try: while True: await self._reconnect_event.wait() self._reconnect_event.clear() + if self.receiver is not None: + await self.receiver.close() + self.receiver = None + self.receiver = comm_analyzer.AnalyzerProxyReceiver( + self.receive_cb, 
self.disconnect_cb) try: - await asyncio.wait_for(self.reconnect_cr(), timeout=5) - except asyncio.CancelledError: - raise + await asyncio.wait_for(self.receiver.connect(self.addr, self.port_proxy), + self.timeout) + logger.info("connected to analyzer proxy %s:%d", self.addr, self.port_proxy) + self.timer_cur = self.timer + continue except: - await asyncio.sleep(5) + logger.error("error connecting to analyzer proxy", exc_info=True) + try: + await asyncio.wait_for(self._reconnect_event.wait(), self.timer_cur) + except asyncio.TimeoutError: + self.timer_cur *= self.timer_backoff self._reconnect_event.set() + else: + self.timer_cur = self.timer except asyncio.CancelledError: pass async def close(self): self._reconnect_task.cancel() - await self.close_cr() - - def reconnect(self): - self._reconnect_event.set() - - async def reconnect_cr(self): - raise NotImplementedError - - async def close_cr(self): - raise NotImplementedError - - -class RPCProxyClient(_BaseProxyClient): - def __init__(self): - _BaseProxyClient.__init__(self) - self.client = AsyncioClient() - - async def trigger_proxy_task(self): - try: - await self.client.trigger() - except: - logger.error("analyzer proxy reported failure", exc_info=True) - - async def reconnect_cr(self): - try: - await self.client.connect_rpc(self.addr, - self.port, - "coreanalyzer_proxy_control") - logger.info("connected to analyzer proxy control %s:%d", self.addr, self.port) - except: - logger.error("error connecting to analyzer proxy control", exc_info=True) - raise - - async def close_cr(self): - try: - self.client.close_rpc() - except: - logger.error("error closing connection with analyzer proxy control", exc_info=True) - - -class ReceiverProxyClient(_BaseProxyClient): - def __init__(self, receiver): - _BaseProxyClient.__init__(self) - self.receiver = receiver - - async def reconnect_cr(self): - try: - await self.receiver.connect(self.addr, self.port) - logger.info("listening to analyzer proxy %s:%d", self.addr, self.port) - except: - logger.error("error connecting to analyzer proxy", exc_info=True) - raise - - async def close_cr(self): try: await self.receiver.close() except: logger.error("error closing connection to analyzer proxy", exc_info=True) + def reconnect(self): + self._reconnect_event.set() + class _BackgroundItem(pg.GraphicsWidgetAnchor, pg.GraphicsWidget): def __init__(self, parent, rect): @@ -733,10 +718,7 @@ class WaveformDock(QtWidgets.QDockWidget): self._current_dir = os.getcwd() self.devices_sub = Subscriber("devices", self.init_ddb, self.update_ddb) - self.rpc_client = RPCProxyClient() - receiver = comm_analyzer.AnalyzerProxyReceiver( - self.on_dump_receive, self.on_proxy_disconnect) - self.receiver_client = ReceiverProxyClient(receiver) + self.proxy_client = ProxyClient(self.on_dump_receive, self.on_proxy_disconnect) grid = LayoutWidget() self.setWidget(grid) @@ -753,7 +735,7 @@ class WaveformDock(QtWidgets.QDockWidget): QtWidgets.QApplication.style().standardIcon( QtWidgets.QStyle.SP_BrowserReload)) self._request_dump_btn.clicked.connect( - lambda: asyncio.ensure_future(exc_to_warning(self.rpc_client.trigger_proxy_task()))) + lambda: asyncio.ensure_future(exc_to_warning(self.proxy_client.trigger_proxy_task()))) grid.addWidget(self._request_dump_btn, 0, 1) self._add_channel_dialog = _AddChannelDialog(self, self._channel_model) @@ -818,8 +800,7 @@ class WaveformDock(QtWidgets.QDockWidget): self._cursor_control.setTimescale(self._waveform_data['timescale']) def on_proxy_disconnect(self): - self.receiver_client.reconnect() - 
self.rpc_client.reconnect() + self.proxy_client.reconnect() async def load_trace(self): try: @@ -921,8 +902,7 @@ class WaveformDock(QtWidgets.QDockWidget): addr = desc["host"] port_proxy = desc.get("port_proxy", 1385) port = desc.get("port", 1386) - self.receiver_client.update_address(addr, port_proxy) - self.rpc_client.update_address(addr, port) + self.proxy_client.update_address(addr, port, port_proxy) def init_ddb(self, ddb): self._ddb = ddb diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index 17ec0527d..f6a1c4504 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ -48,6 +48,15 @@ def get_argparser(): parser.add_argument( "-p", "--load-plugin", dest="plugin_modules", action="append", help="Python module to load on startup") + parser.add_argument( + "--analyzer-proxy-timeout", default=5, type=float, + help="connection timeout to core analyzer proxy") + parser.add_argument( + "--analyzer-proxy-timer", default=5, type=float, + help="retry timer to core analyzer proxy") + parser.add_argument( + "--analyzer-proxy-timer-backoff", default=1.1, type=float, + help="retry timer backoff multiplier to core analyzer proxy") common_args.verbosity_args(parser) return parser @@ -221,12 +230,14 @@ def main(): atexit_register_coroutine(d_ttl_dds.stop, loop=loop) d_waveform = waveform.WaveformDock() + loop.run_until_complete(d_waveform.proxy_client.start( + args.analyzer_proxy_timeout, + args.analyzer_proxy_timer, + args.analyzer_proxy_timer_backoff + )) + atexit_register_coroutine(d_waveform.proxy_client.close, loop=loop) loop.run_until_complete(d_waveform.devices_sub.connect(args.server, args.port_notify)) atexit_register_coroutine(d_waveform.devices_sub.close, loop=loop) - for name in ["rpc_client", "receiver_client"]: - client = getattr(d_waveform, name) - loop.run_until_complete(client.start()) - atexit_register_coroutine(client.close, loop=loop) d_schedule = schedule.ScheduleDock( rpc_clients["schedule"], sub_clients["schedule"]) From 750fdf89b3baf45ed76a6abb33c3d3112ea9463d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Mon, 26 Feb 2024 17:18:28 +0800 Subject: [PATCH 151/296] doc: get rid of confusing 'Extensions' wording --- doc/manual/management_system.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manual/management_system.rst b/doc/manual/management_system.rst index abb5c2d17..401e51a3e 100644 --- a/doc/manual/management_system.rst +++ b/doc/manual/management_system.rst @@ -157,7 +157,7 @@ Embedded applets should use `AppletRequestIPC` while standalone applets use `App Applet entry area ***************** -Extensions are provided to enable the use of argument widgets in applets through the `EntryArea` class. +Argument widgets can be used in applets through the `EntryArea` class. Below is a simple example code snippet using the `EntryArea` class: :: From b1424286077badc124f6dc01d67f1239c26f5b51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Mon, 26 Feb 2024 17:26:23 +0800 Subject: [PATCH 152/296] doc: remove incorrect and unneeded code comment --- doc/manual/management_system.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/manual/management_system.rst b/doc/manual/management_system.rst index 401e51a3e..440b27d9d 100644 --- a/doc/manual/management_system.rst +++ b/doc/manual/management_system.rst @@ -161,7 +161,6 @@ Argument widgets can be used in applets through the `EntryArea` class. 
Below is a simple example code snippet using the `EntryArea` class: :: - # Create the experiment area entry_area = EntryArea() # Create a new widget From a0450555e2896f1235f836de5d30d19f4f8c66d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Mon, 26 Feb 2024 19:29:31 +0800 Subject: [PATCH 153/296] RELEASE_NOTES: units in datasets --- RELEASE_NOTES.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index 77399000c..d8d5e3029 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -39,6 +39,8 @@ Highlights: - Hotkeys now organize experiment windows in the order they were last interacted with: + CTRL+SHIFT+T tiles experiment windows + CTRL+SHIFT+C cascades experiment windows +* Datasets can now be associated with units and scale factors, and displayed accordingly in the dashboard + including applets, like widgets such as ``NumberValue`` already did in earlier ARTIQ versions. * Persistent datasets are now stored in a LMDB database for improved performance. * Python's built-in types (such as ``float``, or ``List[...]``) can now be used in type annotations on kernel functions. From 7688f380b122603cfdcd2541a2ad6d8014e5d623 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Mon, 26 Feb 2024 19:30:31 +0800 Subject: [PATCH 154/296] environment, artiq_run: introduce interactive arguments --- RELEASE_NOTES.rst | 1 + artiq/frontend/artiq_run.py | 23 +++++++++++++++++++++-- artiq/language/environment.py | 31 +++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 2 deletions(-) diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index d8d5e3029..fa801a11f 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -41,6 +41,7 @@ Highlights: + CTRL+SHIFT+C cascades experiment windows * Datasets can now be associated with units and scale factors, and displayed accordingly in the dashboard including applets, like widgets such as ``NumberValue`` already did in earlier ARTIQ versions. +* Experiments can now request arguments interactively from the user at any time. * Persistent datasets are now stored in a LMDB database for improved performance. * Python's built-in types (such as ``float``, or ``List[...]``) can now be used in type annotations on kernel functions. 
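As a usage sketch for the interactive-argument support announced above and added by this patch (the ``interactive()`` context manager in ``artiq/language/environment.py`` further below), an experiment might block mid-run and ask the user for a value. The experiment name, argument key and default are illustrative only; with this patch the prompt is served by ``artiq_run``, and master/dashboard support follows later in this series::

    from artiq.experiment import EnvExperiment, NumberValue


    class InteractiveDemo(EnvExperiment):
        def build(self):
            pass

        def run(self):
            print("waiting for user input...")
            # Execution blocks here until the requested values are supplied.
            with self.interactive() as interactive:
                interactive.setattr_argument("repeats", NumberValue(10.0))
            # After the context manager exits, the supplied values are
            # attributes of the namespace object.
            print("got", interactive.repeats)
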
diff --git a/artiq/frontend/artiq_run.py b/artiq/frontend/artiq_run.py index ae3e5f1c7..19d83e787 100755 --- a/artiq/frontend/artiq_run.py +++ b/artiq/frontend/artiq_run.py @@ -13,7 +13,7 @@ import h5py from llvmlite import binding as llvm -from sipyco import common_args +from sipyco import common_args, pyon from artiq import __version__ as artiq_version from artiq.language.environment import EnvExperiment, ProcessArgumentManager @@ -166,9 +166,28 @@ def get_argparser(with_file=True): return parser +class ArgumentManager(ProcessArgumentManager): + def get_interactive(self, interactive_arglist): + result = dict() + for key, processor, group, tooltip in interactive_arglist: + success = False + while not success: + user_input = input("{}:{} (group={}, tooltip={}): ".format( + key, type(processor).__name__, group, tooltip)) + try: + user_input_deser = pyon.decode(user_input) + value = processor.process(user_input_deser) + except: + logger.error("failed to process user input, retrying", exc_info=True) + else: + success = True + result[key] = value + return result + + def _build_experiment(device_mgr, dataset_mgr, args): arguments = parse_arguments(args.arguments) - argument_mgr = ProcessArgumentManager(arguments) + argument_mgr = ArgumentManager(arguments) managers = (device_mgr, dataset_mgr, argument_mgr, {}) if hasattr(args, "file"): is_tar = tarfile.is_tarfile(args.file) diff --git a/artiq/language/environment.py b/artiq/language/environment.py index ba42b2791..797f1eeeb 100644 --- a/artiq/language/environment.py +++ b/artiq/language/environment.py @@ -1,5 +1,7 @@ from collections import OrderedDict from inspect import isclass +from contextlib import contextmanager +from types import SimpleNamespace from sipyco import pyon @@ -212,6 +214,9 @@ class TraceArgumentManager: self.requested_args[key] = processor, group, tooltip return None + def get_interactive(self, interactive_arglist): + raise NotImplementedError + class ProcessArgumentManager: def __init__(self, unprocessed_arguments): @@ -233,6 +238,10 @@ class ProcessArgumentManager: raise AttributeError("Supplied argument(s) not queried in experiment: " + ", ".join(unprocessed)) + def get_interactive(self, interactive_arglist): + raise NotImplementedError + + class HasEnvironment: """Provides methods to manage the environment of an experiment (arguments, devices, datasets).""" @@ -322,6 +331,28 @@ class HasEnvironment: kernel_invariants = getattr(self, "kernel_invariants", set()) self.kernel_invariants = kernel_invariants | {key} + @contextmanager + def interactive(self): + """Request arguments from the user interactively. + + This context manager returns a namespace object on which the method + `setattr_argument` should be called, with the usual semantics. + + When the context manager terminates, the experiment is blocked + and the user is presented with the requested argument widgets. 
+ After the user enters values, the experiment is resumed and + the namespace contains the values of the arguments.""" + interactive_arglist = [] + namespace = SimpleNamespace() + def setattr_argument(key, processor=None, group=None, tooltip=None): + interactive_arglist.append((key, processor, group, tooltip)) + namespace.setattr_argument = setattr_argument + yield namespace + del namespace.setattr_argument + argdict = self.__argument_mgr.get_interactive(interactive_arglist) + for key, value in argdict.items(): + setattr(namespace, key, value) + def get_device_db(self): """Returns the full contents of the device database.""" return self.__device_mgr.get_device_db() From a8a5fc213b2a08fb65506050f6e7fdb0cf248ee6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Tue, 27 Feb 2024 10:39:37 +0800 Subject: [PATCH 155/296] worker_impl: style fixes (NFC) --- artiq/master/worker_impl.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/artiq/master/worker_impl.py b/artiq/master/worker_impl.py index ef6e5ab16..d4b9893a6 100644 --- a/artiq/master/worker_impl.py +++ b/artiq/master/worker_impl.py @@ -200,9 +200,7 @@ def examine(device_mgr, dataset_mgr, file): name = name[:-1] argument_mgr = TraceArgumentManager() scheduler_defaults = {} - cls = exp_class( # noqa: F841 (fill argument_mgr) - (device_mgr, dataset_mgr, argument_mgr, scheduler_defaults) - ) + exp_class((device_mgr, dataset_mgr, argument_mgr, scheduler_defaults)) arginfo = OrderedDict( (k, (proc.describe(), group, tooltip)) for k, (proc, group, tooltip) in argument_mgr.requested_args.items() @@ -325,8 +323,8 @@ def main(): rid, obj["pipeline_name"], expid, obj["priority"]) start_local_time = time.localtime(start_time) dirname = os.path.join("results", - time.strftime("%Y-%m-%d", start_local_time), - time.strftime("%H", start_local_time)) + time.strftime("%Y-%m-%d", start_local_time), + time.strftime("%H", start_local_time)) os.makedirs(dirname, exist_ok=True) os.chdir(dirname) argument_mgr = ProcessArgumentManager(expid["arguments"]) From b2ba087acdf362202d6489030bde35264d8abafb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Tue, 27 Feb 2024 11:09:37 +0800 Subject: [PATCH 156/296] dashboard: do not use float64. 
Closes #2347 --- artiq/dashboard/datasets.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/artiq/dashboard/datasets.py b/artiq/dashboard/datasets.py index 97146bb33..f7c996849 100644 --- a/artiq/dashboard/datasets.py +++ b/artiq/dashboard/datasets.py @@ -81,8 +81,7 @@ class CreateEditDialog(QtWidgets.QDialog): t = value.dtype if value is np.ndarray else type(value) if scale != 1 and np.issubdtype(t, np.number): # degenerates to float type - value_edit_string = self.value_to_edit_string( - np.float64(value / scale)) + value_edit_string = self.value_to_edit_string(value / scale) self.unit_widget.setText(metadata.get('unit', '')) self.scale_widget.setText(str(metadata.get('scale', ''))) self.precision_widget.setText(str(metadata.get('precision', ''))) @@ -109,7 +108,7 @@ class CreateEditDialog(QtWidgets.QDialog): t = value.dtype if value is np.ndarray else type(value) if scale != 1 and np.issubdtype(t, np.number): # degenerates to float type - value = np.float64(value * scale) + value = float(value * scale) if self.key and self.key != key: asyncio.ensure_future(exc_to_warning(rename(self.key, key, value, metadata, persist, self.dataset_ctl))) else: From bafa69098a182a764119ba6633ad7e40a5009b38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Tue, 27 Feb 2024 11:10:21 +0800 Subject: [PATCH 157/296] style/doc fixes (NFC) --- artiq/frontend/artiq_master.py | 14 +++++++------- artiq/frontend/artiq_run.py | 3 ++- artiq/master/scheduler.py | 4 ++-- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/artiq/frontend/artiq_master.py b/artiq/frontend/artiq_master.py index f61d6cf52..3e5d4dd11 100755 --- a/artiq/frontend/artiq_master.py +++ b/artiq/frontend/artiq_master.py @@ -57,10 +57,10 @@ def get_argparser(): log_args(parser) parser.add_argument("--name", - help="friendly name, displayed in dashboards " - "to identify master instead of server address") - parser.add_argument("--log-submissions", default=None, - help="set the filename to create the experiment subimission") + help="friendly name, displayed in dashboards " + "to identify master instead of server address") + parser.add_argument("--log-submissions", default=None, + help="log experiment submissions to specified file") return parser @@ -81,8 +81,7 @@ def main(): bind, args.port_broadcast)) atexit_register_coroutine(server_broadcast.stop, loop=loop) - log_forwarder.callback = (lambda msg: - server_broadcast.broadcast("log", msg)) + log_forwarder.callback = lambda msg: server_broadcast.broadcast("log", msg) def ccb_issue(service, *args, **kwargs): msg = { "service": service, @@ -106,7 +105,8 @@ def main(): repo_backend, worker_handlers, args.experiment_subdir) atexit.register(experiment_db.close) - scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db, args.log_submissions) + scheduler = Scheduler(RIDCounter(), worker_handlers, experiment_db, + args.log_submissions) scheduler.start(loop=loop) atexit_register_coroutine(scheduler.stop, loop=loop) diff --git a/artiq/frontend/artiq_run.py b/artiq/frontend/artiq_run.py index 19d83e787..d46bad022 100755 --- a/artiq/frontend/artiq_run.py +++ b/artiq/frontend/artiq_run.py @@ -178,7 +178,8 @@ class ArgumentManager(ProcessArgumentManager): user_input_deser = pyon.decode(user_input) value = processor.process(user_input_deser) except: - logger.error("failed to process user input, retrying", exc_info=True) + logger.error("failed to process user input, retrying", + exc_info=True) else: success = True result[key] = value diff 
--git a/artiq/master/scheduler.py b/artiq/master/scheduler.py index 451e5c84c..7d4179a63 100644 --- a/artiq/master/scheduler.py +++ b/artiq/master/scheduler.py @@ -156,7 +156,7 @@ class RunPool: run = Run(rid, pipeline_name, wd, expid, priority, due_date, flush, self, repo_msg=repo_msg) - if self.log_submissions is not None: + if self.log_submissions is not None: self.log_submission(rid, expid) self.runs[rid] = run self.state_changed.notify() @@ -514,4 +514,4 @@ class Scheduler: if run.termination_requested: return True return False - \ No newline at end of file + From c794e51c1c9f83d2fd6a06ec52a92a68b0355c7f Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 26 Feb 2024 13:01:16 +0800 Subject: [PATCH 158/296] waveform: fix log msg display --- artiq/dashboard/waveform.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index ff796fed4..34b855727 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -371,9 +371,11 @@ class LogWaveform(_BaseWaveform): self._labels = [] self.plot_data_item.setData( x=self.x_data, y=np.ones(len(self.x_data))) - old_msg = "" - old_x = 0 - for x, msg in data: + if len(data) == 0: + return + old_x = data[0][0] + old_msg = data[0][1] + for x, msg in data[1:]: if x == old_x: old_msg += "\n" + msg else: From c151f0c3ce82b0e0172221a6dc54df9781f6c324 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 26 Feb 2024 12:49:47 +0800 Subject: [PATCH 159/296] waveform: remove unused setTimescale --- artiq/dashboard/waveform.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 34b855727..40ec948ae 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -180,9 +180,6 @@ class _BaseWaveform(pg.PlotWidget): self.stopped_x = stopped_x self.view_box.setLimits(xMax=stopped_x) - def setTimescale(self, timescale): - self.timescale = timescale - def setData(self, data): if len(data) == 0: self.x_data, self.y_data = [], [] @@ -465,8 +462,6 @@ class _WaveformView(QtWidgets.QWidget): def setTimescale(self, timescale): self._timescale = timescale self._top.setScale(1e-12 * timescale) - for i in range(self._model.rowCount()): - self._splitter.widget(i).setTimescale(timescale) def setStoppedX(self, stopped_x): self._stopped_x = stopped_x @@ -522,7 +517,6 @@ class _WaveformView(QtWidgets.QWidget): w = waveform_cls(name, width, precision, unit, parent=self._splitter) w.setXLink(self._ref_vb) w.setStoppedX(self._stopped_x) - w.setTimescale(self._timescale) w.cursorMove.connect(self.cursorMove) w.onCursorMove(self._cursor_x) action = QtWidgets.QAction("Delete waveform", w) From a21805598aa9bcff807631db1f9128def720c925 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 27 Feb 2024 10:37:00 +0800 Subject: [PATCH 160/296] dashboard fix moninj, analyzer clients --- artiq/coredevice/comm_analyzer.py | 2 +- artiq/dashboard/waveform.py | 42 +++++++++++++++++-------------- artiq/frontend/artiq_dashboard.py | 5 ++-- 3 files changed, 26 insertions(+), 23 deletions(-) diff --git a/artiq/coredevice/comm_analyzer.py b/artiq/coredevice/comm_analyzer.py index 9ac0c8010..ea4e8757a 100644 --- a/artiq/coredevice/comm_analyzer.py +++ b/artiq/coredevice/comm_analyzer.py @@ -168,7 +168,7 @@ class AnalyzerProxyReceiver: del self.writer raise - async def close(self): + def close(self): self.disconnect_cb = None if self.receive_task is not None: self.receive_task.cancel() diff --git 
a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 40ec948ae..718321f4a 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -29,22 +29,18 @@ WAVEFORM_MAX_HEIGHT = 200 class ProxyClient(): - def __init__(self, receive_cb, disconnect_cb): + def __init__(self, receive_cb, timeout=5, timer=5, timer_backoff=1.1): self.receive_cb = receive_cb - self.disconnect_cb = disconnect_cb self.receiver = None self.addr = None self.port_proxy = None self.port = None self._reconnect_event = asyncio.Event() - self._reconnect_task = None - - async def start(self, timeout=5, timer=5, timer_backoff=1.1): self.timeout = timeout self.timer = timer self.timer_cur = timer self.timer_backoff = timer_backoff - self._reconnect_task = asyncio.create_task(self._reconnect()) + self._reconnect_task = asyncio.ensure_future(self._reconnect()) def update_address(self, addr, port, port_proxy): self.addr = addr @@ -56,6 +52,9 @@ class ProxyClient(): remote = AsyncioClient() try: try: + if self.addr is None: + logger.error("missing core_analyzer host in device db") + return await remote.connect_rpc(self.addr, self.port, "coreanalyzer_proxy_control") except: logger.error("error connecting to analyzer proxy control", exc_info=True) @@ -72,14 +71,15 @@ class ProxyClient(): await self._reconnect_event.wait() self._reconnect_event.clear() if self.receiver is not None: - await self.receiver.close() + self.receiver.close() self.receiver = None self.receiver = comm_analyzer.AnalyzerProxyReceiver( - self.receive_cb, self.disconnect_cb) + self.receive_cb, self.reconnect) try: - await asyncio.wait_for(self.receiver.connect(self.addr, self.port_proxy), - self.timeout) - logger.info("connected to analyzer proxy %s:%d", self.addr, self.port_proxy) + if self.addr is not None: + await asyncio.wait_for(self.receiver.connect(self.addr, self.port_proxy), + self.timeout) + logger.info("connected to analyzer proxy %s:%d", self.addr, self.port_proxy) self.timer_cur = self.timer continue except: @@ -97,9 +97,11 @@ class ProxyClient(): async def close(self): self._reconnect_task.cancel() try: - await self.receiver.close() - except: - logger.error("error closing connection to analyzer proxy", exc_info=True) + await asyncio.wait_for(self._reconnect_task, None) + except asyncio.CancelledError: + pass + if self.receiver is not None: + self.receiver.close() def reconnect(self): self._reconnect_event.set() @@ -692,7 +694,7 @@ class _AddChannelDialog(QtWidgets.QDialog): class WaveformDock(QtWidgets.QDockWidget): - def __init__(self): + def __init__(self, timeout, timer, timer_backoff): QtWidgets.QDockWidget.__init__(self, "Waveform") self.setObjectName("Waveform") self.setFeatures( @@ -714,7 +716,10 @@ class WaveformDock(QtWidgets.QDockWidget): self._current_dir = os.getcwd() self.devices_sub = Subscriber("devices", self.init_ddb, self.update_ddb) - self.proxy_client = ProxyClient(self.on_dump_receive, self.on_proxy_disconnect) + self.proxy_client = ProxyClient(self.on_dump_receive, + timeout, + timer, + timer_backoff) grid = LayoutWidget() self.setWidget(grid) @@ -795,9 +800,6 @@ class WaveformDock(QtWidgets.QDockWidget): self._waveform_view.setTimescale(self._waveform_data['timescale']) self._cursor_control.setTimescale(self._waveform_data['timescale']) - def on_proxy_disconnect(self): - self.proxy_client.reconnect() - async def load_trace(self): try: filename = await get_open_file_name( @@ -899,6 +901,8 @@ class WaveformDock(QtWidgets.QDockWidget): port_proxy = desc.get("port_proxy", 1385) port = desc.get("port", 
1386) self.proxy_client.update_address(addr, port, port_proxy) + else: + self.proxy_client.update_address(None, None, None) def init_ddb(self, ddb): self._ddb = ddb diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index f6a1c4504..f906240a3 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ -229,12 +229,11 @@ def main(): loop.run_until_complete(d_ttl_dds.start(args.server, args.port_notify)) atexit_register_coroutine(d_ttl_dds.stop, loop=loop) - d_waveform = waveform.WaveformDock() - loop.run_until_complete(d_waveform.proxy_client.start( + d_waveform = waveform.WaveformDock( args.analyzer_proxy_timeout, args.analyzer_proxy_timer, args.analyzer_proxy_timer_backoff - )) + ) atexit_register_coroutine(d_waveform.proxy_client.close, loop=loop) loop.run_until_complete(d_waveform.devices_sub.connect(args.server, args.port_notify)) atexit_register_coroutine(d_waveform.devices_sub.close, loop=loop) From 5e016614435974e4dc31061d5bdbd425115969d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Tue, 27 Feb 2024 11:21:19 +0800 Subject: [PATCH 161/296] master.databases: style (NFC) --- artiq/master/databases.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/artiq/master/databases.py b/artiq/master/databases.py index ccdd6d022..a24085446 100644 --- a/artiq/master/databases.py +++ b/artiq/master/databases.py @@ -2,7 +2,8 @@ import asyncio import lmdb -from sipyco.sync_struct import Notifier, process_mod, ModAction, update_from_dict +from sipyco.sync_struct import (Notifier, process_mod, ModAction, + update_from_dict) from sipyco import pyon from sipyco.asyncio_tools import TaskObject @@ -60,12 +61,13 @@ class DatasetDB(TaskObject): def save(self): with self.lmdb.begin(write=True) as txn: for key in self.pending_keys: - if key not in self.data.raw_view or not self.data.raw_view[key][0]: + if (key not in self.data.raw_view + or not self.data.raw_view[key][0]): txn.delete(key.encode()) else: value_and_metadata = (self.data.raw_view[key][1], self.data.raw_view[key][2]) - txn.put(key.encode(), + txn.put(key.encode(), pyon.encode(value_and_metadata).encode()) self.pending_keys.clear() @@ -87,7 +89,8 @@ class DatasetDB(TaskObject): if mod["path"]: key = mod["path"][0] else: - assert(mod["action"] == ModAction.setitem.value or mod["action"] == ModAction.delitem.value) + assert (mod["action"] == ModAction.setitem.value + or mod["action"] == ModAction.delitem.value) key = mod["key"] self.pending_keys.add(key) process_mod(self.data, mod) From 3609f95207ab19878d4717174516aada742641f9 Mon Sep 17 00:00:00 2001 From: Florian Agbuya Date: Tue, 27 Feb 2024 11:57:15 +0800 Subject: [PATCH 162/296] flake: add new lmdb mock module for artiq-manual Signed-off-by: Florian Agbuya --- doc/manual/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manual/conf.py b/doc/manual/conf.py index af1f56e82..33032b259 100644 --- a/doc/manual/conf.py +++ b/doc/manual/conf.py @@ -35,7 +35,7 @@ mock_modules = ["artiq.gui.waitingspinnerwidget", "artiq.compiler.module", "artiq.compiler.embedding", "artiq.dashboard.waveform", - "qasync", "pyqtgraph", "matplotlib", + "qasync", "pyqtgraph", "matplotlib", "lmdb", "numpy", "dateutil", "dateutil.parser", "prettytable", "PyQt5", "h5py", "serial", "scipy", "scipy.interpolate", "llvmlite", "Levenshtein", "pythonparser", From 92eb3947a40c7b2bd3c31a649e3ad1c95667c5fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: 
Tue, 27 Feb 2024 15:20:48 +0800 Subject: [PATCH 163/296] master: shorten RPC target names --- artiq/applets/simple.py | 2 +- artiq/browser/datasets.py | 2 +- artiq/examples/artiq_ipython_notebook.ipynb | 2 +- artiq/frontend/artiq_client.py | 14 +++++++------- artiq/frontend/artiq_dashboard.py | 2 +- artiq/frontend/artiq_master.py | 8 ++++---- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/artiq/applets/simple.py b/artiq/applets/simple.py index cd980bb34..a77496081 100644 --- a/artiq/applets/simple.py +++ b/artiq/applets/simple.py @@ -255,7 +255,7 @@ class SimpleApplet: if self.embed is None: dataset_ctl = RPCClient() self.loop.run_until_complete(dataset_ctl.connect_rpc( - self.args.server, self.args.port_control, "master_dataset_db")) + self.args.server, self.args.port_control, "dataset_db")) self.req = AppletRequestRPC(self.loop, dataset_ctl) else: self.req = AppletRequestIPC(self.ipc) diff --git a/artiq/browser/datasets.py b/artiq/browser/datasets.py index 815f7acbf..6e5473786 100644 --- a/artiq/browser/datasets.py +++ b/artiq/browser/datasets.py @@ -33,7 +33,7 @@ class DatasetCtl: try: remote = RPCClient() await remote.connect_rpc(self.master_host, self.master_port, - "master_dataset_db") + "dataset_db") try: if op_name == "set": await remote.set(key_or_mod, value, persist, metadata) diff --git a/artiq/examples/artiq_ipython_notebook.ipynb b/artiq/examples/artiq_ipython_notebook.ipynb index 7964cd5a4..5f988b3b9 100644 --- a/artiq/examples/artiq_ipython_notebook.ipynb +++ b/artiq/examples/artiq_ipython_notebook.ipynb @@ -127,7 +127,7 @@ "# let's connect to the master\n", "\n", "schedule, exps, datasets = [\n", - " Client(\"::1\", 3251, \"master_\" + i) for i in\n", + " Client(\"::1\", 3251, i) for i in\n", " \"schedule experiment_db dataset_db\".split()]\n", "\n", "print(\"current schedule\")\n", diff --git a/artiq/frontend/artiq_client.py b/artiq/frontend/artiq_client.py index d945f6853..a83cfd774 100755 --- a/artiq/frontend/artiq_client.py +++ b/artiq/frontend/artiq_client.py @@ -343,13 +343,13 @@ def main(): else: port = 3251 if args.port is None else args.port target_name = { - "submit": "master_schedule", - "delete": "master_schedule", - "set_dataset": "master_dataset_db", - "del_dataset": "master_dataset_db", - "scan_devices": "master_device_db", - "scan_repository": "master_experiment_db", - "ls": "master_experiment_db", + "submit": "schedule", + "delete": "schedule", + "set_dataset": "dataset_db", + "del_dataset": "dataset_db", + "scan_devices": "device_db", + "scan_repository": "experiment_db", + "ls": "experiment_db", "terminate": "master_management", }[action] remote = Client(args.server, port, target_name) diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index f906240a3..e9c6269d3 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ -143,7 +143,7 @@ def main(): for target in "schedule", "experiment_db", "dataset_db", "device_db": client = AsyncioClient() loop.run_until_complete(client.connect_rpc( - args.server, args.port_control, "master_" + target)) + args.server, args.port_control, target)) atexit.register(client.close_rpc) rpc_clients[target] = client diff --git a/artiq/frontend/artiq_master.py b/artiq/frontend/artiq_master.py index 3e5d4dd11..074d50e48 100755 --- a/artiq/frontend/artiq_master.py +++ b/artiq/frontend/artiq_master.py @@ -133,10 +133,10 @@ def main(): server_control = RPCServer({ "master_management": master_management, - "master_device_db": device_db, - 
"master_dataset_db": dataset_db, - "master_schedule": scheduler, - "master_experiment_db": experiment_db, + "device_db": device_db, + "dataset_db": dataset_db, + "schedule": scheduler, + "experiment_db": experiment_db, }, allow_parallel=True) loop.run_until_complete(server_control.start( bind, args.port_control)) From 002325be176ec638d7c010435ceadba528e422e7 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 27 Feb 2024 13:09:22 +0800 Subject: [PATCH 164/296] applets: rename params --- artiq/applets/simple.py | 12 +++---- artiq/gui/applets.py | 72 ++++++++++++++++++++--------------------- 2 files changed, 42 insertions(+), 42 deletions(-) diff --git a/artiq/applets/simple.py b/artiq/applets/simple.py index a77496081..fcc5a4217 100644 --- a/artiq/applets/simple.py +++ b/artiq/applets/simple.py @@ -42,13 +42,13 @@ class _AppletRequestInterface: """ raise NotImplementedError - def set_argument_value(self, expurl, name, value): + def set_argument_value(self, expurl, key, value): """ Temporarily set the value of an argument in a experiment in the dashboard. The value resets to default value when recomputing the argument. :param expurl: Experiment URL identifying the experiment in the dashboard. Example: 'repo:ArgumentsDemo'. - :param name: Name of the argument in the experiment. + :param key: Name of the argument in the experiment. :param value: Object representing the new temporary value of the argument. For ``Scannable`` arguments, this parameter should be a ``ScanObject``. The type of the ``ScanObject`` will be set as the selected type when this function is called. """ @@ -77,10 +77,10 @@ class AppletRequestIPC(_AppletRequestInterface): mod = {"action": "append", "path": [key, 1], "x": value} self.ipc.update_dataset(mod) - def set_argument_value(self, expurl, name, value): + def set_argument_value(self, expurl, key, value): if isinstance(value, ScanObject): value = value.describe() - self.ipc.set_argument_value(expurl, name, value) + self.ipc.set_argument_value(expurl, key, value) class AppletRequestRPC(_AppletRequestInterface): @@ -182,10 +182,10 @@ class AppletIPCClient(AsyncioChildComm): self.write_pyon({"action": "update_dataset", "mod": mod}) - def set_argument_value(self, expurl, name, value): + def set_argument_value(self, expurl, key, value): self.write_pyon({"action": "set_argument_value", "expurl": expurl, - "name": name, + "key": key, "value": value}) diff --git a/artiq/gui/applets.py b/artiq/gui/applets.py index 411075ec6..de282c846 100644 --- a/artiq/gui/applets.py +++ b/artiq/gui/applets.py @@ -67,19 +67,19 @@ class EntryArea(QtWidgets.QTreeWidget): self.setItemWidget(self.bottom_item, 1, buttons) self.bottom_item.setHidden(True) - def setattr_argument(self, name, proc, group=None, tooltip=None): + def setattr_argument(self, key, processor, group=None, tooltip=None): argument = dict() - desc = proc.describe() + desc = processor.describe() argument["desc"] = desc argument["group"] = group argument["tooltip"] = tooltip - self._arguments[name] = argument + self._arguments[key] = argument widgets = dict() - self._arg_to_widgets[name] = widgets + self._arg_to_widgets[key] = widgets entry_class = procdesc_to_entry(argument["desc"]) argument["state"] = entry_class.default_state(argument["desc"]) entry = entry_class(argument) - widget_item = QtWidgets.QTreeWidgetItem([name]) + widget_item = QtWidgets.QTreeWidgetItem([key]) if argument["tooltip"]: widget_item.setToolTip(0, argument["tooltip"]) widgets["entry"] = entry @@ -109,16 +109,16 @@ class EntryArea(QtWidgets.QTreeWidget): 
reset_value.setIcon( QtWidgets.QApplication.style().standardIcon( QtWidgets.QStyle.SP_BrowserReload)) - reset_value.clicked.connect(partial(self.reset_value, name)) + reset_value.clicked.connect(partial(self.reset_value, key)) tool_buttons = LayoutWidget() tool_buttons.addWidget(reset_value, 0) self.setItemWidget(widget_item, 2, tool_buttons) - def _get_group(self, name): - if name in self._groups: - return self._groups[name] - group = QtWidgets.QTreeWidgetItem([name]) + def _get_group(self, key): + if key in self._groups: + return self._groups[key] + group = QtWidgets.QTreeWidgetItem([key]) for col in range(3): group.setBackground(col, self.palette().mid()) group.setForeground(col, self.palette().brightText()) @@ -126,40 +126,40 @@ class EntryArea(QtWidgets.QTreeWidget): font.setBold(True) group.setFont(col, font) self.insertTopLevelItem(self.indexFromItem(self.bottom_item).row(), group) - self._groups[name] = group + self._groups[key] = group return group - def __getattr__(self, name): - return self.get_value(name) + def __getattr__(self, key): + return self.get_value(key) - def get_value(self, name): - entry = self._arg_to_widgets[name]["entry"] - argument = self._arguments[name] + def get_value(self, key): + entry = self._arg_to_widgets[key]["entry"] + argument = self._arguments[key] return entry.state_to_value(argument["state"]) - def set_value(self, name, value): - ty = self._arguments[name]["desc"]["ty"] + def set_value(self, key, value): + ty = self._arguments[key]["desc"]["ty"] if ty == "Scannable": desc = value.describe() - self._arguments[name]["state"][desc["ty"]] = desc - self._arguments[name]["state"]["selected"] = desc["ty"] + self._arguments[key]["state"][desc["ty"]] = desc + self._arguments[key]["state"]["selected"] = desc["ty"] else: - self._arguments[name]["state"] = value - self.update_value(name) + self._arguments[key]["state"] = value + self.update_value(key) def get_values(self): d = dict() - for name in self._arguments.keys(): - d[name] = self.get_value(name) + for key in self._arguments.keys(): + d[key] = self.get_value(key) return d def set_values(self, values): - for name, value in values.items(): - self.set_value(name, value) + for key, value in values.items(): + self.set_value(key, value) - def update_value(self, name): - widgets = self._arg_to_widgets[name] - argument = self._arguments[name] + def update_value(self, key): + widgets = self._arg_to_widgets[key] + argument = self._arguments[key] # Qt needs a setItemWidget() to handle layout correctly, # simply replacing the entry inside the LayoutWidget @@ -173,14 +173,14 @@ class EntryArea(QtWidgets.QTreeWidget): self.setItemWidget(widgets["widget_item"], 1, widgets["fix_layout"]) self.updateGeometries() - def reset_value(self, name): - procdesc = self._arguments[name]["desc"] - self._arguments[name]["state"] = procdesc_to_entry(procdesc).default_state(procdesc) - self.update_value(name) + def reset_value(self, key): + procdesc = self._arguments[key]["desc"] + self._arguments[key]["state"] = procdesc_to_entry(procdesc).default_state(procdesc) + self.update_value(key) def reset_all(self): - for name in self._arguments.keys(): - self.reset_value(name) + for key in self._arguments.keys(): + self.reset_value(key) class AppletIPCServer(AsyncioParentComm): @@ -255,7 +255,7 @@ class AppletIPCServer(AsyncioParentComm): elif action == "update_dataset": await self.dataset_ctl.update(obj["mod"]) elif action == "set_argument_value": - self.expmgr.set_argument_value(obj["expurl"], obj["name"], obj["value"]) + 
self.expmgr.set_argument_value(obj["expurl"], obj["key"], obj["value"]) else: raise ValueError("unknown action in applet message") except: From 450fe91e938c1e15ce01ccaf23806cd118d6820a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Tue, 27 Feb 2024 15:46:23 +0800 Subject: [PATCH 165/296] artiq_client: handle Ctrl-C gracefully --- artiq/frontend/artiq_client.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/artiq/frontend/artiq_client.py b/artiq/frontend/artiq_client.py index a83cfd774..00f2db602 100755 --- a/artiq/frontend/artiq_client.py +++ b/artiq/frontend/artiq_client.py @@ -22,6 +22,7 @@ from sipyco.pc_rpc import Client from sipyco.sync_struct import Subscriber from sipyco.broadcast import Receiver from sipyco import common_args, pyon +from sipyco.asyncio_tools import SignalHandler from artiq.tools import (scale_from_metadata, short_format, parse_arguments, parse_devarg_override) @@ -278,13 +279,20 @@ def _run_subscriber(host, port, subscriber): loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) try: - loop.run_until_complete(subscriber.connect(host, port)) + signal_handler = SignalHandler() + signal_handler.setup() try: - loop.run_until_complete(asyncio.wait_for(subscriber.receive_task, - None)) - print("Connection to master lost") + loop.run_until_complete(subscriber.connect(host, port)) + try: + _, pending = loop.run_until_complete(asyncio.wait( + [signal_handler.wait_terminate(), subscriber.receive_task], + return_when=asyncio.FIRST_COMPLETED)) + for task in pending: + task.cancel() + finally: + loop.run_until_complete(subscriber.close()) finally: - loop.run_until_complete(subscriber.close()) + signal_handler.teardown() finally: loop.close() From 42d3c3b4b2943f24cc9792350a391b273cd19686 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Tue, 27 Feb 2024 16:52:36 +0800 Subject: [PATCH 166/296] session: workaround for stream.close interrupted --- artiq/firmware/runtime/session.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index 8c1a7aaee..52daaa2d0 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -976,7 +976,13 @@ pub fn thread(io: Io, aux_mutex: &Mutex, drtio::clear_buffers(&io, &aux_mutex); } } - stream.close().expect("session: close socket"); + loop { + match stream.close() { + Ok(_) => break, + Err(SchedError::Interrupted) => (), + Err(e) => panic!("session: close socket: {:?}", e) + }; + } }); } From de29db0b35b6841d1db372091fd7c2dc50235738 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Wed, 28 Feb 2024 11:49:33 +0800 Subject: [PATCH 167/296] master: implement interactive arguments Interaction with experiment termination (forceful and requested) still needs some work. 
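The patch below registers the new ``InteractiveArgDB`` on the master and exposes it as the ``interactive_arg_db`` RPC target (plus an ``interactive_args`` notifier). As a sketch of how a pending request could be answered programmatically, a client may call ``supply()`` over RPC; the RID, key and value here are illustrative, and the supplied keys must match the ones the experiment requested::

    from sipyco.pc_rpc import Client

    # Connect to the master's control port (3251 by default) and answer the
    # interactive-argument request of the experiment with RID 0.
    remote = Client("::1", 3251, "interactive_arg_db")
    try:
        remote.supply(0, {"repeats": 10.0})
    finally:
        remote.close_rpc()
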
--- artiq/frontend/artiq_master.py | 13 +++++++++++-- artiq/master/databases.py | 31 +++++++++++++++++++++++++++++++ artiq/master/worker.py | 8 +++++++- artiq/master/worker_impl.py | 14 +++++++++++++- 4 files changed, 62 insertions(+), 4 deletions(-) diff --git a/artiq/frontend/artiq_master.py b/artiq/frontend/artiq_master.py index 074d50e48..dfd22565b 100755 --- a/artiq/frontend/artiq_master.py +++ b/artiq/frontend/artiq_master.py @@ -15,7 +15,8 @@ from sipyco.asyncio_tools import atexit_register_coroutine, SignalHandler from artiq import __version__ as artiq_version from artiq.master.log import log_args, init_log -from artiq.master.databases import DeviceDB, DatasetDB +from artiq.master.databases import (DeviceDB, DatasetDB, + InteractiveArgDB) from artiq.master.scheduler import Scheduler from artiq.master.rid_counter import RIDCounter from artiq.master.experiments import (FilesystemBackend, GitBackend, @@ -95,6 +96,7 @@ def main(): atexit.register(dataset_db.close_db) dataset_db.start(loop=loop) atexit_register_coroutine(dataset_db.stop, loop=loop) + interactive_arg_db = InteractiveArgDB() worker_handlers = dict() if args.git: @@ -110,11 +112,16 @@ def main(): scheduler.start(loop=loop) atexit_register_coroutine(scheduler.stop, loop=loop) + # Python doesn't allow writing attributes to bound methods. + def get_interactive_arguments(*args, **kwargs): + return interactive_arg_db.get(*args, **kwargs) + get_interactive_arguments._worker_pass_rid = True worker_handlers.update({ "get_device_db": device_db.get_device_db, "get_device": device_db.get, "get_dataset": dataset_db.get, "update_dataset": dataset_db.update, + "get_interactive_arguments": get_interactive_arguments, "scheduler_submit": scheduler.submit, "scheduler_delete": scheduler.delete, "scheduler_request_termination": scheduler.request_termination, @@ -135,6 +142,7 @@ def main(): "master_management": master_management, "device_db": device_db, "dataset_db": dataset_db, + "interactive_arg_db": interactive_arg_db, "schedule": scheduler, "experiment_db": experiment_db, }, allow_parallel=True) @@ -146,8 +154,9 @@ def main(): "schedule": scheduler.notifier, "devices": device_db.data, "datasets": dataset_db.data, + "interactive_args": interactive_arg_db.pending, "explist": experiment_db.explist, - "explist_status": experiment_db.status + "explist_status": experiment_db.status, }) loop.run_until_complete(server_notify.start( bind, args.port_notify)) diff --git a/artiq/master/databases.py b/artiq/master/databases.py index a24085446..c34a01300 100644 --- a/artiq/master/databases.py +++ b/artiq/master/databases.py @@ -114,3 +114,34 @@ class DatasetDB(TaskObject): del self.data[key] self.pending_keys.add(key) # + + +class InteractiveArgDB: + def __init__(self): + self.pending = Notifier(dict()) + self.futures = dict() + + async def get(self, rid, arglist_desc): + self.pending[rid] = arglist_desc + self.futures[rid] = asyncio.get_running_loop().create_future() + try: + value = await self.futures[rid] + finally: + del self.pending[rid] + del self.futures[rid] + return value + + def supply(self, rid, values): + # quick sanity checks + if rid not in self.futures: + raise ValueError("no experiment with this RID is " + "waiting for interactive arguments") + if {i[0] for i in self.pending.raw_view[rid]} != set(values.keys()): + raise ValueError("supplied and requested keys do not match") + self.futures[rid].set_result(values) + + def cancel(self, rid): + if rid not in self.futures: + raise ValueError("no experiment with this RID is " + "waiting for 
interactive arguments") + self.futures[rid].cancel() diff --git a/artiq/master/worker.py b/artiq/master/worker.py index ca83dc7db..ddcf57802 100644 --- a/artiq/master/worker.py +++ b/artiq/master/worker.py @@ -226,7 +226,13 @@ class Worker: else: func = self.handlers[action] try: - data = func(*obj["args"], **obj["kwargs"]) + if getattr(func, "_worker_pass_rid", False): + args = [self.rid] + list(obj["args"]) + else: + args = obj["args"] + data = func(*args, **obj["kwargs"]) + if asyncio.iscoroutine(data): + data = await data reply = {"status": "ok", "data": data} except: reply = { diff --git a/artiq/master/worker_impl.py b/artiq/master/worker_impl.py index d4b9893a6..72e5326ad 100644 --- a/artiq/master/worker_impl.py +++ b/artiq/master/worker_impl.py @@ -215,6 +215,18 @@ def examine(device_mgr, dataset_mgr, file): del sys.modules[key] +class ArgumentManager(ProcessArgumentManager): + _get_interactive = make_parent_action("get_interactive_arguments") + + def get_interactive(self, interactive_arglist): + arglist_desc = [(k, p.describe(), g, t) + for k, p, g, t in interactive_arglist] + arguments = ArgumentManager._get_interactive(arglist_desc) + for key, processor, _, _ in interactive_arglist: + arguments[key] = processor.process(arguments[key]) + return arguments + + def setup_diagnostics(experiment_file, repository_path): def render_diagnostic(self, diagnostic): message = "While compiling {}\n".format(experiment_file) + \ @@ -327,7 +339,7 @@ def main(): time.strftime("%H", start_local_time)) os.makedirs(dirname, exist_ok=True) os.chdir(dirname) - argument_mgr = ProcessArgumentManager(expid["arguments"]) + argument_mgr = ArgumentManager(expid["arguments"]) exp_inst = exp((device_mgr, dataset_mgr, argument_mgr, {})) argument_mgr.check_unprocessed_arguments() put_completed() From 3e8a853e530ab360b67057f037afe5a0f9767f67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Wed, 28 Feb 2024 11:51:30 +0800 Subject: [PATCH 168/296] artiq_client: implement interactive arguments --- artiq/frontend/artiq_client.py | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/artiq/frontend/artiq_client.py b/artiq/frontend/artiq_client.py index 00f2db602..05e3707bb 100755 --- a/artiq/frontend/artiq_client.py +++ b/artiq/frontend/artiq_client.py @@ -113,11 +113,20 @@ def get_argparser(): "del-dataset", help="delete a dataset") parser_del_dataset.add_argument("name", help="name of the dataset") + parser_supply_interactive = subparsers.add_parser( + "supply-interactive", help="supply interactive arguments") + parser_supply_interactive.add_argument( + "rid", metavar="RID", type=int, help="RID of target experiment") + parser_supply_interactive.add_argument( + "arguments", metavar="ARGUMENTS", nargs="*", + help="interactive arguments") + parser_show = subparsers.add_parser( "show", help="show schedule, log, devices or datasets") parser_show.add_argument( "what", metavar="WHAT", - choices=["schedule", "log", "ccb", "devices", "datasets"], + choices=["schedule", "log", "ccb", "devices", "datasets", + "interactive-args"], help="select object to show: %(choices)s") subparsers.add_parser( @@ -136,8 +145,7 @@ def get_argparser(): "ls", help="list a directory on the master") parser_ls.add_argument("directory", default="", nargs="?") - subparsers.add_parser( - "terminate", help="terminate the ARTIQ master") + subparsers.add_parser("terminate", help="terminate the ARTIQ master") common_args.verbosity_args(parser) return parser @@ -209,6 +217,11 @@ def 
_action_scan_devices(remote, args): remote.scan() +def _action_supply_interactive(remote, args): + arguments = parse_arguments(args.arguments) + remote.supply(args.rid, arguments) + + def _action_scan_repository(remote, args): if getattr(args, "async"): remote.scan_repository_async(args.revision) @@ -275,6 +288,15 @@ def _show_datasets(datasets): print(table) +def _show_interactive_args(interactive_args): + clear_screen() + table = PrettyTable(["RID", "Key", "Type", "Group", "Tooltip"]) + for rid, args in sorted(interactive_args.items(), key=itemgetter(0)): + for key, procdesc, group, tooltip in args: + table.add_row([rid, key, procdesc["ty"], group, tooltip]) + print(table) + + def _run_subscriber(host, port, subscriber): loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) @@ -346,6 +368,8 @@ def main(): _show_dict(args, "devices", _show_devices) elif args.what == "datasets": _show_dict(args, "datasets", _show_datasets) + elif args.what == "interactive-args": + _show_dict(args, "interactive_args", _show_interactive_args) else: raise ValueError else: @@ -356,6 +380,7 @@ def main(): "set_dataset": "dataset_db", "del_dataset": "dataset_db", "scan_devices": "device_db", + "supply_interactive": "interactive_arg_db", "scan_repository": "experiment_db", "ls": "experiment_db", "terminate": "master_management", From 18f55bb19657f87fbf431649d319a9711acbb88b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Wed, 28 Feb 2024 12:48:07 +0800 Subject: [PATCH 169/296] master: fix asyncio exception handling Follow Python 3.8. --- artiq/master/scheduler.py | 6 +++--- artiq/master/worker.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/artiq/master/scheduler.py b/artiq/master/scheduler.py index 7d4179a63..037e012bd 100644 --- a/artiq/master/scheduler.py +++ b/artiq/master/scheduler.py @@ -239,7 +239,7 @@ class PrepareStage(TaskObject): try: await run.build() await run.prepare() - except: + except Exception: logger.error("got worker exception in prepare stage, " "deleting RID %d", run.rid) log_worker_exception() @@ -289,7 +289,7 @@ class RunStage(TaskObject): else: run.status = RunStatus.running completed = await run.run() - except: + except Exception: logger.error("got worker exception in run stage, " "deleting RID %d", run.rid) log_worker_exception() @@ -326,7 +326,7 @@ class AnalyzeStage(TaskObject): run.status = RunStatus.analyzing try: await run.analyze() - except: + except Exception: logger.error("got worker exception in analyze stage of RID %d.", run.rid) log_worker_exception() diff --git a/artiq/master/worker.py b/artiq/master/worker.py index ddcf57802..0353f9731 100644 --- a/artiq/master/worker.py +++ b/artiq/master/worker.py @@ -234,7 +234,7 @@ class Worker: if asyncio.iscoroutine(data): data = await data reply = {"status": "ok", "data": data} - except: + except Exception: reply = { "status": "failed", "exception": current_exc_packed() From 692572a3b9e2a43bcc5404927641655d6b989f1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Wed, 28 Feb 2024 12:48:31 +0800 Subject: [PATCH 170/296] style (NFC) --- artiq/master/scheduler.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/artiq/master/scheduler.py b/artiq/master/scheduler.py index 037e012bd..18e16f90d 100644 --- a/artiq/master/scheduler.py +++ b/artiq/master/scheduler.py @@ -510,8 +510,7 @@ class Scheduler: """Returns ``True`` if termination is requested.""" for pipeline in self._pipelines.values(): if rid in pipeline.pool.runs: - run = 
pipeline.pool.runs[rid] + run = pipeline.pool.runs[rid] if run.termination_requested: return True return False - From e56331248eed93de15d0c088dea87d6bcd465433 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 28 Feb 2024 17:04:51 +0800 Subject: [PATCH 171/296] dashboard: fix device subscriber connections --- artiq/coredevice/comm_analyzer.py | 21 ++++++----- artiq/dashboard/moninj.py | 11 +----- artiq/dashboard/waveform.py | 62 +++++++++++++++---------------- artiq/frontend/artiq_dashboard.py | 14 +++++-- 4 files changed, 54 insertions(+), 54 deletions(-) diff --git a/artiq/coredevice/comm_analyzer.py b/artiq/coredevice/comm_analyzer.py index ea4e8757a..9fad6666a 100644 --- a/artiq/coredevice/comm_analyzer.py +++ b/artiq/coredevice/comm_analyzer.py @@ -151,8 +151,6 @@ class AnalyzerProxyReceiver: def __init__(self, receive_cb, disconnect_cb=None): self.receive_cb = receive_cb self.disconnect_cb = disconnect_cb - self.receive_task = None - self.writer = None async def connect(self, host, port): self.reader, self.writer = \ @@ -162,17 +160,20 @@ class AnalyzerProxyReceiver: assert line == ANALYZER_MAGIC self.receive_task = asyncio.create_task(self._receive_cr()) except: - if self.writer is not None: - self.writer.close() - del self.reader - del self.writer + self.writer.close() + del self.reader + del self.writer raise - def close(self): + async def close(self): self.disconnect_cb = None - if self.receive_task is not None: + try: self.receive_task.cancel() - if self.writer is not None: + try: + await self.receive_task + except asyncio.CancelledError: + pass + finally: self.writer.close() del self.reader del self.writer @@ -200,6 +201,8 @@ class AnalyzerProxyReceiver: remaining_data = await self.reader.readexactly(payload_length + 11) data = endian_byte + payload_length_word + remaining_data self.receive_cb(data) + except Exception: + logger.error("analyzer receiver connection terminating with exception", exc_info=True) finally: if self.disconnect_cb is not None: self.disconnect_cb() diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index 3ee97b897..4810f4d34 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -5,8 +5,6 @@ from collections import namedtuple from PyQt5 import QtCore, QtWidgets, QtGui -from sipyco.sync_struct import Subscriber - from artiq.coredevice.comm_moninj import * from artiq.coredevice.ad9910 import ( _AD9910_REG_PROFILE0, _AD9910_REG_PROFILE7, @@ -458,9 +456,8 @@ class _DeviceManager: def init_ddb(self, ddb): self.ddb = ddb - return ddb - def notify(self, mod): + def notify_ddb(self, mod): mi_addr, mi_port, description = setup_from_ddb(self.ddb) if (mi_addr, mi_port) != (self.mi_addr, self.mi_port): @@ -786,12 +783,6 @@ class MonInj: self.dm.dac_cb = lambda: self.dac_dock.layout_widgets( self.dm.dac_widgets.values()) - self.subscriber = Subscriber("devices", self.dm.init_ddb, self.dm.notify) - - async def start(self, server, port): - await self.subscriber.connect(server, port) - async def stop(self): - await self.subscriber.close() if self.dm is not None: await self.dm.close() diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 718321f4a..a0eda50a3 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -10,7 +10,6 @@ from PyQt5 import QtCore, QtWidgets, QtGui import pyqtgraph as pg import numpy as np -from sipyco.sync_struct import Subscriber from sipyco.pc_rpc import AsyncioClient from sipyco import pyon @@ -66,33 +65,31 @@ class ProxyClient(): remote.close_rpc() async def 
_reconnect(self): - try: - while True: - await self._reconnect_event.wait() - self._reconnect_event.clear() - if self.receiver is not None: - self.receiver.close() - self.receiver = None - self.receiver = comm_analyzer.AnalyzerProxyReceiver( - self.receive_cb, self.reconnect) - try: - if self.addr is not None: - await asyncio.wait_for(self.receiver.connect(self.addr, self.port_proxy), - self.timeout) - logger.info("connected to analyzer proxy %s:%d", self.addr, self.port_proxy) + while True: + await self._reconnect_event.wait() + self._reconnect_event.clear() + if self.receiver is not None: + await self.receiver.close() + self.receiver = None + new_receiver = comm_analyzer.AnalyzerProxyReceiver( + self.receive_cb, self.reconnect) + try: + if self.addr is not None: + await asyncio.wait_for(new_receiver.connect(self.addr, self.port_proxy), + self.timeout) + logger.info("connected to analyzer proxy %s:%d", self.addr, self.port_proxy) self.timer_cur = self.timer - continue - except: - logger.error("error connecting to analyzer proxy", exc_info=True) - try: - await asyncio.wait_for(self._reconnect_event.wait(), self.timer_cur) - except asyncio.TimeoutError: - self.timer_cur *= self.timer_backoff - self._reconnect_event.set() - else: - self.timer_cur = self.timer - except asyncio.CancelledError: - pass + self.receiver = new_receiver + continue + except Exception: + logger.error("error connecting to analyzer proxy", exc_info=True) + try: + await asyncio.wait_for(self._reconnect_event.wait(), self.timer_cur) + except asyncio.TimeoutError: + self.timer_cur *= self.timer_backoff + self._reconnect_event.set() + else: + self.timer_cur = self.timer async def close(self): self._reconnect_task.cancel() @@ -101,7 +98,7 @@ class ProxyClient(): except asyncio.CancelledError: pass if self.receiver is not None: - self.receiver.close() + await self.receiver.close() def reconnect(self): self._reconnect_event.set() @@ -715,8 +712,7 @@ class WaveformDock(QtWidgets.QDockWidget): self._current_dir = os.getcwd() - self.devices_sub = Subscriber("devices", self.init_ddb, self.update_ddb) - self.proxy_client = ProxyClient(self.on_dump_receive, + self.proxy_client = ProxyClient(self.on_dump_receive, timeout, timer, timer_backoff) @@ -909,5 +905,9 @@ class WaveformDock(QtWidgets.QDockWidget): self._process_ddb() return ddb - def update_ddb(self, mod): + def notify_ddb(self, mod): self._process_ddb() + + async def stop(self): + if self.proxy_client is not None: + await self.proxy_client.close() diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index e9c6269d3..05bc5e531 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ -15,6 +15,7 @@ from sipyco.pc_rpc import AsyncioClient, Client from sipyco.broadcast import Receiver from sipyco import common_args from sipyco.asyncio_tools import atexit_register_coroutine +from sipyco.sync_struct import Subscriber from artiq import __artiq_dir__ as artiq_dir, __version__ as artiq_version from artiq.tools import get_user_config_dir @@ -226,7 +227,6 @@ def main(): broadcast_clients["ccb"].notify_cbs.append(d_applets.ccb_notify) d_ttl_dds = moninj.MonInj(rpc_clients["schedule"]) - loop.run_until_complete(d_ttl_dds.start(args.server, args.port_notify)) atexit_register_coroutine(d_ttl_dds.stop, loop=loop) d_waveform = waveform.WaveformDock( @@ -234,9 +234,15 @@ def main(): args.analyzer_proxy_timer, args.analyzer_proxy_timer_backoff ) - atexit_register_coroutine(d_waveform.proxy_client.close, loop=loop) - 
loop.run_until_complete(d_waveform.devices_sub.connect(args.server, args.port_notify)) - atexit_register_coroutine(d_waveform.devices_sub.close, loop=loop) + atexit_register_coroutine(d_waveform.stop, loop=loop) + + def init_cbs(ddb): + d_ttl_dds.dm.init_ddb(ddb) + d_waveform.init_ddb(ddb) + return ddb + devices_sub = Subscriber("devices", init_cbs, [d_ttl_dds.dm.notify_ddb, d_waveform.notify_ddb]) + loop.run_until_complete(devices_sub.connect(args.server, args.port_notify)) + atexit_register_coroutine(devices_sub.close, loop=loop) d_schedule = schedule.ScheduleDock( rpc_clients["schedule"], sub_clients["schedule"]) From 27178c147868c5dadce4decef4164d34f7071509 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 6 Mar 2024 16:32:22 +0800 Subject: [PATCH 172/296] moninj: remove CancelledError workaround --- artiq/coredevice/comm_moninj.py | 4 +--- artiq/dashboard/moninj.py | 5 +---- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/artiq/coredevice/comm_moninj.py b/artiq/coredevice/comm_moninj.py index e02da526c..1de7f50df 100644 --- a/artiq/coredevice/comm_moninj.py +++ b/artiq/coredevice/comm_moninj.py @@ -94,9 +94,7 @@ class CommMonInj: self.injection_status_cb(channel, override, value) else: raise ValueError("Unknown packet type", ty) - except asyncio.CancelledError: - raise - except: + except Exception: logger.error("Moninj connection terminating with exception", exc_info=True) finally: if self.disconnect_cb is not None: diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index 4810f4d34..c3fc75dee 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -719,10 +719,7 @@ class _DeviceManager: self.disconnect_cb) try: await new_mi_connection.connect(self.mi_addr, self.mi_port) - except asyncio.CancelledError: - logger.info("cancelled connection to moninj") - break - except: + except Exception: logger.error("failed to connect to moninj. Is aqctl_moninj_proxy running?", exc_info=True) await asyncio.sleep(10.) 
self.reconnect_mi.set() From 332c9c0fcdb91716288052e696fde3ef45916acd Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 7 Mar 2024 10:40:44 +0800 Subject: [PATCH 173/296] waveform: consistent log messages --- artiq/dashboard/waveform.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index a0eda50a3..5153dd807 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -45,7 +45,7 @@ class ProxyClient(): self.addr = addr self.port = port self.port_proxy = port_proxy - self.reconnect() + self._reconnect_event.set() async def trigger_proxy_task(self): remote = AsyncioClient() @@ -72,12 +72,12 @@ class ProxyClient(): await self.receiver.close() self.receiver = None new_receiver = comm_analyzer.AnalyzerProxyReceiver( - self.receive_cb, self.reconnect) + self.receive_cb, self.disconnect_cb) try: if self.addr is not None: await asyncio.wait_for(new_receiver.connect(self.addr, self.port_proxy), self.timeout) - logger.info("connected to analyzer proxy %s:%d", self.addr, self.port_proxy) + logger.info("ARTIQ dashboard connected to analyzer proxy (%s)", self.addr) self.timer_cur = self.timer self.receiver = new_receiver continue @@ -100,7 +100,8 @@ class ProxyClient(): if self.receiver is not None: await self.receiver.close() - def reconnect(self): + def disconnect_cb(self): + logger.error("lost connection to analyzer proxy") self._reconnect_event.set() From 7e6ed1655f4cee769c46ff434c306e9fed43ff2c Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 11 Mar 2024 12:25:24 +0800 Subject: [PATCH 174/296] artiq_client: fix deprecated wait usage --- artiq/frontend/artiq_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/artiq/frontend/artiq_client.py b/artiq/frontend/artiq_client.py index 05e3707bb..0bf0631a7 100755 --- a/artiq/frontend/artiq_client.py +++ b/artiq/frontend/artiq_client.py @@ -307,7 +307,7 @@ def _run_subscriber(host, port, subscriber): loop.run_until_complete(subscriber.connect(host, port)) try: _, pending = loop.run_until_complete(asyncio.wait( - [signal_handler.wait_terminate(), subscriber.receive_task], + [loop.create_task(signal_handler.wait_terminate()), subscriber.receive_task], return_when=asyncio.FIRST_COMPLETED)) for task in pending: task.cancel() From 609684664a431790bd509eb7fee51056934ac769 Mon Sep 17 00:00:00 2001 From: morgan Date: Mon, 11 Mar 2024 16:32:23 +0800 Subject: [PATCH 175/296] coredevice schema: add enable_wrpll option to json --- artiq/coredevice/coredevice_generic.schema.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/artiq/coredevice/coredevice_generic.schema.json b/artiq/coredevice/coredevice_generic.schema.json index ce7385ea4..13de70c20 100644 --- a/artiq/coredevice/coredevice_generic.schema.json +++ b/artiq/coredevice/coredevice_generic.schema.json @@ -49,6 +49,10 @@ "default": 125e6, "description": "RTIO frequency" }, + "enable_wrpll": { + "type": "boolean", + "default": false + }, "core_addr": { "type": "string", "format": "ipv4", From c4323e1179aa0b9c9b4c135f894f267715cf2391 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 11 Mar 2024 17:07:25 +0800 Subject: [PATCH 176/296] interactive args: add title param --- artiq/frontend/artiq_client.py | 9 +++++---- artiq/frontend/artiq_run.py | 3 ++- artiq/language/environment.py | 8 ++++---- artiq/master/databases.py | 6 +++--- artiq/master/worker_impl.py | 4 ++-- 5 files changed, 16 insertions(+), 14 deletions(-) diff --git a/artiq/frontend/artiq_client.py 
b/artiq/frontend/artiq_client.py index 0bf0631a7..f552c1f33 100755 --- a/artiq/frontend/artiq_client.py +++ b/artiq/frontend/artiq_client.py @@ -290,10 +290,11 @@ def _show_datasets(datasets): def _show_interactive_args(interactive_args): clear_screen() - table = PrettyTable(["RID", "Key", "Type", "Group", "Tooltip"]) - for rid, args in sorted(interactive_args.items(), key=itemgetter(0)): - for key, procdesc, group, tooltip in args: - table.add_row([rid, key, procdesc["ty"], group, tooltip]) + table = PrettyTable(["RID", "Title", "Key", "Type", "Group", "Tooltip"]) + for rid, input_request in sorted(interactive_args.items(), key=itemgetter(0)): + title = input_request["title"] + for key, procdesc, group, tooltip in input_request["arglist_desc"]: + table.add_row([rid, title, key, procdesc["ty"], group, tooltip]) print(table) diff --git a/artiq/frontend/artiq_run.py b/artiq/frontend/artiq_run.py index d46bad022..ea6a50c00 100755 --- a/artiq/frontend/artiq_run.py +++ b/artiq/frontend/artiq_run.py @@ -167,7 +167,8 @@ def get_argparser(with_file=True): class ArgumentManager(ProcessArgumentManager): - def get_interactive(self, interactive_arglist): + def get_interactive(self, interactive_arglist, title): + print(title) result = dict() for key, processor, group, tooltip in interactive_arglist: success = False diff --git a/artiq/language/environment.py b/artiq/language/environment.py index 797f1eeeb..8b4076a4c 100644 --- a/artiq/language/environment.py +++ b/artiq/language/environment.py @@ -214,7 +214,7 @@ class TraceArgumentManager: self.requested_args[key] = processor, group, tooltip return None - def get_interactive(self, interactive_arglist): + def get_interactive(self, interactive_arglist, title): raise NotImplementedError @@ -238,7 +238,7 @@ class ProcessArgumentManager: raise AttributeError("Supplied argument(s) not queried in experiment: " + ", ".join(unprocessed)) - def get_interactive(self, interactive_arglist): + def get_interactive(self, interactive_arglist, title): raise NotImplementedError @@ -332,7 +332,7 @@ class HasEnvironment: self.kernel_invariants = kernel_invariants | {key} @contextmanager - def interactive(self): + def interactive(self, title=""): """Request arguments from the user interactively. 
This context manager returns a namespace object on which the method @@ -349,7 +349,7 @@ class HasEnvironment: namespace.setattr_argument = setattr_argument yield namespace del namespace.setattr_argument - argdict = self.__argument_mgr.get_interactive(interactive_arglist) + argdict = self.__argument_mgr.get_interactive(interactive_arglist, title) for key, value in argdict.items(): setattr(namespace, key, value) diff --git a/artiq/master/databases.py b/artiq/master/databases.py index c34a01300..7a4a6b153 100644 --- a/artiq/master/databases.py +++ b/artiq/master/databases.py @@ -121,8 +121,8 @@ class InteractiveArgDB: self.pending = Notifier(dict()) self.futures = dict() - async def get(self, rid, arglist_desc): - self.pending[rid] = arglist_desc + async def get(self, rid, arglist_desc, title): + self.pending[rid] = {"title": title, "arglist_desc": arglist_desc} self.futures[rid] = asyncio.get_running_loop().create_future() try: value = await self.futures[rid] @@ -136,7 +136,7 @@ class InteractiveArgDB: if rid not in self.futures: raise ValueError("no experiment with this RID is " "waiting for interactive arguments") - if {i[0] for i in self.pending.raw_view[rid]} != set(values.keys()): + if {i[0] for i in self.pending.raw_view[rid]["arglist_desc"]} != set(values.keys()): raise ValueError("supplied and requested keys do not match") self.futures[rid].set_result(values) diff --git a/artiq/master/worker_impl.py b/artiq/master/worker_impl.py index 72e5326ad..65c7f2a88 100644 --- a/artiq/master/worker_impl.py +++ b/artiq/master/worker_impl.py @@ -218,10 +218,10 @@ def examine(device_mgr, dataset_mgr, file): class ArgumentManager(ProcessArgumentManager): _get_interactive = make_parent_action("get_interactive_arguments") - def get_interactive(self, interactive_arglist): + def get_interactive(self, interactive_arglist, title): arglist_desc = [(k, p.describe(), g, t) for k, p, g, t in interactive_arglist] - arguments = ArgumentManager._get_interactive(arglist_desc) + arguments = ArgumentManager._get_interactive(arglist_desc, title) for key, processor, _, _ in interactive_arglist: arguments[key] = processor.process(arguments[key]) return arguments From 244c73a592a1d3c3280d47421a331d529ec2ca72 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 19 Mar 2024 11:03:27 +0800 Subject: [PATCH 177/296] entries: add EntryTreeWidget --- artiq/gui/entries.py | 155 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 154 insertions(+), 1 deletion(-) diff --git a/artiq/gui/entries.py b/artiq/gui/entries.py index 8b9bf4788..58044ef8c 100644 --- a/artiq/gui/entries.py +++ b/artiq/gui/entries.py @@ -1,9 +1,10 @@ import logging from collections import OrderedDict +from functools import partial from PyQt5 import QtCore, QtGui, QtWidgets -from artiq.gui.tools import LayoutWidget, disable_scroll_wheel +from artiq.gui.tools import LayoutWidget, disable_scroll_wheel, WheelFilter from artiq.gui.scanwidget import ScanWidget from artiq.gui.scientific_spinbox import ScientificSpinBox @@ -11,6 +12,158 @@ from artiq.gui.scientific_spinbox import ScientificSpinBox logger = logging.getLogger(__name__) +class EntryTreeWidget(QtWidgets.QTreeWidget): + def __init__(self): + QtWidgets.QTreeWidget.__init__(self) + self.setColumnCount(3) + self.header().setStretchLastSection(False) + if hasattr(self.header(), "setSectionResizeMode"): + set_resize_mode = self.header().setSectionResizeMode + else: + set_resize_mode = self.header().setResizeMode + set_resize_mode(0, QtWidgets.QHeaderView.ResizeToContents) + set_resize_mode(1, 
QtWidgets.QHeaderView.Stretch) + set_resize_mode(2, QtWidgets.QHeaderView.ResizeToContents) + self.header().setVisible(False) + self.setSelectionMode(self.NoSelection) + self.setHorizontalScrollMode(self.ScrollPerPixel) + self.setVerticalScrollMode(self.ScrollPerPixel) + + self.setStyleSheet("QTreeWidget {background: " + + self.palette().midlight().color().name() + " ;}") + + self.viewport().installEventFilter(WheelFilter(self.viewport(), True)) + + self._groups = dict() + self._arg_to_widgets = dict() + self._arguments = dict() + + self.gradient = QtGui.QLinearGradient( + 0, 0, 0, QtGui.QFontMetrics(self.font()).lineSpacing() * 2.5) + self.gradient.setColorAt(0, self.palette().base().color()) + self.gradient.setColorAt(1, self.palette().midlight().color()) + + self.bottom_item = QtWidgets.QTreeWidgetItem() + self.addTopLevelItem(self.bottom_item) + self.bottom_item.setHidden(True) + + def set_argument(self, key, argument): + self._arguments[key] = argument + widgets = dict() + self._arg_to_widgets[key] = widgets + entry_class = procdesc_to_entry(argument["desc"]) + argument["state"] = entry_class.default_state(argument["desc"]) + entry = entry_class(argument) + widget_item = QtWidgets.QTreeWidgetItem([key]) + if argument["tooltip"]: + widget_item.setToolTip(0, argument["tooltip"]) + widgets["entry"] = entry + widgets["widget_item"] = widget_item + + if len(self._arguments) > 1: + self.bottom_item.setHidden(False) + + for col in range(3): + widget_item.setBackground(col, self.gradient) + font = widget_item.font(0) + font.setBold(True) + widget_item.setFont(0, font) + + if argument["group"] is None: + self.insertTopLevelItem(self.indexFromItem(self.bottom_item).row(), widget_item) + else: + self._get_group(argument["group"]).addChild(widget_item) + self.bottom_item.setHidden(False) + fix_layout = LayoutWidget() + widgets["fix_layout"] = fix_layout + fix_layout.addWidget(entry) + self.setItemWidget(widget_item, 1, fix_layout) + + reset_entry = QtWidgets.QToolButton() + reset_entry.setToolTip("Reset to default value") + reset_entry.setIcon( + QtWidgets.QApplication.style().standardIcon( + QtWidgets.QStyle.SP_BrowserReload)) + reset_entry.clicked.connect(partial(self.reset_entry, key)) + + disable_other_scans = QtWidgets.QToolButton() + widgets["disable_other_scans"] = disable_other_scans + disable_other_scans.setIcon( + QtWidgets.QApplication.style().standardIcon( + QtWidgets.QStyle.SP_DialogResetButton)) + disable_other_scans.setToolTip("Disable other scans") + disable_other_scans.clicked.connect( + partial(self._disable_other_scans, key)) + if not isinstance(entry, ScanEntry): + disable_other_scans.setVisible(False) + + tool_buttons = LayoutWidget() + tool_buttons.layout.setRowStretch(0, 1) + tool_buttons.layout.setRowStretch(3, 1) + tool_buttons.addWidget(reset_entry, 1) + tool_buttons.addWidget(disable_other_scans, 2) + self.setItemWidget(widget_item, 2, tool_buttons) + + def _get_group(self, key): + if key in self._groups: + return self._groups[key] + group = QtWidgets.QTreeWidgetItem([key]) + for col in range(3): + group.setBackground(col, self.palette().mid()) + group.setForeground(col, self.palette().brightText()) + font = group.font(col) + font.setBold(True) + group.setFont(col, font) + self.insertTopLevelItem(self.indexFromItem(self.bottom_item).row(), group) + self._groups[key] = group + return group + + def _disable_other_scans(self, current_key): + for key, widgets in self._arg_to_widgets.items(): + if (key != current_key and isinstance(widgets["entry"], ScanEntry)): + 
widgets["entry"].disable() + + def update_argument(self, key, argument): + widgets = self._arg_to_widgets[key] + + # Qt needs a setItemWidget() to handle layout correctly, + # simply replacing the entry inside the LayoutWidget + # results in a bug. + + widgets["entry"].deleteLater() + widgets["entry"] = procdesc_to_entry(argument["desc"])(argument) + widgets["disable_other_scans"].setVisible( + isinstance(widgets["entry"], ScanEntry)) + widgets["fix_layout"].deleteLater() + widgets["fix_layout"] = LayoutWidget() + widgets["fix_layout"].addWidget(widgets["entry"]) + self.setItemWidget(widgets["widget_item"], 1, widgets["fix_layout"]) + self.updateGeometries() + + def reset_entry(self, key): + procdesc = self._arguments[key]["desc"] + self._arguments[key]["state"] = procdesc_to_entry(procdesc).default_state(procdesc) + self.update_argument(key, self._arguments[key]) + + def save_state(self): + expanded = [] + for k, v in self._groups.items(): + if v.isExpanded(): + expanded.append(k) + return { + "expanded": expanded, + "scroll": self.verticalScrollBar().value() + } + + def restore_state(self, state): + for e in state["expanded"]: + try: + self._groups[e].setExpanded(True) + except KeyError: + pass + self.verticalScrollBar().setValue(state["scroll"]) + + class StringEntry(QtWidgets.QLineEdit): def __init__(self, argument): QtWidgets.QLineEdit.__init__(self) From 6978101b1f66acb4a02299af7d65f60f5ee47e22 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 19 Mar 2024 11:03:54 +0800 Subject: [PATCH 178/296] applets: subclass EntryArea as EntryTreeWidget --- artiq/gui/applets.py | 118 ++++--------------------------------------- 1 file changed, 10 insertions(+), 108 deletions(-) diff --git a/artiq/gui/applets.py b/artiq/gui/applets.py index de282c846..0518d7c58 100644 --- a/artiq/gui/applets.py +++ b/artiq/gui/applets.py @@ -14,43 +14,16 @@ from sipyco.pipe_ipc import AsyncioParentComm from sipyco.logging_tools import LogParser from sipyco import pyon -from artiq.gui.entries import procdesc_to_entry -from artiq.gui.tools import (QDockWidgetCloseDetect, LayoutWidget, - WheelFilter) +from artiq.gui.entries import procdesc_to_entry, EntryTreeWidget +from artiq.gui.tools import QDockWidgetCloseDetect, LayoutWidget logger = logging.getLogger(__name__) -class EntryArea(QtWidgets.QTreeWidget): + +class EntryArea(EntryTreeWidget): def __init__(self): - QtWidgets.QTreeWidget.__init__(self) - self.setColumnCount(3) - self.header().setStretchLastSection(False) - if hasattr(self.header(), "setSectionResizeMode"): - set_resize_mode = self.header().setSectionResizeMode - else: - set_resize_mode = self.header().setResizeMode - set_resize_mode(0, QtWidgets.QHeaderView.ResizeToContents) - set_resize_mode(1, QtWidgets.QHeaderView.Stretch) - self.header().setVisible(False) - self.setSelectionMode(self.NoSelection) - self.setHorizontalScrollMode(self.ScrollPerPixel) - self.setVerticalScrollMode(self.ScrollPerPixel) - - self.setStyleSheet("QTreeWidget {background: " + - self.palette().midlight().color().name() + " ;}") - - self.viewport().installEventFilter(WheelFilter(self.viewport(), True)) - - self._groups = dict() - self._arg_to_widgets = dict() - self._arguments = dict() - - self.gradient = QtGui.QLinearGradient( - 0, 0, 0, QtGui.QFontMetrics(self.font()).lineSpacing()*2.5) - self.gradient.setColorAt(0, self.palette().base().color()) - self.gradient.setColorAt(1, self.palette().midlight().color()) - + EntryTreeWidget.__init__(self) reset_all_button = QtWidgets.QPushButton("Restore defaults") 
reset_all_button.setToolTip("Reset all to default values") reset_all_button.setIcon( @@ -62,73 +35,16 @@ class EntryArea(QtWidgets.QTreeWidget): buttons.layout.setColumnStretch(1, 0) buttons.layout.setColumnStretch(2, 1) buttons.addWidget(reset_all_button, 0, 1) - self.bottom_item = QtWidgets.QTreeWidgetItem() - self.addTopLevelItem(self.bottom_item) self.setItemWidget(self.bottom_item, 1, buttons) - self.bottom_item.setHidden(True) - + def setattr_argument(self, key, processor, group=None, tooltip=None): argument = dict() desc = processor.describe() argument["desc"] = desc argument["group"] = group argument["tooltip"] = tooltip - self._arguments[key] = argument - widgets = dict() - self._arg_to_widgets[key] = widgets - entry_class = procdesc_to_entry(argument["desc"]) - argument["state"] = entry_class.default_state(argument["desc"]) - entry = entry_class(argument) - widget_item = QtWidgets.QTreeWidgetItem([key]) - if argument["tooltip"]: - widget_item.setToolTip(0, argument["tooltip"]) - widgets["entry"] = entry - widgets["widget_item"] = widget_item + self.set_argument(key, argument) - if len(self._arguments) > 1: - self.bottom_item.setHidden(False) - - for col in range(3): - widget_item.setBackground(col, self.gradient) - font = widget_item.font(0) - font.setBold(True) - widget_item.setFont(0, font) - - if argument["group"] is None: - self.insertTopLevelItem(self.indexFromItem(self.bottom_item).row(), widget_item) - else: - self._get_group(argument["group"]).addChild(widget_item) - self.bottom_item.setHidden(False) - fix_layout = LayoutWidget() - widgets["fix_layout"] = fix_layout - fix_layout.addWidget(entry) - self.setItemWidget(widget_item, 1, fix_layout) - - reset_value = QtWidgets.QToolButton() - reset_value.setToolTip("Reset to default value") - reset_value.setIcon( - QtWidgets.QApplication.style().standardIcon( - QtWidgets.QStyle.SP_BrowserReload)) - reset_value.clicked.connect(partial(self.reset_value, key)) - - tool_buttons = LayoutWidget() - tool_buttons.addWidget(reset_value, 0) - self.setItemWidget(widget_item, 2, tool_buttons) - - def _get_group(self, key): - if key in self._groups: - return self._groups[key] - group = QtWidgets.QTreeWidgetItem([key]) - for col in range(3): - group.setBackground(col, self.palette().mid()) - group.setForeground(col, self.palette().brightText()) - font = group.font(col) - font.setBold(True) - group.setFont(col, font) - self.insertTopLevelItem(self.indexFromItem(self.bottom_item).row(), group) - self._groups[key] = group - return group - def __getattr__(self, key): return self.get_value(key) @@ -158,29 +74,15 @@ class EntryArea(QtWidgets.QTreeWidget): self.set_value(key, value) def update_value(self, key): - widgets = self._arg_to_widgets[key] argument = self._arguments[key] - - # Qt needs a setItemWidget() to handle layout correctly, - # simply replacing the entry inside the LayoutWidget - # results in a bug. 
- - widgets["entry"].deleteLater() - widgets["entry"] = procdesc_to_entry(argument["desc"])(argument) - widgets["fix_layout"].deleteLater() - widgets["fix_layout"] = LayoutWidget() - widgets["fix_layout"].addWidget(widgets["entry"]) - self.setItemWidget(widgets["widget_item"], 1, widgets["fix_layout"]) - self.updateGeometries() + self.update_argument(key, argument) def reset_value(self, key): - procdesc = self._arguments[key]["desc"] - self._arguments[key]["state"] = procdesc_to_entry(procdesc).default_state(procdesc) - self.update_value(key) + self.reset_entry(key) def reset_all(self): for key in self._arguments.keys(): - self.reset_value(key) + self.reset_entry(key) class AppletIPCServer(AsyncioParentComm): From 1a41b16fb63c5a2894d4bcb6604166673eccde63 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 19 Mar 2024 11:04:34 +0800 Subject: [PATCH 179/296] dashboard: subclass _ArgumentEditor as EntryTreeWidget --- artiq/dashboard/experiments.py | 150 ++------------------------------- 1 file changed, 8 insertions(+), 142 deletions(-) diff --git a/artiq/dashboard/experiments.py b/artiq/dashboard/experiments.py index a7db9047b..a1bc2ec00 100644 --- a/artiq/dashboard/experiments.py +++ b/artiq/dashboard/experiments.py @@ -9,10 +9,9 @@ import h5py from sipyco import pyon -from artiq.gui.entries import procdesc_to_entry, ScanEntry +from artiq.gui.entries import procdesc_to_entry, EntryTreeWidget from artiq.gui.fuzzy_select import FuzzySelectWidget -from artiq.gui.tools import (LayoutWidget, WheelFilter, - log_level_to_name, get_open_file_name) +from artiq.gui.tools import (LayoutWidget, log_level_to_name, get_open_file_name) from artiq.tools import parse_devarg_override, unparse_devarg_override @@ -25,99 +24,21 @@ logger = logging.getLogger(__name__) # 2. 
file:@ -class _ArgumentEditor(QtWidgets.QTreeWidget): +class _ArgumentEditor(EntryTreeWidget): def __init__(self, manager, dock, expurl): self.manager = manager self.expurl = expurl - QtWidgets.QTreeWidget.__init__(self) - self.setColumnCount(3) - self.header().setStretchLastSection(False) - if hasattr(self.header(), "setSectionResizeMode"): - set_resize_mode = self.header().setSectionResizeMode - else: - set_resize_mode = self.header().setResizeMode - set_resize_mode(0, QtWidgets.QHeaderView.ResizeToContents) - set_resize_mode(1, QtWidgets.QHeaderView.Stretch) - set_resize_mode(2, QtWidgets.QHeaderView.ResizeToContents) - self.header().setVisible(False) - self.setSelectionMode(self.NoSelection) - self.setHorizontalScrollMode(self.ScrollPerPixel) - self.setVerticalScrollMode(self.ScrollPerPixel) - - self.setStyleSheet("QTreeWidget {background: " + - self.palette().midlight().color().name() + " ;}") - - self.viewport().installEventFilter(WheelFilter(self.viewport(), True)) - - self._groups = dict() - self._arg_to_widgets = dict() + EntryTreeWidget.__init__(self) arguments = self.manager.get_submission_arguments(self.expurl) if not arguments: self.addTopLevelItem(QtWidgets.QTreeWidgetItem(["No arguments"])) - gradient = QtGui.QLinearGradient( - 0, 0, 0, QtGui.QFontMetrics(self.font()).lineSpacing()*2.5) - gradient.setColorAt(0, self.palette().base().color()) - gradient.setColorAt(1, self.palette().midlight().color()) for name, argument in arguments.items(): - widgets = dict() - self._arg_to_widgets[name] = widgets + self.set_argument(name, argument) - entry = procdesc_to_entry(argument["desc"])(argument) - widget_item = QtWidgets.QTreeWidgetItem([name]) - if argument["tooltip"]: - widget_item.setToolTip(0, argument["tooltip"]) - widgets["entry"] = entry - widgets["widget_item"] = widget_item - - for col in range(3): - widget_item.setBackground(col, gradient) - font = widget_item.font(0) - font.setBold(True) - widget_item.setFont(0, font) - - if argument["group"] is None: - self.addTopLevelItem(widget_item) - else: - self._get_group(argument["group"]).addChild(widget_item) - fix_layout = LayoutWidget() - widgets["fix_layout"] = fix_layout - fix_layout.addWidget(entry) - self.setItemWidget(widget_item, 1, fix_layout) - recompute_argument = QtWidgets.QToolButton() - recompute_argument.setToolTip("Re-run the experiment's build " - "method and take the default value") - recompute_argument.setIcon( - QtWidgets.QApplication.style().standardIcon( - QtWidgets.QStyle.SP_BrowserReload)) - recompute_argument.clicked.connect( - partial(self._recompute_argument_clicked, name)) - - tool_buttons = LayoutWidget() - tool_buttons.addWidget(recompute_argument, 1) - - disable_other_scans = QtWidgets.QToolButton() - widgets["disable_other_scans"] = disable_other_scans - disable_other_scans.setIcon( - QtWidgets.QApplication.style().standardIcon( - QtWidgets.QStyle.SP_DialogResetButton)) - disable_other_scans.setToolTip("Disable all other scans in " - "this experiment") - disable_other_scans.clicked.connect( - partial(self._disable_other_scans, name)) - tool_buttons.layout.setRowStretch(0, 1) - tool_buttons.layout.setRowStretch(3, 1) - tool_buttons.addWidget(disable_other_scans, 2) - if not isinstance(entry, ScanEntry): - disable_other_scans.setVisible(False) - - self.setItemWidget(widget_item, 2, tool_buttons) - - widget_item = QtWidgets.QTreeWidgetItem() - self.addTopLevelItem(widget_item) recompute_arguments = QtWidgets.QPushButton("Recompute all arguments") recompute_arguments.setIcon( 
QtWidgets.QApplication.style().standardIcon( @@ -136,41 +57,10 @@ class _ArgumentEditor(QtWidgets.QTreeWidget): buttons.layout.setColumnStretch(1, 0) buttons.layout.setColumnStretch(2, 0) buttons.layout.setColumnStretch(3, 1) - self.setItemWidget(widget_item, 1, buttons) + self.setItemWidget(self.bottom_item, 1, buttons) - def _get_group(self, name): - if name in self._groups: - return self._groups[name] - group = QtWidgets.QTreeWidgetItem([name]) - for col in range(3): - group.setBackground(col, self.palette().mid()) - group.setForeground(col, self.palette().brightText()) - font = group.font(col) - font.setBold(True) - group.setFont(col, font) - self.addTopLevelItem(group) - self._groups[name] = group - return group - - def update_argument(self, name, argument): - widgets = self._arg_to_widgets[name] - - # Qt needs a setItemWidget() to handle layout correctly, - # simply replacing the entry inside the LayoutWidget - # results in a bug. - - widgets["entry"].deleteLater() - widgets["entry"] = procdesc_to_entry(argument["desc"])(argument) - widgets["disable_other_scans"].setVisible( - isinstance(widgets["entry"], ScanEntry)) - widgets["fix_layout"].deleteLater() - widgets["fix_layout"] = LayoutWidget() - widgets["fix_layout"].addWidget(widgets["entry"]) - self.setItemWidget(widgets["widget_item"], 1, widgets["fix_layout"]) - self.updateGeometries() - - def _recompute_argument_clicked(self, name): - asyncio.ensure_future(self._recompute_argument(name)) + def reset_entry(self, key): + asyncio.ensure_future(self._recompute_argument(key)) async def _recompute_argument(self, name): try: @@ -187,30 +77,6 @@ class _ArgumentEditor(QtWidgets.QTreeWidget): argument["state"] = state self.update_argument(name, argument) - def _disable_other_scans(self, current_name): - for name, widgets in self._arg_to_widgets.items(): - if (name != current_name - and isinstance(widgets["entry"], ScanEntry)): - widgets["entry"].disable() - - def save_state(self): - expanded = [] - for k, v in self._groups.items(): - if v.isExpanded(): - expanded.append(k) - return { - "expanded": expanded, - "scroll": self.verticalScrollBar().value() - } - - def restore_state(self, state): - for e in state["expanded"]: - try: - self._groups[e].setExpanded(True) - except KeyError: - pass - self.verticalScrollBar().setValue(state["scroll"]) - # Hooks that allow user-supplied argument editors to react to imminent user # actions. Here, we always keep the manager-stored submission arguments # up-to-date, so no further action is required. 
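For reference, the EntryTreeWidget API introduced in PATCH 177 and reused by the applet, dashboard and browser editors above can be exercised on its own. The following minimal sketch is not taken from these patches; it assumes a standalone PyQt5 session and borrows the NumberValue example values from the interactive-arguments demo later in this series, only to illustrate the argument dictionary layout that set_argument() expects:

    # Minimal usage sketch, assuming a standalone PyQt5 environment;
    # not part of the patch series itself.
    from PyQt5 import QtWidgets
    from artiq.gui.entries import EntryTreeWidget
    from artiq.language.environment import NumberValue

    app = QtWidgets.QApplication([])
    tree = EntryTreeWidget()
    # set_argument() derives argument["state"] from the procdesc itself;
    # callers only supply the "desc", "group" and "tooltip" keys.
    procdesc = NumberValue(42e-6, unit="us", precision=4).describe()
    tree.set_argument("number", {"desc": procdesc, "group": None, "tooltip": None})
    tree.show()
    app.exec_()
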
From 88438e2d764229d0fd700ba38b767123c706105e Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 19 Mar 2024 11:05:05 +0800 Subject: [PATCH 180/296] browser: subclass _ArgumentEditor as EntryTreeWidget --- artiq/browser/experiments.py | 119 +++-------------------------------- 1 file changed, 10 insertions(+), 109 deletions(-) diff --git a/artiq/browser/experiments.py b/artiq/browser/experiments.py index 7f74f90bb..4c0e42f77 100644 --- a/artiq/browser/experiments.py +++ b/artiq/browser/experiments.py @@ -10,87 +10,24 @@ import h5py from sipyco import pyon from artiq import __artiq_dir__ as artiq_dir -from artiq.gui.tools import (LayoutWidget, WheelFilter, - log_level_to_name, get_open_file_name) -from artiq.gui.entries import procdesc_to_entry +from artiq.gui.tools import (LayoutWidget, log_level_to_name, get_open_file_name) +from artiq.gui.entries import procdesc_to_entry, EntryTreeWidget from artiq.master.worker import Worker, log_worker_exception logger = logging.getLogger(__name__) -class _ArgumentEditor(QtWidgets.QTreeWidget): +class _ArgumentEditor(EntryTreeWidget): def __init__(self, dock): - QtWidgets.QTreeWidget.__init__(self) - self.setColumnCount(3) - self.header().setStretchLastSection(False) - try: - set_resize_mode = self.header().setSectionResizeMode - except AttributeError: - set_resize_mode = self.header().setResizeMode - set_resize_mode(0, QtWidgets.QHeaderView.ResizeToContents) - set_resize_mode(1, QtWidgets.QHeaderView.Stretch) - set_resize_mode(2, QtWidgets.QHeaderView.ResizeToContents) - self.header().setVisible(False) - self.setSelectionMode(self.NoSelection) - self.setHorizontalScrollMode(self.ScrollPerPixel) - self.setVerticalScrollMode(self.ScrollPerPixel) - - self.setStyleSheet("QTreeWidget {background: " + - self.palette().midlight().color().name() + " ;}") - - self.viewport().installEventFilter(WheelFilter(self.viewport(), True)) - - self._groups = dict() - self._arg_to_widgets = dict() + EntryTreeWidget.__init__(self) self._dock = dock if not self._dock.arguments: self.addTopLevelItem(QtWidgets.QTreeWidgetItem(["No arguments"])) - gradient = QtGui.QLinearGradient( - 0, 0, 0, QtGui.QFontMetrics(self.font()).lineSpacing()*2.5) - gradient.setColorAt(0, self.palette().base().color()) - gradient.setColorAt(1, self.palette().midlight().color()) for name, argument in self._dock.arguments.items(): - widgets = dict() - self._arg_to_widgets[name] = widgets + self.set_argument(name, argument) - entry = procdesc_to_entry(argument["desc"])(argument) - widget_item = QtWidgets.QTreeWidgetItem([name]) - if argument["tooltip"]: - widget_item.setToolTip(0, argument["tooltip"]) - widgets["entry"] = entry - widgets["widget_item"] = widget_item - - for col in range(3): - widget_item.setBackground(col, gradient) - font = widget_item.font(0) - font.setBold(True) - widget_item.setFont(0, font) - - if argument["group"] is None: - self.addTopLevelItem(widget_item) - else: - self._get_group(argument["group"]).addChild(widget_item) - fix_layout = LayoutWidget() - widgets["fix_layout"] = fix_layout - fix_layout.addWidget(entry) - self.setItemWidget(widget_item, 1, fix_layout) - - recompute_argument = QtWidgets.QToolButton() - recompute_argument.setToolTip("Re-run the experiment's build " - "method and take the default value") - recompute_argument.setIcon( - QtWidgets.QApplication.style().standardIcon( - QtWidgets.QStyle.SP_BrowserReload)) - recompute_argument.clicked.connect( - partial(self._recompute_argument_clicked, name)) - fix_layout = LayoutWidget() - 
fix_layout.addWidget(recompute_argument) - self.setItemWidget(widget_item, 2, fix_layout) - - widget_item = QtWidgets.QTreeWidgetItem() - self.addTopLevelItem(widget_item) recompute_arguments = QtWidgets.QPushButton("Recompute all arguments") recompute_arguments.setIcon( QtWidgets.QApplication.style().standardIcon( @@ -100,7 +37,7 @@ class _ArgumentEditor(QtWidgets.QTreeWidget): load = QtWidgets.QPushButton("Set arguments from HDF5") load.setToolTip("Set arguments from currently selected HDF5 file") load.setIcon(QtWidgets.QApplication.style().standardIcon( - QtWidgets.QStyle.SP_DialogApplyButton)) + QtWidgets.QStyle.SP_DialogApplyButton)) load.clicked.connect(self._load_clicked) buttons = LayoutWidget() @@ -108,21 +45,7 @@ class _ArgumentEditor(QtWidgets.QTreeWidget): buttons.addWidget(load, 1, 2) for i, s in enumerate((1, 0, 0, 1)): buttons.layout.setColumnStretch(i, s) - self.setItemWidget(widget_item, 1, buttons) - - def _get_group(self, name): - if name in self._groups: - return self._groups[name] - group = QtWidgets.QTreeWidgetItem([name]) - for col in range(3): - group.setBackground(col, self.palette().mid()) - group.setForeground(col, self.palette().brightText()) - font = group.font(col) - font.setBold(True) - group.setFont(col, font) - self.addTopLevelItem(group) - self._groups[name] = group - return group + self.setItemWidget(self.bottom_item, 1, buttons) def _load_clicked(self): asyncio.ensure_future(self._dock.load_hdf5_task()) @@ -130,8 +53,8 @@ class _ArgumentEditor(QtWidgets.QTreeWidget): def _recompute_arguments_clicked(self): asyncio.ensure_future(self._dock._recompute_arguments()) - def _recompute_argument_clicked(self, name): - asyncio.ensure_future(self._recompute_argument(name)) + def reset_entry(self, key): + asyncio.ensure_future(self._recompute_argument(key)) async def _recompute_argument(self, name): try: @@ -146,29 +69,7 @@ class _ArgumentEditor(QtWidgets.QTreeWidget): state = procdesc_to_entry(procdesc).default_state(procdesc) argument["desc"] = procdesc argument["state"] = state - - widgets = self._arg_to_widgets[name] - - widgets["entry"].deleteLater() - widgets["entry"] = procdesc_to_entry(procdesc)(argument) - widgets["fix_layout"] = LayoutWidget() - widgets["fix_layout"].addWidget(widgets["entry"]) - self.setItemWidget(widgets["widget_item"], 1, widgets["fix_layout"]) - self.updateGeometries() - - def save_state(self): - expanded = [] - for k, v in self._groups.items(): - if v.isExpanded(): - expanded.append(k) - return {"expanded": expanded} - - def restore_state(self, state): - for e in state["expanded"]: - try: - self._groups[e].setExpanded(True) - except KeyError: - pass + self.update_argument(name, argument) log_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] From 13a36bf9118fff8f8bf45cf2be27fe3ce2d8b9a0 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 20 Mar 2024 10:18:47 +0800 Subject: [PATCH 181/296] browser, dashboard: fix restore scrollbar state --- artiq/browser/experiments.py | 2 +- artiq/dashboard/experiments.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/artiq/browser/experiments.py b/artiq/browser/experiments.py index 4c0e42f77..8e41063a9 100644 --- a/artiq/browser/experiments.py +++ b/artiq/browser/experiments.py @@ -178,8 +178,8 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): state = self.argeditor.save_state() self.argeditor.deleteLater() self.argeditor = _ArgumentEditor(self) - self.argeditor.restore_state(state) self.layout.addWidget(self.argeditor, 0, 0, 1, 5) + 
self.argeditor.restore_state(state) async def load_hdf5_task(self, filename=None): if filename is None: diff --git a/artiq/dashboard/experiments.py b/artiq/dashboard/experiments.py index a1bc2ec00..ba493f93c 100644 --- a/artiq/dashboard/experiments.py +++ b/artiq/dashboard/experiments.py @@ -299,8 +299,8 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): editor_class = self.manager.get_argument_editor_class(self.expurl) self.argeditor = editor_class(self.manager, self, self.expurl) - self.argeditor.restore_state(argeditor_state) self.layout.addWidget(self.argeditor, 0, 0, 1, 5) + self.argeditor.restore_state(argeditor_state) def contextMenuEvent(self, event): menu = QtWidgets.QMenu(self) From 329e7189ccb5e0abe9bfef59dbb94305268bbddf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Mon, 25 Mar 2024 14:55:17 +0800 Subject: [PATCH 182/296] example: add interactive args --- .../no_hardware/repository/interactive.py | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 artiq/examples/no_hardware/repository/interactive.py diff --git a/artiq/examples/no_hardware/repository/interactive.py b/artiq/examples/no_hardware/repository/interactive.py new file mode 100644 index 000000000..5effb1583 --- /dev/null +++ b/artiq/examples/no_hardware/repository/interactive.py @@ -0,0 +1,20 @@ +from artiq.experiment import * + + +class InteractiveDemo(EnvExperiment): + def build(self): + pass + + def run(self): + print("Waiting for user input...") + with self.interactive() as interactive: + interactive.setattr_argument("number", NumberValue(42e-6, + unit="us", + precision=4)) + interactive.setattr_argument("integer", NumberValue(42, + step=1, precision=0)) + interactive.setattr_argument("string", StringValue("Hello World")) + print("Done! Values:") + print(interactive.number, type(interactive.number)) + print(interactive.integer, type(interactive.integer)) + print(interactive.string) From 5a8bc17e4d05cbbb1f8b74ff1960c8a1ace33d09 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 25 Mar 2024 15:30:29 +0800 Subject: [PATCH 183/296] example: expand interactive --- .../no_hardware/repository/interactive.py | 22 +++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/artiq/examples/no_hardware/repository/interactive.py b/artiq/examples/no_hardware/repository/interactive.py index 5effb1583..657721505 100644 --- a/artiq/examples/no_hardware/repository/interactive.py +++ b/artiq/examples/no_hardware/repository/interactive.py @@ -7,14 +7,28 @@ class InteractiveDemo(EnvExperiment): def run(self): print("Waiting for user input...") - with self.interactive() as interactive: + with self.interactive(title="Interactive Demo") as interactive: + interactive.setattr_argument("pyon_value", + PYONValue(self.get_dataset("foo", default=42))) interactive.setattr_argument("number", NumberValue(42e-6, - unit="us", - precision=4)) + unit="us", + precision=4)) interactive.setattr_argument("integer", NumberValue(42, - step=1, precision=0)) + step=1, precision=0)) interactive.setattr_argument("string", StringValue("Hello World")) + interactive.setattr_argument("scan", Scannable(global_max=400, + default=NoScan(325), + precision=6)) + interactive.setattr_argument("boolean", BooleanValue(True), "Group") + interactive.setattr_argument("enum", + EnumerationValue(["foo", "bar", "quux"], "foo"), + "Group") print("Done! 
Values:") + print(interactive.pyon_value) + print(interactive.boolean) + print(interactive.enum) print(interactive.number, type(interactive.number)) print(interactive.integer, type(interactive.integer)) print(interactive.string) + for i in interactive.scan: + print(i) From 7fa770fba9dca291fc1bb08f84dfee7f338bf98f Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 25 Mar 2024 16:26:53 +0800 Subject: [PATCH 184/296] artiq_client: cancel interactive arguments --- artiq/frontend/artiq_client.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/artiq/frontend/artiq_client.py b/artiq/frontend/artiq_client.py index f552c1f33..39502aad9 100755 --- a/artiq/frontend/artiq_client.py +++ b/artiq/frontend/artiq_client.py @@ -121,6 +121,11 @@ def get_argparser(): "arguments", metavar="ARGUMENTS", nargs="*", help="interactive arguments") + parser_cancel_interactive = subparsers.add_parser( + "cancel-interactive", help="cancel interactive arguments") + parser_cancel_interactive.add_argument( + "rid", metavar="RID", type=int, help="RID of target experiment") + parser_show = subparsers.add_parser( "show", help="show schedule, log, devices or datasets") parser_show.add_argument( @@ -222,6 +227,10 @@ def _action_supply_interactive(remote, args): remote.supply(args.rid, arguments) +def _action_cancel_interactive(remote, args): + remote.cancel(args.rid) + + def _action_scan_repository(remote, args): if getattr(args, "async"): remote.scan_repository_async(args.revision) @@ -382,6 +391,7 @@ def main(): "del_dataset": "dataset_db", "scan_devices": "device_db", "supply_interactive": "interactive_arg_db", + "cancel_interactive": "interactive_arg_db", "scan_repository": "experiment_db", "ls": "experiment_db", "terminate": "master_management", From d0f893c01c46c1adf2a7f102603d74b3da0606df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Tue, 26 Mar 2024 15:57:44 +0800 Subject: [PATCH 185/296] flake: export openocd-bscanspi-f --- flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index 19e5ab65d..d195ccc5c 100644 --- a/flake.nix +++ b/flake.nix @@ -440,7 +440,7 @@ }; }; - inherit makeArtiqBoardPackage; + inherit makeArtiqBoardPackage openocd-bscanspi-f; defaultPackage.x86_64-linux = pkgs.python3.withPackages(ps: [ packages.x86_64-linux.artiq ]); From aea5f04d747d1f57dade7ae9f6b1b7ef3b45f876 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 26 Mar 2024 16:19:06 +0800 Subject: [PATCH 186/296] dashboard, browser: fix missing recompute arguments --- artiq/browser/experiments.py | 2 +- artiq/dashboard/experiments.py | 2 +- artiq/gui/entries.py | 5 ----- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/artiq/browser/experiments.py b/artiq/browser/experiments.py index 8e41063a9..7bacba15b 100644 --- a/artiq/browser/experiments.py +++ b/artiq/browser/experiments.py @@ -23,7 +23,7 @@ class _ArgumentEditor(EntryTreeWidget): self._dock = dock if not self._dock.arguments: - self.addTopLevelItem(QtWidgets.QTreeWidgetItem(["No arguments"])) + self.insertTopLevelItem(0, QtWidgets.QTreeWidgetItem(["No arguments"])) for name, argument in self._dock.arguments.items(): self.set_argument(name, argument) diff --git a/artiq/dashboard/experiments.py b/artiq/dashboard/experiments.py index ba493f93c..c5409624c 100644 --- a/artiq/dashboard/experiments.py +++ b/artiq/dashboard/experiments.py @@ -34,7 +34,7 @@ class _ArgumentEditor(EntryTreeWidget): arguments = self.manager.get_submission_arguments(self.expurl) if not arguments: - 
self.addTopLevelItem(QtWidgets.QTreeWidgetItem(["No arguments"])) + self.insertTopLevelItem(0, QtWidgets.QTreeWidgetItem(["No arguments"])) for name, argument in arguments.items(): self.set_argument(name, argument) diff --git a/artiq/gui/entries.py b/artiq/gui/entries.py index 58044ef8c..3eea8b880 100644 --- a/artiq/gui/entries.py +++ b/artiq/gui/entries.py @@ -45,7 +45,6 @@ class EntryTreeWidget(QtWidgets.QTreeWidget): self.bottom_item = QtWidgets.QTreeWidgetItem() self.addTopLevelItem(self.bottom_item) - self.bottom_item.setHidden(True) def set_argument(self, key, argument): self._arguments[key] = argument @@ -60,9 +59,6 @@ class EntryTreeWidget(QtWidgets.QTreeWidget): widgets["entry"] = entry widgets["widget_item"] = widget_item - if len(self._arguments) > 1: - self.bottom_item.setHidden(False) - for col in range(3): widget_item.setBackground(col, self.gradient) font = widget_item.font(0) @@ -73,7 +69,6 @@ class EntryTreeWidget(QtWidgets.QTreeWidget): self.insertTopLevelItem(self.indexFromItem(self.bottom_item).row(), widget_item) else: self._get_group(argument["group"]).addChild(widget_item) - self.bottom_item.setHidden(False) fix_layout = LayoutWidget() widgets["fix_layout"] = fix_layout fix_layout.addWidget(entry) From bc8bc952d7e332bf41c875aa81578eaf2dedf3a5 Mon Sep 17 00:00:00 2001 From: Florian Agbuya Date: Mon, 25 Mar 2024 16:16:25 +0800 Subject: [PATCH 187/296] use nixpkgs outputcheck Signed-off-by: Florian Agbuya --- flake.nix | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/flake.nix b/flake.nix index d195ccc5c..d332dd7d1 100644 --- a/flake.nix +++ b/flake.nix @@ -92,22 +92,6 @@ disabledTestPaths = [ "tests/test_qeventloop.py" ]; }; - outputcheck = pkgs.python3Packages.buildPythonApplication rec { - pname = "outputcheck"; - version = "0.4.2"; - src = pkgs.fetchFromGitHub { - owner = "stp"; - repo = "OutputCheck"; - rev = "e0f533d3c5af2949349856c711bf4bca50022b48"; - sha256 = "1y27vz6jq6sywas07kz3v01sqjd0sga9yv9w2cksqac3v7wmf2a0"; - }; - prePatch = "echo ${version} > RELEASE-VERSION"; - postPatch = '' - substituteInPlace OutputCheck/Driver.py \ - --replace "argparse.FileType('rU')" "argparse.FileType('r')" - ''; - }; - libartiq-support = pkgs.stdenv.mkDerivation { name = "libartiq-support"; src = self; @@ -187,7 +171,7 @@ # FIXME: automatically propagate lld_14 llvm_14 dependencies # cacert is required in the check stage only, as certificates are to be # obtained from system elsewhere - nativeCheckInputs = [ pkgs.lld_14 pkgs.llvm_14 libartiq-support pkgs.lit outputcheck pkgs.cacert ]; + nativeCheckInputs = with pkgs; [ lld_14 llvm_14 lit outputcheck cacert ] ++ [ libartiq-support ]; checkPhase = '' python -m unittest discover -v artiq.test @@ -461,7 +445,7 @@ artiq-frontend-dev-wrappers # To manually run compiler tests: pkgs.lit - outputcheck + pkgs.outputcheck libartiq-support # use the vivado-env command to enter a FHS shell that lets you run the Vivado installer packages.x86_64-linux.vivadoEnv From dc0b803b19d2b2393d0086d6b9b2ed468dd820e4 Mon Sep 17 00:00:00 2001 From: Florian Agbuya Date: Wed, 27 Mar 2024 17:49:25 +0800 Subject: [PATCH 188/296] use nixpkgs sphinxcontrib-wavedrom Signed-off-by: Florian Agbuya --- flake.nix | 30 +++++++++--------------------- 1 file changed, 9 insertions(+), 21 deletions(-) diff --git a/flake.nix b/flake.nix index d332dd7d1..666d86659 100644 --- a/flake.nix +++ b/flake.nix @@ -335,17 +335,6 @@ paths = [ openocd-fixed bscan_spi_bitstreams-pkg ]; }; - sphinxcontrib-wavedrom = 
pkgs.python3Packages.buildPythonPackage rec { - pname = "sphinxcontrib-wavedrom"; - version = "3.0.4"; - format = "pyproject"; - src = pkgs.python3Packages.fetchPypi { - inherit pname version; - sha256 = "sha256-0zTHVBr9kXwMEo4VRTFsxdX2HI31DxdHfLUHCQmw1Ko="; - }; - nativeBuildInputs = [ pkgs.python3Packages.setuptools-scm ]; - propagatedBuildInputs = (with pkgs.python3Packages; [ wavedrom sphinx xcffib cairosvg ]); - }; latex-artiq-manual = pkgs.texlive.combine { inherit (pkgs.texlive) scheme-basic latexmk cmap collection-fontsrecommended fncychap @@ -379,14 +368,14 @@ target = "efc"; variant = "shuttler"; }; - inherit sphinxcontrib-wavedrom latex-artiq-manual; + inherit latex-artiq-manual; artiq-manual-html = pkgs.stdenvNoCC.mkDerivation rec { name = "artiq-manual-html-${version}"; version = artiqVersion; src = self; - buildInputs = [ - pkgs.python3Packages.sphinx pkgs.python3Packages.sphinx_rtd_theme - pkgs.python3Packages.sphinx-argparse sphinxcontrib-wavedrom + buildInputs = with pkgs.python3Packages; [ + sphinx sphinx_rtd_theme + sphinx-argparse sphinxcontrib-wavedrom ]; buildPhase = '' export VERSIONEER_OVERRIDE=${artiqVersion} @@ -404,11 +393,10 @@ name = "artiq-manual-pdf-${version}"; version = artiqVersion; src = self; - buildInputs = [ - pkgs.python3Packages.sphinx pkgs.python3Packages.sphinx_rtd_theme - pkgs.python3Packages.sphinx-argparse sphinxcontrib-wavedrom - latex-artiq-manual - ]; + buildInputs = with pkgs.python3Packages; [ + sphinx sphinx_rtd_theme + sphinx-argparse sphinxcontrib-wavedrom + ] ++ [ latex-artiq-manual ]; buildPhase = '' export VERSIONEER_OVERRIDE=${artiq.version} export SOURCE_DATE_EPOCH=${builtins.toString self.sourceInfo.lastModified} @@ -452,7 +440,7 @@ packages.x86_64-linux.vivado packages.x86_64-linux.openocd-bscanspi pkgs.python3Packages.sphinx pkgs.python3Packages.sphinx_rtd_theme - pkgs.python3Packages.sphinx-argparse sphinxcontrib-wavedrom latex-artiq-manual + pkgs.python3Packages.sphinx-argparse pkgs.python3Packages.sphinxcontrib-wavedrom latex-artiq-manual ]; shellHook = '' export LIBARTIQ_SUPPORT=`libartiq-support` From 19b652d4c06b6a1a135bb059973e32ddede0812c Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Fri, 22 Mar 2024 17:30:56 +0800 Subject: [PATCH 189/296] fix interactive args cancellation --- artiq/language/environment.py | 16 ++++++++++++++-- artiq/master/databases.py | 2 +- artiq/master/worker_impl.py | 5 +++-- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/artiq/language/environment.py b/artiq/language/environment.py index 8b4076a4c..fe5753184 100644 --- a/artiq/language/environment.py +++ b/artiq/language/environment.py @@ -12,7 +12,8 @@ from artiq.language.core import rpc __all__ = ["NoDefault", "DefaultMissing", "PYONValue", "BooleanValue", "EnumerationValue", "NumberValue", "StringValue", - "HasEnvironment", "Experiment", "EnvExperiment"] + "HasEnvironment", "Experiment", "EnvExperiment", + "CancelledArgsError"] class NoDefault: @@ -26,6 +27,12 @@ class DefaultMissing(Exception): pass +class CancelledArgsError(Exception): + """Raised by the ``interactive`` context manager when an interactive + arguments request is cancelled.""" + pass + + class _SimpleArgProcessor: def __init__(self, default=NoDefault): # If default is a list, it means multiple defaults are specified, with @@ -341,7 +348,10 @@ class HasEnvironment: When the context manager terminates, the experiment is blocked and the user is presented with the requested argument widgets. 
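A minimal usage sketch for the interactive-argument changes of PATCH 189; it is not part of the patch, the experiment and argument names are invented, and it assumes ``interactive()`` keeps its ``title`` keyword and that ``CancelledArgsError`` is importable as shown::

    from artiq.experiment import EnvExperiment, BooleanValue
    from artiq.language.environment import CancelledArgsError

    class InteractiveSketch(EnvExperiment):
        def build(self):
            pass

        def run(self):
            try:
                # blocks until the user supplies or cancels the request
                with self.interactive(title="Continue?") as inter:
                    inter.setattr_argument("proceed", BooleanValue(True))
            except CancelledArgsError:
                # cancelled, e.g. from the dashboard dock or
                # "artiq_client cancel-interactive <RID>"
                return
            print("proceed =", inter.proceed)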
After the user enters values, the experiment is resumed and - the namespace contains the values of the arguments.""" + the namespace contains the values of the arguments. + + If the interactive arguments request is cancelled, raises + ``CancelledArgsError``.""" interactive_arglist = [] namespace = SimpleNamespace() def setattr_argument(key, processor=None, group=None, tooltip=None): @@ -350,6 +360,8 @@ class HasEnvironment: yield namespace del namespace.setattr_argument argdict = self.__argument_mgr.get_interactive(interactive_arglist, title) + if argdict is None: + raise CancelledArgsError for key, value in argdict.items(): setattr(namespace, key, value) diff --git a/artiq/master/databases.py b/artiq/master/databases.py index 7a4a6b153..6edcc966d 100644 --- a/artiq/master/databases.py +++ b/artiq/master/databases.py @@ -144,4 +144,4 @@ class InteractiveArgDB: if rid not in self.futures: raise ValueError("no experiment with this RID is " "waiting for interactive arguments") - self.futures[rid].cancel() + self.futures[rid].set_result(None) diff --git a/artiq/master/worker_impl.py b/artiq/master/worker_impl.py index 65c7f2a88..1f069a50c 100644 --- a/artiq/master/worker_impl.py +++ b/artiq/master/worker_impl.py @@ -222,8 +222,9 @@ class ArgumentManager(ProcessArgumentManager): arglist_desc = [(k, p.describe(), g, t) for k, p, g, t in interactive_arglist] arguments = ArgumentManager._get_interactive(arglist_desc, title) - for key, processor, _, _ in interactive_arglist: - arguments[key] = processor.process(arguments[key]) + if arguments is not None: + for key, processor, _, _ in interactive_arglist: + arguments[key] = processor.process(arguments[key]) return arguments From 8e68501081681730eae33e7244f55248a46bdd33 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 19 Mar 2024 14:35:11 +0800 Subject: [PATCH 190/296] applets: EntryArea return processed values --- artiq/gui/applets.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/artiq/gui/applets.py b/artiq/gui/applets.py index 0518d7c58..9609fbac0 100644 --- a/artiq/gui/applets.py +++ b/artiq/gui/applets.py @@ -7,6 +7,7 @@ import os import subprocess from functools import partial from itertools import count +from types import SimpleNamespace from PyQt5 import QtCore, QtGui, QtWidgets @@ -36,10 +37,12 @@ class EntryArea(EntryTreeWidget): buttons.layout.setColumnStretch(2, 1) buttons.addWidget(reset_all_button, 0, 1) self.setItemWidget(self.bottom_item, 1, buttons) + self._processors = dict() def setattr_argument(self, key, processor, group=None, tooltip=None): argument = dict() desc = processor.describe() + self._processors[key] = processor argument["desc"] = desc argument["group"] = group argument["tooltip"] = tooltip @@ -51,7 +54,8 @@ class EntryArea(EntryTreeWidget): def get_value(self, key): entry = self._arg_to_widgets[key]["entry"] argument = self._arguments[key] - return entry.state_to_value(argument["state"]) + processor = self._processors[key] + return processor.process(entry.state_to_value(argument["state"])) def set_value(self, key, value): ty = self._arguments[key]["desc"]["ty"] @@ -64,9 +68,9 @@ class EntryArea(EntryTreeWidget): self.update_value(key) def get_values(self): - d = dict() + d = SimpleNamespace() for key in self._arguments.keys(): - d[key] = self.get_value(key) + setattr(d, key, self.get_value(key)) return d def set_values(self, values): From 47716badef7ed6839fc57732078aca794f077d62 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 25 Mar 2024 14:37:47 +0800 Subject: [PATCH 
191/296] add quickstyle option to EnumerationValue --- artiq/browser/experiments.py | 2 ++ artiq/dashboard/experiments.py | 2 ++ artiq/gui/entries.py | 43 +++++++++++++++++++++++++++------- artiq/language/environment.py | 4 +++- 4 files changed, 41 insertions(+), 10 deletions(-) diff --git a/artiq/browser/experiments.py b/artiq/browser/experiments.py index 7bacba15b..7c2267937 100644 --- a/artiq/browser/experiments.py +++ b/artiq/browser/experiments.py @@ -28,6 +28,8 @@ class _ArgumentEditor(EntryTreeWidget): for name, argument in self._dock.arguments.items(): self.set_argument(name, argument) + self.quickStyleClicked.connect(self._dock._run_clicked) + recompute_arguments = QtWidgets.QPushButton("Recompute all arguments") recompute_arguments.setIcon( QtWidgets.QApplication.style().standardIcon( diff --git a/artiq/dashboard/experiments.py b/artiq/dashboard/experiments.py index c5409624c..0defaa103 100644 --- a/artiq/dashboard/experiments.py +++ b/artiq/dashboard/experiments.py @@ -39,6 +39,8 @@ class _ArgumentEditor(EntryTreeWidget): for name, argument in arguments.items(): self.set_argument(name, argument) + self.quickStyleClicked.connect(dock.submit_clicked) + recompute_arguments = QtWidgets.QPushButton("Recompute all arguments") recompute_arguments.setIcon( QtWidgets.QApplication.style().standardIcon( diff --git a/artiq/gui/entries.py b/artiq/gui/entries.py index 3eea8b880..dd7231331 100644 --- a/artiq/gui/entries.py +++ b/artiq/gui/entries.py @@ -13,6 +13,8 @@ logger = logging.getLogger(__name__) class EntryTreeWidget(QtWidgets.QTreeWidget): + quickStyleClicked = QtCore.pyqtSignal() + def __init__(self): QtWidgets.QTreeWidget.__init__(self) self.setColumnCount(3) @@ -53,6 +55,8 @@ class EntryTreeWidget(QtWidgets.QTreeWidget): entry_class = procdesc_to_entry(argument["desc"]) argument["state"] = entry_class.default_state(argument["desc"]) entry = entry_class(argument) + if argument["desc"].get("quickstyle"): + entry.quickStyleClicked.connect(self.quickStyleClicked) widget_item = QtWidgets.QTreeWidgetItem([key]) if argument["tooltip"]: widget_item.setToolTip(0, argument["tooltip"]) @@ -193,17 +197,38 @@ class BooleanEntry(QtWidgets.QCheckBox): return procdesc.get("default", False) -class EnumerationEntry(QtWidgets.QComboBox): +class EnumerationEntry(QtWidgets.QWidget): + quickStyleClicked = QtCore.pyqtSignal() + def __init__(self, argument): - QtWidgets.QComboBox.__init__(self) + QtWidgets.QWidget.__init__(self) disable_scroll_wheel(self) - choices = argument["desc"]["choices"] - self.addItems(choices) - idx = choices.index(argument["state"]) - self.setCurrentIndex(idx) - def update(index): - argument["state"] = choices[index] - self.currentIndexChanged.connect(update) + layout = QtWidgets.QHBoxLayout() + self.setLayout(layout) + procdesc = argument["desc"] + choices = procdesc["choices"] + if procdesc["quickstyle"]: + self.btn_group = QtWidgets.QButtonGroup() + for i, choice in enumerate(choices): + button = QtWidgets.QPushButton(choice) + self.btn_group.addButton(button) + self.btn_group.setId(button, i) + layout.addWidget(button) + + def submit(index): + argument["state"] = choices[index] + self.quickStyleClicked.emit() + self.btn_group.idClicked.connect(submit) + else: + self.combo_box = QtWidgets.QComboBox() + self.combo_box.addItems(choices) + idx = choices.index(argument["state"]) + self.combo_box.setCurrentIndex(idx) + layout.addWidget(self.combo_box) + + def update(index): + argument["state"] = choices[index] + self.combo_box.currentIndexChanged.connect(update) @staticmethod def 
state_to_value(state): diff --git a/artiq/language/environment.py b/artiq/language/environment.py index fe5753184..edb8865b1 100644 --- a/artiq/language/environment.py +++ b/artiq/language/environment.py @@ -90,8 +90,9 @@ class EnumerationValue(_SimpleArgProcessor): :param choices: A list of string representing the possible values of the argument. """ - def __init__(self, choices, default=NoDefault): + def __init__(self, choices, default=NoDefault, quickstyle=False): self.choices = choices + self.quickstyle = quickstyle super().__init__(default) def process(self, x): @@ -102,6 +103,7 @@ class EnumerationValue(_SimpleArgProcessor): def describe(self): d = _SimpleArgProcessor.describe(self) d["choices"] = self.choices + d["quickstyle"] = self.quickstyle return d From 9934c756b22a10bc07363ffffac87a752c069f08 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 27 Mar 2024 15:17:59 +0800 Subject: [PATCH 192/296] RELEASE_NOTES: quickstyle EnumerationValue --- RELEASE_NOTES.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index fa801a11f..e4901d2c6 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -39,6 +39,8 @@ Highlights: - Hotkeys now organize experiment windows in the order they were last interacted with: + CTRL+SHIFT+T tiles experiment windows + CTRL+SHIFT+C cascades experiment windows + - By enabling the ``quickstyle`` option, ``EnumerationValue`` entry widgets can now alternatively display + its choices as buttons that submit the experiment on click. * Datasets can now be associated with units and scale factors, and displayed accordingly in the dashboard including applets, like widgets such as ``NumberValue`` already did in earlier ARTIQ versions. * Experiments can now request arguments interactively from the user at any time. From b4d070fa1bab29859162a97c7b9329ac5d2a30bb Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 27 Mar 2024 15:30:36 +0800 Subject: [PATCH 193/296] docs: add quickstyle param --- artiq/language/environment.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/artiq/language/environment.py b/artiq/language/environment.py index edb8865b1..a80701c78 100644 --- a/artiq/language/environment.py +++ b/artiq/language/environment.py @@ -89,6 +89,8 @@ class EnumerationValue(_SimpleArgProcessor): :param choices: A list of string representing the possible values of the argument. + :param quickstyle: Enables the choices to be displayed in the GUI as a + list of buttons that submit the experiment when clicked. 
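A short sketch (not part of PATCHES 191-193) of how an experiment might declare a quickstyle ``EnumerationValue``; the class and choice names are invented::

    from artiq.experiment import EnvExperiment, EnumerationValue

    class QuickStyleSketch(EnvExperiment):
        def build(self):
            # with quickstyle=True the dashboard renders one submit button
            # per choice instead of a combo box
            self.setattr_argument("mode", EnumerationValue(
                ["Warm up", "Calibrate", "Measure"], default="Measure",
                quickstyle=True))

        def run(self):
            print("selected mode:", self.mode)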
""" def __init__(self, choices, default=NoDefault, quickstyle=False): self.choices = choices From d463ccb218f6c377007610f5e1bcc7875fae20c0 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 7 Mar 2024 16:07:45 +0800 Subject: [PATCH 194/296] interactive_args: add InteractiveArgsDock --- artiq/dashboard/interactive_args.py | 139 ++++++++++++++++++++++++++++ 1 file changed, 139 insertions(+) create mode 100644 artiq/dashboard/interactive_args.py diff --git a/artiq/dashboard/interactive_args.py b/artiq/dashboard/interactive_args.py new file mode 100644 index 000000000..54ca5f3ad --- /dev/null +++ b/artiq/dashboard/interactive_args.py @@ -0,0 +1,139 @@ +import logging +import asyncio + +from PyQt5 import QtCore, QtWidgets, QtGui + +from artiq.gui.models import DictSyncModel +from artiq.gui.entries import EntryTreeWidget, procdesc_to_entry + + +logger = logging.getLogger(__name__) + + +class Model(DictSyncModel): + def __init__(self, init): + DictSyncModel.__init__(self, ["RID", "Title", "Args"], init) + + def convert(self, k, v, column): + if column == 0: + return k + elif column == 1: + txt = ": " + v["title"] if v["title"] != "" else "" + return str(k) + txt + elif column == 2: + return v["arglist_desc"] + else: + raise ValueError + + def sort_key(self, k, v): + return k + + +class _InteractiveArgsRequest(QtWidgets.QWidget): + supplied = QtCore.pyqtSignal(int, dict) + cancelled = QtCore.pyqtSignal(int) + + def __init__(self, rid, arglist_desc): + QtWidgets.QWidget.__init__(self) + self.rid = rid + self.arguments = dict() + layout = QtWidgets.QGridLayout() + self.setLayout(layout) + self.entry_tree = EntryTreeWidget() + layout.addWidget(self.entry_tree, 0, 0, 1, 2) + for key, procdesc, group, tooltip in arglist_desc: + self.arguments[key] = {"desc": procdesc, "group": group, "tooltip": tooltip} + self.entry_tree.set_argument(key, self.arguments[key]) + self.cancel_btn = QtWidgets.QPushButton("Cancel") + self.cancel_btn.setIcon(QtWidgets.QApplication.style().standardIcon( + QtWidgets.QStyle.SP_DialogCancelButton)) + self.cancel_btn.clicked.connect(self.cancel) + layout.addWidget(self.cancel_btn, 1, 0, 1, 1) + self.supply_btn = QtWidgets.QPushButton("Supply") + self.supply_btn.setIcon(QtWidgets.QApplication.style().standardIcon( + QtWidgets.QStyle.SP_DialogOkButton)) + self.supply_btn.clicked.connect(self.supply) + layout.addWidget(self.supply_btn, 1, 1, 1, 1) + + def supply(self): + argument_values = dict() + for key, argument in self.arguments.items(): + entry_cls = procdesc_to_entry(argument["desc"]) + argument_values[key] = entry_cls.state_to_value(argument["state"]) + self.supplied.emit(self.rid, argument_values) + + def cancel(self): + self.cancelled.emit(self.rid) + + +class _InteractiveArgsView(QtWidgets.QTabWidget): + supplied = QtCore.pyqtSignal(int, dict) + cancelled = QtCore.pyqtSignal(int) + + def __init__(self): + QtWidgets.QTabWidget.__init__(self) + self.model = Model({}) + + def setModel(self, model): + for i in range(self.count()): + widget = self.widget(i) + self.removeTab(i) + widget.deleteLater() + self.model = model + self.model.rowsInserted.connect(self.rowsInserted) + self.model.rowsRemoved.connect(self.rowsRemoved) + for i in range(self.model.rowCount(QtCore.QModelIndex())): + self._insert_widget(i) + + def _insert_widget(self, row): + rid = self.model.data(self.model.index(row, 0), QtCore.Qt.DisplayRole) + title = self.model.data(self.model.index(row, 1), QtCore.Qt.DisplayRole) + arglist_desc = self.model.data(self.model.index(row, 2), QtCore.Qt.DisplayRole) + 
inter_args_request = _InteractiveArgsRequest(rid, arglist_desc) + inter_args_request.supplied.connect(self.supplied) + inter_args_request.cancelled.connect(self.cancelled) + self.insertTab(row, inter_args_request, title) + + def rowsInserted(self, parent, first, last): + assert first == last + self._insert_widget(first) + + def rowsRemoved(self, parent, first, last): + assert first == last + widget = self.widget(first) + self.removeTab(first) + widget.deleteLater() + + +class InteractiveArgsDock(QtWidgets.QDockWidget): + def __init__(self, interactive_args_sub, interactive_args_rpc): + QtWidgets.QDockWidget.__init__(self, "Interactive Args") + self.setObjectName("Interactive Args") + self.setFeatures( + QtWidgets.QDockWidget.DockWidgetMovable | QtWidgets.QDockWidget.DockWidgetFloatable) + self.interactive_args_rpc = interactive_args_rpc + self.request_view = _InteractiveArgsView() + self.request_view.supplied.connect(self.supply) + self.request_view.cancelled.connect(self.cancel) + self.setWidget(self.request_view) + interactive_args_sub.add_setmodel_callback(self.request_view.setModel) + + def supply(self, rid, values): + asyncio.ensure_future(self._supply_task(rid, values)) + + async def _supply_task(self, rid, values): + try: + await self.interactive_args_rpc.supply(rid, values) + except Exception: + logger.error("failed to supply interactive arguments for experiment: %d", + rid, exc_info=True) + + def cancel(self, rid): + asyncio.ensure_future(self._cancel_task(rid)) + + async def _cancel_task(self, rid): + try: + await self.interactive_args_rpc.cancel(rid) + except Exception: + logger.error("failed to cancel interactive args request for experiment: %d", + rid, exc_info=True) From 915d3613f17a007981eed9248fae83920383e3ca Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 7 Mar 2024 16:08:03 +0800 Subject: [PATCH 195/296] artiq_dashboard: add InteractiveArgsDock --- artiq/frontend/artiq_dashboard.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index 05bc5e531..168b877bb 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ -23,7 +23,7 @@ from artiq.gui.models import ModelSubscriber from artiq.gui import state, log from artiq.dashboard import (experiments, shortcuts, explorer, moninj, datasets, schedule, applets_ccb, - waveform) + waveform, interactive_args) def get_argparser(): @@ -141,7 +141,7 @@ def main(): # create connections to master rpc_clients = dict() - for target in "schedule", "experiment_db", "dataset_db", "device_db": + for target in "schedule", "experiment_db", "dataset_db", "device_db", "interactive_arg_db": client = AsyncioClient() loop.run_until_complete(client.connect_rpc( args.server, args.port_control, target)) @@ -166,7 +166,8 @@ def main(): for notifier_name, modelf in (("explist", explorer.Model), ("explist_status", explorer.StatusUpdater), ("datasets", datasets.Model), - ("schedule", schedule.Model)): + ("schedule", schedule.Model), + ("interactive_args", interactive_args.Model)): subscriber = ModelSubscriber(notifier_name, modelf, report_disconnect) loop.run_until_complete(subscriber.connect( @@ -244,6 +245,11 @@ def main(): loop.run_until_complete(devices_sub.connect(args.server, args.port_notify)) atexit_register_coroutine(devices_sub.close, loop=loop) + d_interactive_args = interactive_args.InteractiveArgsDock( + sub_clients["interactive_args"], + rpc_clients["interactive_arg_db"] + ) + d_schedule = 
schedule.ScheduleDock( rpc_clients["schedule"], sub_clients["schedule"]) smgr.register(d_schedule) @@ -257,7 +263,7 @@ def main(): right_docks = [ d_explorer, d_shortcuts, d_ttl_dds.ttl_dock, d_ttl_dds.dds_dock, d_ttl_dds.dac_dock, - d_datasets, d_applets, d_waveform + d_datasets, d_applets, d_waveform, d_interactive_args ] main_window.addDockWidget(QtCore.Qt.RightDockWidgetArea, right_docks[0]) for d1, d2 in zip(right_docks, right_docks[1:]): From fddff13842ee13aba18be6cf94ce61e7e4f84b73 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Fri, 15 Mar 2024 11:05:02 +0800 Subject: [PATCH 196/296] docs: mock interactive_args --- doc/manual/conf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/manual/conf.py b/doc/manual/conf.py index 33032b259..34f0af9bb 100644 --- a/doc/manual/conf.py +++ b/doc/manual/conf.py @@ -35,6 +35,7 @@ mock_modules = ["artiq.gui.waitingspinnerwidget", "artiq.compiler.module", "artiq.compiler.embedding", "artiq.dashboard.waveform", + "artiq.dashboard.interactive_args", "qasync", "pyqtgraph", "matplotlib", "lmdb", "numpy", "dateutil", "dateutil.parser", "prettytable", "PyQt5", "h5py", "serial", "scipy", "scipy.interpolate", From 5f49e582c86143afe94f2e93ba49bb40d8e2d27c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Tue, 2 Apr 2024 16:10:00 +0800 Subject: [PATCH 197/296] master: fix race condition in interactive args supply Closes #2375 --- artiq/master/databases.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/artiq/master/databases.py b/artiq/master/databases.py index 6edcc966d..d889933d7 100644 --- a/artiq/master/databases.py +++ b/artiq/master/databases.py @@ -133,7 +133,7 @@ class InteractiveArgDB: def supply(self, rid, values): # quick sanity checks - if rid not in self.futures: + if rid not in self.futures or self.futures[rid].done(): raise ValueError("no experiment with this RID is " "waiting for interactive arguments") if {i[0] for i in self.pending.raw_view[rid]["arglist_desc"]} != set(values.keys()): @@ -141,7 +141,7 @@ class InteractiveArgDB: self.futures[rid].set_result(values) def cancel(self, rid): - if rid not in self.futures: + if rid not in self.futures or self.futures[rid].done(): raise ValueError("no experiment with this RID is " "waiting for interactive arguments") self.futures[rid].set_result(None) From 9bf5695ab21c0566b02ba5b9ea9a58276ed626c7 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 2 Apr 2024 16:13:04 +0800 Subject: [PATCH 198/296] interactive_args: add quickstyle --- artiq/dashboard/interactive_args.py | 1 + 1 file changed, 1 insertion(+) diff --git a/artiq/dashboard/interactive_args.py b/artiq/dashboard/interactive_args.py index 54ca5f3ad..893749290 100644 --- a/artiq/dashboard/interactive_args.py +++ b/artiq/dashboard/interactive_args.py @@ -40,6 +40,7 @@ class _InteractiveArgsRequest(QtWidgets.QWidget): layout = QtWidgets.QGridLayout() self.setLayout(layout) self.entry_tree = EntryTreeWidget() + self.entry_tree.quickStyleClicked.connect(self.supply) layout.addWidget(self.entry_tree, 0, 0, 1, 2) for key, procdesc, group, tooltip in arglist_desc: self.arguments[key] = {"desc": procdesc, "group": group, "tooltip": tooltip} From 0fb31ddbb104889a95ec9284492ecb94e14d9ade Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Tue, 2 Apr 2024 17:01:19 +0800 Subject: [PATCH 199/296] flake: update dependencies --- flake.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flake.lock b/flake.lock index d42b650b2..d0085e11f 100644 --- 
a/flake.lock +++ b/flake.lock @@ -60,11 +60,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1707347730, - "narHash": "sha256-0etC/exQIaqC9vliKhc3eZE2Mm2wgLa0tj93ZF/egvM=", + "lastModified": 1711668574, + "narHash": "sha256-u1dfs0ASQIEr1icTVrsKwg2xToIpn7ZXxW3RHfHxshg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "6832d0d99649db3d65a0e15fa51471537b2c56a6", + "rev": "219951b495fc2eac67b1456824cc1ec1fd2ee659", "type": "github" }, "original": { @@ -92,11 +92,11 @@ ] }, "locked": { - "lastModified": 1701572254, - "narHash": "sha256-ixq8dlpyOytDr+d/OmW8v1Ioy9V2G2ibOlNj8GFDSq4=", + "lastModified": 1708937641, + "narHash": "sha256-Hkb9VYFzFgkYxfbh4kYcDSn7DbMUYehoQDeTALrxo2Q=", "owner": "m-labs", "repo": "sipyco", - "rev": "cceac0df537887135f99aa6b1bdd82853f16b4d6", + "rev": "4a28b311ce0069454b4e8fe1e6049db11b9f1296", "type": "github" }, "original": { From af11dc6b741d0f4b0c2e8a13a6ad965093ab87f6 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 2 Apr 2024 16:51:07 +0800 Subject: [PATCH 200/296] interactive_args: use bottom_item for supply, cancel --- artiq/dashboard/interactive_args.py | 35 ++++++++++++++++------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/artiq/dashboard/interactive_args.py b/artiq/dashboard/interactive_args.py index 893749290..c72a237af 100644 --- a/artiq/dashboard/interactive_args.py +++ b/artiq/dashboard/interactive_args.py @@ -5,6 +5,7 @@ from PyQt5 import QtCore, QtWidgets, QtGui from artiq.gui.models import DictSyncModel from artiq.gui.entries import EntryTreeWidget, procdesc_to_entry +from artiq.gui.tools import LayoutWidget logger = logging.getLogger(__name__) @@ -29,32 +30,34 @@ class Model(DictSyncModel): return k -class _InteractiveArgsRequest(QtWidgets.QWidget): +class _InteractiveArgsRequest(EntryTreeWidget): supplied = QtCore.pyqtSignal(int, dict) cancelled = QtCore.pyqtSignal(int) def __init__(self, rid, arglist_desc): - QtWidgets.QWidget.__init__(self) + EntryTreeWidget.__init__(self) self.rid = rid self.arguments = dict() - layout = QtWidgets.QGridLayout() - self.setLayout(layout) - self.entry_tree = EntryTreeWidget() - self.entry_tree.quickStyleClicked.connect(self.supply) - layout.addWidget(self.entry_tree, 0, 0, 1, 2) for key, procdesc, group, tooltip in arglist_desc: self.arguments[key] = {"desc": procdesc, "group": group, "tooltip": tooltip} - self.entry_tree.set_argument(key, self.arguments[key]) - self.cancel_btn = QtWidgets.QPushButton("Cancel") - self.cancel_btn.setIcon(QtWidgets.QApplication.style().standardIcon( + self.set_argument(key, self.arguments[key]) + self.quickStyleClicked.connect(self.supply) + cancel_btn = QtWidgets.QPushButton("Cancel") + cancel_btn.setIcon(QtWidgets.QApplication.style().standardIcon( QtWidgets.QStyle.SP_DialogCancelButton)) - self.cancel_btn.clicked.connect(self.cancel) - layout.addWidget(self.cancel_btn, 1, 0, 1, 1) - self.supply_btn = QtWidgets.QPushButton("Supply") - self.supply_btn.setIcon(QtWidgets.QApplication.style().standardIcon( + cancel_btn.clicked.connect(self.cancel) + supply_btn = QtWidgets.QPushButton("Supply") + supply_btn.setIcon(QtWidgets.QApplication.style().standardIcon( QtWidgets.QStyle.SP_DialogOkButton)) - self.supply_btn.clicked.connect(self.supply) - layout.addWidget(self.supply_btn, 1, 1, 1, 1) + supply_btn.clicked.connect(self.supply) + buttons = LayoutWidget() + buttons.addWidget(cancel_btn, 1, 1) + buttons.addWidget(supply_btn, 1, 2) + buttons.layout.setColumnStretch(0, 1) + buttons.layout.setColumnStretch(1, 0) + buttons.layout.setColumnStretch(2, 0) + 
buttons.layout.setColumnStretch(3, 1) + self.setItemWidget(self.bottom_item, 1, buttons) def supply(self): argument_values = dict() From 856e43fd6126158e8f230a79d30e271b36f805b5 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 8 Apr 2024 13:17:23 +0800 Subject: [PATCH 201/296] interactive_args: add default message --- artiq/dashboard/interactive_args.py | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/artiq/dashboard/interactive_args.py b/artiq/dashboard/interactive_args.py index c72a237af..d46e0d847 100644 --- a/artiq/dashboard/interactive_args.py +++ b/artiq/dashboard/interactive_args.py @@ -70,18 +70,27 @@ class _InteractiveArgsRequest(EntryTreeWidget): self.cancelled.emit(self.rid) -class _InteractiveArgsView(QtWidgets.QTabWidget): +class _InteractiveArgsView(QtWidgets.QStackedWidget): supplied = QtCore.pyqtSignal(int, dict) cancelled = QtCore.pyqtSignal(int) def __init__(self): - QtWidgets.QTabWidget.__init__(self) + QtWidgets.QStackedWidget.__init__(self) + self.tabs = QtWidgets.QTabWidget() + self.default_label = QtWidgets.QLabel("No pending interactive arguments requests.") + self.default_label.setAlignment(QtCore.Qt.AlignCenter) + font = QtGui.QFont(self.default_label.font()) + font.setItalic(True) + self.default_label.setFont(font) + self.addWidget(self.tabs) + self.addWidget(self.default_label) self.model = Model({}) def setModel(self, model): - for i in range(self.count()): - widget = self.widget(i) - self.removeTab(i) + self.setCurrentIndex(1) + for i in range(self.tabs.count()): + widget = self.tabs.widget(i) + self.tabs.removeTab(i) widget.deleteLater() self.model = model self.model.rowsInserted.connect(self.rowsInserted) @@ -96,17 +105,20 @@ class _InteractiveArgsView(QtWidgets.QTabWidget): inter_args_request = _InteractiveArgsRequest(rid, arglist_desc) inter_args_request.supplied.connect(self.supplied) inter_args_request.cancelled.connect(self.cancelled) - self.insertTab(row, inter_args_request, title) + self.tabs.insertTab(row, inter_args_request, title) def rowsInserted(self, parent, first, last): assert first == last + self.setCurrentIndex(0) self._insert_widget(first) def rowsRemoved(self, parent, first, last): assert first == last - widget = self.widget(first) - self.removeTab(first) + widget = self.tabs.widget(first) + self.tabs.removeTab(first) widget.deleteLater() + if self.tabs.count() == 0: + self.setCurrentIndex(1) class InteractiveArgsDock(QtWidgets.QDockWidget): From 6ac532a00e56e18a46add0d58ba4a0d23d37b3b6 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 10 Apr 2024 10:32:33 +0800 Subject: [PATCH 202/296] moninj: clean up imports --- artiq/dashboard/moninj.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index c3fc75dee..a042ba797 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -3,14 +3,10 @@ import logging import textwrap from collections import namedtuple -from PyQt5 import QtCore, QtWidgets, QtGui +from PyQt5 import QtCore, QtWidgets -from artiq.coredevice.comm_moninj import * -from artiq.coredevice.ad9910 import ( - _AD9910_REG_PROFILE0, _AD9910_REG_PROFILE7, - _AD9910_REG_FTW, _AD9910_REG_CFR1 -) -from artiq.coredevice.ad9912_reg import AD9912_POW1, AD9912_SER_CONF +from artiq.coredevice.comm_moninj import CommMonInj, TTLOverride, TTLProbe +from artiq.coredevice.ad9912_reg import AD9912_SER_CONF from artiq.gui.tools import LayoutWidget from artiq.gui.flowlayout import FlowLayout From 
65005ed45a86e2c8903011c9662d44db7f0e98ea Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 10 Apr 2024 10:43:44 +0800 Subject: [PATCH 203/296] moninj: flake8 style fixes (NFC) --- artiq/dashboard/moninj.py | 76 +++++++++++++++++++-------------------- 1 file changed, 36 insertions(+), 40 deletions(-) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index a042ba797..d7bb4eac9 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -13,6 +13,7 @@ from artiq.gui.flowlayout import FlowLayout logger = logging.getLogger(__name__) + class _CancellableLineEdit(QtWidgets.QLineEdit): def escapePressedConnect(self, cb): self.esc_cb = cb @@ -127,7 +128,7 @@ class _TTLWidget(QtWidgets.QFrame): else: color = "" self.value.setText("{}".format( - color, value_s)) + color, value_s)) oe = self.cur_oe or self.force_out direction = "OUT" if oe else "IN" self.direction.setText("" + direction + "") @@ -185,7 +186,7 @@ class _DDSModel: self.cur_reg = 0 self.dds_type = dds_type self.is_urukul = dds_type in ["AD9910", "AD9912"] - + if dds_type == "AD9914": self.ftw_per_hz = 2**32 / ref_clk else: @@ -277,7 +278,7 @@ class _DDSWidget(QtWidgets.QFrame): set_btn.setText("Set") set_btn.setToolTip("Set frequency") set_grid.addWidget(set_btn, 0, 1, 1, 1) - + # for urukuls also allow switching off RF if self.dds_model.is_urukul: off_btn = QtWidgets.QToolButton() @@ -321,18 +322,17 @@ class _DDSWidget(QtWidgets.QFrame): def set_clicked(self, set): self.data_stack.setCurrentIndex(1) self.button_stack.setCurrentIndex(1) - self.value_edit.setText("{:.7f}" - .format(self.cur_frequency/1e6)) + self.value_edit.setText("{:.7f}".format(self.cur_frequency / 1e6)) self.value_edit.setFocus() self.value_edit.selectAll() def off_clicked(self, set): self.dm.dds_channel_toggle(self.dds_name, self.dds_model, sw=False) - + def apply_changes(self, apply): self.data_stack.setCurrentIndex(0) self.button_stack.setCurrentIndex(0) - frequency = float(self.value_edit.text())*1e6 + frequency = float(self.value_edit.text()) * 1e6 self.dm.dds_set_frequency(self.dds_name, self.dds_model, frequency) def cancel_changes(self, cancel): @@ -341,10 +341,8 @@ class _DDSWidget(QtWidgets.QFrame): def refresh_display(self): self.cur_frequency = self.dds_model.cur_frequency - self.value_label.setText("{:.7f}" - .format(self.cur_frequency/1e6)) - self.value_edit.setText("{:.7f}" - .format(self.cur_frequency/1e6)) + self.value_label.setText("{:.7f}".format(self.cur_frequency / 1e6)) + self.value_edit.setText("{:.7f}".format(self.cur_frequency / 1e6)) def sort_key(self): return (self.bus_channel, self.channel) @@ -359,7 +357,7 @@ class _DACWidget(_SimpleDisplayWidget): def refresh_display(self): self.value.setText("{:.3f} %" - .format(self.cur_value*100/2**16)) + .format(self.cur_value * 100 / 2**16)) def sort_key(self): return (self.spi_channel, self.channel) @@ -386,18 +384,16 @@ def setup_from_ddb(ddb): force_out = v["class"] == "TTLOut" widget = _WidgetDesc(k, comment, _TTLWidget, (channel, force_out, k)) description.add(widget) - elif (v["module"] == "artiq.coredevice.ad9914" - and v["class"] == "AD9914"): + elif (v["module"] == "artiq.coredevice.ad9914" and v["class"] == "AD9914"): bus_channel = v["arguments"]["bus_channel"] channel = v["arguments"]["channel"] dds_sysclk = v["arguments"]["sysclk"] model = _DDSModel(v["class"], dds_sysclk) - widget = _WidgetDesc(k, comment, _DDSWidget, (k, bus_channel, channel, model)) + widget = _WidgetDesc(k, comment, _DDSWidget, + (k, bus_channel, channel, model)) 
description.add(widget) - elif (v["module"] == "artiq.coredevice.ad9910" - and v["class"] == "AD9910") or \ - (v["module"] == "artiq.coredevice.ad9912" - and v["class"] == "AD9912"): + elif (v["module"] == "artiq.coredevice.ad9910" and v["class"] == "AD9910") or \ + (v["module"] == "artiq.coredevice.ad9912" and v["class"] == "AD9912"): channel = v["arguments"]["chip_select"] - 4 if channel < 0: continue @@ -407,18 +403,20 @@ def setup_from_ddb(ddb): pll = v["arguments"]["pll_n"] refclk = ddb[dds_cpld]["arguments"]["refclk"] clk_div = v["arguments"].get("clk_div", 0) - model = _DDSModel( v["class"], refclk, dds_cpld, pll, clk_div) - widget = _WidgetDesc(k, comment, _DDSWidget, (k, bus_channel, channel, model)) - description.add(widget) - elif ( (v["module"] == "artiq.coredevice.ad53xx" and v["class"] == "AD53xx") - or (v["module"] == "artiq.coredevice.zotino" and v["class"] == "Zotino")): + model = _DDSModel(v["class"], refclk, dds_cpld, pll, clk_div) + widget = _WidgetDesc(k, comment, _DDSWidget, + (k, bus_channel, channel, model)) + description.add(widget) + elif (v["module"] == "artiq.coredevice.ad53xx" and v["class"] == "AD53xx") or \ + (v["module"] == "artiq.coredevice.zotino" and v["class"] == "Zotino"): spi_device = v["arguments"]["spi_device"] spi_device = ddb[spi_device] while isinstance(spi_device, str): spi_device = ddb[spi_device] spi_channel = spi_device["arguments"]["channel"] for channel in range(32): - widget = _WidgetDesc((k, channel), comment, _DACWidget, (spi_channel, channel, k)) + widget = _WidgetDesc((k, channel), comment, _DACWidget, + (spi_channel, channel, k)) description.add(widget) elif v["type"] == "controller" and k == "core_moninj": mi_addr = v["host"] @@ -479,7 +477,7 @@ class _DeviceManager: self.setup_dac_monitoring(False, widget.spi_channel, widget.channel) widget.deleteLater() del self.dac_widgets[(widget.spi_channel, widget.channel)] - self.dac_cb() + self.dac_cb() else: raise ValueError @@ -558,7 +556,7 @@ class _DeviceManager: cpld_dev = """self.setattr_device("core_cache") self.setattr_device("{}")""".format(dds_model.cpld) - # `sta`/`rf_sw`` variables are guaranteed for urukuls + # `sta`/`rf_sw`` variables are guaranteed for urukuls # so {action} can use it # if there's no RF enabled, CPLD may have not been initialized # but if there is, it has been initialised - no need to do again @@ -617,8 +615,8 @@ class _DeviceManager: channel_init=channel_init)) asyncio.ensure_future( self._submit_by_content( - dds_exp, - title, + dds_exp, + title, log_msg)) def dds_set_frequency(self, dds_channel, dds_model, freq): @@ -633,8 +631,8 @@ class _DeviceManager: dds_channel, dds_model, action, - "SetDDS", - "Set DDS {} {}MHz".format(dds_channel, freq/1e6)) + "SetDDS", + "Set DDS {} {}MHz".format(dds_channel, freq / 1e6)) def dds_channel_toggle(self, dds_channel, dds_model, sw=True): # urukul only @@ -654,7 +652,7 @@ class _DeviceManager: dds_channel, dds_model, action, - "ToggleDDS", + "ToggleDDS", "Toggle DDS {} {}".format(dds_channel, "on" if sw else "off")) def setup_ttl_monitoring(self, enable, channel): @@ -712,11 +710,12 @@ class _DeviceManager: await self.mi_connection.close() self.mi_connection = None new_mi_connection = CommMonInj(self.monitor_cb, self.injection_status_cb, - self.disconnect_cb) + self.disconnect_cb) try: await new_mi_connection.connect(self.mi_addr, self.mi_port) except Exception: - logger.error("failed to connect to moninj. Is aqctl_moninj_proxy running?", exc_info=True) + logger.error("failed to connect to moninj. 
Is aqctl_moninj_proxy running?", + exc_info=True) await asyncio.sleep(10.) self.reconnect_mi.set() else: @@ -769,12 +768,9 @@ class MonInj: self.dac_dock = _MonInjDock("DAC") self.dm = _DeviceManager(schedule_ctl) - self.dm.ttl_cb = lambda: self.ttl_dock.layout_widgets( - self.dm.ttl_widgets.values()) - self.dm.dds_cb = lambda: self.dds_dock.layout_widgets( - self.dm.dds_widgets.values()) - self.dm.dac_cb = lambda: self.dac_dock.layout_widgets( - self.dm.dac_widgets.values()) + self.dm.ttl_cb = lambda: self.ttl_dock.layout_widgets(self.dm.ttl_widgets.values()) + self.dm.dds_cb = lambda: self.dds_dock.layout_widgets(self.dm.dds_widgets.values()) + self.dm.dac_cb = lambda: self.dac_dock.layout_widgets(self.dm.dac_widgets.values()) async def stop(self): if self.dm is not None: From b555f08ed855383a53392cdf044046235e130620 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 10 Apr 2024 11:42:14 +0800 Subject: [PATCH 204/296] artiq_dashboard: clean imports --- artiq/frontend/artiq_dashboard.py | 1 - 1 file changed, 1 deletion(-) diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index 168b877bb..c54e3a76b 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ -6,7 +6,6 @@ import atexit import importlib import os import logging -import sys from PyQt5 import QtCore, QtGui, QtWidgets from qasync import QEventLoop From 9d3509d7b0f29533b3adc20599e12c776f5c0296 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 10 Apr 2024 11:45:08 +0800 Subject: [PATCH 205/296] shortcuts: clean imports --- artiq/dashboard/shortcuts.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/artiq/dashboard/shortcuts.py b/artiq/dashboard/shortcuts.py index c7750d2a7..e8a4bc790 100644 --- a/artiq/dashboard/shortcuts.py +++ b/artiq/dashboard/shortcuts.py @@ -3,8 +3,6 @@ from functools import partial from PyQt5 import QtCore, QtWidgets -from artiq.gui.tools import LayoutWidget - logger = logging.getLogger(__name__) From 49930a2df2222f42b17c9ea51107256c9f21ee0c Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 10 Apr 2024 11:49:28 +0800 Subject: [PATCH 206/296] datasets: clean imports --- artiq/dashboard/datasets.py | 1 - 1 file changed, 1 deletion(-) diff --git a/artiq/dashboard/datasets.py b/artiq/dashboard/datasets.py index f7c996849..b2492658c 100644 --- a/artiq/dashboard/datasets.py +++ b/artiq/dashboard/datasets.py @@ -8,7 +8,6 @@ from sipyco import pyon from artiq.tools import scale_from_metadata, short_format, exc_to_warning from artiq.gui.tools import LayoutWidget, QRecursiveFilterProxyModel from artiq.gui.models import DictSyncTreeSepModel -from artiq.gui.scientific_spinbox import ScientificSpinBox logger = logging.getLogger(__name__) From 43edffc67eb5750ac0a71f0c3a2bdb556079e6e4 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 10 Apr 2024 11:52:38 +0800 Subject: [PATCH 207/296] waveform: clean up imports --- artiq/dashboard/waveform.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/artiq/dashboard/waveform.py b/artiq/dashboard/waveform.py index 5153dd807..c97ed8c16 100644 --- a/artiq/dashboard/waveform.py +++ b/artiq/dashboard/waveform.py @@ -17,7 +17,7 @@ from artiq.tools import exc_to_warning, short_format from artiq.coredevice import comm_analyzer from artiq.coredevice.comm_analyzer import WaveformType from artiq.gui.tools import LayoutWidget, get_open_file_name, get_save_file_name -from artiq.gui.models import DictSyncTreeSepModel, LocalModelManager +from artiq.gui.models import DictSyncTreeSepModel from 
artiq.gui.dndwidgets import VDragScrollArea, VDragDropSplitter From 7d9199a2eee2e6795747185ef9de9e09fc1bbc2d Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 10 Apr 2024 12:11:51 +0800 Subject: [PATCH 208/296] artiq_dashboard: style (NFC) --- artiq/frontend/artiq_dashboard.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index c54e3a76b..634f5e29d 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ -70,7 +70,7 @@ class MainWindow(QtWidgets.QMainWindow): self.setWindowTitle("ARTIQ Dashboard - {}".format(server)) qfm = QtGui.QFontMetrics(self.font()) - self.resize(140*qfm.averageCharWidth(), 38*qfm.lineSpacing()) + self.resize(140 * qfm.averageCharWidth(), 38 * qfm.lineSpacing()) self.exit_request = asyncio.Event() @@ -110,8 +110,8 @@ class MdiArea(QtWidgets.QMdiArea): def paintEvent(self, event): QtWidgets.QMdiArea.paintEvent(self, event) painter = QtGui.QPainter(self.viewport()) - x = (self.width() - self.pixmap.width())//2 - y = (self.height() - self.pixmap.height())//2 + x = (self.width() - self.pixmap.width()) // 2 + y = (self.height() - self.pixmap.height()) // 2 painter.setOpacity(0.5) painter.drawPixmap(x, y, self.pixmap) @@ -128,9 +128,9 @@ def main(): if args.db_file is None: args.db_file = os.path.join(get_user_config_dir(), - "artiq_dashboard_{server}_{port}.pyon".format( - server=args.server.replace(":","."), - port=args.port_notify)) + "artiq_dashboard_{server}_{port}.pyon".format( + server=args.server.replace(":", "."), + port=args.port_notify)) app = QtWidgets.QApplication(["ARTIQ Dashboard"]) loop = QEventLoop(app) @@ -154,6 +154,7 @@ def main(): master_management.close_rpc() disconnect_reported = False + def report_disconnect(): nonlocal disconnect_reported if not disconnect_reported: @@ -167,8 +168,7 @@ def main(): ("datasets", datasets.Model), ("schedule", schedule.Model), ("interactive_args", interactive_args.Model)): - subscriber = ModelSubscriber(notifier_name, modelf, - report_disconnect) + subscriber = ModelSubscriber(notifier_name, modelf, report_disconnect) loop.run_until_complete(subscriber.connect( args.server, args.port_notify)) atexit_register_coroutine(subscriber.close, loop=loop) @@ -288,7 +288,6 @@ def main(): if d_log0 is not None: main_window.tabifyDockWidget(d_schedule, d_log0) - if server_name is not None: server_description = server_name + " ({})".format(args.server) else: @@ -299,5 +298,6 @@ def main(): main_window.show() loop.run_until_complete(main_window.exit_request.wait()) + if __name__ == "__main__": main() From 1b2a18c9c8e3fe622692f399033a107c559aaa5d Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 10 Apr 2024 12:14:06 +0800 Subject: [PATCH 209/296] applets_ccb: style (NFC) --- artiq/dashboard/applets_ccb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/artiq/dashboard/applets_ccb.py b/artiq/dashboard/applets_ccb.py index 78c7ab673..29afbbc1d 100644 --- a/artiq/dashboard/applets_ccb.py +++ b/artiq/dashboard/applets_ccb.py @@ -36,7 +36,7 @@ class AppletsCCBDock(applets.AppletsDock): ccbp_group_menu.addAction(self.ccbp_group_create) actiongroup.addAction(self.ccbp_group_create) self.ccbp_group_enable = QtWidgets.QAction("Create and enable/disable applets", - self.table) + self.table) self.ccbp_group_enable.setCheckable(True) self.ccbp_group_enable.triggered.connect(lambda: self.set_ccbp("enable")) ccbp_group_menu.addAction(self.ccbp_group_enable) From 
400c1644b072182da8c5ea3348794909198cb11c Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 10 Apr 2024 12:18:24 +0800 Subject: [PATCH 210/296] datasets: style (NFC) --- artiq/dashboard/datasets.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/artiq/dashboard/datasets.py b/artiq/dashboard/datasets.py index b2492658c..749828d61 100644 --- a/artiq/dashboard/datasets.py +++ b/artiq/dashboard/datasets.py @@ -109,9 +109,11 @@ class CreateEditDialog(QtWidgets.QDialog): # degenerates to float type value = float(value * scale) if self.key and self.key != key: - asyncio.ensure_future(exc_to_warning(rename(self.key, key, value, metadata, persist, self.dataset_ctl))) + asyncio.ensure_future(exc_to_warning(rename(self.key, key, value, metadata, persist, + self.dataset_ctl))) else: - asyncio.ensure_future(exc_to_warning(self.dataset_ctl.set(key, value, metadata=metadata, persist=persist))) + asyncio.ensure_future(exc_to_warning(self.dataset_ctl.set(key, value, metadata=metadata, + persist=persist))) self.key = key QtWidgets.QDialog.accept(self) @@ -161,7 +163,7 @@ class CreateEditDialog(QtWidgets.QDialog): class Model(DictSyncTreeSepModel): - def __init__(self, init): + def __init__(self, init): DictSyncTreeSepModel.__init__(self, ".", ["Dataset", "Persistent", "Value"], init) From 3ecd115252503f66f3c691909df3d8c3d72aca83 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 10 Apr 2024 12:26:20 +0800 Subject: [PATCH 211/296] experiments: style (NFC) --- artiq/dashboard/experiments.py | 39 +++++++++++++++++----------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/artiq/dashboard/experiments.py b/artiq/dashboard/experiments.py index 0defaa103..10bfa5a0c 100644 --- a/artiq/dashboard/experiments.py +++ b/artiq/dashboard/experiments.py @@ -98,7 +98,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): def __init__(self, manager, expurl): QtWidgets.QMdiSubWindow.__init__(self) qfm = QtGui.QFontMetrics(self.font()) - self.resize(100*qfm.averageCharWidth(), 30*qfm.lineSpacing()) + self.resize(100 * qfm.averageCharWidth(), 30 * qfm.lineSpacing()) self.setWindowTitle(expurl) self.setWindowIcon(QtWidgets.QApplication.style().standardIcon( QtWidgets.QStyle.SP_FileDialogContentsView)) @@ -131,17 +131,17 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): datetime.setDate(QtCore.QDate.currentDate()) else: datetime.setDateTime(QtCore.QDateTime.fromMSecsSinceEpoch( - int(scheduling["due_date"]*1000))) + int(scheduling["due_date"] * 1000))) datetime_en.setChecked(scheduling["due_date"] is not None) def update_datetime(dt): - scheduling["due_date"] = dt.toMSecsSinceEpoch()/1000 + scheduling["due_date"] = dt.toMSecsSinceEpoch() / 1000 datetime_en.setChecked(True) datetime.dateTimeChanged.connect(update_datetime) def update_datetime_en(checked): if checked: - due_date = datetime.dateTime().toMSecsSinceEpoch()/1000 + due_date = datetime.dateTime().toMSecsSinceEpoch() / 1000 else: due_date = None scheduling["due_date"] = due_date @@ -237,7 +237,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): submit = QtWidgets.QPushButton("Submit") submit.setIcon(QtWidgets.QApplication.style().standardIcon( - QtWidgets.QStyle.SP_DialogOkButton)) + QtWidgets.QStyle.SP_DialogOkButton)) submit.setToolTip("Schedule the experiment (Ctrl+Return)") submit.setShortcut("CTRL+RETURN") submit.setSizePolicy(QtWidgets.QSizePolicy.Expanding, @@ -247,7 +247,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): reqterm = QtWidgets.QPushButton("Terminate instances") 
reqterm.setIcon(QtWidgets.QApplication.style().standardIcon( - QtWidgets.QStyle.SP_DialogCancelButton)) + QtWidgets.QStyle.SP_DialogCancelButton)) reqterm.setToolTip("Request termination of instances (Ctrl+Backspace)") reqterm.setShortcut("CTRL+BACKSPACE") reqterm.setSizePolicy(QtWidgets.QSizePolicy.Expanding, @@ -289,8 +289,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): arginfo = expdesc["arginfo"] for k, v in overrides.items(): # Some values (e.g. scans) may have multiple defaults in a list - if ("default" in arginfo[k][0] - and isinstance(arginfo[k][0]["default"], list)): + if ("default" in arginfo[k][0] and isinstance(arginfo[k][0]["default"], list)): arginfo[k][0]["default"].insert(0, v) else: arginfo[k][0]["default"] = v @@ -355,9 +354,9 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): unparse_devarg_override(expid["devarg_override"])) self.log_level.setCurrentIndex(log_levels.index( log_level_to_name(expid["log_level"]))) - if ("repo_rev" in expid and - expid["repo_rev"] != "N/A" and - hasattr(self, "repo_rev")): + if "repo_rev" in expid and \ + expid["repo_rev"] != "N/A" and \ + hasattr(self, "repo_rev"): self.repo_rev.setText(expid["repo_rev"]) except: logger.error("Could not set submission options from HDF5 expid", @@ -547,7 +546,7 @@ class ExperimentManager: self.submission_arguments[expurl] = arguments self.argument_ui_names[expurl] = ui_name return arguments - + def set_argument_value(self, expurl, name, value): try: argument = self.submission_arguments[expurl][name] @@ -560,7 +559,8 @@ class ExperimentManager: if expurl in self.open_experiments.keys(): self.open_experiments[expurl].argeditor.update_argument(name, argument) except: - logger.warn("Failed to set value for argument \"{}\" in experiment: {}.".format(name, expurl), exc_info=1) + logger.warn("Failed to set value for argument \"{}\" in experiment: {}." 
+ .format(name, expurl), exc_info=1) def get_submission_arguments(self, expurl): if expurl in self.submission_arguments: @@ -570,8 +570,8 @@ class ExperimentManager: raise ValueError("Submission arguments must be preinitialized " "when not using repository") class_desc = self.explist[expurl[5:]] - return self.initialize_submission_arguments(expurl, - class_desc["arginfo"], class_desc.get("argument_ui", None)) + return self.initialize_submission_arguments(expurl, class_desc["arginfo"], + class_desc.get("argument_ui", None)) def open_experiment(self, expurl): if expurl in self.open_experiments: @@ -671,9 +671,9 @@ class ExperimentManager: repo_match = "repo_rev" in expid else: repo_match = "repo_rev" not in expid - if (repo_match and - ("file" in expid and expid["file"] == file) and - expid["class_name"] == class_name): + if repo_match and \ + ("file" in expid and expid["file"] == file) and \ + expid["class_name"] == class_name: rids.append(rid) asyncio.ensure_future(self._request_term_multiple(rids)) @@ -693,7 +693,7 @@ class ExperimentManager: for class_name, class_desc in description.items(): expurl = "file:{}@{}".format(class_name, file) self.initialize_submission_arguments(expurl, class_desc["arginfo"], - class_desc.get("argument_ui", None)) + class_desc.get("argument_ui", None)) if expurl in self.open_experiments: self.open_experiments[expurl].close() self.open_experiment(expurl) @@ -727,6 +727,7 @@ class ExperimentManager: self.is_quick_open_shown = True dialog = _QuickOpenDialog(self) + def closed(): self.is_quick_open_shown = False dialog.closed.connect(closed) From 4f302ee675fbfc3798ac534343126fe1077439a3 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 10 Apr 2024 12:28:50 +0800 Subject: [PATCH 212/296] explorer: style (NFC) --- artiq/dashboard/explorer.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/artiq/dashboard/explorer.py b/artiq/dashboard/explorer.py index f9dab39dd..f8c8df1f0 100644 --- a/artiq/dashboard/explorer.py +++ b/artiq/dashboard/explorer.py @@ -94,7 +94,7 @@ class _OpenFileDialog(QtWidgets.QDialog): else: break self.explorer.current_directory = \ - self.explorer.current_directory[:idx+1] + self.explorer.current_directory[:idx + 1] if self.explorer.current_directory == "/": self.explorer.current_directory = "" asyncio.ensure_future(self.refresh_view()) @@ -103,6 +103,7 @@ class _OpenFileDialog(QtWidgets.QDialog): asyncio.ensure_future(self.refresh_view()) else: file = self.explorer.current_directory + selected + async def open_task(): try: await self.exp_manager.open_file(file) @@ -232,7 +233,7 @@ class ExplorerDock(QtWidgets.QDockWidget): set_shortcut_menu = QtWidgets.QMenu() for i in range(12): - action = QtWidgets.QAction("F" + str(i+1), self.el) + action = QtWidgets.QAction("F" + str(i + 1), self.el) action.triggered.connect(partial(self.set_shortcut, i)) set_shortcut_menu.addAction(action) @@ -246,12 +247,14 @@ class ExplorerDock(QtWidgets.QDockWidget): scan_repository_action = QtWidgets.QAction("Scan repository HEAD", self.el) + def scan_repository(): asyncio.ensure_future(experiment_db_ctl.scan_repository_async()) scan_repository_action.triggered.connect(scan_repository) self.el.addAction(scan_repository_action) scan_ddb_action = QtWidgets.QAction("Scan device database", self.el) + def scan_ddb(): asyncio.ensure_future(device_db_ctl.scan()) scan_ddb_action.triggered.connect(scan_ddb) @@ -292,7 +295,7 @@ class ExplorerDock(QtWidgets.QDockWidget): if expname is not None: expurl = "repo:" + expname 
self.d_shortcuts.set_shortcut(nr, expurl) - logger.info("Set shortcut F%d to '%s'", nr+1, expurl) + logger.info("Set shortcut F%d to '%s'", nr + 1, expurl) def update_scanning(self, scanning): if scanning: From c1e6ae219312492823824970764c92a076069ef8 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 10 Apr 2024 12:32:11 +0800 Subject: [PATCH 213/296] schedule: style (NFC) --- artiq/dashboard/schedule.py | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/artiq/dashboard/schedule.py b/artiq/dashboard/schedule.py index d6b74a0a7..94c229258 100644 --- a/artiq/dashboard/schedule.py +++ b/artiq/dashboard/schedule.py @@ -15,9 +15,8 @@ logger = logging.getLogger(__name__) class Model(DictSyncModel): def __init__(self, init): DictSyncModel.__init__(self, - ["RID", "Pipeline", "Status", "Prio", "Due date", - "Revision", "File", "Class name"], - init) + ["RID", "Pipeline", "Status", "Prio", "Due date", + "Revision", "File", "Class name"], init) def sort_key(self, k, v): # order by priority, and then by due date and RID @@ -96,14 +95,14 @@ class ScheduleDock(QtWidgets.QDockWidget): cw = QtGui.QFontMetrics(self.font()).averageCharWidth() h = self.table.horizontalHeader() - h.resizeSection(0, 7*cw) - h.resizeSection(1, 12*cw) - h.resizeSection(2, 16*cw) - h.resizeSection(3, 6*cw) - h.resizeSection(4, 16*cw) - h.resizeSection(5, 30*cw) - h.resizeSection(6, 20*cw) - h.resizeSection(7, 20*cw) + h.resizeSection(0, 7 * cw) + h.resizeSection(1, 12 * cw) + h.resizeSection(2, 16 * cw) + h.resizeSection(3, 6 * cw) + h.resizeSection(4, 16 * cw) + h.resizeSection(5, 30 * cw) + h.resizeSection(6, 20 * cw) + h.resizeSection(7, 20 * cw) def set_model(self, model): self.table_model = model @@ -143,7 +142,7 @@ class ScheduleDock(QtWidgets.QDockWidget): selected_rid = self.table_model.row_to_key[row] pipeline = self.table_model.backing_store[selected_rid]["pipeline"] logger.info("Requesting termination of all " - "experiments in pipeline '%s'", pipeline) + "experiments in pipeline '%s'", pipeline) rids = set() for rid, info in self.table_model.backing_store.items(): @@ -151,7 +150,6 @@ class ScheduleDock(QtWidgets.QDockWidget): rids.add(rid) asyncio.ensure_future(self.request_term_multiple(rids)) - def save_state(self): return bytes(self.table.horizontalHeader().saveState()) From c4892cf285167502b00d48b48eb877b622afa80a Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 10 Apr 2024 12:33:54 +0800 Subject: [PATCH 214/296] shortcuts: style (NFC) --- artiq/dashboard/shortcuts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/artiq/dashboard/shortcuts.py b/artiq/dashboard/shortcuts.py index e8a4bc790..30217b5f6 100644 --- a/artiq/dashboard/shortcuts.py +++ b/artiq/dashboard/shortcuts.py @@ -33,7 +33,7 @@ class ShortcutsDock(QtWidgets.QDockWidget): for i in range(12): row = i + 1 - layout.addWidget(QtWidgets.QLabel("F" + str(i+1)), row, 0) + layout.addWidget(QtWidgets.QLabel("F" + str(i + 1)), row, 0) label = QtWidgets.QLabel() label.setSizePolicy(QtWidgets.QSizePolicy.Ignored, @@ -68,7 +68,7 @@ class ShortcutsDock(QtWidgets.QDockWidget): "open": open, "submit": submit } - shortcut = QtWidgets.QShortcut("F" + str(i+1), main_window) + shortcut = QtWidgets.QShortcut("F" + str(i + 1), main_window) shortcut.setContext(QtCore.Qt.ApplicationShortcut) shortcut.activated.connect(partial(self._activated, i)) From f9a447e8e0fe41c693d6446d7547c6809c7b6c79 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 11 Apr 2024 11:20:22 +0800 Subject: [PATCH 
215/296] entries: fix EnumerationEntry disable_scroll_wheel --- artiq/gui/entries.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/artiq/gui/entries.py b/artiq/gui/entries.py index dd7231331..43daa7a5b 100644 --- a/artiq/gui/entries.py +++ b/artiq/gui/entries.py @@ -202,7 +202,6 @@ class EnumerationEntry(QtWidgets.QWidget): def __init__(self, argument): QtWidgets.QWidget.__init__(self) - disable_scroll_wheel(self) layout = QtWidgets.QHBoxLayout() self.setLayout(layout) procdesc = argument["desc"] @@ -221,6 +220,7 @@ class EnumerationEntry(QtWidgets.QWidget): self.btn_group.idClicked.connect(submit) else: self.combo_box = QtWidgets.QComboBox() + disable_scroll_wheel(self.combo_box) self.combo_box.addItems(choices) idx = choices.index(argument["state"]) self.combo_box.setCurrentIndex(idx) From 4a2352c2df21334593f01fc765a0d5c0b50808cd Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 11 Apr 2024 11:01:34 +0800 Subject: [PATCH 216/296] browser: disable quickstyle --- artiq/browser/experiments.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/artiq/browser/experiments.py b/artiq/browser/experiments.py index 7c2267937..c5cf9c54c 100644 --- a/artiq/browser/experiments.py +++ b/artiq/browser/experiments.py @@ -369,6 +369,8 @@ class ExperimentsArea(QtWidgets.QMdiArea): def initialize_submission_arguments(self, arginfo): arguments = OrderedDict() for name, (procdesc, group, tooltip) in arginfo.items(): + if procdesc["ty"] == "EnumerationValue" and procdesc["quickstyle"]: + procdesc["quickstyle"] = False state = procdesc_to_entry(procdesc).default_state(procdesc) arguments[name] = { "desc": procdesc, From ab206ac154da19c49f595ea83ddfee1e9429ca9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Thu, 11 Apr 2024 16:34:08 +0800 Subject: [PATCH 217/296] worker: import host_only from the right place --- artiq/master/worker_impl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/artiq/master/worker_impl.py b/artiq/master/worker_impl.py index 1f069a50c..bf0bfd587 100644 --- a/artiq/master/worker_impl.py +++ b/artiq/master/worker_impl.py @@ -28,10 +28,10 @@ from artiq.master.worker_db import DeviceManager, DatasetManager, DummyDevice from artiq.language.environment import ( is_public_experiment, TraceArgumentManager, ProcessArgumentManager ) -from artiq.language.core import set_watchdog_factory, TerminationRequested +from artiq.language.core import host_only, set_watchdog_factory, TerminationRequested from artiq.language.types import TBool from artiq.compiler import import_cache -from artiq.coredevice.core import CompileError, host_only, _render_diagnostic +from artiq.coredevice.core import CompileError, _render_diagnostic from artiq import __version__ as artiq_version From 1bcbee988d3638d548244e5175e42740c25e43dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Thu, 11 Apr 2024 16:35:44 +0800 Subject: [PATCH 218/296] update copyright year --- README.rst | 2 +- artiq/firmware/bootloader/main.rs | 2 +- doc/manual/conf.py | 2 +- doc/manual/introduction.rst | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index 9ec774741..c6014c8d4 100644 --- a/README.rst +++ b/README.rst @@ -29,7 +29,7 @@ Website: https://m-labs.hk/artiq License ======= -Copyright (C) 2014-2023 M-Labs Limited. +Copyright (C) 2014-2024 M-Labs Limited. 
ARTIQ is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by diff --git a/artiq/firmware/bootloader/main.rs b/artiq/firmware/bootloader/main.rs index 50772d8f7..a7ac7fd7c 100644 --- a/artiq/firmware/bootloader/main.rs +++ b/artiq/firmware/bootloader/main.rs @@ -500,7 +500,7 @@ pub extern fn main() -> i32 { println!(r"|_| |_|_|____/ \___/ \____|"); println!(""); println!("MiSoC Bootloader"); - println!("Copyright (c) 2017-2023 M-Labs Limited"); + println!("Copyright (c) 2017-2024 M-Labs Limited"); println!(""); #[cfg(has_ethmac)] diff --git a/doc/manual/conf.py b/doc/manual/conf.py index 34f0af9bb..2396b497a 100644 --- a/doc/manual/conf.py +++ b/doc/manual/conf.py @@ -97,7 +97,7 @@ master_doc = 'index' # General information about the project. project = 'ARTIQ' -copyright = '2014-2023, M-Labs Limited' +copyright = '2014-2024, M-Labs Limited' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/doc/manual/introduction.rst b/doc/manual/introduction.rst index eff537f67..30ff1ae05 100644 --- a/doc/manual/introduction.rst +++ b/doc/manual/introduction.rst @@ -27,4 +27,4 @@ Website: https://m-labs.hk/artiq `Cite ARTIQ `_ as ``Bourdeauducq, Sébastien et al. (2016). ARTIQ 1.0. Zenodo. 10.5281/zenodo.51303``. -Copyright (C) 2014-2023 M-Labs Limited. Licensed under GNU LGPL version 3+. +Copyright (C) 2014-2024 M-Labs Limited. Licensed under GNU LGPL version 3+. From baa58343ac11cd86b06f5f2a3b2c1bfa742f69c5 Mon Sep 17 00:00:00 2001 From: Norman Krackow Date: Fri, 12 Apr 2024 03:03:17 +0200 Subject: [PATCH 219/296] urukul: fix `tune_sync_delay()` (#2374) --- artiq/coredevice/ad9910.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/artiq/coredevice/ad9910.py b/artiq/coredevice/ad9910.py index 801b689ca..8947abc28 100644 --- a/artiq/coredevice/ad9910.py +++ b/artiq/coredevice/ad9910.py @@ -999,7 +999,7 @@ class AD9910: """ if not self.cpld.sync_div: raise ValueError("parent cpld does not drive SYNC") - search_span = 31 + search_span = 13 # FIXME https://github.com/sinara-hw/Urukul/issues/16 # should both be 2-4 once kasli sync_in jitter is identified min_window = 0 From 76d704ac332ff665ddce7a8d56d4dc88d467fef9 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Tue, 16 Apr 2024 10:22:18 +0800 Subject: [PATCH 220/296] drtio: revert async flag message --- artiq/gateware/drtio/core.py | 6 +----- artiq/gateware/drtio/rt_controller_master.py | 6 ------ artiq/gateware/drtio/rt_controller_repeater.py | 7 ------- artiq/gateware/drtio/rt_packet_master.py | 7 ------- artiq/gateware/drtio/rt_packet_repeater.py | 4 +--- artiq/gateware/drtio/rt_packet_satellite.py | 12 ------------ artiq/gateware/drtio/rt_serializer.py | 1 - 7 files changed, 2 insertions(+), 41 deletions(-) diff --git a/artiq/gateware/drtio/core.py b/artiq/gateware/drtio/core.py index b344b9c95..c6db5d9af 100644 --- a/artiq/gateware/drtio/core.py +++ b/artiq/gateware/drtio/core.py @@ -78,7 +78,6 @@ class DRTIOSatellite(Module): self.reset = CSRStorage(reset=1) self.reset_phy = CSRStorage(reset=1) self.tsc_loaded = CSR() - self.async_messages_ready = CSR() # master interface in the sys domain self.cri = cri.Interface() self.async_errors = Record(async_errors_layout) @@ -130,9 +129,6 @@ class DRTIOSatellite(Module): link_layer_sync, interface=self.cri) self.comb += self.rt_packet.reset.eq(self.cd_rio.rst) - self.sync += If(self.async_messages_ready.re, 
self.rt_packet.async_msg_stb.eq(1)) - self.comb += self.async_messages_ready.w.eq(self.rt_packet.async_msg_ack) - self.comb += [ tsc.load.eq(self.rt_packet.tsc_load), tsc.load_value.eq(self.rt_packet.tsc_load_value) @@ -147,7 +143,7 @@ class DRTIOSatellite(Module): self.rt_packet, tsc, self.async_errors) def get_csrs(self): - return ([self.reset, self.reset_phy, self.tsc_loaded, self.async_messages_ready] + + return ([self.reset, self.reset_phy, self.tsc_loaded] + self.link_layer.get_csrs() + self.link_stats.get_csrs() + self.rt_errors.get_csrs()) diff --git a/artiq/gateware/drtio/rt_controller_master.py b/artiq/gateware/drtio/rt_controller_master.py index aa630254f..3ed22dbe7 100644 --- a/artiq/gateware/drtio/rt_controller_master.py +++ b/artiq/gateware/drtio/rt_controller_master.py @@ -17,7 +17,6 @@ class _CSRs(AutoCSR): self.set_time = CSR() self.underflow_margin = CSRStorage(16, reset=300) - self.async_messages_ready = CSR() self.force_destination = CSRStorage() self.destination = CSRStorage(8) @@ -61,11 +60,6 @@ class RTController(Module): If(self.csrs.set_time.re, rt_packet.set_time_stb.eq(1)) ] - self.sync += [ - If(rt_packet.async_messages_ready, self.csrs.async_messages_ready.w.eq(1)), - If(self.csrs.async_messages_ready.re, self.csrs.async_messages_ready.w.eq(0)) - ] - # chan_sel forcing chan_sel = Signal(24) self.comb += chan_sel.eq(Mux(self.csrs.force_destination.storage, diff --git a/artiq/gateware/drtio/rt_controller_repeater.py b/artiq/gateware/drtio/rt_controller_repeater.py index bdc96fe38..79b9559eb 100644 --- a/artiq/gateware/drtio/rt_controller_repeater.py +++ b/artiq/gateware/drtio/rt_controller_repeater.py @@ -14,7 +14,6 @@ class RTController(Module, AutoCSR): self.command_missed_cmd = CSRStatus(2) self.command_missed_chan_sel = CSRStatus(24) self.buffer_space_timeout_dest = CSRStatus(8) - self.async_messages_ready = CSR() self.sync += rt_packet.reset.eq(self.reset.storage) @@ -24,12 +23,6 @@ class RTController(Module, AutoCSR): ] self.comb += self.set_time.w.eq(rt_packet.set_time_stb) - self.sync += [ - If(rt_packet.async_messages_ready, self.async_messages_ready.w.eq(1)), - If(self.async_messages_ready.re, self.async_messages_ready.w.eq(0)) - ] - - errors = [ (rt_packet.err_unknown_packet_type, "rtio_rx", None, None), (rt_packet.err_packet_truncated, "rtio_rx", None, None), diff --git a/artiq/gateware/drtio/rt_packet_master.py b/artiq/gateware/drtio/rt_packet_master.py index 32d3a39a7..70d44ecaf 100644 --- a/artiq/gateware/drtio/rt_packet_master.py +++ b/artiq/gateware/drtio/rt_packet_master.py @@ -61,9 +61,6 @@ class RTPacketMaster(Module): # a set_time request pending self.tsc_value = Signal(64) - # async aux messages interface, only received - self.async_messages_ready = Signal() - # rx errors self.err_unknown_packet_type = Signal() self.err_packet_truncated = Signal() @@ -286,16 +283,12 @@ class RTPacketMaster(Module): echo_received_now = Signal() self.sync.rtio_rx += self.echo_received_now.eq(echo_received_now) - async_messages_ready = Signal() - self.sync.rtio_rx += self.async_messages_ready.eq(async_messages_ready) - rx_fsm.act("INPUT", If(rx_dp.frame_r, rx_dp.packet_buffer_load.eq(1), If(rx_dp.packet_last, Case(rx_dp.packet_type, { rx_plm.types["echo_reply"]: echo_received_now.eq(1), - rx_plm.types["async_messages_ready"]: async_messages_ready.eq(1), rx_plm.types["buffer_space_reply"]: NextState("BUFFER_SPACE"), rx_plm.types["read_reply"]: NextState("READ_REPLY"), rx_plm.types["read_reply_noevent"]: NextState("READ_REPLY_NOEVENT"), diff --git 
a/artiq/gateware/drtio/rt_packet_repeater.py b/artiq/gateware/drtio/rt_packet_repeater.py index 62abeeee1..728c24ae8 100644 --- a/artiq/gateware/drtio/rt_packet_repeater.py +++ b/artiq/gateware/drtio/rt_packet_repeater.py @@ -19,7 +19,6 @@ class RTPacketRepeater(Module): # in rtio_rx domain self.err_unknown_packet_type = Signal() self.err_packet_truncated = Signal() - self.async_messages_ready = Signal() # in rtio domain self.err_command_missed = Signal() @@ -305,7 +304,6 @@ class RTPacketRepeater(Module): rx_dp.packet_buffer_load.eq(1), If(rx_dp.packet_last, Case(rx_dp.packet_type, { - rx_plm.types["async_messages_ready"]: self.async_messages_ready.eq(1), rx_plm.types["buffer_space_reply"]: NextState("BUFFER_SPACE"), rx_plm.types["read_reply"]: NextState("READ_REPLY"), rx_plm.types["read_reply_noevent"]: NextState("READ_REPLY_NOEVENT"), @@ -333,4 +331,4 @@ class RTPacketRepeater(Module): read_not.eq(1), read_no_event.eq(1), NextState("INPUT") - ) \ No newline at end of file + ) diff --git a/artiq/gateware/drtio/rt_packet_satellite.py b/artiq/gateware/drtio/rt_packet_satellite.py index a4094d9db..79a48f493 100644 --- a/artiq/gateware/drtio/rt_packet_satellite.py +++ b/artiq/gateware/drtio/rt_packet_satellite.py @@ -19,9 +19,6 @@ class RTPacketSatellite(Module): self.tsc_load = Signal() self.tsc_load_value = Signal(64) - self.async_msg_stb = Signal() - self.async_msg_ack = Signal() - if interface is None: interface = cri.Interface() self.cri = interface @@ -81,8 +78,6 @@ class RTPacketSatellite(Module): ) ] - self.sync += If(self.async_msg_ack, self.async_msg_stb.eq(0)) - # RX FSM cri_read = Signal() cri_buffer_space = Signal() @@ -202,7 +197,6 @@ class RTPacketSatellite(Module): tx_fsm.act("IDLE", If(echo_req, NextState("ECHO")), - If(self.async_msg_stb, NextState("ASYNC_MESSAGES_READY")), If(buffer_space_req, NextState("BUFFER_SPACE")), If(read_request_pending & ~self.cri.i_status[2], NextState("READ"), @@ -216,12 +210,6 @@ class RTPacketSatellite(Module): If(tx_dp.packet_last, NextState("IDLE")) ) - tx_fsm.act("ASYNC_MESSAGES_READY", - self.async_msg_ack.eq(1), - tx_dp.send("async_messages_ready"), - If(tx_dp.packet_last, NextState("IDLE")) - ) - tx_fsm.act("BUFFER_SPACE", buffer_space_ack.eq(1), tx_dp.send("buffer_space_reply", space=buffer_space), diff --git a/artiq/gateware/drtio/rt_serializer.py b/artiq/gateware/drtio/rt_serializer.py index 9a77263a4..01e5cf19e 100644 --- a/artiq/gateware/drtio/rt_serializer.py +++ b/artiq/gateware/drtio/rt_serializer.py @@ -69,7 +69,6 @@ def get_s2m_layouts(alignment): plm.add_type("read_reply", ("timestamp", 64), ("data", 32)) plm.add_type("read_reply_noevent", ("overflow", 1)) # overflow=0→timeout - plm.add_type("async_messages_ready") return plm From b6ac052e9f83e96b3f4f312f8afea966eb9012d3 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 18 Apr 2024 16:55:35 +0800 Subject: [PATCH 221/296] aux_controller: multiple receiver buffers --- artiq/gateware/drtio/aux_controller.py | 70 +++++++++++-------- artiq/gateware/targets/efc.py | 5 +- artiq/gateware/targets/kasli.py | 21 +++--- artiq/gateware/targets/kc705.py | 14 ++-- .../test/drtio/test_aux_controller.py | 24 ++++--- 5 files changed, 81 insertions(+), 53 deletions(-) diff --git a/artiq/gateware/drtio/aux_controller.py b/artiq/gateware/drtio/aux_controller.py index 5f91b1f45..27ca2a15d 100644 --- a/artiq/gateware/drtio/aux_controller.py +++ b/artiq/gateware/drtio/aux_controller.py @@ -10,6 +10,7 @@ from misoc.interconnect import wishbone max_packet = 1024 +aux_buffer_count = 8 class 
Transmitter(Module, AutoCSR): @@ -95,13 +96,13 @@ class Transmitter(Module, AutoCSR): class Receiver(Module, AutoCSR): def __init__(self, link_layer, min_mem_dw): - self.aux_rx_length = CSRStatus(bits_for(max_packet)) self.aux_rx_present = CSR() self.aux_rx_error = CSR() + self.aux_read_pointer = CSR(log2_int(aux_buffer_count)) ll_dw = len(link_layer.rx_aux_data) mem_dw = max(min_mem_dw, ll_dw) - self.specials.mem = Memory(mem_dw, max_packet//(mem_dw//8)) + self.specials.mem = Memory(mem_dw, aux_buffer_count*max_packet//(mem_dw//8)) converter = ClockDomainsRenamer("rtio_rx")( stream.Converter(ll_dw, mem_dw)) @@ -123,30 +124,45 @@ class Receiver(Module, AutoCSR): mem_port = self.mem.get_port(write_capable=True, clock_domain="rtio_rx") self.specials += mem_port + # write pointer represents where the gateware is + write_pointer = Signal(log2_int(aux_buffer_count)) + write_pointer_sys = Signal.like(write_pointer) + # read pointer represents where CPU is + # write reaching read is an error, read reaching write is buffer clear + read_pointer = Signal.like(write_pointer) + read_pointer_rx = Signal.like(write_pointer) + + read_pointer.attr.add("no_retiming") + write_pointer.attr.add("no_retiming") + signal_error = PulseSynchronizer("rtio_rx", "sys") + + self.specials += [ + MultiReg(read_pointer, read_pointer_rx, "rtio_rx"), + MultiReg(write_pointer, write_pointer_sys) + ] + frame_counter_nbits = bits_for(max_packet) - log2_int(mem_dw//8) frame_counter = Signal(frame_counter_nbits) + # frame counter requires one more bit to represent overflow (bits_for) + # actual valid packet will be addressed with one bit less + packet_nbits = frame_counter_nbits - 1 self.comb += [ - mem_port.adr.eq(frame_counter), + mem_port.adr[:packet_nbits].eq(frame_counter), + # bits above the frame counter point to current frame + mem_port.adr[packet_nbits:].eq(write_pointer), mem_port.dat_w.eq(converter.source.data), - converter.source.ack.eq(1) + converter.source.ack.eq(1), + self.aux_read_pointer.w.eq(read_pointer) ] - frame_counter.attr.add("no_retiming") - frame_counter_sys = Signal(frame_counter_nbits) - self.specials += MultiReg(frame_counter, frame_counter_sys) - self.comb += self.aux_rx_length.status.eq(frame_counter_sys << log2_int(mem_dw//8)) - - signal_frame = PulseSynchronizer("rtio_rx", "sys") - frame_ack = PulseSynchronizer("sys", "rtio_rx") - signal_error = PulseSynchronizer("rtio_rx", "sys") - self.submodules += signal_frame, frame_ack, signal_error + self.submodules += signal_error self.sync += [ - If(self.aux_rx_present.re, self.aux_rx_present.w.eq(0)), - If(signal_frame.o, self.aux_rx_present.w.eq(1)), If(self.aux_rx_error.re, self.aux_rx_error.w.eq(0)), - If(signal_error.o, self.aux_rx_error.w.eq(1)) + If(signal_error.o, self.aux_rx_error.w.eq(1)), + self.aux_rx_present.w.eq(~(read_pointer == write_pointer_sys)), + If(self.aux_rx_present.re & self.aux_rx_present.w, + read_pointer.eq(read_pointer + 1)), ] - self.comb += frame_ack.i.eq(self.aux_rx_present.re) fsm = ClockDomainsRenamer("rtio_rx")(FSM(reset_state="IDLE")) self.submodules += fsm @@ -171,7 +187,7 @@ class Receiver(Module, AutoCSR): NextState("FRAME") ) ).Else( - NextValue(frame_counter, 0) + NextValue(frame_counter, 0), ) ) fsm.act("FRAME", @@ -189,14 +205,12 @@ class Receiver(Module, AutoCSR): ) ) fsm.act("SIGNAL_FRAME", - signal_frame.i.eq(1), - NextState("WAIT_ACK"), - If(converter.source.stb, signal_error.i.eq(1)) - ) - fsm.act("WAIT_ACK", - If(frame_ack.o, - NextValue(frame_counter, 0), - NextState("IDLE") + NextState("IDLE"), + 
If((write_pointer + 1) == read_pointer_rx, + # on full buffer, overwrite only current frame + signal_error.i.eq(1) + ).Else( + NextValue(write_pointer, write_pointer + 1) ), If(converter.source.stb, signal_error.i.eq(1)) ) @@ -215,8 +229,8 @@ class DRTIOAuxController(Module): tx_sdram_if = wishbone.SRAM(self.transmitter.mem, read_only=False, data_width=dw) rx_sdram_if = wishbone.SRAM(self.receiver.mem, read_only=True, data_width=dw) decoder = wishbone.Decoder(self.bus, - [(lambda a: a[log2_int(max_packet)-wsb] == 0, tx_sdram_if.bus), - (lambda a: a[log2_int(max_packet)-wsb] == 1, rx_sdram_if.bus)], + [(lambda a: a[log2_int(max_packet*aux_buffer_count)-wsb] == 0, tx_sdram_if.bus), + (lambda a: a[log2_int(max_packet*aux_buffer_count)-wsb] == 1, rx_sdram_if.bus)], register=True) self.submodules += tx_sdram_if, rx_sdram_if, decoder diff --git a/artiq/gateware/targets/efc.py b/artiq/gateware/targets/efc.py index 42a4f031b..ee26c5e0e 100644 --- a/artiq/gateware/targets/efc.py +++ b/artiq/gateware/targets/efc.py @@ -82,9 +82,10 @@ class Satellite(BaseSoC, AMPSoC): core.link_layer, self.cpu_dw)) self.csr_devices.append("drtioaux0") + drtio_aux_mem_size = 1024 * 16 # max_packet * 8 buffers * 2 (tx, rx halves) memory_address = self.mem_map["drtioaux"] - self.add_wb_slave(memory_address, 0x800, self.drtioaux0.bus) - self.add_memory_region("drtioaux0_mem", memory_address | self.shadow_base, 0x800) + self.add_wb_slave(memory_address, drtio_aux_mem_size, self.drtioaux0.bus) + self.add_memory_region("drtioaux0_mem", memory_address | self.shadow_base, drtio_aux_mem_size) self.config["HAS_DRTIO"] = None self.add_csr_group("drtioaux", ["drtioaux0"]) diff --git a/artiq/gateware/targets/kasli.py b/artiq/gateware/targets/kasli.py index d9ed6f446..cd1404322 100755 --- a/artiq/gateware/targets/kasli.py +++ b/artiq/gateware/targets/kasli.py @@ -236,10 +236,11 @@ class MasterBase(MiniSoC, AMPSoC): setattr(self.submodules, coreaux_name, coreaux) self.csr_devices.append(coreaux_name) - memory_address = self.mem_map["drtioaux"] + 0x800*i - self.add_wb_slave(memory_address, 0x800, + drtio_aux_mem_size = 1024 * 16 # max_packet * 8 buffers * 2 (tx, rx halves) + memory_address = self.mem_map["drtioaux"] + drtio_aux_mem_size*i + self.add_wb_slave(memory_address, drtio_aux_mem_size, coreaux.bus) - self.add_memory_region(memory_name, memory_address | self.shadow_base, 0x800) + self.add_memory_region(memory_name, memory_address | self.shadow_base, drtio_aux_mem_size) self.config["HAS_DRTIO"] = None self.config["HAS_DRTIO_ROUTING"] = None self.config["DRTIO_ROLE"] = "master" @@ -318,10 +319,11 @@ class MasterBase(MiniSoC, AMPSoC): setattr(self.submodules, coreaux_name, coreaux) self.csr_devices.append(coreaux_name) - memory_address = self.mem_map["drtioaux"] + 0x800*channel - self.add_wb_slave(memory_address, 0x800, + drtio_aux_mem_size = 1024 * 16 # max_packet * 8 buffers * 2 (tx, rx halves) + memory_address = self.mem_map["drtioaux"] + drtio_aux_mem_size*channel + self.add_wb_slave(memory_address, drtio_aux_mem_size, coreaux.bus) - self.add_memory_region(memory_name, memory_address | self.shadow_base, 0x800) + self.add_memory_region(memory_name, memory_address | self.shadow_base, drtio_aux_mem_size) def add_drtio_cpuif_groups(self): self.add_csr_group("drtio", self.drtio_csr_group) @@ -486,10 +488,11 @@ class SatelliteBase(BaseSoC, AMPSoC): setattr(self.submodules, coreaux_name, coreaux) self.csr_devices.append(coreaux_name) - memory_address = self.mem_map["drtioaux"] + 0x800*i - self.add_wb_slave(memory_address, 0x800, + 
drtio_aux_mem_size = 1024 * 16 # max_packet * 8 buffers * 2 (tx, rx halves) + memory_address = self.mem_map["drtioaux"] + drtio_aux_mem_size * i + self.add_wb_slave(memory_address, drtio_aux_mem_size, coreaux.bus) - self.add_memory_region(memory_name, memory_address | self.shadow_base, 0x800) + self.add_memory_region(memory_name, memory_address | self.shadow_base, drtio_aux_mem_size) self.config["HAS_DRTIO"] = None self.config["HAS_DRTIO_ROUTING"] = None self.config["DRTIO_ROLE"] = "satellite" diff --git a/artiq/gateware/targets/kc705.py b/artiq/gateware/targets/kc705.py index b3b5a4af7..4200919ae 100755 --- a/artiq/gateware/targets/kc705.py +++ b/artiq/gateware/targets/kc705.py @@ -247,10 +247,11 @@ class _MasterBase(MiniSoC, AMPSoC): setattr(self.submodules, coreaux_name, coreaux) self.csr_devices.append(coreaux_name) - memory_address = self.mem_map["drtioaux"] + 0x800*i - self.add_wb_slave(memory_address, 0x800, + drtio_aux_mem_size = 1024 * 16 # max_packet * 8 buffers * 2 (tx, rx halves) + memory_address = self.mem_map["drtioaux"] + drtio_aux_mem_size*i + self.add_wb_slave(memory_address, drtio_aux_mem_size, coreaux.bus) - self.add_memory_region(memory_name, memory_address | self.shadow_base, 0x800) + self.add_memory_region(memory_name, memory_address | self.shadow_base, drtio_aux_mem_size) self.config["HAS_DRTIO"] = None self.config["HAS_DRTIO_ROUTING"] = None self.add_csr_group("drtio", drtio_csr_group) @@ -405,10 +406,11 @@ class _SatelliteBase(BaseSoC, AMPSoC): setattr(self.submodules, coreaux_name, coreaux) self.csr_devices.append(coreaux_name) - memory_address = self.mem_map["drtioaux"] + 0x800*i - self.add_wb_slave(memory_address, 0x800, + drtio_aux_mem_size = 1024 * 16 # max_packet * 8 buffers * 2 (tx, rx halves) + memory_address = self.mem_map["drtioaux"] + drtio_aux_mem_size*i + self.add_wb_slave(memory_address, drtio_aux_mem_size, coreaux.bus) - self.add_memory_region(memory_name, memory_address | self.shadow_base, 0x800) + self.add_memory_region(memory_name, memory_address | self.shadow_base, drtio_aux_mem_size) self.config["HAS_DRTIO"] = None self.config["HAS_DRTIO_ROUTING"] = None self.add_csr_group("drtioaux", drtioaux_csr_group) diff --git a/artiq/gateware/test/drtio/test_aux_controller.py b/artiq/gateware/test/drtio/test_aux_controller.py index 6c65307ed..1ea809c0b 100644 --- a/artiq/gateware/test/drtio/test_aux_controller.py +++ b/artiq/gateware/test/drtio/test_aux_controller.py @@ -60,28 +60,36 @@ class TestAuxController(unittest.TestCase): while (yield from dut[dw].aux_controller.transmitter.aux_tx.read()): yield - def receive_packet(dw): + def receive_packet(pkt_no, len, dw): while not (yield from dut[dw].aux_controller.receiver.aux_rx_present.read()): yield - length = yield from dut[dw].aux_controller.receiver.aux_rx_length.read() + p = yield from dut[dw].aux_controller.receiver.aux_read_pointer.read() + self.assertEqual(pkt_no, p) r = [] - for i in range(length//(dw//8)): + # packet length is derived by software now, so we pass it for the test + for i in range(len): + if dw == 64: + offset = 2048 + if dw == 32: + offset = 1024 + print(dw) + max_packet = 1024 r.append((yield from dut[dw].aux_controller.bus.read(256+i))) yield from dut[dw].aux_controller.receiver.aux_rx_present.write(1) return r prng = random.Random(0) - def send_and_check_packet(dw): + def send_and_check_packet(i, dw): data = [prng.randrange(2**dw-1) for _ in range(prng.randrange(1, 16))] yield from send_packet(data, dw) - received = yield from receive_packet(dw) + received = yield from 
receive_packet(i, len(data), dw) self.assertEqual(data, received) def sim(dw): yield from link_init(dw) for i in range(8): - yield from send_and_check_packet(dw) + yield from send_and_check_packet(i, dw) @passive def rt_traffic(dw): @@ -95,5 +103,5 @@ class TestAuxController(unittest.TestCase): yield dut[dw].link_layer.tx_rt_frame.eq(0) yield - run_simulation(dut[32], [sim(32), rt_traffic(32)]) - run_simulation(dut[64], [sim(64), rt_traffic(64)]) + run_simulation(dut[32], [sim(32), rt_traffic(32)], vcd_name="32bit.vcd") + run_simulation(dut[64], [sim(64), rt_traffic(64)], vcd_name="64bit.vcd") From b1c305fd113aa4941577097236c6ba778d17c897 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Fri, 19 Apr 2024 16:27:51 +0800 Subject: [PATCH 222/296] drtioaux: adjust firmware for multiple buffers --- artiq/firmware/libboard_artiq/drtioaux.rs | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/artiq/firmware/libboard_artiq/drtioaux.rs b/artiq/firmware/libboard_artiq/drtioaux.rs index 818775d7b..40cc8bb54 100644 --- a/artiq/firmware/libboard_artiq/drtioaux.rs +++ b/artiq/firmware/libboard_artiq/drtioaux.rs @@ -69,9 +69,9 @@ fn receive(linkno: u8, f: F) -> Result, Error> let linkidx = linkno as usize; unsafe { if (DRTIOAUX[linkidx].aux_rx_present_read)() == 1 { - let ptr = DRTIOAUX_MEM[linkidx].base + DRTIOAUX_MEM[linkidx].size / 2; - let len = (DRTIOAUX[linkidx].aux_rx_length_read)(); - let result = f(slice::from_raw_parts(ptr as *mut u8, len as usize)); + let read_ptr = (DRTIOAUX[linkidx].aux_read_pointer_read)() as usize; + let ptr = DRTIOAUX_MEM[linkidx].base + (DRTIOAUX_MEM[linkidx].size / 2) + (read_ptr * 0x400); + let result = f(slice::from_raw_parts(ptr as *mut u8, 0x400 as usize)); (DRTIOAUX[linkidx].aux_rx_present_write)(1); Ok(Some(result?)) } else { @@ -86,21 +86,17 @@ pub fn recv(linkno: u8) -> Result, Error> { } receive(linkno, |buffer| { - if buffer.len() < 8 { - return Err(IoError::UnexpectedEnd.into()) - } - let mut reader = Cursor::new(buffer); - let checksum_at = buffer.len() - 4; + let packet = Packet::read_from(&mut reader)?; + let padding = (12 - (reader.position() % 8)) % 8; + let checksum_at = reader.position() + padding; let checksum = crc::crc32::checksum_ieee(&reader.get_ref()[0..checksum_at]); reader.set_position(checksum_at); if reader.read_u32()? != checksum { return Err(Error::CorruptedPacket) } - reader.set_position(0); - - Ok(Packet::read_from(&mut reader)?) + Ok(packet) }) } From a49ba3e35008a6ef28551787360ebf321d104d14 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Fri, 19 Apr 2024 16:28:26 +0800 Subject: [PATCH 223/296] master: support unsolicited async messages --- .../firmware/libproto_artiq/drtioaux_proto.rs | 8 -- artiq/firmware/runtime/rtio_mgt.rs | 129 ++++++++---------- 2 files changed, 57 insertions(+), 80 deletions(-) diff --git a/artiq/firmware/libproto_artiq/drtioaux_proto.rs b/artiq/firmware/libproto_artiq/drtioaux_proto.rs index f58217f9c..2779b7207 100644 --- a/artiq/firmware/libproto_artiq/drtioaux_proto.rs +++ b/artiq/firmware/libproto_artiq/drtioaux_proto.rs @@ -77,8 +77,6 @@ pub enum Packet { RoutingSetPath { destination: u8, hops: [u8; 32] }, RoutingSetRank { rank: u8 }, - RoutingRetrievePackets, - RoutingNoPackets, RoutingAck, MonitorRequest { destination: u8, channel: u16, probe: u8 }, @@ -170,8 +168,6 @@ impl Packet { rank: reader.read_u8()? 
}, 0x32 => Packet::RoutingAck, - 0x33 => Packet::RoutingRetrievePackets, - 0x34 => Packet::RoutingNoPackets, 0x40 => Packet::MonitorRequest { destination: reader.read_u8()?, @@ -456,10 +452,6 @@ impl Packet { }, Packet::RoutingAck => writer.write_u8(0x32)?, - Packet::RoutingRetrievePackets => - writer.write_u8(0x33)?, - Packet::RoutingNoPackets => - writer.write_u8(0x34)?, Packet::MonitorRequest { destination, channel, probe } => { writer.write_u8(0x40)?; diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index 55d97d532..3f2c9e7f3 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -78,16 +78,6 @@ pub mod drtio { } } - fn link_has_async_ready(linkno: u8) -> bool { - let linkno = linkno as usize; - let async_ready; - unsafe { - async_ready = (csr::DRTIO[linkno].async_messages_ready_read)() == 1; - (csr::DRTIO[linkno].async_messages_ready_write)(1); - } - async_ready - } - fn recv_aux_timeout(io: &Io, linkno: u8, timeout: u32) -> Result { let max_time = clock::get_ms() + timeout as u64; loop { @@ -97,6 +87,7 @@ pub mod drtio { if clock::get_ms() > max_time { return Err(Error::Timeout); } + // todo: reinsert handling of async messages match drtioaux::recv(linkno) { Ok(Some(packet)) => return Ok(packet), Ok(None) => (), @@ -106,62 +97,51 @@ pub mod drtio { } } - fn process_async_packets(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, - routing_table: &drtio_routing::RoutingTable, linkno: u8) - { - if link_has_async_ready(linkno) { - loop { - let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::RoutingRetrievePackets); - if let Ok(packet) = reply { - match packet { - // packets to be consumed locally - drtioaux::Packet::DmaPlaybackStatus { id, source, destination: 0, error, channel, timestamp } => { - remote_dma::playback_done(io, ddma_mutex, id, source, error, channel, timestamp); - }, - drtioaux::Packet::SubkernelFinished { id, destination: 0, with_exception, exception_src } => { - subkernel::subkernel_finished(io, subkernel_mutex, id, with_exception, exception_src); - }, - drtioaux::Packet::SubkernelMessage { id, source: from, destination: 0, status, length, data } => { - subkernel::message_handle_incoming(io, subkernel_mutex, id, status, length as usize, &data); - // acknowledge receiving part of the message - drtioaux::send(linkno, - &drtioaux::Packet::SubkernelMessageAck { destination: from } - ).unwrap(); - // give the satellite some time to process the message - io.sleep(10).unwrap(); - }, - // routable packets - drtioaux::Packet::DmaAddTraceRequest { destination, .. } | - drtioaux::Packet::DmaAddTraceReply { destination, .. } | - drtioaux::Packet::DmaRemoveTraceRequest { destination, .. } | - drtioaux::Packet::DmaRemoveTraceReply { destination, .. } | - drtioaux::Packet::DmaPlaybackRequest { destination, .. } | - drtioaux::Packet::DmaPlaybackReply { destination, .. } | - drtioaux::Packet::SubkernelLoadRunRequest { destination, .. } | - drtioaux::Packet::SubkernelLoadRunReply { destination, .. } | - drtioaux::Packet::SubkernelMessage { destination, .. } | - drtioaux::Packet::SubkernelMessageAck { destination, .. } | - drtioaux::Packet::DmaPlaybackStatus { destination, .. } | - drtioaux::Packet::SubkernelFinished { destination, .. 
} => { - let dest_link = routing_table.0[destination as usize][0] - 1; - if dest_link == linkno { - warn!("[LINK#{}] Re-routed packet would return to the same link, dropping: {:?}", linkno, packet); - } else if destination == 0 { - warn!("[LINK#{}] Received invalid routable packet: {:?}", linkno, packet) - } else { - drtioaux::send(dest_link, &packet).unwrap(); - } - } - - drtioaux::Packet::RoutingNoPackets => break, - - other => warn!("[LINK#{}] Received an unroutable packet: {:?}", linkno, other) - } + fn process_async_packets(io: &Io, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8, packet: drtioaux::Packet + ) -> Option { + match packet { + // packets to be consumed locally + drtioaux::Packet::DmaPlaybackStatus { id, source, destination: 0, error, channel, timestamp } => { + remote_dma::playback_done(io, ddma_mutex, id, source, error, channel, timestamp); + None + }, + drtioaux::Packet::SubkernelFinished { id, destination: 0, with_exception, exception_src } => { + subkernel::subkernel_finished(io, subkernel_mutex, id, with_exception, exception_src); + None + }, + drtioaux::Packet::SubkernelMessage { id, source: from, destination: 0, status, length, data } => { + subkernel::message_handle_incoming(io, subkernel_mutex, id, status, length as usize, &data); + // acknowledge receiving part of the message + drtioaux::send(linkno, + &drtioaux::Packet::SubkernelMessageAck { destination: from } + ).unwrap(); + None + }, + // routable packets + drtioaux::Packet::DmaAddTraceRequest { destination, .. } | + drtioaux::Packet::DmaAddTraceReply { destination, .. } | + drtioaux::Packet::DmaRemoveTraceRequest { destination, .. } | + drtioaux::Packet::DmaRemoveTraceReply { destination, .. } | + drtioaux::Packet::DmaPlaybackRequest { destination, .. } | + drtioaux::Packet::DmaPlaybackReply { destination, .. } | + drtioaux::Packet::SubkernelLoadRunRequest { destination, .. } | + drtioaux::Packet::SubkernelLoadRunReply { destination, .. } | + drtioaux::Packet::SubkernelMessage { destination, .. } | + drtioaux::Packet::SubkernelMessageAck { destination, .. } | + drtioaux::Packet::DmaPlaybackStatus { destination, .. } | + drtioaux::Packet::SubkernelFinished { destination, .. 
} => { + let dest_link = routing_table.0[destination as usize][0] - 1; + if dest_link == linkno { + warn!("[LINK#{}] Re-routed packet would return to the same link, dropping: {:?}", linkno, packet); + } else if destination == 0 { + warn!("[LINK#{}] Received invalid routable packet: {:?}", linkno, packet) } else { - warn!("[LINK#{}] Error handling async packets ({})", linkno, reply.unwrap_err()); - return; + drtioaux::send(dest_link, &packet).unwrap(); } - } + None + } + other => Some(other) } } @@ -268,12 +248,19 @@ pub mod drtio { } } - fn process_unsolicited_aux(io: &Io, aux_mutex: &Mutex, linkno: u8) { + fn process_unsolicited_aux(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8) { let _lock = aux_mutex.lock(io).unwrap(); - match drtioaux::recv(linkno) { - Ok(Some(packet)) => warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet), - Ok(None) => (), - Err(_) => warn!("[LINK#{}] aux packet error", linkno) + loop { + match drtioaux::recv(linkno) { + Ok(Some(packet)) => { + if let Some(packet) = process_async_packets(&io, ddma_mutex, subkernel_mutex, routing_table, linkno, packet) { + warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet); + } + }, + Ok(None) => return, + Err(_) => { warn!("[LINK#{}] aux packet error", linkno); return } + } } } @@ -403,8 +390,7 @@ pub mod drtio { if up_links[linkno as usize] { /* link was previously up */ if link_rx_up(linkno) { - process_async_packets(&io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno); - process_unsolicited_aux(&io, aux_mutex, linkno); + process_unsolicited_aux(&io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno); process_local_errors(linkno); } else { info!("[LINK#{}] link is down", linkno); @@ -636,7 +622,6 @@ pub mod drtio { } }) } - } #[cfg(not(has_drtio))] From acebc3d691fdf7b5aacf32f5002152b3a58f29ae Mon Sep 17 00:00:00 2001 From: mwojcik Date: Fri, 19 Apr 2024 17:21:10 +0800 Subject: [PATCH 224/296] satellite: send async packets directly --- artiq/firmware/satman/main.rs | 16 ++------------ artiq/firmware/satman/repeater.rs | 36 +++++-------------------------- artiq/firmware/satman/routing.rs | 15 ------------- 3 files changed, 7 insertions(+), 60 deletions(-) diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index 1b9a4c53f..ed1cd238c 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -66,12 +66,6 @@ fn drtiosat_tsc_loaded() -> bool { } } -fn drtiosat_async_ready() { - unsafe { - csr::drtiosat::async_messages_ready_write(1); - } -} - #[derive(Clone, Copy)] pub enum RtioMaster { Drtio, @@ -252,12 +246,6 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg drtioaux::send(0, &drtioaux::Packet::RoutingAck) } - drtioaux::Packet::RoutingRetrievePackets => { - let packet = router.get_upstream_packet().or( - Some(drtioaux::Packet::RoutingNoPackets)).unwrap(); - drtioaux::send(0, &packet) - } - drtioaux::Packet::MonitorRequest { destination: _destination, channel, probe } => { forward!(_routing_table, _destination, *rank, _repeaters, &packet); let value; @@ -808,8 +796,8 @@ pub extern fn main() -> i32 { } } - if router.any_upstream_waiting() { - drtiosat_async_ready(); + if let Some(packet) = router.get_upstream_packet() { + drtioaux::send(0, &packet).unwrap(); } } diff --git a/artiq/firmware/satman/repeater.rs b/artiq/firmware/satman/repeater.rs index 544527cda..cd18badc2 100644 --- a/artiq/firmware/satman/repeater.rs +++ 
b/artiq/firmware/satman/repeater.rs @@ -49,7 +49,7 @@ impl Repeater { self.state == RepeaterState::Up } - pub fn service(&mut self, routing_table: &drtio_routing::RoutingTable, rank: u8, destination: u8, router: &mut Router) { + pub fn service(&mut self, routing_table: &drtio_routing::RoutingTable, rank: u8, self_destination: u8, router: &mut Router) { self.process_local_errors(); match self.state { @@ -107,16 +107,11 @@ impl Repeater { } } RepeaterState::Up => { - self.process_unsolicited_aux(); + self.process_unsolicited_aux(routing_table, rank, self_destination, router); if !rep_link_rx_up(self.repno) { info!("[REP#{}] link is down", self.repno); self.state = RepeaterState::Down; } - if self.async_messages_ready() { - if let Err(e) = self.handle_async(routing_table, rank, destination, router) { - warn!("[REP#{}] Error handling async messages ({})", self.repno, e); - } - } } RepeaterState::Failed => { if !rep_link_rx_up(self.repno) { @@ -127,9 +122,10 @@ impl Repeater { } } - fn process_unsolicited_aux(&self) { + fn process_unsolicited_aux(&self, routing_table: &drtio_routing::RoutingTable, + rank: u8, self_destination: u8, router: &mut Router) { match drtioaux::recv(self.auxno) { - Ok(Some(packet)) => warn!("[REP#{}] unsolicited aux packet: {:?}", self.repno, packet), + Ok(Some(packet)) => router.route(packet, routing_table, rank, self_destination), Ok(None) => (), Err(_) => warn!("[REP#{}] aux packet error", self.repno) } @@ -185,28 +181,6 @@ impl Repeater { } } - fn async_messages_ready(&self) -> bool { - let async_rdy; - unsafe { - async_rdy = (csr::DRTIOREP[self.repno as usize].async_messages_ready_read)(); - (csr::DRTIOREP[self.repno as usize].async_messages_ready_write)(0); - } - async_rdy == 1 - } - - fn handle_async(&self, routing_table: &drtio_routing::RoutingTable, rank: u8, self_destination: u8, router: &mut Router - ) -> Result<(), drtioaux::Error> { - loop { - drtioaux::send(self.auxno, &drtioaux::Packet::RoutingRetrievePackets).unwrap(); - let reply = self.recv_aux_timeout(200)?; - match reply { - drtioaux::Packet::RoutingNoPackets => break, - packet => router.route(packet, routing_table, rank, self_destination) - } - } - Ok(()) - } - pub fn aux_forward(&self, request: &drtioaux::Packet) -> Result<(), drtioaux::Error> { self.aux_send(request)?; let reply = self.recv_aux_timeout(200)?; diff --git a/artiq/firmware/satman/routing.rs b/artiq/firmware/satman/routing.rs index 169630dbd..cb17d6822 100644 --- a/artiq/firmware/satman/routing.rs +++ b/artiq/firmware/satman/routing.rs @@ -75,7 +75,6 @@ pub struct Router { local_queue: VecDeque, #[cfg(has_drtio_routing)] downstream_queue: VecDeque<(usize, drtioaux::Packet)>, - upstream_notified: bool, } impl Router { @@ -85,7 +84,6 @@ impl Router { local_queue: VecDeque::new(), #[cfg(has_drtio_routing)] downstream_queue: VecDeque::new(), - upstream_notified: false, } } @@ -155,21 +153,8 @@ impl Router { } } - pub fn any_upstream_waiting(&mut self) -> bool { - let empty = self.upstream_queue.is_empty(); - if !empty && !self.upstream_notified { - self.upstream_notified = true; // so upstream will not get spammed with notifications - true - } else { - false - } - } - pub fn get_upstream_packet(&mut self) -> Option { let packet = self.upstream_queue.pop_front(); - if packet.is_none() { - self.upstream_notified = false; - } packet } From 7204feae1f504e4a1dcd54b95c59e8f36c62b701 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Mon, 22 Apr 2024 17:27:04 +0800 Subject: [PATCH 225/296] master: aux_transact support for async messages --- 
artiq/firmware/runtime/analyzer.rs | 13 +-- artiq/firmware/runtime/kern_hwreq.rs | 77 +++++++++------- artiq/firmware/runtime/kernel.rs | 24 ++--- artiq/firmware/runtime/main.rs | 8 +- artiq/firmware/runtime/moninj.rs | 41 +++++---- artiq/firmware/runtime/rtio_dma.rs | 16 ++-- artiq/firmware/runtime/rtio_mgt.rs | 129 +++++++++++++++------------ artiq/firmware/runtime/session.rs | 34 +++---- 8 files changed, 193 insertions(+), 149 deletions(-) diff --git a/artiq/firmware/runtime/analyzer.rs b/artiq/firmware/runtime/analyzer.rs index 41cca6e46..355da1977 100644 --- a/artiq/firmware/runtime/analyzer.rs +++ b/artiq/firmware/runtime/analyzer.rs @@ -52,7 +52,7 @@ pub mod remote_analyzer { pub data: Vec } - pub fn get_data(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, + pub fn get_data(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, up_destinations: &Urc> ) -> Result { // gets data from satellites and returns consolidated data @@ -62,7 +62,7 @@ pub mod remote_analyzer { let mut remote_total_bytes = 0; let data_vec = drtio::analyzer_query( - io, aux_mutex, routing_table, up_destinations + io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, up_destinations )?; for data in data_vec { remote_total_bytes += data.total_byte_count; @@ -82,7 +82,8 @@ pub mod remote_analyzer { -fn worker(stream: &mut TcpStream, _io: &Io, _aux_mutex: &Mutex, +fn worker(stream: &mut TcpStream, _io: &Io, _aux_mutex: &Mutex, + _ddma_mutex: &Mutex, _subkernel_mutex: &Mutex, _routing_table: &drtio_routing::RoutingTable, _up_destinations: &Urc> ) -> Result<(), IoError> { @@ -96,7 +97,7 @@ fn worker(stream: &mut TcpStream, _io: &Io, _aux_mutex: &Mutex, #[cfg(has_drtio)] let remote = remote_analyzer::get_data( - _io, _aux_mutex, _routing_table, _up_destinations); + _io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, _up_destinations); #[cfg(has_drtio)] let (header, remote_data) = match remote { Ok(remote) => (Header { @@ -143,7 +144,7 @@ fn worker(stream: &mut TcpStream, _io: &Io, _aux_mutex: &Mutex, Ok(()) } -pub fn thread(io: Io, aux_mutex: &Mutex, +pub fn thread(io: Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &Urc>, up_destinations: &Urc>) { let listener = TcpListener::new(&io, 65535); @@ -158,7 +159,7 @@ pub fn thread(io: Io, aux_mutex: &Mutex, disarm(); let routing_table = routing_table.borrow(); - match worker(&mut stream, &io, aux_mutex, &routing_table, up_destinations) { + match worker(&mut stream, &io, aux_mutex, ddma_mutex, subkernel_mutex, &routing_table, up_destinations) { Ok(()) => (), Err(err) => error!("analyzer aborted: {}", err) } diff --git a/artiq/firmware/runtime/kern_hwreq.rs b/artiq/firmware/runtime/kern_hwreq.rs index 49aa2d8af..9dea387d5 100644 --- a/artiq/firmware/runtime/kern_hwreq.rs +++ b/artiq/firmware/runtime/kern_hwreq.rs @@ -11,13 +11,15 @@ use board_artiq::spi as local_spi; #[cfg(has_drtio)] mod remote_i2c { use drtioaux; + use drtio_routing; use rtio_mgt::drtio; use sched::{Io, Mutex}; - pub fn start(io: &Io, aux_mutex: &Mutex, + pub fn start(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8, destination: u8, busno: u8 ) -> Result<(), &'static str> { - let reply = drtio::aux_transact(io, aux_mutex, linkno, + let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::I2cStartRequest { destination: 
destination, busno: busno @@ -37,10 +39,11 @@ mod remote_i2c { } } - pub fn restart(io: &Io, aux_mutex: &Mutex, + pub fn restart(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8, destination: u8, busno: u8 ) -> Result<(), &'static str> { - let reply = drtio::aux_transact(io, aux_mutex, linkno, + let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::I2cRestartRequest { destination: destination, busno: busno @@ -60,10 +63,11 @@ mod remote_i2c { } } - pub fn stop(io: &Io, aux_mutex: &Mutex, + pub fn stop(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8, destination: u8, busno: u8 ) -> Result<(), &'static str> { - let reply = drtio::aux_transact(io, aux_mutex, linkno, + let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::I2cStopRequest { destination: destination, busno: busno @@ -83,10 +87,11 @@ mod remote_i2c { } } - pub fn write(io: &Io, aux_mutex: &Mutex, + pub fn write(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8, destination: u8, busno: u8, data: u8 ) -> Result { - let reply = drtio::aux_transact(io, aux_mutex, linkno, + let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::I2cWriteRequest { destination: destination, busno: busno, @@ -107,10 +112,11 @@ mod remote_i2c { } } - pub fn read(io: &Io, aux_mutex: &Mutex, + pub fn read(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8, destination: u8, busno: u8, ack: bool ) -> Result { - let reply = drtio::aux_transact(io, aux_mutex, linkno, + let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::I2cReadRequest { destination: destination, busno: busno, @@ -131,10 +137,11 @@ mod remote_i2c { } } - pub fn switch_select(io: &Io, aux_mutex: &Mutex, + pub fn switch_select(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8, destination: u8, busno: u8, address: u8, mask: u8 ) -> Result<(), &'static str> { - let reply = drtio::aux_transact(io, aux_mutex, linkno, + let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::I2cSwitchSelectRequest { destination: destination, busno: busno, @@ -160,13 +167,15 @@ mod remote_i2c { #[cfg(has_drtio)] mod remote_spi { use drtioaux; + use drtio_routing; use rtio_mgt::drtio; use sched::{Io, Mutex}; - pub fn set_config(io: &Io, aux_mutex: &Mutex, + pub fn set_config(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8, destination: u8, busno: u8, flags: u8, length: u8, div: u8, cs: u8 ) -> Result<(), ()> { - let reply = drtio::aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::SpiSetConfigRequest { + let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::SpiSetConfigRequest { destination: destination, busno: busno, flags: flags, @@ -189,10 +198,11 @@ mod remote_spi { } } - pub fn write(io: &Io, aux_mutex: &Mutex, + pub fn write(io: &Io, aux_mutex: &Mutex, 
ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8, destination: u8, busno: u8, data: u32 ) -> Result<(), ()> { - let reply = drtio::aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::SpiWriteRequest { + let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::SpiWriteRequest { destination: destination, busno: busno, data: data @@ -212,9 +222,10 @@ mod remote_spi { } } - pub fn read(io: &Io, aux_mutex: &Mutex, linkno: u8, destination: u8, busno: u8 + pub fn read(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8, destination: u8, busno: u8 ) -> Result { - let reply = drtio::aux_transact(io, aux_mutex, linkno, + let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::SpiReadRequest { destination: destination, busno: busno @@ -238,7 +249,7 @@ mod remote_spi { #[cfg(has_drtio)] macro_rules! dispatch { - ($io:ident, $aux_mutex:ident, $mod_local:ident, $mod_remote:ident, $routing_table:ident, $busno:expr, $func:ident $(, $param:expr)*) => {{ + ($io:ident, $aux_mutex:ident, $ddma_mutex:ident, $subkernel_mutex:ident, $mod_local:ident, $mod_remote:ident, $routing_table:ident, $busno:expr, $func:ident $(, $param:expr)*) => {{ let destination = ($busno >> 16) as u8; let busno = $busno as u8; let hop = $routing_table.0[destination as usize][0]; @@ -246,27 +257,27 @@ macro_rules! dispatch { $mod_local::$func(busno, $($param, )*) } else { let linkno = hop - 1; - $mod_remote::$func($io, $aux_mutex, linkno, destination, busno, $($param, )*) + $mod_remote::$func($io, $aux_mutex, $ddma_mutex, $subkernel_mutex, $routing_table, linkno, destination, busno, $($param, )*) } }} } #[cfg(not(has_drtio))] macro_rules! 
dispatch { - ($io:ident, $aux_mutex:ident, $mod_local:ident, $mod_remote:ident, $routing_table:ident, $busno:expr, $func:ident $(, $param:expr)*) => {{ + ($io:ident, $aux_mutex:ident, $ddma_mutex:ident, $subkernel_mutex:ident, $mod_local:ident, $mod_remote:ident, $routing_table:ident, $busno:expr, $func:ident $(, $param:expr)*) => {{ let busno = $busno as u8; $mod_local::$func(busno, $($param, )*) }} } -pub fn process_kern_hwreq(io: &Io, aux_mutex: &Mutex, - _routing_table: &drtio_routing::RoutingTable, +pub fn process_kern_hwreq(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, _up_destinations: &Urc>, request: &kern::Message) -> Result> { match request { &kern::RtioInitRequest => { info!("resetting RTIO"); - rtio_mgt::reset(io, aux_mutex); + rtio_mgt::reset(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table); kern_acknowledge() } @@ -282,47 +293,47 @@ pub fn process_kern_hwreq(io: &Io, aux_mutex: &Mutex, } &kern::I2cStartRequest { busno } => { - let succeeded = dispatch!(io, aux_mutex, local_i2c, remote_i2c, _routing_table, busno, start).is_ok(); + let succeeded = dispatch!(io, aux_mutex, ddma_mutex, subkernel_mutex, local_i2c, remote_i2c, routing_table, busno, start).is_ok(); kern_send(io, &kern::I2cBasicReply { succeeded: succeeded }) } &kern::I2cRestartRequest { busno } => { - let succeeded = dispatch!(io, aux_mutex, local_i2c, remote_i2c, _routing_table, busno, restart).is_ok(); + let succeeded = dispatch!(io, aux_mutex, ddma_mutex, subkernel_mutex, local_i2c, remote_i2c, routing_table, busno, restart).is_ok(); kern_send(io, &kern::I2cBasicReply { succeeded: succeeded }) } &kern::I2cStopRequest { busno } => { - let succeeded = dispatch!(io, aux_mutex, local_i2c, remote_i2c, _routing_table, busno, stop).is_ok(); + let succeeded = dispatch!(io, aux_mutex, ddma_mutex, subkernel_mutex, local_i2c, remote_i2c, routing_table, busno, stop).is_ok(); kern_send(io, &kern::I2cBasicReply { succeeded: succeeded }) } &kern::I2cWriteRequest { busno, data } => { - match dispatch!(io, aux_mutex, local_i2c, remote_i2c, _routing_table, busno, write, data) { + match dispatch!(io, aux_mutex, ddma_mutex, subkernel_mutex, local_i2c, remote_i2c, routing_table, busno, write, data) { Ok(ack) => kern_send(io, &kern::I2cWriteReply { succeeded: true, ack: ack }), Err(_) => kern_send(io, &kern::I2cWriteReply { succeeded: false, ack: false }) } } &kern::I2cReadRequest { busno, ack } => { - match dispatch!(io, aux_mutex, local_i2c, remote_i2c, _routing_table, busno, read, ack) { + match dispatch!(io, aux_mutex, ddma_mutex, subkernel_mutex, local_i2c, remote_i2c, routing_table, busno, read, ack) { Ok(data) => kern_send(io, &kern::I2cReadReply { succeeded: true, data: data }), Err(_) => kern_send(io, &kern::I2cReadReply { succeeded: false, data: 0xff }) } } &kern::I2cSwitchSelectRequest { busno, address, mask } => { - let succeeded = dispatch!(io, aux_mutex, local_i2c, remote_i2c, _routing_table, busno, + let succeeded = dispatch!(io, aux_mutex, ddma_mutex, subkernel_mutex, local_i2c, remote_i2c, routing_table, busno, switch_select, address, mask).is_ok(); kern_send(io, &kern::I2cBasicReply { succeeded: succeeded }) } &kern::SpiSetConfigRequest { busno, flags, length, div, cs } => { - let succeeded = dispatch!(io, aux_mutex, local_spi, remote_spi, _routing_table, busno, + let succeeded = dispatch!(io, aux_mutex, ddma_mutex, subkernel_mutex, local_spi, remote_spi, routing_table, busno, set_config, flags, length, div, cs).is_ok(); kern_send(io, 
&kern::SpiBasicReply { succeeded: succeeded }) }, &kern::SpiWriteRequest { busno, data } => { - let succeeded = dispatch!(io, aux_mutex, local_spi, remote_spi, _routing_table, busno, + let succeeded = dispatch!(io, aux_mutex, ddma_mutex, subkernel_mutex, local_spi, remote_spi, routing_table, busno, write, data).is_ok(); kern_send(io, &kern::SpiBasicReply { succeeded: succeeded }) } &kern::SpiReadRequest { busno } => { - match dispatch!(io, aux_mutex, local_spi, remote_spi, _routing_table, busno, read) { + match dispatch!(io, aux_mutex, ddma_mutex, subkernel_mutex, local_spi, remote_spi, routing_table, busno, read) { Ok(data) => kern_send(io, &kern::SpiReadReply { succeeded: true, data: data }), Err(_) => kern_send(io, &kern::SpiReadReply { succeeded: false, data: 0 }) } diff --git a/artiq/firmware/runtime/kernel.rs b/artiq/firmware/runtime/kernel.rs index acbce47d7..dda190cd9 100644 --- a/artiq/firmware/runtime/kernel.rs +++ b/artiq/firmware/runtime/kernel.rs @@ -181,17 +181,17 @@ pub mod subkernel { Ok(()) } - pub fn upload(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, + pub fn upload(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable, id: u32) -> Result<(), Error> { let _lock = subkernel_mutex.lock(io)?; let subkernel = unsafe { SUBKERNELS.get_mut(&id).unwrap() }; - drtio::subkernel_upload(io, aux_mutex, routing_table, id, + drtio::subkernel_upload(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, id, subkernel.destination, &subkernel.data)?; subkernel.state = SubkernelState::Uploaded; Ok(()) } - pub fn load(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable, + pub fn load(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable, id: u32, run: bool) -> Result<(), Error> { let _lock = subkernel_mutex.lock(io)?; let subkernel = unsafe { SUBKERNELS.get_mut(&id).unwrap() }; @@ -199,7 +199,7 @@ pub mod subkernel { error!("for id: {} expected Uploaded, got: {:?}", id, subkernel.state); return Err(Error::IncorrectState); } - drtio::subkernel_load(io, aux_mutex, routing_table, id, subkernel.destination, run)?; + drtio::subkernel_load(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, id, subkernel.destination, run)?; if run { subkernel.state = SubkernelState::Running; } @@ -234,14 +234,14 @@ pub mod subkernel { } } - pub fn destination_changed(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, + pub fn destination_changed(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable, destination: u8, up: bool) { let _lock = subkernel_mutex.lock(io).unwrap(); let subkernels_iter = unsafe { SUBKERNELS.iter_mut() }; for (id, subkernel) in subkernels_iter { if subkernel.destination == destination { if up { - match drtio::subkernel_upload(io, aux_mutex, routing_table, *id, destination, &subkernel.data) + match drtio::subkernel_upload(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, *id, destination, &subkernel.data) { Ok(_) => subkernel.state = SubkernelState::Uploaded, Err(e) => error!("Error adding subkernel on destination {}: {}", destination, e) @@ -256,7 +256,7 @@ pub mod subkernel { } } - pub fn retrieve_finish_status(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, + pub fn retrieve_finish_status(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable, id: u32) -> Result { let _lock = subkernel_mutex.lock(io)?; let mut subkernel = unsafe { 
SUBKERNELS.get_mut(&id).unwrap() }; @@ -267,7 +267,7 @@ pub mod subkernel { id: id, comm_lost: status == FinishStatus::CommLost, exception: if let FinishStatus::Exception(dest) = status { - Some(drtio::subkernel_retrieve_exception(io, aux_mutex, + Some(drtio::subkernel_retrieve_exception(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, dest)?) } else { None } }) @@ -278,7 +278,7 @@ pub mod subkernel { } } - pub fn await_finish(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, + pub fn await_finish(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable, id: u32, timeout: i64) -> Result { { let _lock = subkernel_mutex.lock(io)?; @@ -309,7 +309,7 @@ pub mod subkernel { error!("Remote subkernel finish await timed out"); return Err(Error::Timeout); } - retrieve_finish_status(io, aux_mutex, subkernel_mutex, routing_table, id) + retrieve_finish_status(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, id) } pub struct Message { @@ -418,7 +418,7 @@ pub mod subkernel { } } - pub fn message_send<'a>(io: &Io, aux_mutex: &Mutex, subkernel_mutex: &Mutex, + pub fn message_send<'a>(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable, id: u32, destination: Option, count: u8, tag: &'a [u8], message: *const *const () ) -> Result<(), Error> { let mut writer = Cursor::new(Vec::new()); @@ -433,7 +433,7 @@ pub mod subkernel { let data = &mut writer.into_inner()[3..]; data[0] = count; Ok(drtio::subkernel_send_message( - io, aux_mutex, routing_table, id, destination, data + io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, id, destination, data )?) } } \ No newline at end of file diff --git a/artiq/firmware/runtime/main.rs b/artiq/firmware/runtime/main.rs index a27c337ee..6d26432a7 100644 --- a/artiq/firmware/runtime/main.rs +++ b/artiq/firmware/runtime/main.rs @@ -214,15 +214,19 @@ fn startup() { #[cfg(any(has_rtio_moninj, has_drtio))] { let aux_mutex = aux_mutex.clone(); + let ddma_mutex = ddma_mutex.clone(); + let subkernel_mutex = subkernel_mutex.clone(); let drtio_routing_table = drtio_routing_table.clone(); - io.spawn(4096, move |io| { moninj::thread(io, &aux_mutex, &drtio_routing_table) }); + io.spawn(4096, move |io| { moninj::thread(io, &aux_mutex, &ddma_mutex, &subkernel_mutex, &drtio_routing_table) }); } #[cfg(has_rtio_analyzer)] { let aux_mutex = aux_mutex.clone(); + let ddma_mutex = ddma_mutex.clone(); + let subkernel_mutex = subkernel_mutex.clone(); let drtio_routing_table = drtio_routing_table.clone(); let up_destinations = up_destinations.clone(); - io.spawn(8192, move |io| { analyzer::thread(io, &aux_mutex, &drtio_routing_table, &up_destinations) }); + io.spawn(8192, move |io| { analyzer::thread(io, &aux_mutex, &ddma_mutex, &subkernel_mutex, &drtio_routing_table, &up_destinations) }); } #[cfg(has_grabber)] diff --git a/artiq/firmware/runtime/moninj.rs b/artiq/firmware/runtime/moninj.rs index 1ef6b4215..b66b65d95 100644 --- a/artiq/firmware/runtime/moninj.rs +++ b/artiq/firmware/runtime/moninj.rs @@ -50,12 +50,15 @@ mod local_moninj { #[cfg(has_drtio)] mod remote_moninj { use drtioaux; + use drtio_routing; use rtio_mgt::drtio; use sched::{Io, Mutex}; - pub fn read_probe(io: &Io, aux_mutex: &Mutex, linkno: u8, + pub fn read_probe(io: &Io, aux_mutex: &Mutex, + ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8, destination: u8, channel: u16, probe: u8) -> u64 { - let reply = drtio::aux_transact(io, aux_mutex, linkno, + let 
reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::MonitorRequest { destination: destination, channel: channel, @@ -69,7 +72,9 @@ mod remote_moninj { 0 } - pub fn inject(io: &Io, aux_mutex: &Mutex, linkno: u8, + pub fn inject(io: &Io, aux_mutex: &Mutex, + _ddma_mutex: &Mutex, _subkernel_mutex: &Mutex, + _routing_table: &drtio_routing::RoutingTable, linkno: u8, destination: u8, channel: u16, overrd: u8, value: u8) { let _lock = aux_mutex.lock(io).unwrap(); drtioaux::send(linkno, &drtioaux::Packet::InjectionRequest { @@ -80,9 +85,11 @@ mod remote_moninj { }).unwrap(); } - pub fn read_injection_status(io: &Io, aux_mutex: &Mutex, linkno: u8, + pub fn read_injection_status(io: &Io, aux_mutex: &Mutex, + ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8, destination: u8, channel: u16, overrd: u8) -> u8 { - let reply = drtio::aux_transact(io, aux_mutex, linkno, + let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::InjectionStatusRequest { destination: destination, channel: channel, @@ -99,7 +106,7 @@ mod remote_moninj { #[cfg(has_drtio)] macro_rules! dispatch { - ($io:ident, $aux_mutex:ident, $routing_table:ident, $channel:expr, $func:ident $(, $param:expr)*) => {{ + ($io:ident, $aux_mutex:ident, $ddma_mutex:ident, $subkernel_mutex:ident, $routing_table:ident, $channel:expr, $func:ident $(, $param:expr)*) => {{ let destination = ($channel >> 16) as u8; let channel = $channel as u16; let hop = $routing_table.0[destination as usize][0]; @@ -107,21 +114,21 @@ macro_rules! dispatch { local_moninj::$func(channel, $($param, )*) } else { let linkno = hop - 1; - remote_moninj::$func($io, $aux_mutex, linkno, destination, channel, $($param, )*) + remote_moninj::$func($io, $aux_mutex, $ddma_mutex, $subkernel_mutex, $routing_table, linkno, destination, channel, $($param, )*) } }} } #[cfg(not(has_drtio))] macro_rules! 
dispatch { - ($io:ident, $aux_mutex:ident, $routing_table:ident, $channel:expr, $func:ident $(, $param:expr)*) => {{ + ($io:ident, $aux_mutex:ident, $ddma_mutex:ident, $subkernel_mutex:ident, $routing_table:ident, $channel:expr, $func:ident $(, $param:expr)*) => {{ let channel = $channel as u16; local_moninj::$func(channel, $($param, )*) }} } -fn connection_worker(io: &Io, _aux_mutex: &Mutex, _routing_table: &drtio_routing::RoutingTable, - mut stream: &mut TcpStream) -> Result<(), Error> { +fn connection_worker(io: &Io, _aux_mutex: &Mutex, _ddma_mutex: &Mutex, _subkernel_mutex: &Mutex, + _routing_table: &drtio_routing::RoutingTable, mut stream: &mut TcpStream) -> Result<(), Error> { let mut probe_watch_list = BTreeMap::new(); let mut inject_watch_list = BTreeMap::new(); let mut next_check = 0; @@ -150,9 +157,9 @@ fn connection_worker(io: &Io, _aux_mutex: &Mutex, _routing_table: &drtio_routing } }, HostMessage::Inject { channel, overrd, value } => dispatch!( - io, _aux_mutex, _routing_table, channel, inject, overrd, value), + io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, channel, inject, overrd, value), HostMessage::GetInjectionStatus { channel, overrd } => { - let value = dispatch!(io, _aux_mutex, _routing_table, channel, read_injection_status, overrd); + let value = dispatch!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, channel, read_injection_status, overrd); let reply = DeviceMessage::InjectionStatus { channel: channel, overrd: overrd, @@ -169,7 +176,7 @@ fn connection_worker(io: &Io, _aux_mutex: &Mutex, _routing_table: &drtio_routing if clock::get_ms() > next_check { for (&(channel, probe), previous) in probe_watch_list.iter_mut() { - let current = dispatch!(io, _aux_mutex, _routing_table, channel, read_probe, probe); + let current = dispatch!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, channel, read_probe, probe); if previous.is_none() || previous.unwrap() != current { let message = DeviceMessage::MonitorStatus { channel: channel, @@ -184,7 +191,7 @@ fn connection_worker(io: &Io, _aux_mutex: &Mutex, _routing_table: &drtio_routing } } for (&(channel, overrd), previous) in inject_watch_list.iter_mut() { - let current = dispatch!(io, _aux_mutex, _routing_table, channel, read_injection_status, overrd); + let current = dispatch!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, channel, read_injection_status, overrd); if previous.is_none() || previous.unwrap() != current { let message = DeviceMessage::InjectionStatus { channel: channel, @@ -205,18 +212,20 @@ fn connection_worker(io: &Io, _aux_mutex: &Mutex, _routing_table: &drtio_routing } } -pub fn thread(io: Io, aux_mutex: &Mutex, routing_table: &Urc>) { +pub fn thread(io: Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &Urc>) { let listener = TcpListener::new(&io, 2047); listener.listen(1383).expect("moninj: cannot listen"); loop { let aux_mutex = aux_mutex.clone(); + let ddma_mutex = ddma_mutex.clone(); + let subkernel_mutex = subkernel_mutex.clone(); let routing_table = routing_table.clone(); let stream = listener.accept().expect("moninj: cannot accept").into_handle(); io.spawn(16384, move |io| { let routing_table = routing_table.borrow(); let mut stream = TcpStream::from_handle(&io, stream); - match connection_worker(&io, &aux_mutex, &routing_table, &mut stream) { + match connection_worker(&io, &aux_mutex, &ddma_mutex, &subkernel_mutex, &routing_table, &mut stream) { Ok(()) => {}, Err(err) => error!("moninj aborted: {}", err) } diff 
--git a/artiq/firmware/runtime/rtio_dma.rs b/artiq/firmware/runtime/rtio_dma.rs index 666986919..e1efa5567 100644 --- a/artiq/firmware/runtime/rtio_dma.rs +++ b/artiq/firmware/runtime/rtio_dma.rs @@ -120,12 +120,12 @@ pub mod remote_dma { Ok(playback_state) } - pub fn erase(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, + pub fn erase(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable, id: u32) -> Result<(), Error> { let _lock = ddma_mutex.lock(io)?; let destinations = unsafe { TRACES.get(&id).unwrap() }; for destination in destinations.keys() { - match drtio::ddma_send_erase(io, aux_mutex, routing_table, id, *destination) { + match drtio::ddma_send_erase(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, id, *destination) { Ok(_) => (), Err(e) => error!("Error erasing trace on DMA: {}", e) } @@ -134,18 +134,18 @@ pub mod remote_dma { Ok(()) } - pub fn upload_traces(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, + pub fn upload_traces(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable, id: u32) -> Result<(), Error> { let _lock = ddma_mutex.lock(io)?; let traces = unsafe { TRACES.get_mut(&id).unwrap() }; for (destination, mut trace) in traces { - drtio::ddma_upload_trace(io, aux_mutex, routing_table, id, *destination, trace.get_trace())?; + drtio::ddma_upload_trace(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, id, *destination, trace.get_trace())?; trace.state = RemoteState::Loaded; } Ok(()) } - pub fn playback(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, + pub fn playback(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable, id: u32, timestamp: u64) -> Result<(), Error>{ // triggers playback on satellites let destinations = unsafe { @@ -161,7 +161,7 @@ pub mod remote_dma { return Err(Error::IncorrectState); } } - drtio::ddma_send_playback(io, aux_mutex, routing_table, id, *destination, timestamp)?; + drtio::ddma_send_playback(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, id, *destination, timestamp)?; } Ok(()) } @@ -178,7 +178,7 @@ pub mod remote_dma { }; } - pub fn destination_changed(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, + pub fn destination_changed(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable, destination: u8, up: bool) { // update state of the destination, resend traces if it's up let _lock = ddma_mutex.lock(io).unwrap(); @@ -186,7 +186,7 @@ pub mod remote_dma { for (id, dest_traces) in traces_iter { if let Some(trace) = dest_traces.get_mut(&destination) { if up { - match drtio::ddma_upload_trace(io, aux_mutex, routing_table, *id, destination, trace.get_trace()) + match drtio::ddma_upload_trace(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, *id, destination, trace.get_trace()) { Ok(_) => trace.state = RemoteState::Loaded, Err(e) => error!("Error adding DMA trace on destination {}: {}", destination, e) diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index 3f2c9e7f3..f08737691 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -87,7 +87,6 @@ pub mod drtio { if clock::get_ms() > max_time { return Err(Error::Timeout); } - // todo: reinsert handling of async messages match drtioaux::recv(linkno) { Ok(Some(packet)) => return Ok(packet), Ok(None) => (), @@ -118,7 +117,7 @@ pub mod drtio { ).unwrap(); None }, - // routable packets + // (potentially) 
routable packets drtioaux::Packet::DmaAddTraceRequest { destination, .. } | drtioaux::Packet::DmaAddTraceReply { destination, .. } | drtioaux::Packet::DmaRemoveTraceRequest { destination, .. } | @@ -131,26 +130,42 @@ pub mod drtio { drtioaux::Packet::SubkernelMessageAck { destination, .. } | drtioaux::Packet::DmaPlaybackStatus { destination, .. } | drtioaux::Packet::SubkernelFinished { destination, .. } => { - let dest_link = routing_table.0[destination as usize][0] - 1; - if dest_link == linkno { - warn!("[LINK#{}] Re-routed packet would return to the same link, dropping: {:?}", linkno, packet); - } else if destination == 0 { - warn!("[LINK#{}] Received invalid routable packet: {:?}", linkno, packet) + if destination == 0 { + Some(packet) } else { - drtioaux::send(dest_link, &packet).unwrap(); + let dest_link = routing_table.0[destination as usize][0] - 1; + if dest_link == linkno { + warn!("[LINK#{}] Re-routed packet would return to the same link, dropping: {:?}", linkno, packet); + } else { + drtioaux::send(dest_link, &packet).unwrap(); + } + None } - None } other => Some(other) } } - pub fn aux_transact(io: &Io, aux_mutex: &Mutex, linkno: u8, request: &drtioaux::Packet + pub fn aux_transact(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, linkno: u8, request: &drtioaux::Packet ) -> Result<drtioaux::Packet, Error> { let _lock = aux_mutex.lock(io)?; drtioaux::send(linkno, request).unwrap(); - let reply = recv_aux_timeout(io, linkno, 200)?; - Ok(reply) + loop { + let reply = recv_aux_timeout(io, linkno, 200)?; + if let Some(reply) = process_async_packets(io, ddma_mutex, subkernel_mutex, routing_table, linkno, reply) { + // returns none if it was an async packet + return Ok(reply); + } + } + } + + fn setup_transact(io: &Io, aux_mutex: &Mutex, linkno: u8, request: &drtioaux::Packet) -> Result<drtioaux::Packet, Error> { + /* shorter aux_transact for setup purposes, no checking for async packets, + as they should not be generated yet */ + let _lock = aux_mutex.lock(io)?; + drtioaux::send(linkno, request).unwrap(); + recv_aux_timeout(io, linkno, 200) } pub fn clear_buffers(io: &Io, aux_mutex: &Mutex) { @@ -173,7 +188,7 @@ pub mod drtio { if count > 100 { return 0; } - let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::EchoRequest); + let reply = setup_transact(io, aux_mutex, linkno, &drtioaux::Packet::EchoRequest); match reply { Ok(drtioaux::Packet::EchoReply) => { // make sure receive buffer is drained @@ -212,7 +227,7 @@ pub mod drtio { fn load_routing_table(io: &Io, aux_mutex: &Mutex, linkno: u8, routing_table: &drtio_routing::RoutingTable) -> Result<(), Error> { for i in 0..drtio_routing::DEST_COUNT { - let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::RoutingSetPath { + let reply = setup_transact(io, aux_mutex, linkno, &drtioaux::Packet::RoutingSetPath { destination: i as u8, hops: routing_table.0[i] })?; @@ -225,7 +240,7 @@ pub mod drtio { fn set_rank(io: &Io, aux_mutex: &Mutex, linkno: u8, rank: u8) -> Result<(), Error> { - let reply = aux_transact(io, aux_mutex, linkno, + let reply = setup_transact(io, aux_mutex, linkno, &drtioaux::Packet::RoutingSetRank { rank: rank })?; @@ -321,7 +336,7 @@ pub mod drtio { let linkno = hop - 1; if destination_up(up_destinations, destination) { if up_links[linkno as usize] { - let reply = aux_transact(io, aux_mutex, linkno, + let reply = aux_transact(io, aux_mutex, + ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::DestinationStatusRequest { destination: destination }); @@
-329,8 +345,8 @@ pub mod drtio { match reply { drtioaux::Packet::DestinationDownReply => { destination_set_up(routing_table, up_destinations, destination, false); - remote_dma::destination_changed(io, aux_mutex, ddma_mutex, routing_table, destination, false); - subkernel::destination_changed(io, aux_mutex, subkernel_mutex, routing_table, destination, false); + remote_dma::destination_changed(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, destination, false); + subkernel::destination_changed(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, destination, false); } drtioaux::Packet::DestinationOkReply => (), drtioaux::Packet::DestinationSequenceErrorReply { channel } => { @@ -353,12 +369,13 @@ pub mod drtio { } } else { destination_set_up(routing_table, up_destinations, destination, false); - remote_dma::destination_changed(io, aux_mutex, ddma_mutex, routing_table, destination, false); - subkernel::destination_changed(io, aux_mutex, subkernel_mutex, routing_table, destination, false); + remote_dma::destination_changed(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, destination, false); + subkernel::destination_changed(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, destination, false); } } else { if up_links[linkno as usize] { - let reply = aux_transact(io, aux_mutex, linkno, + let reply = aux_transact(io, aux_mutex, ddma_mutex, + subkernel_mutex, routing_table, linkno, &drtioaux::Packet::DestinationStatusRequest { destination: destination }); @@ -367,8 +384,8 @@ pub mod drtio { Ok(drtioaux::Packet::DestinationOkReply) => { destination_set_up(routing_table, up_destinations, destination, true); init_buffer_space(destination as u8, linkno); - remote_dma::destination_changed(io, aux_mutex, ddma_mutex, routing_table, destination, true); - subkernel::destination_changed(io, aux_mutex, subkernel_mutex, routing_table, destination, true); + remote_dma::destination_changed(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, destination, true); + subkernel::destination_changed(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, destination, true); }, Ok(packet) => error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet), Err(e) => error!("[DEST#{}] communication failed ({:?})", destination, e) @@ -425,7 +442,7 @@ pub mod drtio { } } - pub fn reset(io: &Io, aux_mutex: &Mutex) { + pub fn reset(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable) { for linkno in 0..csr::DRTIO.len() { unsafe { (csr::DRTIO[linkno].reset_write)(1); @@ -441,7 +458,7 @@ pub mod drtio { for linkno in 0..csr::DRTIO.len() { let linkno = linkno as u8; if link_rx_up(linkno) { - let reply = aux_transact(io, aux_mutex, linkno, + let reply = aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::ResetRequest); match reply { Ok(drtioaux::Packet::ResetAck) => (), @@ -468,12 +485,12 @@ pub mod drtio { Ok(()) } - pub fn ddma_upload_trace(io: &Io, aux_mutex: &Mutex, - routing_table: &drtio_routing::RoutingTable, - id: u32, destination: u8, trace: &[u8]) -> Result<(), Error> { + pub fn ddma_upload_trace(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, id: u32, destination: u8, trace: &[u8] + ) -> Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; partition_data(trace, |slice, status, len: usize| { - let reply = aux_transact(io, aux_mutex, linkno, + let reply = 
aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::DmaAddTraceRequest { id: id, source: 0, destination: destination, status: status, length: len as u16, trace: *slice})?; match reply { @@ -484,10 +501,10 @@ pub mod drtio { }) } - pub fn ddma_send_erase(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, - id: u32, destination: u8) -> Result<(), Error> { + pub fn ddma_send_erase(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, id: u32, destination: u8) -> Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; - let reply = aux_transact(io, aux_mutex, linkno, + let reply = aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::DmaRemoveTraceRequest { id: id, source: 0, destination: destination })?; match reply { drtioaux::Packet::DmaRemoveTraceReply { destination: 0, succeeded: true } => Ok(()), @@ -496,10 +513,10 @@ pub mod drtio { } } - pub fn ddma_send_playback(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, - id: u32, destination: u8, timestamp: u64) -> Result<(), Error> { + pub fn ddma_send_playback(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, id: u32, destination: u8, timestamp: u64) -> Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; - let reply = aux_transact(io, aux_mutex, linkno, + let reply = aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::DmaPlaybackRequest{ id: id, source: 0, destination: destination, timestamp: timestamp })?; match reply { drtioaux::Packet::DmaPlaybackReply { destination: 0, succeeded: true } => Ok(()), @@ -510,10 +527,10 @@ pub mod drtio { } #[cfg(has_rtio_analyzer)] - fn analyzer_get_data(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, - destination: u8) -> Result<RemoteBuffer, Error> { + fn analyzer_get_data(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, destination: u8) -> Result<RemoteBuffer, Error> { let linkno = routing_table.0[destination as usize][0] - 1; - let reply = aux_transact(io, aux_mutex, linkno, + let reply = aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::AnalyzerHeaderRequest { destination: destination })?; let (sent, total, overflow) = match reply { drtioaux::Packet::AnalyzerHeader { sent_bytes, total_byte_count, overflow_occurred } => @@ -525,7 +542,7 @@ pub mod drtio { if sent > 0 { let mut last_packet = false; while !last_packet { - let reply = aux_transact(io, aux_mutex, linkno, + let reply = aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::AnalyzerDataRequest { destination: destination })?; match reply { drtioaux::Packet::AnalyzerData { last, length, data } => { @@ -546,23 +563,23 @@ pub mod drtio { } #[cfg(has_rtio_analyzer)] - pub fn analyzer_query(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, - up_destinations: &Urc<RefCell<[bool; drtio_routing::DEST_COUNT]>> + pub fn analyzer_query(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, up_destinations: &Urc<RefCell<[bool; drtio_routing::DEST_COUNT]>> ) -> Result<Vec<RemoteBuffer>, Error> { let mut remote_buffers: Vec<RemoteBuffer> = Vec::new(); for i in 1..drtio_routing::DEST_COUNT { if destination_up(up_destinations, i as u8) { - remote_buffers.push(analyzer_get_data(io,
aux_mutex, routing_table, i as u8)?); + remote_buffers.push(analyzer_get_data(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, i as u8)?); } } Ok(remote_buffers) } - pub fn subkernel_upload(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, - id: u32, destination: u8, data: &Vec<u8>) -> Result<(), Error> { + pub fn subkernel_upload(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, id: u32, destination: u8, data: &Vec<u8>) -> Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; partition_data(data, |slice, status, len: usize| { - let reply = aux_transact(io, aux_mutex, linkno, + let reply = aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::SubkernelAddDataRequest { id: id, destination: destination, status: status, length: len as u16, data: *slice})?; match reply { @@ -574,10 +591,11 @@ pub mod drtio { }) } - pub fn subkernel_load(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, - id: u32, destination: u8, run: bool) -> Result<(), Error> { + pub fn subkernel_load(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, id: u32, destination: u8, run: bool + ) -> Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; - let reply = aux_transact(io, aux_mutex, linkno, + let reply = aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::SubkernelLoadRunRequest{ id: id, source: 0, destination: destination, run: run })?; match reply { drtioaux::Packet::SubkernelLoadRunReply { destination: 0, succeeded: true } => Ok(()), @@ -587,13 +605,13 @@ pub mod drtio { } } - pub fn subkernel_retrieve_exception(io: &Io, aux_mutex: &Mutex, + pub fn subkernel_retrieve_exception(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, destination: u8 ) -> Result<Vec<u8>, Error> { let linkno = routing_table.0[destination as usize][0] - 1; let mut remote_data: Vec<u8> = Vec::new(); loop { - let reply = aux_transact(io, aux_mutex, linkno, + let reply = aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::SubkernelExceptionRequest { destination: destination })?; match reply { drtioaux::Packet::SubkernelException { last, length, data } => { @@ -607,12 +625,12 @@ pub mod drtio { } } - pub fn subkernel_send_message(io: &Io, aux_mutex: &Mutex, + pub fn subkernel_send_message(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, id: u32, destination: u8, message: &[u8] ) -> Result<(), Error> { let linkno = routing_table.0[destination as usize][0] - 1; partition_data(message, |slice, status, len: usize| { - let reply = aux_transact(io, aux_mutex, linkno, + let reply = aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno, &drtioaux::Packet::SubkernelMessage { source: 0, destination: destination, id: id, status: status, length: len as u16, data: *slice})?; @@ -632,7 +650,7 @@ pub mod drtio { _routing_table: &Urc<RefCell<drtio_routing::RoutingTable>>, _up_destinations: &Urc<RefCell<[bool; drtio_routing::DEST_COUNT]>>, _ddma_mutex: &Mutex, _subkernel_mutex: &Mutex) {} - pub fn reset(_io: &Io, _aux_mutex: &Mutex) {} + pub fn reset(_io: &Io, _aux_mutex: &Mutex, _ddma_mutex: &Mutex, _subkernel_mutex: &Mutex, _routing_table: &drtio_routing::RoutingTable) {} } static mut SEEN_ASYNC_ERRORS: u8 = 0; @@ -704,9
+722,10 @@ pub fn startup(io: &Io, aux_mutex: &Mutex, io.spawn(4096, async_error_thread); } -pub fn reset(io: &Io, aux_mutex: &Mutex) { +pub fn reset(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable) { unsafe { csr::rtio_core::reset_write(1); } - drtio::reset(io, aux_mutex) + drtio::reset(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table) } diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index 52daaa2d0..388a74337 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -316,7 +316,7 @@ fn kern_run(session: &mut Session) -> Result<(), Error<SchedError>> { } -fn process_flash_kernel(io: &Io, _aux_mutex: &Mutex, _subkernel_mutex: &Mutex, +fn process_flash_kernel(io: &Io, _aux_mutex: &Mutex, _subkernel_mutex: &Mutex, _ddma_mutex: &Mutex, _routing_table: &drtio_routing::RoutingTable, _up_destinations: &Urc<RefCell<[bool; drtio_routing::DEST_COUNT]>>, session: &mut Session, kernel: &[u8] @@ -355,7 +355,7 @@ fn process_flash_kernel(io: &Io, _aux_mutex: &Mutex, _subkernel_mutex: &Mutex, if up { let subkernel_lib = entry.data().to_vec(); subkernel::add_subkernel(io, _subkernel_mutex, sid, dest, subkernel_lib)?; - subkernel::upload(io, _aux_mutex, _subkernel_mutex, _routing_table, sid)?; + subkernel::upload(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, sid)?; } else { return Err(Error::DestinationDown); } @@ -468,7 +468,7 @@ fn process_host_message(io: &Io, _aux_mutex: &Mutex, _ddma_mutex: &Mutex, _subke #[cfg(has_drtio)] { subkernel::add_subkernel(io, _subkernel_mutex, _id, _dest, _kernel)?; - match subkernel::upload(io, _aux_mutex, _subkernel_mutex, _routing_table, _id) { + match subkernel::upload(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, _id) { Ok(_) => host_write(stream, host::Reply::LoadCompleted)?, Err(error) => { subkernel::clear_subkernels(io, _subkernel_mutex)?; @@ -489,7 +489,7 @@ fn process_host_message(io: &Io, _aux_mutex: &Mutex, _ddma_mutex: &Mutex, _subke fn process_kern_message(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, up_destinations: &Urc<RefCell<[bool; drtio_routing::DEST_COUNT]>>, - ddma_mutex: &Mutex, _subkernel_mutex: &Mutex, mut stream: Option<&mut TcpStream>, + ddma_mutex: &Mutex, subkernel_mutex: &Mutex, mut stream: Option<&mut TcpStream>, session: &mut Session) -> Result<bool, Error<SchedError>> { kern_recv_notrace(io, |request| { match (request, session.kernel_state) { @@ -507,7 +507,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, kern_recv_dotrace(request); - if kern_hwreq::process_kern_hwreq(io, aux_mutex, routing_table, up_destinations, request)? { + if kern_hwreq::process_kern_hwreq(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, up_destinations, request)?
{ return Ok(false) } @@ -531,7 +531,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, if let Some(_id) = session.congress.dma_manager.record_start(name) { // replace the record #[cfg(has_drtio)] - remote_dma::erase(io, aux_mutex, ddma_mutex, routing_table, _id)?; + remote_dma::erase(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, _id)?; } kern_acknowledge() } @@ -543,7 +543,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, let _id = session.congress.dma_manager.record_stop(duration, enable_ddma, io, ddma_mutex)?; #[cfg(has_drtio)] if enable_ddma { - remote_dma::upload_traces(io, aux_mutex, ddma_mutex, routing_table, _id)?; + remote_dma::upload_traces(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, _id)?; } cache::flush_l2_cache(); kern_acknowledge() @@ -551,7 +551,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, &kern::DmaEraseRequest { name } => { #[cfg(has_drtio)] if let Some(id) = session.congress.dma_manager.get_id(name) { - remote_dma::erase(io, aux_mutex, ddma_mutex, routing_table, *id)?; + remote_dma::erase(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, *id)?; } session.congress.dma_manager.erase(name); kern_acknowledge() @@ -574,7 +574,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, } &kern::DmaStartRemoteRequest { id: _id, timestamp: _timestamp } => { #[cfg(has_drtio)] - remote_dma::playback(io, aux_mutex, ddma_mutex, routing_table, _id as u32, _timestamp as u64)?; + remote_dma::playback(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, _id as u32, _timestamp as u64)?; kern_acknowledge() } &kern::DmaAwaitRemoteRequest { id: _id } => { @@ -633,7 +633,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, session.kernel_state = KernelState::Absent; unsafe { session.congress.cache.unborrow() } #[cfg(has_drtio)] - subkernel::clear_subkernels(io, _subkernel_mutex)?; + subkernel::clear_subkernels(io, subkernel_mutex)?; match stream { None => return Ok(true), @@ -652,7 +652,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, session.kernel_state = KernelState::Absent; unsafe { session.congress.cache.unborrow() } #[cfg(has_drtio)] - subkernel::clear_subkernels(io, _subkernel_mutex)?; + subkernel::clear_subkernels(io, subkernel_mutex)?; match stream { None => { @@ -675,7 +675,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, #[cfg(has_drtio)] &kern::SubkernelLoadRunRequest { id, destination: _, run } => { let succeeded = match subkernel::load( - io, aux_mutex, _subkernel_mutex, routing_table, id, run) { + io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, id, run) { Ok(()) => true, Err(e) => { error!("Error loading subkernel: {}", e); false } }; @@ -683,7 +683,7 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, } #[cfg(has_drtio)] &kern::SubkernelAwaitFinishRequest{ id, timeout } => { - let res = subkernel::await_finish(io, aux_mutex, _subkernel_mutex, routing_table, + let res = subkernel::await_finish(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, id, timeout); let status = match res { Ok(ref res) => { @@ -705,18 +705,18 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, } #[cfg(has_drtio)] &kern::SubkernelMsgSend { id, destination, count, tag, data } => { - subkernel::message_send(io, aux_mutex, _subkernel_mutex, routing_table, id, destination, count, tag, data)?; + subkernel::message_send(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, id, destination, count, tag, data)?; kern_acknowledge() } #[cfg(has_drtio)] &kern::SubkernelMsgRecvRequest { id, timeout, tags 
} => { - let message_received = subkernel::message_await(io, _subkernel_mutex, id as u32, timeout); + let message_received = subkernel::message_await(io, subkernel_mutex, id as u32, timeout); let (status, count) = match message_received { Ok(ref message) => (kern::SubkernelStatus::NoError, message.count), Err(SubkernelError::Timeout) => (kern::SubkernelStatus::Timeout, 0), Err(SubkernelError::IncorrectState) => (kern::SubkernelStatus::IncorrectState, 0), Err(SubkernelError::SubkernelFinished) => { - let res = subkernel::retrieve_finish_status(io, aux_mutex, _subkernel_mutex, + let res = subkernel::retrieve_finish_status(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, id as u32)?; if res.comm_lost { (kern::SubkernelStatus::CommLost, 0) @@ -847,7 +847,7 @@ fn flash_kernel_worker(io: &Io, aux_mutex: &Mutex, match result { Ok(kernel) => { // process .ELF or .TAR kernels - let res = process_flash_kernel(io, aux_mutex, subkernel_mutex, routing_table, up_destinations, &mut session, kernel); + let res = process_flash_kernel(io, aux_mutex, subkernel_mutex, ddma_mutex, routing_table, up_destinations, &mut session, kernel); #[cfg(has_drtio)] match res { // wait to establish the DRTIO connection From 24fe885b5cb6ea40e891a3c5b1cc1174d07c3823 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Fri, 26 Apr 2024 23:33:43 +0800 Subject: [PATCH 226/296] flake: update dependencies --- flake.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index d0085e11f..415e4cea2 100644 --- a/flake.lock +++ b/flake.lock @@ -60,11 +60,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1711668574, - "narHash": "sha256-u1dfs0ASQIEr1icTVrsKwg2xToIpn7ZXxW3RHfHxshg=", + "lastModified": 1713995372, + "narHash": "sha256-fFE3M0vCoiSwCX02z8VF58jXFRj9enYUSTqjyHAjrds=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "219951b495fc2eac67b1456824cc1ec1fd2ee659", + "rev": "dd37924974b9202f8226ed5d74a252a9785aedf8", "type": "github" }, "original": { From 5fe47129ed2280cde16e10595568e47be548b12a Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 30 Apr 2024 16:41:17 +0800 Subject: [PATCH 227/296] fix missing get_dataset_metadata --- artiq/frontend/artiq_master.py | 1 + artiq/master/worker_impl.py | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/artiq/frontend/artiq_master.py b/artiq/frontend/artiq_master.py index dfd22565b..26debb1a2 100755 --- a/artiq/frontend/artiq_master.py +++ b/artiq/frontend/artiq_master.py @@ -120,6 +120,7 @@ def main(): "get_device_db": device_db.get_device_db, "get_device": device_db.get, "get_dataset": dataset_db.get, + "get_dataset_metadata": dataset_db.get_metadata, "update_dataset": dataset_db.update, "get_interactive_arguments": get_interactive_arguments, "scheduler_submit": scheduler.submit, diff --git a/artiq/master/worker_impl.py b/artiq/master/worker_impl.py index bf0bfd587..9f928b4d0 100644 --- a/artiq/master/worker_impl.py +++ b/artiq/master/worker_impl.py @@ -73,6 +73,7 @@ class ParentDeviceDB: class ParentDatasetDB: get = make_parent_action("get_dataset") update = make_parent_action("update_dataset") + get_metadata = make_parent_action("get_dataset_metadata") class Watchdog: @@ -186,6 +187,10 @@ class ExamineDatasetMgr: def get(key, archive=False): return ParentDatasetDB.get(key) + @staticmethod + def get_metadata(key): + return ParentDatasetDB.get_metadata(key) + def examine(device_mgr, dataset_mgr, file): previous_keys = set(sys.modules.keys()) From 193962f31e1e4fc942f0248e0baf160cec4c8da7 Mon Sep 17 00:00:00 2001 
From: Sebastien Bourdeauducq Date: Mon, 6 May 2024 12:44:45 +0800 Subject: [PATCH 228/296] flake: update to nixpkgs unstable (soon to be 24.05) --- flake.lock | 8 ++++---- flake.nix | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/flake.lock b/flake.lock index 415e4cea2..3e41fe5bc 100644 --- a/flake.lock +++ b/flake.lock @@ -60,16 +60,16 @@ }, "nixpkgs": { "locked": { - "lastModified": 1713995372, - "narHash": "sha256-fFE3M0vCoiSwCX02z8VF58jXFRj9enYUSTqjyHAjrds=", + "lastModified": 1714906307, + "narHash": "sha256-UlRZtrCnhPFSJlDQE7M0eyhgvuuHBTe1eJ9N9AQlJQ0=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "dd37924974b9202f8226ed5d74a252a9785aedf8", + "rev": "25865a40d14b3f9cf19f19b924e2ab4069b09588", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-23.11", + "ref": "nixos-unstable", "repo": "nixpkgs", "type": "github" } diff --git a/flake.nix b/flake.nix index 666d86659..1b2015c2d 100644 --- a/flake.nix +++ b/flake.nix @@ -1,7 +1,7 @@ { description = "A leading-edge control system for quantum information experiments"; - inputs.nixpkgs.url = github:NixOS/nixpkgs/nixos-23.11; + inputs.nixpkgs.url = github:NixOS/nixpkgs/nixos-unstable; inputs.mozilla-overlay = { url = github:mozilla/nixpkgs-mozilla; flake = false; }; inputs.sipyco.url = github:m-labs/sipyco; inputs.sipyco.inputs.nixpkgs.follows = "nixpkgs"; From a8157cd5c9c8ee8cb6daa34052d300fc30583948 Mon Sep 17 00:00:00 2001 From: Florian Agbuya Date: Tue, 7 May 2024 10:40:52 +0800 Subject: [PATCH 229/296] enable dynamic address configuration in Kasli I2C EEPROM Signed-off-by: Florian Agbuya --- artiq/coredevice/kasli_i2c.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/artiq/coredevice/kasli_i2c.py b/artiq/coredevice/kasli_i2c.py index 1e031fa2b..c332bb5ce 100644 --- a/artiq/coredevice/kasli_i2c.py +++ b/artiq/coredevice/kasli_i2c.py @@ -25,14 +25,14 @@ port_mapping = { class KasliEEPROM: - def __init__(self, dmgr, port, busno=0, + def __init__(self, dmgr, port, address=0xa0, busno=0, core_device="core", sw0_device="i2c_switch0", sw1_device="i2c_switch1"): self.core = dmgr.get(core_device) self.sw0 = dmgr.get(sw0_device) self.sw1 = dmgr.get(sw1_device) self.busno = busno self.port = port_mapping[port] - self.address = 0xa0 # i2c 8 bit + self.address = address # i2c 8 bit @kernel def select(self): From 7dff78e849ceab4992df521875f66b5917a7a8ab Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Tue, 7 May 2024 15:07:37 +0800 Subject: [PATCH 230/296] moninj: move _DDSModel constructor Signed-off-by: Simon Renblad --- artiq/dashboard/moninj.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index d7bb4eac9..6d5874858 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -211,13 +211,14 @@ class _DDSModel: class _DDSWidget(QtWidgets.QFrame): - def __init__(self, dm, title, bus_channel=0, channel=0, dds_model=None): + def __init__(self, dm, title, bus_channel, channel, + dds_type, ref_clk, cpld=None, pll=1, clk_div=0): self.dm = dm self.bus_channel = bus_channel self.channel = channel self.dds_name = title self.cur_frequency = 0 - self.dds_model = dds_model + self.dds_model = _DDSModel(dds_type, ref_clk, cpld, pll, clk_div) QtWidgets.QFrame.__init__(self) @@ -388,9 +389,8 @@ def setup_from_ddb(ddb): bus_channel = v["arguments"]["bus_channel"] channel = v["arguments"]["channel"] dds_sysclk = v["arguments"]["sysclk"] - model = _DDSModel(v["class"], dds_sysclk) widget = _WidgetDesc(k, 
comment, _DDSWidget, - (k, bus_channel, channel, model)) + (k, bus_channel, channel, v["class"], dds_sysclk)) description.add(widget) elif (v["module"] == "artiq.coredevice.ad9910" and v["class"] == "AD9910") or \ (v["module"] == "artiq.coredevice.ad9912" and v["class"] == "AD9912"): @@ -403,9 +403,9 @@ def setup_from_ddb(ddb): pll = v["arguments"]["pll_n"] refclk = ddb[dds_cpld]["arguments"]["refclk"] clk_div = v["arguments"].get("clk_div", 0) - model = _DDSModel(v["class"], refclk, dds_cpld, pll, clk_div) widget = _WidgetDesc(k, comment, _DDSWidget, - (k, bus_channel, channel, model)) + (k, bus_channel, channel, v["class"], + refclk, dds_cpld, pll, clk_div)) description.add(widget) elif (v["module"] == "artiq.coredevice.ad53xx" and v["class"] == "AD53xx") or \ (v["module"] == "artiq.coredevice.zotino" and v["class"] == "Zotino"): From 16e4b616ca745df2f7530b50bf7dd6c37d57c389 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Sowi=C5=84ski?= Date: Mon, 13 May 2024 16:21:52 +0200 Subject: [PATCH 231/296] Updated EEM FMC Carrier support for v1.1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Mikołaj Sowiński --- artiq/firmware/satman/main.rs | 40 +++++++++++++++++++++++++++++------ artiq/frontend/artiq_flash.py | 15 +++++++++---- artiq/gateware/targets/efc.py | 6 +++++- 3 files changed, 50 insertions(+), 11 deletions(-) diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index ed1cd238c..ec0c781bb 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -669,6 +669,20 @@ pub extern fn main() -> i32 { let mut io_expander; #[cfg(soc_platform = "efc")] { + let p3v3_fmc_en_pin; + let vadj_fmc_en_pin; + + #[cfg(hw_rev = "v1.0")] + { + p3v3_fmc_en_pin = 0; + vadj_fmc_en_pin = 1; + } + #[cfg(hw_rev = "v1.1")] + { + p3v3_fmc_en_pin = 1; + vadj_fmc_en_pin = 7; + } + io_expander = board_misoc::io_expander::IoExpander::new().unwrap(); io_expander.init().expect("I2C I/O expander initialization failed"); @@ -676,10 +690,10 @@ pub extern fn main() -> i32 { io_expander.set_oe(0, 1 << 5 | 1 << 6 | 1 << 7).unwrap(); // Enable VADJ and P3V3_FMC - io_expander.set_oe(1, 1 << 0 | 1 << 1).unwrap(); + io_expander.set_oe(1, 1 << p3v3_fmc_en_pin | 1 << vadj_fmc_en_pin).unwrap(); - io_expander.set(1, 0, true); - io_expander.set(1, 1, true); + io_expander.set(1, p3v3_fmc_en_pin, true); + io_expander.set(1, vadj_fmc_en_pin, true); io_expander.service().unwrap(); } @@ -812,6 +826,20 @@ pub extern fn main() -> i32 { #[cfg(soc_platform = "efc")] fn enable_error_led() { + let p3v3_fmc_en_pin; + let vadj_fmc_en_pin; + + #[cfg(hw_rev = "v1.0")] + { + p3v3_fmc_en_pin = 0; + vadj_fmc_en_pin = 1; + } + #[cfg(hw_rev = "v1.1")] + { + p3v3_fmc_en_pin = 1; + vadj_fmc_en_pin = 7; + } + let mut io_expander = board_misoc::io_expander::IoExpander::new().unwrap(); // Keep LEDs enabled @@ -820,10 +848,10 @@ fn enable_error_led() { io_expander.set(0, 7, true); // Keep VADJ and P3V3_FMC enabled - io_expander.set_oe(1, 1 << 0 | 1 << 1).unwrap(); + io_expander.set_oe(1, 1 << p3v3_fmc_en_pin | 1 << vadj_fmc_en_pin).unwrap(); - io_expander.set(1, 0, true); - io_expander.set(1, 1, true); + io_expander.set(1, p3v3_fmc_en_pin, true); + io_expander.set(1, vadj_fmc_en_pin, true); io_expander.service().unwrap(); } diff --git a/artiq/frontend/artiq_flash.py b/artiq/frontend/artiq_flash.py index 6c2037ca1..9a36d6ce8 100755 --- a/artiq/frontend/artiq_flash.py +++ b/artiq/frontend/artiq_flash.py @@ -261,12 +261,19 @@ def main(): "storage": 
("spi0", 0x440000), "firmware": ("spi0", 0x450000), }, - "efc": { + "efc1v0": { "programmer": partial(ProgrammerXC7, board="efc", proxy="bscan_spi_xc7a100t.bit"), "gateware": ("spi0", 0x000000), - "bootloader": ("spi0", 0x400000), - "storage": ("spi0", 0x440000), - "firmware": ("spi0", 0x450000), + "bootloader": ("spi0", 0x600000), + "storage": ("spi0", 0x640000), + "firmware": ("spi0", 0x650000), + }, + "efc1v1": { + "programmer": partial(ProgrammerXC7, board="efc", proxy="bscan_spi_xc7a200t.bit"), + "gateware": ("spi0", 0x000000), + "bootloader": ("spi0", 0x600000), + "storage": ("spi0", 0x640000), + "firmware": ("spi0", 0x650000), }, "kc705": { "programmer": partial(ProgrammerXC7, board="kc705", proxy="bscan_spi_xc7k325t.bit"), diff --git a/artiq/gateware/targets/efc.py b/artiq/gateware/targets/efc.py index ee26c5e0e..5410eafa3 100644 --- a/artiq/gateware/targets/efc.py +++ b/artiq/gateware/targets/efc.py @@ -29,9 +29,10 @@ class Satellite(BaseSoC, AMPSoC): } mem_map.update(BaseSoC.mem_map) - def __init__(self, gateware_identifier_str=None, **kwargs): + def __init__(self, gateware_identifier_str=None, hw_rev="v1.1", **kwargs): BaseSoC.__init__(self, cpu_type="vexriscv", + hw_rev=hw_rev, cpu_bus_width=64, sdram_controller_type="minicon", l2_size=128*1024, @@ -243,12 +244,15 @@ def main(): builder_args(parser) parser.set_defaults(output_dir="artiq_efc") parser.add_argument("-V", "--variant", default="shuttler") + parser.add_argument("--hw-rev", choices=["v1.0", "v1.1"], default="v1.1", + help="Hardware revision") parser.add_argument("--gateware-identifier-str", default=None, help="Override ROM identifier") args = parser.parse_args() argdict = dict() argdict["gateware_identifier_str"] = args.gateware_identifier_str + argdict["hw_rev"] = args.hw_rev soc = Satellite(**argdict) build_artiq_soc(soc, builder_argdict(args)) From 6d0821ecaf458ccb55e5d0530ebe68de675a3c76 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Tue, 14 May 2024 08:49:29 +0800 Subject: [PATCH 232/296] flake: update dependencies --- flake.lock | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/flake.lock b/flake.lock index 3e41fe5bc..2c8d3ee78 100644 --- a/flake.lock +++ b/flake.lock @@ -60,11 +60,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1714906307, - "narHash": "sha256-UlRZtrCnhPFSJlDQE7M0eyhgvuuHBTe1eJ9N9AQlJQ0=", + "lastModified": 1715534503, + "narHash": "sha256-5ZSVkFadZbFP1THataCaSf0JH2cAH3S29hU9rrxTEqk=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "25865a40d14b3f9cf19f19b924e2ab4069b09588", + "rev": "2057814051972fa1453ddfb0d98badbea9b83c06", "type": "github" }, "original": { @@ -108,11 +108,11 @@ "src-migen": { "flake": false, "locked": { - "lastModified": 1702942348, - "narHash": "sha256-gKIfHZxsv+jcgDFRW9mPqmwqbZXuRvXefkZcSFjOGHw=", + "lastModified": 1715484909, + "narHash": "sha256-4DCHBUBfc/VA+7NW2Hr0+JP4NnKPru2uVJyZjCCk0Ws=", "owner": "m-labs", "repo": "migen", - "rev": "50934ad10a87ade47219b796535978b9bdf24023", + "rev": "4790bb577681a8c3a8d226bc196a4e5deb39e4df", "type": "github" }, "original": { @@ -124,11 +124,11 @@ "src-misoc": { "flake": false, "locked": { - "lastModified": 1699352904, - "narHash": "sha256-SglyTmXOPv8jJOjwAjJrj/WhAkItQfUbvKfUqrynwRg=", + "lastModified": 1715647536, + "narHash": "sha256-q+USDcaKHABwW56Jzq8u94iGPWlyLXMyVt0j/Gyg+IE=", "ref": "refs/heads/master", - "rev": "a53859f2167c31ab5225b6c09f30cf05527b94f4", - "revCount": 2452, + "rev": "fea9de558c730bc394a5936094ae95bb9d6fa726", + "revCount": 2455, "submodules": true, 
"type": "git", "url": "https://github.com/m-labs/misoc.git" From c33c1df07fd73180f43694275f632dedbcec1a67 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 16 May 2024 15:44:25 +0800 Subject: [PATCH 233/296] remove cargo-xbuild, fix nix build --- artiq/firmware/Cargo.lock | 229 +++++++++++++++++++++++++-- artiq/firmware/bootloader/Cargo.toml | 19 ++- artiq/firmware/bootloader/Makefile | 2 - artiq/firmware/ksupport/Makefile | 2 - artiq/firmware/libboard_misoc/lib.rs | 1 + artiq/firmware/libeh/Cargo.toml | 3 +- artiq/firmware/runtime/Cargo.toml | 2 +- artiq/firmware/runtime/Makefile | 2 - artiq/firmware/satman/Makefile | 2 - flake.nix | 7 - 10 files changed, 233 insertions(+), 36 deletions(-) diff --git a/artiq/firmware/Cargo.lock b/artiq/firmware/Cargo.lock index 71ea74d8b..f129d0d28 100644 --- a/artiq/firmware/Cargo.lock +++ b/artiq/firmware/Cargo.lock @@ -1,10 +1,42 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. [[package]] -name = "aho-corasick" -version = "0.7.18" +name = "addr2line" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" +dependencies = [ + "cpp_demangle", + "fallible-iterator", + "gimli", + "object", + "rustc-demangle", + "smallvec", +] + +[[package]] +name = "adler" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "ahash" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0453232ace82dee0dd0b4c87a59bd90f7b53b314f3e0f61fe2ee7c8a16482289" + +[[package]] +name = "aho-corasick" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" dependencies = [ "memchr", ] @@ -30,9 +62,9 @@ dependencies = [ [[package]] name = "bit_field" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb6dd1c2376d2e096796e234a70e17e94cc2d5d54ff8ce42b28cef1d0d359a4" +checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61" [[package]] name = "bitflags" @@ -72,12 +104,26 @@ dependencies = [ name = "bootloader" version = "0.0.0" dependencies = [ + "addr2line", "board_misoc", "build_misoc", "byteorder", + "cc", + "compiler_builtins", "crc", + "dlmalloc", + "fortanix-sgx-abi", + "getopts", + "hashbrown", + "hermit-abi", + "libc 0.2.79", + "memchr", + "miniz_oxide 0.4.0", "riscv", + "rustc-demangle", "smoltcp", + "unicode-width", + "wasi", ] [[package]] @@ -98,9 +144,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "cc" -version = "1.0.70" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26a6ce4b6a484fa3edb70f7efa6fc430fd2b87285fe8b84304fd0936faa0dc0" +checksum = "ef611cc68ff783f18535d77ddd080185275713d852c4f5cbb6122c462a7a825c" [[package]] name = "cfg-if" @@ -120,6 +166,15 @@ version = "0.1.39" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3748f82c7d366a0b4950257d19db685d4958d2fa27c6d164a3f069fec42b748b" +[[package]] +name = "cpp_demangle" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eeaa953eaad386a53111e47172c2fedba671e5684c8dd601a5f474f4f118710f" +dependencies = [ + "cfg-if 1.0.0", +] + [[package]] name = "crc" version = "1.8.1" @@ -129,12 +184,30 @@ dependencies = [ "build_const", ] +[[package]] +name = "crc32fast" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +dependencies = [ + "cfg-if 1.0.0", +] + [[package]] name = "cslice" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f8cb7306107e4b10e64994de6d3274bd08996a7c1322a27b86482392f96be0a" +[[package]] +name = "dlmalloc" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "332570860c2edf2d57914987bf9e24835425f75825086b6ba7d1e6a3e4f1f254" +dependencies = [ + "libc 0.2.79", +] + [[package]] name = "dyld" version = "0.0.0" @@ -143,7 +216,6 @@ version = "0.0.0" name = "eh" version = "0.0.0" dependencies = [ - "compiler_builtins", "cslice", "libc 0.1.0", "unwind", @@ -166,12 +238,71 @@ dependencies = [ "synstructure", ] +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "flate2" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +dependencies = [ + "crc32fast", + "miniz_oxide 0.7.2", +] + +[[package]] +name = "fortanix-sgx-abi" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c56c422ef86062869b2d57ae87270608dc5929969dd130a6e248979cf4fb6ca6" + [[package]] name = "fringe" version = "1.2.1" source = "git+https://git.m-labs.hk/M-Labs/libfringe.git?rev=3ecbe5#3ecbe53f7644b18ee46ebd5b2ca12c9cbceec43a" dependencies = [ - "libc 0.2.101", + "libc 0.2.79", +] + +[[package]] +name = "getopts" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "gimli" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" +dependencies = [ + "fallible-iterator", + "stable_deref_trait", +] + +[[package]] +name = "hashbrown" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00d63df3d41950fb462ed38308eea019113ad1508da725bbedcd0fa5a85ef5f7" +dependencies = [ + "ahash", +] + +[[package]] +name = "hermit-abi" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +dependencies = [ + "libc 0.2.79", ] [[package]] @@ -212,9 +343,9 @@ version = "0.1.0" [[package]] name = "libc" -version = "0.2.101" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb00336871be5ed2c8ed44b60ae9959dc5b9f08539422ed43f09e34ecaeba21" +checksum = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743" [[package]] name = "log" @@ -254,9 +385,37 @@ 
checksum = "0ca88d725a0a943b096803bd34e73a4437208b6077654cc4ecb2947a5f91618d" [[package]] name = "memchr" -version = "2.4.1" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" + +[[package]] +name = "miniz_oxide" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +dependencies = [ + "adler 0.2.3", +] + +[[package]] +name = "miniz_oxide" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +dependencies = [ + "adler 1.0.2", +] + +[[package]] +name = "object" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" +dependencies = [ + "flate2", + "wasmparser", +] [[package]] name = "proto_artiq" @@ -280,9 +439,9 @@ checksum = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" [[package]] name = "regex" -version = "1.5.4" +version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +checksum = "2a26af418b574bd56588335b3a3659a65725d4e636eb1016c2f9e3b38c7cc759" dependencies = [ "aho-corasick", "memchr", @@ -291,9 +450,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "riscv" @@ -342,6 +501,12 @@ dependencies = [ "unwind_backtrace", ] +[[package]] +name = "rustc-demangle" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" + [[package]] name = "rustc_version" version = "0.2.3" @@ -382,6 +547,12 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +[[package]] +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + [[package]] name = "smoltcp" version = "0.8.2" @@ -393,6 +564,12 @@ dependencies = [ "managed 0.8.0", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "syn" version = "0.11.11" @@ -433,6 +610,12 @@ dependencies = [ "log", ] +[[package]] +name = "unicode-width" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" + [[package]] name = "unicode-xid" version = "0.0.4" @@ -454,3 +637,15 @@ dependencies = [ "libc 0.1.0", "unwind", ] + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + 
+[[package]] +name = "wasmparser" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32fddd575d477c6e9702484139cf9f23dcd554b06d185ed0f56c857dd3a47aa6" diff --git a/artiq/firmware/bootloader/Cargo.toml b/artiq/firmware/bootloader/Cargo.toml index ac0df1001..58cf68e33 100644 --- a/artiq/firmware/bootloader/Cargo.toml +++ b/artiq/firmware/bootloader/Cargo.toml @@ -13,8 +13,25 @@ path = "main.rs" build_misoc = { path = "../libbuild_misoc" } [dependencies] -byteorder = { version = "1.0", default-features = false } +byteorder = { version = "=1.4.3", default-features = false } crc = { version = "1.7", default-features = false } board_misoc = { path = "../libboard_misoc", features = ["uart_console", "smoltcp"] } smoltcp = { version = "0.8.2", default-features = false, features = ["medium-ethernet", "proto-ipv4", "proto-ipv6", "socket-tcp"] } riscv = { version = "0.6.0", features = ["inline-asm"] } + +# insanity required by using cargo build-std over xbuild with nix +[dev-dependencies] +getopts = "=0.2.21" +libc = "=0.2.79" +unicode-width = "=0.1.8" +addr2line = "=0.14.0" +memchr = "=2.3.4" +hashbrown = "=0.9.0" +miniz_oxide = "=0.4.0" +rustc-demangle = "=0.1.18" +hermit-abi = "=0.1.17" +dlmalloc = "=0.2.1" +wasi = "=0.9.0" +fortanix-sgx-abi = "=0.3.3" +cc = "=1.0.60" +compiler_builtins = "=0.1.39" \ No newline at end of file diff --git a/artiq/firmware/bootloader/Makefile b/artiq/firmware/bootloader/Makefile index 82baef3eb..032f6df8c 100644 --- a/artiq/firmware/bootloader/Makefile +++ b/artiq/firmware/bootloader/Makefile @@ -3,8 +3,6 @@ include $(MISOC_DIRECTORY)/software/common.mak RUSTFLAGS += -Cpanic=abort -export XBUILD_SYSROOT_PATH=$(BUILDINC_DIRECTORY)/../sysroot - all:: bootloader.bin .PHONY: $(RUSTOUT)/libbootloader.a diff --git a/artiq/firmware/ksupport/Makefile b/artiq/firmware/ksupport/Makefile index 7a7e26d85..ff73b4b65 100644 --- a/artiq/firmware/ksupport/Makefile +++ b/artiq/firmware/ksupport/Makefile @@ -14,8 +14,6 @@ LDFLAGS += --eh-frame-hdr \ RUSTFLAGS += -Cpanic=unwind -export XBUILD_SYSROOT_PATH=$(BUILDINC_DIRECTORY)/../sysroot - all:: ksupport.elf .PHONY: $(RUSTOUT)/libksupport.a diff --git a/artiq/firmware/libboard_misoc/lib.rs b/artiq/firmware/libboard_misoc/lib.rs index 7ba835dcf..4374a44f9 100644 --- a/artiq/firmware/libboard_misoc/lib.rs +++ b/artiq/firmware/libboard_misoc/lib.rs @@ -1,5 +1,6 @@ #![no_std] #![feature(llvm_asm)] +#![feature(asm)] extern crate byteorder; #[cfg(feature = "log")] diff --git a/artiq/firmware/libeh/Cargo.toml b/artiq/firmware/libeh/Cargo.toml index 6998fcd35..3d6837df0 100644 --- a/artiq/firmware/libeh/Cargo.toml +++ b/artiq/firmware/libeh/Cargo.toml @@ -10,5 +10,4 @@ path = "lib.rs" [dependencies] cslice = { version = "0.3" } libc = { path = "../libc" } -unwind = { path = "../libunwind" } -compiler_builtins = "=0.1.39" # A dependency of alloc, libeh doesn't need it +unwind = { path = "../libunwind" } \ No newline at end of file diff --git a/artiq/firmware/runtime/Cargo.toml b/artiq/firmware/runtime/Cargo.toml index af09ca646..d8dbe38d6 100644 --- a/artiq/firmware/runtime/Cargo.toml +++ b/artiq/firmware/runtime/Cargo.toml @@ -17,7 +17,7 @@ failure = { version = "0.1", default-features = false } failure_derive = { version = "0.1", default-features = false } byteorder = { version = "1.0", default-features = false } cslice = { version = "0.3" } -log = { version = "0.4", default-features = false } +log = { version = "=0.4.14", default-features = false } managed = { version = "^0.7.1", 
default-features = false, features = ["alloc", "map"] } dyld = { path = "../libdyld" } eh = { path = "../libeh" } diff --git a/artiq/firmware/runtime/Makefile b/artiq/firmware/runtime/Makefile index 2508b66b4..17b587f88 100644 --- a/artiq/firmware/runtime/Makefile +++ b/artiq/firmware/runtime/Makefile @@ -10,8 +10,6 @@ LDFLAGS += \ RUSTFLAGS += -Cpanic=unwind -export XBUILD_SYSROOT_PATH=$(BUILDINC_DIRECTORY)/../sysroot - all:: runtime.bin runtime.fbi .PHONY: $(RUSTOUT)/libruntime.a diff --git a/artiq/firmware/satman/Makefile b/artiq/firmware/satman/Makefile index a7aab9e21..fdd867288 100644 --- a/artiq/firmware/satman/Makefile +++ b/artiq/firmware/satman/Makefile @@ -10,8 +10,6 @@ LDFLAGS += \ RUSTFLAGS += -Cpanic=unwind -export XBUILD_SYSROOT_PATH=$(BUILDINC_DIRECTORY)/../sysroot - all:: satman.bin satman.fbi .PHONY: $(RUSTOUT)/libsatman.a diff --git a/flake.nix b/flake.nix index 1b2015c2d..a5816e931 100644 --- a/flake.nix +++ b/flake.nix @@ -43,10 +43,6 @@ cargo = rust; }); - cargo-xbuild = pkgs.cargo-xbuild.overrideAttrs(oa: { - postPatch = "substituteInPlace src/sysroot.rs --replace 2021 2018"; - }); - vivadoDeps = pkgs: with pkgs; let # Apply patch from https://github.com/nix-community/nix-environments/pull/54 # to fix ncurses libtinfo.so's soname issue @@ -250,7 +246,6 @@ nativeBuildInputs = [ (pkgs.python3.withPackages(ps: [ migen misoc (artiq.withExperimentalFeatures experimentalFeatures) ps.packaging ])) rust - cargo-xbuild pkgs.llvmPackages_14.clang-unwrapped pkgs.llvm_14 pkgs.lld_14 @@ -425,7 +420,6 @@ buildInputs = [ (pkgs.python3.withPackages(ps: with packages.x86_64-linux; [ migen misoc ps.paramiko microscope ps.packaging ] ++ artiq.propagatedBuildInputs )) rust - cargo-xbuild pkgs.llvmPackages_14.clang-unwrapped pkgs.llvm_14 pkgs.lld_14 @@ -456,7 +450,6 @@ buildInputs = [ (pkgs.python3.withPackages(ps: with packages.x86_64-linux; [ migen misoc artiq ps.packaging ])) rust - cargo-xbuild pkgs.llvmPackages_14.clang-unwrapped pkgs.llvm_14 pkgs.lld_14 From 688e643078e59416a93c92cd53952e3ab6b9bb8d Mon Sep 17 00:00:00 2001 From: mwojcik Date: Fri, 3 May 2024 15:19:20 +0800 Subject: [PATCH 234/296] firmware: update rust to 2021-09-01 nightly --- artiq/firmware/Cargo.lock | 107 +++++++++++------- artiq/firmware/bootloader/Cargo.toml | 23 ++-- artiq/firmware/ksupport/eh_artiq.rs | 27 ++--- artiq/firmware/ksupport/lib.rs | 56 +++------ artiq/firmware/libboard_artiq/lib.rs | 2 +- artiq/firmware/libboard_artiq/mailbox.rs | 6 +- artiq/firmware/libboard_misoc/lib.rs | 1 - artiq/firmware/libboard_misoc/riscv32/boot.rs | 25 ++-- .../firmware/libboard_misoc/riscv32/cache.rs | 22 ++-- artiq/firmware/libeh/lib.rs | 2 +- artiq/firmware/libunwind/src/lib.rs | 2 +- artiq/firmware/libunwind/src/libunwind.rs | 20 +++- artiq/firmware/riscv32g-unknown-none-elf.json | 16 +-- .../firmware/riscv32ima-unknown-none-elf.json | 16 +-- artiq/firmware/runtime/rtio_mgt.rs | 32 +++--- artiq/firmware/runtime/session.rs | 9 +- artiq/firmware/satman/kernel.rs | 16 +-- artiq/firmware/satman/main.rs | 2 +- artiq/test/libartiq_support/lib.rs | 2 +- flake.nix | 4 +- 20 files changed, 189 insertions(+), 201 deletions(-) diff --git a/artiq/firmware/Cargo.lock b/artiq/firmware/Cargo.lock index f129d0d28..5bfb45676 100644 --- a/artiq/firmware/Cargo.lock +++ b/artiq/firmware/Cargo.lock @@ -1,10 +1,13 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +# And yet, manual edits have been made. Crate yanking should be illegal. 
+version = 3 + [[package]] name = "addr2line" -version = "0.14.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" +checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd" dependencies = [ "cpp_demangle", "fallible-iterator", @@ -28,15 +31,20 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.4.8" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0453232ace82dee0dd0b4c87a59bd90f7b53b314f3e0f61fe2ee7c8a16482289" +checksum = "efa60d2eadd8b12a996add391db32bd1153eac697ba4869660c0016353611426" +dependencies = [ + "getrandom", + "once_cell", + "version_check", +] [[package]] name = "aho-corasick" -version = "0.7.15" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] @@ -105,6 +113,7 @@ name = "bootloader" version = "0.0.0" dependencies = [ "addr2line", + "ahash", "board_misoc", "build_misoc", "byteorder", @@ -114,15 +123,18 @@ dependencies = [ "dlmalloc", "fortanix-sgx-abi", "getopts", + "getrandom", "hashbrown", "hermit-abi", - "libc 0.2.79", - "memchr", + "libc 0.2.99", "miniz_oxide 0.4.0", + "object", + "once_cell", "riscv", "rustc-demangle", "smoltcp", "unicode-width", + "version_check", "wasi", ] @@ -144,9 +156,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "cc" -version = "1.0.60" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef611cc68ff783f18535d77ddd080185275713d852c4f5cbb6122c462a7a825c" +checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2" [[package]] name = "cfg-if" @@ -162,9 +174,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "compiler_builtins" -version = "0.1.39" +version = "0.1.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3748f82c7d366a0b4950257d19db685d4958d2fa27c6d164a3f069fec42b748b" +checksum = "20b1438ef42c655665a8ab2c1c6d605a305f031d38d9be689ddfef41a20f3aa2" [[package]] name = "cpp_demangle" @@ -205,7 +217,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "332570860c2edf2d57914987bf9e24835425f75825086b6ba7d1e6a3e4f1f254" dependencies = [ - "libc 0.2.79", + "libc 0.2.99", ] [[package]] @@ -265,7 +277,7 @@ name = "fringe" version = "1.2.1" source = "git+https://git.m-labs.hk/M-Labs/libfringe.git?rev=3ecbe5#3ecbe53f7644b18ee46ebd5b2ca12c9cbceec43a" dependencies = [ - "libc 0.2.79", + "libc 0.2.99", ] [[package]] @@ -278,10 +290,21 @@ dependencies = [ ] [[package]] -name = "gimli" -version = "0.23.0" +name = "getrandom" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" +checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" +dependencies = [ + "cfg-if 0.1.10", + "libc 0.2.99", + "wasi", +] + +[[package]] +name = "gimli" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7" dependencies = [ 
"fallible-iterator", "stable_deref_trait", @@ -289,20 +312,20 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.9.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00d63df3d41950fb462ed38308eea019113ad1508da725bbedcd0fa5a85ef5f7" +checksum = "362385356d610bd1e5a408ddf8d022041774b683f345a1d2cfcb4f60f8ae2db5" dependencies = [ "ahash", ] [[package]] name = "hermit-abi" -version = "0.1.17" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ - "libc 0.2.79", + "libc 0.2.99", ] [[package]] @@ -343,9 +366,9 @@ version = "0.1.0" [[package]] name = "libc" -version = "0.2.79" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743" +checksum = "a7f823d141fe0a24df1e23b4af4e3c7ba9e5966ec514ea068c93024aa7deb765" [[package]] name = "log" @@ -385,9 +408,9 @@ checksum = "0ca88d725a0a943b096803bd34e73a4437208b6077654cc4ecb2947a5f91618d" [[package]] name = "memchr" -version = "2.3.4" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "miniz_oxide" @@ -409,14 +432,20 @@ dependencies = [ [[package]] name = "object" -version = "0.22.0" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" +checksum = "39f37e50073ccad23b6d09bcb5b263f4e76d3bb6038e4a3c08e52162ffa8abc2" dependencies = [ "flate2", - "wasmparser", + "memchr", ] +[[package]] +name = "once_cell" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" + [[package]] name = "proto_artiq" version = "0.0.0" @@ -439,9 +468,9 @@ checksum = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" [[package]] name = "regex" -version = "1.4.6" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a26af418b574bd56588335b3a3659a65725d4e636eb1016c2f9e3b38c7cc759" +checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" dependencies = [ "aho-corasick", "memchr", @@ -503,9 +532,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.18" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" [[package]] name = "rustc_version" @@ -638,14 +667,14 @@ dependencies = [ "unwind", ] +[[package]] +name = "version_check" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" + [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasmparser" -version = "0.57.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "32fddd575d477c6e9702484139cf9f23dcd554b06d185ed0f56c857dd3a47aa6" diff --git a/artiq/firmware/bootloader/Cargo.toml b/artiq/firmware/bootloader/Cargo.toml index 58cf68e33..911864e30 100644 --- a/artiq/firmware/bootloader/Cargo.toml +++ b/artiq/firmware/bootloader/Cargo.toml @@ -20,18 +20,23 @@ smoltcp = { version = "0.8.2", default-features = false, features = ["medium-eth riscv = { version = "0.6.0", features = ["inline-asm"] } # insanity required by using cargo build-std over xbuild with nix +# cargo update does not work thanks to ahash 0.7 problems [dev-dependencies] getopts = "=0.2.21" -libc = "=0.2.79" +libc = "=0.2.99" unicode-width = "=0.1.8" -addr2line = "=0.14.0" -memchr = "=2.3.4" -hashbrown = "=0.9.0" +addr2line = "=0.16.0" +hashbrown = "=0.11.0" # must be injected into lockfile manually +ahash = "=0.7.0" # must be injected into lockfile manually miniz_oxide = "=0.4.0" -rustc-demangle = "=0.1.18" -hermit-abi = "=0.1.17" +rustc-demangle = "=0.1.21" +hermit-abi = "=0.1.19" dlmalloc = "=0.2.1" -wasi = "=0.9.0" fortanix-sgx-abi = "=0.3.3" -cc = "=1.0.60" -compiler_builtins = "=0.1.39" \ No newline at end of file +cc = "=1.0.69" +compiler_builtins = "=0.1.49" +version_check = "=0.9.3" +once_cell = "=1.8.0" +wasi = "=0.9.0" +getrandom = "=0.2.0" +object = "=0.26.2" \ No newline at end of file diff --git a/artiq/firmware/ksupport/eh_artiq.rs b/artiq/firmware/ksupport/eh_artiq.rs index 80d999304..04c6e723e 100644 --- a/artiq/firmware/ksupport/eh_artiq.rs +++ b/artiq/firmware/ksupport/eh_artiq.rs @@ -55,12 +55,14 @@ struct ExceptionBuffer { exception_count: usize, } +const EXCEPTION: uw::_Unwind_Exception = uw::_Unwind_Exception { + exception_class: EXCEPTION_CLASS, + exception_cleanup: cleanup, + private: [0; uw::unwinder_private_data_size], +}; + static mut EXCEPTION_BUFFER: ExceptionBuffer = ExceptionBuffer { - uw_exceptions: [uw::_Unwind_Exception { - exception_class: EXCEPTION_CLASS, - exception_cleanup: cleanup, - private: [0; uw::unwinder_private_data_size], - }; MAX_INFLIGHT_EXCEPTIONS], + uw_exceptions: [EXCEPTION; MAX_INFLIGHT_EXCEPTIONS], exceptions: [None; MAX_INFLIGHT_EXCEPTIONS + 1], exception_stack: [-1; MAX_INFLIGHT_EXCEPTIONS + 1], backtrace: [(0, 0); MAX_BACKTRACE_SIZE], @@ -74,11 +76,7 @@ static mut EXCEPTION_BUFFER: ExceptionBuffer = ExceptionBuffer { }; pub unsafe extern fn reset_exception_buffer(payload_addr: usize) { - EXCEPTION_BUFFER.uw_exceptions = [uw::_Unwind_Exception { - exception_class: EXCEPTION_CLASS, - exception_cleanup: cleanup, - private: [0; uw::unwinder_private_data_size], - }; MAX_INFLIGHT_EXCEPTIONS]; + EXCEPTION_BUFFER.uw_exceptions = [EXCEPTION; MAX_INFLIGHT_EXCEPTIONS]; EXCEPTION_BUFFER.exceptions = [None; MAX_INFLIGHT_EXCEPTIONS + 1]; EXCEPTION_BUFFER.exception_stack = [-1; MAX_INFLIGHT_EXCEPTIONS + 1]; EXCEPTION_BUFFER.backtrace_size = 0; @@ -151,8 +149,7 @@ pub extern fn personality(version: c_int, } #[export_name="__artiq_raise"] -#[unwind(allowed)] -pub unsafe extern fn raise(exception: *const Exception) -> ! { +pub unsafe extern "C-unwind" fn raise(exception: *const Exception) -> ! { let count = EXCEPTION_BUFFER.exception_count; let stack = &mut EXCEPTION_BUFFER.exception_stack; let diff = exception as isize - EXCEPTION_BUFFER.exceptions.as_ptr() as isize; @@ -222,8 +219,7 @@ pub unsafe extern fn raise(exception: *const Exception) -> ! { #[export_name="__artiq_resume"] -#[unwind(allowed)] -pub unsafe extern fn resume() -> ! { +pub unsafe extern "C-unwind" fn resume() -> ! 
{ assert!(EXCEPTION_BUFFER.exception_count != 0); let i = EXCEPTION_BUFFER.exception_stack[EXCEPTION_BUFFER.exception_count - 1]; assert!(i != -1); @@ -233,8 +229,7 @@ pub unsafe extern fn resume() -> ! { } #[export_name="__artiq_end_catch"] -#[unwind(allowed)] -pub unsafe extern fn end_catch() { +pub unsafe extern "C-unwind" fn end_catch() { let mut count = EXCEPTION_BUFFER.exception_count; assert!(count != 0); // we remove all exceptions with SP <= current exception SP diff --git a/artiq/firmware/ksupport/lib.rs b/artiq/firmware/ksupport/lib.rs index 0bbb40df1..42f9de3e5 100644 --- a/artiq/firmware/ksupport/lib.rs +++ b/artiq/firmware/ksupport/lib.rs @@ -1,5 +1,5 @@ -#![feature(lang_items, llvm_asm, panic_unwind, libc, unwind_attributes, - panic_info_message, nll, const_in_array_repeat_expressions)] +#![feature(lang_items, asm, panic_unwind, libc, + panic_info_message, nll, c_unwind)] #![no_std] extern crate libc; @@ -30,8 +30,9 @@ fn send(request: &Message) { } fn recv R>(f: F) -> R { - while mailbox::receive() == 0 {} - let result = f(unsafe { &*(mailbox::receive() as *const Message) }); + let mut msg_ptr = 0; + while msg_ptr == 0 { msg_ptr = mailbox::receive(); } + let result = f(unsafe { &*(msg_ptr as *const Message) }); mailbox::acknowledge(); result } @@ -121,7 +122,6 @@ pub extern fn send_to_rtio_log(text: CSlice) { rtio::log(text.as_ref()) } -#[unwind(aborts)] extern fn rpc_send(service: u32, tag: &CSlice, data: *const *const ()) { while !rpc_queue::empty() {} send(&RpcSend { @@ -132,7 +132,6 @@ extern fn rpc_send(service: u32, tag: &CSlice, data: *const *const ()) { }) } -#[unwind(aborts)] extern fn rpc_send_async(service: u32, tag: &CSlice, data: *const *const ()) { while rpc_queue::full() {} rpc_queue::enqueue(|mut slice| { @@ -170,8 +169,7 @@ extern fn rpc_send_async(service: u32, tag: &CSlice, data: *const *const ()) /// to the maximum required for any of the possible types according to the target ABI). /// /// If the RPC call resulted in an exception, it is reconstructed and raised. 
-#[unwind(allowed)] -extern fn rpc_recv(slot: *mut ()) -> usize { +extern "C-unwind" fn rpc_recv(slot: *mut ()) -> usize { send(&RpcRecvRequest(slot)); recv!(&RpcRecvReply(ref result) => { match result { @@ -203,7 +201,6 @@ fn terminate(exceptions: &'static [Option>], loop {} } -#[unwind(aborts)] extern fn cache_get<'a>(key: &CSlice) -> *const CSlice<'a, i32> { send(&CacheGetRequest { key: str::from_utf8(key.as_ref()).unwrap() @@ -213,8 +210,7 @@ extern fn cache_get<'a>(key: &CSlice) -> *const CSlice<'a, i32> { }) } -#[unwind(allowed)] -extern fn cache_put(key: &CSlice, list: &CSlice) { +extern "C-unwind" fn cache_put(key: &CSlice, list: &CSlice) { send(&CachePutRequest { key: str::from_utf8(key.as_ref()).unwrap(), value: list.as_ref() @@ -247,8 +243,7 @@ fn dma_record_flush() { } } -#[unwind(allowed)] -extern fn dma_record_start(name: &CSlice) { +extern "C-unwind" fn dma_record_start(name: &CSlice) { let name = str::from_utf8(name.as_ref()).unwrap(); unsafe { @@ -267,8 +262,7 @@ extern fn dma_record_start(name: &CSlice) { } } -#[unwind(allowed)] -extern fn dma_record_stop(duration: i64, enable_ddma: bool) { +extern "C-unwind" fn dma_record_stop(duration: i64, enable_ddma: bool) { unsafe { dma_record_flush(); @@ -290,7 +284,6 @@ extern fn dma_record_stop(duration: i64, enable_ddma: bool) { } } -#[unwind(aborts)] #[inline(always)] unsafe fn dma_record_output_prepare(timestamp: i64, target: i32, words: usize) -> &'static mut [u8] { @@ -327,7 +320,6 @@ unsafe fn dma_record_output_prepare(timestamp: i64, target: i32, data } -#[unwind(aborts)] extern fn dma_record_output(target: i32, word: i32) { unsafe { let timestamp = ((csr::rtio::now_hi_read() as i64) << 32) | (csr::rtio::now_lo_read() as i64); @@ -341,7 +333,6 @@ extern fn dma_record_output(target: i32, word: i32) { } } -#[unwind(aborts)] extern fn dma_record_output_wide(target: i32, words: &CSlice) { assert!(words.len() <= 16); // enforce the hardware limit @@ -360,7 +351,6 @@ extern fn dma_record_output_wide(target: i32, words: &CSlice) { } } -#[unwind(aborts)] extern fn dma_erase(name: &CSlice) { let name = str::from_utf8(name.as_ref()).unwrap(); @@ -374,8 +364,7 @@ struct DmaTrace { uses_ddma: bool, } -#[unwind(allowed)] -extern fn dma_retrieve(name: &CSlice) -> DmaTrace { +extern "C-unwind" fn dma_retrieve(name: &CSlice) -> DmaTrace { let name = str::from_utf8(name.as_ref()).unwrap(); send(&DmaRetrieveRequest { name: name }); @@ -396,8 +385,7 @@ extern fn dma_retrieve(name: &CSlice) -> DmaTrace { } #[cfg(kernel_has_rtio_dma)] -#[unwind(allowed)] -extern fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) { +extern "C-unwind" fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) { assert!(ptr % 64 == 0); unsafe { @@ -454,15 +442,13 @@ extern fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) { } #[cfg(all(not(kernel_has_rtio_dma), not(has_rtio_dma)))] -#[unwind(allowed)] -extern fn dma_playback(_timestamp: i64, _ptr: i32, _uses_ddma: bool) { +extern "C-unwind" fn dma_playback(_timestamp: i64, _ptr: i32, _uses_ddma: bool) { unimplemented!("not(kernel_has_rtio_dma)") } // for satellite (has_rtio_dma but not in kernel) #[cfg(all(not(kernel_has_rtio_dma), has_rtio_dma))] -#[unwind(allowed)] -extern fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) { +extern "C-unwind" fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) { // DDMA is always used on satellites, so the `uses_ddma` setting is ignored // StartRemoteRequest reused as "normal" start request send(&DmaStartRemoteRequest { id: ptr as i32, 
timestamp: timestamp }); @@ -486,8 +472,7 @@ extern fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) { } -#[unwind(allowed)] -extern fn subkernel_load_run(id: u32, destination: u8, run: bool) { +extern "C-unwind" fn subkernel_load_run(id: u32, destination: u8, run: bool) { send(&SubkernelLoadRunRequest { id: id, destination: destination, run: run }); recv!(&SubkernelLoadRunReply { succeeded } => { if !succeeded { @@ -497,8 +482,7 @@ extern fn subkernel_load_run(id: u32, destination: u8, run: bool) { }); } -#[unwind(allowed)] -extern fn subkernel_await_finish(id: u32, timeout: i64) { +extern "C-unwind" fn subkernel_await_finish(id: u32, timeout: i64) { send(&SubkernelAwaitFinishRequest { id: id, timeout: timeout }); recv!(SubkernelAwaitFinishReply { status } => { match status { @@ -515,7 +499,6 @@ extern fn subkernel_await_finish(id: u32, timeout: i64) { }) } -#[unwind(aborts)] extern fn subkernel_send_message(id: u32, is_return: bool, destination: u8, count: u8, tag: &CSlice, data: *const *const ()) { send(&SubkernelMsgSend { @@ -527,8 +510,7 @@ extern fn subkernel_send_message(id: u32, is_return: bool, destination: u8, }); } -#[unwind(allowed)] -extern fn subkernel_await_message(id: i32, timeout: i64, tags: &CSlice, min: u8, max: u8) -> u8 { +extern "C-unwind" fn subkernel_await_message(id: i32, timeout: i64, tags: &CSlice, min: u8, max: u8) -> u8 { send(&SubkernelMsgRecvRequest { id: id, timeout: timeout, tags: tags.as_ref() }); recv!(SubkernelMsgRecvReply { status, count } => { match status { @@ -653,8 +635,7 @@ pub unsafe fn main() { } #[no_mangle] -#[unwind(allowed)] -pub unsafe extern fn exception(_regs: *const u32) { +pub unsafe extern "C-unwind" fn exception(_regs: *const u32) { let pc = mepc::read(); let cause = mcause::read().cause(); let mtval = mtval::read(); @@ -672,7 +653,6 @@ pub unsafe extern fn exception(_regs: *const u32) { } #[no_mangle] -#[unwind(allowed)] -pub extern fn abort() { +pub extern "C-unwind" fn abort() { panic!("aborted") } diff --git a/artiq/firmware/libboard_artiq/lib.rs b/artiq/firmware/libboard_artiq/lib.rs index f564c98e4..e18dd1ddd 100644 --- a/artiq/firmware/libboard_artiq/lib.rs +++ b/artiq/firmware/libboard_artiq/lib.rs @@ -1,4 +1,4 @@ -#![feature(lang_items, never_type)] +#![feature(asm, lang_items, never_type)] #![no_std] extern crate failure; diff --git a/artiq/firmware/libboard_artiq/mailbox.rs b/artiq/firmware/libboard_artiq/mailbox.rs index 9c1f374f6..fd4ed7d67 100644 --- a/artiq/firmware/libboard_artiq/mailbox.rs +++ b/artiq/firmware/libboard_artiq/mailbox.rs @@ -6,7 +6,11 @@ static mut LAST: usize = 0; pub unsafe fn send(data: usize) { LAST = data; - write_volatile(MAILBOX, data) + // after Rust toolchain update to LLVM12, this empty asm! 
block is required + // to ensure that the compiler doesn't take any shortcuts + // otherwise, the comm CPU will read garbage data and crash + asm!("", options(preserves_flags, readonly, nostack)); + write_volatile(MAILBOX, data); } pub fn acknowledged() -> bool { diff --git a/artiq/firmware/libboard_misoc/lib.rs b/artiq/firmware/libboard_misoc/lib.rs index 4374a44f9..8f56e3f65 100644 --- a/artiq/firmware/libboard_misoc/lib.rs +++ b/artiq/firmware/libboard_misoc/lib.rs @@ -1,5 +1,4 @@ #![no_std] -#![feature(llvm_asm)] #![feature(asm)] extern crate byteorder; diff --git a/artiq/firmware/libboard_misoc/riscv32/boot.rs b/artiq/firmware/libboard_misoc/riscv32/boot.rs index 0d2254da1..1ef0bd44f 100644 --- a/artiq/firmware/libboard_misoc/riscv32/boot.rs +++ b/artiq/firmware/libboard_misoc/riscv32/boot.rs @@ -2,29 +2,26 @@ use super::{cache, pmp}; use riscv::register::*; pub unsafe fn reset() -> ! { - llvm_asm!(r#" - j _reset_handler - nop - "# : : : : "volatile"); - loop {} + asm!("j _reset_handler", + "nop", + options(nomem, nostack, noreturn) + ); } pub unsafe fn jump(addr: usize) -> ! { cache::flush_cpu_icache(); - llvm_asm!(r#" - jalr x0, 0($0) - nop - "# : : "r"(addr) : : "volatile"); - loop {} + asm!("jalr x0, 0({0})", + "nop", + in(reg) addr, + options(nomem, nostack, noreturn) + ); } pub unsafe fn start_user(addr: usize) -> ! { pmp::enable_user_memory(); mstatus::set_mpp(mstatus::MPP::User); mepc::write(addr); - llvm_asm!( - "mret" - : : : : "volatile" + asm!("mret", + options(nomem, nostack, noreturn) ); - unreachable!() } diff --git a/artiq/firmware/libboard_misoc/riscv32/cache.rs b/artiq/firmware/libboard_misoc/riscv32/cache.rs index 12fc9f3bb..ac0b4ed12 100644 --- a/artiq/firmware/libboard_misoc/riscv32/cache.rs +++ b/artiq/firmware/libboard_misoc/riscv32/cache.rs @@ -7,20 +7,24 @@ use mem; pub fn flush_cpu_icache() { unsafe { - llvm_asm!(r#" - fence.i - nop - nop - nop - nop - nop - "# : : : : "volatile"); + asm!( + "fence.i", + "nop", + "nop", + "nop", + "nop", + "nop", + options(preserves_flags, nostack) + ); } } pub fn flush_cpu_dcache() { unsafe { - llvm_asm!(".word(0x500F)" : : : : "volatile"); + asm!( + ".word(0x500F)", + options(preserves_flags) + ); } } diff --git a/artiq/firmware/libeh/lib.rs b/artiq/firmware/libeh/lib.rs index 4fdc4b880..dfdbf6034 100644 --- a/artiq/firmware/libeh/lib.rs +++ b/artiq/firmware/libeh/lib.rs @@ -1,4 +1,4 @@ -#![feature(lang_items, panic_unwind, libc, unwind_attributes, int_bits_const)] +#![feature(lang_items, panic_unwind, libc)] #![no_std] extern crate cslice; diff --git a/artiq/firmware/libunwind/src/lib.rs b/artiq/firmware/libunwind/src/lib.rs index a087a59ec..5567320c3 100644 --- a/artiq/firmware/libunwind/src/lib.rs +++ b/artiq/firmware/libunwind/src/lib.rs @@ -3,8 +3,8 @@ #![feature(link_cfg)] #![feature(nll)] #![feature(staged_api)] -#![feature(unwind_attributes)] #![feature(static_nobundle)] +#![feature(c_unwind)] #![cfg_attr(not(target_env = "msvc"), feature(libc))] mod libunwind; diff --git a/artiq/firmware/libunwind/src/libunwind.rs b/artiq/firmware/libunwind/src/libunwind.rs index 646bf912e..35a5ed51f 100644 --- a/artiq/firmware/libunwind/src/libunwind.rs +++ b/artiq/firmware/libunwind/src/libunwind.rs @@ -81,9 +81,14 @@ pub type _Unwind_Exception_Cleanup_Fn = all(feature = "llvm-libunwind", any(target_os = "fuchsia", target_os = "linux")), link(name = "unwind", kind = "static") )] -extern "C" { - #[unwind(allowed)] +extern "C-unwind" { pub fn _Unwind_Resume(exception: *mut _Unwind_Exception) -> !; +} +#[cfg_attr( + all(feature = 
"llvm-libunwind", any(target_os = "fuchsia", target_os = "linux")), + link(name = "unwind", kind = "static") +)] +extern "C" { pub fn _Unwind_DeleteException(exception: *mut _Unwind_Exception); pub fn _Unwind_GetLanguageSpecificData(ctx: *mut _Unwind_Context) -> *mut c_void; pub fn _Unwind_GetRegionStart(ctx: *mut _Unwind_Context) -> _Unwind_Ptr; @@ -230,9 +235,13 @@ if #[cfg(not(all(target_os = "ios", target_arch = "arm")))] { #[cfg_attr(all(feature = "llvm-libunwind", any(target_os = "fuchsia", target_os = "linux")), link(name = "unwind", kind = "static"))] - extern "C" { - #[unwind(allowed)] + extern "C-unwind" { pub fn _Unwind_RaiseException(exception: *mut _Unwind_Exception) -> _Unwind_Reason_Code; + } + #[cfg_attr(all(feature = "llvm-libunwind", + any(target_os = "fuchsia", target_os = "linux")), + link(name = "unwind", kind = "static"))] + extern "C" { pub fn _Unwind_Backtrace(trace: _Unwind_Trace_Fn, trace_argument: *mut c_void) -> _Unwind_Reason_Code; @@ -242,8 +251,7 @@ if #[cfg(not(all(target_os = "ios", target_arch = "arm")))] { #[cfg_attr(all(feature = "llvm-libunwind", any(target_os = "fuchsia", target_os = "linux")), link(name = "unwind", kind = "static"))] - extern "C" { - #[unwind(allowed)] + extern "C-unwind" { pub fn _Unwind_SjLj_RaiseException(e: *mut _Unwind_Exception) -> _Unwind_Reason_Code; } diff --git a/artiq/firmware/riscv32g-unknown-none-elf.json b/artiq/firmware/riscv32g-unknown-none-elf.json index 2a3fb8bfb..5e980f11f 100644 --- a/artiq/firmware/riscv32g-unknown-none-elf.json +++ b/artiq/firmware/riscv32g-unknown-none-elf.json @@ -21,20 +21,6 @@ }, "relro-level": "full", "target-family": "unix", - "target-pointer-width": "32", - "unsupported-abis": [ - "cdecl", - "stdcall", - "fastcall", - "vectorcall", - "thiscall", - "aapcs", - "win64", - "sysv64", - "ptx-kernel", - "msp430-interrupt", - "x86-interrupt", - "amdgpu-kernel" - ] + "target-pointer-width": "32" } \ No newline at end of file diff --git a/artiq/firmware/riscv32ima-unknown-none-elf.json b/artiq/firmware/riscv32ima-unknown-none-elf.json index e41408842..064347edb 100644 --- a/artiq/firmware/riscv32ima-unknown-none-elf.json +++ b/artiq/firmware/riscv32ima-unknown-none-elf.json @@ -13,19 +13,5 @@ "max-atomic-width": 32, "panic-strategy": "unwind", "relocation-model": "static", - "target-pointer-width": "32", - "unsupported-abis": [ - "cdecl", - "stdcall", - "fastcall", - "vectorcall", - "thiscall", - "aapcs", - "win64", - "sysv64", - "ptx-kernel", - "msp430-interrupt", - "x86-interrupt", - "amdgpu-kernel" - ] + "target-pointer-width": "32" } diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index f08737691..1bdec0dcb 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -97,25 +97,25 @@ pub mod drtio { } fn process_async_packets(io: &Io, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, - routing_table: &drtio_routing::RoutingTable, linkno: u8, packet: drtioaux::Packet - ) -> Option { + routing_table: &drtio_routing::RoutingTable, linkno: u8, packet: &drtioaux::Packet + ) -> bool { match packet { // packets to be consumed locally drtioaux::Packet::DmaPlaybackStatus { id, source, destination: 0, error, channel, timestamp } => { - remote_dma::playback_done(io, ddma_mutex, id, source, error, channel, timestamp); - None + remote_dma::playback_done(io, ddma_mutex, *id, *source, *error, *channel, *timestamp); + true }, drtioaux::Packet::SubkernelFinished { id, destination: 0, with_exception, exception_src } => { - 
subkernel::subkernel_finished(io, subkernel_mutex, id, with_exception, exception_src); - None + subkernel::subkernel_finished(io, subkernel_mutex, *id, *with_exception, *exception_src); + true }, drtioaux::Packet::SubkernelMessage { id, source: from, destination: 0, status, length, data } => { - subkernel::message_handle_incoming(io, subkernel_mutex, id, status, length as usize, &data); + subkernel::message_handle_incoming(io, subkernel_mutex, *id, *status, *length as usize, data); // acknowledge receiving part of the message drtioaux::send(linkno, - &drtioaux::Packet::SubkernelMessageAck { destination: from } + &drtioaux::Packet::SubkernelMessageAck { destination: *from } ).unwrap(); - None + true }, // (potentially) routable packets drtioaux::Packet::DmaAddTraceRequest { destination, .. } | @@ -130,19 +130,19 @@ pub mod drtio { drtioaux::Packet::SubkernelMessageAck { destination, .. } | drtioaux::Packet::DmaPlaybackStatus { destination, .. } | drtioaux::Packet::SubkernelFinished { destination, .. } => { - if destination == 0 { - Some(packet) + if *destination == 0 { + false } else { - let dest_link = routing_table.0[destination as usize][0] - 1; + let dest_link = routing_table.0[*destination as usize][0] - 1; if dest_link == linkno { warn!("[LINK#{}] Re-routed packet would return to the same link, dropping: {:?}", linkno, packet); } else { drtioaux::send(dest_link, &packet).unwrap(); } - None + true } } - other => Some(other) + _ => false } } @@ -153,7 +153,7 @@ pub mod drtio { drtioaux::send(linkno, request).unwrap(); loop { let reply = recv_aux_timeout(io, linkno, 200)?; - if let Some(reply) = process_async_packets(io, ddma_mutex, subkernel_mutex, routing_table, linkno, reply) { + if !process_async_packets(io, ddma_mutex, subkernel_mutex, routing_table, linkno, &reply) { // returns none if it was an async packet return Ok(reply); } @@ -269,7 +269,7 @@ pub mod drtio { loop { match drtioaux::recv(linkno) { Ok(Some(packet)) => { - if let Some(packet) = process_async_packets(&io, ddma_mutex, subkernel_mutex, routing_table, linkno, packet) { + if !process_async_packets(&io, ddma_mutex, subkernel_mutex, routing_table, linkno, &packet) { warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet); } }, diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index 388a74337..5478e33ee 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -243,12 +243,13 @@ pub fn kern_send(io: &Io, request: &kern::Message) -> Result<(), Error(io: &Io, f: F) -> Result> where F: FnOnce(&kern::Message) -> Result> { - io.until(|| mailbox::receive() != 0)?; - if !kernel::validate(mailbox::receive()) { - return Err(Error::InvalidPointer(mailbox::receive())) + let mut msg_ptr = 0; + io.until(|| { msg_ptr = mailbox::receive(); msg_ptr != 0 })?; + if !kernel::validate(msg_ptr) { + return Err(Error::InvalidPointer(msg_ptr)) } - f(unsafe { &*(mailbox::receive() as *const kern::Message) }) + f(unsafe { &*(msg_ptr as *const kern::Message) }) } fn kern_recv_dotrace(reply: &kern::Message) { diff --git a/artiq/firmware/satman/kernel.rs b/artiq/firmware/satman/kernel.rs index 250f6a263..b00861abb 100644 --- a/artiq/firmware/satman/kernel.rs +++ b/artiq/firmware/satman/kernel.rs @@ -1,4 +1,4 @@ -use core::{mem, option::NoneError}; +use core::mem; use alloc::{string::String, format, vec::Vec, collections::btree_map::BTreeMap}; use cslice::AsCSlice; @@ -85,12 +85,6 @@ pub enum Error { DmaError(DmaError), } -impl From for Error { - fn from(_: NoneError) -> Error { 
- Error::KernelNotFound - } -} - impl From> for Error { fn from(_value: io::Error) -> Error { Error::SubkernelIoError @@ -330,7 +324,7 @@ impl Manager { self.kernels.insert(id, KernelLibrary { library: Vec::new(), complete: false }); - self.kernels.get_mut(&id)? + self.kernels.get_mut(&id).unwrap() } else { kernel } @@ -339,7 +333,7 @@ impl Manager { self.kernels.insert(id, KernelLibrary { library: Vec::new(), complete: false }); - self.kernels.get_mut(&id)? + self.kernels.get_mut(&id).unwrap() }, }; kernel.library.extend(&data[0..data_len]); @@ -404,7 +398,7 @@ impl Manager { if self.current_id == id && self.session.kernel_state == KernelState::Loaded { return Ok(()) } - if !self.kernels.get(&id)?.complete { + if !self.kernels.get(&id).ok_or(Error::KernelNotFound)?.complete { return Err(Error::KernelNotFound) } self.current_id = id; @@ -414,7 +408,7 @@ impl Manager { unsafe { kernel_cpu::start(); - kern_send(&kern::LoadRequest(&self.kernels.get(&id)?.library)).unwrap(); + kern_send(&kern::LoadRequest(&self.kernels.get(&id).unwrap().library)).unwrap(); kern_recv(|reply| { match reply { kern::LoadReply(Ok(())) => { diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index ec0c781bb..e8e26b59e 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -1,4 +1,4 @@ -#![feature(never_type, panic_info_message, llvm_asm, default_alloc_error_handler, try_trait)] +#![feature(never_type, panic_info_message, asm, default_alloc_error_handler)] #![no_std] #[macro_use] diff --git a/artiq/test/libartiq_support/lib.rs b/artiq/test/libartiq_support/lib.rs index 4216c85ec..4a940fd70 100644 --- a/artiq/test/libartiq_support/lib.rs +++ b/artiq/test/libartiq_support/lib.rs @@ -1,4 +1,4 @@ -#![feature(libc, panic_unwind, unwind_attributes, rustc_private, int_bits_const, const_in_array_repeat_expressions)] +#![feature(libc, panic_unwind, rustc_private, c_unwind)] #![crate_name = "artiq_support"] #![crate_type = "cdylib"] diff --git a/flake.nix b/flake.nix index a5816e931..000962e3b 100644 --- a/flake.nix +++ b/flake.nix @@ -25,8 +25,8 @@ artiqRev = self.sourceInfo.rev or "unknown"; rustManifest = pkgs.fetchurl { - url = "https://static.rust-lang.org/dist/2021-01-29/channel-rust-nightly.toml"; - sha256 = "sha256-EZKgw89AH4vxaJpUHmIMzMW/80wAFQlfcxRoBD9nz0c="; + url = "https://static.rust-lang.org/dist/2021-09-01/channel-rust-nightly.toml"; + sha256 = "sha256-KYLZHfOkotnM6BZd7CU+vBA3w/VtiWxth3ngJlmA41U="; }; targets = []; From c5d656ba3251e1583bce8dc4aaf66fbcab0a96cd Mon Sep 17 00:00:00 2001 From: mwojcik Date: Mon, 13 May 2024 11:50:01 +0800 Subject: [PATCH 235/296] drtio: increase maximum payload size --- artiq/firmware/libproto_artiq/drtioaux_proto.rs | 2 +- artiq/firmware/runtime/rtio_mgt.rs | 2 +- artiq/firmware/runtime/session.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/artiq/firmware/libproto_artiq/drtioaux_proto.rs b/artiq/firmware/libproto_artiq/drtioaux_proto.rs index 2779b7207..a2e51ca65 100644 --- a/artiq/firmware/libproto_artiq/drtioaux_proto.rs +++ b/artiq/firmware/libproto_artiq/drtioaux_proto.rs @@ -16,7 +16,7 @@ impl From> for Error { // maximum size of arbitrary payloads // used by satellite -> master analyzer, subkernel exceptions -pub const SAT_PAYLOAD_MAX_SIZE: usize = /*max size*/512 - /*CRC*/4 - /*packet ID*/1 - /*last*/1 - /*length*/2; +pub const SAT_PAYLOAD_MAX_SIZE: usize = /*max size*/1024 - /*CRC*/4 - /*packet ID*/1 - /*last*/1 - /*length*/2; // used by DDMA, subkernel program data (need to provide extra ID and 
destination) pub const MASTER_PAYLOAD_MAX_SIZE: usize = SAT_PAYLOAD_MAX_SIZE - /*source*/1 - /*destination*/1 - /*ID*/4; diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index 1bdec0dcb..2d0cbad55 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -65,7 +65,7 @@ pub mod drtio { let up_destinations = up_destinations.clone(); let ddma_mutex = ddma_mutex.clone(); let subkernel_mutex = subkernel_mutex.clone(); - io.spawn(8192, move |io| { + io.spawn(16384, move |io| { let routing_table = routing_table.borrow(); link_thread(io, &aux_mutex, &routing_table, &up_destinations, &ddma_mutex, &subkernel_mutex); }); diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index 5478e33ee..d972414bd 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -893,7 +893,7 @@ fn respawn(io: &Io, handle: &mut Option, f: F) } } - *handle = Some(io.spawn(24576, f)) + *handle = Some(io.spawn(32768, f)) } pub fn thread(io: Io, aux_mutex: &Mutex, From 531640fa9125bd8a689ed6eed8feea1ffce86a24 Mon Sep 17 00:00:00 2001 From: linuswck Date: Tue, 21 May 2024 17:54:00 +0800 Subject: [PATCH 236/296] kasli: Add Shuttler Support on Satellite --- artiq/firmware/satman/main.rs | 2 +- artiq/gateware/targets/kasli.py | 63 +++++++++++++++++++++++++++------ 2 files changed, 54 insertions(+), 11 deletions(-) diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index e8e26b59e..62494714e 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -698,7 +698,7 @@ pub extern fn main() -> i32 { io_expander.service().unwrap(); } - #[cfg(not(has_drtio_eem))] + #[cfg(not(soc_platform = "efc"))] unsafe { csr::gt_drtio::txenable_write(0xffffffffu32 as _); } diff --git a/artiq/gateware/targets/kasli.py b/artiq/gateware/targets/kasli.py index cd1404322..5807f0868 100755 --- a/artiq/gateware/targets/kasli.py +++ b/artiq/gateware/targets/kasli.py @@ -455,15 +455,15 @@ class SatelliteBase(BaseSoC, AMPSoC): self.submodules.rtio_tsc = rtio.TSC(glbl_fine_ts_width=3) - drtioaux_csr_group = [] - drtioaux_memory_group = [] - drtiorep_csr_group = [] + self.drtioaux_csr_group = [] + self.drtioaux_memory_group = [] + self.drtiorep_csr_group = [] self.drtio_cri = [] for i in range(len(self.gt_drtio.channels)): coreaux_name = "drtioaux" + str(i) memory_name = "drtioaux" + str(i) + "_mem" - drtioaux_csr_group.append(coreaux_name) - drtioaux_memory_group.append(memory_name) + self.drtioaux_csr_group.append(coreaux_name) + self.drtioaux_memory_group.append(memory_name) cdr = ClockDomainsRenamer({"rtio_rx": "rtio_rx" + str(i)}) @@ -476,7 +476,7 @@ class SatelliteBase(BaseSoC, AMPSoC): self.csr_devices.append("drtiosat") else: corerep_name = "drtiorep" + str(i-1) - drtiorep_csr_group.append(corerep_name) + self.drtiorep_csr_group.append(corerep_name) core = cdr(DRTIORepeater( self.rtio_tsc, self.gt_drtio.channels[i])) @@ -496,9 +496,6 @@ class SatelliteBase(BaseSoC, AMPSoC): self.config["HAS_DRTIO"] = None self.config["HAS_DRTIO_ROUTING"] = None self.config["DRTIO_ROLE"] = "satellite" - self.add_csr_group("drtioaux", drtioaux_csr_group) - self.add_memory_group("drtioaux_mem", drtioaux_memory_group) - self.add_csr_group("drtiorep", drtiorep_csr_group) i2c = self.platform.request("i2c") self.submodules.i2c = gpio.GPIOTristate([i2c.scl, i2c.sda]) @@ -566,6 +563,45 @@ class SatelliteBase(BaseSoC, AMPSoC): self.get_native_sdram_if(), cpu_dw=self.cpu_dw) 
self.csr_devices.append("rtio_analyzer") + def add_eem_drtio(self, eem_drtio_channels): + # Must be called before invoking add_rtio() to construct the CRI + # interconnect properly + self.submodules.eem_transceiver = eem_serdes.EEMSerdes(self.platform, eem_drtio_channels) + self.csr_devices.append("eem_transceiver") + self.config["HAS_DRTIO_EEM"] = None + self.config["EEM_DRTIO_COUNT"] = len(eem_drtio_channels) + + cdr = ClockDomainsRenamer({"rtio_rx": "sys"}) + for i in range(len(self.eem_transceiver.channels)): + channel = i + len(self.gt_drtio.channels) + corerep_name = "drtiorep" + str(channel-1) + coreaux_name = "drtioaux" + str(channel) + memory_name = "drtioaux" + str(channel) + "_mem" + self.drtiorep_csr_group.append(corerep_name) + self.drtioaux_csr_group.append(coreaux_name) + self.drtioaux_memory_group.append(memory_name) + + core = cdr(DRTIORepeater( + self.rtio_tsc, self.eem_transceiver.channels[i])) + setattr(self.submodules, corerep_name, core) + self.drtio_cri.append(core.cri) + self.csr_devices.append(corerep_name) + + coreaux = cdr(DRTIOAuxController(core.link_layer, self.cpu_dw)) + setattr(self.submodules, coreaux_name, coreaux) + self.csr_devices.append(coreaux_name) + + drtio_aux_mem_size = 1024 * 16 # max_packet * 8 buffers * 2 (tx, rx halves) + memory_address = self.mem_map["drtioaux"] + drtio_aux_mem_size*channel + self.add_wb_slave(memory_address, drtio_aux_mem_size, + coreaux.bus) + self.add_memory_region(memory_name, memory_address | self.shadow_base, drtio_aux_mem_size) + + def add_drtio_cpuif_groups(self): + self.add_csr_group("drtiorep", self.drtiorep_csr_group) + self.add_csr_group("drtioaux", self.drtioaux_csr_group) + self.add_memory_group("drtioaux_mem", self.drtioaux_memory_group) + class GenericStandalone(StandaloneBase): def __init__(self, description, hw_rev=None,**kwargs): if hw_rev is None: @@ -673,15 +709,18 @@ class GenericSatellite(SatelliteBase): if hw_rev is None: hw_rev = description["hw_rev"] self.class_name_override = description["variant"] + has_drtio_over_eem = any(peripheral["type"] == "shuttler" for peripheral in description["peripherals"]) SatelliteBase.__init__(self, hw_rev=hw_rev, rtio_clk_freq=description["rtio_frequency"], enable_sata=description["enable_sata_drtio"], + enable_sys5x=has_drtio_over_eem, **kwargs) if hw_rev == "v1.0": # EEM clock fan-out from Si5324, not MMCX self.comb += self.platform.request("clk_sel").eq(1) - + if has_drtio_over_eem: + self.eem_drtio_channels = [] has_grabber = any(peripheral["type"] == "grabber" for peripheral in description["peripherals"]) if has_grabber: self.grabber_csr_group = [] @@ -699,6 +738,10 @@ class GenericSatellite(SatelliteBase): self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) self.rtio_channels.append(rtio.LogChannel()) + if has_drtio_over_eem: + self.add_eem_drtio(self.eem_drtio_channels) + self.add_drtio_cpuif_groups() + self.add_rtio(self.rtio_channels, sed_lanes=description["sed_lanes"]) if has_grabber: self.config["HAS_GRABBER"] = None From 4de3273e7a314c84e8cd7d83faa84a04237ad5e5 Mon Sep 17 00:00:00 2001 From: linuswck Date: Thu, 23 May 2024 13:29:09 +0800 Subject: [PATCH 237/296] sinara_tester: add Shuttler test --- artiq/frontend/artiq_sinara_tester.py | 133 ++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) diff --git a/artiq/frontend/artiq_sinara_tester.py b/artiq/frontend/artiq_sinara_tester.py index e2d48b903..ba6337cd0 100755 --- a/artiq/frontend/artiq_sinara_tester.py +++ b/artiq/frontend/artiq_sinara_tester.py @@ -9,6 +9,7 @@ import sys from 
artiq.experiment import * from artiq.coredevice.ad9910 import AD9910, SyncDataEeprom from artiq.coredevice.phaser import PHASER_GW_BASE, PHASER_GW_MIQRO +from artiq.coredevice.shuttler import shuttler_volt_to_mu from artiq.master.databases import DeviceDB from artiq.master.worker_db import DeviceManager @@ -61,6 +62,7 @@ class SinaraTester(EnvExperiment): self.suservos = dict() self.suschannels = dict() self.almaznys = dict() + self.shuttler = dict() ddb = self.get_device_db() for name, desc in ddb.items(): @@ -100,6 +102,17 @@ class SinaraTester(EnvExperiment): self.suschannels[name] = self.get_device(name) elif (module, cls) == ("artiq.coredevice.almazny", "AlmaznyLegacy"): self.almaznys[name] = self.get_device(name) + elif (module, cls) == ("artiq.coredevice.shuttler", "Config"): + shuttler_name = name.replace("_config", "") + self.shuttler[shuttler_name] = ({ + "config": self.get_device(name), + "trigger": self.get_device("{}_trigger".format(shuttler_name)), + "leds": [self.get_device("{}_led{}".format(shuttler_name, i)) for i in range(2)], + "dcbias": [self.get_device("{}_dcbias{}".format(shuttler_name, i)) for i in range(16)], + "dds": [self.get_device("{}_dds{}".format(shuttler_name, i)) for i in range(16)], + "relay": self.get_device("{}_relay".format(shuttler_name)), + "adc": self.get_device("{}_adc".format(shuttler_name)), + }) # Remove Urukul, Sampler, Zotino and Mirny control signals # from TTL outs (tested separately) and remove Urukuls covered by @@ -148,6 +161,7 @@ class SinaraTester(EnvExperiment): self.mirnies = sorted(self.mirnies.items(), key=lambda x: (x[1].cpld.bus.channel, x[1].channel)) self.suservos = sorted(self.suservos.items(), key=lambda x: x[1].channel) self.suschannels = sorted(self.suschannels.items(), key=lambda x: x[1].channel) + self.shuttler = sorted(self.shuttler.items(), key=lambda x: x[1]["leds"][0].channel) @kernel def test_led(self, led): @@ -747,6 +761,125 @@ class SinaraTester(EnvExperiment): print("Press ENTER when done.") input() + @kernel + def setup_shuttler_init(self, relay, adc, dcbias, dds, trigger, config): + self.core.break_realtime() + # Reset Shuttler Output Relay + relay.init() + delay_mu(int64(self.core.ref_multiplier)) + + relay.enable(0x0000) + delay_mu(int64(self.core.ref_multiplier)) + + # Setup ADC and and Calibration + delay_mu(int64(self.core.ref_multiplier)) + adc.power_up() + + delay_mu(int64(self.core.ref_multiplier)) + if adc.read_id() >> 4 != 0x038d: + print("Remote AFE Board's ADC is not found. 
Check Remote AFE Board's Cables Connections") + assert adc.read_id() >> 4 == 0x038d + + delay_mu(int64(self.core.ref_multiplier)) + adc.calibrate(dcbias, trigger, config) + + #Reset Shuttler DAC Output + for ch in range(16): + self.setup_shuttler_set_output(dcbias, dds, trigger, ch, 0.0) + + @kernel + def set_shuttler_relay(self, relay, val): + self.core.break_realtime() + relay.enable(val) + + @kernel + def get_shuttler_output_voltage(self, adc, ch, cb): + self.core.break_realtime() + cb(adc.read_ch(ch)) + + @kernel + def setup_shuttler_set_output(self, dcbias, dds, trigger, ch, volt): + self.core.break_realtime() + dcbias[ch].set_waveform( + a0=shuttler_volt_to_mu(volt), + a1=0, + a2=0, + a3=0, + ) + delay_mu(int64(self.core.ref_multiplier)) + + dds[ch].set_waveform( + b0=0, + b1=0, + b2=0, + b3=0, + c0=0, + c1=0, + c2=0, + ) + delay_mu(int64(self.core.ref_multiplier)) + + trigger.trigger(1 << ch) + delay_mu(int64(self.core.ref_multiplier)) + + @kernel + def shuttler_relay_led_wave(self, relay): + while not is_enter_pressed(): + self.core.break_realtime() + # do not fill the FIFOs too much to avoid long response times + t = now_mu() - self.core.seconds_to_mu(.2) + while self.core.get_rtio_counter_mu() < t: + pass + for ch in range(16): + relay.enable(1 << ch) + delay(100*ms) + relay.enable(0x0000) + delay(100*ms) + + def test_shuttler(self): + print("*** Testing Shuttler.") + + for card_n, (card_name, card_dev) in enumerate(self.shuttler): + print("Testing: ", card_name) + + output_voltage = 0.0 + def setv(x): + nonlocal output_voltage + output_voltage = x + + self.setup_shuttler_init(card_dev["relay"], card_dev["adc"], card_dev["dcbias"], card_dev["dds"], card_dev["trigger"], card_dev["config"]) + + print("Check Remote AFE Board Relay LED Indicators.") + print("Press Enter to Continue.") + self.shuttler_relay_led_wave(card_dev["relay"]) + + self.set_shuttler_relay(card_dev["relay"], 0xFFFF) + + passed = True + adc_readings = [] + volt_set = [(-1)**i*(2.*card_n + .1*(i//2 + 1)) for i in range(16)] + + print("Testing Shuttler DAC") + print("Voltages:", " ".join(["{:.1f}".format(x) for x in volt_set])) + + for ch, volt in enumerate(volt_set): + self.setup_shuttler_set_output(card_dev["dcbias"], card_dev["dds"], card_dev["trigger"], ch, volt) + self.get_shuttler_output_voltage(card_dev["adc"], ch, setv) + if (abs(volt) - abs(output_voltage)) > 0.1: + passed = False + adc_readings.append(output_voltage) + + print("Press Enter to Continue.") + input() + self.set_shuttler_relay(card_dev["relay"], 0x0000) + + if passed: + print("PASSED") + else: + print("FAILED") + print("Shuttler Remote AFE Board ADC has abnormal readings.") + print(f"ADC Readings:", " ".join(["{:.2f}".format(x) for x in adc_readings])) + def run(self, tests): print("****** Sinara system tester ******") print("") From c2d645ed0a712a1f28a7e6d479fc3894eb63641d Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 23 May 2024 16:07:40 +0800 Subject: [PATCH 238/296] enable spread in satellite, use high watermark --- RELEASE_NOTES.rst | 1 + artiq/gateware/drtio/core.py | 4 ++-- artiq/gateware/rtio/sed/core.py | 7 +++++-- artiq/gateware/rtio/sed/fifos.py | 3 ++- artiq/gateware/rtio/sed/lane_distributor.py | 8 ++++---- artiq/gateware/rtio/sed/layouts.py | 1 + 6 files changed, 15 insertions(+), 9 deletions(-) diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index e4901d2c6..d0edd2a60 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -28,6 +28,7 @@ Highlights: clock, to facilitate implementation of local processing on DRTIO 
satellites, and to slightly reduce RTIO latency. * Support for DRTIO-over-EEM, used with Shuttler. +* Enabled event spreading on DRTIO satellites, using high watermark for lane switching. * Added channel names to RTIO error messages. * GUI: - Implemented Applet Request Interfaces which allow applets to modify datasets and set the diff --git a/artiq/gateware/drtio/core.py b/artiq/gateware/drtio/core.py index c6db5d9af..8928f833e 100644 --- a/artiq/gateware/drtio/core.py +++ b/artiq/gateware/drtio/core.py @@ -61,8 +61,8 @@ class SyncRTIO(Module): self.submodules.outputs = ClockDomainsRenamer("rio")( SED(channels, tsc.glbl_fine_ts_width, lane_count=lane_count, fifo_depth=fifo_depth, - enable_spread=False, report_buffer_space=True, - interface=self.cri)) + enable_spread=True, fifo_high_watermark=0.75, + report_buffer_space=True, interface=self.cri)) self.comb += self.outputs.coarse_timestamp.eq(tsc.coarse_ts) self.sync += self.outputs.minimum_coarse_timestamp.eq(tsc.coarse_ts + 16) diff --git a/artiq/gateware/rtio/sed/core.py b/artiq/gateware/rtio/sed/core.py index 8953ff9d2..32ed90996 100644 --- a/artiq/gateware/rtio/sed/core.py +++ b/artiq/gateware/rtio/sed/core.py @@ -12,10 +12,13 @@ __all__ = ["SED"] class SED(Module): def __init__(self, channels, glbl_fine_ts_width, - lane_count=8, fifo_depth=128, enable_spread=True, + lane_count=8, fifo_depth=128, fifo_high_watermark=1.0, enable_spread=True, quash_channels=[], report_buffer_space=False, interface=None): seqn_width = layouts.seqn_width(lane_count, fifo_depth) + fifo_high_watermark = int(fifo_high_watermark * fifo_depth) + assert fifo_depth >= fifo_high_watermark + self.submodules.lane_dist = LaneDistributor(lane_count, seqn_width, layouts.fifo_payload(channels), [channel.interface.o.delay for channel in channels], @@ -23,7 +26,7 @@ class SED(Module): enable_spread=enable_spread, quash_channels=quash_channels, interface=interface) - self.submodules.fifos = FIFOs(lane_count, fifo_depth, + self.submodules.fifos = FIFOs(lane_count, fifo_depth, fifo_high_watermark, layouts.fifo_payload(channels), report_buffer_space) self.submodules.gates = Gates(lane_count, seqn_width, layouts.fifo_payload(channels), diff --git a/artiq/gateware/rtio/sed/fifos.py b/artiq/gateware/rtio/sed/fifos.py index 3bf1bfc5a..81a260678 100644 --- a/artiq/gateware/rtio/sed/fifos.py +++ b/artiq/gateware/rtio/sed/fifos.py @@ -11,7 +11,7 @@ __all__ = ["FIFOs"] class FIFOs(Module): - def __init__(self, lane_count, fifo_depth, layout_payload, report_buffer_space=False): + def __init__(self, lane_count, fifo_depth, high_watermark, layout_payload, report_buffer_space=False): seqn_width = layouts.seqn_width(lane_count, fifo_depth) self.input = [Record(layouts.fifo_ingress(seqn_width, layout_payload)) for _ in range(lane_count)] @@ -33,6 +33,7 @@ class FIFOs(Module): fifo.din.eq(Cat(input.seqn, input.payload.raw_bits())), fifo.we.eq(input.we), input.writable.eq(fifo.writable), + input.high_watermark.eq(fifo.level >= high_watermark), Cat(output.seqn, output.payload.raw_bits()).eq(fifo.dout), output.readable.eq(fifo.readable), diff --git a/artiq/gateware/rtio/sed/lane_distributor.py b/artiq/gateware/rtio/sed/lane_distributor.py index 08a3fa716..f14010362 100644 --- a/artiq/gateware/rtio/sed/lane_distributor.py +++ b/artiq/gateware/rtio/sed/lane_distributor.py @@ -154,8 +154,10 @@ class LaneDistributor(Module): self.comb += lio.payload.timestamp.eq(compensated_timestamp) # cycle #3, read status + current_lane_high_watermark = Signal() current_lane_writable = Signal() self.comb += [ + 
current_lane_high_watermark.eq(Array(lio.high_watermark for lio in self.output)[current_lane]), current_lane_writable.eq(Array(lio.writable for lio in self.output)[current_lane]), o_status_wait.eq(~current_lane_writable) ] @@ -170,12 +172,10 @@ class LaneDistributor(Module): self.sequence_error_channel.eq(self.cri.chan_sel[:16]) ] - # current lane has been full, spread events by switching to the next. + # current lane has reached high watermark, spread events by switching to the next. if enable_spread: - current_lane_writable_r = Signal(reset=1) self.sync += [ - current_lane_writable_r.eq(current_lane_writable), - If(~current_lane_writable_r & current_lane_writable, + If(current_lane_high_watermark | ~current_lane_writable, force_laneB.eq(1) ), If(do_write, diff --git a/artiq/gateware/rtio/sed/layouts.py b/artiq/gateware/rtio/sed/layouts.py index 1fbb8f6ec..c3b69d64b 100644 --- a/artiq/gateware/rtio/sed/layouts.py +++ b/artiq/gateware/rtio/sed/layouts.py @@ -31,6 +31,7 @@ def fifo_ingress(seqn_width, layout_payload): return [ ("we", 1, DIR_M_TO_S), ("writable", 1, DIR_S_TO_M), + ("high_watermark", 1, DIR_S_TO_M), ("seqn", seqn_width, DIR_M_TO_S), ("payload", [(a, b, DIR_M_TO_S) for a, b in layout_payload]) ] From 2b31d38084416a9ef473f28e21e266ced04e6610 Mon Sep 17 00:00:00 2001 From: Egor Savkin Date: Mon, 20 May 2024 10:21:26 +0800 Subject: [PATCH 239/296] docs: update MSYS2 to include offline installer and openocd included by default docs: update MSYS2 mingw -> clang Signed-off-by: Egor Savkin --- doc/manual/installing.rst | 13 +++++++------ doc/manual/list_of_ndsps.rst | 2 +- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/doc/manual/installing.rst b/doc/manual/installing.rst index c1615eda3..86e42ccd4 100644 --- a/doc/manual/installing.rst +++ b/doc/manual/installing.rst @@ -154,16 +154,19 @@ This will set your user as a trusted user, allowing the use of any untrusted sub Installing via MSYS2 (Windows) ------------------------------ -Install `MSYS2 `_, then edit ``C:\MINGW64\etc\pacman.conf`` and add at the end: :: +We recommend using our `offline installer `_, which contains all the necessary packages and no additional configuration is needed. +After installation, launch ``MSYS2 with ARTIQ`` from the Windows Start menu. + +Alternatively, you may install `MSYS2 `_, then edit ``C:\MINGW64\etc\pacman.conf`` and add at the end: :: [artiq] SigLevel = Optional TrustAll Server = https://msys2.m-labs.hk/artiq-beta -Launch ``MSYS2 MINGW64`` from the Windows Start menu to open the MSYS2 shell, and enter the following commands: :: +Launch ``MSYS2 CLANG64`` from the Windows Start menu to open the MSYS2 shell, and enter the following commands: :: pacman -Syy - pacman -S mingw-w64-x86_64-artiq + pacman -S mingw-w64-clang-x86_64-artiq If your favorite package is not available with MSYS2, contact us using the helpdesk@ email. @@ -259,9 +262,7 @@ OpenOCD can be used to write the binary images into the core device FPGA board's With Nix, add ``aqmain.openocd-bscanspi`` to the shell packages. Be careful not to add ``pkgs.openocd`` instead - this would install OpenOCD from the NixOS package collection, which does not support ARTIQ boards. -With MSYS2, install ``openocd`` and ``bscan-spi-bitstreams`` as follows:: - - pacman -S mingw-w64-x86_64-openocd mingw-w64-x86_64-bscan-spi-bitstreams +With MSYS2, ``openocd`` and ``bscan-spi-bitstreams`` are included with ``artiq`` by default. 
With Conda, install ``openocd`` as follows:: diff --git a/doc/manual/list_of_ndsps.rst b/doc/manual/list_of_ndsps.rst index 65953485c..03d3a1b3d 100644 --- a/doc/manual/list_of_ndsps.rst +++ b/doc/manual/list_of_ndsps.rst @@ -29,4 +29,4 @@ The following network device support packages are available for ARTIQ. If you wo | InfluxDB database | Not available | Not available | `HTML `_ | https://gitlab.com/charlesbaynham/artiq_influx_generic | +---------------------------------+-----------------------------------+----------------------------------+-----------------------------------------------------------------------------------------------------+--------------------------------------------------------+ -MSYS2 packages all start with the ``mingw-w64-x86_64-`` prefix. +MSYS2 packages all start with the ``mingw-w64-clang-x86_64-`` prefix. From 0a044cf4241cec78a3675041a1a1a304348d697b Mon Sep 17 00:00:00 2001 From: mwojcik Date: Mon, 27 May 2024 12:40:03 +0800 Subject: [PATCH 240/296] schema: add efc hardware version --- artiq/coredevice/coredevice_generic.schema.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/artiq/coredevice/coredevice_generic.schema.json b/artiq/coredevice/coredevice_generic.schema.json index 13de70c20..0558fb44c 100644 --- a/artiq/coredevice/coredevice_generic.schema.json +++ b/artiq/coredevice/coredevice_generic.schema.json @@ -634,6 +634,10 @@ }, "drtio_destination": { "type": "integer" + }, + "hw_rev": { + "type": "string", + "enum": ["v1.0", "v1.1"] } }, "required": ["ports"] From 51c15ac777578416613ec0e6ab8da7c96b30653c Mon Sep 17 00:00:00 2001 From: mwojcik Date: Mon, 27 May 2024 15:24:44 +0800 Subject: [PATCH 241/296] update hw_rev in shuttler json example --- artiq/examples/kasli_shuttler/kasli_shuttler.json | 1 + 1 file changed, 1 insertion(+) diff --git a/artiq/examples/kasli_shuttler/kasli_shuttler.json b/artiq/examples/kasli_shuttler/kasli_shuttler.json index 7d938ae11..2e36191b0 100644 --- a/artiq/examples/kasli_shuttler/kasli_shuttler.json +++ b/artiq/examples/kasli_shuttler/kasli_shuttler.json @@ -6,6 +6,7 @@ "peripherals": [ { "type": "shuttler", + "hw_rev": "v1.1", "ports": [0] }, { From 13830a27af9708f1bd64e1577d7466f27936e536 Mon Sep 17 00:00:00 2001 From: morgan Date: Fri, 24 May 2024 10:50:13 +0800 Subject: [PATCH 242/296] riscv: add IRQ control --- artiq/firmware/libboard_misoc/riscv32/irq.rs | 49 ++++++++++++++++++++ artiq/firmware/libboard_misoc/riscv32/mod.rs | 1 + 2 files changed, 50 insertions(+) create mode 100644 artiq/firmware/libboard_misoc/riscv32/irq.rs diff --git a/artiq/firmware/libboard_misoc/riscv32/irq.rs b/artiq/firmware/libboard_misoc/riscv32/irq.rs new file mode 100644 index 000000000..3c632e952 --- /dev/null +++ b/artiq/firmware/libboard_misoc/riscv32/irq.rs @@ -0,0 +1,49 @@ +use riscv::register::{mie, mstatus}; + +fn vmim_write(val: usize) { + unsafe { + asm!("csrw {csr}, {rs}", rs = in(reg) val, csr = const 0xBC0); + } +} + +fn vmim_read() -> usize { + let r: usize; + unsafe { + asm!("csrr {rd}, {csr}", rd = out(reg) r, csr = const 0xBC0); + } + r +} + +fn vmip_read() -> usize { + let r: usize; + unsafe { + asm!("csrr {rd}, {csr}", rd = out(reg) r, csr = const 0xFC0); + } + r +} + +pub fn enable_interrupts() { + unsafe { + mstatus::set_mie(); + mie::set_mext(); + } +} + +pub fn disable_interrupts() { + unsafe { + mstatus::clear_mie(); + mie::clear_mext(); + } +} + +pub fn enable(id: u32) { + vmim_write(vmim_read() | (1 << id)); +} + +pub fn disable(id: u32) { + vmim_write(vmim_read() & !(1 << id)); +} + +pub fn 
is_pending(id: u32) -> bool { + (vmip_read() >> id) & 1 == 1 +} diff --git a/artiq/firmware/libboard_misoc/riscv32/mod.rs b/artiq/firmware/libboard_misoc/riscv32/mod.rs index a8f498adc..2408c8664 100644 --- a/artiq/firmware/libboard_misoc/riscv32/mod.rs +++ b/artiq/firmware/libboard_misoc/riscv32/mod.rs @@ -1,3 +1,4 @@ pub mod cache; pub mod boot; +pub mod irq; pub mod pmp; From 14a618b48d9710bbdf54674849fbcb1d8c2f1831 Mon Sep 17 00:00:00 2001 From: morgan Date: Fri, 24 May 2024 10:53:14 +0800 Subject: [PATCH 243/296] kasli: enable interrupts --- artiq/firmware/runtime/main.rs | 5 +++++ artiq/firmware/satman/main.rs | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/artiq/firmware/runtime/main.rs b/artiq/firmware/runtime/main.rs index 6d26432a7..f1519b646 100644 --- a/artiq/firmware/runtime/main.rs +++ b/artiq/firmware/runtime/main.rs @@ -36,6 +36,8 @@ use smoltcp::wire::HardwareAddress; use board_misoc::{csr, ident, clock, spiflash, config, net_settings, pmp, boot}; #[cfg(has_ethmac)] use board_misoc::ethmac; +#[cfg(soc_platform = "kasli")] +use board_misoc::irq; use board_misoc::net_settings::{Ipv4AddrConfig}; #[cfg(has_drtio)] use board_artiq::drtioaux; @@ -265,6 +267,9 @@ pub extern fn main() -> i32 { pmp::init_stack_guard(&_sstack_guard as *const u8 as usize); + #[cfg(soc_platform = "kasli")] + irq::enable_interrupts(); + logger_artiq::BufferLogger::new(&mut LOG_BUFFER[..]).register(|| boot::start_user(startup as usize) ); diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index 62494714e..f865ee406 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -17,6 +17,8 @@ use core::convert::TryFrom; use board_misoc::{csr, ident, clock, uart_logger, i2c, pmp}; #[cfg(has_si5324)] use board_artiq::si5324; +#[cfg(soc_platform = "kasli")] +use board_misoc::irq; use board_artiq::{spi, drtioaux, drtio_routing}; #[cfg(soc_platform = "efc")] use board_artiq::ad9117; @@ -630,6 +632,8 @@ pub extern fn main() -> i32 { ALLOC.add_range(&mut _fheap, &mut _eheap); pmp::init_stack_guard(&_sstack_guard as *const u8 as usize); } + #[cfg(soc_platform = "kasli")] + irq::enable_interrupts(); clock::init(); uart_logger::ConsoleLogger::register(); From 57780e36bef54873aacdf468f8b1337fc084bb94 Mon Sep 17 00:00:00 2001 From: morgan Date: Fri, 24 May 2024 11:18:52 +0800 Subject: [PATCH 244/296] cargo: update libfringe --- artiq/firmware/Cargo.lock | 2 +- artiq/firmware/runtime/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/artiq/firmware/Cargo.lock b/artiq/firmware/Cargo.lock index 5bfb45676..527a662ee 100644 --- a/artiq/firmware/Cargo.lock +++ b/artiq/firmware/Cargo.lock @@ -275,7 +275,7 @@ checksum = "c56c422ef86062869b2d57ae87270608dc5929969dd130a6e248979cf4fb6ca6" [[package]] name = "fringe" version = "1.2.1" -source = "git+https://git.m-labs.hk/M-Labs/libfringe.git?rev=3ecbe5#3ecbe53f7644b18ee46ebd5b2ca12c9cbceec43a" +source = "git+https://git.m-labs.hk/M-Labs/libfringe.git?rev=53a964#53a964a63d2d384b22ae1949a471a732003a30b9" dependencies = [ "libc 0.2.99", ] diff --git a/artiq/firmware/runtime/Cargo.toml b/artiq/firmware/runtime/Cargo.toml index d8dbe38d6..0d132d5a9 100644 --- a/artiq/firmware/runtime/Cargo.toml +++ b/artiq/firmware/runtime/Cargo.toml @@ -37,7 +37,7 @@ features = ["alloc", "medium-ethernet", "proto-ipv4", "proto-ipv6", "socket-tcp" [dependencies.fringe] git = "https://git.m-labs.hk/M-Labs/libfringe.git" -rev = "3ecbe5" +rev = "53a964" default-features = false features = ["alloc"] From 
1b0586e6a82ba0168ac00f81fcf335c463ce739d Mon Sep 17 00:00:00 2001 From: morgan Date: Fri, 24 May 2024 10:54:13 +0800 Subject: [PATCH 245/296] Gateware: si549 & WRPLL ddmtd: add DDMTD and deglitcher wrpll: add helper clockdomain wrpll: add frequency counter wrpll: add skewtester wrpll: add gtx & main tag collection wrpll: add gtx & main tag eventmanager for interrupt si549: add i2c and adpll programmer --- artiq/gateware/wrpll/__init__.py | 0 artiq/gateware/wrpll/ddmtd.py | 119 +++++++++++++ artiq/gateware/wrpll/si549.py | 277 +++++++++++++++++++++++++++++++ artiq/gateware/wrpll/wrpll.py | 173 +++++++++++++++++++ 4 files changed, 569 insertions(+) create mode 100644 artiq/gateware/wrpll/__init__.py create mode 100644 artiq/gateware/wrpll/ddmtd.py create mode 100644 artiq/gateware/wrpll/si549.py create mode 100644 artiq/gateware/wrpll/wrpll.py diff --git a/artiq/gateware/wrpll/__init__.py b/artiq/gateware/wrpll/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/artiq/gateware/wrpll/ddmtd.py b/artiq/gateware/wrpll/ddmtd.py new file mode 100644 index 000000000..2d0380810 --- /dev/null +++ b/artiq/gateware/wrpll/ddmtd.py @@ -0,0 +1,119 @@ +from migen import * +from migen.genlib.cdc import PulseSynchronizer, MultiReg +from misoc.interconnect.csr import * + + +class DDMTDSampler(Module): + def __init__(self, cd_ref, main_clk_se): + self.ref_beating = Signal() + self.main_beating = Signal() + + # # # + + ref_clk = Signal() + self.specials +=[ + # ISERDESE2 can only be driven from fabric via IDELAYE2 (see UG471) + Instance("IDELAYE2", + p_DELAY_SRC="DATAIN", + p_HIGH_PERFORMANCE_MODE="TRUE", + p_REFCLK_FREQUENCY=208.3, # REFCLK frequency from IDELAYCTRL + p_IDELAY_VALUE=0, + + i_DATAIN=cd_ref.clk, + + o_DATAOUT=ref_clk + ), + Instance("ISERDESE2", + p_IOBDELAY="IFD", # use DDLY as input + p_DATA_RATE="SDR", + p_DATA_WIDTH=2, # min is 2 + p_NUM_CE=1, + + i_DDLY=ref_clk, + i_CE1=1, + i_CLK=ClockSignal("helper"), + i_CLKDIV=ClockSignal("helper"), + + o_Q1=self.ref_beating + ), + Instance("ISERDESE2", + p_DATA_RATE="SDR", + p_DATA_WIDTH=2, # min is 2 + p_NUM_CE=1, + + i_D=main_clk_se, + i_CE1=1, + i_CLK=ClockSignal("helper"), + i_CLKDIV=ClockSignal("helper"), + + o_Q1=self.main_beating, + ), + ] + + +class DDMTDDeglitcherMedianEdge(Module): + def __init__(self, counter, input_signal, stable_0_period=100, stable_1_period=100): + self.tag = Signal(len(counter)) + self.detect = Signal() + + stable_0_counter = Signal(reset=stable_0_period - 1, max=stable_0_period) + stable_1_counter = Signal(reset=stable_1_period - 1, max=stable_1_period) + + # # # + + # Based on CERN's median edge deglitcher FSM + # https://white-rabbit.web.cern.ch/documents/Precise_time_and_frequency_transfer_in_a_White_Rabbit_network.pdf (p.72) + fsm = ClockDomainsRenamer("helper")(FSM(reset_state="WAIT_STABLE_0")) + self.submodules += fsm + + fsm.act("WAIT_STABLE_0", + If(stable_0_counter != 0, + NextValue(stable_0_counter, stable_0_counter - 1) + ).Else( + NextValue(stable_0_counter, stable_0_period - 1), + NextState("WAIT_EDGE") + ), + If(input_signal, + NextValue(stable_0_counter, stable_0_period - 1) + ), + ) + fsm.act("WAIT_EDGE", + If(input_signal, + NextValue(self.tag, counter), + NextState("GOT_EDGE") + ) + ) + fsm.act("GOT_EDGE", + If(stable_1_counter != 0, + NextValue(stable_1_counter, stable_1_counter - 1) + ).Else( + NextValue(stable_1_counter, stable_1_period - 1), + self.detect.eq(1), + NextState("WAIT_STABLE_0") + ), + If(~input_signal, + NextValue(self.tag, self.tag + 1), + 
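+                # the input glitched back low before the stability window elapsed:
+                # advance the candidate tag and restart the stability counter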
NextValue(stable_1_counter, stable_1_period - 1) + ), + ) + + +class DDMTD(Module): + def __init__(self, counter, input_signal): + + # in helper clock domain + self.h_tag = Signal(len(counter)) + self.h_tag_update = Signal() + + # # # + + deglitcher = DDMTDDeglitcherMedianEdge(counter, input_signal) + self.submodules += deglitcher + + self.sync.helper += [ + self.h_tag_update.eq(0), + If(deglitcher.detect, + self.h_tag_update.eq(1), + self.h_tag.eq(deglitcher.tag) + ) + ] \ No newline at end of file diff --git a/artiq/gateware/wrpll/si549.py b/artiq/gateware/wrpll/si549.py new file mode 100644 index 000000000..3ae894832 --- /dev/null +++ b/artiq/gateware/wrpll/si549.py @@ -0,0 +1,277 @@ +from migen import * +from migen.genlib.fsm import * + +from misoc.interconnect.csr import * + + +class I2CClockGen(Module): + def __init__(self, width): + self.load = Signal(width) + self.clk2x = Signal() + + cnt = Signal.like(self.load) + self.comb += [ + self.clk2x.eq(cnt == 0), + ] + self.sync += [ + If(self.clk2x, + cnt.eq(self.load), + ).Else( + cnt.eq(cnt - 1), + ) + ] + + +class I2CMasterMachine(Module): + def __init__(self, clock_width): + self.scl = Signal(reset=1) + self.sda_o = Signal(reset=1) + self.sda_i = Signal() + + self.submodules.cg = CEInserter()(I2CClockGen(clock_width)) + self.start = Signal() + self.stop = Signal() + self.write = Signal() + self.ack = Signal() + self.data = Signal(8) + self.ready = Signal() + + # # # + + bits = Signal(4) + data = Signal(8) + + fsm = CEInserter()(FSM("IDLE")) + self.submodules += fsm + + fsm.act("IDLE", + self.ready.eq(1), + If(self.start, + NextState("START0"), + ).Elif(self.stop, + NextState("STOP0"), + ).Elif(self.write, + NextValue(bits, 8), + NextValue(data, self.data), + NextState("WRITE0") + ) + ) + + fsm.act("START0", + NextValue(self.scl, 1), + NextState("START1") + ) + fsm.act("START1", + NextValue(self.sda_o, 0), + NextState("IDLE") + ) + + fsm.act("STOP0", + NextValue(self.scl, 0), + NextState("STOP1") + ) + fsm.act("STOP1", + NextValue(self.sda_o, 0), + NextState("STOP2") + ) + fsm.act("STOP2", + NextValue(self.scl, 1), + NextState("STOP3") + ) + fsm.act("STOP3", + NextValue(self.sda_o, 1), + NextState("IDLE") + ) + + fsm.act("WRITE0", + NextValue(self.scl, 0), + NextState("WRITE1") + ) + fsm.act("WRITE1", + If(bits == 0, + NextValue(self.sda_o, 1), + NextState("READACK0"), + ).Else( + NextValue(self.sda_o, data[7]), + NextState("WRITE2"), + ) + ) + fsm.act("WRITE2", + NextValue(self.scl, 1), + NextValue(data[1:], data[:-1]), + NextValue(bits, bits - 1), + NextState("WRITE0"), + ) + fsm.act("READACK0", + NextValue(self.scl, 1), + NextState("READACK1"), + ) + fsm.act("READACK1", + NextValue(self.ack, ~self.sda_i), + NextState("IDLE") + ) + + run = Signal() + idle = Signal() + self.comb += [ + run.eq((self.start | self.stop | self.write) & self.ready), + idle.eq(~run & fsm.ongoing("IDLE")), + self.cg.ce.eq(~idle), + fsm.ce.eq(run | self.cg.clk2x), + ] + + +class ADPLLProgrammer(Module): + def __init__(self): + self.i2c_divider = Signal(16) + self.i2c_address = Signal(7) + + self.adpll = Signal(24) + self.stb = Signal() + self.busy = Signal() + self.nack = Signal() + + self.scl = Signal() + self.sda_i = Signal() + self.sda_o = Signal() + + # # # + + master = I2CMasterMachine(16) + self.submodules += master + + self.comb += [ + master.cg.load.eq(self.i2c_divider), + self.scl.eq(master.scl), + master.sda_i.eq(self.sda_i), + self.sda_o.eq(master.sda_o) + ] + + fsm = FSM() + self.submodules += fsm + + fsm.act("IDLE", + If(self.stb, + 
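+                # a new ADPLL transfer was requested: clear the NACK flag from the
+                # previous transfer before issuing the START condition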
NextValue(self.nack, 0), + NextState("START") + ) + ) + fsm.act("START", + master.start.eq(1), + If(master.ready, NextState("DEVADDRESS")) + ) + fsm.act("DEVADDRESS", + master.data.eq(self.i2c_address << 1), + master.write.eq(1), + If(master.ready, NextState("REGADRESS")) + ) + fsm.act("REGADRESS", + master.data.eq(231), + master.write.eq(1), + If(master.ready, + If(master.ack, + NextState("DATA0") + ).Else( + NextValue(self.nack, 1), + NextState("STOP") + ) + ) + ) + fsm.act("DATA0", + master.data.eq(self.adpll[0:8]), + master.write.eq(1), + If(master.ready, + If(master.ack, + NextState("DATA1") + ).Else( + NextValue(self.nack, 1), + NextState("STOP") + ) + ) + ) + fsm.act("DATA1", + master.data.eq(self.adpll[8:16]), + master.write.eq(1), + If(master.ready, + If(master.ack, + NextState("DATA2") + ).Else( + NextValue(self.nack, 1), + NextState("STOP") + ) + ) + ) + fsm.act("DATA2", + master.data.eq(self.adpll[16:24]), + master.write.eq(1), + If(master.ready, + If(~master.ack, NextValue(self.nack, 1)), + NextState("STOP") + ) + ) + fsm.act("STOP", + master.stop.eq(1), + If(master.ready, + If(~master.ack, NextValue(self.nack, 1)), + NextState("IDLE") + ) + ) + + self.comb += self.busy.eq(~fsm.ongoing("IDLE")) + + +class Si549(Module, AutoCSR): + def __init__(self, pads): + self.i2c_divider = CSRStorage(16, reset=75) + self.i2c_address = CSRStorage(7) + + self.adpll = CSRStorage(24) + self.adpll_stb = CSR() + self.adpll_busy = CSRStatus() + self.nack = CSRStatus() + + self.bitbang_enable = CSRStorage() + + self.sda_oe = CSRStorage() + self.sda_out = CSRStorage() + self.sda_in = CSRStatus() + self.scl_oe = CSRStorage() + self.scl_out = CSRStorage() + + # # # + + self.submodules.programmer = ADPLLProgrammer() + + self.sync += self.programmer.stb.eq(self.adpll_stb.re) + + self.comb += [ + self.programmer.i2c_divider.eq(self.i2c_divider.storage), + self.programmer.i2c_address.eq(self.i2c_address.storage), + self.programmer.adpll.eq(self.adpll.storage), + self.adpll_busy.status.eq(self.programmer.busy), + self.nack.status.eq(self.programmer.nack) + ] + + # I2C with bitbang/gateware mode select + sda_t = TSTriple(1) + scl_t = TSTriple(1) + self.specials += [ + sda_t.get_tristate(pads.sda), + scl_t.get_tristate(pads.scl) + ] + + self.comb += [ + If(self.bitbang_enable.storage, + sda_t.oe.eq(self.sda_oe.storage), + sda_t.o.eq(self.sda_out.storage), + self.sda_in.status.eq(sda_t.i), + scl_t.oe.eq(self.scl_oe.storage), + scl_t.o.eq(self.scl_out.storage) + ).Else( + sda_t.oe.eq(~self.programmer.sda_o), + sda_t.o.eq(0), + self.programmer.sda_i.eq(sda_t.i), + scl_t.oe.eq(~self.programmer.scl), + scl_t.o.eq(0), + ) + ] \ No newline at end of file diff --git a/artiq/gateware/wrpll/wrpll.py b/artiq/gateware/wrpll/wrpll.py new file mode 100644 index 000000000..ef5a15290 --- /dev/null +++ b/artiq/gateware/wrpll/wrpll.py @@ -0,0 +1,173 @@ +from migen import * +from migen.genlib.cdc import MultiReg, AsyncResetSynchronizer, PulseSynchronizer +from misoc.interconnect.csr import * +from misoc.interconnect.csr_eventmanager import * + +from artiq.gateware.wrpll.ddmtd import DDMTDSampler, DDMTD +from artiq.gateware.wrpll.si549 import Si549 + +class FrequencyCounter(Module, AutoCSR): + def __init__(self, domains, counter_width=24): + self.update = CSR() + self.busy = CSRStatus() + + counter_reset = Signal() + counter_stb = Signal() + timer = Signal(counter_width) + + # # # + + fsm = FSM() + self.submodules += fsm + + fsm.act("IDLE", + counter_reset.eq(1), + If(self.update.re, + NextValue(timer, 2**counter_width - 1), + 
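+                # keep the measurement gate open for 2**counter_width sys cycles; each
+                # measured clock is divided by 4 (2-bit divider) before being counted,
+                # and the firmware turns the ref/sys count difference into an ADPLL offset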
NextState("COUNTING") + ) + ) + fsm.act("COUNTING", + self.busy.status.eq(1), + If(timer != 0, + NextValue(timer, timer - 1) + ).Else( + counter_stb.eq(1), + NextState("IDLE") + ) + ) + + for domain in domains: + name = "counter_" + domain + counter_csr = CSRStatus(counter_width, name=name) + setattr(self, name, counter_csr) + + divider = Signal(2) + divided = Signal() + divided_sys = Signal() + divided_sys_r = Signal() + divided_tick = Signal() + counter = Signal(counter_width) + + # # # + + sync_domain = getattr(self.sync, domain) + sync_domain +=[ + divider.eq(divider + 1), + divided.eq(divider[-1]) + ] + self.specials += MultiReg(divided, divided_sys) + self.sync += divided_sys_r.eq(divided_sys) + self.comb += divided_tick.eq(divided_sys & ~divided_sys_r) + + self.sync += [ + If(counter_stb, counter_csr.status.eq(counter)), + If(divided_tick, counter.eq(counter + 1)), + If(counter_reset, counter.eq(0)) + ] + +class SkewTester(Module, AutoCSR): + def __init__(self, rx_synchronizer): + self.error = CSR() + + # # # + + # The RX synchronizer is tested for setup/hold violations by feeding it a + # toggling pattern and checking that the same toggling pattern comes out. + toggle_in = Signal() + self.sync.rtio_rx0 += toggle_in.eq(~toggle_in) + toggle_out = rx_synchronizer.resync(toggle_in) + + toggle_out_expected = Signal() + self.sync += toggle_out_expected.eq(~toggle_out) + + error = Signal() + self.sync += [ + If(toggle_out != toggle_out_expected, error.eq(1)), + If(self.error.re, error.eq(0)) + ] + self.specials += MultiReg(error, self.error.w) + + +class WRPLL(Module, AutoCSR): + def __init__(self, platform, cd_ref, main_clk_se, COUNTER_BIT=32): + self.helper_reset = CSRStorage(reset=1) + self.ref_tag = CSRStatus(COUNTER_BIT) + self.main_tag = CSRStatus(COUNTER_BIT) + + ddmtd_counter = Signal(COUNTER_BIT) + + ref_tag_sys = Signal(COUNTER_BIT) + main_tag_sys = Signal(COUNTER_BIT) + ref_tag_stb_sys = Signal() + main_tag_stb_sys = Signal() + + # # # + + self.submodules.main_dcxo = Si549(platform.request("ddmtd_main_dcxo_i2c")) + self.submodules.helper_dcxo = Si549(platform.request("ddmtd_helper_dcxo_i2c")) + + helper_dcxo_pads = platform.request("ddmtd_helper_clk") + self.clock_domains.cd_helper = ClockDomain() + self.specials += [ + Instance("IBUFGDS", + i_I=helper_dcxo_pads.p, i_IB=helper_dcxo_pads.n, + o_O=self.cd_helper.clk), + AsyncResetSynchronizer(self.cd_helper, self.helper_reset.storage) + ] + + self.submodules.frequency_counter = FrequencyCounter(["sys", cd_ref.name]) + + self.submodules.ddmtd_sampler = DDMTDSampler(cd_ref, main_clk_se) + + self.sync.helper += ddmtd_counter.eq(ddmtd_counter + 1) + self.submodules.ddmtd_ref = DDMTD(ddmtd_counter, self.ddmtd_sampler.ref_beating) + self.submodules.ddmtd_main = DDMTD(ddmtd_counter, self.ddmtd_sampler.main_beating) + + # DDMTD tags collection + + self.specials += [ + MultiReg(self.ddmtd_ref.h_tag, ref_tag_sys), + MultiReg(self.ddmtd_main.h_tag, main_tag_sys) + ] + + ref_tag_stb_ps = PulseSynchronizer("helper", "sys") + main_tag_stb_ps = PulseSynchronizer("helper", "sys") + self.submodules += [ + ref_tag_stb_ps, + main_tag_stb_ps + ] + self.sync.helper += [ + ref_tag_stb_ps.i.eq(self.ddmtd_ref.h_tag_update), + main_tag_stb_ps.i.eq(self.ddmtd_main.h_tag_update) + ] + self.sync += [ + ref_tag_stb_sys.eq(ref_tag_stb_ps.o), + main_tag_stb_sys.eq(main_tag_stb_ps.o) + ] + + self.sync += [ + If(ref_tag_stb_sys, + self.ref_tag.status.eq(ref_tag_sys), + ), + If(main_tag_stb_sys, + self.main_tag.status.eq(main_tag_sys) + ) + ] + + # EventMangers 
for firmware interrupt + + self.submodules.ref_tag_ev = EventManager() + self.ref_tag_ev.stb = EventSourcePulse() + self.ref_tag_ev.finalize() + + self.submodules.main_tag_ev = EventManager() + self.main_tag_ev.stb = EventSourcePulse() + self.main_tag_ev.finalize() + + self.sync += [ + self.ref_tag_ev.stb.trigger.eq(ref_tag_stb_sys), + self.main_tag_ev.stb.trigger.eq(main_tag_stb_sys) + ] + + self.submodules.ev = SharedIRQ(self.ref_tag_ev, self.main_tag_ev) From 0d78e65f7aba437b233a854bfec5ed84f555ce88 Mon Sep 17 00:00:00 2001 From: morgan Date: Fri, 24 May 2024 10:54:49 +0800 Subject: [PATCH 246/296] Gateware: kasli satellite WRPLL setup kasli: use enable_wrpll from json to switch from si5324 to si549 kasli: add wrpll kasli: add wrpll interrupt kasli: add clk_synth_se kasli: add skewtester kasli: add WRPLL_REF_CLK config for firmware --- artiq/gateware/targets/kasli.py | 42 +++++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/artiq/gateware/targets/kasli.py b/artiq/gateware/targets/kasli.py index 5807f0868..619cdb90d 100755 --- a/artiq/gateware/targets/kasli.py +++ b/artiq/gateware/targets/kasli.py @@ -26,6 +26,7 @@ from artiq.gateware.drtio.transceiver import gtp_7series, eem_serdes from artiq.gateware.drtio.siphaser import SiPhaser7Series from artiq.gateware.drtio.rx_synchronizer import XilinxRXSynchronizer from artiq.gateware.drtio import * +from artiq.gateware.wrpll import wrpll from artiq.build_soc import * from artiq.coredevice import jsondesc @@ -377,7 +378,7 @@ class SatelliteBase(BaseSoC, AMPSoC): } mem_map.update(BaseSoC.mem_map) - def __init__(self, rtio_clk_freq=125e6, enable_sata=False, *, gateware_identifier_str=None, hw_rev="v2.0", **kwargs): + def __init__(self, rtio_clk_freq=125e6, enable_sata=False, with_wrpll=False, *, gateware_identifier_str=None, hw_rev="v2.0", **kwargs): if hw_rev in ("v1.0", "v1.1"): cpu_bus_width = 32 else: @@ -505,17 +506,33 @@ class SatelliteBase(BaseSoC, AMPSoC): rtio_clk_period = 1e9/rtio_clk_freq self.config["RTIO_FREQUENCY"] = str(rtio_clk_freq/1e6) - self.submodules.siphaser = SiPhaser7Series( - si5324_clkin=platform.request("cdr_clk") if platform.hw_rev == "v2.0" - else platform.request("si5324_clkin"), - rx_synchronizer=self.rx_synchronizer, - ref_clk=self.crg.clk125_div2, ref_div2=True, - rtio_clk_freq=rtio_clk_freq) - platform.add_false_path_constraints( - self.crg.cd_sys.clk, self.siphaser.mmcm_freerun_output) - self.csr_devices.append("siphaser") - self.config["HAS_SI5324"] = None - self.config["SI5324_SOFT_RESET"] = None + if with_wrpll: + clk_synth = platform.request("cdr_clk_clean_fabric") + clk_synth_se = Signal() + platform.add_period_constraint(clk_synth.p, 8.0) + self.specials += Instance("IBUFGDS", p_DIFF_TERM="TRUE", p_IBUF_LOW_PWR="FALSE", i_I=clk_synth.p, i_IB=clk_synth.n, o_O=clk_synth_se) + self.submodules.wrpll = wrpll.WRPLL( + platform=self.platform, + cd_ref=self.gt_drtio.cd_rtio_rx0, + main_clk_se=clk_synth_se) + self.submodules.wrpll_skewtester = wrpll.SkewTester(self.rx_synchronizer) + self.csr_devices.append("wrpll_skewtester") + self.csr_devices.append("wrpll") + self.interrupt_devices.append("wrpll") + self.config["HAS_SI549"] = None + self.config["WRPLL_REF_CLK"] = "GT_CDR" + else: + self.submodules.siphaser = SiPhaser7Series( + si5324_clkin=platform.request("cdr_clk") if platform.hw_rev == "v2.0" + else platform.request("si5324_clkin"), + rx_synchronizer=self.rx_synchronizer, + ref_clk=self.crg.clk125_div2, ref_div2=True, + rtio_clk_freq=rtio_clk_freq) + 
platform.add_false_path_constraints( + self.crg.cd_sys.clk, self.siphaser.mmcm_freerun_output) + self.csr_devices.append("siphaser") + self.config["HAS_SI5324"] = None + self.config["SI5324_SOFT_RESET"] = None gtp = self.gt_drtio.gtps[0] txout_buf = Signal() @@ -715,6 +732,7 @@ class GenericSatellite(SatelliteBase): rtio_clk_freq=description["rtio_frequency"], enable_sata=description["enable_sata_drtio"], enable_sys5x=has_drtio_over_eem, + with_wrpll=description["enable_wrpll"], **kwargs) if hw_rev == "v1.0": # EEM clock fan-out from Si5324, not MMCX From 5971d9e9583d95f3edea22686ca1ade791d53608 Mon Sep 17 00:00:00 2001 From: morgan Date: Fri, 24 May 2024 10:59:10 +0800 Subject: [PATCH 247/296] Firmware: set CLK_SEL in io_expander init io_expander init: set initial out_target instead of 0x00 io_expander0: gate CLK_SEL direction & output --- artiq/firmware/libboard_misoc/io_expander.rs | 31 ++++++++++++++++---- 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/artiq/firmware/libboard_misoc/io_expander.rs b/artiq/firmware/libboard_misoc/io_expander.rs index b86f4bd80..4bc84e5cf 100644 --- a/artiq/firmware/libboard_misoc/io_expander.rs +++ b/artiq/firmware/libboard_misoc/io_expander.rs @@ -10,6 +10,25 @@ struct Registers { gpiob: u8, // Output Port 1 } +#[cfg(has_si549)] +const IODIR_CLK_SEL: u8 = 0x80; // out +#[cfg(has_si5324)] +const IODIR_CLK_SEL: u8 = 0x00; // in + +#[cfg(has_si549)] +const CLK_SEL_OUT: u8 = 1 << 7; +#[cfg(has_si5324)] +const CLK_SEL_OUT: u8 = 0; + +const IODIR0 : [u8; 2] = [ + 0xFF, + 0xFF & !IODIR_CLK_SEL +]; + +const OUT_TAR0 : [u8; 2] = [ + 0, + CLK_SEL_OUT +]; pub struct IoExpander { busno: u8, port: u8, @@ -34,9 +53,9 @@ impl IoExpander { port: 11, address: 0x40, virtual_led_mapping: &VIRTUAL_LED_MAPPING0, - iodir: [0xff; 2], + iodir: IODIR0, out_current: [0; 2], - out_target: [0; 2], + out_target: OUT_TAR0, registers: Registers { iodira: 0x00, iodirb: 0x01, @@ -153,10 +172,10 @@ impl IoExpander { } self.update_iodir()?; - self.out_current[0] = 0x00; - self.write(self.registers.gpioa, 0x00)?; - self.out_current[1] = 0x00; - self.write(self.registers.gpiob, 0x00)?; + self.write(self.registers.gpioa, self.out_target[0])?; + self.out_current[0] = self.out_target[0]; + self.write(self.registers.gpiob, self.out_target[1])?; + self.out_current[1] = self.out_target[1]; Ok(()) } From 5d9bc930fedccd223ac13f7b6428d23c3f4f5e5d Mon Sep 17 00:00:00 2001 From: morgan Date: Fri, 24 May 2024 11:04:43 +0800 Subject: [PATCH 248/296] Firmware: si549 si549: add bit bang i2c si549: add si549 programming si549: add main & helper setup --- artiq/firmware/libboard_artiq/lib.rs | 2 + artiq/firmware/libboard_artiq/si549.rs | 323 +++++++++++++++++++++++++ 2 files changed, 325 insertions(+) create mode 100644 artiq/firmware/libboard_artiq/si549.rs diff --git a/artiq/firmware/libboard_artiq/lib.rs b/artiq/firmware/libboard_artiq/lib.rs index e18dd1ddd..906a84e1a 100644 --- a/artiq/firmware/libboard_artiq/lib.rs +++ b/artiq/firmware/libboard_artiq/lib.rs @@ -24,6 +24,8 @@ pub mod rpc_queue; #[cfg(has_si5324)] pub mod si5324; +#[cfg(has_si549)] +pub mod si549; #[cfg(has_grabber)] pub mod grabber; diff --git a/artiq/firmware/libboard_artiq/si549.rs b/artiq/firmware/libboard_artiq/si549.rs new file mode 100644 index 000000000..8bf6a6622 --- /dev/null +++ b/artiq/firmware/libboard_artiq/si549.rs @@ -0,0 +1,323 @@ +use board_misoc::{clock, csr}; +use log::info; + +const ADDRESS: u8 = 0x67; + +const ADPLL_MAX: i32 = (950.0 / 0.0001164) as i32; + +pub struct DividerConfig { + pub hsdiv: 
u16, + pub lsdiv: u8, + pub fbdiv: u64, +} + +pub struct FrequencySetting { + pub main: DividerConfig, + pub helper: DividerConfig, +} + +mod i2c { + use super::*; + + #[derive(Clone, Copy)] + pub enum DCXO { + Main, + Helper, + } + + fn half_period() { + clock::spin_us(1) + } + + fn sda_i(dcxo: DCXO) -> bool { + match dcxo { + DCXO::Main => unsafe { csr::wrpll::main_dcxo_sda_in_read() == 1 }, + DCXO::Helper => unsafe { csr::wrpll::helper_dcxo_sda_in_read() == 1 }, + } + } + + fn sda_oe(dcxo: DCXO, oe: bool) { + let val = if oe { 1 } else { 0 }; + match dcxo { + DCXO::Main => unsafe { csr::wrpll::main_dcxo_sda_oe_write(val) }, + DCXO::Helper => unsafe { csr::wrpll::helper_dcxo_sda_oe_write(val) }, + }; + } + + fn sda_o(dcxo: DCXO, o: bool) { + let val = if o { 1 } else { 0 }; + match dcxo { + DCXO::Main => unsafe { csr::wrpll::main_dcxo_sda_out_write(val) }, + DCXO::Helper => unsafe { csr::wrpll::helper_dcxo_sda_out_write(val) }, + }; + } + + fn scl_oe(dcxo: DCXO, oe: bool) { + let val = if oe { 1 } else { 0 }; + match dcxo { + DCXO::Main => unsafe { csr::wrpll::main_dcxo_scl_oe_write(val) }, + DCXO::Helper => unsafe { csr::wrpll::helper_dcxo_scl_oe_write(val) }, + }; + } + + fn scl_o(dcxo: DCXO, o: bool) { + let val = if o { 1 } else { 0 }; + match dcxo { + DCXO::Main => unsafe { csr::wrpll::main_dcxo_scl_out_write(val) }, + DCXO::Helper => unsafe { csr::wrpll::helper_dcxo_scl_out_write(val) }, + }; + } + + pub fn init(dcxo: DCXO) -> Result<(), &'static str> { + // Set SCL as output, and high level + scl_o(dcxo, true); + scl_oe(dcxo, true); + // Prepare a zero level on SDA so that sda_oe pulls it down + sda_o(dcxo, false); + // Release SDA + sda_oe(dcxo, false); + + // Check the I2C bus is ready + half_period(); + half_period(); + if !sda_i(dcxo) { + // Try toggling SCL a few times + for _bit in 0..8 { + scl_o(dcxo, false); + half_period(); + scl_o(dcxo, true); + half_period(); + } + } + + if !sda_i(dcxo) { + return Err("SDA is stuck low and doesn't get unstuck"); + } + Ok(()) + } + + pub fn start(dcxo: DCXO) { + // Set SCL high then SDA low + scl_o(dcxo, true); + half_period(); + sda_oe(dcxo, true); + half_period(); + } + + pub fn stop(dcxo: DCXO) { + // First, make sure SCL is low, so that the target releases the SDA line + scl_o(dcxo, false); + half_period(); + // Set SCL high then SDA high + sda_oe(dcxo, true); + scl_o(dcxo, true); + half_period(); + sda_oe(dcxo, false); + half_period(); + } + + pub fn write(dcxo: DCXO, data: u8) -> bool { + // MSB first + for bit in (0..8).rev() { + // Set SCL low and set our bit on SDA + scl_o(dcxo, false); + sda_oe(dcxo, data & (1 << bit) == 0); + half_period(); + // Set SCL high ; data is shifted on the rising edge of SCL + scl_o(dcxo, true); + half_period(); + } + // Check ack + // Set SCL low, then release SDA so that the I2C target can respond + scl_o(dcxo, false); + half_period(); + sda_oe(dcxo, false); + // Set SCL high and check for ack + scl_o(dcxo, true); + half_period(); + // returns true if acked (I2C target pulled SDA low) + !sda_i(dcxo) + } + + pub fn read(dcxo: DCXO, ack: bool) -> u8 { + // Set SCL low first, otherwise setting SDA as input may cause a transition + // on SDA with SCL high which will be interpreted as START/STOP condition. 
+ scl_o(dcxo, false); + half_period(); // make sure SCL has settled low + sda_oe(dcxo, false); + + let mut data: u8 = 0; + + // MSB first + for bit in (0..8).rev() { + scl_o(dcxo, false); + half_period(); + // Set SCL high and shift data + scl_o(dcxo, true); + half_period(); + if sda_i(dcxo) { + data |= 1 << bit + } + } + // Send ack + // Set SCL low and pull SDA low when acking + scl_o(dcxo, false); + if ack { + sda_oe(dcxo, true) + } + half_period(); + // then set SCL high + scl_o(dcxo, true); + half_period(); + + data + } +} + +fn write(dcxo: i2c::DCXO, reg: u8, val: u8) -> Result<(), &'static str> { + i2c::start(dcxo); + if !i2c::write(dcxo, ADDRESS << 1) { + return Err("Si549 failed to ack write address"); + } + if !i2c::write(dcxo, reg) { + return Err("Si549 failed to ack register"); + } + if !i2c::write(dcxo, val) { + return Err("Si549 failed to ack value"); + } + i2c::stop(dcxo); + Ok(()) +} + +fn read(dcxo: i2c::DCXO, reg: u8) -> Result { + i2c::start(dcxo); + if !i2c::write(dcxo, ADDRESS << 1) { + return Err("Si549 failed to ack write address"); + } + if !i2c::write(dcxo, reg) { + return Err("Si549 failed to ack register"); + } + i2c::stop(dcxo); + + i2c::start(dcxo); + if !i2c::write(dcxo, (ADDRESS << 1) | 1) { + return Err("Si549 failed to ack read address"); + } + let val = i2c::read(dcxo, false); + i2c::stop(dcxo); + Ok(val) +} + +fn setup(dcxo: i2c::DCXO, config: &DividerConfig) -> Result<(), &'static str> { + i2c::init(dcxo)?; + + write(dcxo, 255, 0x00)?; // PAGE + write(dcxo, 69, 0x00)?; // Disable FCAL override. + write(dcxo, 17, 0x00)?; // Synchronously disable output + + // The Si549 has no ID register, so we check that it responds correctly + // by writing values to a RAM-like register and reading them back. + for test_value in 0..255 { + write(dcxo, 23, test_value)?; + let readback = read(dcxo, 23)?; + if readback != test_value { + return Err("Si549 detection failed"); + } + } + + write(dcxo, 23, config.hsdiv as u8)?; + write(dcxo, 24, (config.hsdiv >> 8) as u8 | (config.lsdiv << 4))?; + write(dcxo, 26, config.fbdiv as u8)?; + write(dcxo, 27, (config.fbdiv >> 8) as u8)?; + write(dcxo, 28, (config.fbdiv >> 16) as u8)?; + write(dcxo, 29, (config.fbdiv >> 24) as u8)?; + write(dcxo, 30, (config.fbdiv >> 32) as u8)?; + write(dcxo, 31, (config.fbdiv >> 40) as u8)?; + + write(dcxo, 7, 0x08)?; // Start FCAL + clock::spin_us(30_000); // Internal FCAL VCO calibration + write(dcxo, 17, 0x01)?; // Synchronously enable output + + Ok(()) +} + +pub fn main_setup(settings: &FrequencySetting) -> Result<(), &'static str> { + unsafe { + csr::wrpll::main_dcxo_bitbang_enable_write(1); + csr::wrpll::main_dcxo_i2c_address_write(ADDRESS); + } + + setup(i2c::DCXO::Main, &settings.main)?; + + // Si549 maximum settling time for large frequency change. + clock::spin_us(40_000); + + unsafe { + csr::wrpll::main_dcxo_bitbang_enable_write(0); + } + + info!("Main Si549 started"); + Ok(()) +} + +pub fn helper_setup(settings: &FrequencySetting) -> Result<(), &'static str> { + unsafe { + csr::wrpll::helper_reset_write(1); + csr::wrpll::helper_dcxo_bitbang_enable_write(1); + csr::wrpll::helper_dcxo_i2c_address_write(ADDRESS); + } + + setup(i2c::DCXO::Helper, &settings.helper)?; + + // Si549 maximum settling time for large frequency change. 
+ clock::spin_us(40_000); + + unsafe { + csr::wrpll::helper_reset_write(0); + csr::wrpll::helper_dcxo_bitbang_enable_write(0); + } + info!("Helper Si549 started"); + Ok(()) +} + +fn set_adpll(dcxo: i2c::DCXO, adpll: i32) -> Result<(), &'static str> { + if adpll.abs() > ADPLL_MAX { + return Err("adpll is too large"); + } + + match dcxo { + i2c::DCXO::Main => unsafe { + if csr::wrpll::main_dcxo_bitbang_enable_read() == 1 { + return Err("Main si549 bitbang mode is active when using gateware i2c"); + } + + while csr::wrpll::main_dcxo_adpll_busy_read() == 1 {} + if csr::wrpll::main_dcxo_nack_read() == 1 { + return Err("Main si549 failed to ack adpll write"); + } + + csr::wrpll::main_dcxo_i2c_address_write(ADDRESS); + csr::wrpll::main_dcxo_adpll_write(adpll as u32); + + csr::wrpll::main_dcxo_adpll_stb_write(1); + }, + i2c::DCXO::Helper => unsafe { + if csr::wrpll::helper_dcxo_bitbang_enable_read() == 1 { + return Err("Helper si549 bitbang mode is active when using gateware i2c"); + } + + while csr::wrpll::helper_dcxo_adpll_busy_read() == 1 {} + if csr::wrpll::helper_dcxo_nack_read() == 1 { + return Err("Helper si549 failed to ack adpll write"); + } + + csr::wrpll::helper_dcxo_i2c_address_write(ADDRESS); + csr::wrpll::helper_dcxo_adpll_write(adpll as u32); + + csr::wrpll::helper_dcxo_adpll_stb_write(1); + }, + }; + + Ok(()) +} + From 0ac0e081705028741f6b4d6df2fdcb4becba0f23 Mon Sep 17 00:00:00 2001 From: morgan Date: Fri, 24 May 2024 11:13:51 +0800 Subject: [PATCH 249/296] Firmware: WRPLL wrpll: add tag collector to process gtx & main tags wrpll: add frequency counter to set BASE_ADPLL wrpll: add TAG_OFFSET and calibration for Satman wrpll: add 100MHz & 125MHz fixed point low pass filter wrpll: add main & helper PLL --- artiq/firmware/libboard_artiq/Cargo.toml | 1 + artiq/firmware/libboard_artiq/si549.rs | 399 +++++++++++++++++++++++ 2 files changed, 400 insertions(+) diff --git a/artiq/firmware/libboard_artiq/Cargo.toml b/artiq/firmware/libboard_artiq/Cargo.toml index 8405ee892..f884b0fa0 100644 --- a/artiq/firmware/libboard_artiq/Cargo.toml +++ b/artiq/firmware/libboard_artiq/Cargo.toml @@ -25,3 +25,4 @@ proto_artiq = { path = "../libproto_artiq" } [features] uart_console = [] alloc = [] +calibrate_wrpll_skew = [] diff --git a/artiq/firmware/libboard_artiq/si549.rs b/artiq/firmware/libboard_artiq/si549.rs index 8bf6a6622..4c90c02df 100644 --- a/artiq/firmware/libboard_artiq/si549.rs +++ b/artiq/firmware/libboard_artiq/si549.rs @@ -321,3 +321,402 @@ fn set_adpll(dcxo: i2c::DCXO, adpll: i32) -> Result<(), &'static str> { Ok(()) } +#[cfg(has_wrpll)] +pub mod wrpll { + + use super::*; + + const BEATING_PERIOD: i32 = 0x8000; + const BEATING_HALFPERIOD: i32 = 0x4000; + const COUNTER_WIDTH: u32 = 24; + const DIV_WIDTH: u32 = 2; + + // y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2] + struct FilterParameters { + pub b0: i64, + pub b1: i64, + pub b2: i64, + pub a1: i64, + pub a2: i64, + } + + #[cfg(rtio_frequency = "100.0")] + const LPF: FilterParameters = FilterParameters { + b0: 10905723400, // 0.03967479060647884 * 1 << 38 + b1: 21811446800, // 0.07934958121295768 * 1 << 38 + b2: 10905723400, // 0.03967479060647884 * 1 << 38 + a1: -381134538612, // -1.3865593741228928 * 1 << 38 + a2: 149879525269, // 0.5452585365488082 * 1 << 38 + }; + + #[cfg(rtio_frequency = "125.0")] + const LPF: FilterParameters = FilterParameters { + b0: 19816511911, // 0.07209205036273991 * 1 << 38 + b1: 39633023822, // 0.14418410072547982 * 1 << 38 + b2: 19816511911, // 0.07209205036273991 * 1 << 38 + a1: 
-168062510414, // -0.6114078511562919 * 1 << 38 + a2: -27549348884, // -0.10022394739274834 * 1 << 38 + }; + + static mut H_ADPLL1: i32 = 0; + static mut H_ADPLL2: i32 = 0; + static mut PERIOD_ERR1: i32 = 0; + static mut PERIOD_ERR2: i32 = 0; + + static mut M_ADPLL1: i32 = 0; + static mut M_ADPLL2: i32 = 0; + static mut PHASE_ERR1: i32 = 0; + static mut PHASE_ERR2: i32 = 0; + + static mut BASE_ADPLL: i32 = 0; + + #[derive(Clone, Copy)] + pub enum ISR { + RefTag, + MainTag, + } + + mod tag_collector { + use super::*; + + #[cfg(wrpll_ref_clk = "GT_CDR")] + static mut TAG_OFFSET: u32 = 23890; + #[cfg(wrpll_ref_clk = "SMA_CLKIN")] + static mut TAG_OFFSET: u32 = 0; + static mut REF_TAG: u32 = 0; + static mut REF_TAG_READY: bool = false; + static mut MAIN_TAG: u32 = 0; + static mut MAIN_TAG_READY: bool = false; + + pub fn reset() { + clear_phase_diff_ready(); + unsafe { + REF_TAG = 0; + MAIN_TAG = 0; + } + } + + pub fn clear_phase_diff_ready() { + unsafe { + REF_TAG_READY = false; + MAIN_TAG_READY = false; + } + } + + pub fn collect_tags(interrupt: ISR) { + match interrupt { + ISR::RefTag => unsafe { + REF_TAG = csr::wrpll::ref_tag_read(); + REF_TAG_READY = true; + }, + ISR::MainTag => unsafe { + MAIN_TAG = csr::wrpll::main_tag_read(); + MAIN_TAG_READY = true; + }, + } + } + + pub fn phase_diff_ready() -> bool { + unsafe { REF_TAG_READY && MAIN_TAG_READY } + } + + #[cfg(feature = "calibrate_wrpll_skew")] + pub fn set_tag_offset(offset: u32) { + unsafe { + TAG_OFFSET = offset; + } + } + + #[cfg(feature = "calibrate_wrpll_skew")] + pub fn get_tag_offset() -> u32 { + unsafe { TAG_OFFSET } + } + + pub fn get_period_error() -> i32 { + // n * BEATING_PERIOD - REF_TAG(n) mod BEATING_PERIOD + let mut period_error = unsafe { + REF_TAG + .overflowing_neg() + .0 + .rem_euclid(BEATING_PERIOD as u32) as i32 + }; + // mapping tags from [0, 2π] -> [-π, π] + if period_error > BEATING_HALFPERIOD { + period_error -= BEATING_PERIOD + } + period_error + } + + pub fn get_phase_error() -> i32 { + // MAIN_TAG(n) - REF_TAG(n) - TAG_OFFSET mod BEATING_PERIOD + let mut phase_error = unsafe { + MAIN_TAG + .overflowing_sub(REF_TAG + TAG_OFFSET) + .0 + .rem_euclid(BEATING_PERIOD as u32) as i32 + }; + + // mapping tags from [0, 2π] -> [-π, π] + if phase_error > BEATING_HALFPERIOD { + phase_error -= BEATING_PERIOD + } + phase_error + } + } + + fn set_isr(en: bool) { + let val = if en { 1 } else { 0 }; + unsafe { + csr::wrpll::ref_tag_ev_enable_write(val); + csr::wrpll::main_tag_ev_enable_write(val); + } + } + + fn set_base_adpll() -> Result<(), &'static str> { + let count2adpll = |error: i32| { + ((error as f64 * 1e6) / (0.0001164 * (1 << (COUNTER_WIDTH - DIV_WIDTH)) as f64)) as i32 + }; + + let (ref_count, main_count) = get_freq_counts(); + unsafe { + BASE_ADPLL = count2adpll(ref_count as i32 - main_count as i32); + set_adpll(i2c::DCXO::Main, BASE_ADPLL)?; + set_adpll(i2c::DCXO::Helper, BASE_ADPLL)?; + } + Ok(()) + } + + fn get_freq_counts() -> (u32, u32) { + unsafe { + csr::wrpll::frequency_counter_update_write(1); + while csr::wrpll::frequency_counter_busy_read() == 1 {} + #[cfg(wrpll_ref_clk = "GT_CDR")] + let ref_count = csr::wrpll::frequency_counter_counter_rtio_rx0_read(); + #[cfg(wrpll_ref_clk = "SMA_CLKIN")] + let ref_count = csr::wrpll::frequency_counter_counter_ref_read(); + let main_count = csr::wrpll::frequency_counter_counter_sys_read(); + + (ref_count, main_count) + } + } + + fn reset_plls() -> Result<(), &'static str> { + unsafe { + H_ADPLL1 = 0; + H_ADPLL2 = 0; + PERIOD_ERR1 = 0; + PERIOD_ERR2 = 0; + M_ADPLL1 
= 0; + M_ADPLL2 = 0; + PHASE_ERR1 = 0; + PHASE_ERR2 = 0; + } + set_adpll(i2c::DCXO::Main, 0)?; + set_adpll(i2c::DCXO::Helper, 0)?; + // wait for adpll to transfer and DCXO to settle + clock::spin_us(200); + Ok(()) + } + + fn clear_pending(interrupt: ISR) { + match interrupt { + ISR::RefTag => unsafe { csr::wrpll::ref_tag_ev_pending_write(1) }, + ISR::MainTag => unsafe { csr::wrpll::main_tag_ev_pending_write(1) }, + }; + } + + fn is_pending(interrupt: ISR) -> bool { + match interrupt { + ISR::RefTag => unsafe { csr::wrpll::ref_tag_ev_pending_read() == 1 }, + ISR::MainTag => unsafe { csr::wrpll::main_tag_ev_pending_read() == 1 }, + } + } + + pub fn interrupt_handler() { + if is_pending(ISR::RefTag) { + tag_collector::collect_tags(ISR::RefTag); + clear_pending(ISR::RefTag); + helper_pll().expect("failed to run helper DCXO PLL"); + } + + if is_pending(ISR::MainTag) { + tag_collector::collect_tags(ISR::MainTag); + clear_pending(ISR::MainTag); + } + + if tag_collector::phase_diff_ready() { + main_pll().expect("failed to run main DCXO PLL"); + tag_collector::clear_phase_diff_ready(); + } + } + + fn helper_pll() -> Result<(), &'static str> { + let period_err = tag_collector::get_period_error(); + unsafe { + let adpll = (((LPF.b0 * period_err as i64) + + (LPF.b1 * PERIOD_ERR1 as i64) + + (LPF.b2 * PERIOD_ERR2 as i64) + - (LPF.a1 * H_ADPLL1 as i64) + - (LPF.a2 * H_ADPLL2 as i64)) + >> 38) as i32; + set_adpll(i2c::DCXO::Helper, BASE_ADPLL + adpll)?; + H_ADPLL2 = H_ADPLL1; + PERIOD_ERR2 = PERIOD_ERR1; + H_ADPLL1 = adpll; + PERIOD_ERR1 = period_err; + }; + Ok(()) + } + + fn main_pll() -> Result<(), &'static str> { + let phase_err = tag_collector::get_phase_error(); + unsafe { + let adpll = (((LPF.b0 * phase_err as i64) + + (LPF.b1 * PHASE_ERR1 as i64) + + (LPF.b2 * PHASE_ERR2 as i64) + - (LPF.a1 * M_ADPLL1 as i64) + - (LPF.a2 * M_ADPLL2 as i64)) + >> 38) as i32; + set_adpll(i2c::DCXO::Main, BASE_ADPLL + adpll)?; + M_ADPLL2 = M_ADPLL1; + PHASE_ERR2 = PHASE_ERR1; + M_ADPLL1 = adpll; + PHASE_ERR1 = phase_err; + }; + Ok(()) + } + + #[cfg(wrpll_ref_clk = "GT_CDR")] + fn test_skew() -> Result<(), &'static str> { + // wait for PLL to stabilize + clock::spin_us(20_000); + + info!("testing the skew of SYS CLK..."); + if has_timing_error() { + return Err("the skew cannot satisfy setup/hold time constraint of RX synchronizer"); + } + info!("the skew of SYS CLK met the timing constraint"); + Ok(()) + } + + #[cfg(wrpll_ref_clk = "GT_CDR")] + fn has_timing_error() -> bool { + unsafe { + csr::wrpll_skewtester::error_write(1); + } + clock::spin_us(5_000); + unsafe { csr::wrpll_skewtester::error_read() == 1 } + } + + #[cfg(feature = "calibrate_wrpll_skew")] + fn find_edge(target: bool) -> Result { + const STEP: u32 = 8; + const STABLE_THRESHOLD: u32 = 10; + + enum FSM { + Init, + WaitEdge, + GotEdge, + } + + let mut state: FSM = FSM::Init; + let mut offset: u32 = tag_collector::get_tag_offset(); + let mut median_edge: u32 = 0; + let mut stable_counter: u32 = 0; + + for _ in 0..(BEATING_PERIOD as u32 / STEP) as usize { + tag_collector::set_tag_offset(offset); + offset += STEP; + // wait for PLL to stabilize + clock::spin_us(20_000); + + let error = has_timing_error(); + // A median edge deglitcher + match state { + FSM::Init => { + if error != target { + stable_counter += 1; + } else { + stable_counter = 0; + } + + if stable_counter >= STABLE_THRESHOLD { + state = FSM::WaitEdge; + stable_counter = 0; + } + } + FSM::WaitEdge => { + if error == target { + state = FSM::GotEdge; + median_edge = offset; + } + } + 
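+                // edge found: if the reading flips back, push the candidate edge forward
+                // and restart the count; accept it once STABLE_THRESHOLD consecutive
+                // readings still match the target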
FSM::GotEdge => { + if error != target { + median_edge += STEP; + stable_counter = 0; + } else { + stable_counter += 1; + } + + if stable_counter >= STABLE_THRESHOLD { + return Ok(median_edge); + } + } + } + } + return Err("failed to find timing error edge"); + } + + #[cfg(feature = "calibrate_wrpll_skew")] + fn calibrate_skew() -> Result<(), &'static str> { + info!("calibrating skew to meet timing constraint..."); + + // clear calibrated value + tag_collector::set_tag_offset(0); + let rising = find_edge(true)? as i32; + let falling = find_edge(false)? as i32; + + let width = BEATING_PERIOD - (falling - rising); + let result = falling + width / 2; + tag_collector::set_tag_offset(result as u32); + + info!( + "calibration successful, error zone: {} -> {}, width: {} ({}deg), middle of working region: {}", + rising, + falling, + width, + 360 * width / BEATING_PERIOD, + result, + ); + + Ok(()) + } + + pub fn select_recovered_clock(rc: bool) { + set_isr(false); + + if rc { + tag_collector::reset(); + reset_plls().expect("failed to reset main and helper PLL"); + + // get within capture range + set_base_adpll().expect("failed to set base adpll"); + + // clear gateware pending flag + clear_pending(ISR::RefTag); + clear_pending(ISR::MainTag); + + // use nFIQ to avoid IRQ being disabled by mutex lock and mess up PLL + set_isr(true); + info!("WRPLL interrupt enabled"); + + #[cfg(feature = "calibrate_wrpll_skew")] + calibrate_skew().expect("failed to set the correct skew"); + + #[cfg(wrpll_ref_clk = "GT_CDR")] + test_skew().expect("skew test failed"); + } + } +} + From a10dd0520c5fc5458c209ac4c21da959c319869e Mon Sep 17 00:00:00 2001 From: morgan Date: Fri, 24 May 2024 11:16:15 +0800 Subject: [PATCH 250/296] Firmware: satman WRPLL satman: enable WRPLL interrupt satman: add WRPLL interrupt handler satman: add main & helper si549 setup satman: add WRPLL select_recovered_clock --- artiq/firmware/satman/main.rs | 85 ++++++++++++++++++++++++++++------- 1 file changed, 70 insertions(+), 15 deletions(-) diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index f865ee406..335c353df 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -17,6 +17,8 @@ use core::convert::TryFrom; use board_misoc::{csr, ident, clock, uart_logger, i2c, pmp}; #[cfg(has_si5324)] use board_artiq::si5324; +#[cfg(has_si549)] +use board_artiq::si549; #[cfg(soc_platform = "kasli")] use board_misoc::irq; use board_artiq::{spi, drtioaux, drtio_routing}; @@ -597,6 +599,36 @@ const SI5324_SETTINGS: si5324::FrequencySettings crystal_as_ckin2: true }; +#[cfg(all(has_si549, rtio_frequency = "125.0"))] +const SI549_SETTINGS: si549::FrequencySetting = si549::FrequencySetting { + main: si549::DividerConfig { + hsdiv: 0x058, + lsdiv: 0, + fbdiv: 0x04815791F25, + }, + helper: si549::DividerConfig { + // 125MHz*32767/32768 + hsdiv: 0x058, + lsdiv: 0, + fbdiv: 0x04814E8F442, + }, +}; + +#[cfg(all(has_si549, rtio_frequency = "100.0"))] +const SI549_SETTINGS: si549::FrequencySetting = si549::FrequencySetting { + main: si549::DividerConfig { + hsdiv: 0x06C, + lsdiv: 0, + fbdiv: 0x046C5F49797, + }, + helper: si549::DividerConfig { + // 100MHz*32767/32768 + hsdiv: 0x06C, + lsdiv: 0, + fbdiv: 0x046C5670BBD, + }, +}; + #[cfg(not(soc_platform = "efc"))] fn sysclk_setup() { let switched = unsafe { @@ -609,6 +641,9 @@ fn sysclk_setup() { else { #[cfg(has_si5324)] si5324::setup(&SI5324_SETTINGS, si5324::Input::Ckin1).expect("cannot initialize Si5324"); + #[cfg(has_si549)] + 
si549::main_setup(&SI549_SETTINGS).expect("cannot initialize main Si549"); + info!("Switching sys clock, rebooting..."); // delay for clean UART log, wait until UART FIFO is empty clock::spin_us(3000); @@ -634,6 +669,8 @@ pub extern fn main() -> i32 { } #[cfg(soc_platform = "kasli")] irq::enable_interrupts(); + #[cfg(has_wrpll)] + irq::enable(csr::WRPLL_INTERRUPT); clock::init(); uart_logger::ConsoleLogger::register(); @@ -669,6 +706,9 @@ pub extern fn main() -> i32 { #[cfg(not(soc_platform = "efc"))] sysclk_setup(); + #[cfg(has_si549)] + si549::helper_setup(&SI549_SETTINGS).expect("cannot initialize helper Si549"); + #[cfg(soc_platform = "efc")] let mut io_expander; #[cfg(soc_platform = "efc")] @@ -758,6 +798,9 @@ pub extern fn main() -> i32 { si5324::siphaser::calibrate_skew().expect("failed to calibrate skew"); } + #[cfg(has_wrpll)] + si549::wrpll::select_recovered_clock(true); + // various managers created here, so when link is dropped, DMA traces, // analyzer logs, kernels are cleared and/or stopped for a clean slate // on subsequent connections, without a manual intervention. @@ -825,6 +868,8 @@ pub extern fn main() -> i32 { info!("uplink is down, switching to local oscillator clock"); #[cfg(has_si5324)] si5324::siphaser::select_recovered_clock(false).expect("failed to switch clocks"); + #[cfg(has_wrpll)] + si549::wrpll::select_recovered_clock(false); } } @@ -864,23 +909,33 @@ fn enable_error_led() { pub extern fn exception(_regs: *const u32) { let pc = mepc::read(); let cause = mcause::read().cause(); - - fn hexdump(addr: u32) { - let addr = (addr - addr % 4) as *const u32; - let mut ptr = addr; - println!("@ {:08p}", ptr); - for _ in 0..4 { - print!("+{:04x}: ", ptr as usize - addr as usize); - print!("{:08x} ", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); - print!("{:08x} ", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); - print!("{:08x} ", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); - print!("{:08x}\n", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); + match cause { + mcause::Trap::Interrupt(_source) => { + #[cfg(has_wrpll)] + if irq::is_pending(csr::WRPLL_INTERRUPT) { + si549::wrpll::interrupt_handler(); + } + }, + + mcause::Trap::Exception(e) => { + fn hexdump(addr: u32) { + let addr = (addr - addr % 4) as *const u32; + let mut ptr = addr; + println!("@ {:08p}", ptr); + for _ in 0..4 { + print!("+{:04x}: ", ptr as usize - addr as usize); + print!("{:08x} ", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); + print!("{:08x} ", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); + print!("{:08x} ", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); + print!("{:08x}\n", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); + } + } + + hexdump(u32::try_from(pc).unwrap()); + let mtval = mtval::read(); + panic!("exception {:?} at PC 0x{:x}, trap value 0x{:x}", e, u32::try_from(pc).unwrap(), mtval) } } - - hexdump(u32::try_from(pc).unwrap()); - let mtval = mtval::read(); - panic!("exception {:?} at PC 0x{:x}, trap value 0x{:x}", cause, u32::try_from(pc).unwrap(), mtval) } #[no_mangle] From eefc07b495882894e03c067e7ada38fdb1c2826d Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Tue, 28 May 2024 17:30:23 +0800 Subject: [PATCH 251/296] flake: move to nixos 24.05 --- flake.lock | 8 ++++---- flake.nix | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/flake.lock b/flake.lock index 2c8d3ee78..14f9278fd 100644 --- a/flake.lock +++ b/flake.lock @@ -60,16 +60,16 @@ }, "nixpkgs": { "locked": { - "lastModified": 1715534503, - "narHash": 
"sha256-5ZSVkFadZbFP1THataCaSf0JH2cAH3S29hU9rrxTEqk=", + "lastModified": 1716542732, + "narHash": "sha256-0Y9fRr0CUqWT4KgBITmaGwlnNIGMYuydu2L8iLTfHU4=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "2057814051972fa1453ddfb0d98badbea9b83c06", + "rev": "d12251ef6e8e6a46e05689eeccd595bdbd3c9e60", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-unstable", + "ref": "nixos-24.05", "repo": "nixpkgs", "type": "github" } diff --git a/flake.nix b/flake.nix index 000962e3b..a73a98912 100644 --- a/flake.nix +++ b/flake.nix @@ -1,7 +1,7 @@ { description = "A leading-edge control system for quantum information experiments"; - inputs.nixpkgs.url = github:NixOS/nixpkgs/nixos-unstable; + inputs.nixpkgs.url = github:NixOS/nixpkgs/nixos-24.05; inputs.mozilla-overlay = { url = github:mozilla/nixpkgs-mozilla; flake = false; }; inputs.sipyco.url = github:m-labs/sipyco; inputs.sipyco.inputs.nixpkgs.follows = "nixpkgs"; From 77c50324ef6267b7de69d1a125e755701ad8c6e4 Mon Sep 17 00:00:00 2001 From: morgan Date: Wed, 29 May 2024 11:01:43 +0800 Subject: [PATCH 252/296] flake: update libfringe hash --- flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index a73a98912..74a01c561 100644 --- a/flake.nix +++ b/flake.nix @@ -239,7 +239,7 @@ cargoDeps = rustPlatform.importCargoLock { lockFile = ./artiq/firmware/Cargo.lock; outputHashes = { - "fringe-1.2.1" = "sha256-m4rzttWXRlwx53LWYpaKuU5AZe4GSkbjHS6oINt5d3Y="; + "fringe-1.2.1" = "sha256-u7NyZBzGrMii79V+Xs4Dx9tCpiby6p8IumkUl7oGBm0="; "tar-no-std-0.1.8" = "sha256-xm17108v4smXOqxdLvHl9CxTCJslmeogjm4Y87IXFuM="; }; }; From dad62c1aec4764f1194ac29800efec89c073b642 Mon Sep 17 00:00:00 2001 From: morgan Date: Wed, 29 May 2024 11:21:37 +0800 Subject: [PATCH 253/296] io_expander: fix efc shuttler compilation error --- artiq/firmware/libboard_misoc/io_expander.rs | 39 ++++++++++---------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/artiq/firmware/libboard_misoc/io_expander.rs b/artiq/firmware/libboard_misoc/io_expander.rs index 4bc84e5cf..bce02cf94 100644 --- a/artiq/firmware/libboard_misoc/io_expander.rs +++ b/artiq/firmware/libboard_misoc/io_expander.rs @@ -10,25 +10,6 @@ struct Registers { gpiob: u8, // Output Port 1 } -#[cfg(has_si549)] -const IODIR_CLK_SEL: u8 = 0x80; // out -#[cfg(has_si5324)] -const IODIR_CLK_SEL: u8 = 0x00; // in - -#[cfg(has_si549)] -const CLK_SEL_OUT: u8 = 1 << 7; -#[cfg(has_si5324)] -const CLK_SEL_OUT: u8 = 0; - -const IODIR0 : [u8; 2] = [ - 0xFF, - 0xFF & !IODIR_CLK_SEL -]; - -const OUT_TAR0 : [u8; 2] = [ - 0, - CLK_SEL_OUT -]; pub struct IoExpander { busno: u8, port: u8, @@ -46,6 +27,26 @@ impl IoExpander { const VIRTUAL_LED_MAPPING0: [(u8, u8, u8); 2] = [(0, 0, 6), (1, 1, 6)]; const VIRTUAL_LED_MAPPING1: [(u8, u8, u8); 2] = [(2, 0, 6), (3, 1, 6)]; + #[cfg(has_si549)] + const IODIR_CLK_SEL: u8 = 0x80; // out + #[cfg(has_si5324)] + const IODIR_CLK_SEL: u8 = 0x00; // in + + #[cfg(has_si549)] + const CLK_SEL_OUT: u8 = 1 << 7; + #[cfg(has_si5324)] + const CLK_SEL_OUT: u8 = 0; + + const IODIR0 : [u8; 2] = [ + 0xFF, + 0xFF & !IODIR_CLK_SEL + ]; + + const OUT_TAR0 : [u8; 2] = [ + 0, + CLK_SEL_OUT + ]; + // Both expanders on SHARED I2C bus let mut io_expander = match index { 0 => IoExpander { From d5b1f04dcc7b27ae9868f6ded0068789976469b4 Mon Sep 17 00:00:00 2001 From: morgan Date: Tue, 28 May 2024 12:50:13 +0800 Subject: [PATCH 254/296] Gateware: frequency multiplier for WRPLL wrpll: add mmcm with DRP to generate 125Mhz refclk --- artiq/gateware/wrpll/wrpll.py | 63 
+++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/artiq/gateware/wrpll/wrpll.py b/artiq/gateware/wrpll/wrpll.py index ef5a15290..992f41612 100644 --- a/artiq/gateware/wrpll/wrpll.py +++ b/artiq/gateware/wrpll/wrpll.py @@ -171,3 +171,66 @@ class WRPLL(Module, AutoCSR): ] self.submodules.ev = SharedIRQ(self.ref_tag_ev, self.main_tag_ev) + +class FrequencyMultiplier(Module, AutoCSR): + def __init__(self, clkin): + clkin_se = Signal() + mmcm_locked = Signal() + mmcm_fb_clk = Signal() + ref_clk = Signal() + self.clock_domains.cd_ref = ClockDomain() + self.refclk_reset = CSRStorage(reset=1) + + self.mmcm_bypass = CSRStorage() + self.mmcm_locked = CSRStatus() + self.mmcm_reset = CSRStorage(reset=1) + + self.mmcm_daddr = CSRStorage(7) + self.mmcm_din = CSRStorage(16) + self.mmcm_dwen = CSRStorage() + self.mmcm_den = CSRStorage() + self.mmcm_dclk = CSRStorage() + self.mmcm_dout = CSRStatus(16) + self.mmcm_dready = CSRStatus() + + # # # + + self.specials += [ + Instance("IBUFDS", + i_I=clkin.p, i_IB=clkin.n, + o_O=clkin_se), + # MMCME2 is capable to accept 10MHz input while PLLE2 only support down to 19MHz input (DS191) + # The MMCME2 can be reconfiged during runtime using the Dynamic Reconfiguration Ports + Instance("MMCME2_ADV", + p_BANDWIDTH="HIGH", # lower output jitter (see https://support.xilinx.com/s/question/0D52E00006iHqRqSAK) + o_LOCKED=self.mmcm_locked.status, + i_RST=self.mmcm_reset.storage, + + p_CLKIN1_PERIOD=8, # ns + i_CLKIN1=clkin_se, + i_CLKINSEL=1, # 1=CLKIN1 0=CLKIN2 + + # VCO @ 1.25GHz + p_CLKFBOUT_MULT_F=10, p_DIVCLK_DIVIDE=1, + i_CLKFBIN=mmcm_fb_clk, o_CLKFBOUT=mmcm_fb_clk, + + # 125MHz for WRPLL + p_CLKOUT0_DIVIDE_F=10, p_CLKOUT0_PHASE=0.0, o_CLKOUT0=ref_clk, + + # Dynamic Reconfiguration Ports + i_DADDR = self.mmcm_daddr.storage, + i_DI = self.mmcm_din.storage, + i_DWE = self.mmcm_dwen.storage, + i_DEN = self.mmcm_den.storage, + i_DCLK = self.mmcm_dclk.storage, + o_DO = self.mmcm_dout.status, + o_DRDY = self.mmcm_dready.status + ), + Instance("BUFGMUX", + i_I0=ref_clk, + i_I1=clkin_se, + i_S=self.mmcm_bypass.storage, + o_O=self.cd_ref.clk + ), + AsyncResetSynchronizer(self.cd_ref, self.refclk_reset.storage), + ] From c5147d7744f77cc9ac8632186ca5a1e83bdc4e03 Mon Sep 17 00:00:00 2001 From: morgan Date: Tue, 28 May 2024 12:49:45 +0800 Subject: [PATCH 255/296] Gateware: kasli runtime WRPLL setup kasli: use enable_wrpll from json to switch from si5324 to si549 kasli: add wrpll kasli: add wrpll interrupt kasli: add clk_synth_se kasli: add wrpll_refclk for runtime kasli: add WRPLL_REF_CLK config for firmware --- artiq/gateware/targets/kasli.py | 58 ++++++++++++++++++++++++++++----- 1 file changed, 49 insertions(+), 9 deletions(-) diff --git a/artiq/gateware/targets/kasli.py b/artiq/gateware/targets/kasli.py index 619cdb90d..4a1bece55 100755 --- a/artiq/gateware/targets/kasli.py +++ b/artiq/gateware/targets/kasli.py @@ -57,7 +57,7 @@ class StandaloneBase(MiniSoC, AMPSoC): } mem_map.update(MiniSoC.mem_map) - def __init__(self, gateware_identifier_str=None, hw_rev="v2.0", **kwargs): + def __init__(self, gateware_identifier_str=None, with_wrpll=False, hw_rev="v2.0", **kwargs): if hw_rev in ("v1.0", "v1.1"): cpu_bus_width = 32 else: @@ -83,7 +83,6 @@ class StandaloneBase(MiniSoC, AMPSoC): self.submodules.error_led = gpio.GPIOOut(Cat( self.platform.request("error_led"))) self.csr_devices.append("error_led") - self.submodules += SMAClkinForward(self.platform) cdr_clk_out = self.platform.request("cdr_clk_clean") else: cdr_clk_out = 
self.platform.request("si5324_clkout") @@ -105,12 +104,30 @@ class StandaloneBase(MiniSoC, AMPSoC): self.crg.configure(cdr_clk_buf) + if with_wrpll: + clk_synth = self.platform.request("cdr_clk_clean_fabric") + clk_synth_se = Signal() + self.platform.add_period_constraint(clk_synth.p, 8.0) + self.specials += Instance("IBUFGDS", p_DIFF_TERM="TRUE", p_IBUF_LOW_PWR="FALSE", i_I=clk_synth.p, i_IB=clk_synth.n, o_O=clk_synth_se) + self.submodules.wrpll_refclk = wrpll.FrequencyMultiplier(self.platform.request("sma_clkin")) + self.submodules.wrpll = wrpll.WRPLL( + platform=self.platform, + cd_ref=self.wrpll_refclk.cd_ref, + main_clk_se=clk_synth_se) + self.csr_devices.append("wrpll_refclk") + self.csr_devices.append("wrpll") + self.interrupt_devices.append("wrpll") + self.config["HAS_SI549"] = None + self.config["WRPLL_REF_CLK"] = "SMA_CLKIN" + else: + self.submodules += SMAClkinForward(self.platform) + self.config["HAS_SI5324"] = None + self.config["SI5324_SOFT_RESET"] = None + i2c = self.platform.request("i2c") self.submodules.i2c = gpio.GPIOTristate([i2c.scl, i2c.sda]) self.csr_devices.append("i2c") self.config["I2C_BUS_COUNT"] = 1 - self.config["HAS_SI5324"] = None - self.config["SI5324_SOFT_RESET"] = None def add_rtio(self, rtio_channels, sed_lanes=8): fix_serdes_timing_path(self.platform) @@ -147,7 +164,7 @@ class MasterBase(MiniSoC, AMPSoC): } mem_map.update(MiniSoC.mem_map) - def __init__(self, rtio_clk_freq=125e6, enable_sata=False, gateware_identifier_str=None, hw_rev="v2.0", **kwargs): + def __init__(self, rtio_clk_freq=125e6, enable_sata=False, with_wrpll=False, gateware_identifier_str=None, hw_rev="v2.0", **kwargs): if hw_rev in ("v1.0", "v1.1"): cpu_bus_width = 32 else: @@ -173,14 +190,33 @@ class MasterBase(MiniSoC, AMPSoC): self.submodules.error_led = gpio.GPIOOut(Cat( self.platform.request("error_led"))) self.csr_devices.append("error_led") - self.submodules += SMAClkinForward(platform) i2c = self.platform.request("i2c") self.submodules.i2c = gpio.GPIOTristate([i2c.scl, i2c.sda]) self.csr_devices.append("i2c") self.config["I2C_BUS_COUNT"] = 1 - self.config["HAS_SI5324"] = None - self.config["SI5324_SOFT_RESET"] = None + + if with_wrpll: + clk_synth = platform.request("cdr_clk_clean_fabric") + clk_synth_se = Signal() + platform.add_period_constraint(clk_synth.p, 8.0) + self.specials += Instance("IBUFGDS", p_DIFF_TERM="TRUE", p_IBUF_LOW_PWR="FALSE", i_I=clk_synth.p, i_IB=clk_synth.n, o_O=clk_synth_se) + self.submodules.wrpll_refclk = wrpll.FrequencyMultiplier(platform.request("sma_clkin")) + self.submodules.wrpll = wrpll.WRPLL( + platform=self.platform, + cd_ref=self.wrpll_refclk.cd_ref, + main_clk_se=clk_synth_se) + self.csr_devices.append("wrpll_refclk") + self.csr_devices.append("wrpll") + self.interrupt_devices.append("wrpll") + self.config["HAS_SI549"] = None + self.config["WRPLL_REF_CLK"] = "SMA_CLKIN" + else: + if platform.hw_rev == "v2.0": + self.submodules += SMAClkinForward(self.platform) + self.config["HAS_SI5324"] = None + self.config["SI5324_SOFT_RESET"] = None + self.config["RTIO_FREQUENCY"] = str(rtio_clk_freq/1e6) drtio_data_pads = [] @@ -624,7 +660,10 @@ class GenericStandalone(StandaloneBase): if hw_rev is None: hw_rev = description["hw_rev"] self.class_name_override = description["variant"] - StandaloneBase.__init__(self, hw_rev=hw_rev, **kwargs) + StandaloneBase.__init__(self, + hw_rev=hw_rev, + with_wrpll=description["enable_wrpll"], + **kwargs) self.config["RTIO_FREQUENCY"] = "{:.1f}".format(description["rtio_frequency"]/1e6) if "ext_ref_frequency" in 
description: self.config["SI5324_EXT_REF"] = None @@ -679,6 +718,7 @@ class GenericMaster(MasterBase): rtio_clk_freq=description["rtio_frequency"], enable_sata=description["enable_sata_drtio"], enable_sys5x=has_drtio_over_eem, + with_wrpll=description["enable_wrpll"], **kwargs) if "ext_ref_frequency" in description: self.config["SI5324_EXT_REF"] = None From 44cfacf2c48b1f5685d50a422ca991482f886d39 Mon Sep 17 00:00:00 2001 From: morgan Date: Fri, 24 May 2024 11:15:22 +0800 Subject: [PATCH 256/296] Firmware: frequency multipler for WRPLL si549: add bit bang mmcm dynamic configuration si549: add 125Mhz wrpll refclk setup --- artiq/firmware/libboard_artiq/si549.rs | 140 +++++++++++++++++++++++++ 1 file changed, 140 insertions(+) diff --git a/artiq/firmware/libboard_artiq/si549.rs b/artiq/firmware/libboard_artiq/si549.rs index 4c90c02df..f610e9a6d 100644 --- a/artiq/firmware/libboard_artiq/si549.rs +++ b/artiq/firmware/libboard_artiq/si549.rs @@ -720,3 +720,143 @@ pub mod wrpll { } } +#[cfg(has_wrpll_refclk)] +pub mod wrpll_refclk { + use super::*; + + pub struct MmcmSetting { + pub clkout0_reg1: u16, //0x08 + pub clkout0_reg2: u16, //0x09 + pub clkfbout_reg1: u16, //0x14 + pub clkfbout_reg2: u16, //0x15 + pub div_reg: u16, //0x16 + pub lock_reg1: u16, //0x18 + pub lock_reg2: u16, //0x19 + pub lock_reg3: u16, //0x1A + pub power_reg: u16, //0x28 + pub filt_reg1: u16, //0x4E + pub filt_reg2: u16, //0x4F + } + + fn one_clock_cycle() { + unsafe { + csr::wrpll_refclk::mmcm_dclk_write(1); + csr::wrpll_refclk::mmcm_dclk_write(0); + } + } + + fn set_addr(address: u8) { + unsafe { + csr::wrpll_refclk::mmcm_daddr_write(address); + } + } + + fn set_data(value: u16) { + unsafe { + csr::wrpll_refclk::mmcm_din_write(value); + } + } + + fn set_enable(en: bool) { + let val = if en { 1 } else { 0 }; + unsafe { + csr::wrpll_refclk::mmcm_den_write(val); + } + } + + fn set_write_enable(en: bool) { + let val = if en { 1 } else { 0 }; + unsafe { + csr::wrpll_refclk::mmcm_dwen_write(val); + } + } + + fn get_data() -> u16 { + unsafe { csr::wrpll_refclk::mmcm_dout_read() } + } + + fn drp_ready() -> bool { + unsafe { csr::wrpll_refclk::mmcm_dready_read() == 1 } + } + + #[allow(dead_code)] + fn read(address: u8) -> u16 { + set_addr(address); + set_enable(true); + // Set DADDR on the mmcm and assert DEN for one clock cycle + one_clock_cycle(); + + set_enable(false); + while !drp_ready() { + // keep the clock signal until data is ready + one_clock_cycle(); + } + get_data() + } + + fn write(address: u8, value: u16) { + set_addr(address); + set_data(value); + set_write_enable(true); + set_enable(true); + // Set DADDR, DI on the mmcm and assert DWE, DEN for one clock cycle + one_clock_cycle(); + + set_write_enable(false); + set_enable(false); + while !drp_ready() { + // keep the clock signal until write is finished + one_clock_cycle(); + } + } + + fn reset(rst: bool) { + let val = if rst { 1 } else { 0 }; + unsafe { + csr::wrpll_refclk::mmcm_reset_write(val) + } + } + + pub fn setup(settings: MmcmSetting, mmcm_bypass: bool) -> Result<(), &'static str> { + unsafe { + csr::wrpll_refclk::refclk_reset_write(1); + } + + if mmcm_bypass { + info!("Bypassing mmcm"); + unsafe { + csr::wrpll_refclk::mmcm_bypass_write(1); + } + } else { + // Based on "DRP State Machine" from XAPP888 + // hold reset HIGH during mmcm config + reset(true); + write(0x08, settings.clkout0_reg1); + write(0x09, settings.clkout0_reg2); + write(0x14, settings.clkfbout_reg1); + write(0x15, settings.clkfbout_reg2); + write(0x16, settings.div_reg); + write(0x18, 
settings.lock_reg1); + write(0x19, settings.lock_reg2); + write(0x1A, settings.lock_reg3); + write(0x28, settings.power_reg); + write(0x4E, settings.filt_reg1); + write(0x4F, settings.filt_reg2); + reset(false); + + // wait for the mmcm to lock + clock::spin_us(100); + + let locked = unsafe { csr::wrpll_refclk::mmcm_locked_read() == 1 }; + if !locked { + return Err("mmcm failed to generate 125MHz ref clock from SMA CLKIN"); + } + } + + unsafe { + csr::wrpll_refclk::refclk_reset_write(0); + } + + Ok(()) + } +} From 49e402780bebba437c6098047ab1dc68eaf5a17c Mon Sep 17 00:00:00 2001 From: morgan Date: Fri, 24 May 2024 11:17:50 +0800 Subject: [PATCH 257/296] Firmware: runtime WRPLL runtime: enable WRPLL interrupt runtime: add WRPLL interrupt handler rtio_clocking: add main si549 setup rtio_clocking: add 125Mhz wrpll refclk & helper si549 setup --- artiq/firmware/runtime/main.rs | 11 +- artiq/firmware/runtime/rtio_clocking.rs | 181 +++++++++++++++++++++++- 2 files changed, 183 insertions(+), 9 deletions(-) diff --git a/artiq/firmware/runtime/main.rs b/artiq/firmware/runtime/main.rs index f1519b646..ca942caa5 100644 --- a/artiq/firmware/runtime/main.rs +++ b/artiq/firmware/runtime/main.rs @@ -44,6 +44,8 @@ use board_artiq::drtioaux; use board_artiq::drtio_routing; use board_artiq::{mailbox, rpc_queue}; use proto_artiq::{mgmt_proto, moninj_proto, rpc_proto, session_proto, kernel_proto}; +#[cfg(has_wrpll)] +use board_artiq::si549; #[cfg(has_drtio_eem)] use board_artiq::drtio_eem; #[cfg(has_rtio_analyzer)] @@ -269,6 +271,8 @@ pub extern fn main() -> i32 { #[cfg(soc_platform = "kasli")] irq::enable_interrupts(); + #[cfg(has_wrpll)] + irq::enable(csr::WRPLL_INTERRUPT); logger_artiq::BufferLogger::new(&mut LOG_BUFFER[..]).register(|| boot::start_user(startup as usize) @@ -304,8 +308,11 @@ pub extern fn exception(regs: *const TrapFrame) { let pc = mepc::read(); let cause = mcause::read().cause(); match cause { - mcause::Trap::Interrupt(source) => { - info!("Called interrupt with {:?}", source); + mcause::Trap::Interrupt(_source) => { + #[cfg(has_wrpll)] + if irq::is_pending(csr::WRPLL_INTERRUPT) { + si549::wrpll::interrupt_handler(); + } }, mcause::Trap::Exception(mcause::Exception::UserEnvCall) => { diff --git a/artiq/firmware/runtime/rtio_clocking.rs b/artiq/firmware/runtime/rtio_clocking.rs index 3f11da950..64236c749 100644 --- a/artiq/firmware/runtime/rtio_clocking.rs +++ b/artiq/firmware/runtime/rtio_clocking.rs @@ -1,8 +1,11 @@ use board_misoc::config; +#[cfg(has_si5324)] use board_artiq::si5324; +#[cfg(has_si549)] +use board_artiq::si549; use board_misoc::{csr, clock}; -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Copy, Clone)] #[allow(non_camel_case_types)] pub enum RtioClock { Default, @@ -89,13 +92,14 @@ pub mod crg { // Si5324 input to select for locking to an external clock (as opposed to // a recovered link clock in DRTIO satellites, which is handled elsewhere). 
-#[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] +#[cfg(all(has_si5324, soc_platform = "kasli", hw_rev = "v2.0"))] const SI5324_EXT_INPUT: si5324::Input = si5324::Input::Ckin1; -#[cfg(all(soc_platform = "kasli", not(hw_rev = "v2.0")))] +#[cfg(all(has_si5324, soc_platform = "kasli", not(hw_rev = "v2.0")))] const SI5324_EXT_INPUT: si5324::Input = si5324::Input::Ckin2; -#[cfg(all(soc_platform = "kc705"))] +#[cfg(all(has_si5324, soc_platform = "kc705"))] const SI5324_EXT_INPUT: si5324::Input = si5324::Input::Ckin2; +#[cfg(has_si5324)] fn setup_si5324_pll(cfg: RtioClock) { let (si5324_settings, si5324_ref_input) = match cfg { RtioClock::Ext0_Synth0_10to125 => { // 125 MHz output from 10 MHz CLKINx reference, 504 Hz BW @@ -214,7 +218,7 @@ fn setup_si5324_pll(cfg: RtioClock) { si5324::setup(&si5324_settings, si5324_ref_input).expect("cannot initialize Si5324"); } -fn setup_si5324(clock_cfg: RtioClock) { +fn sysclk_setup(clock_cfg: RtioClock) { let switched = unsafe { csr::crg::switch_done_read() }; @@ -222,6 +226,8 @@ fn setup_si5324(clock_cfg: RtioClock) { info!("Clocking has already been set up."); return; } + + #[cfg(has_si5324)] match clock_cfg { RtioClock::Ext0_Bypass => { info!("using external RTIO clock with PLL bypass"); @@ -230,7 +236,10 @@ fn setup_si5324(clock_cfg: RtioClock) { _ => setup_si5324_pll(clock_cfg), } - // switch sysclk source to si5324 + #[cfg(has_si549)] + si549::main_setup(&get_si549_setting(clock_cfg)).expect("cannot initialize main Si549"); + + // switch sysclk source #[cfg(not(has_drtio))] { info!("Switching sys clock, rebooting..."); @@ -244,9 +253,153 @@ fn setup_si5324(clock_cfg: RtioClock) { } +#[cfg(all(has_si549, has_wrpll))] +fn wrpll_setup(clk: RtioClock, si549_settings: &si549::FrequencySetting) { + // register values are directly copied from preconfigured mmcm + let (mmcm_setting, mmcm_bypass) = match clk { + RtioClock::Ext0_Synth0_10to125 => ( + si549::wrpll_refclk::MmcmSetting { + // CLKFBOUT_MULT = 62.5, DIVCLK_DIVIDE = 1 , CLKOUT0_DIVIDE = 5 + clkout0_reg1: 0x1083, + clkout0_reg2: 0x0080, + clkfbout_reg1: 0x179e, + clkfbout_reg2: 0x4c00, + div_reg: 0x1041, + lock_reg1: 0x00fa, + lock_reg2: 0x7c01, + lock_reg3: 0xffe9, + power_reg: 0x9900, + filt_reg1: 0x1008, + filt_reg2: 0x8800, + }, + false, + ), + RtioClock::Ext0_Synth0_80to125 => ( + si549::wrpll_refclk::MmcmSetting { + // CLKFBOUT_MULT = 15.625, DIVCLK_DIVIDE = 1 , CLKOUT0_DIVIDE = 10 + clkout0_reg1: 0x1145, + clkout0_reg2: 0x0000, + clkfbout_reg1: 0x11c7, + clkfbout_reg2: 0x5880, + div_reg: 0x1041, + lock_reg1: 0x028a, + lock_reg2: 0x7c01, + lock_reg3: 0xffe9, + power_reg: 0x9900, + filt_reg1: 0x9908, + filt_reg2: 0x8100, + }, + false, + ), + RtioClock::Ext0_Synth0_100to125 => ( + si549::wrpll_refclk::MmcmSetting { + // CLKFBOUT_MULT = 12.5, DIVCLK_DIVIDE = 1 , CLKOUT0_DIVIDE = 10 + clkout0_reg1: 0x1145, + clkout0_reg2: 0x0000, + clkfbout_reg1: 0x1145, + clkfbout_reg2: 0x4c00, + div_reg: 0x1041, + lock_reg1: 0x0339, + lock_reg2: 0x7c01, + lock_reg3: 0xffe9, + power_reg: 0x9900, + filt_reg1: 0x9108, + filt_reg2: 0x0100, + }, + false, + ), + RtioClock::Ext0_Synth0_125to125 => ( + si549::wrpll_refclk::MmcmSetting { + // CLKFBOUT_MULT = 10, DIVCLK_DIVIDE = 1 , CLKOUT0_DIVIDE = 10 + clkout0_reg1: 0x1145, + clkout0_reg2: 0x0000, + clkfbout_reg1: 0x1145, + clkfbout_reg2: 0x0000, + div_reg: 0x1041, + lock_reg1: 0x03e8, + lock_reg2: 0x7001, + lock_reg3: 0xf3e9, + power_reg: 0x0100, + filt_reg1: 0x9908, + filt_reg2: 0x1100, + }, + true, + ), + _ => unreachable!(), + }; + + 
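+    // Sanity check for the presets above (values taken from the comments in
+    // each match arm): the MMCM output frequency is
+    //     f_out = f_in * CLKFBOUT_MULT / (DIVCLK_DIVIDE * CLKOUT0_DIVIDE)
+    // so every supported reference yields the same 125 MHz WRPLL reference:
+    //     10 MHz  * 62.5   / (1 * 5)  = 125 MHz
+    //     80 MHz  * 15.625 / (1 * 10) = 125 MHz
+    //     100 MHz * 12.5   / (1 * 10) = 125 MHz
+    //     125 MHz * 10     / (1 * 10) = 125 MHz (MMCM bypassed, input forwarded as-is)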
si549::helper_setup(&si549_settings).expect("cannot initialize helper Si549"); + si549::wrpll_refclk::setup(mmcm_setting, mmcm_bypass).expect("cannot initialize ref clk for wrpll"); + si549::wrpll::select_recovered_clock(true); +} + +#[cfg(has_si549)] +fn get_si549_setting(clk: RtioClock) -> si549::FrequencySetting { + match clk { + RtioClock::Ext0_Synth0_10to125 => { + info!("using 10MHz reference to make 125MHz RTIO clock with WRPLL"); + } + RtioClock::Ext0_Synth0_80to125 => { + info!("using 80MHz reference to make 125MHz RTIO clock with WRPLL"); + } + RtioClock::Ext0_Synth0_100to125 => { + info!("using 100MHz reference to make 125MHz RTIO clock with WRPLL"); + } + RtioClock::Ext0_Synth0_125to125 => { + info!("using 125MHz reference to make 125MHz RTIO clock with WRPLL"); + } + RtioClock::Int_100 => { + info!("using internal 100MHz RTIO clock"); + } + RtioClock::Int_125 => { + info!("using internal 125MHz RTIO clock"); + } + _ => { + warn!( + "rtio_clock setting '{:?}' is unsupported. Falling back to default internal 125MHz RTIO clock.", + clk + ); + } + }; + + match clk { + RtioClock::Int_100 => { + si549::FrequencySetting { + main: si549::DividerConfig { + hsdiv: 0x06C, + lsdiv: 0, + fbdiv: 0x046C5F49797, + }, + helper: si549::DividerConfig { + // 100MHz*32767/32768 + hsdiv: 0x06C, + lsdiv: 0, + fbdiv: 0x046C5670BBD, + }, + } + } + _ => { + // Everything else use 125MHz + si549::FrequencySetting { + main: si549::DividerConfig { + hsdiv: 0x058, + lsdiv: 0, + fbdiv: 0x04815791F25, + }, + helper: si549::DividerConfig { + // 125MHz*32767/32768 + hsdiv: 0x058, + lsdiv: 0, + fbdiv: 0x04814E8F442, + }, + } + } + } +} + pub fn init() { let clock_cfg = get_rtio_clock_cfg(); - setup_si5324(clock_cfg); + sysclk_setup(clock_cfg); #[cfg(has_drtio)] { @@ -282,4 +435,18 @@ pub fn init() { error!("RTIO clock failed"); } } + + #[cfg(all(has_si549, has_wrpll))] + { + // SYS CLK switch will reset CSRs that are used by WRPLL + match clock_cfg { + RtioClock::Ext0_Synth0_10to125 + | RtioClock::Ext0_Synth0_80to125 + | RtioClock::Ext0_Synth0_100to125 + | RtioClock::Ext0_Synth0_125to125 => { + wrpll_setup(clock_cfg, &get_si549_setting(clock_cfg)); + } + _ => {} + } + } } From 48f3071ee8ff95d8ed74a5f8a01248a3064c0244 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 30 May 2024 16:55:23 +0800 Subject: [PATCH 258/296] adf5356: sync before setting muxout --- artiq/coredevice/adf5356.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/artiq/coredevice/adf5356.py b/artiq/coredevice/adf5356.py index 0bffb91be..38e5b7d6c 100644 --- a/artiq/coredevice/adf5356.py +++ b/artiq/coredevice/adf5356.py @@ -84,10 +84,11 @@ class ADF5356: :param blind: Do not attempt to verify presence. 
""" + self.sync() if not blind: # MUXOUT = VDD self.regs[4] = ADF5356_REG4_MUXOUT_UPDATE(self.regs[4], 1) - self.sync() + self.write(self.regs[4]) delay(1000 * us) if not self.read_muxout(): raise ValueError("MUXOUT not high") @@ -95,7 +96,7 @@ class ADF5356: # MUXOUT = DGND self.regs[4] = ADF5356_REG4_MUXOUT_UPDATE(self.regs[4], 2) - self.sync() + self.write(self.regs[4]) delay(1000 * us) if self.read_muxout(): raise ValueError("MUXOUT not low") @@ -103,8 +104,7 @@ class ADF5356: # MUXOUT = digital lock-detect self.regs[4] = ADF5356_REG4_MUXOUT_UPDATE(self.regs[4], 6) - else: - self.sync() + self.write(self.regs[4]) @kernel def set_att(self, att): From d609ed4a58b97777b100f88fa9a06281312d24e4 Mon Sep 17 00:00:00 2001 From: architeuthis Date: Thu, 30 May 2024 15:14:02 +0800 Subject: [PATCH 259/296] docs: Minor manual fix --- doc/manual/developing.rst | 4 ++-- doc/manual/getting_started_core.rst | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/manual/developing.rst b/doc/manual/developing.rst index 481920993..a2c0d5da4 100644 --- a/doc/manual/developing.rst +++ b/doc/manual/developing.rst @@ -9,14 +9,14 @@ Developing ARTIQ The easiest way to obtain an ARTIQ development environment is via the Nix package manager on Linux. The Nix system is used on the `M-Labs Hydra server `_ to build ARTIQ and its dependencies continuously; it ensures that all build instructions are up-to-date and allows binary packages to be used on developers' machines, in particular for large tools such as the Rust compiler. ARTIQ itself does not depend on Nix, and it is also possible to compile everything from source (look into the ``flake.nix`` file and/or nixpkgs, and run the commands manually) - but Nix makes the process a lot easier. -* Download Vivado from Xilinx and install it (by running the official installer in a FHS chroot environment if using NixOS; the ARTIQ flake provides such an environment). If you do not want to write to ``/opt``, you can install it in a folder of your home directory. The "appropriate" Vivado version to use for building the bitstream can vary. Some versions contain bugs that lead to hidden or visible failures, others work fine. Refer to `Hydra `_ and/or the ``flake.nix`` file from the ARTIQ repository in order to determine which version is used at M-Labs. If the Vivado GUI installer crashes, you may be able to work around the problem by running it in unattended mode with a command such as ``./xsetup -a XilinxEULA,3rdPartyEULA,WebTalkTerms -b Install -e 'Vitis Unified Software Platform' -l /opt/Xilinx/``. +* Download Vivado from Xilinx and install it (by running the official installer in a FHS chroot environment if using NixOS; the ARTIQ flake provides such an environment, which can be entered with the command `vivado-env`). If you do not want to write to ``/opt``, you can install it in a folder of your home directory. The "appropriate" Vivado version to use for building the bitstream can vary. Some versions contain bugs that lead to hidden or visible failures, others work fine. Refer to `Hydra `_ and/or the ``flake.nix`` file from the ARTIQ repository in order to determine which version is used at M-Labs. If the Vivado GUI installer crashes, you may be able to work around the problem by running it in unattended mode with a command such as ``./xsetup -a XilinxEULA,3rdPartyEULA,WebTalkTerms -b Install -e 'Vitis Unified Software Platform' -l /opt/Xilinx/``. 
* During the Vivado installation, uncheck ``Install cable drivers`` (they are not required as we use better and open source alternatives). * Install the `Nix package manager `_, version 2.4 or later. Prefer a single-user installation for simplicity. * If you did not install Vivado in its default location ``/opt``, clone the ARTIQ Git repository and edit ``flake.nix`` accordingly. * Enable flakes in Nix by e.g. adding ``experimental-features = nix-command flakes`` to ``nix.conf`` (for example ``~/.config/nix/nix.conf``). * Clone the ARTIQ Git repository and run ``nix develop`` at the root (where ``flake.nix`` is). * Make the current source code of ARTIQ available to the Python interpreter by running ``export PYTHONPATH=`pwd`:$PYTHONPATH``. -* You can then build the firmware and gateware with a command such as ``$ python -m artiq.gateware.targets.kasli file.json``. +* You can then build the firmware and gateware with a command such as ``$ python -m artiq.gateware.targets.kasli .json``, using a JSON system description file. * Flash the binaries into the FPGA board with a command such as ``$ artiq_flash --srcbuild -d artiq_kasli/``. You need to configure OpenOCD as explained :ref:`in the user section `. OpenOCD is already part of the flake's development environment. * Check that the board boots and examine the UART messages by running a serial terminal program, e.g. ``$ flterm /dev/ttyUSB1`` (``flterm`` is part of MiSoC and installed in the flake's development environment). Leave the terminal running while you are flashing the board, so that you see the startup messages when the board boots immediately after flashing. You can also restart the board (without reflashing it) with ``$ artiq_flash start``. * The communication parameters are 115200 8-N-1. Ensure that your user has access to the serial device (e.g. by adding the user account to the ``dialout`` group). diff --git a/doc/manual/getting_started_core.rst b/doc/manual/getting_started_core.rst index 1ffaccb62..867b5b544 100644 --- a/doc/manual/getting_started_core.rst +++ b/doc/manual/getting_started_core.rst @@ -23,10 +23,10 @@ As a very first step, we will turn on a LED on the core device. Create a file `` The central part of our code is our ``LED`` class, which derives from :class:`artiq.language.environment.EnvExperiment`. Among other features, :class:`~artiq.language.environment.EnvExperiment` calls our :meth:`~artiq.language.environment.Experiment.build` method and provides the :meth:`~artiq.language.environment.HasEnvironment.setattr_device` method that interfaces to the device database to create the appropriate device drivers and make those drivers accessible as ``self.core`` and ``self.led``. The :func:`~artiq.language.core.kernel` decorator (``@kernel``) tells the system that the :meth:`~artiq.language.environment.Experiment.run` method must be compiled for and executed on the core device (instead of being interpreted and executed as regular Python code on the host). The decorator uses ``self.core`` internally, which is why we request the core device using :meth:`~artiq.language.environment.HasEnvironment.setattr_device` like any other. -Copy the file ``device_db.py`` (containing the device database) from the ``examples/master`` folder of ARTIQ into the same directory as ``led.py`` (alternatively, you can use the ``--device-db`` option of ``artiq_run``). You will probably want to set the IP address of the core device in ``device_db.py`` so that the computer can connect to it (it is the ``host`` parameter of the ``comm`` entry). 
See :ref:`device-db` for more information. The example device database is designed for the ``nist_clock`` hardware adapter on the KC705; see :ref:`board-ports` for RTIO channel assignments if you need to adapt the device database to a different hardware platform. +You will need to supply the correct device database for your core device; it is generated by a Python script typically called ``device_db.py`` (see also :ref:`device_db`). If you purchased a system from M-Labs, the device database is provided either on the USB stick or inside ~/artiq on the NUC; otherwise, you can also find examples in the ``examples`` folder of ARTIQ, sorted inside the corresponding subfolder for your core device. Copy ``device_db.py`` into the same directory as ``led.py`` (or use the ``--device-db`` option of ``artiq_run``). The field ``core_addr``, placed at the top of the file, needs to match the IP address of your core device so your computer can communicate with it. If you purchased a pre-assembled system it is normally already set correctly. .. note:: - To obtain the examples, you can find where the ARTIQ package is installed on your machine with: :: + To access the examples, you can find where the ARTIQ package is installed on your machine with: :: python3 -c "import artiq; print(artiq.__path__[0])" From f496e6da7c1efbd85bc5c4993ec5abc00a36610f Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Sat, 1 Jun 2024 11:28:22 +0800 Subject: [PATCH 260/296] flake: update dependencies --- flake.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index 14f9278fd..4433d4a2c 100644 --- a/flake.lock +++ b/flake.lock @@ -60,11 +60,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1716542732, - "narHash": "sha256-0Y9fRr0CUqWT4KgBITmaGwlnNIGMYuydu2L8iLTfHU4=", + "lastModified": 1717144377, + "narHash": "sha256-F/TKWETwB5RaR8owkPPi+SPJh83AQsm6KrQAlJ8v/uA=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "d12251ef6e8e6a46e05689eeccd595bdbd3c9e60", + "rev": "805a384895c696f802a9bf5bf4720f37385df547", "type": "github" }, "original": { From 793f8a3c8c5d2501d0aacbadb2bbc3a0e293e487 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 30 May 2024 11:30:37 +0800 Subject: [PATCH 261/296] legacy almazny: fix missing units, remove dead code --- artiq/coredevice/almazny.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/artiq/coredevice/almazny.py b/artiq/coredevice/almazny.py index 33aaf6b02..01c8f0428 100644 --- a/artiq/coredevice/almazny.py +++ b/artiq/coredevice/almazny.py @@ -1,4 +1,5 @@ from artiq.language.core import kernel, portable +from artiq.language.units import us from numpy import int32 @@ -117,11 +118,6 @@ class AlmaznyLegacy: ) delay(100 * us) - @kernel - def _update_all_registers(self): - for i in range(4): - self._update_register(i) - class AlmaznyChannel: """ From 20c67aca237db56ab772a8a2fe7239e3138c9c05 Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 30 May 2024 11:31:02 +0800 Subject: [PATCH 262/296] sinara_tester: break apart legacyalmazy, add almazny tests --- artiq/frontend/artiq_sinara_tester.py | 71 +++++++++++++++++++++------ 1 file changed, 55 insertions(+), 16 deletions(-) diff --git a/artiq/frontend/artiq_sinara_tester.py b/artiq/frontend/artiq_sinara_tester.py index ba6337cd0..d60a6fa58 100755 --- a/artiq/frontend/artiq_sinara_tester.py +++ b/artiq/frontend/artiq_sinara_tester.py @@ -61,6 +61,7 @@ class SinaraTester(EnvExperiment): self.mirnies = dict() self.suservos = dict() self.suschannels = dict() + 
self.legacy_almaznys = dict() self.almaznys = dict() self.shuttler = dict() @@ -101,6 +102,8 @@ class SinaraTester(EnvExperiment): elif (module, cls) == ("artiq.coredevice.suservo", "Channel"): self.suschannels[name] = self.get_device(name) elif (module, cls) == ("artiq.coredevice.almazny", "AlmaznyLegacy"): + self.legacy_almaznys[name] = self.get_device(name) + elif (module, cls) == ("artiq.coredevice.almazny", "AlmaznyChannel"): self.almaznys[name] = self.get_device(name) elif (module, cls) == ("artiq.coredevice.shuttler", "Config"): shuttler_name = name.replace("_config", "") @@ -370,36 +373,35 @@ class SinaraTester(EnvExperiment): channel.pulse(100*ms) delay(100*ms) @kernel - def init_almazny(self, almazny): + def init_legacy_almazny(self, almazny): self.core.break_realtime() almazny.init() almazny.output_toggle(True) @kernel - def almazny_set_attenuators_mu(self, almazny, ch, atts): + def legacy_almazny_set_attenuators_mu(self, almazny, ch, atts): self.core.break_realtime() almazny.set_att_mu(ch, atts) @kernel - def almazny_set_attenuators(self, almazny, ch, atts): + def legacy_almazny_set_attenuators(self, almazny, ch, atts): self.core.break_realtime() almazny.set_att(ch, atts) @kernel - def almazny_toggle_output(self, almazny, rf_on): + def legacy_almazny_toggle_output(self, almazny, rf_on): self.core.break_realtime() almazny.output_toggle(rf_on) - def test_almaznys(self): - print("*** Testing Almaznys.") - for name, almazny in sorted(self.almaznys.items(), key=lambda x: x[0]): + def test_legacy_almaznys(self): + print("*** Testing legacy Almaznys (v1.1 or older).") + for name, almazny in sorted(self.legacy_almaznys.items(), key=lambda x: x[0]): print(name + "...") print("Initializing Mirny CPLDs...") for name, cpld in sorted(self.mirny_cplds.items(), key=lambda x: x[0]): print(name + "...") self.init_mirny(cpld) print("...done") - print("Testing attenuators. Frequencies:") for card_n, channels in enumerate(chunker(self.mirnies, 4)): for channel_n, (channel_name, channel_dev) in enumerate(channels): @@ -407,31 +409,68 @@ class SinaraTester(EnvExperiment): print("{}\t{}MHz".format(channel_name, frequency*2)) self.setup_mirny(channel_dev, frequency) print("{} info: {}".format(channel_name, channel_dev.info())) - self.init_almazny(almazny) + self.init_legacy_almazny(almazny) print("RF ON, all attenuators ON. Press ENTER when done.") for i in range(4): - self.almazny_set_attenuators_mu(almazny, i, 63) + self.legacy_almazny_set_attenuators_mu(almazny, i, 63) input() print("RF ON, half power attenuators ON. Press ENTER when done.") for i in range(4): - self.almazny_set_attenuators(almazny, i, 15.5) + self.legacy_almazny_set_attenuators(almazny, i, 15.5) input() print("RF ON, all attenuators OFF. Press ENTER when done.") for i in range(4): - self.almazny_set_attenuators(almazny, i, 0) + self.legacy_almazny_set_attenuators(almazny, i, 0) input() print("SR outputs are OFF. Press ENTER when done.") - self.almazny_toggle_output(almazny, False) + self.legacy_almazny_toggle_output(almazny, False) input() print("RF ON, all attenuators are ON. Press ENTER when done.") for i in range(4): - self.almazny_set_attenuators(almazny, i, 31.5) - self.almazny_toggle_output(almazny, True) + self.legacy_almazny_set_attenuators(almazny, i, 31.5) + self.legacy_almazny_toggle_output(almazny, True) input() print("RF OFF. 
Press ENTER when done.") - self.almazny_toggle_output(almazny, False) + self.legacy_almazny_toggle_output(almazny, False) input() + def almazny_led_wave(self, almaznys): + while not is_enter_pressed(): + self.core.break_realtime() + # do not fill the FIFOs too much to avoid long response times + t = now_mu() - self.core.seconds_to_mu(0.2) + while self.core.get_rtio_counter_mu() < t: + pass + for almazny in almaznys: + almazny.set(31.5, False, True) + delay(100*ms) + almazny.set(31.5, False, False) + + def almazny_set_att_all(self, almaznys, att): + for almazny in almaznys: + almazny.set(att, False, True) + + def test_almaznys(self): + print("*** Testing Almaznys (v1.2+).") + print("Initializing Mirny CPLDs...") + for name, cpld in sorted(self.mirny_cplds.items(), key=lambda x: x[0]): + print(name + "...") + self.init_mirny(cpld) + print("...done") + print("Frequencies:") + for card_n, channels in enumerate(chunker(self.mirnies, 4)): + for channel_n, (channel_name, channel_dev) in enumerate(channels): + frequency = 2000 + card_n * 250 + channel_n * 50 + print("{}\t{}MHz".format(channel_name, frequency*2)) + self.setup_mirny(channel_dev, frequency) + print("RF On, half power attenuators ON. Press ENTER when done.") + self.almazny_set_att_all(self.almaznys, 15.5) + input() + print("RF On, all attenuators ON. Press ENTER when done.") + self.almazny_set_att_all(self.almaznys, 31.5) + print("RF Off, testing LEDs. Press ENTER when done.") + self.almazny_led_wave(self.almaznys) + def test_mirnies(self): print("*** Testing Mirny PLLs.") From f1e1e54940822d44bf09482e489d50f6b69f1e5d Mon Sep 17 00:00:00 2001 From: mwojcik Date: Thu, 30 May 2024 14:40:07 +0800 Subject: [PATCH 263/296] sinara_tester: simplify almazny tests --- artiq/frontend/artiq_sinara_tester.py | 78 +++++++++++++++------------ 1 file changed, 43 insertions(+), 35 deletions(-) diff --git a/artiq/frontend/artiq_sinara_tester.py b/artiq/frontend/artiq_sinara_tester.py index d60a6fa58..f273a3d4b 100755 --- a/artiq/frontend/artiq_sinara_tester.py +++ b/artiq/frontend/artiq_sinara_tester.py @@ -384,9 +384,21 @@ class SinaraTester(EnvExperiment): almazny.set_att_mu(ch, atts) @kernel - def legacy_almazny_set_attenuators(self, almazny, ch, atts): - self.core.break_realtime() - almazny.set_att(ch, atts) + def legacy_almazny_att_test(self, almazny): + # change attenuation bit by bit over time for all channels + att_mu = 0 + while not is_enter_pressed(): + self.core.break_realtime() + t = now_mu() - self.core.seconds_to_mu(0.5) + while self.core.get_rtio_counter_mu() < t: + pass + for ch in range(4): + almazny.set_att_mu(ch, att_mu) + delay(250*ms) + if att_mu == 0: + att_mu = 1 + else: + att_mu = (att_mu << 1) & 0x3F @kernel def legacy_almazny_toggle_output(self, almazny, rf_on): @@ -408,32 +420,16 @@ class SinaraTester(EnvExperiment): frequency = 2000 + card_n * 250 + channel_n * 50 print("{}\t{}MHz".format(channel_name, frequency*2)) self.setup_mirny(channel_dev, frequency) - print("{} info: {}".format(channel_name, channel_dev.info())) self.init_legacy_almazny(almazny) - print("RF ON, all attenuators ON. Press ENTER when done.") - for i in range(4): - self.legacy_almazny_set_attenuators_mu(almazny, i, 63) - input() - print("RF ON, half power attenuators ON. Press ENTER when done.") - for i in range(4): - self.legacy_almazny_set_attenuators(almazny, i, 15.5) - input() - print("RF ON, all attenuators OFF. Press ENTER when done.") - for i in range(4): - self.legacy_almazny_set_attenuators(almazny, i, 0) - input() print("SR outputs are OFF. 
Press ENTER when done.") self.legacy_almazny_toggle_output(almazny, False) input() - print("RF ON, all attenuators are ON. Press ENTER when done.") - for i in range(4): - self.legacy_almazny_set_attenuators(almazny, i, 31.5) + print("RF ON, attenuators are tested. Press ENTER when done.") self.legacy_almazny_toggle_output(almazny, True) - input() - print("RF OFF. Press ENTER when done.") + self.legacy_almazny_att_test(almazny) self.legacy_almazny_toggle_output(almazny, False) - input() + @kernel def almazny_led_wave(self, almaznys): while not is_enter_pressed(): self.core.break_realtime() @@ -441,14 +437,29 @@ class SinaraTester(EnvExperiment): t = now_mu() - self.core.seconds_to_mu(0.2) while self.core.get_rtio_counter_mu() < t: pass - for almazny in almaznys: - almazny.set(31.5, False, True) + for ch in almaznys: + ch.set(31.5, False, True) delay(100*ms) - almazny.set(31.5, False, False) + ch.set(31.5, False, False) - def almazny_set_att_all(self, almaznys, att): - for almazny in almaznys: - almazny.set(att, False, True) + @kernel + def almazny_att_test(self, almaznys): + rf_en = 1 + led = 1 + att_mu = 0 + while not is_enter_pressed(): + self.core.break_realtime() + t = now_mu() - self.core.seconds_to_mu(0.2) + while self.core.get_rtio_counter_mu() < t: + pass + setting = led << 7 | rf_en << 6 | (att_mu & 0x3F) + for ch in almaznys: + ch.set_mu(setting) + delay(250*ms) + if att_mu == 0: + att_mu = 1 + else: + att_mu = (att_mu << 1) & 0x3F def test_almaznys(self): print("*** Testing Almaznys (v1.2+).") @@ -463,13 +474,10 @@ class SinaraTester(EnvExperiment): frequency = 2000 + card_n * 250 + channel_n * 50 print("{}\t{}MHz".format(channel_name, frequency*2)) self.setup_mirny(channel_dev, frequency) - print("RF On, half power attenuators ON. Press ENTER when done.") - self.almazny_set_att_all(self.almaznys, 15.5) - input() - print("RF On, all attenuators ON. Press ENTER when done.") - self.almazny_set_att_all(self.almaznys, 31.5) - print("RF Off, testing LEDs. Press ENTER when done.") - self.almazny_led_wave(self.almaznys) + print("RF ON, attenuators are tested. Press ENTER when done.") + self.almazny_att_test([ch for _, ch in self.almaznys.items()]) + print("RF OFF, testing LEDs. Press ENTER when done.") + self.almazny_led_wave([ch for _, ch in self.almaznys.items()]) def test_mirnies(self): print("*** Testing Mirny PLLs.") From d252b12cf6f635a562998c4593f6cde82ec5256c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Mon, 3 Jun 2024 16:16:32 +0800 Subject: [PATCH 264/296] README: remove outdated and unmaintainable Sinara crate count --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index c6014c8d4..01a2f914b 100644 --- a/README.rst +++ b/README.rst @@ -5,7 +5,7 @@ :target: https://m-labs.hk/artiq ARTIQ (Advanced Real-Time Infrastructure for Quantum physics) is a leading-edge control and data acquisition system for quantum information experiments. -It is maintained and developed by `M-Labs `_ and the initial development was for and in partnership with the `Ion Storage Group at NIST `_. ARTIQ is free software and offered to the entire research community as a solution equally applicable to other challenging control tasks, including outside the field of ion trapping. Many laboratories around the world have adopted ARTIQ as their control system, with over a hundred Sinara hardware crates deployed, and some have `contributed `_ to it. 
+It is maintained and developed by `M-Labs `_ and the initial development was for and in partnership with the `Ion Storage Group at NIST `_. ARTIQ is free software and offered to the entire research community as a solution equally applicable to other challenging control tasks, including outside the field of ion trapping. Many laboratories around the world have adopted ARTIQ as their control system and some have `contributed `_ to it. The system features a high-level programming language that helps describing complex experiments, which is compiled and executed on dedicated hardware with nanosecond timing resolution and sub-microsecond latency. It includes graphical user interfaces to parametrize and schedule experiments and to visualize and explore the results. From 9a770c15c57158657322c05a4c5270f068433351 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Bourdeauducq?= Date: Wed, 5 Jun 2024 16:51:23 +0800 Subject: [PATCH 265/296] RELEASE_NOTES: update --- RELEASE_NOTES.rst | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index d0edd2a60..1a93d91c7 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -28,12 +28,16 @@ Highlights: clock, to facilitate implementation of local processing on DRTIO satellites, and to slightly reduce RTIO latency. * Support for DRTIO-over-EEM, used with Shuttler. +* Support for WRPLL low-noise clock recovery. * Enabled event spreading on DRTIO satellites, using high watermark for lane switching. * Added channel names to RTIO error messages. +* The RTIO analyzer is now proxied by ``aqctl_coreanalyzer_proxy`` typically running on the master + machine, similarly to ``aqctl_moninj_proxy``. * GUI: + - Integrated waveform analyzer, removing the need for external VCD viewers such as GtkWave. - Implemented Applet Request Interfaces which allow applets to modify datasets and set the current values of widgets in the dashboard's experiment windows. - - Implemented a new EntryArea widget which allows argument entry widgets to be used in applets. + - Implemented a new ``EntryArea`` widget which allows argument entry widgets to be used in applets. - The "Close all applets" command (shortcut: Ctrl-Alt-W) now ignores docked applets, making it a convenient way to clean up after exploratory work without destroying a carefully arranged default workspace. @@ -48,11 +52,14 @@ Highlights: * Persistent datasets are now stored in a LMDB database for improved performance. * Python's built-in types (such as ``float``, or ``List[...]``) can now be used in type annotations on kernel functions. -* Full Python 3.11 support. * MSYS2 packaging for Windows, which replaces Conda. Conda packages are still available to support legacy installations, but may be removed in a future release. * Experiments can now be submitted with revisions set to a branch / tag name instead of only git hashes. * Grabber image input now has an optional timeout. +* On NAR3-supported devices (Kasli-SoC, ZC706), when a Rust panic occurs, a minimal environment is started + where the network and ``artiq_coremgmt`` can be used. This allows the user to inspect logs, change + configuration options, update the firmware, and reboot the device. +* Full Python 3.11 support. 
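+
+As a minimal sketch of the built-in type annotation support mentioned above
+(the function and argument names here are illustrative only, not part of the API)::
+
+    from typing import List
+
+    from artiq.experiment import kernel
+
+    @kernel
+    def weighted_sum(values: List[float], gain: float) -> float:
+        acc = 0.0
+        for v in values:
+            acc += gain * v
+        return acc
+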
Breaking changes: From e742dc95034628673782bf373475d4c54a121727 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Mon, 15 Apr 2024 16:04:22 +0800 Subject: [PATCH 266/296] moninj: refactor _DACWidget --- artiq/dashboard/moninj.py | 59 ++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 35 deletions(-) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index 6d5874858..b38ffde55 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -146,39 +146,6 @@ class _TTLWidget(QtWidgets.QFrame): return self.channel -class _SimpleDisplayWidget(QtWidgets.QFrame): - def __init__(self, title): - QtWidgets.QFrame.__init__(self) - - self.setFrameShape(QtWidgets.QFrame.Box) - self.setFrameShadow(QtWidgets.QFrame.Raised) - - grid = QtWidgets.QGridLayout() - grid.setContentsMargins(0, 0, 0, 0) - grid.setHorizontalSpacing(0) - grid.setVerticalSpacing(0) - self.setLayout(grid) - label = QtWidgets.QLabel(title) - label.setAlignment(QtCore.Qt.AlignCenter) - grid.addWidget(label, 1, 1) - - self.value = QtWidgets.QLabel() - self.value.setAlignment(QtCore.Qt.AlignCenter) - grid.addWidget(self.value, 2, 1, 6, 1) - - grid.setRowStretch(1, 1) - grid.setRowStretch(2, 0) - grid.setRowStretch(3, 1) - - self.refresh_display() - - def refresh_display(self): - raise NotImplementedError - - def sort_key(self): - raise NotImplementedError - - class _DDSModel: def __init__(self, dds_type, ref_clk, cpld=None, pll=1, clk_div=0): self.cpld = cpld @@ -349,12 +316,34 @@ class _DDSWidget(QtWidgets.QFrame): return (self.bus_channel, self.channel) -class _DACWidget(_SimpleDisplayWidget): +class _DACWidget(QtWidgets.QFrame): def __init__(self, dm, spi_channel, channel, title): + QtWidgets.QFrame.__init__(self) self.spi_channel = spi_channel self.channel = channel self.cur_value = 0 - _SimpleDisplayWidget.__init__(self, "{} ch{}".format(title, channel)) + + self.setFrameShape(QtWidgets.QFrame.Box) + self.setFrameShadow(QtWidgets.QFrame.Raised) + + grid = QtWidgets.QGridLayout() + grid.setContentsMargins(0, 0, 0, 0) + grid.setHorizontalSpacing(0) + grid.setVerticalSpacing(0) + self.setLayout(grid) + label = QtWidgets.QLabel("{} ch{}".format(title, channel)) + label.setAlignment(QtCore.Qt.AlignCenter) + grid.addWidget(label, 1, 1) + + self.value = QtWidgets.QLabel() + self.value.setAlignment(QtCore.Qt.AlignCenter) + grid.addWidget(self.value, 2, 1, 6, 1) + + grid.setRowStretch(1, 1) + grid.setRowStretch(2, 0) + grid.setRowStretch(3, 1) + + self.refresh_display() def refresh_display(self): self.value.setText("{:.3f} %" From 5cd21c7a6d0f22a2bfda190794cae437666a454a Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 2 May 2024 14:06:39 +0800 Subject: [PATCH 267/296] moninj: add uid method to widgets --- artiq/dashboard/moninj.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index b38ffde55..27386ca1c 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -32,6 +32,7 @@ class _TTLWidget(QtWidgets.QFrame): self.channel = channel self.set_mode = dm.ttl_set_mode self.force_out = force_out + self.title = title self.setFrameShape(QtWidgets.QFrame.Box) self.setFrameShadow(QtWidgets.QFrame.Raised) @@ -145,6 +146,9 @@ class _TTLWidget(QtWidgets.QFrame): def sort_key(self): return self.channel + def uid(self): + return self.title + class _DDSModel: def __init__(self, dds_type, ref_clk, cpld=None, pll=1, clk_div=0): @@ -186,6 +190,7 @@ class _DDSWidget(QtWidgets.QFrame): self.dds_name = title 
self.cur_frequency = 0 self.dds_model = _DDSModel(dds_type, ref_clk, cpld, pll, clk_div) + self.title = title QtWidgets.QFrame.__init__(self) @@ -315,6 +320,9 @@ class _DDSWidget(QtWidgets.QFrame): def sort_key(self): return (self.bus_channel, self.channel) + def uid(self): + return self.title + class _DACWidget(QtWidgets.QFrame): def __init__(self, dm, spi_channel, channel, title): @@ -322,6 +330,7 @@ class _DACWidget(QtWidgets.QFrame): self.spi_channel = spi_channel self.channel = channel self.cur_value = 0 + self.title = title self.setFrameShape(QtWidgets.QFrame.Box) self.setFrameShadow(QtWidgets.QFrame.Raised) @@ -352,6 +361,9 @@ class _DACWidget(QtWidgets.QFrame): def sort_key(self): return (self.spi_channel, self.channel) + def uid(self): + return (self.title, self.channel) + _WidgetDesc = namedtuple("_WidgetDesc", "uid comment cls arguments") From 51837ce1a28decc7fb0f4ec2b44b0d74edc72a87 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 2 May 2024 14:10:47 +0800 Subject: [PATCH 268/296] moninj: consistent sort keys --- artiq/dashboard/moninj.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index 27386ca1c..37e9727c0 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -144,7 +144,7 @@ class _TTLWidget(QtWidgets.QFrame): self.programmatic_change = False def sort_key(self): - return self.channel + return (0, self.channel, 0) def uid(self): return self.title @@ -318,7 +318,7 @@ class _DDSWidget(QtWidgets.QFrame): self.value_edit.setText("{:.7f}".format(self.cur_frequency / 1e6)) def sort_key(self): - return (self.bus_channel, self.channel) + return (1, self.bus_channel, self.channel) def uid(self): return self.title @@ -359,7 +359,7 @@ class _DACWidget(QtWidgets.QFrame): .format(self.cur_value * 100 / 2**16)) def sort_key(self): - return (self.spi_channel, self.channel) + return (2, self.spi_channel, self.channel) def uid(self): return (self.title, self.channel) From 2fde21152a374e0f9375c5c2dd90226639bfa622 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 2 May 2024 14:20:36 +0800 Subject: [PATCH 269/296] moninj: merge docks --- artiq/dashboard/moninj.py | 21 ++++++--------------- artiq/frontend/artiq_dashboard.py | 7 +++---- 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index 37e9727c0..eba6024e1 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -442,12 +442,10 @@ class _DeviceManager: self.widgets_by_uid = dict() self.dds_sysclk = 0 - self.ttl_cb = lambda: None self.ttl_widgets = dict() - self.dds_cb = lambda: None self.dds_widgets = dict() - self.dac_cb = lambda: None self.dac_widgets = dict() + self.channels_cb = lambda: None def init_ddb(self, ddb): self.ddb = ddb @@ -468,17 +466,14 @@ class _DeviceManager: self.setup_ttl_monitoring(False, widget.channel) widget.deleteLater() del self.ttl_widgets[widget.channel] - self.ttl_cb() elif isinstance(widget, _DDSWidget): self.setup_dds_monitoring(False, widget.bus_channel, widget.channel) widget.deleteLater() del self.dds_widgets[(widget.bus_channel, widget.channel)] - self.dds_cb() elif isinstance(widget, _DACWidget): self.setup_dac_monitoring(False, widget.spi_channel, widget.channel) widget.deleteLater() del self.dac_widgets[(widget.spi_channel, widget.channel)] - self.dac_cb() else: raise ValueError @@ -490,19 +485,19 @@ class _DeviceManager: if isinstance(widget, _TTLWidget): self.ttl_widgets[widget.channel] = widget - 
self.ttl_cb() self.setup_ttl_monitoring(True, widget.channel) elif isinstance(widget, _DDSWidget): self.dds_widgets[(widget.bus_channel, widget.channel)] = widget - self.dds_cb() self.setup_dds_monitoring(True, widget.bus_channel, widget.channel) elif isinstance(widget, _DACWidget): self.dac_widgets[(widget.spi_channel, widget.channel)] = widget - self.dac_cb() self.setup_dac_monitoring(True, widget.spi_channel, widget.channel) else: raise ValueError + if description != self.description: + self.channels_cb() + self.description = description def ttl_set_mode(self, channel, mode): @@ -764,14 +759,10 @@ class _MonInjDock(QtWidgets.QDockWidget): class MonInj: def __init__(self, schedule_ctl): - self.ttl_dock = _MonInjDock("TTL") - self.dds_dock = _MonInjDock("DDS") - self.dac_dock = _MonInjDock("DAC") + self.dock = _MonInjDock("MonInj") self.dm = _DeviceManager(schedule_ctl) - self.dm.ttl_cb = lambda: self.ttl_dock.layout_widgets(self.dm.ttl_widgets.values()) - self.dm.dds_cb = lambda: self.dds_dock.layout_widgets(self.dm.dds_widgets.values()) - self.dm.dac_cb = lambda: self.dac_dock.layout_widgets(self.dm.dac_widgets.values()) + self.dm.channels_cb = lambda: self.dock.layout_widgets(self.dm.widgets_by_uid.values()) async def stop(self): if self.dm is not None: diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index 634f5e29d..1e95def0b 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ -261,8 +261,8 @@ def main(): # lay out docks right_docks = [ d_explorer, d_shortcuts, - d_ttl_dds.ttl_dock, d_ttl_dds.dds_dock, d_ttl_dds.dac_dock, - d_datasets, d_applets, d_waveform, d_interactive_args + d_ttl_dds.dock, d_datasets, d_applets, + d_waveform, d_interactive_args ] main_window.addDockWidget(QtCore.Qt.RightDockWidgetArea, right_docks[0]) for d1, d2 in zip(right_docks, right_docks[1:]): @@ -280,8 +280,7 @@ def main(): atexit_register_coroutine(smgr.stop, loop=loop) # work around for https://github.com/m-labs/artiq/issues/1307 - d_ttl_dds.ttl_dock.show() - d_ttl_dds.dds_dock.show() + d_ttl_dds.dock.show() # create first log dock if not already in state d_log0 = logmgr.first_log_dock() From 9fc4cdea6bb4ceb0114816b5c6a98cd8f2148597 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 2 May 2024 14:43:42 +0800 Subject: [PATCH 270/296] moninj: dock add layoutwidget --- artiq/dashboard/moninj.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index eba6024e1..b7cf182cd 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -741,21 +741,21 @@ class _MonInjDock(QtWidgets.QDockWidget): self.setObjectName(name) self.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable | QtWidgets.QDockWidget.DockWidgetFloatable) + grid = LayoutWidget() + self.setWidget(grid) - def layout_widgets(self, widgets): scroll_area = QtWidgets.QScrollArea() - self.setWidget(scroll_area) - - grid = FlowLayout() + grid.addWidget(scroll_area, 0, 0) + self.flow = FlowLayout() grid_widget = QtWidgets.QWidget() - grid_widget.setLayout(grid) - - for widget in sorted(widgets, key=lambda w: w.sort_key()): - grid.addWidget(widget) - + grid_widget.setLayout(self.flow) scroll_area.setWidgetResizable(True) scroll_area.setWidget(grid_widget) + def layout_widgets(self, widgets): + for widget in sorted(widgets, key=lambda w: w.sort_key()): + self.flow.addWidget(widget) + class MonInj: def __init__(self, schedule_ctl): From ad170b469ca6cca5f7254ba799ddc862a54b7855 Mon 
Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 2 May 2024 14:35:59 +0800 Subject: [PATCH 271/296] moninj: multiple docks --- artiq/dashboard/moninj.py | 63 ++++++++++++++++++++++++++----- artiq/frontend/artiq_dashboard.py | 10 ++--- 2 files changed, 59 insertions(+), 14 deletions(-) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index b7cf182cd..a3c86adc1 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -2,12 +2,13 @@ import asyncio import logging import textwrap from collections import namedtuple +from functools import partial from PyQt5 import QtCore, QtWidgets from artiq.coredevice.comm_moninj import CommMonInj, TTLOverride, TTLProbe from artiq.coredevice.ad9912_reg import AD9912_SER_CONF -from artiq.gui.tools import LayoutWidget +from artiq.gui.tools import LayoutWidget, QDockWidgetCloseDetect from artiq.gui.flowlayout import FlowLayout @@ -735,17 +736,24 @@ class _DeviceManager: await self.mi_connection.close() -class _MonInjDock(QtWidgets.QDockWidget): - def __init__(self, name): - QtWidgets.QDockWidget.__init__(self, name) +class _MonInjDock(QDockWidgetCloseDetect): + def __init__(self, name, manager): + QtWidgets.QDockWidget.__init__(self, "MonInj") self.setObjectName(name) self.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable | QtWidgets.QDockWidget.DockWidgetFloatable) grid = LayoutWidget() self.setWidget(grid) + newdock = QtWidgets.QToolButton() + newdock.setToolTip("Create new moninj dock") + newdock.setIcon(QtWidgets.QApplication.style().standardIcon( + QtWidgets.QStyle.SP_FileDialogNewFolder)) + newdock.clicked.connect(lambda: manager.create_new_dock()) + grid.addWidget(newdock, 0, 0) + scroll_area = QtWidgets.QScrollArea() - grid.addWidget(scroll_area, 0, 0) + grid.addWidget(scroll_area, 1, 0, 1, 10) self.flow = FlowLayout() grid_widget = QtWidgets.QWidget() grid_widget.setLayout(self.flow) @@ -758,11 +766,48 @@ class _MonInjDock(QtWidgets.QDockWidget): class MonInj: - def __init__(self, schedule_ctl): - self.dock = _MonInjDock("MonInj") - + def __init__(self, schedule_ctl, main_window): + self.docks = dict() + self.main_window = main_window self.dm = _DeviceManager(schedule_ctl) - self.dm.channels_cb = lambda: self.dock.layout_widgets(self.dm.widgets_by_uid.values()) + self.dm.channels_cb = \ + lambda: self.docks["moninj0"].layout_widgets(self.dm.widgets_by_uid.values()) + + def create_new_dock(self, add_to_area=True): + n = 0 + name = "moninj0" + while name in self.docks: + n += 1 + name = "moninj" + str(n) + + dock = _MonInjDock(name, self) + self.docks[name] = dock + if add_to_area: + self.main_window.addDockWidget(QtCore.Qt.RightDockWidgetArea, dock) + dock.setFloating(True) + dock.sigClosed.connect(partial(self.on_dock_closed, name)) + self.update_closable() + return dock + + def on_dock_closed(self, name): + dock = self.docks[name] + dock.deleteLater() + del self.docks[name] + self.update_closable() + + def update_closable(self): + flags = (QtWidgets.QDockWidget.DockWidgetMovable | + QtWidgets.QDockWidget.DockWidgetFloatable) + if len(self.docks) > 1: + flags |= QtWidgets.QDockWidget.DockWidgetClosable + for dock in self.docks.values(): + dock.setFeatures(flags) + + def first_moninj_dock(self): + if self.docks: + return None + dock = self.create_new_dock(False) + return dock async def stop(self): if self.dm is not None: diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index 1e95def0b..45996a537 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ 
-226,7 +226,7 @@ def main(): smgr.register(d_applets) broadcast_clients["ccb"].notify_cbs.append(d_applets.ccb_notify) - d_ttl_dds = moninj.MonInj(rpc_clients["schedule"]) + d_ttl_dds = moninj.MonInj(rpc_clients["schedule"], main_window) atexit_register_coroutine(d_ttl_dds.stop, loop=loop) d_waveform = waveform.WaveformDock( @@ -261,7 +261,7 @@ def main(): # lay out docks right_docks = [ d_explorer, d_shortcuts, - d_ttl_dds.dock, d_datasets, d_applets, + d_datasets, d_applets, d_waveform, d_interactive_args ] main_window.addDockWidget(QtCore.Qt.RightDockWidgetArea, right_docks[0]) @@ -279,13 +279,13 @@ def main(): smgr.start(loop=loop) atexit_register_coroutine(smgr.stop, loop=loop) - # work around for https://github.com/m-labs/artiq/issues/1307 - d_ttl_dds.dock.show() - # create first log dock if not already in state d_log0 = logmgr.first_log_dock() if d_log0 is not None: main_window.tabifyDockWidget(d_schedule, d_log0) + d_moninj0 = d_ttl_dds.first_moninj_dock() + if d_moninj0 is not None: + main_window.tabifyDockWidget(right_docks[-1], d_moninj0) if server_name is not None: server_description = server_name + " ({})".format(args.server) From 154f186f180f52accba3592bbfd0f2fdf99775d3 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 2 May 2024 14:56:49 +0800 Subject: [PATCH 272/296] moninj: add _AddChannelDialog --- artiq/dashboard/moninj.py | 86 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 84 insertions(+), 2 deletions(-) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index a3c86adc1..1efacb845 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -10,6 +10,7 @@ from artiq.coredevice.comm_moninj import CommMonInj, TTLOverride, TTLProbe from artiq.coredevice.ad9912_reg import AD9912_SER_CONF from artiq.gui.tools import LayoutWidget, QDockWidgetCloseDetect from artiq.gui.flowlayout import FlowLayout +from artiq.gui.models import DictSyncTreeSepModel logger = logging.getLogger(__name__) @@ -150,6 +151,9 @@ class _TTLWidget(QtWidgets.QFrame): def uid(self): return self.title + def to_model_path(self): + return "ttl/{}".format(self.title) + class _DDSModel: def __init__(self, dds_type, ref_clk, cpld=None, pll=1, clk_div=0): @@ -324,6 +328,9 @@ class _DDSWidget(QtWidgets.QFrame): def uid(self): return self.title + def to_model_path(self): + return "dds/{}".format(self.title) + class _DACWidget(QtWidgets.QFrame): def __init__(self, dm, spi_channel, channel, title): @@ -365,6 +372,9 @@ class _DACWidget(QtWidgets.QFrame): def uid(self): return (self.title, self.channel) + def to_model_path(self): + return "dac/{} ch{}".format(self.title, self.channel) + _WidgetDesc = namedtuple("_WidgetDesc", "uid comment cls arguments") @@ -736,6 +746,58 @@ class _DeviceManager: await self.mi_connection.close() +class Model(DictSyncTreeSepModel): + def __init__(self, init): + DictSyncTreeSepModel.__init__(self, "/", ["Channels"], init) + + def clear(self): + for k in self.backing_store: + self._del_item(self, k.split(self.separator)) + self.backing_store.clear() + + def update(self, d): + for k, v in d.items(): + self[v.to_model_path()] = v + + +class _AddChannelDialog(QtWidgets.QDialog): + def __init__(self, parent, model): + QtWidgets.QDialog.__init__(self, parent=parent) + self.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu) + self.setWindowTitle("Add channels") + + layout = QtWidgets.QVBoxLayout() + self.setLayout(layout) + + self._model = model + self._tree_view = QtWidgets.QTreeView() + self._tree_view.setHeaderHidden(True) + 
self._tree_view.setSelectionBehavior( + QtWidgets.QAbstractItemView.SelectItems) + self._tree_view.setSelectionMode( + QtWidgets.QAbstractItemView.ExtendedSelection) + self._tree_view.setModel(self._model) + layout.addWidget(self._tree_view) + + self._button_box = QtWidgets.QDialogButtonBox( + QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel + ) + self._button_box.setCenterButtons(True) + self._button_box.accepted.connect(self.add_channels) + self._button_box.rejected.connect(self.reject) + layout.addWidget(self._button_box) + + def add_channels(self): + selection = self._tree_view.selectedIndexes() + channels = [] + for select in selection: + key = self._model.index_to_key(select) + if key is not None: + channels.append(self._model[key].ref) + self.channels = channels + self.accept() + + class _MonInjDock(QDockWidgetCloseDetect): def __init__(self, name, manager): QtWidgets.QDockWidget.__init__(self, "MonInj") @@ -752,6 +814,17 @@ class _MonInjDock(QDockWidgetCloseDetect): newdock.clicked.connect(lambda: manager.create_new_dock()) grid.addWidget(newdock, 0, 0) + self.channel_dialog = _AddChannelDialog(self, manager.channel_model) + self.channel_dialog.accepted.connect(self.add_channels) + + dialog_btn = QtWidgets.QToolButton() + dialog_btn.setToolTip("Add channels") + dialog_btn.setIcon( + QtWidgets.QApplication.style().standardIcon( + QtWidgets.QStyle.SP_FileDialogListView)) + dialog_btn.clicked.connect(self.channel_dialog.open) + grid.addWidget(dialog_btn, 0, 1) + scroll_area = QtWidgets.QScrollArea() grid.addWidget(scroll_area, 1, 0, 1, 10) self.flow = FlowLayout() @@ -760,8 +833,13 @@ class _MonInjDock(QDockWidgetCloseDetect): scroll_area.setWidgetResizable(True) scroll_area.setWidget(grid_widget) + def add_channels(self): + channels = self.channel_dialog.channels + self.layout_widgets(channels) + def layout_widgets(self, widgets): for widget in sorted(widgets, key=lambda w: w.sort_key()): + widget.show() self.flow.addWidget(widget) @@ -770,8 +848,12 @@ class MonInj: self.docks = dict() self.main_window = main_window self.dm = _DeviceManager(schedule_ctl) - self.dm.channels_cb = \ - lambda: self.docks["moninj0"].layout_widgets(self.dm.widgets_by_uid.values()) + self.dm.channels_cb = self.add_channels + self.channel_model = Model({}) + + def add_channels(self): + self.channel_model.clear() + self.channel_model.update(self.dm.widgets_by_uid) def create_new_dock(self, add_to_area=True): n = 0 From d3d50d790af54ad876a47497c16b60c88138b793 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 2 May 2024 15:22:38 +0800 Subject: [PATCH 273/296] gui.tools: add DoubleClickLineEdit --- artiq/gui/tools.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/artiq/gui/tools.py b/artiq/gui/tools.py index 2171ab915..957823780 100644 --- a/artiq/gui/tools.py +++ b/artiq/gui/tools.py @@ -4,6 +4,40 @@ import logging from PyQt5 import QtCore, QtWidgets +class DoubleClickLineEdit(QtWidgets.QLineEdit): + finished = QtCore.pyqtSignal() + + def __init__(self, init): + QtWidgets.QLineEdit.__init__(self, init) + self.setFrame(False) + self.setReadOnly(True) + self.returnPressed.connect(self._return_pressed) + self.editingFinished.connect(self._editing_finished) + self._text = init + + def mouseDoubleClickEvent(self, event): + if self.isReadOnly(): + self.setReadOnly(False) + self.setFrame(True) + QtWidgets.QLineEdit.mouseDoubleClickEvent(self, event) + + def _return_pressed(self): + self._text = self.text() + + def _editing_finished(self): + 
self.setReadOnly(True) + self.setFrame(False) + self.setText(self._text) + self.finished.emit() + + def keyPressEvent(self, event): + key = event.key() + if key == QtCore.Qt.Key_Escape and not self.isReadOnly(): + self.editingFinished.emit() + else: + QtWidgets.QLineEdit.keyPressEvent(self, event) + + def log_level_to_name(level): if level >= logging.CRITICAL: return "CRITICAL" From 7fae395b883dba4b88493fc0cc092a5a9185c75d Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 2 May 2024 15:24:36 +0800 Subject: [PATCH 274/296] moninj: add dock label --- artiq/dashboard/moninj.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index 1efacb845..10276d30d 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -8,7 +8,7 @@ from PyQt5 import QtCore, QtWidgets from artiq.coredevice.comm_moninj import CommMonInj, TTLOverride, TTLProbe from artiq.coredevice.ad9912_reg import AD9912_SER_CONF -from artiq.gui.tools import LayoutWidget, QDockWidgetCloseDetect +from artiq.gui.tools import LayoutWidget, QDockWidgetCloseDetect, DoubleClickLineEdit from artiq.gui.flowlayout import FlowLayout from artiq.gui.models import DictSyncTreeSepModel @@ -825,6 +825,10 @@ class _MonInjDock(QDockWidgetCloseDetect): dialog_btn.clicked.connect(self.channel_dialog.open) grid.addWidget(dialog_btn, 0, 1) + self.label = DoubleClickLineEdit(name) + self.label.setStyleSheet("background:transparent;") + grid.addWidget(self.label, 0, 2) + scroll_area = QtWidgets.QScrollArea() grid.addWidget(scroll_area, 1, 0, 1, 10) self.flow = FlowLayout() From 88903fb38c41f6464aa28dee5665a6da430701e7 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 2 May 2024 15:38:37 +0800 Subject: [PATCH 275/296] artiq_dashboard: connect Devices sub after loading state --- artiq/frontend/artiq_dashboard.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index 45996a537..6544a7f08 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ -236,14 +236,6 @@ def main(): ) atexit_register_coroutine(d_waveform.stop, loop=loop) - def init_cbs(ddb): - d_ttl_dds.dm.init_ddb(ddb) - d_waveform.init_ddb(ddb) - return ddb - devices_sub = Subscriber("devices", init_cbs, [d_ttl_dds.dm.notify_ddb, d_waveform.notify_ddb]) - loop.run_until_complete(devices_sub.connect(args.server, args.port_notify)) - atexit_register_coroutine(devices_sub.close, loop=loop) - d_interactive_args = interactive_args.InteractiveArgsDock( sub_clients["interactive_args"], rpc_clients["interactive_arg_db"] @@ -276,6 +268,15 @@ def main(): # QDockWidgets fail to be embedded. 
main_window.show() smgr.load() + + def init_cbs(ddb): + d_ttl_dds.dm.init_ddb(ddb) + d_waveform.init_ddb(ddb) + return ddb + devices_sub = Subscriber("devices", init_cbs, [d_ttl_dds.dm.notify_ddb, d_waveform.notify_ddb]) + loop.run_until_complete(devices_sub.connect(args.server, args.port_notify)) + atexit_register_coroutine(devices_sub.close, loop=loop) + smgr.start(loop=loop) atexit_register_coroutine(smgr.stop, loop=loop) From 33d3688bfc711656521270c7ee4ce1cf6cb84230 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 2 May 2024 15:43:28 +0800 Subject: [PATCH 276/296] moninj: state management --- artiq/dashboard/moninj.py | 61 +++++++++++++++++++++++++++++-- artiq/frontend/artiq_dashboard.py | 1 + 2 files changed, 59 insertions(+), 3 deletions(-) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index 10276d30d..e77e9d374 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -806,15 +806,17 @@ class _MonInjDock(QDockWidgetCloseDetect): QtWidgets.QDockWidget.DockWidgetFloatable) grid = LayoutWidget() self.setWidget(grid) + self.manager = manager + self.widget_uids = None newdock = QtWidgets.QToolButton() newdock.setToolTip("Create new moninj dock") newdock.setIcon(QtWidgets.QApplication.style().standardIcon( QtWidgets.QStyle.SP_FileDialogNewFolder)) - newdock.clicked.connect(lambda: manager.create_new_dock()) + newdock.clicked.connect(lambda: self.manager.create_new_dock()) grid.addWidget(newdock, 0, 0) - self.channel_dialog = _AddChannelDialog(self, manager.channel_model) + self.channel_dialog = _AddChannelDialog(self, self.manager.channel_model) self.channel_dialog.accepted.connect(self.add_channels) dialog_btn = QtWidgets.QToolButton() @@ -846,6 +848,43 @@ class _MonInjDock(QDockWidgetCloseDetect): widget.show() self.flow.addWidget(widget) + def restore_widgets(self): + if self.widget_uids is not None: + widgets_by_uid = self.manager.dm.widgets_by_uid + widgets = list() + for uid in self.widget_uids: + if uid in widgets_by_uid: + widgets.append(widgets_by_uid[uid]) + else: + logger.warning("removing moninj widget {}".format(uid)) + self.layout_widgets(widgets) + self.widget_uids = None + + def _save_widget_uids(self): + uids = [] + for i in range(self.flow.count()): + uids.append(self.flow.itemAt(i).widget().uid()) + return uids + + def save_state(self): + return { + "dock_label": self.label.text(), + "widget_uids": self._save_widget_uids() + } + + def restore_state(self, state): + try: + label = state["dock_label"] + except KeyError: + pass + else: + self.label._text = label + self.label.setText(label) + try: + self.widget_uids = state["widget_uids"] + except KeyError: + pass + class MonInj: def __init__(self, schedule_ctl, main_window): @@ -858,6 +897,8 @@ class MonInj: def add_channels(self): self.channel_model.clear() self.channel_model.update(self.dm.widgets_by_uid) + for dock in self.docks.values(): + dock.restore_widgets() def create_new_dock(self, add_to_area=True): n = 0 @@ -877,7 +918,7 @@ class MonInj: def on_dock_closed(self, name): dock = self.docks[name] - dock.deleteLater() + dock.hide() # dock may be parent, only delete on exit del self.docks[name] self.update_closable() @@ -895,6 +936,20 @@ class MonInj: dock = self.create_new_dock(False) return dock + def save_state(self): + return {name: dock.save_state() for name, dock in self.docks.items()} + + def restore_state(self, state): + if self.docks: + raise NotImplementedError + for name, dock_state in state.items(): + dock = _MonInjDock(name, self) + self.docks[name] = dock + 
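# Reapply the dock's saved label and widget UIDs; the widgets themselves are attached later, once the device database arrives and add_channels() repopulates the channel model. +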
dock.restore_state(dock_state) + self.main_window.addDockWidget(QtCore.Qt.RightDockWidgetArea, dock) + dock.sigClosed.connect(partial(self.on_dock_closed, name)) + self.update_closable() + async def stop(self): if self.dm is not None: await self.dm.close() diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index 6544a7f08..1d0d35284 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ -227,6 +227,7 @@ def main(): broadcast_clients["ccb"].notify_cbs.append(d_applets.ccb_notify) d_ttl_dds = moninj.MonInj(rpc_clients["schedule"], main_window) + smgr.register(d_ttl_dds) atexit_register_coroutine(d_ttl_dds.stop, loop=loop) d_waveform = waveform.WaveformDock( From c256d113de2f9d4c37c281e25124cf4df248e5dd Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 2 May 2024 15:58:54 +0800 Subject: [PATCH 277/296] dndwidgets: add DragDropFlowLayoutWidget --- artiq/gui/dndwidgets.py | 66 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 65 insertions(+), 1 deletion(-) diff --git a/artiq/gui/dndwidgets.py b/artiq/gui/dndwidgets.py index 03e8a5e63..1f7b3fc07 100644 --- a/artiq/gui/dndwidgets.py +++ b/artiq/gui/dndwidgets.py @@ -1,4 +1,6 @@ -from PyQt5 import QtCore, QtWidgets +from PyQt5 import QtCore, QtWidgets, QtGui + +from artiq.gui.flowlayout import FlowLayout class VDragDropSplitter(QtWidgets.QSplitter): @@ -98,3 +100,65 @@ class VDragScrollArea(QtWidgets.QScrollArea): dy = self._direction * self._speed new_val = min(max_, max(min_, val + dy)) self.verticalScrollBar().setValue(new_val) + + +# Widget with FlowLayout and drag and drop support between widgets +class DragDropFlowLayoutWidget(QtWidgets.QWidget): + def __init__(self): + QtWidgets.QWidget.__init__(self) + self.layout = FlowLayout() + self.setLayout(self.layout) + self.setAcceptDrops(True) + + def _get_index(self, pos): + for i in range(self.layout.count()): + if self.itemAt(i).geometry().contains(pos): + return i + return -1 + + def mousePressEvent(self, event): + if event.buttons() == QtCore.Qt.LeftButton \ + and event.modifiers() == QtCore.Qt.ShiftModifier: + index = self._get_index(event.pos()) + if index == -1: + return + drag = QtGui.QDrag(self) + mime = QtCore.QMimeData() + mime.setData("index", str(index).encode()) + drag.setMimeData(mime) + pixmapi = QtWidgets.QApplication.style().standardIcon( + QtWidgets.QStyle.SP_FileIcon) + drag.setPixmap(pixmapi.pixmap(32)) + drag.exec_(QtCore.Qt.MoveAction) + event.accept() + + def dragEnterEvent(self, event): + event.accept() + + def dropEvent(self, event): + index = self._get_index(event.pos()) + source_layout = event.source() + source_index = int(bytes(event.mimeData().data("index")).decode()) + if source_layout == self: + if index == source_index: + return + widget = self.layout.itemAt(source_index).widget() + self.layout.removeWidget(widget) + self.layout.addWidget(widget) + self.layout.itemList.insert(index, self.layout.itemList.pop()) + else: + widget = source_layout.layout.itemAt(source_index).widget() + source_layout.layout.removeWidget(widget) + self.layout.addWidget(widget) + if index != -1: + self.layout.itemList.insert(index, self.layout.itemList.pop()) + event.accept() + + def addWidget(self, widget): + self.layout.addWidget(widget) + + def count(self): + return self.layout.count() + + def itemAt(self, i): + return self.layout.itemAt(i) From b74beac6b996980dd7e589fece7078c70536ad0b Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 2 May 2024 16:01:44 +0800 Subject: [PATCH 278/296] moninj: add 
drag drop --- artiq/dashboard/moninj.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index e77e9d374..a0ebd58f4 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -9,7 +9,7 @@ from PyQt5 import QtCore, QtWidgets from artiq.coredevice.comm_moninj import CommMonInj, TTLOverride, TTLProbe from artiq.coredevice.ad9912_reg import AD9912_SER_CONF from artiq.gui.tools import LayoutWidget, QDockWidgetCloseDetect, DoubleClickLineEdit -from artiq.gui.flowlayout import FlowLayout +from artiq.gui.dndwidgets import VDragScrollArea, DragDropFlowLayoutWidget from artiq.gui.models import DictSyncTreeSepModel @@ -831,13 +831,11 @@ class _MonInjDock(QDockWidgetCloseDetect): self.label.setStyleSheet("background:transparent;") grid.addWidget(self.label, 0, 2) - scroll_area = QtWidgets.QScrollArea() + scroll_area = VDragScrollArea(self) grid.addWidget(scroll_area, 1, 0, 1, 10) - self.flow = FlowLayout() - grid_widget = QtWidgets.QWidget() - grid_widget.setLayout(self.flow) + self.flow = DragDropFlowLayoutWidget() scroll_area.setWidgetResizable(True) - scroll_area.setWidget(grid_widget) + scroll_area.setWidget(self.flow) def add_channels(self): channels = self.channel_dialog.channels From 5e73245cefe677bb7d1ade263e474c1ff259ed84 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Thu, 2 May 2024 17:02:52 +0800 Subject: [PATCH 279/296] moninj: delete widget action --- artiq/dashboard/moninj.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index a0ebd58f4..08f989e59 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -836,6 +836,23 @@ class _MonInjDock(QDockWidgetCloseDetect): self.flow = DragDropFlowLayoutWidget() scroll_area.setWidgetResizable(True) scroll_area.setWidget(self.flow) + self.flow.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + self.flow.customContextMenuRequested.connect(self.custom_context_menu) + + def custom_context_menu(self, pos): + index = self.flow._get_index(pos) + if index == -1: + return + menu = QtWidgets.QMenu() + delete_action = QtWidgets.QAction("Delete widget", menu) + delete_action.triggered.connect(partial(self.delete_widget, index)) + menu.addAction(delete_action) + menu.exec_(self.flow.mapToGlobal(pos)) + + def delete_widget(self, index, checked): + widget = self.flow.itemAt(index).widget() + widget.hide() + self.flow.layout.takeAt(index) def add_channels(self): channels = self.channel_dialog.channels From 1bb3c503d9a7d9cb2c43fff9a87286a6baae0ad1 Mon Sep 17 00:00:00 2001 From: Simon Renblad Date: Wed, 5 Jun 2024 16:24:00 +0800 Subject: [PATCH 280/296] moninj: sub on display, unsub on hide --- artiq/dashboard/moninj.py | 83 ++++++++++++++++++++++----------------- 1 file changed, 48 insertions(+), 35 deletions(-) diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index 08f989e59..e83497289 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -472,21 +472,8 @@ class _DeviceManager: for to_remove in self.description - description: widget = self.widgets_by_uid[to_remove.uid] del self.widgets_by_uid[to_remove.uid] - - if isinstance(widget, _TTLWidget): - self.setup_ttl_monitoring(False, widget.channel) - widget.deleteLater() - del self.ttl_widgets[widget.channel] - elif isinstance(widget, _DDSWidget): - self.setup_dds_monitoring(False, widget.bus_channel, widget.channel) - widget.deleteLater() - del self.dds_widgets[(widget.bus_channel, 
widget.channel)] - elif isinstance(widget, _DACWidget): - self.setup_dac_monitoring(False, widget.spi_channel, widget.channel) - widget.deleteLater() - del self.dac_widgets[(widget.spi_channel, widget.channel)] - else: - raise ValueError + self.setup_monitoring(False, widget) + widget.deleteLater() for to_add in description - self.description: widget = to_add.cls(self, *to_add.arguments) @@ -494,18 +481,6 @@ class _DeviceManager: widget.setToolTip(to_add.comment) self.widgets_by_uid[to_add.uid] = widget - if isinstance(widget, _TTLWidget): - self.ttl_widgets[widget.channel] = widget - self.setup_ttl_monitoring(True, widget.channel) - elif isinstance(widget, _DDSWidget): - self.dds_widgets[(widget.bus_channel, widget.channel)] = widget - self.setup_dds_monitoring(True, widget.bus_channel, widget.channel) - elif isinstance(widget, _DACWidget): - self.dac_widgets[(widget.spi_channel, widget.channel)] = widget - self.setup_dac_monitoring(True, widget.spi_channel, widget.channel) - else: - raise ValueError - if description != self.description: self.channels_cb() @@ -513,7 +488,8 @@ class _DeviceManager: def ttl_set_mode(self, channel, mode): if self.mi_connection is not None: - widget = self.ttl_widgets[channel] + widget_uid = self.ttl_widgets[channel] + widget = self.widgets_by_uid[widget_uid] if mode == "0": widget.cur_override = True widget.cur_level = False @@ -662,6 +638,31 @@ class _DeviceManager: "ToggleDDS", "Toggle DDS {} {}".format(dds_channel, "on" if sw else "off")) + def setup_monitoring(self, enable, widget): + if isinstance(widget, _TTLWidget): + key = widget.channel + args = (key,) + subscribers = self.ttl_widgets + subscribe_func = self.setup_ttl_monitoring + elif isinstance(widget, _DDSWidget): + key = (widget.bus_channel, widget.channel) + args = key + subscribers = self.dds_widgets + subscribe_func = self.setup_dds_monitoring + elif isinstance(widget, _DACWidget): + key = (widget.spi_channel, widget.channel) + args = key + subscribers = self.dac_widgets + subscribe_func = self.setup_dac_monitoring + else: + raise ValueError + if enable and key not in subscribers: + subscribers[key] = widget.uid() + subscribe_func(enable, *args) + elif not enable and key in subscribers: + subscribe_func(enable, *args) + del subscribers[key] + def setup_ttl_monitoring(self, enable, channel): if self.mi_connection is not None: self.mi_connection.monitor_probe(enable, channel, TTLProbe.level.value) @@ -681,24 +682,28 @@ class _DeviceManager: def monitor_cb(self, channel, probe, value): if channel in self.ttl_widgets: - widget = self.ttl_widgets[channel] + widget_uid = self.ttl_widgets[channel] + widget = self.widgets_by_uid[widget_uid] if probe == TTLProbe.level.value: widget.cur_level = bool(value) elif probe == TTLProbe.oe.value: widget.cur_oe = bool(value) widget.refresh_display() elif (channel, probe) in self.dds_widgets: - widget = self.dds_widgets[(channel, probe)] + widget_uid = self.dds_widgets[(channel, probe)] + widget = self.widgets_by_uid[widget_uid] widget.dds_model.monitor_update(probe, value) widget.refresh_display() elif (channel, probe) in self.dac_widgets: - widget = self.dac_widgets[(channel, probe)] + widget_uid = self.dac_widgets[(channel, probe)] + widget = self.widgets_by_uid[widget_uid] widget.cur_value = value widget.refresh_display() def injection_status_cb(self, channel, override, value): if channel in self.ttl_widgets: - widget = self.ttl_widgets[channel] + widget_uid = self.ttl_widgets[channel] + widget = self.widgets_by_uid[widget_uid] if override == 
TTLOverride.en.value: widget.cur_override = bool(value) if override == TTLOverride.level.value: @@ -732,9 +737,9 @@ class _DeviceManager: for ttl_channel in self.ttl_widgets.keys(): self.setup_ttl_monitoring(True, ttl_channel) for bus_channel, channel in self.dds_widgets.keys(): - self.setup_dds_monitoring(True, bus_channel, channel) + self.setup_dds_monitoring(True, bus_channel, channel) for spi_channel, channel in self.dac_widgets.keys(): - self.setup_dac_monitoring(True, spi_channel, channel) + self.setup_dac_monitoring(True, spi_channel, channel) async def close(self): self.mi_connector_task.cancel() @@ -849,9 +854,15 @@ class _MonInjDock(QDockWidgetCloseDetect): menu.addAction(delete_action) menu.exec_(self.flow.mapToGlobal(pos)) + def delete_all_widgets(self): + for index in reversed(range(self.flow.count())): + self.delete_widget(index, True) + + def delete_widget(self, index, checked): widget = self.flow.itemAt(index).widget() widget.hide() + self.manager.dm.setup_monitoring(False, widget) self.flow.layout.takeAt(index) def add_channels(self): @@ -861,6 +872,7 @@ class _MonInjDock(QDockWidgetCloseDetect): def layout_widgets(self, widgets): for widget in sorted(widgets, key=lambda w: w.sort_key()): widget.show() + self.manager.dm.setup_monitoring(True, widget) self.flow.addWidget(widget) def restore_widgets(self): @@ -933,9 +945,10 @@ class MonInj: def on_dock_closed(self, name): dock = self.docks[name] - dock.hide() # dock may be parent, only delete on exit del self.docks[name] self.update_closable() + dock.delete_all_widgets() + dock.hide() # dock may be parent, only delete on exit def update_closable(self): flags = (QtWidgets.QDockWidget.DockWidgetMovable | From 927bb3b6b4269d413d28aad264348432a9a672c0 Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Thu, 6 Jun 2024 09:35:05 +0800 Subject: [PATCH 281/296] flake: update dependencies --- flake.lock | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/flake.lock b/flake.lock index 4433d4a2c..7ea9f95ac 100644 --- a/flake.lock +++ b/flake.lock @@ -11,11 +11,11 @@ ] }, "locked": { - "lastModified": 1707216368, - "narHash": "sha256-ZXoqzG2QsVsybALLYXs473avXcyKSZNh2kIgcPo60XQ=", + "lastModified": 1717637438, + "narHash": "sha256-BXFidNm3Em8iChPGu1L0s2bY+f2yQ0VVid4MuOoTehw=", "owner": "m-labs", "repo": "artiq-comtools", - "rev": "e5d0204490bccc07ef9141b0d7c405ab01cb8273", + "rev": "78d27026efe76a13f7b4698a554f55811369ec4d", "type": "github" }, "original": { @@ -29,11 +29,11 @@ "systems": "systems" }, "locked": { - "lastModified": 1694529238, - "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", "type": "github" }, "original": { @@ -60,11 +60,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1717144377, - "narHash": "sha256-F/TKWETwB5RaR8owkPPi+SPJh83AQsm6KrQAlJ8v/uA=", + "lastModified": 1717281328, + "narHash": "sha256-evZPzpf59oNcDUXxh2GHcxHkTEG4fjae2ytWP85jXRo=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "805a384895c696f802a9bf5bf4720f37385df547", + "rev": "b3b2b28c1daa04fe2ae47c21bb76fd226eac4ca1", "type": "github" }, "original": { @@ -92,11 +92,11 @@ ] }, "locked": { - "lastModified": 1708937641, - "narHash": "sha256-Hkb9VYFzFgkYxfbh4kYcDSn7DbMUYehoQDeTALrxo2Q=", + "lastModified": 1717637367, + "narHash": 
"sha256-4mSm9wl5EMgzzrW6w86IDUevkEOT99FESHGcxcyQbD0=", "owner": "m-labs", "repo": "sipyco", - "rev": "4a28b311ce0069454b4e8fe1e6049db11b9f1296", + "rev": "02b96ec2473a3c3d3c980899de2564ddce949dab", "type": "github" }, "original": { From ebc1e3fb767d6c0eac6eac20c3afeaba2ab70d1a Mon Sep 17 00:00:00 2001 From: Sebastien Bourdeauducq Date: Thu, 6 Jun 2024 09:45:54 +0800 Subject: [PATCH 282/296] bump major version number --- flake.nix | 2 +- versioneer.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/flake.nix b/flake.nix index 74a01c561..55a4fb296 100644 --- a/flake.nix +++ b/flake.nix @@ -18,7 +18,7 @@ pkgs = import nixpkgs { system = "x86_64-linux"; overlays = [ (import mozilla-overlay) ]; }; pkgs-aarch64 = import nixpkgs { system = "aarch64-linux"; }; - artiqVersionMajor = 8; + artiqVersionMajor = 9; artiqVersionMinor = self.sourceInfo.revCount or 0; artiqVersionId = self.sourceInfo.shortRev or "unknown"; artiqVersion = (builtins.toString artiqVersionMajor) + "." + (builtins.toString artiqVersionMinor) + "+" + artiqVersionId + ".beta"; diff --git a/versioneer.py b/versioneer.py index dcb8bf1b2..eae28b2c6 100644 --- a/versioneer.py +++ b/versioneer.py @@ -11,7 +11,7 @@ def get_rev(): """ def get_version(): - return os.getenv("VERSIONEER_OVERRIDE", default="8.0+unknown.beta") + return os.getenv("VERSIONEER_OVERRIDE", default="9.0+unknown.beta") def get_rev(): return os.getenv("VERSIONEER_REV", default="unknown") From 59302da71cba58eee58c349553630285666270d1 Mon Sep 17 00:00:00 2001 From: architeuthis Date: Wed, 5 Jun 2024 16:53:01 +0800 Subject: [PATCH 283/296] docs: 'Installing ARTIQ' manual page overhaul --- doc/manual/core_device.rst | 15 +-- doc/manual/developing.rst | 2 +- doc/manual/getting_started_core.rst | 2 +- doc/manual/installing.rst | 186 ++++++++++++++-------------- doc/manual/utilities.rst | 2 + 5 files changed, 103 insertions(+), 104 deletions(-) diff --git a/doc/manual/core_device.rst b/doc/manual/core_device.rst index 24e9768c0..8949836e2 100644 --- a/doc/manual/core_device.rst +++ b/doc/manual/core_device.rst @@ -1,7 +1,7 @@ Core device =========== -The core device is a FPGA-based hardware component that contains a softcore CPU tightly coupled with the so-called RTIO core that provides precision timing. The CPU executes Python code that is statically compiled by the ARTIQ compiler, and communicates with the core device peripherals (TTL, DDS, etc.) over the RTIO core. This architecture provides high timing resolution, low latency, low jitter, high level programming capabilities, and good integration with the rest of the Python experiment code. +The core device is a FPGA-based hardware component that contains a softcore or hardcore CPU tightly coupled with the so-called RTIO core, which runs in gateware and provides precision timing. The CPU executes Python code that is statically compiled by the ARTIQ compiler and communicates with peripherals (TTL, DDS, etc.) through the RTIO core, as described in :ref:`artiq-real-time-i-o-concepts`. This architecture provides high timing resolution, low latency, low jitter, high-level programming capabilities, and good integration with the rest of the Python experiment code. While it is possible to use all the other parts of ARTIQ (controllers, master, GUI, dataset management, etc.) without a core device, many experiments require it. 
@@ -11,13 +11,9 @@ While it is possible to use all the other parts of ARTIQ (controllers, master, G Flash storage ************* -The core device contains some flash space that can be used to store configuration data. +The core device contains some flash storage space which is largely used to store configuration data. It is one sector (typically 64 kB) large and organized as a list of key-value records, accessible by using ``artiq_coremgmt`` (see: :ref:`core-device-management-tool`). -This storage area is used to store the core device MAC address, IP address and even the idle kernel. - -The flash storage area is one sector (typically 64 kB) large and is organized as a list of key-value records. - -This flash storage space can be accessed by using ``artiq_coremgmt`` (see: :ref:`core-device-management-tool`). +This area is used to store a variety of configurations, in particular the core device IP address and, if present, the startup and/or idle kernels (see also: :ref:`miscellaneous-configuration-of-the-core-device`). .. _board-ports: @@ -29,7 +25,7 @@ All boards have a serial interface running at 115200bps 8-N-1 that can be used f Kasli ----- -`Kasli `_ is a versatile core device designed for ARTIQ as part of the `Sinara `_ family of boards. All variants support interfacing to various EEM daughterboards (TTL, DDS, ADC, DAC...) connected directly to it. +`Kasli `_ is a versatile core device designed for ARTIQ as part of the `Sinara `_ family of boards. All variants support interfacing to various EEM daughterboards (TTL, DDS, ADC, DAC...) connected directly to it. Standalone variants +++++++++++++++++++ @@ -161,6 +157,7 @@ To avoid I/O contention, the startup kernel should first program the TCA6424A ex See :mod:`artiq.coredevice.i2c` for more details. +.. _core-device-clocking: Clocking ++++++++ @@ -169,7 +166,7 @@ The KC705 in standalone variants supports an internal 125 MHz RTIO clock (based * ``int_125`` - internal crystal oscillator, 125 MHz output (default), * ``ext0_bypass`` - external clock. -KC705 in DRTIO variants and Kasli generates the RTIO clock using a PLL locked either to an internal crystal or to an external frequency reference. Valid values are: +KC705 in DRTIO variants and Kasli generate the RTIO clock using a PLL locked either to an internal crystal or to an external frequency reference. Valid values are: * ``int_125`` - internal crystal oscillator using PLL, 125 MHz output (default), * ``int_100`` - internal crystal oscillator using PLL, 100 MHz output, diff --git a/doc/manual/developing.rst b/doc/manual/developing.rst index a2c0d5da4..25bbef4a6 100644 --- a/doc/manual/developing.rst +++ b/doc/manual/developing.rst @@ -17,6 +17,6 @@ ARTIQ itself does not depend on Nix, and it is also possible to compile everythi * Clone the ARTIQ Git repository and run ``nix develop`` at the root (where ``flake.nix`` is). * Make the current source code of ARTIQ available to the Python interpreter by running ``export PYTHONPATH=`pwd`:$PYTHONPATH``. * You can then build the firmware and gateware with a command such as ``$ python -m artiq.gateware.targets.kasli .json``, using a JSON system description file. -* Flash the binaries into the FPGA board with a command such as ``$ artiq_flash --srcbuild -d artiq_kasli/``. You need to configure OpenOCD as explained :ref:`in the user section `. OpenOCD is already part of the flake's development environment. +* Flash the binaries into the FPGA board with a command such as ``$ artiq_flash --srcbuild -d artiq_kasli/``. 
You need to configure OpenOCD as explained :ref:`in the user section `. OpenOCD is already part of the flake's development environment. * Check that the board boots and examine the UART messages by running a serial terminal program, e.g. ``$ flterm /dev/ttyUSB1`` (``flterm`` is part of MiSoC and installed in the flake's development environment). Leave the terminal running while you are flashing the board, so that you see the startup messages when the board boots immediately after flashing. You can also restart the board (without reflashing it) with ``$ artiq_flash start``. * The communication parameters are 115200 8-N-1. Ensure that your user has access to the serial device (e.g. by adding the user account to the ``dialout`` group). diff --git a/doc/manual/getting_started_core.rst b/doc/manual/getting_started_core.rst index 867b5b544..0cbeeffc6 100644 --- a/doc/manual/getting_started_core.rst +++ b/doc/manual/getting_started_core.rst @@ -23,7 +23,7 @@ As a very first step, we will turn on a LED on the core device. Create a file `` The central part of our code is our ``LED`` class, which derives from :class:`artiq.language.environment.EnvExperiment`. Among other features, :class:`~artiq.language.environment.EnvExperiment` calls our :meth:`~artiq.language.environment.Experiment.build` method and provides the :meth:`~artiq.language.environment.HasEnvironment.setattr_device` method that interfaces to the device database to create the appropriate device drivers and make those drivers accessible as ``self.core`` and ``self.led``. The :func:`~artiq.language.core.kernel` decorator (``@kernel``) tells the system that the :meth:`~artiq.language.environment.Experiment.run` method must be compiled for and executed on the core device (instead of being interpreted and executed as regular Python code on the host). The decorator uses ``self.core`` internally, which is why we request the core device using :meth:`~artiq.language.environment.HasEnvironment.setattr_device` like any other. -You will need to supply the correct device database for your core device; it is generated by a Python script typically called ``device_db.py`` (see also :ref:`device_db`). If you purchased a system from M-Labs, the device database is provided either on the USB stick or inside ~/artiq on the NUC; otherwise, you can also find examples in the ``examples`` folder of ARTIQ, sorted inside the corresponding subfolder for your core device. Copy ``device_db.py`` into the same directory as ``led.py`` (or use the ``--device-db`` option of ``artiq_run``). The field ``core_addr``, placed at the top of the file, needs to match the IP address of your core device so your computer can communicate with it. If you purchased a pre-assembled system it is normally already set correctly. +You will need to supply the correct device database for your core device; it is generated by a Python script typically called ``device_db.py`` (see also :ref:`device-db`). If you purchased a system from M-Labs, the device database is provided either on the USB stick or inside ~/artiq on the NUC; otherwise, you can also find examples in the ``examples`` folder of ARTIQ, sorted inside the corresponding subfolder for your core device. Copy ``device_db.py`` into the same directory as ``led.py`` (or use the ``--device-db`` option of ``artiq_run``). The field ``core_addr``, placed at the top of the file, needs to match the IP address of your core device so your computer can communicate with it. 
If you purchased a pre-assembled system it is normally already set correctly. .. note:: To access the examples, you can find where the ARTIQ package is installed on your machine with: :: diff --git a/doc/manual/installing.rst b/doc/manual/installing.rst index 86e42ccd4..8ae5b23cc 100644 --- a/doc/manual/installing.rst +++ b/doc/manual/installing.rst @@ -193,10 +193,10 @@ Set up the Conda channel and install ARTIQ into a new Conda environment: :: $ conda create -n artiq artiq .. note:: - If you do not need to flash boards, the ``artiq`` package is sufficient. The packages named ``artiq-board-*`` contain only firmware for the FPGA board, and you should not install them unless you are reflashing an FPGA board. Controllers for third-party devices (e.g. Thorlabs TCube, Lab Brick Digital Attenuator, etc.) that are not shipped with ARTIQ can also be installed with Conda. Browse `Hydra `_ or see the list of NDSPs in this manual to find the names of the corresponding packages. + If you do not need to flash boards, the ``artiq`` package is sufficient. The packages named ``artiq-board-*`` contain only firmware for the FPGA board, and you should not install them unless you are reflashing an FPGA board. .. note:: - On Windows, if the last command that creates and installs the ARTIQ environment fails with an error similar to "seeking backwards is not allowed", try to re-run the command with admin rights. + On Windows, if the last command that creates and installs the ARTIQ environment fails with an error similar to "seeking backwards is not allowed", try re-running the command with admin rights. .. note:: For commercial use you might need a license for Anaconda/Miniconda or for using the Anaconda package channel. `Miniforge `_ might be an alternative in a commercial environment as it does not include the Anaconda package channel by default. If you want to use Anaconda/Miniconda/Miniforge in a commercial environment, please check the license and the latest terms of service. @@ -210,75 +210,80 @@ This activation has to be performed in every new shell you open to make the ARTI .. note:: Some ARTIQ examples also require matplotlib and numba, and they must be installed manually for running those examples. They are available in Conda. -Upgrading ARTIQ (with Nix) --------------------------- +Upgrading ARTIQ +--------------- -Run ``$ nix profile upgrade`` if you installed ARTIQ into your user profile. If you used a ``flake.nix`` shell environment, make a back-up copy of the ``flake.lock`` file to enable rollback, then run ``$ nix flake update`` and re-enter ``$ nix shell``. +.. note:: + When you upgrade ARTIQ, as well as updating the software on your host machine, it may also be necessary to reflash the gateware and firmware of your core device to keep them compatible. New numbered release versions in particular incorporate breaking changes and are not generally compatible. See :ref:`reflashing-core-device` below for instructions on reflashing. + +Upgrading with Nix +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Run ``$ nix profile upgrade`` if you installed ARTIQ into your user profile. If you used a ``flake.nix`` shell environment, make a back-up copy of the ``flake.lock`` file to enable rollback, then run ``$ nix flake update`` and re-enter the environment with ``$ nix shell``. To rollback to the previous version, respectively use ``$ nix profile rollback`` or restore the backed-up version of the ``flake.lock`` file. 
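Concretely, upgrading a flake-based environment might look like the following sketch (the back-up file name is arbitrary; any copy of the previous ``flake.lock`` will do): ::

    $ cp flake.lock flake.lock.bak    # keep a copy for rollback
    $ nix flake update
    $ nix shell
    $ cp flake.lock.bak flake.lock    # restore the old lock file to roll back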
-You may need to reflash the gateware and firmware of the core device to keep it synchronized with the software. - -Upgrading ARTIQ (with MSYS2) ----------------------------- +Upgrading with MSYS2 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Run ``pacman -Syu`` to update all MSYS2 packages including ARTIQ. If you get a message telling you that the shell session must be restarted after a partial update, open the shell again after the partial update and repeat the command. See the MSYS2 and Pacman manual for information on how to update individual packages if required. -Upgrading ARTIQ (with Conda) ----------------------------- +Upgrading with Conda +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When upgrading ARTIQ or when testing different versions it is recommended that new Conda environments are created instead of upgrading the packages in existing environments. -Keep previous environments around until you are certain that they are not needed anymore and a new environment is known to work correctly. +As a rule, keep previous environments around unless you are certain that they are no longer needed and the new environment is working correctly. -To install the latest version, just select a different environment name and run the installation command again. +To install the latest version, simply select a different environment name and run the installation commands again. -Switching between Conda environments using commands such as ``$ conda deactivate artiq-6`` and ``$ conda activate artiq-5`` is the recommended way to roll back to previous versions of ARTIQ. - -You may need to reflash the gateware and firmware of the core device to keep it synchronized with the software. +Switching between Conda environments using commands such as ``$ conda deactivate artiq-7`` and ``$ conda activate artiq-8`` is the recommended way to roll back to previous versions of ARTIQ. You can list the environments you have created using:: $ conda env list -Flashing gateware and firmware into the core device ---------------------------------------------------- +.. _reflashing-core-device: +Reflashing core device gateware and firmware +------------------------------------------- .. note:: - If you have purchased a pre-assembled system from M-Labs or QUARTIQ, the gateware and firmware are already flashed and you can skip those steps, unless you want to replace them with a different version of ARTIQ. + If you have purchased a pre-assembled system from M-Labs or QUARTIQ, the gateware and firmware of your device will already be flashed to the newest version of ARTIQ. These steps are only necessary if you obtained your hardware in a different way, or if you want to change or upgrade your ARTIQ version after purchase. -You need to write three binary images onto the FPGA board: -1. The FPGA gateware bitstream -2. The bootloader -3. The ARTIQ runtime or satellite manager +Obtaining the board binaries +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Installing OpenOCD -^^^^^^^^^^^^^^^^^^ +If you have an active firmware subscription with M-Labs or QUARTIQ, you can obtain firmware that corresponds to your currently installed version of ARTIQ using AFWS (ARTIQ firmware service). One year of subscription is included with most hardware purchases. You may purchase or extend firmware subscriptions by writing to the sales@ email. 
+ +Run the command:: + + $ afws_client [username] build [afws_directory] [variant] + +Replace ``[username]`` with the login name that was given to you with the subscription, ``[variant]`` with the name of your system variant, and ``[afws_directory]`` with the name of an empty directory, which will be created by the command if it does not exist. Enter your password when prompted and wait for the build (if applicable) and download to finish. If you experience issues with the AFWS client, write to the helpdesk@ email. + +Without a subscription, you may build the firmware yourself from the open source code. See the section :ref:`Developing ARTIQ `. + +.. _installing-configuring-openocd: + +Installing and configuring OpenOCD +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. note:: - This version of OpenOCD is not applicable to Kasli-SoC. + These instructions are not applicable to Kasli-SoC, which does not use the utility ``artiq_flash`` to reflash. If your core device is a Kasli SoC, skip straight to :ref:`writing-flash`. -OpenOCD can be used to write the binary images into the core device FPGA board's flash memory. +ARTIQ supplies the utility ``artiq_flash``, which uses OpenOCD to write the binary images into an FPGA board's flash memory. -With Nix, add ``aqmain.openocd-bscanspi`` to the shell packages. Be careful not to add ``pkgs.openocd`` instead - this would install OpenOCD from the NixOS package collection, which does not support ARTIQ boards. +* With Nix, add ``aqmain.openocd-bscanspi`` to the shell packages. Be careful not to add ``pkgs.openocd`` instead - this would install OpenOCD from the NixOS package collection, which does not support ARTIQ boards. -With MSYS2, ``openocd`` and ``bscan-spi-bitstreams`` are included with ``artiq`` by default. +* With MSYS2, ``openocd`` and ``bscan-spi-bitstreams`` are included with ``artiq`` by default. -With Conda, install ``openocd`` as follows:: +* With Conda, install ``openocd`` as follows:: $ conda install -c m-labs openocd -.. _configuring-openocd: +Some additional steps are necessary to ensure that OpenOCD can communicate with the FPGA board: -Configuring OpenOCD -^^^^^^^^^^^^^^^^^^^ - -.. note:: - These instructions are not applicable to Kasli-SoC. - -Some additional steps are necessary to ensure that OpenOCD can communicate with the FPGA board. - -On Linux, first ensure that the current user belongs to the ``plugdev`` group (i.e. ``plugdev`` shown when you run ``$ groups``). If it does not, run ``$ sudo adduser $USER plugdev`` and re-login. +* On Linux, first ensure that the current user belongs to the ``plugdev`` group (i.e. ``plugdev`` shown when you run ``$ groups``). If it does not, run ``$ sudo adduser $USER plugdev`` and re-login. If you installed OpenOCD on Linux using Nix, use the ``which`` command to determine the path to OpenOCD, and then copy the udev rules: :: @@ -294,7 +299,7 @@ If you installed OpenOCD on Linux using Conda and are using the Conda environmen $ sudo cp ~/.conda/envs/artiq/share/openocd/contrib/60-openocd.rules /etc/udev/rules.d $ sudo udevadm trigger -On Windows, a third-party tool, `Zadig `_, is necessary. Use it as follows: +* On Windows, a third-party tool, `Zadig `_, is necessary. Use it as follows: 1. Make sure the FPGA board's JTAG USB port is connected to your computer. 2. Activate Options → List All Devices. @@ -305,35 +310,22 @@ On Windows, a third-party tool, `Zadig `_, is necessary. 
You may need to repeat these steps every time you plug the FPGA board into a port where it has not been plugged into previously on the same system. -Obtaining the board binaries -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you have an active firmware subscription with M-Labs or QUARTIQ, you can obtain firmware that corresponds to the currently installed version of ARTIQ using AFWS (ARTIQ firmware service). One year of subscription is included with most hardware purchases. You may purchase or extend firmware subscriptions by writing to the sales@ email. - -Run the command:: - - $ afws_client [username] build [afws_directory] [variant] - -Replace ``[username]`` with the login name that was given to you with the subscription, ``[variant]`` with the name of your system variant, and ``[afws_directory]`` with the name of an empty directory, which will be created by the command if it does not exist. Enter your password when prompted and wait for the build (if applicable) and download to finish. If you experience issues with the AFWS client, write to the helpdesk@ email. - -Without a subscription, you may build the firmware yourself from the open source code. See the section :ref:`Developing ARTIQ `. +.. _writing-flash: Writing the flash ^^^^^^^^^^^^^^^^^ -Then, you can write the flash: - -* For Kasli:: - - $ artiq_flash -d [afws_directory] - -The JTAG adapter is integrated into the Kasli board; for flashing (and debugging) you simply need to connect your computer to the micro-USB connector on the Kasli front panel. +First ensure the board is connected to your computer. In the case of Kasli, the JTAG adapter is integrated into the Kasli board; for flashing (and debugging) you simply need to connect your computer to the micro-USB connector on the Kasli front panel. For Kasli-SoC, which uses ``artiq_coremgmt``, an IP address supplied either with the ``-D`` option or in a correctly specified ``device_db.py`` suffices. * For Kasli-SoC:: $ artiq_coremgmt [-D 192.168.1.75] config write -f boot [afws_directory]/boot.bin -If the Kasli-SoC won't boot due to corrupted firmware and ``artiq_coremgmt`` cannot access it, extract the SD card and replace ``boot.bin`` manually. +If the Kasli-SoC won't boot due to nonexistent or corrupted firmware, extract the SD card and copy ``boot.bin`` onto it manually. + +* For Kasli:: + + $ artiq_flash -d [afws_directory] * For the KC705 board:: @@ -341,76 +333,83 @@ If the Kasli-SoC won't boot due to corrupted firmware and ``artiq_coremgmt`` can The SW13 switches need to be set to 00001. +Flashing over network is also possible for Kasli and KC705, assuming IP networking has been set up. In this case, the ``-H HOSTNAME`` option is used; see the entry for ``artiq_flash`` in the :ref:`Utilities ` reference. + Setting up the core device IP networking ---------------------------------------- -For Kasli, insert a SFP/RJ45 transceiver (normally included with purchases from M-Labs and QUARTIQ) into the SFP0 port and connect it to an Ethernet port in your network. If the port is 10Mbps or 100Mbps and not 1000Mbps, make sure that the SFP/RJ45 transceiver supports the lower rate. Many SFP/RJ45 transceivers only support the 1000Mbps rate. If you do not have a SFP/RJ45 transceiver that supports 10Mbps and 100Mbps rates, you may instead use a gigabit Ethernet switch in the middle to perform rate conversion. +For Kasli, insert a SFP/RJ45 transceiver (normally included with purchases from M-Labs and QUARTIQ) into the SFP0 port and connect it to an Ethernet port in your network. 
If the port is 10Mbps or 100Mbps and not 1000Mbps, make sure that the SFP/RJ45 transceiver supports the lower rate. Many SFP/RJ45 transceivers only support the 1000Mbps rate. If you do not have a SFP/RJ45 transceiver that supports 10Mbps and 100Mbps rates, you may instead use a gigabit Ethernet switch in the middle to perform rate conversion. -You can also insert other types of SFP transceivers into Kasli if you wish to use it directly in e.g. an optical fiber Ethernet network. +You can also insert other types of SFP transceivers into Kasli if you wish to use it directly in e.g. an optical fiber Ethernet network. -If you purchased a Kasli device from M-Labs, it usually comes with the IP address ``192.168.1.75``. Once you can reach this IP, it can be changed with: :: +Kasli-SoC already directly features RJ45 10/100/1000T Ethernet, but the same is still true of its SFP ports. + +If you purchased a Kasli or Kasli-SoC device from M-Labs, it usually comes with the IP address ``192.168.1.75``. Once you can reach this IP, it can be changed by running: :: $ artiq_coremgmt -D 192.168.1.75 config write -s ip [new IP] -and then reboot the device (with ``artiq_flash start`` or a power cycle). +and then rebooting the device (with ``artiq_flash start`` or a power cycle). -If the ``ip`` config field is not set, or set to ``use_dhcp`` then the device will -attempt to obtain an IP address and default gateway using DHCP. If a static IP -address is wanted, install OpenOCD as before, and flash the IP, default gateway -(and, if necessary, MAC and IPv6) addresses directly: :: +.. note:: + Kasli-SoC is not a valid target for ``artiq_flash``; it is easiest to reboot by power cycle. For a KC705, it is necessary to specify ``artiq_flash -t kc705 start``. - $ artiq_mkfs flash_storage.img -s mac xx:xx:xx:xx:xx:xx -s ip xx.xx.xx.xx/xx -s ipv4_default_route xx.xx.xx.xx -s ip6 xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx/xx -s ipv6_default_route xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx +* For Kasli-SoC: + +If the ``ip`` config is not set, Kasli-SoC firmware defaults to using the IP address ``192.168.1.56``. It can then be changed with the procedure above. + +* For Kasli or KC705: + +If the ``ip`` config field is not set or set to ``use_dhcp``, the device will attempt to obtain an IP address and default gateway using DHCP. If a static IP address is nonetheless wanted, it can be flashed directly (OpenOCD must be installed and configured, as above), along with, as necessary, default gateway, IPv6, and/or MAC address: + + $ artiq_mkfs flash_storage.img [-s mac xx:xx:xx:xx:xx:xx] [-s ip xx.xx.xx.xx/xx] [-s ipv4_default_route xx.xx.xx.xx] [-s ip6 xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx/xx] [-s ipv6_default_route xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx] $ artiq_flash -t [board] -V [variant] -f flash_storage.img storage start -For Kasli devices, flashing a MAC address is not necessary as they can obtain it from their EEPROM. -If you only want to access the core device from the same subnet you may -omit the default gateway and IPv4 prefix length: :: - - $ artiq_mkfs flash_storage.img -s mac xx:xx:xx:xx:xx:xx -s ip xx.xx.xx.xx +On Kasli or Kasli SoC devices, specifying the MAC address is unnecessary, as they can obtain it from their EEPROM. If you only want to access the core device from the same subnet, default gateway and IPv4 prefix length may also be ommitted. 
Regardless of board, once a device is reachable by ``artiq_coremgmt``, any of these fields can be accessed using ``artiq_coremgmt config write`` and ``artiq_coremgt config read``; see also :ref:`Utilities `. If DHCP has been used the address can be found in the console output, which can be viewed using: :: $ python -m misoc.tools.flterm /dev/ttyUSB2 - Check that you can ping the device. If ping fails, check that the Ethernet link LED is ON - on Kasli, it is the LED next to the SFP0 connector. As a next step, look at the messages emitted on the UART during boot. Use a program such as flterm or PuTTY to connect to the device's serial port at 115200bps 8-N-1 and reboot the device. On Kasli, the serial port is on FTDI channel 2 with v1.1 hardware (with channel 0 being JTAG) and on FTDI channel 1 with v1.0 hardware. Note that on Windows you might need to install the `FTDI drivers `_ first. -If you want to use IPv6, the device also has a link-local address that corresponds to its EUI-64, and an additional arbitrary IPv6 address can be defined by using the ``ip6`` configuration key. All IPv4 and IPv6 addresses can be used at the same time. +Regarding use of IPv6, note that the device also has a link-local address that corresponds to its EUI-64, which can be used simultaneously to the IPv6 address defined by using the ``ip6`` configuration key, which may be of arbitrary nature. Miscellaneous configuration of the core device ---------------------------------------------- -Those steps are optional. The core device usually needs to be restarted for changes to take effect. +These steps are optional, and only need to be executed if necessary for your specific purposes. In all cases, the core device generally needs to be restarted for changes to take effect. -* Load the idle kernel +Flash idle or startup kernel +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The idle kernel is the kernel (some piece of code running on the core device) which the core device runs whenever it is not connected to a PC via Ethernet. -This kernel is therefore stored in the :ref:`core device configuration flash storage `. +The idle kernel is the kernel (that is, a piece of code running on the core device; see :ref:`next topic ` for more information about kernels) which the core device runs whenever it is not connected to the host via Ethernet. This kernel is therefore stored immediately in the :ref:`core device configuration flash storage `. -To flash the idle kernel, first compile the idle experiment. The idle experiment's ``run()`` method must be a kernel: it must be decorated with the ``@kernel`` decorator (see :ref:`next topic ` for more information about kernels). Since the core device is not connected to the PC, RPCs (calling Python code running on the PC from the kernel) are forbidden in the idle experiment. Then write it into the core device configuration flash storage: :: +To flash the idle kernel, first compile an idle experiment. Since the core device is not connected to the host, RPCs (calling Python code running on the host from the kernel) are forbidden, and its ``run()`` method must be a kernel, marked correctly with the ``@kernel`` decorator. Write the compiled experiment to the core device configuration flash storage, under the key ``idle_kernel``: $ artiq_compile idle.py $ artiq_coremgmt config write -f idle_kernel idle.elf -.. note:: You can find more information about how to use the ``artiq_coremgmt`` utility on the :ref:`Utilities ` page. 
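For instance, a minimal ``idle.py`` might look like the following sketch (the ``led0`` device name is a placeholder and depends on your device database): ::

    from artiq.experiment import *

    class IdleKernel(EnvExperiment):
        def build(self):
            self.setattr_device("core")
            self.setattr_device("led0")

        @kernel
        def run(self):
            self.core.reset()
            # no RPCs: the host is not reachable while the idle kernel runs
            while True:
                self.led0.pulse(250*ms)
                delay(250*ms)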
- -* Load the startup kernel - -The startup kernel is executed once when the core device powers up. It should initialize DDSes, set up TTL directions, etc. Proceed as with the idle kernel, but using the ``startup_kernel`` key in the ``artiq_coremgmt`` command. +The startup kernel is the kernel executed once immediately whenever the core device powers on. Uses include initializing DDSes, setting TTL directions etc. Proceed as with the idle kernel, but using the ``startup_kernel`` key in the ``artiq_coremgmt`` command. For DRTIO systems, the startup kernel should wait until the desired destinations (including local RTIO) are up, using :meth:`artiq.coredevice.Core.get_rtio_destination_status`. -* Load the DRTIO routing table +Load the DRTIO routing table +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you are using DRTIO and the default routing table (for a star topology) is not suitable to your needs, prepare and load a different routing table. See :ref:`Using DRTIO `. -* Select the RTIO clock source (KC705 and Kasli) +Select the RTIO clock source +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The KC705 may use either an external clock signal, or its internal clock with external frequency or internal crystal reference. The clock is selected at power-up. Setting the RTIO clock source to "ext0_bypass" would bypass the Si5324 synthesiser, requiring that an input clock be present. To select the source, use one of these commands: :: +The core device may use any of: an external clock signal, its internal clock with external frequency reference, or its internal clock with internal crystal reference. Clock source and timing are set at power-up. To find out what clock signal you are using, check startup logs with ``artiq_coremgmt log``. + +By default, an internal clock is used; to select another source, use a command of the form: $ artiq_coremgmt config write -s rtio_clock int_125 # internal 125MHz clock (default) $ artiq_coremgmt config write -s rtio_clock ext0_bypass # external clock (bypass) +If set to ``ext0_bypass``, the Si5324 synthesizer is bypassed entirely in favor of an input clock, requiring that an input clock be present. + Other options include: - ``ext0_synth0_10to125`` - external 10MHz reference clock used by Si5324 to synthesize a 125MHz RTIO clock, - ``ext0_synth0_80to125`` - external 80MHz reference clock used by Si5324 to synthesize a 125MHz RTIO clock, @@ -420,14 +419,15 @@ Other options include: - ``int_150`` - internal crystal reference is used by Si5324 to synthesize a 150MHz RTIO clock. - ``ext0_bypass_125`` and ``ext0_bypass_100`` - explicit aliases for ``ext0_bypass``. -Availability of these options depends on the board and their configuration - specific setting may or may not be supported. +Availability of these options depends on specific board and configuration - specific settings may or may not be supported. See also :ref:`core-device-clocking`. -* Setup resolving RTIO channels to their names +Set up resolving RTIO channels to their names +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This feature allows you to print the channels' respective names alongside with their numbers in RTIO error messages. To enable it, run the ``artiq_rtiomap`` tool and write its result into the device config at the ``device_map`` key: :: $ artiq_rtiomap dev_map.bin $ artiq_coremgmt config write -f device_map dev_map.bin -.. note:: You can find more information about how to use the ``artiq_rtiomap`` utility on the :ref:`Utilities ` page. +.. 
note:: More information on the ``artiq_rtiomap`` utility can be found on the :ref:`Utilities ` page. diff --git a/doc/manual/utilities.rst b/doc/manual/utilities.rst index 187ad39e4..248e1b88a 100644 --- a/doc/manual/utilities.rst +++ b/doc/manual/utilities.rst @@ -32,6 +32,8 @@ This tool compiles key/value pairs into a binary image suitable for flashing int :ref: artiq.frontend.artiq_mkfs.get_argparser :prog: artiq_mkfs +.. _flashing-loading-tool: + Flashing/Loading tool --------------------- From 4509ad86f8c7156e4e2aed6ca4833839cb3f7a55 Mon Sep 17 00:00:00 2001 From: architeuthis Date: Wed, 5 Jun 2024 17:08:12 +0800 Subject: [PATCH 284/296] Remove outdated references to examples/master, fix labels --- doc/manual/core_device.rst | 4 ++-- doc/manual/getting_started_mgmt.rst | 2 +- doc/manual/installing.rst | 7 +++++-- doc/manual/rtio.rst | 2 ++ doc/manual/utilities.rst | 3 +-- 5 files changed, 11 insertions(+), 7 deletions(-) diff --git a/doc/manual/core_device.rst b/doc/manual/core_device.rst index 8949836e2..470935aef 100644 --- a/doc/manual/core_device.rst +++ b/doc/manual/core_device.rst @@ -5,7 +5,6 @@ The core device is a FPGA-based hardware component that contains a softcore or h While it is possible to use all the other parts of ARTIQ (controllers, master, GUI, dataset management, etc.) without a core device, many experiments require it. - .. _core-device-flash-storage: Flash storage @@ -13,7 +12,7 @@ Flash storage The core device contains some flash storage space which is largely used to store configuration data. It is one sector (typically 64 kB) large and organized as a list of key-value records, accessible by using ``artiq_coremgmt`` (see: :ref:`core-device-management-tool`). -This area is used to store a variety of configurations, in particular the core device IP address and, if present, the startup and/or idle kernels (see also: :ref:`miscellaneous-configuration-of-the-core-device`). +This area is used to store a variety of configurations, in particular the core device IP address and, if present, the startup and/or idle kernels (see also: :ref:`miscellaneous_config_core_device`). .. _board-ports: @@ -158,6 +157,7 @@ To avoid I/O contention, the startup kernel should first program the TCA6424A ex See :mod:`artiq.coredevice.i2c` for more details. .. _core-device-clocking: + Clocking ++++++++ diff --git a/doc/manual/getting_started_mgmt.rst b/doc/manual/getting_started_mgmt.rst index e9398c7bc..334d6389c 100644 --- a/doc/manual/getting_started_mgmt.rst +++ b/doc/manual/getting_started_mgmt.rst @@ -10,7 +10,7 @@ Starting your first experiment with the master In the previous tutorial, we used the ``artiq_run`` utility to execute our experiments, which is a simple stand-alone tool that bypasses the ARTIQ management system. We will now see how to run an experiment using the master (the central program in the management system that schedules and executes experiments) and the dashboard (that connects to the master and controls it). -First, create a folder ``~/artiq-master`` and copy the file ``device_db.py`` (containing the device database) found in the ``examples/master`` directory from the ARTIQ sources. The master uses those files in the same way as ``artiq_run``. +First, create a folder ``~/artiq-master`` and copy your ``device_db.py`` into it (the file containing the device database, as described in :ref:`connecting-to-the-core-device`).The master uses those files in the same way as ``artiq_run``. Then create a ``~/artiq-master/repository`` sub-folder to contain experiments. 
The master scans this ``repository`` folder to determine what experiments are available (the name of the folder can be changed using ``-r``). diff --git a/doc/manual/installing.rst b/doc/manual/installing.rst index 8ae5b23cc..dfbaeccf8 100644 --- a/doc/manual/installing.rst +++ b/doc/manual/installing.rst @@ -243,8 +243,9 @@ You can list the environments you have created using:: $ conda env list .. _reflashing-core-device: + Reflashing core device gateware and firmware -------------------------------------------- +-------------------------------------------- .. note:: If you have purchased a pre-assembled system from M-Labs or QUARTIQ, the gateware and firmware of your device will already be flashed to the newest version of ARTIQ. These steps are only necessary if you obtained your hardware in a different way, or if you want to change or upgrade your ARTIQ version after purchase. @@ -374,6 +375,8 @@ Check that you can ping the device. If ping fails, check that the Ethernet link Regarding use of IPv6, note that the device also has a link-local address that corresponds to its EUI-64, which can be used simultaneously to the IPv6 address defined by using the ``ip6`` configuration key, which may be of arbitrary nature. +.. _miscellaneous_config_core_device: + Miscellaneous configuration of the core device ---------------------------------------------- @@ -422,7 +425,7 @@ Other options include: Availability of these options depends on specific board and configuration - specific settings may or may not be supported. See also :ref:`core-device-clocking`. Set up resolving RTIO channels to their names -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This feature allows you to print the channels' respective names alongside with their numbers in RTIO error messages. To enable it, run the ``artiq_rtiomap`` tool and write its result into the device config at the ``device_map`` key: :: diff --git a/doc/manual/rtio.rst b/doc/manual/rtio.rst index f5901ab0d..6db6bbbf7 100644 --- a/doc/manual/rtio.rst +++ b/doc/manual/rtio.rst @@ -1,3 +1,5 @@ +.. _artiq-real-time-i-o-concepts: + ARTIQ Real-Time I/O Concepts ============================ diff --git a/doc/manual/utilities.rst b/doc/manual/utilities.rst index 248e1b88a..14dd2eb87 100644 --- a/doc/manual/utilities.rst +++ b/doc/manual/utilities.rst @@ -12,7 +12,6 @@ Local running tool :ref: artiq.frontend.artiq_run.get_argparser :prog: artiq_run - Static compiler --------------- @@ -48,7 +47,7 @@ Core device management tool The artiq_coremgmt utility gives remote access to the core device logs, the :ref:`core-device-flash-storage`, and other management functions. -To use this tool, you need to specify a ``device_db.py`` device database file which contains a ``comm`` device (an example is provided in ``examples/master/device_db.py``). This tells the tool how to connect to the core device and with which parameters (e.g. IP address, TCP port). When not specified, the artiq_coremgmt utility will assume that there is a file named ``device_db.py`` in the current directory. +To use this tool, it is necessary to specify the IP address your core device can be contacted at. If no option is used, the utility will assume there is a file named ``device_db.py`` in the current directory containing the device database; otherwise, a device database file can be provided with ``--device-db`` or an address directly with ``--device`` (see also below). 
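For reference, a minimal ``device_db.py`` sketch containing only the core device entry might look like the following (the host address is an illustrative placeholder; substitute the address of your own core device, and note that a real device database normally contains many more entries): ::

    device_db = {
        "core": {
            "type": "local",
            "module": "artiq.coredevice.core",
            "class": "Core",
            # replace the host with the IP address of your own core device
            "arguments": {"host": "192.168.1.75", "ref_period": 1e-9}
        }
    }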
To read core device logs:: From 8b64315ecf39006127d718dc4bd273647b3c6a0b Mon Sep 17 00:00:00 2001 From: architeuthis Date: Thu, 6 Jun 2024 10:33:24 +0800 Subject: [PATCH 285/296] Deleted reference to board packages --- doc/manual/core_device.rst | 2 +- doc/manual/installing.rst | 11 +++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/doc/manual/core_device.rst b/doc/manual/core_device.rst index 470935aef..893c199d9 100644 --- a/doc/manual/core_device.rst +++ b/doc/manual/core_device.rst @@ -10,7 +10,7 @@ While it is possible to use all the other parts of ARTIQ (controllers, master, G Flash storage ************* -The core device contains some flash storage space which is largely used to store configuration data. It is one sector (typically 64 kB) large and organized as a list of key-value records, accessible by using ``artiq_coremgmt`` (see: :ref:`core-device-management-tool`). +The core device contains some flash storage space which is used to store configuration data. It is one sector (typically 64 kB) large and organized as a list of key-value records, accessible by using ``artiq_coremgmt`` (see: :ref:`core-device-management-tool`). This area is used to store a variety of configurations, in particular the core device IP address and, if present, the startup and/or idle kernels (see also: :ref:`miscellaneous_config_core_device`). diff --git a/doc/manual/installing.rst b/doc/manual/installing.rst index dfbaeccf8..2a4cb6578 100644 --- a/doc/manual/installing.rst +++ b/doc/manual/installing.rst @@ -192,9 +192,6 @@ Set up the Conda channel and install ARTIQ into a new Conda environment: :: $ conda config --append channels conda-forge $ conda create -n artiq artiq -.. note:: - If you do not need to flash boards, the ``artiq`` package is sufficient. The packages named ``artiq-board-*`` contain only firmware for the FPGA board, and you should not install them unless you are reflashing an FPGA board. - .. note:: On Windows, if the last command that creates and installs the ARTIQ environment fails with an error similar to "seeking backwards is not allowed", try re-running the command with admin rights. @@ -217,19 +214,19 @@ Upgrading ARTIQ When you upgrade ARTIQ, as well as updating the software on your host machine, it may also be necessary to reflash the gateware and firmware of your core device to keep them compatible. New numbered release versions in particular incorporate breaking changes and are not generally compatible. See :ref:`reflashing-core-device` below for instructions on reflashing. Upgrading with Nix -^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^ Run ``$ nix profile upgrade`` if you installed ARTIQ into your user profile. If you used a ``flake.nix`` shell environment, make a back-up copy of the ``flake.lock`` file to enable rollback, then run ``$ nix flake update`` and re-enter the environment with ``$ nix shell``. To rollback to the previous version, respectively use ``$ nix profile rollback`` or restore the backed-up version of the ``flake.lock`` file. Upgrading with MSYS2 -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^ Run ``pacman -Syu`` to update all MSYS2 packages including ARTIQ. If you get a message telling you that the shell session must be restarted after a partial update, open the shell again after the partial update and repeat the command. See the MSYS2 and Pacman manual for information on how to update individual packages if required. 
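For instance, an MSYS2 update that runs into the partial-update case described above is completed by simply repeating the command in a fresh shell: ::

    $ pacman -Syu    # may stop after a partial update and ask for the shell to be restarted
    # close the shell, open a new MSYS2 shell, then run it again:
    $ pacman -Syu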
Upgrading with Conda -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^ When upgrading ARTIQ or when testing different versions it is recommended that new Conda environments are created instead of upgrading the packages in existing environments. As a rule, keep previous environments around unless you are certain that they are no longer needed and the new environment is working correctly. @@ -262,6 +259,8 @@ Run the command:: Replace ``[username]`` with the login name that was given to you with the subscription, ``[variant]`` with the name of your system variant, and ``[afws_directory]`` with the name of an empty directory, which will be created by the command if it does not exist. Enter your password when prompted and wait for the build (if applicable) and download to finish. If you experience issues with the AFWS client, write to the helpdesk@ email. +For certain configurations (KC705 or ZC705 only) it is also possible to source firmware from `the M-Labs Hydra server ` (in ``main`` and ``zynq`` respectively). + Without a subscription, you may build the firmware yourself from the open source code. See the section :ref:`Developing ARTIQ `. .. _installing-configuring-openocd: From a901ab74b5d0c44265b5e2033cff89d532489d18 Mon Sep 17 00:00:00 2001 From: Florian Agbuya Date: Fri, 7 Jun 2024 12:56:55 +0800 Subject: [PATCH 286/296] compiler: fix int boundary checks Signed-off-by: Florian Agbuya --- artiq/compiler/embedding.py | 4 ++-- artiq/compiler/transforms/int_monomorphizer.py | 4 ++-- artiq/coredevice/comm_kernel.py | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/artiq/compiler/embedding.py b/artiq/compiler/embedding.py index 380656e44..1c08bbdfe 100644 --- a/artiq/compiler/embedding.py +++ b/artiq/compiler/embedding.py @@ -747,9 +747,9 @@ class StitchingInferencer(Inferencer): if elt.__class__ == float: state |= IS_FLOAT elif elt.__class__ == int: - if -2**31 < elt < 2**31-1: + if -2**31 <= elt <= 2**31-1: state |= IS_INT32 - elif -2**63 < elt < 2**63-1: + elif -2**63 <= elt <= 2**63-1: state |= IS_INT64 else: state = -1 diff --git a/artiq/compiler/transforms/int_monomorphizer.py b/artiq/compiler/transforms/int_monomorphizer.py index adab3b165..5c7d22dce 100644 --- a/artiq/compiler/transforms/int_monomorphizer.py +++ b/artiq/compiler/transforms/int_monomorphizer.py @@ -14,9 +14,9 @@ class IntMonomorphizer(algorithm.Visitor): def visit_NumT(self, node): if builtins.is_int(node.type): if types.is_var(node.type["width"]): - if -2**31 < node.n < 2**31-1: + if -2**31 <= node.n <= 2**31-1: width = 32 - elif -2**63 < node.n < 2**63-1: + elif -2**63 <= node.n <= 2**63-1: width = 64 else: diag = diagnostic.Diagnostic("error", diff --git a/artiq/coredevice/comm_kernel.py b/artiq/coredevice/comm_kernel.py index b6ffb8ee7..73a480ef4 100644 --- a/artiq/coredevice/comm_kernel.py +++ b/artiq/coredevice/comm_kernel.py @@ -465,12 +465,12 @@ class CommKernel: self._write_bool(value) elif tag == "i": check(isinstance(value, (int, numpy.int32)) and - (-2**31 <= value < 2**31), + (-2**31 <= value <= 2**31-1), lambda: "32-bit int") self._write_int32(value) elif tag == "I": check(isinstance(value, (int, numpy.int32, numpy.int64)) and - (-2**63 <= value < 2**63), + (-2**63 <= value <= 2**63-1), lambda: "64-bit int") self._write_int64(value) elif tag == "f": @@ -479,8 +479,8 @@ class CommKernel: self._write_float64(value) elif tag == "F": check(isinstance(value, Fraction) and - (-2**63 <= value.numerator < 2**63) and - (-2**63 <= value.denominator < 2**63), + (-2**63 <= value.numerator 
<= 2**63-1) and + (-2**63 <= value.denominator <= 2**63-1), lambda: "64-bit Fraction") self._write_int64(value.numerator) self._write_int64(value.denominator) From bfeac30c4445b41f5dd4b5bebf5793f2d232c21d Mon Sep 17 00:00:00 2001 From: Florian Agbuya Date: Fri, 7 Jun 2024 13:00:01 +0800 Subject: [PATCH 287/296] test_embedding: add int boundary test from 25168422a Signed-off-by: Florian Agbuya --- artiq/test/coredevice/test_embedding.py | 26 +++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/artiq/test/coredevice/test_embedding.py b/artiq/test/coredevice/test_embedding.py index f5ec95451..46cb8b296 100644 --- a/artiq/test/coredevice/test_embedding.py +++ b/artiq/test/coredevice/test_embedding.py @@ -573,3 +573,29 @@ class NumpyQuotingTest(ExperimentCase): def test_issue_1871(self): """Ensure numpy.array() does not break NumPy math functions""" self.create(_NumpyQuoting).run() + + +class _IntBoundary(EnvExperiment): + def build(self): + self.setattr_device("core") + self.int32_min = numpy.iinfo(numpy.int32).min + self.int32_max = numpy.iinfo(numpy.int32).max + self.int64_min = numpy.iinfo(numpy.int64).min + self.int64_max = numpy.iinfo(numpy.int64).max + + @kernel + def test_int32_bounds(self, min_val: TInt32, max_val: TInt32): + return min_val == self.int32_min and max_val == self.int32_max + + @kernel + def test_int64_bounds(self, min_val: TInt64, max_val: TInt64): + return min_val == self.int64_min and max_val == self.int64_max + + @kernel + def run(self): + self.test_int32_bounds(self.int32_min, self.int32_max) + self.test_int64_bounds(self.int64_min, self.int64_max) + +class IntBoundaryTest(ExperimentCase): + def test_int_boundary(self): + self.create(_IntBoundary).run() From f43252901410ee784628e96ac0ebbcf842e0d140 Mon Sep 17 00:00:00 2001 From: architeuthidae <93191635+architeuthidae@users.noreply.github.com> Date: Wed, 12 Jun 2024 14:50:48 +0800 Subject: [PATCH 288/296] doc: 'Core device' manual page overhaul (#2430) --- doc/manual/core_device.rst | 84 +++++++++++++++----------------------- doc/manual/installing.rst | 23 ++++------- doc/manual/rtio.rst | 2 + 3 files changed, 42 insertions(+), 67 deletions(-) diff --git a/doc/manual/core_device.rst b/doc/manual/core_device.rst index 893c199d9..764884f45 100644 --- a/doc/manual/core_device.rst +++ b/doc/manual/core_device.rst @@ -3,73 +3,51 @@ Core device The core device is a FPGA-based hardware component that contains a softcore or hardcore CPU tightly coupled with the so-called RTIO core, which runs in gateware and provides precision timing. The CPU executes Python code that is statically compiled by the ARTIQ compiler and communicates with peripherals (TTL, DDS, etc.) through the RTIO core, as described in :ref:`artiq-real-time-i-o-concepts`. This architecture provides high timing resolution, low latency, low jitter, high-level programming capabilities, and good integration with the rest of the Python experiment code. -While it is possible to use all the other parts of ARTIQ (controllers, master, GUI, dataset management, etc.) without a core device, many experiments require it. +While it is possible to use the other parts of ARTIQ (controllers, master, GUI, dataset management, etc.) without a core device, many experiments require it. .. _core-device-flash-storage: Flash storage -************* +^^^^^^^^^^^^^ -The core device contains some flash storage space which is used to store configuration data. 
It is one sector (typically 64 kB) large and organized as a list of key-value records, accessible by using ``artiq_coremgmt`` (see: :ref:`core-device-management-tool`). - -This area is used to store a variety of configurations, in particular the core device IP address and, if present, the startup and/or idle kernels (see also: :ref:`miscellaneous_config_core_device`). +The core device contains some flash storage space which is used to store configuration data. It is one sector (typically 64 kB) large and organized as a list of key-value records, accessible by using ``artiq_coremgmt`` (see :ref:`core-device-management-tool`). The core device IP and MAC addresses, as well as, if present, the startup and/or idle kernels (see :ref:`miscellaneous_config_core_device`) are stored here. .. _board-ports: FPGA board ports -**************** +^^^^^^^^^^^^^^^^ All boards have a serial interface running at 115200bps 8-N-1 that can be used for debugging. -Kasli ------ +Kasli and Kasli SoC +^^^^^^^^^^^^^^^^^^^ -`Kasli `_ is a versatile core device designed for ARTIQ as part of the `Sinara `_ family of boards. All variants support interfacing to various EEM daughterboards (TTL, DDS, ADC, DAC...) connected directly to it. +`Kasli `_ and `Kasli-SoC `_ are versatile core devices designed for ARTIQ as part of the open-source `Sinara `_ family of boards. All support interfacing to various EEM daughterboards (TTL, DDS, ADC, DAC...) through twelve onboard EEM ports. Kasli-SoC, which runs on a separate `Zynq port `_ of the ARTIQ firmware, is architecturally separate, among other things being capable of performing much heavier software computations quickly locally to the board, but provides generally similar features to Kasli. Kasli itself exists in two versions, of which the improved Kasli v2.0 is now in more common use, but the original Kasli v1.0 remains supported by ARTIQ. -Standalone variants -+++++++++++++++++++ +Kasli can be connected to the network using a 10000Base-X SFP module, installed into the SFP0 cage. Kasli-SoC features a built-in Ethernet port to use instead. If configured as a DRTIO satellite, both boards instead reserve SFP0 for the upstream DRTIO connection; remaining SFP cages are available for downstream connections. Equally, if used as a DRTIO master, all free SFP cages are available for downstream connections (i.e. all but SFP0 on Kasli, all four on Kasli-SoC). -Kasli is connected to the network using a 1000Base-X SFP module. `No-name `_ BiDi (1000Base-BX) modules have been used successfully. The SFP module for the network should be installed into the SFP0 cage. -The other SFP cages are not used. - -The RTIO clock frequency is 125MHz or 150MHz, which is generated by the Si5324. - -DRTIO master variants -+++++++++++++++++++++ - -Kasli can be used as a DRTIO master that provides local RTIO channels and can additionally control one DRTIO satellite. - -The RTIO clock frequency is 125MHz or 150MHz, which is generated by the Si5324. The DRTIO line rate is 2.5Gbps or 3Gbps. - -As with the standalone configuration, the SFP module for the Ethernet network should be installed into the SFP0 cage. The DRTIO connections are on SFP1 and SFP2, and optionally on the SATA connector. - -DRTIO satellite/repeater variants -+++++++++++++++++++++++++++++++++ - -Kasli can be used as a DRTIO satellite with a 125MHz or 150MHz RTIO clock and a 2.5Gbps or 3Gbps DRTIO line rate. - -The DRTIO upstream connection is on SFP0 or optionally on the SATA connector, and the remaining SFPs are downstream ports. 
+The DRTIO line rate depends upon the RTIO clock frequency running, e.g., at 125MHz the line rate is 2.5Gbps, at 150MHz 3.0Gbps, etc. See below for information on RTIO clocks. KC705 ------ +^^^^^ An alternative target board for the ARTIQ core device is the KC705 development board from Xilinx. It supports the NIST CLOCK and QC2 hardware (FMC). Common problems -+++++++++++++++ +--------------- * The SW13 switches on the board need to be set to 00001. * When connected, the CLOCK adapter breaks the JTAG chain due to TDI not being connected to TDO on the FMC mezzanine. * On some boards, the JTAG USB connector is not correctly soldered. VADJ -++++ +---- With the NIST CLOCK and QC2 adapters, for safe operation of the DDS buses (to prevent damage to the IO banks of the FPGA), the FMC VADJ rail of the KC705 should be changed to 3.3V. Plug the Texas Instruments USB-TO-GPIO PMBus adapter into the PMBus connector in the corner of the KC705 and use the Fusion Digital Power Designer software to configure (requires Windows). Write to chip number U55 (address 52), channel 4, which is the VADJ rail, to make it 3.3V instead of 2.5V. Power cycle the KC705 board to check that the startup voltage on the VADJ rail is now 3.3V. NIST CLOCK -++++++++++ +---------- With the CLOCK hardware, the TTL lines are mapped as follows: @@ -113,7 +91,7 @@ The DDS bus is on channel 27. NIST QC2 -++++++++ +-------- With the QC2 hardware, the TTL lines are mapped as follows: @@ -159,24 +137,28 @@ See :mod:`artiq.coredevice.i2c` for more details. .. _core-device-clocking: Clocking -++++++++ +^^^^^^^^ -The KC705 in standalone variants supports an internal 125 MHz RTIO clock (based on its crystal oscillator, or external reference for PLL for DRTIO variants) and an external clock, that can be selected using the ``rtio_clock`` configuration entry. Valid values are: +The core device generates the RTIO clock using a PLL locked either to an internal crystal or to an external frequency reference. If choosing the latter, external reference must be provided (via front panel SMA input on Kasli boards). Valid configuration options include: - * ``int_125`` - internal crystal oscillator, 125 MHz output (default), - * ``ext0_bypass`` - external clock. + * ``int_100`` - internal crystal reference is used to synthesize a 100MHz RTIO clock, + * ``int_125`` - internal crystal reference is used to synthesize a 125MHz RTIO clock (default option), + * ``int_150`` - internal crystal reference is used to synthesize a 150MHz RTIO clock. + * ``ext0_synth0_10to125`` - external 10MHz reference clock used to synthesize a 125MHz RTIO clock, + * ``ext0_synth0_80to125`` - external 80MHz reference clock used to synthesize a 125MHz RTIO clock, + * ``ext0_synth0_100to125`` - external 100MHz reference clock used to synthesize a 125MHz RTIO clock, + * ``ext0_synth0_125to125`` - external 125MHz reference clock used to synthesize a 125MHz RTIO clock. -KC705 in DRTIO variants and Kasli generate the RTIO clock using a PLL locked either to an internal crystal or to an external frequency reference. Valid values are: +The selected option can be observed in the core device boot logs and accessed using ``artiq_coremgmt config`` with key ``rtio_clock``. 
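For example, the current setting can be queried and changed with the ``config`` subcommands of the management tool (a sketch; the value written is just one of the options listed above): ::

    $ artiq_coremgmt config read rtio_clock
    $ artiq_coremgmt config write -s rtio_clock ext0_synth0_125to125

Since the clock is selected at power-up, a changed setting only takes effect after the core device is rebooted.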
- * ``int_125`` - internal crystal oscillator using PLL, 125 MHz output (default), - * ``int_100`` - internal crystal oscillator using PLL, 100 MHz output, - * ``int_150`` - internal crystal oscillator using PLL, 150 MHz output, - * ``ext0_synth0_10to125`` - external 10 MHz reference using PLL, 125 MHz output, - * ``ext0_synth0_80to125`` - external 80 MHz reference using PLL, 125 MHz output, - * ``ext0_synth0_100to125`` - external 100 MHz reference using PLL, 125 MHz output, - * ``ext0_synth0_125to125`` - external 125 MHz reference using PLL, 125 MHz output, - * ``ext0_bypass``, ``ext0_bypass_125``, ``ext0_bypass_100`` - external clock - with explicit aliases available. +As of ARTIQ 8, it is now possible for Kasli and Kasli-SoC configurations to enable WRPLL -- a clock recovery method using `DDMTD `_ and Si549 oscillators -- both to lock the main RTIO clock and (in DRTIO configurations) to lock satellites to master. This is set by the ``enable_wrpll`` option in the JSON description file. Because WRPLL requires slightly different gateware and firmware, it is necessary to re-flash devices to enable or disable it in extant systems. If you would like to obtain the firmware for a different WRPLL setting through ``awfs_client``, write to the helpdesk@ email. -The selected option can be observed in the core device boot logs. +If phase noise performance is the priority, it is recommended to use ``ext0_synth0_125to125`` over other ``ext0`` options, as this bypasses the (noisy) MMCM. -Options ``rtio_clock=int_XXX`` and ``rtio_clock=ext0_synth0_XXXXX`` generate the RTIO clock using a PLL locked either to an internal crystal or to an external frequency reference (depending on exact option). ``rtio_clock=ext0_bypass`` bypasses that PLL and the user must supply the RTIO clock (typically 125 MHz) at the Kasli front panel SMA input. Bypassing the PLL ensures the skews between input clock, Kasli downstream clock outputs, and RTIO clock are deterministic accross reboots of the system. This is useful when phase determinism is required in situtations where the reference clock fans out to other devices before reaching Kasli. +If not using WRPLL, PLL can also be bypassed entirely with the options + + * ``ext0_bypass`` (input clock used directly) + * ``ext0_bypass_125`` (explicit alias) + * ``ext0_bypass_100`` (explicit alias) + +Bypassing the PLL ensures the skews between input clock, downstream clock outputs, and RTIO clock are deterministic across reboots of the system. This is useful when phase determinism is required in situations where the reference clock fans out to other devices before reaching the master. \ No newline at end of file diff --git a/doc/manual/installing.rst b/doc/manual/installing.rst index 2a4cb6578..5e0f61723 100644 --- a/doc/manual/installing.rst +++ b/doc/manual/installing.rst @@ -259,7 +259,7 @@ Run the command:: Replace ``[username]`` with the login name that was given to you with the subscription, ``[variant]`` with the name of your system variant, and ``[afws_directory]`` with the name of an empty directory, which will be created by the command if it does not exist. Enter your password when prompted and wait for the build (if applicable) and download to finish. If you experience issues with the AFWS client, write to the helpdesk@ email. -For certain configurations (KC705 or ZC705 only) it is also possible to source firmware from `the M-Labs Hydra server ` (in ``main`` and ``zynq`` respectively). 
+For certain configurations (KC705 or ZC705 only) it is also possible to source firmware from `the M-Labs Hydra server `_ (in ``main`` and ``zynq`` respectively). Without a subscription, you may build the firmware yourself from the open source code. See the section :ref:`Developing ARTIQ `. @@ -333,7 +333,9 @@ If the Kasli-SoC won't boot due to nonexistent or corrupted firmware, extract th The SW13 switches need to be set to 00001. -Flashing over network is also possible for Kasli and KC705, assuming IP networking has been set up. In this case, the ``-H HOSTNAME`` option is used; see the entry for ``artiq_flash`` in the :ref:`Utilities ` reference. +Flashing over network is also possible for Kasli and KC705, assuming IP networking has already been set up. In this case, the ``-H HOSTNAME`` option is used; see the entry for ``artiq_flash`` in the :ref:`Utilities ` reference. + +.. _core-device-networking: Setting up the core device IP networking ---------------------------------------- @@ -405,23 +407,12 @@ Select the RTIO clock source The core device may use any of: an external clock signal, its internal clock with external frequency reference, or its internal clock with internal crystal reference. Clock source and timing are set at power-up. To find out what clock signal you are using, check startup logs with ``artiq_coremgmt log``. -By default, an internal clock is used; to select another source, use a command of the form: +The default is to use an internal 125MHz clock. To select a source, use a command of the form: $ artiq_coremgmt config write -s rtio_clock int_125 # internal 125MHz clock (default) - $ artiq_coremgmt config write -s rtio_clock ext0_bypass # external clock (bypass) + $ artiq_coremgmt config write -s rtio_clock ext0_synth0_10to125 # external 10MHz reference used to synthesize internal 125MHz -If set to ``ext0_bypass``, the Si5324 synthesizer is bypassed entirely in favor of an input clock, requiring that an input clock be present. - -Other options include: - - ``ext0_synth0_10to125`` - external 10MHz reference clock used by Si5324 to synthesize a 125MHz RTIO clock, - - ``ext0_synth0_80to125`` - external 80MHz reference clock used by Si5324 to synthesize a 125MHz RTIO clock, - - ``ext0_synth0_100to125`` - external 100MHz reference clock used by Si5324 to synthesize a 125MHz RTIO clock, - - ``ext0_synth0_125to125`` - external 125MHz reference clock used by Si5324 to synthesize a 125MHz RTIO clock, - - ``int_100`` - internal crystal reference is used by Si5324 to synthesize a 100MHz RTIO clock, - - ``int_150`` - internal crystal reference is used by Si5324 to synthesize a 150MHz RTIO clock. - - ``ext0_bypass_125`` and ``ext0_bypass_100`` - explicit aliases for ``ext0_bypass``. - -Availability of these options depends on specific board and configuration - specific settings may or may not be supported. See also :ref:`core-device-clocking`. +See :ref:`core-device-clocking` for availability of specific options. Set up resolving RTIO channels to their names ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/manual/rtio.rst b/doc/manual/rtio.rst index 6db6bbbf7..922654ae6 100644 --- a/doc/manual/rtio.rst +++ b/doc/manual/rtio.rst @@ -121,6 +121,8 @@ To track down ``RTIOUnderflows`` in an experiment there are a few approaches: code. * The :any:`integrated logic analyzer ` shows the timeline context that lead to the exception. The analyzer is always active and supports plotting of RTIO slack. 
RTIO slack is the difference between timeline cursor and wall clock time (``now - rtio_counter``). +.. _sequence_errors: + Sequence errors --------------- A sequence error happens when the sequence of coarse timestamps cannot be supported by the gateware. For example, there may have been too many timeline rewinds. From 2c945f260ec34240efe9489275f9bfab1908708b Mon Sep 17 00:00:00 2001 From: architeuthidae <93191635+architeuthidae@users.noreply.github.com> Date: Wed, 12 Jun 2024 15:43:53 +0800 Subject: [PATCH 289/296] doc: 'Getting started with core device' manual page edit (#2431) --- doc/manual/getting_started_core.rst | 111 +++++++++++++++------------- doc/manual/rtio.rst | 2 +- 2 files changed, 60 insertions(+), 53 deletions(-) diff --git a/doc/manual/getting_started_core.rst b/doc/manual/getting_started_core.rst index 0cbeeffc6..72c7bfa0e 100644 --- a/doc/manual/getting_started_core.rst +++ b/doc/manual/getting_started_core.rst @@ -21,16 +21,18 @@ As a very first step, we will turn on a LED on the core device. Create a file `` self.core.reset() self.led.on() -The central part of our code is our ``LED`` class, which derives from :class:`artiq.language.environment.EnvExperiment`. Among other features, :class:`~artiq.language.environment.EnvExperiment` calls our :meth:`~artiq.language.environment.Experiment.build` method and provides the :meth:`~artiq.language.environment.HasEnvironment.setattr_device` method that interfaces to the device database to create the appropriate device drivers and make those drivers accessible as ``self.core`` and ``self.led``. The :func:`~artiq.language.core.kernel` decorator (``@kernel``) tells the system that the :meth:`~artiq.language.environment.Experiment.run` method must be compiled for and executed on the core device (instead of being interpreted and executed as regular Python code on the host). The decorator uses ``self.core`` internally, which is why we request the core device using :meth:`~artiq.language.environment.HasEnvironment.setattr_device` like any other. +The central part of our code is our ``LED`` class, which derives from :class:`artiq.language.environment.EnvExperiment`. Among other features, :class:`~artiq.language.environment.EnvExperiment` calls our :meth:`~artiq.language.environment.Experiment.build` method and provides the :meth:`~artiq.language.environment.HasEnvironment.setattr_device` method that interfaces with the device database to create the appropriate device drivers and make those drivers accessible as ``self.core`` and ``self.led``. The :func:`~artiq.language.core.kernel` decorator (``@kernel``) tells the system that the :meth:`~artiq.language.environment.Experiment.run` method must be compiled for and executed on the core device (instead of being interpreted and executed as regular Python code on the host). The decorator uses ``self.core`` internally, which is why we request the core device using :meth:`~artiq.language.environment.HasEnvironment.setattr_device` like any other. -You will need to supply the correct device database for your core device; it is generated by a Python script typically called ``device_db.py`` (see also :ref:`device-db`). If you purchased a system from M-Labs, the device database is provided either on the USB stick or inside ~/artiq on the NUC; otherwise, you can also find examples in the ``examples`` folder of ARTIQ, sorted inside the corresponding subfolder for your core device. Copy ``device_db.py`` into the same directory as ``led.py`` (or use the ``--device-db`` option of ``artiq_run``). 
The field ``core_addr``, placed at the top of the file, needs to match the IP address of your core device so your computer can communicate with it. If you purchased a pre-assembled system it is normally already set correctly. +It is important that you supply the correct device database for your system configuration; it is generated by a Python script typically called ``device_db.py`` (see also :ref:`the device database `). If you purchased a system from M-Labs, the ``device_db.py`` for your system will normally already have been provided to you (either on the USB stick, inside ``~/artiq`` on the NUC, or by email). If you have the JSON description file for your system on hand, you can use the ARTIQ front-end tool ``artiq_ddb_template`` to generate a matching device database file. Otherwise, you can also find examples in the ``examples`` folder of ARTIQ (sorted in corresponding subfolders per core device) which you can edit to match your system. .. note:: To access the examples, you can find where the ARTIQ package is installed on your machine with: :: python3 -c "import artiq; print(artiq.__path__[0])" -Run your code using ``artiq_run``, which is part of the ARTIQ front-end tools: :: +Make sure ``device_db.py`` is in the same directory as ``led.py``. The field ``core_addr``, placed at the top of the file, needs to match the current IP address of your core device in order for your host machine to contact it. If you purchased a pre-assembled system and haven't changed the IP address it is normally already set correctly. + +Run your code using ``artiq_run``, which is one of the ARTIQ front-end tools: :: $ artiq_run led.py @@ -39,9 +41,9 @@ The process should terminate quietly and the LED of the device should turn on. C Host/core device interaction (RPC) ---------------------------------- -A method or function running on the core device (which we call a "kernel") may communicate with the host by calling non-kernel functions that may accept parameters and may return a value. The "remote procedure call" (RPC) mechanisms handle automatically the communication between the host and the device of which function to call, with which parameters, and what the returned value is. +A method or function running on the core device (which we call a "kernel") may communicate with the host by calling non-kernel functions that may accept parameters and may return a value. The "remote procedure call" (RPC) mechanisms automatically handle the communication between the host and the device, conveying between them what function to call, what parameters to call it with, and the resulting value, once returned. -Modify the code as follows: :: +Modify ``led.py`` as follows: :: def input_led_state() -> TBool: return input("Enter desired LED state: ") == "1" @@ -69,15 +71,11 @@ You can then turn the LED off and on by entering 0 or 1 at the prompt that appea $ artiq_run led.py Enter desired LED state: 0 -What happens is the ARTIQ compiler notices that the :meth:`input_led_state` function does not have a ``@kernel`` decorator (:func:`~artiq.language.core.kernel`) and thus must be executed on the host. When the core device calls it, it sends a request to the host to execute it. The host displays the prompt, collects user input, and sends the result back to the core device, which sets the LED state accordingly. +What happens is that the ARTIQ compiler notices that the :meth:`input_led_state` function does not have a ``@kernel`` decorator (:func:`~artiq.language.core.kernel`) and thus must be executed on the host. 
When the function is called on the core device, it sends a request to the host, which executes it. The core device waits until the host returns, and then continues the kernel; in this case, the host displays the prompt, collects user input, and the core device sets the LED state accordingly. -RPC functions must always return a value of the same type. When they return a value that is not ``None``, the compiler should be informed in advance of the type of the value, which is what the ``-> TBool`` annotation is for. - -Without the :meth:`~artiq.coredevice.core.Core.break_realtime` call, the RTIO events emitted by :func:`self.led.on()` or :func:`self.led.off()` would be scheduled at a fixed and very short delay after entering :meth:`~artiq.language.environment.Experiment.run()`. -These events would fail because the RPC to :meth:`input_led_state()` can take an arbitrary amount of time and therefore the deadline for submission of RTIO events would have long passed when :func:`self.led.on()` or :func:`self.led.off()` are called. -The :meth:`~artiq.coredevice.core.Core.break_realtime` call is necessary to waive the real-time requirements of the LED state change. -It advances the timeline far enough to ensure that events can meet the submission deadline. +The return type of all RPC functions must be known in advance. If the return value is not ``None``, the compiler requires a type annotation, like ``-> TBool`` in the example above. +Without the :meth:`~artiq.coredevice.core.Core.break_realtime` call, the RTIO events emitted by :func:`self.led.on()` or :func:`self.led.off()` would be scheduled at a fixed and very short delay after entering :meth:`~artiq.language.environment.Experiment.run()`. These events would fail because the RPC to :meth:`input_led_state()` can take an arbitrarily long amount of time, and therefore the deadline for the submission of RTIO events would have long passed when :func:`self.led.on()` or :func:`self.led.off()` are called (that is, the ``rtio_counter`` wall clock will have advanced far ahead of the timeline cursor ``now``, and an :exc:`~artiq.coredevice.exceptions.RTIOUnderflow` would result; see :ref:`artiq-real-time-i-o-concepts` for the full explanation of wall clock vs. timeline.) The :meth:`~artiq.coredevice.core.Core.break_realtime` call is necessary to waive the real-time requirements of the LED state change. Rather than delaying by any particular time interval, it reads ``rtio_counter`` and moves up the ``now`` cursor far enough to ensure it's once again safely ahead of the wall clock. Real-time Input/Output (RTIO) ----------------------------- @@ -112,13 +110,13 @@ There are no input-only TTL channels. The experiment then drives one million 2 µs long pulses separated by 2 µs each. Connect an oscilloscope or logic analyzer to TTL0 and run ``artiq_run.py rtio.py``. Notice that the generated signal's period is precisely 4 µs, and that it has a duty cycle of precisely 50%. -This is not what you would expect if the delay and the pulse were implemented with register-based general purpose input output (GPIO) that is CPU-controlled. -The signal's period would depend on CPU speed, and overhead from the loop, memory management, function calls, etc, all of which are hard to predict and variable. +This is not what one would expect if the delay and the pulse were implemented with register-based general purpose input output (GPIO) that is CPU-controlled. 
+The signal's period would depend on CPU speed, and overhead from the loop, memory management, function calls, etc., all of which are hard to predict and variable. Any asymmetry in the overhead would manifest itself in a distorted and variable duty cycle. Instead, inside the core device, output timing is generated by the gateware and the CPU only programs switching commands with certain timestamps that the CPU computes. -This guarantees precise timing as long as the CPU can keep generating timestamps that are increasing fast enough. In case it fails to do that (and attempts to program an event with a timestamp smaller than the current RTIO clock timestamp), a :exc:`~artiq.coredevice.exceptions.RTIOUnderflow` exception is raised. The kernel causing it may catch it (using a regular ``try... except...`` construct), or it will be propagated to the host. +This guarantees precise timing as long as the CPU can keep generating timestamps that are increasing fast enough. In the case that it fails to do so (and attempts to program an event with a timestamp smaller than the current RTIO clock timestamp), :exc:`~artiq.coredevice.exceptions.RTIOUnderflow` is raised. The kernel causing it may catch it (using a regular ``try... except...`` construct), or allow it to propagate to the host. Try reducing the period of the generated waveform until the CPU cannot keep up with the generation of switching events and the underflow exception is raised. Then try catching it: :: @@ -147,23 +145,33 @@ Try reducing the period of the generated waveform until the CPU cannot keep up w Parallel and sequential blocks ------------------------------ -It is often necessary that several pulses overlap one another. This can be expressed through the use of ``with parallel`` constructs, in which the events generated by the individual statements are executed at the same time. The duration of the ``parallel`` block is the duration of its longest statement. +It is often necessary for several pulses to overlap one another. This can be expressed through the use of ``with parallel`` constructs, in which the events generated by the individual statements are executed at the same time. The duration of the ``parallel`` block is the duration of its longest statement. Try the following code and observe the generated pulses on a 2-channel oscilloscope or logic analyzer: :: - for i in range(1000000): - with parallel: - self.ttl0.pulse(2*us) - self.ttl1.pulse(4*us) - delay(4*us) + from artiq.experiment import * + + class Tutorial(EnvExperiment): + def build(self): + self.setattr_device("core") + self.setattr_device("ttl0") + self.setattr_device("ttl1") + + @kernel + def run(self): + self.core.reset() + for i in range(1000000): + with parallel: + self.ttl0.pulse(2*us) + self.ttl1.pulse(4*us) + delay(4*us) ARTIQ can implement ``with parallel`` blocks without having to resort to any of the typical parallel processing approaches. It simply remembers the position on the timeline when entering the ``parallel`` block and then seeks back to that position after submitting the events generated by each statement. In other words, the statements in the ``parallel`` block are actually executed sequentially, only the RTIO events generated by them are scheduled to be executed in parallel. -Note that if a statement takes a lot of CPU time to execute (this different from the events scheduled by a statement taking a long time), it may cause a subsequent statement to miss the deadline for timely submission of its events. 
-This then causes a :exc:`~artiq.coredevice.exceptions.RTIOUnderflow` exception to be raised. +Note that accordingly if a statement takes a lot of CPU time to execute (which is different from -- and has nothing to do with! -- the events *scheduled* by the statement taking a long time), it may cause a subsequent statement to miss the deadline for timely submission of its events (and raise :exc:`~artiq.coredevice.exceptions.RTIOUnderflow`), while earlier statements in the parallel block would have submitted their events without problems. -Within a parallel block, some statements can be made sequential again using a ``with sequential`` construct. Observe the pulses generated by this code: :: +Within a parallel block, some statements can be scheduled sequentially again using a ``with sequential`` block. Observe the pulses generated by this code: :: for i in range(1000000): with parallel: @@ -174,11 +182,11 @@ Within a parallel block, some statements can be made sequential again using a `` self.ttl1.pulse(4*us) delay(4*us) -Particular care needs to be taken when working with ``parallel`` blocks in cases where a large number of RTIO events are generated as it possible to create sequencing errors (`RTIO sequence error`). Sequence errors do not halt execution of the kernel for performance reasons and instead are reported in the core log. If the ``aqctl_corelog`` process has been started with ``artiq_ctlmgr``, then these errors will be posted to the master log. However, if an experiment is executed through ``artiq_run``, these errors will not be visible outside of the core log. +Particular care needs to be taken when working with ``parallel`` blocks which generate large numbers of RTIO events, as it is possible to create sequence errors. A sequence error is caused when the scalable event dispatcher (SED) cannot queue an RTIO event due to its timestamp being the same as or earlier than another event in its queue. By default, the SED has 8 lanes, which suffice in most cases to avoid sequence errors; however, if many (>8) events are queued with interlaced timestamps the problem can still surface. See :ref:`sequence-errors`. -A sequence error is caused when the scalable event dispatcher (SED) cannot queue an RTIO event due to its timestamp being the same as or earlier than another event in its queue. By default, the SED has 8 lanes which allows ``parallel`` events to work without sequence errors in most cases, however if many (>8) events are queued with conflicting timestamps this error can surface. +Note that for performance reasons sequence errors do not halt execution of the kernel. Instead, they are reported in the core log. If the ``aqctl_corelog`` process has been started with ``artiq_ctlmgr``, then these errors will be posted to the master log. If an experiment is executed through ``artiq_run``, the errors will only be visible in the core log. -These errors can usually be overcome by reordering the generation of the events. Alternatively, the number of SED lanes can be increased in the gateware. +Sequence errors can usually be overcome by reordering the generation of the events (again, different from and unrelated to reordering the events themselves). Alternatively, the number of SED lanes can be increased in the gateware. .. _rtio-analyzer-example: @@ -203,13 +211,12 @@ The core device records the real-time I/O waveforms into a circular buffer. It i rtio_log("ttl0", "i", i) delay(...) 
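Filled out with concrete values (the class name, loop count, and the 1 µs timings are arbitrary illustrative choices, not requirements), such an experiment might read: ::

    from artiq.experiment import *

    class Tutorial(EnvExperiment):
        def build(self):
            self.setattr_device("core")
            self.setattr_device("ttl0")

        @kernel
        def run(self):
            self.core.reset()
            for i in range(100):
                self.ttl0.pulse(1*us)       # example pulse length
                rtio_log("ttl0", "i", i)    # log the loop index alongside the ttl0 events
                delay(1*us)                 # example spacing between pulses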
-Afterwards, the recorded data can be extracted and written to a VCD file using ``artiq_coreanalyzer -w rtio.vcd`` (see: :ref:`core-device-rtio-analyzer-tool`). VCD files can be viewed using third-party tools such as GtkWave. - +Afterwards, the recorded data can be extracted and written to a VCD file using ``artiq_coreanalyzer -w rtio.vcd`` (see :ref:`core-device-rtio-analyzer-tool`). VCD files can be viewed using third-party tools such as GtkWave. Direct Memory Access (DMA) -------------------------- -DMA allows you to store fixed sequences of pulses in system memory, and have the DMA core in the FPGA play them back at high speed. Pulse sequences that are too fast for the CPU (i.e. would cause RTIO underflows) can still be generated using DMA. The only modification of the sequence that the DMA core supports is shifting it in time (so it can be played back at any position of the timeline), everything else is fixed at the time of recording the sequence. +DMA allows for storing fixed sequences of RTIO events in system memory and having the DMA core in the FPGA play them back at high speed. Provided that the specifications of a desired event sequence are known far enough in advance, and no other RTIO issues (collisions, sequence errors) are provoked, even extremely fast and detailed event sequences are always possible to generate and execute. However, if they are time-consuming for the CPU to generate, they may require very large amounts of positive slack in order to allow the CPU enough time to complete the generation before the wall clock 'catches up' (that is, without running into RTIO underflows). A better option is to record these sequences to the DMA core. Once recorded, events sequences are fixed and cannot be modified, but can be safely replayed at any position in the timeline, potentially repeatedly. Try this: :: @@ -244,12 +251,14 @@ Try this: :: # each playback advances the timeline by 50*(100+100) ns self.core_dma.playback_handle(pulses_handle) +For more documentation on the methods used, see the :mod:`artiq.coredevice.dma` reference. + Distributed Direct Memory Access (DDMA) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ By default on DRTIO systems, all events recorded by the DMA core are kept and played back on the master. -With distributed DMA, RTIO events that should be played back on remote destinations, are distributed to the corresponding satellites. In some cases (typically, large buffers on several satellites with high event throughput), it allows for better performance and higher bandwidth, as the RTIO events do not have to be sent over the DRTIO link(s) during playback. +With distributed DMA, RTIO events that should be played back on remote destinations are distributed to the corresponding satellites. In some cases (typically, large buffers on several satellites with high event throughput), it allows for better performance and higher bandwidth, as the RTIO events do not have to be sent over the DRTIO link(s) during playback. To enable distributed DMA, simply provide an ``enable_ddma=True`` argument for the :meth:`~artiq.coredevice.dma.CoreDMA.record` method - taking a snippet from the previous example: :: @@ -262,28 +271,26 @@ To enable distributed DMA, simply provide an ``enable_ddma=True`` argument for t self.ttl0.pulse(100*ns) delay(100*ns) -This argument is ignored on standalone systems, as it does not apply there. +In standalone systems this argument is ignored and has no effect. 
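A trace that is no longer needed can also be discarded by name; a minimal sketch using the ``erase()`` method of the same ``core_dma`` driver (assuming the ``"pulses"`` trace recorded in the example above): ::

    @kernel
    def cleanup(self):
        # free the storage used by the previously recorded "pulses" trace
        self.core_dma.erase("pulses")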
-Enabling DDMA on a purely local sequence on a DRTIO system introduces an overhead during trace recording which comes from additional processing done on the record, so careful use is advised. - -Due to the extra time that communicating with relevant satellites takes, an additional delay before playback may be necessary to prevent a :exc:`~artiq.coredevice.exceptions.RTIOUnderflow` when playing back a DDMA-enabled sequence. +Enabling DDMA on a purely local sequence on a DRTIO system introduces an overhead during trace recording which comes from additional processing done on the record, so careful use is advised. Due to the extra time that communicating with relevant satellites takes, an additional delay before playback may be necessary to prevent a :exc:`~artiq.coredevice.exceptions.RTIOUnderflow` when playing back a DDMA-enabled sequence. Subkernels ---------- -Subkernels refer to kernels running on a satellite device. This allows you to offload some of the processing and control over remote RTIO devices, freeing up resources on the master. +Subkernels refers to kernels running on a satellite device. This allows offloading some processing and control over remote RTIO devices, freeing up resources on the master. -Subkernels behave in most part as regular kernels, they accept arguments and can return values. However, there are few caveats: +Subkernels behave for the most part like regular kernels; they accept arguments and can return values. However, there are few caveats: - they do not support RPCs, - they do not support DRTIO, - their return value must be fully annotated with an ARTIQ type, - their arguments should be annotated, and only basic ARTIQ types are supported, - - while ``self`` is allowed, there is no attribute writeback - any changes to it will be discarded when the subkernel is done, - - they can raise exceptions, but they cannot be caught by the master, - - they begin execution as soon as possible when called, and they can be awaited. + - while ``self`` is allowed, there is no attribute writeback - any changes will be discarded when the subkernel is completed, + - they can raise exceptions, but the exceptions cannot be caught by the master (rather, they are propagated directly to the host), + - they begin execution as soon as possible when called, and can be awaited. -To define a subkernel, use the subkernel decorator (``@subkernel(destination=X)``). The destination is the satellite number as defined in the routing table, and must be between 1 and 255. To call a subkernel, call it like a normal function; and to await its result, use ``subkernel_await(function, [timeout])`` built-in function. +To define a subkernel, use the subkernel decorator (``@subkernel(destination=X)``). The destination is the satellite number as defined in the routing table, and must be between 1 and 255. To call a subkernel, call it like a normal function; and to await its result, use ``subkernel_await(function, [timeout])``. For example, a subkernel performing integer addition: :: @@ -304,11 +311,11 @@ For example, a subkernel performing integer addition: :: result = subkernel_await(subkernel_add) assert result == 4 -Sometimes the subkernel execution may take more time. By default, the await function will wait forever. However, if timeout is needed it can be set, as ``subkernel_await()`` accepts an optional argument. The value is interpreted in milliseconds and if it is negative, timeout is disabled. +Sometimes subkernel execution may take large amounts of time. 
By default, the await function will wait as long as necessary. If a timeout is needed, it can be set using the optional argument of ``subkernel_await()``. The value given is interpreted in milliseconds. If a negative value is given, timeout is disabled. -Subkernels are compiled after the main kernel, and then immediately uploaded to satellites. When called, master instructs the appropriate satellite to load the subkernel into their kernel core and to run it. If the subkernel is complex, and its binary relatively big, the delay between the call and actually running the subkernel may be substantial; if that delay has to be minimized, ``subkernel_preload(function)`` should be used before the call. +Subkernels are compiled after the main kernel and immediately uploaded to satellites. When called, the master instructs the appropriate satellite to load the subkernel into their kernel core and run it. If the subkernel is complex, and its binary relatively large, the delay between the call and actually running the subkernel may be substantial; if it's necessary to minimize this delay, ``subkernel_preload(function)`` should be used before the call. -While ``self`` is accepted as an argument for subkernels, it is embedded into the compiled data. Any changes made by the main kernel or other subkernels, will not be available. +While ``self`` is accepted as an argument for subkernels, it is embedded into the compiled data. Any changes made by the main kernel or other subkernels will not be available. Subkernels can call other kernels and subkernels. For a more complex example: :: @@ -341,17 +348,17 @@ Subkernels can call other kernels and subkernels. For a more complex example: :: assert result == 4 self.pulse_ttl(20) -Without the preload, the delay after the core reset would need to be longer. It's still an operation that can take some time, depending on the connection. Notice that the method ``pulse_ttl()`` can be also called both within a subkernel, and on its own. +Without the preload, the delay after the core reset would need to be longer. The operation may still take some time, depending on the connection. Notice that the method ``pulse_ttl()`` can be called both within a subkernel and on its own. -In general, subkernels do not have to be awaited, but awaiting is required to retrieve returned values and exceptions. +It is not necessary for subkernels to always be awaited, but awaiting is required to retrieve returned values and exceptions. .. note:: - When a subkernel is running, regardless of devices used by it, RTIO devices on that satellite are not available to the master. Control is returned to master after the subkernel finishes - to be sure that you can use the device, the subkernel should be awaited before any RTIO operations on the affected satellite are performed. + While a subkernel is running, regardless of what devices it makes use of, none of the RTIO devices on that satellite (or on any satellites downstream) will be available to the master. Control is returned to master after the subkernel completes - to be certain a device is usable, await the subkernel before performing any RTIO operations on the affected satellites. Message passing ^^^^^^^^^^^^^^^ -Subkernels besides arguments and returns, can also pass messages between each other or the master with built-in ``subkernel_send()`` and ``subkernel_recv()`` functions. This can be used for communication between subkernels, passing additional data, or partially computed data. 
Consider the following example: :: +Apart from arguments and returns, subkernels can also pass messages between each other or the master with built-in ``subkernel_send()`` and ``subkernel_recv()`` functions. This can be used for communication between subkernels, to pass additional data, or to send partially computed data. Consider the following example: :: from artiq.experiment import * @@ -371,10 +378,10 @@ Subkernels besides arguments and returns, can also pass messages between each ot result = subkernel_await(simple_self) assert result == 170 -The ``subkernel_send(destination, name, value)`` function requires three arguments: destination, name of the message that will be linked with the ``subkernel_recv()``, and the passed value. +The ``subkernel_send(destination, name, value)`` function takes three arguments: a destination, a name for the message (to be used for identification in the corresponding ``subkernel_recv()``), and the passed value. -The ``subkernel_recv(name, type, [timeout])`` function requires two arguments: message name (matching the name provided in ``subkernel_send``) and expected type. Optionally, it accepts a third argument - timeout for the operation in milliseconds. If the value is negative, timeout is disabled. The default value is no timeout. +The ``subkernel_recv(name, type, [timeout])`` function requires two arguments: message name (matching exactly the name provided in ``subkernel_send``) and expected type. Optionally, it accepts a third argument, a timeout for the operation in milliseconds. If this value is negative, timeout is disabled. By default, it waits as long as necessary. -The "name" argument in both ``send`` and ``recv`` functions acts as a link, and must match exactly between the two for a successful message transaction. The type of the value sent by ``subkernel_send`` is checked against the type declared in ``subkernel_recv`` with the same name, to avoid misinterpretation of the data. The compiler also checks if all subkernel message names have both a sending and receiving functions to help with typos. However, it cannot help if wrong names are used - the receiver will wait only for a matching message for the duration of the timeout. +To avoid misinterpretation of the data the compiler type-checks the value sent by ``subkernel_send`` against the type declared in ``subkernel_recv``. To guard against common errors, it also checks that all message names are used in both a sending and receiving function. -A message can be received only when a subkernel is running, and is put into a buffer to be taken when required - thus whatever sending order will not cause a deadlock. However, a subkernel may timeout or wait forever, if destination or names do not match (e.g. message sent to wrong destination, or under different than expected name even if types match). \ No newline at end of file +A message can only be received while a subkernel is running, and is placed into a buffer to be retrieved when required; therefore send executes independently of any receive and never deadlocks. However, a receive function may timeout or wait forever if no message with the correct name and destination is ever sent. \ No newline at end of file diff --git a/doc/manual/rtio.rst b/doc/manual/rtio.rst index 922654ae6..fad626c1d 100644 --- a/doc/manual/rtio.rst +++ b/doc/manual/rtio.rst @@ -121,7 +121,7 @@ To track down ``RTIOUnderflows`` in an experiment there are a few approaches: code. * The :any:`integrated logic analyzer ` shows the timeline context that lead to the exception. 
   The analyzer is always active and supports plotting of RTIO slack. RTIO slack is the difference between timeline cursor and wall clock time (``now - rtio_counter``).
 
-.. _sequence_errors:
+.. _sequence-errors:
 
 Sequence errors
 ---------------

From 1ee39881885dd24697c541370b153409484417e3 Mon Sep 17 00:00:00 2001
From: architeuthis
Date: Tue, 11 Jun 2024 12:09:59 +0800
Subject: [PATCH 290/296] doc: Refactor manual table of contents

---
 doc/manual/index.rst | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/doc/manual/index.rst b/doc/manual/index.rst
index 76b0b8f61..6207838c1 100644
--- a/doc/manual/index.rst
+++ b/doc/manual/index.rst
@@ -6,21 +6,21 @@ ARTIQ documentation
    :maxdepth: 2
 
    introduction
+   release_notes
    installing
    developing
-   release_notes
    rtio
    getting_started_core
-   compiler
    getting_started_mgmt
-   core_device
-   management_system
    environment
-   drtio
+   compiler
+   management_system
+   drtio
+   core_device
    core_language_reference
-   core_drivers_reference
-   list_of_ndsps
-   developing_a_ndsp
+   core_drivers_reference
    utilities
+   developing_a_ndsp
+   list_of_ndsps
    default_network_ports
    faq

From a167cc60434617783d889936f913255ef6a804fb Mon Sep 17 00:00:00 2001
From: architeuthis
Date: Thu, 13 Jun 2024 11:33:34 +0800
Subject: [PATCH 291/296] doc: fix formatting in Installing page

---
 doc/manual/installing.rst | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/doc/manual/installing.rst b/doc/manual/installing.rst
index 5e0f61723..32ef206d8 100644
--- a/doc/manual/installing.rst
+++ b/doc/manual/installing.rst
@@ -361,7 +361,7 @@ If the ``ip`` config is not set, Kasli-SoC firmware defaults to using the IP address
 
 * For Kasli or KC705:
 
-If the ``ip`` config field is not set or set to ``use_dhcp``, the device will attempt to obtain an IP address and default gateway using DHCP. If a static IP address is nonetheless wanted, it can be flashed directly (OpenOCD must be installed and configured, as above), along with, as necessary, default gateway, IPv6, and/or MAC address:
+If the ``ip`` config field is not set or set to ``use_dhcp``, the device will attempt to obtain an IP address and default gateway using DHCP. If a static IP address is nonetheless wanted, it can be flashed directly (OpenOCD must be installed and configured, as above), along with, as necessary, default gateway, IPv6, and/or MAC address: ::
 
     $ artiq_mkfs flash_storage.img [-s mac xx:xx:xx:xx:xx:xx] [-s ip xx.xx.xx.xx/xx] [-s ipv4_default_route xx.xx.xx.xx] [-s ip6 xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx/xx] [-s ipv6_default_route xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx]
     $ artiq_flash -t [board] -V [variant] -f flash_storage.img storage start
@@ -388,7 +388,7 @@ Flash idle or startup kernel
 
 The idle kernel is the kernel (that is, a piece of code running on the core device; see :ref:`next topic ` for more information about kernels) which the core device runs whenever it is not connected to the host via Ethernet. This kernel is therefore stored immediately in the :ref:`core device configuration flash storage `.
 
-To flash the idle kernel, first compile an idle experiment. Since the core device is not connected to the host, RPCs (calling Python code running on the host from the kernel) are forbidden, and its ``run()`` method must be a kernel, marked correctly with the ``@kernel`` decorator. Write the compiled experiment to the core device configuration flash storage, under the key ``idle_kernel``:
+To flash the idle kernel, first compile an idle experiment. Since the core device is not connected to the host, RPCs (calling Python code running on the host from the kernel) are forbidden, and its ``run()`` method must be a kernel, marked correctly with the ``@kernel`` decorator. Write the compiled experiment to the core device configuration flash storage, under the key ``idle_kernel``: ::
 
     $ artiq_compile idle.py
     $ artiq_coremgmt config write -f idle_kernel idle.elf
@@ -407,7 +407,7 @@ Select the RTIO clock source
 
 The core device may use any of: an external clock signal, its internal clock with external frequency reference, or its internal clock with internal crystal reference. Clock source and timing are set at power-up. To find out what clock signal you are using, check startup logs with ``artiq_coremgmt log``.
 
-The default is to use an internal 125MHz clock. To select a source, use a command of the form:
+The default is to use an internal 125MHz clock. To select a source, use a command of the form: ::
 
     $ artiq_coremgmt config write -s rtio_clock int_125  # internal 125MHz clock (default)
    $ artiq_coremgmt config write -s rtio_clock ext0_synth0_10to125  # external 10MHz reference used to synthesize internal 125MHz

From ff79854c464004d00b4e89a51b8c576eb60d103f Mon Sep 17 00:00:00 2001
From: Florian Agbuya
Date: Thu, 13 Jun 2024 11:10:49 +0800
Subject: [PATCH 292/296] plot_xy: fix missing x values handling

Signed-off-by: Florian Agbuya

---
 artiq/applets/plot_xy.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/artiq/applets/plot_xy.py b/artiq/applets/plot_xy.py
index d7d67803b..df3dc2aaf 100755
--- a/artiq/applets/plot_xy.py
+++ b/artiq/applets/plot_xy.py
@@ -24,11 +24,11 @@ class XYPlot(pyqtgraph.PlotWidget):
             y = value[self.args.y]
         except KeyError:
             return
-        x = value.get(self.args.x, (False, None))
+        x = value.get(self.args.x)
         if x is None:
             x = np.arange(len(y))
-        error = value.get(self.args.error, (False, None))
-        fit = value.get(self.args.fit, (False, None))
+        error = value.get(self.args.error)
+        fit = value.get(self.args.fit)
 
         if not len(y) or len(y) != len(x):
             self.mismatch['X values'] = True

From 84b97976c0f77abc330c7fda49b7c9989dda585e Mon Sep 17 00:00:00 2001
From: morgan
Date: Wed, 19 Jun 2024 11:28:07 +0800
Subject: [PATCH 293/296] kasli: fix v1.0 & v1.1 compilation error

---
 artiq/gateware/targets/kasli.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/artiq/gateware/targets/kasli.py b/artiq/gateware/targets/kasli.py
index 4a1bece55..4dcc0026a 100755
--- a/artiq/gateware/targets/kasli.py
+++ b/artiq/gateware/targets/kasli.py
@@ -120,7 +120,8 @@ class StandaloneBase(MiniSoC, AMPSoC):
             self.config["HAS_SI549"] = None
             self.config["WRPLL_REF_CLK"] = "SMA_CLKIN"
         else:
-            self.submodules += SMAClkinForward(self.platform)
+            if self.platform.hw_rev == "v2.0":
+                self.submodules += SMAClkinForward(self.platform)
             self.config["HAS_SI5324"] = None
             self.config["SI5324_SOFT_RESET"] = None

From 77580b5bf678d28d20ce683d9e716c1ef64cf088 Mon Sep 17 00:00:00 2001
From: morgan
Date: Wed, 19 Jun 2024 12:08:35 +0800
Subject: [PATCH 294/296] kasli: raise error when enabling WRPLL with v1.x

---
 artiq/gateware/targets/kasli.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/artiq/gateware/targets/kasli.py b/artiq/gateware/targets/kasli.py
index 4dcc0026a..0bdc50f28 100755
--- a/artiq/gateware/targets/kasli.py
+++ b/artiq/gateware/targets/kasli.py
@@ -843,6 +843,8 @@ def main():
     has_shuttler = any(peripheral["type"] == "shuttler" for peripheral in description["peripherals"])
     if has_shuttler and (description["drtio_role"] == "standalone"):
         raise ValueError("Shuttler requires DRTIO, please switch role to master")
+    if description["enable_wrpll"] and description["hw_rev"] in ["v1.0", "v1.1"]:
+        raise ValueError("Kasli {} does not support WRPLL".format(description["hw_rev"]))
     soc = cls(description, gateware_identifier_str=args.gateware_identifier_str,
               **soc_kasli_argdict(args))
     args.variant = description["variant"]

From 85545a84476cb6f97c06d37011bc09400e376eb2 Mon Sep 17 00:00:00 2001
From: Sebastien Bourdeauducq
Date: Wed, 19 Jun 2024 12:43:36 +0800
Subject: [PATCH 295/296] flake: update dependencies

---
 flake.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/flake.lock b/flake.lock
index 7ea9f95ac..1e28d8359 100644
--- a/flake.lock
+++ b/flake.lock
@@ -60,11 +60,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1717281328,
-        "narHash": "sha256-evZPzpf59oNcDUXxh2GHcxHkTEG4fjae2ytWP85jXRo=",
+        "lastModified": 1718437845,
+        "narHash": "sha256-ZT7Oc1g4I4pHVGGjQFnewFVDRLH5cIZhEzODLz9YXeY=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "b3b2b28c1daa04fe2ae47c21bb76fd226eac4ca1",
+        "rev": "752c634c09ceb50c45e751f8791cb45cb3d46c9e",
         "type": "github"
       },
       "original": {

From 19132ae0e322fccfae2b57b52343215f9edac16b Mon Sep 17 00:00:00 2001
From: Egor Savkin
Date: Tue, 18 Jun 2024 12:12:15 +0800
Subject: [PATCH 296/296] Fix missing jquery in docs - fixes broken search

Signed-off-by: Egor Savkin

---
 doc/manual/conf.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/doc/manual/conf.py b/doc/manual/conf.py
index 2396b497a..49c1e068b 100644
--- a/doc/manual/conf.py
+++ b/doc/manual/conf.py
@@ -79,6 +79,7 @@ extensions = [
     'sphinx.ext.napoleon',
     'sphinxarg.ext',
     'sphinxcontrib.wavedrom', # see also below for config
+    "sphinxcontrib.jquery",
 ]
 
 mathjax_path = "https://m-labs.hk/MathJax/MathJax.js?config=TeX-AMS-MML_HTMLorMML.js"
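
As a companion to the subkernel message-passing documentation updated above, here is a minimal sketch of the pattern it describes, written against the documented ``subkernel_send(destination, name, value)`` and ``subkernel_recv(name, type, [timeout])`` signatures. The ``@subkernel`` decorator, the destination numbers, the class name and the payload are illustrative assumptions and not taken from the patches: ::

    from artiq.experiment import *


    class MessagePassingExample(EnvExperiment):
        def build(self):
            self.setattr_device("core")

        # Assumed to run on a satellite core device; "destination=1" is an
        # illustrative DRTIO destination number.
        @subkernel(destination=1)
        def produce(self) -> TNone:
            # "measurement" is the link name; the receiver must use exactly
            # the same name and declare a matching type (TInt32 here).
            subkernel_send(0, "measurement", 42)

        @kernel
        def run(self):
            self.core.reset()
            self.produce()
            # Wait up to 1000 ms for the message; a negative timeout would
            # disable the timeout entirely.
            result = subkernel_recv("measurement", TInt32, 1000)
            assert result == 42
            # Wait for the subkernel itself to complete before returning.
            subkernel_await(self.produce)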
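
Similarly, a minimal idle experiment satisfying the constraints stated in the idle-kernel paragraph of [PATCH 291] (its ``run()`` method is a kernel and performs no RPCs) might look like the following sketch; the ``led0`` device name is an assumption and any host-independent activity would do: ::

    from artiq.experiment import *


    class IdleKernel(EnvExperiment):
        def build(self):
            self.setattr_device("core")
            self.setattr_device("led0")

        @kernel
        def run(self):
            # No RPCs here: the host is unreachable while the idle kernel runs.
            self.core.reset()
            while True:
                self.led0.pulse(250*ms)
                delay(125*ms)

Such an experiment would then be compiled and written to the ``idle_kernel`` key with the ``artiq_compile`` and ``artiq_coremgmt config write`` commands shown in the patch.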