diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index e239cab25..7c722c936 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -20,6 +20,8 @@ Highlights: - Exposes upconverter calibration and enabling/disabling of upconverter LO & RF outputs. - Add helpers to align Phaser updates to the RTIO timeline (``get_next_frame_mu()``) * ``get()``, ``get_mu()``, ``get_att()``, and ``get_att_mu()`` functions added for AD9910 and AD9912 +* On Kasli, the number of FIFO lanes in the scalable events dispatcher (SED) can now be configured in + the JSON hardware description file. * New hardware support: - HVAMP_8CH 8 channel HV amplifier for Fastino / Zotino * ``artiq_ddb_template`` generates edge-counter keys that start with the key of the corresponding @@ -28,6 +30,13 @@ Highlights: repository when building the list of experiments. * The configuration entry ``rtio_clock`` supports multiple clocking settings, deprecating the usage of compile-time options. +* DRTIO: added support for 100MHz clock. +* Previously detected RTIO async errors are reported to the host after each kernel terminates and a + warning is logged. The warning is in addition to the one already printed in the core device log upon + detection of the error. +* HDF5 options can now be passed when creating datasets with ``set_dataset``. In particular, this allows + using transparent compression filters, as follows: + ``set_dataset(name, value, hdf5_options={"compression": "gzip"})``. Breaking changes: @@ -38,6 +47,11 @@ Breaking changes: * Phaser: fixed coarse mixer frequency configuration * Mirny: Added extra delays in ``ADF5356.sync()``. This avoids the need for an extra delay before calling ``ADF5356.init()``. +* DRTIO: Changed message alignment from 32 bits to 64 bits. +* The deprecated ``set_dataset(..., save=...)`` is no longer supported. +* The internal dataset representation was changed to support tracking HDF5 options such as + a compression method. This requires changes to code reading the dataset persistence file + (``dataset_db.pyon``) and to custom applets. ARTIQ-6 @@ -107,9 +121,13 @@ Breaking changes: * ``quamash`` has been replaced with ``qasync``. * Protocols are updated to use device endian. * Analyzer dump format includes a byte for device endianness. +* To support variable numbers of Urukul cards in the future, the + ``artiq.coredevice.suservo.SUServo`` constructor now accepts two device name lists, + ``cpld_devices`` and ``dds_devices``, rather than four individual arguments. * Experiment classes with underscore-prefixed names are now ignored when ``artiq_client`` determines which experiment to submit (consistent with ``artiq_run``).
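+  Note on the ARTIQ-7 dataset representation change above: for custom applets
+  and for tools that read ``dataset_db.pyon`` directly, the migration amounts
+  to replacing positional tuple access with named fields. A minimal sketch
+  (``data`` is the mapping held by the applet and ``my.dataset`` a placeholder
+  key; the field names follow ``make_dataset()`` in
+  ``artiq/master/databases.py`` further down in this diff)::
+
+    # ARTIQ-6 and earlier: a dataset was a (persist, value) tuple.
+    persist, value = data["my.dataset"]
+
+    # ARTIQ-7: a dataset is a dict with explicit fields.
+    dataset = data["my.dataset"]
+    persist = dataset["persist"]
+    value = dataset["value"]
+    hdf5_options = dataset["hdf5_options"]  # e.g. {"compression": "gzip"}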
+ ARTIQ-5 ------- diff --git a/artiq/applets/big_number.py b/artiq/applets/big_number.py index 62348c8cf..a0714b734 100755 --- a/artiq/applets/big_number.py +++ b/artiq/applets/big_number.py @@ -13,7 +13,7 @@ class NumberWidget(QtWidgets.QLCDNumber): def data_changed(self, data, mods): try: - n = float(data[self.dataset_name][1]) + n = float(data[self.dataset_name]["value"]) except (KeyError, ValueError, TypeError): n = "---" self.display(n) diff --git a/artiq/applets/image.py b/artiq/applets/image.py index b7d36c1a1..4bc6b5f86 100755 --- a/artiq/applets/image.py +++ b/artiq/applets/image.py @@ -13,7 +13,7 @@ class Image(pyqtgraph.ImageView): def data_changed(self, data, mods): try: - img = data[self.args.img][1] + img = data[self.args.img]["value"] except KeyError: return self.setImage(img) diff --git a/artiq/applets/plot_hist.py b/artiq/applets/plot_hist.py index b3493c836..298246a5c 100755 --- a/artiq/applets/plot_hist.py +++ b/artiq/applets/plot_hist.py @@ -17,11 +17,11 @@ class HistogramPlot(pyqtgraph.PlotWidget): def data_changed(self, data, mods, title): try: - y = data[self.args.y][1] + y = data[self.args.y]["value"] if self.args.x is None: x = None else: - x = data[self.args.x][1] + x = data[self.args.x]["value"] except KeyError: return if x is None: diff --git a/artiq/applets/plot_xy.py b/artiq/applets/plot_xy.py index 8eaf9786a..32bf95b50 100755 --- a/artiq/applets/plot_xy.py +++ b/artiq/applets/plot_xy.py @@ -6,6 +6,7 @@ from PyQt5.QtCore import QTimer import pyqtgraph from artiq.applets.simple import TitleApplet +from artiq.master.databases import make_dataset as empty_dataset class XYPlot(pyqtgraph.PlotWidget): @@ -21,14 +22,14 @@ class XYPlot(pyqtgraph.PlotWidget): def data_changed(self, data, mods, title): try: - y = data[self.args.y][1] + y = data[self.args.y]["value"] except KeyError: return - x = data.get(self.args.x, (False, None))[1] + x = data.get(self.args.x, empty_dataset())["value"] if x is None: x = np.arange(len(y)) - error = data.get(self.args.error, (False, None))[1] - fit = data.get(self.args.fit, (False, None))[1] + error = data.get(self.args.error, empty_dataset())["value"] + fit = data.get(self.args.fit, empty_dataset())["value"] if not len(y) or len(y) != len(x): self.mismatch['X values'] = True diff --git a/artiq/applets/plot_xy_hist.py b/artiq/applets/plot_xy_hist.py index 39bd49098..01eab5421 100755 --- a/artiq/applets/plot_xy_hist.py +++ b/artiq/applets/plot_xy_hist.py @@ -126,9 +126,9 @@ class XYHistPlot(QtWidgets.QSplitter): def data_changed(self, data, mods): try: - xs = data[self.args.xs][1] - histogram_bins = data[self.args.histogram_bins][1] - histograms_counts = data[self.args.histograms_counts][1] + xs = data[self.args.xs]["value"] + histogram_bins = data[self.args.histogram_bins]["value"] + histograms_counts = data[self.args.histograms_counts]["value"] except KeyError: return if len(xs) != histograms_counts.shape[0]: diff --git a/artiq/applets/simple.py b/artiq/applets/simple.py index e5776310a..db6f2334d 100644 --- a/artiq/applets/simple.py +++ b/artiq/applets/simple.py @@ -10,6 +10,7 @@ from sipyco.sync_struct import Subscriber, process_mod from sipyco import pyon from sipyco.pipe_ipc import AsyncioChildComm +from artiq.master.databases import make_dataset as empty_dataset logger = logging.getLogger(__name__) @@ -251,7 +252,7 @@ class TitleApplet(SimpleApplet): def emit_data_changed(self, data, mod_buffer): if self.args.title is not None: - title_values = {k.replace(".", "/"): data.get(k, (False, None))[1] + title_values = {k.replace(".", 
"/"): data.get(k, empty_dataset())["value"] for k in self.dataset_title} try: title = self.args.title.format(**title_values) diff --git a/artiq/browser/datasets.py b/artiq/browser/datasets.py index b66b18216..d3d8171ac 100644 --- a/artiq/browser/datasets.py +++ b/artiq/browser/datasets.py @@ -104,8 +104,8 @@ class DatasetsDock(QtWidgets.QDockWidget): idx = self.table_model_filter.mapToSource(idx[0]) key = self.table_model.index_to_key(idx) if key is not None: - persist, value = self.table_model.backing_store[key] - asyncio.ensure_future(self._upload_dataset(key, value)) + dataset = self.table_model.backing_store[key] + asyncio.ensure_future(self._upload_dataset(key, dataset["value"])) def save_state(self): return bytes(self.table.header().saveState()) diff --git a/artiq/compiler/transforms/llvm_ir_generator.py b/artiq/compiler/transforms/llvm_ir_generator.py index 084e5ae09..5f61db822 100644 --- a/artiq/compiler/transforms/llvm_ir_generator.py +++ b/artiq/compiler/transforms/llvm_ir_generator.py @@ -1174,26 +1174,32 @@ class LLVMIRGenerator: else: llfun = self.map(insn.static_target_function) llenv = self.llbuilder.extract_value(llclosure, 0, name="env.fun") - return llfun, [llenv] + list(llargs) + return llfun, [llenv] + list(llargs), {} def _prepare_ffi_call(self, insn): llargs = [] - byvals = [] + llarg_attrs = {} for i, arg in enumerate(insn.arguments()): llarg = self.map(arg) if isinstance(llarg.type, (ll.LiteralStructType, ll.IdentifiedStructType)): llslot = self.llbuilder.alloca(llarg.type) self.llbuilder.store(llarg, llslot) llargs.append(llslot) - byvals.append(i) + llarg_attrs[i] = "byval" else: llargs.append(llarg) + llretty = self.llty_of_type(insn.type, for_return=True) + is_sret = self.needs_sret(llretty) + if is_sret: + llarg_attrs = {i + 1: a for (i, a) in llarg_attrs.items()} + llarg_attrs[0] = "sret" + llfunname = insn.target_function().type.name llfun = self.llmodule.globals.get(llfunname) if llfun is None: - llretty = self.llty_of_type(insn.type, for_return=True) - if self.needs_sret(llretty): + # Function has not been declared in the current LLVM module, do it now. 
+ if is_sret: llfunty = ll.FunctionType(llvoid, [llretty.as_pointer()] + [llarg.type for llarg in llargs]) else: @@ -1201,17 +1207,14 @@ class LLVMIRGenerator: llfun = ll.Function(self.llmodule, llfunty, insn.target_function().type.name) - if self.needs_sret(llretty): - llfun.args[0].add_attribute('sret') - byvals = [i + 1 for i in byvals] - for i in byvals: - llfun.args[i].add_attribute('byval') + for idx, attr in llarg_attrs.items(): + llfun.args[idx].add_attribute(attr) if 'nounwind' in insn.target_function().type.flags: llfun.attributes.add('nounwind') if 'nowrite' in insn.target_function().type.flags: llfun.attributes.add('inaccessiblememonly') - return llfun, list(llargs) + return llfun, list(llargs), llarg_attrs def _build_rpc(self, fun_loc, fun_type, args, llnormalblock, llunwindblock): llservice = ll.Constant(lli32, fun_type.service) @@ -1347,20 +1350,21 @@ class LLVMIRGenerator: insn.arguments(), llnormalblock=None, llunwindblock=None) elif types.is_external_function(functiontyp): - llfun, llargs = self._prepare_ffi_call(insn) + llfun, llargs, llarg_attrs = self._prepare_ffi_call(insn) else: - llfun, llargs = self._prepare_closure_call(insn) + llfun, llargs, llarg_attrs = self._prepare_closure_call(insn) if self.has_sret(functiontyp): llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), []) llresultslot = self.llbuilder.alloca(llfun.type.pointee.args[0].pointee) - llcall = self.llbuilder.call(llfun, [llresultslot] + llargs) + self.llbuilder.call(llfun, [llresultslot] + llargs, arg_attrs=llarg_attrs) llresult = self.llbuilder.load(llresultslot) self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr]) else: - llcall = llresult = self.llbuilder.call(llfun, llargs, name=insn.name) + llresult = self.llbuilder.call(llfun, llargs, name=insn.name, + arg_attrs=llarg_attrs) if isinstance(llresult.type, ll.VoidType): # We have NoneType-returning functions return void, but None is @@ -1379,16 +1383,17 @@ class LLVMIRGenerator: insn.arguments(), llnormalblock, llunwindblock) elif types.is_external_function(functiontyp): - llfun, llargs = self._prepare_ffi_call(insn) + llfun, llargs, llarg_attrs = self._prepare_ffi_call(insn) else: - llfun, llargs = self._prepare_closure_call(insn) + llfun, llargs, llarg_attrs = self._prepare_closure_call(insn) if self.has_sret(functiontyp): llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), []) llresultslot = self.llbuilder.alloca(llfun.type.pointee.args[0].pointee) llcall = self.llbuilder.invoke(llfun, [llresultslot] + llargs, - llnormalblock, llunwindblock, name=insn.name) + llnormalblock, llunwindblock, name=insn.name, + arg_attrs=llarg_attrs) self.llbuilder.position_at_start(llnormalblock) llresult = self.llbuilder.load(llresultslot) @@ -1396,7 +1401,7 @@ class LLVMIRGenerator: self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr]) else: llcall = self.llbuilder.invoke(llfun, llargs, llnormalblock, llunwindblock, - name=insn.name) + name=insn.name, arg_attrs=llarg_attrs) llresult = llcall # The !tbaa metadata is not legal to use with the invoke instruction, diff --git a/artiq/coredevice/ad9910.py b/artiq/coredevice/ad9910.py index e55ddce95..3a79a064b 100644 --- a/artiq/coredevice/ad9910.py +++ b/artiq/coredevice/ad9910.py @@ -374,18 +374,25 @@ class AD9910: data[(n - preload) + i] = self.bus.read() @kernel - def set_cfr1(self, power_down: TInt32 = 0b0000, + def set_cfr1(self, + power_down: TInt32 = 0b0000, phase_autoclear: TInt32 = 0, - drg_load_lrr: TInt32 = 0, drg_autoclear: TInt32 = 0, - 
internal_profile: TInt32 = 0, ram_destination: TInt32 = 0, - ram_enable: TInt32 = 0, manual_osk_external: TInt32 = 0, - osk_enable: TInt32 = 0, select_auto_osk: TInt32 = 0): + drg_load_lrr: TInt32 = 0, + drg_autoclear: TInt32 = 0, + phase_clear: TInt32 = 0, + internal_profile: TInt32 = 0, + ram_destination: TInt32 = 0, + ram_enable: TInt32 = 0, + manual_osk_external: TInt32 = 0, + osk_enable: TInt32 = 0, + select_auto_osk: TInt32 = 0): """Set CFR1. See the AD9910 datasheet for parameter meanings. This method does not pulse IO_UPDATE. :param power_down: Power down bits. :param phase_autoclear: Autoclear phase accumulator. + :param phase_clear: Asynchronous, static reset of the phase accumulator. :param drg_load_lrr: Load digital ramp generator LRR. :param drg_autoclear: Autoclear digital ramp generator. :param internal_profile: Internal profile control. @@ -405,11 +412,41 @@ class AD9910: (drg_load_lrr << 15) | (drg_autoclear << 14) | (phase_autoclear << 13) | + (phase_clear << 11) | (osk_enable << 9) | (select_auto_osk << 8) | (power_down << 4) | 2) # SDIO input only, MSB first + @kernel + def set_cfr2(self, + asf_profile_enable: TInt32 = 1, + drg_enable: TInt32 = 0, + effective_ftw: TInt32 = 1, + sync_validation_disable: TInt32 = 0, + matched_latency_enable: TInt32 = 0): + """Set CFR2. See the AD9910 datasheet for parameter meanings. + + This method does not pulse IO_UPDATE. + + :param asf_profile_enable: Enable amplitude scale from single tone profiles. + :param drg_enable: Digital ramp enable. + :param effective_ftw: Read effective FTW. + :param sync_validation_disable: Disable the SYNC_SMP_ERR pin indicating + (active high) detection of a synchronization pulse sampling error. + :param matched_latency_enable: Simultaneously applied amplitude, + phase, and frequency changes to the DDS arrive at the output + + * matched_latency_enable = 0: in the order listed + * matched_latency_enable = 1: simultaneously. + """ + self.write32(_AD9910_REG_CFR2, + (asf_profile_enable << 24) | + (drg_enable << 19) | + (effective_ftw << 16) | + (matched_latency_enable << 7) | + (sync_validation_disable << 5)) + @kernel def init(self, blind: TBool = False): """Initialize and configure the DDS. @@ -442,7 +479,7 @@ class AD9910: # enable amplitude scale from profiles # read effective FTW # sync timing validation disable (enabled later) - self.write32(_AD9910_REG_CFR2, 0x01010020) + self.set_cfr2(sync_validation_disable=1) self.cpld.io_update.pulse(1 * us) cfr3 = (0x0807c000 | (self.pll_vco << 24) | (self.pll_cp << 19) | (self.pll_en << 8) | @@ -465,7 +502,7 @@ class AD9910: if i >= 100 - 1: raise ValueError("PLL lock timeout") delay(10 * us) # slack - if self.sync_data.sync_delay_seed >= 0: + if self.sync_data.sync_delay_seed >= 0 and not blind: self.tune_sync_delay(self.sync_data.sync_delay_seed) delay(1 * ms) @@ -481,7 +518,7 @@ class AD9910: @kernel def set_mu(self, ftw: TInt32, pow_: TInt32 = 0, asf: TInt32 = 0x3fff, phase_mode: TInt32 = _PHASE_MODE_DEFAULT, - ref_time_mu: TInt64 = int64(-1), profile: TInt32 = 7): + ref_time_mu: TInt64 = int64(-1), profile: TInt32 = 7) -> TInt32: """Set DDS data in machine units. This uses machine units (FTW, POW, ASF). The frequency tuning word @@ -792,7 +829,7 @@ class AD9910: @kernel def set(self, frequency: TFloat, phase: TFloat = 0.0, amplitude: TFloat = 1.0, phase_mode: TInt32 = _PHASE_MODE_DEFAULT, - ref_time_mu: TInt64 = int64(-1), profile: TInt32 = 7): + ref_time_mu: TInt64 = int64(-1), profile: TInt32 = 7) -> TFloat: """Set DDS data in SI units. ..
seealso:: :meth:`set_mu` @@ -881,20 +918,26 @@ class AD9910: self.cpld.cfg_sw(self.chip_select - 4, state) @kernel - def set_sync(self, in_delay: TInt32, window: TInt32): + def set_sync(self, + in_delay: TInt32, + window: TInt32, + en_sync_gen: TInt32 = 0): """Set the relevant parameters in the multi device synchronization register. See the AD9910 datasheet for details. The SYNC clock generator preset value is set to zero, and the SYNC_OUT generator is - disabled. + disabled by default. :param in_delay: SYNC_IN delay tap (0-31) in steps of ~75ps :param window: Symmetric SYNC_IN validation window (0-15) in steps of ~75ps for both hold and setup margin. + :param en_sync_gen: Whether to enable the DDS-internal sync generator + (SYNC_OUT, cf. sync_sel == 1). Should be left off for the normal + use case, where the SYNC clock is supplied by the core device. """ self.write32(_AD9910_REG_SYNC, (window << 28) | # SYNC S/H validation delay (1 << 27) | # SYNC receiver enable - (0 << 26) | # SYNC generator disable + (en_sync_gen << 26) | # SYNC generator enable (0 << 25) | # SYNC generator SYS rising edge (0 << 18) | # SYNC preset (0 << 11) | # SYNC output delay @@ -910,9 +953,10 @@ class AD9910: Also modifies CFR2. """ - self.write32(_AD9910_REG_CFR2, 0x01010020) # clear SMP_ERR + self.set_cfr2(sync_validation_disable=1) # clear SMP_ERR self.cpld.io_update.pulse(1 * us) - self.write32(_AD9910_REG_CFR2, 0x01010000) # enable SMP_ERR + delay(10 * us) # slack + self.set_cfr2(sync_validation_disable=0) # enable SMP_ERR self.cpld.io_update.pulse(1 * us) @kernel @@ -990,7 +1034,7 @@ class AD9910: # set up DRG self.set_cfr1(drg_load_lrr=1, drg_autoclear=1) # DRG -> FTW, DRG enable - self.write32(_AD9910_REG_CFR2, 0x01090000) + self.set_cfr2(drg_enable=1) # no limits self.write64(_AD9910_REG_RAMP_LIMIT, -1, 0) # DRCTL=0, dt=1 t_SYNC_CLK @@ -1011,7 +1055,7 @@ class AD9910: ftw = self.read32(_AD9910_REG_FTW) # read out effective FTW delay(100 * us) # slack # disable DRG - self.write32(_AD9910_REG_CFR2, 0x01010000) + self.set_cfr2(drg_enable=0) self.cpld.io_update.pulse_mu(8) return ftw & 1 diff --git a/artiq/coredevice/ad9912.py b/artiq/coredevice/ad9912.py index b214b9496..cb018c103 100644 --- a/artiq/coredevice/ad9912.py +++ b/artiq/coredevice/ad9912.py @@ -156,7 +156,7 @@ class AD9912: return self.cpld.get_channel_att(self.chip_select - 4) @kernel - def set_mu(self, ftw: TInt64, pow_: TInt32): + def set_mu(self, ftw: TInt64, pow_: TInt32 = 0): """Set profile 0 data in machine units. 
After the SPI transfer, the shared IO update pin is pulsed to diff --git a/artiq/coredevice/comm_kernel.py b/artiq/coredevice/comm_kernel.py index cdb54a118..1b0111c49 100644 --- a/artiq/coredevice/comm_kernel.py +++ b/artiq/coredevice/comm_kernel.py @@ -621,6 +621,7 @@ class CommKernel: function = self._read_string() backtrace = [self._read_int32() for _ in range(self._read_int32())] + self._process_async_error() traceback = list(reversed(symbolizer(backtrace))) + \ [(filename, line, column, *demangler([function]), None)] @@ -635,6 +636,16 @@ class CommKernel: python_exn.artiq_core_exception = core_exn raise python_exn + def _process_async_error(self): + errors = self._read_int8() + if errors > 0: + map_name = lambda y, z: [f"{y}(s)"] if z else [] + errors = map_name("collision", errors & 2 ** 0) + \ + map_name("busy error", errors & 2 ** 1) + \ + map_name("sequence error", errors & 2 ** 2) + logger.warning(f"{(', '.join(errors[:-1]) + ' and ') if len(errors) > 1 else ''}{errors[-1]} " + f"reported during kernel execution") + def serve(self, embedding_map, symbolizer, demangler): while True: self._read_header() @@ -646,4 +657,5 @@ class CommKernel: raise exceptions.ClockFailure else: self._read_expect(Reply.KernelFinished) + self._process_async_error() return diff --git a/artiq/coredevice/coredevice_generic.schema.json b/artiq/coredevice/coredevice_generic.schema.json index 64e4a9251..274d7e2aa 100644 --- a/artiq/coredevice/coredevice_generic.schema.json +++ b/artiq/coredevice/coredevice_generic.schema.json @@ -64,6 +64,13 @@ "type": "boolean", "default": false }, + "sed_lanes": { + "type": "number", + "minimum": 1, + "maximum": 32, + "default": 8, + "description": "Number of FIFOs in the SED, must be a power of 2" + }, "peripherals": { "type": "array", "items": { diff --git a/artiq/coredevice/suservo.py b/artiq/coredevice/suservo.py index 932adf35b..1d0a72dad 100644 --- a/artiq/coredevice/suservo.py +++ b/artiq/coredevice/suservo.py @@ -57,32 +57,26 @@ class SUServo: :param channel: RTIO channel number :param pgia_device: Name of the Sampler PGIA gain setting SPI bus - :param cpld0_device: Name of the first Urukul CPLD SPI bus - :param cpld1_device: Name of the second Urukul CPLD SPI bus - :param dds0_device: Name of the AD9910 device for the DDS on the first - Urukul - :param dds1_device: Name of the AD9910 device for the DDS on the second - Urukul + :param cpld_devices: Names of the Urukul CPLD SPI buses + :param dds_devices: Names of the AD9910 devices :param gains: Initial value for PGIA gains shift register (default: 0x0000). Knowledge of this state is not transferred between experiments. 
:param core_device: Core device name """ - kernel_invariants = {"channel", "core", "pgia", "cpld0", "cpld1", - "dds0", "dds1", "ref_period_mu"} + kernel_invariants = {"channel", "core", "pgia", "cplds", "ddses", + "ref_period_mu"} def __init__(self, dmgr, channel, pgia_device, - cpld0_device, cpld1_device, - dds0_device, dds1_device, + cpld_devices, dds_devices, gains=0x0000, core_device="core"): self.core = dmgr.get(core_device) self.pgia = dmgr.get(pgia_device) self.pgia.update_xfer_duration_mu(div=4, length=16) - self.dds0 = dmgr.get(dds0_device) - self.dds1 = dmgr.get(dds1_device) - self.cpld0 = dmgr.get(cpld0_device) - self.cpld1 = dmgr.get(cpld1_device) + assert len(dds_devices) == len(cpld_devices) + self.ddses = [dmgr.get(dds) for dds in dds_devices] + self.cplds = [dmgr.get(cpld) for cpld in cpld_devices] self.channel = channel self.gains = gains self.ref_period_mu = self.core.seconds_to_mu( @@ -109,17 +103,15 @@ class SUServo: sampler.SPI_CONFIG | spi.SPI_END, 16, 4, sampler.SPI_CS_PGIA) - self.cpld0.init(blind=True) - cfg0 = self.cpld0.cfg_reg - self.cpld0.cfg_write(cfg0 | (0xf << urukul.CFG_MASK_NU)) - self.dds0.init(blind=True) - self.cpld0.cfg_write(cfg0) + for i in range(len(self.cplds)): + cpld = self.cplds[i] + dds = self.ddses[i] - self.cpld1.init(blind=True) - cfg1 = self.cpld1.cfg_reg - self.cpld1.cfg_write(cfg1 | (0xf << urukul.CFG_MASK_NU)) - self.dds1.init(blind=True) - self.cpld1.cfg_write(cfg1) + cpld.init(blind=True) + prev_cpld_cfg = cpld.cfg_reg + cpld.cfg_write(prev_cpld_cfg | (0xf << urukul.CFG_MASK_NU)) + dds.init(blind=True) + cpld.cfg_write(prev_cpld_cfg) @kernel def write(self, addr, value): @@ -257,9 +249,11 @@ class Channel: self.servo = dmgr.get(servo_device) self.core = self.servo.core self.channel = channel - # FIXME: this assumes the mem channel is right after the control - # channels - self.servo_channel = self.channel + 8 - self.servo.channel + # This assumes the mem channel is right after the control channels + # Make sure this is always the case in eem.py + self.servo_channel = (self.channel + 4 * len(self.servo.cplds) - + self.servo.channel) + self.dds = self.servo.ddses[self.servo_channel // 4] @kernel def set(self, en_out, en_iir=0, profile=0): @@ -311,12 +305,8 @@ class Channel: see :meth:`dds_offset_to_mu` :param phase: DDS phase in turns """ - if self.servo_channel < 4: - dds = self.servo.dds0 - else: - dds = self.servo.dds1 - ftw = dds.frequency_to_ftw(frequency) - pow_ = dds.turns_to_pow(phase) + ftw = self.dds.frequency_to_ftw(frequency) + pow_ = self.dds.turns_to_pow(phase) offs = self.dds_offset_to_mu(offset) self.set_dds_mu(profile, ftw, offs, pow_) diff --git a/artiq/dashboard/datasets.py b/artiq/dashboard/datasets.py index 10e51e906..23fc995cb 100644 --- a/artiq/dashboard/datasets.py +++ b/artiq/dashboard/datasets.py @@ -146,16 +146,16 @@ class Creator(QtWidgets.QDialog): class Model(DictSyncTreeSepModel): - def __init__(self, init): - DictSyncTreeSepModel.__init__(self, ".", - ["Dataset", "Persistent", "Value"], - init) + def __init__(self, init): + DictSyncTreeSepModel.__init__( + self, ".", ["Dataset", "Persistent", "Value"], init + ) def convert(self, k, v, column): if column == 1: - return "Y" if v[0] else "N" + return "Y" if v["persist"] else "N" elif column == 2: - return short_format(v[1]) + return short_format(v["value"]) else: raise ValueError @@ -223,8 +223,8 @@ class DatasetsDock(QtWidgets.QDockWidget): idx = self.table_model_filter.mapToSource(idx[0]) key = self.table_model.index_to_key(idx) if key is not None: - 
persist, value = self.table_model.backing_store[key] - t = type(value) + dataset = self.table_model.backing_store[key] + t = type(dataset["value"]) if np.issubdtype(t, np.number): dialog_cls = NumberEditor elif np.issubdtype(t, np.bool_): @@ -235,7 +235,7 @@ class DatasetsDock(QtWidgets.QDockWidget): logger.error("Cannot edit dataset %s: " "type %s is not supported", key, t) return - dialog_cls(self, self.dataset_ctl, key, value).open() + dialog_cls(self, self.dataset_ctl, key, dataset["value"]).open() def delete_clicked(self): idx = self.table.selectedIndexes() diff --git a/artiq/examples/kasli_suservo/device_db.py b/artiq/examples/kasli_suservo/device_db.py index d33bfb280..fdb85dc47 100644 --- a/artiq/examples/kasli_suservo/device_db.py +++ b/artiq/examples/kasli_suservo/device_db.py @@ -191,10 +191,8 @@ device_db = { "arguments": { "channel": 24, "pgia_device": "spi_sampler0_pgia", - "cpld0_device": "urukul0_cpld", - "cpld1_device": "urukul1_cpld", - "dds0_device": "urukul0_dds", - "dds1_device": "urukul1_dds" + "cpld_devices": ["urukul0_cpld", "urukul1_cpld"], + "dds_devices": ["urukul0_dds", "urukul1_dds"], } }, diff --git a/artiq/firmware/libboard_artiq/drtioaux.rs b/artiq/firmware/libboard_artiq/drtioaux.rs index f72072702..818775d7b 100644 --- a/artiq/firmware/libboard_artiq/drtioaux.rs +++ b/artiq/firmware/libboard_artiq/drtioaux.rs @@ -137,11 +137,10 @@ pub fn send(linkno: u8, packet: &Packet) -> Result<(), Error> { packet.write_to(&mut writer)?; - let padding = 4 - (writer.position() % 4); - if padding != 4 { - for _ in 0..padding { - writer.write_u8(0)?; - } + // Pad till offset 4, insert checksum there + let padding = (12 - (writer.position() % 8)) % 8; + for _ in 0..padding { + writer.write_u8(0)?; } let checksum = crc::crc32::checksum_ieee(&writer.get_ref()[0..writer.position()]); diff --git a/artiq/firmware/libproto_artiq/session_proto.rs b/artiq/firmware/libproto_artiq/session_proto.rs index 99412de10..0475a4489 100644 --- a/artiq/firmware/libproto_artiq/session_proto.rs +++ b/artiq/firmware/libproto_artiq/session_proto.rs @@ -90,7 +90,9 @@ pub enum Reply<'a> { LoadCompleted, LoadFailed(&'a str), - KernelFinished, + KernelFinished { + async_errors: u8 + }, KernelStartupFailed, KernelException { name: &'a str, @@ -100,7 +102,8 @@ pub enum Reply<'a> { line: u32, column: u32, function: &'a str, - backtrace: &'a [usize] + backtrace: &'a [usize], + async_errors: u8 }, RpcRequest { async: bool }, @@ -160,14 +163,16 @@ impl<'a> Reply<'a> { writer.write_string(reason)?; }, - Reply::KernelFinished => { + Reply::KernelFinished { async_errors } => { writer.write_u8(7)?; + writer.write_u8(async_errors)?; }, Reply::KernelStartupFailed => { writer.write_u8(8)?; }, Reply::KernelException { - name, message, param, file, line, column, function, backtrace + name, message, param, file, line, column, function, backtrace, + async_errors } => { writer.write_u8(9)?; writer.write_string(name)?; @@ -183,6 +188,7 @@ impl<'a> Reply<'a> { for &addr in backtrace { writer.write_u32(addr as u32)? 
} + writer.write_u8(async_errors)?; }, Reply::RpcRequest { async } => { diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index 825900b78..1a1d1660b 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -326,6 +326,14 @@ pub mod drtio { pub fn reset(_io: &Io, _aux_mutex: &Mutex) {} } +static mut SEEN_ASYNC_ERRORS: u8 = 0; + +pub unsafe fn get_async_errors() -> u8 { + let mut errors = SEEN_ASYNC_ERRORS; + SEEN_ASYNC_ERRORS = 0; + errors +} + fn async_error_thread(io: Io) { loop { unsafe { @@ -343,6 +351,7 @@ fn async_error_thread(io: Io) { error!("RTIO sequence error involving channel {}", csr::rtio_core::sequence_error_channel_read()); } + SEEN_ASYNC_ERRORS = errors; csr::rtio_core::async_error_write(errors); } } diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index 7d0935667..260a1b385 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -9,6 +9,7 @@ use urc::Urc; use sched::{ThreadHandle, Io, Mutex, TcpListener, TcpStream, Error as SchedError}; use rtio_clocking; use rtio_dma::Manager as DmaManager; +use rtio_mgt::get_async_errors; use cache::Cache; use kern_hwreq; use board_artiq::drtio_routing; @@ -431,7 +432,9 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, match stream { None => return Ok(true), Some(ref mut stream) => - host_write(stream, host::Reply::KernelFinished).map_err(|e| e.into()) + host_write(stream, host::Reply::KernelFinished { + async_errors: unsafe { get_async_errors() } + }).map_err(|e| e.into()) } } &kern::RunException { @@ -458,7 +461,8 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex, line: line, column: column, function: function, - backtrace: backtrace + backtrace: backtrace, + async_errors: unsafe { get_async_errors() } }).map_err(|e| e.into()) } } diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs index e0ec83612..c6685f895 100644 --- a/artiq/firmware/satman/main.rs +++ b/artiq/firmware/satman/main.rs @@ -447,6 +447,19 @@ const SI5324_SETTINGS: si5324::FrequencySettings crystal_ref: true }; +#[cfg(all(has_si5324, rtio_frequency = "100.0"))] +const SI5324_SETTINGS: si5324::FrequencySettings + = si5324::FrequencySettings { + n1_hs : 5, + nc1_ls : 10, + n2_hs : 10, + n2_ls : 250, + n31 : 50, + n32 : 50, + bwsel : 4, + crystal_ref: true +}; + #[no_mangle] pub extern fn main() -> i32 { extern { diff --git a/artiq/frontend/artiq_ddb_template.py b/artiq/frontend/artiq_ddb_template.py index 52408a0d4..0a14a06be 100755 --- a/artiq/frontend/artiq_ddb_template.py +++ b/artiq/frontend/artiq_ddb_template.py @@ -364,8 +364,7 @@ class PeripheralManager: def process_suservo(self, rtio_offset, peripheral): suservo_name = self.get_name("suservo") sampler_name = self.get_name("sampler") - urukul0_name = self.get_name("urukul") - urukul1_name = self.get_name("urukul") + urukul_names = [self.get_name("urukul") for _ in range(2)] channel = count(0) for i in range(8): self.gen(""" @@ -386,16 +385,14 @@ class PeripheralManager: "arguments": {{ "channel": 0x{suservo_channel:06x}, "pgia_device": "spi_{sampler_name}_pgia", - "cpld0_device": "{urukul0_name}_cpld", - "cpld1_device": "{urukul1_name}_cpld", - "dds0_device": "{urukul0_name}_dds", - "dds1_device": "{urukul1_name}_dds" + "cpld_devices": {cpld_names_list}, + "dds_devices": {dds_names_list} }} }}""", suservo_name=suservo_name, sampler_name=sampler_name, - urukul0_name=urukul0_name, - urukul1_name=urukul1_name, + cpld_names_list=[urukul_name + "_cpld" 
for urukul_name in urukul_names], + dds_names_list=[urukul_name + "_dds" for urukul_name in urukul_names], suservo_channel=rtio_offset+next(channel)) self.gen(""" device_db["spi_{sampler_name}_pgia"] = {{ @@ -407,7 +404,7 @@ class PeripheralManager: sampler_name=sampler_name, sampler_channel=rtio_offset+next(channel)) pll_vco = peripheral.get("pll_vco") - for urukul_name in (urukul0_name, urukul1_name): + for urukul_name in urukul_names: self.gen(""" device_db["spi_{urukul_name}"] = {{ "type": "local", diff --git a/artiq/frontend/artiq_flash.py b/artiq/frontend/artiq_flash.py index 1641e31fd..62b8dc39c 100755 --- a/artiq/frontend/artiq_flash.py +++ b/artiq/frontend/artiq_flash.py @@ -362,7 +362,10 @@ def main(): variants.remove("rtm") except ValueError: pass - if len(variants) == 0: + if all(action in ["rtm_gateware", "storage", "rtm_load", "erase", "start"] + for action in args.action) and args.action: + pass + elif len(variants) == 0: raise FileNotFoundError("no variants found, did you install a board binary package?") elif len(variants) == 1: variant = variants[0] diff --git a/artiq/frontend/artiq_sinara_tester.py b/artiq/frontend/artiq_sinara_tester.py index 22ba53740..2a0392eec 100755 --- a/artiq/frontend/artiq_sinara_tester.py +++ b/artiq/frontend/artiq_sinara_tester.py @@ -229,6 +229,17 @@ class SinaraTester(EnvExperiment): self.core.break_realtime() cpld.init() + @kernel + def test_urukul_att(self, cpld): + self.core.break_realtime() + for i in range(32): + test_word = 1 << i + cpld.set_all_att_mu(test_word) + readback_word = cpld.get_att_mu() + if readback_word != test_word: + print(readback_word, test_word) + raise ValueError + @kernel def calibrate_urukul(self, channel): self.core.break_realtime() @@ -268,11 +279,12 @@ class SinaraTester(EnvExperiment): def test_urukuls(self): print("*** Testing Urukul DDSes.") - print("Initializing CPLDs...") for name, cpld in sorted(self.urukul_cplds.items(), key=lambda x: x[0]): - print(name + "...") + print(name + ": initializing CPLD...") self.init_urukul(cpld) - print("...done") + print(name + ": testing attenuator digital control...") + self.test_urukul_att(cpld) + print(name + ": done") print("Calibrating inter-device synchronization...") for channel_name, channel_dev in self.urukuls: diff --git a/artiq/gateware/drtio/siphaser.py b/artiq/gateware/drtio/siphaser.py index 81dacaed0..5237b7453 100644 --- a/artiq/gateware/drtio/siphaser.py +++ b/artiq/gateware/drtio/siphaser.py @@ -4,7 +4,7 @@ from migen.genlib.cdc import MultiReg, PulseSynchronizer from misoc.interconnect.csr import * -# This code assumes 125/62.5MHz reference clock and 125MHz or 150MHz RTIO +# This code assumes 125/62.5MHz reference clock and 100MHz, 125MHz or 150MHz RTIO # frequency. class SiPhaser7Series(Module, AutoCSR): @@ -15,9 +15,9 @@ class SiPhaser7Series(Module, AutoCSR): self.phase_shift_done = CSRStatus(reset=1) self.error = CSR() - assert rtio_clk_freq in (125e6, 150e6) + assert rtio_clk_freq in (100e6, 125e6, 150e6) - # 125MHz/62.5MHz reference clock to 125MHz/150MHz. VCO @ 750MHz. + # 125MHz/62.5MHz reference clock to 100MHz/125MHz/150MHz. VCO @ 750MHz. # Used to provide a startup clock to the transceiver through the Si, # we do not use the crystal reference so that the PFD (f3) frequency # can be high. @@ -43,8 +43,8 @@ class SiPhaser7Series(Module, AutoCSR): else: mmcm_freerun_output = mmcm_freerun_output_raw - # 125MHz/150MHz to 125MHz/150MHz with controllable phase shift, - # VCO @ 1000MHz/1200MHz. 
+ # 100MHz/125MHz/150MHz to 100MHz/125MHz/150MHz with controllable phase shift, + # VCO @ 800MHz/1000MHz/1200MHz. # Inserted between CDR and output to Si, used to correct # non-deterministic skew of Si5324. mmcm_ps_fb = Signal() diff --git a/artiq/gateware/suservo/__init__.py b/artiq/gateware/suservo/__init__.py index e69de29bb..7a1df77ac 100644 --- a/artiq/gateware/suservo/__init__.py +++ b/artiq/gateware/suservo/__init__.py @@ -0,0 +1,10 @@ +"""Gateware implementation of the Sampler-Urukul (AD9910) DDS amplitude servo. + +General conventions: + + - ``t_...`` signals and constants refer to time spans measured in the gateware + module's default clock (typically a 125 MHz RTIO clock). + - ``start`` signals cause modules to proceed with the next servo iteration iff + they are currently idle (i.e. their value is irrelevant while the module is + busy, so they are not necessarily one-clock-period strobes). +""" diff --git a/artiq/gateware/suservo/iir.py b/artiq/gateware/suservo/iir.py index 0ebab3f13..0ec9bfa09 100644 --- a/artiq/gateware/suservo/iir.py +++ b/artiq/gateware/suservo/iir.py @@ -1,9 +1,7 @@ from collections import namedtuple import logging - from migen import * - logger = logging.getLogger(__name__) @@ -222,31 +220,30 @@ class IIR(Module): assert w.word <= w.coeff # same memory assert w.state + w.coeff + 3 <= w.accu - # m_coeff of active profiles should only be accessed during
We feed the first stage + # using the (MSBs of) t_current_step, and, after all channels have been + # covered, proceed once the pipeline has completely drained. self.submodules.fsm = fsm = FSM("IDLE") - state_clr = Signal() - stage_en = Signal() + t_current_step = Signal(w.channel + 2) + t_current_step_clr = Signal() + + # pipeline group activity flags (SR) + # 0: load from memory + # 1: compute + # 2: write to output registers (DDS profiles, clip flags) + stages_active = Signal(3) fsm.act("IDLE", self.done.eq(1), - state_clr.eq(1), + t_current_step_clr.eq(1), If(self.start, NextState("LOAD") ) ) fsm.act("LOAD", self.loading.eq(1), - If(state == (1 << w.channel) - 1, - state_clr.eq(1), - stage_en.eq(1), + If(t_current_step == (1 << w.channel) - 1, + t_current_step_clr.eq(1), + NextValue(stages_active[0], 1), NextState("PROCESS") ) ) fsm.act("PROCESS", self.processing.eq(1), # this is technically wasting three cycles - # (one for setting stage, and phase=2,3 with stage[2]) - If(stage == 0, - state_clr.eq(1), - NextState("SHIFT") + # (one for setting stages_active, and phase=2,3 with stages_active[2]) + If(stages_active == 0, + t_current_step_clr.eq(1), + NextState("SHIFT"), ) ) fsm.act("SHIFT", self.shifting.eq(1), - If(state == (2 << w.channel) - 1, + If(t_current_step == (2 << w.channel) - 1, NextState("IDLE") ) ) self.sync += [ - state.eq(state + 1), - If(state_clr, - state.eq(0), - ), - If(stage_en, - stage[0].eq(1) + If(t_current_step_clr, + t_current_step.eq(0) + ).Else( + t_current_step.eq(t_current_step + 1) ) ] - # pipeline group channel pointer + # global pipeline phase (lower two bits of t_current_step) + pipeline_phase = Signal(2, reset_less=True) + # pipeline group channel pointer (SR) # for each pipeline stage, this is the channel currently being # processed channel = [Signal(w.channel, reset_less=True) for i in range(3)] + self.comb += Cat(pipeline_phase, channel[0]).eq(t_current_step) + self.sync += [ + If(pipeline_phase == 3, + Cat(channel[1:]).eq(Cat(channel[:-1])), + stages_active[1:].eq(stages_active[:-1]), + If(channel[0] == (1 << w.channel) - 1, + stages_active[0].eq(0) + ) + ) + ] + # pipeline group profile pointer (SR) # for each pipeline stage, this is the profile currently being # processed profile = [Signal(w.profile, reset_less=True) for i in range(2)] - # pipeline phase (lower two bits of state) - phase = Signal(2, reset_less=True) - - self.comb += Cat(phase, channel[0]).eq(state) self.sync += [ - Case(phase, { - 0: [ - profile[0].eq(profiles[channel[0]]), - profile[1].eq(profile[0]) - ], - 3: [ - Cat(channel[1:]).eq(Cat(channel[:-1])), - stage[1:].eq(stage[:-1]), - If(channel[0] == (1 << w.channel) - 1, - stage[0].eq(0) - ) - ] - }) + If(pipeline_phase == 0, + profile[0].eq(profiles[channel[0]]), + profile[1].eq(profile[0]), + ) ] m_coeff = self.m_coeff.get_port() m_state = self.m_state.get_port(write_capable=True) # mode=READ_FIRST self.specials += m_state, m_coeff + # + # Hook up main IIR filter. 
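+        # (A single DSP block is time-multiplexed across all channels and
+        # profiles by the pipeline below.)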
+ # + dsp = DSP(w) self.submodules += dsp offset_clr = Signal() - self.comb += [ - m_coeff.adr.eq(Cat(phase, profile[0], - Mux(phase==0, channel[1], channel[0]))), + m_coeff.adr.eq(Cat(pipeline_phase, profile[0], + Mux(pipeline_phase == 0, channel[1], channel[0]))), dsp.offset[-w.coeff - 1:].eq(Mux(offset_clr, 0, Cat(m_coeff.dat_r[:w.coeff], m_coeff.dat_r[w.coeff - 1]) )), dsp.coeff.eq(m_coeff.dat_r[w.coeff:]), dsp.state.eq(m_state.dat_r), - Case(phase, { + Case(pipeline_phase, { 0: dsp.accu_clr.eq(1), 2: [ offset_clr.eq(1), @@ -373,6 +386,11 @@ class IIR(Module): }) ] + + # + # Arbitrate state memory access between steps. + # + # selected adc and profile delay (combinatorial from dat_r) # both share the same coeff word (sel in the lower 8 bits) sel_profile = Signal(w.channel) @@ -389,13 +407,13 @@ class IIR(Module): sel_profile.eq(m_coeff.dat_r[w.coeff:]), dly_profile.eq(m_coeff.dat_r[w.coeff + 8:]), If(self.shifting, - m_state.adr.eq(state | (1 << w.profile + w.channel)), + m_state.adr.eq(t_current_step | (1 << w.profile + w.channel)), m_state.dat_w.eq(m_state.dat_r), - m_state.we.eq(state[0]) + m_state.we.eq(t_current_step[0]) ), If(self.loading, - m_state.adr.eq((state << 1) | (1 << w.profile + w.channel)), - m_state.dat_w[-w.adc - 1:-1].eq(Array(self.adc)[state]), + m_state.adr.eq((t_current_step << 1) | (1 << w.profile + w.channel)), + m_state.dat_w[-w.adc - 1:-1].eq(Array(self.adc)[t_current_step]), m_state.dat_w[-1].eq(m_state.dat_w[-2]), m_state.we.eq(1) ), @@ -405,16 +423,20 @@ class IIR(Module): Cat(profile[1], channel[2]), # read old y Cat(profile[0], channel[0]), - # x0 (recent) + # read x0 (recent) 0 | (sel_profile << 1) | (1 << w.profile + w.channel), - # x1 (old) + # read x1 (old) 1 | (sel << 1) | (1 << w.profile + w.channel), - ])[phase]), + ])[pipeline_phase]), m_state.dat_w.eq(dsp.output), - m_state.we.eq((phase == 0) & stage[2] & en[1]), + m_state.we.eq((pipeline_phase == 0) & stages_active[2] & en[1]), ) ] + # + # Compute auxiliary signals (delayed servo enable, clip indicators, etc.). + # + # internal channel delay counters dlys = Array([Signal(w.dly) for i in range(1 << w.channel)]) @@ -434,51 +456,65 @@ class IIR(Module): en_out = Signal(reset_less=True) # latched channel en_iir en_iir = Signal(reset_less=True) + + self.sync += [ + Case(pipeline_phase, { + 0: [ + dly.eq(dlys[channel[0]]), + en_out.eq(en_outs[channel[0]]), + en_iir.eq(en_iirs[channel[0]]), + If(stages_active[2] & en[1] & dsp.clip, + clips[channel[2]].eq(1) + ) + ], + 2: [ + en[0].eq(0), + en[1].eq(en[0]), + sel.eq(sel_profile), + If(stages_active[0] & en_out, + If(dly != dly_profile, + dlys[channel[0]].eq(dly + 1) + ).Elif(en_iir, + en[0].eq(1) + ) + ) + ], + }), + ] + + # + # Update DDS profile with FTW/POW/ASF + # Stage 0 loads the POW, stage 1 the FTW, and stage 2 writes + # the ASF computed by the IIR filter. 
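+    # (FTW and POW words are read from the coefficient memory; the ASF is
+    # read from the filter state memory.)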
+ # + # muxing ddss = Array(self.dds) self.sync += [ - Case(phase, { - 0: [ - dly.eq(dlys[channel[0]]), - en_out.eq(en_outs[channel[0]]), - en_iir.eq(en_iirs[channel[0]]), - If(stage[1], - ddss[channel[1]][:w.word].eq(m_coeff.dat_r) - ), - If(stage[2] & en[1] & dsp.clip, - clips[channel[2]].eq(1) - ) - ], - 1: [ - If(stage[1], - ddss[channel[1]][w.word:2*w.word].eq( - m_coeff.dat_r), - ), - If(stage[2], - ddss[channel[2]][3*w.word:].eq( - m_state.dat_r[w.state - w.asf - 1:w.state - 1]) - ) - ], - 2: [ - en[0].eq(0), - en[1].eq(en[0]), - sel.eq(sel_profile), - If(stage[0], - ddss[channel[0]][2*w.word:3*w.word].eq( - m_coeff.dat_r), - If(en_out, - If(dly != dly_profile, - dlys[channel[0]].eq(dly + 1) - ).Elif(en_iir, - en[0].eq(1) - ) - ) - ) - ], - 3: [ - ], - }), + Case(pipeline_phase, { + 0: [ + If(stages_active[1], + ddss[channel[1]][:w.word].eq(m_coeff.dat_r), # ftw0 + ), + ], + 1: [ + If(stages_active[1], + ddss[channel[1]][w.word:2 * w.word].eq(m_coeff.dat_r), # ftw1 + ), + If(stages_active[2], + ddss[channel[2]][3*w.word:].eq( # asf + m_state.dat_r[w.state - w.asf - 1:w.state - 1]) + ) + ], + 2: [ + If(stages_active[0], + ddss[channel[0]][2*w.word:3*w.word].eq(m_coeff.dat_r), # pow + ), + ], + 3: [ + ], + }), ] def _coeff(self, channel, profile, coeff): diff --git a/artiq/gateware/suservo/servo.py b/artiq/gateware/suservo/servo.py index 08b31a3bc..1aec95f02 100644 --- a/artiq/gateware/suservo/servo.py +++ b/artiq/gateware/suservo/servo.py @@ -5,32 +5,76 @@ from .iir import IIR, IIRWidths from .dds_ser import DDS, DDSParams +def predict_timing(adc_p, iir_p, dds_p): + """ + The following is a sketch of the timing for 1 Sampler (8 ADCs) and N Urukuls + Shown here, the cycle duration is limited by the IIR loading+processing time. + + ADC|CONVH|CONV|READ|RTT|IDLE|CONVH|CONV|READ|RTT|IDLE|CONVH|CONV|READ|RTT|... + |4 |57 |16 |8 | .. |4 |57 |16 |8 | .. |4 |57 |16 |8 |... + ---+-------------------+------------------------+------------------------+--- + IIR| |LOAD|PROC |SHIFT|LOAD|PROC |SHIFT|... + | |8 |16*N+9 |16 |8 |16*N+9 |16 |... + ---+--------------------------------------+------------------------+--------- + DDS| |CMD|PROF|WAIT|IO_UP|IDLE|CMD|PR... + | |16 |128 |1 |1 | .. |16 | ... + + IIR loading starts once the ADC presents its data, the DDSes are updated + once the IIR processing is over. These are the only blocking processes. + IIR shifting happens in parallel to writing to the DDSes and ADC conversions + take place while the IIR filter is processing or the DDSes are being + written to, depending on the cycle duration (given by whichever module + takes the longest). 
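+
+    Returns the durations (t_adc, t_iir, t_dds, t_cycle), all measured in
+    cycles of the gateware clock (cf. the ``t_...`` convention in the package
+    docstring).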
+ """ + t_adc = (adc_p.t_cnvh + adc_p.t_conv + adc_p.t_rtt + + adc_p.channels*adc_p.width//adc_p.lanes) + 1 + # load adc_p.channels values, process dds_p.channels + # (4 processing phases and 2 additional stages à 4 phases + # to complete the processing of the last channel) + t_iir = adc_p.channels + 4*dds_p.channels + 8 + 1 + t_dds = (dds_p.width*2 + 1)*dds_p.clk + 1 + t_cycle = max(t_adc, t_iir, t_dds) + return t_adc, t_iir, t_dds, t_cycle + class Servo(Module): def __init__(self, adc_pads, dds_pads, adc_p, iir_p, dds_p): + t_adc, t_iir, t_dds, t_cycle = predict_timing(adc_p, iir_p, dds_p) + assert t_iir + 2*adc_p.channels < t_cycle, "need shifting time" + self.submodules.adc = ADC(adc_pads, adc_p) self.submodules.iir = IIR(iir_p) self.submodules.dds = DDS(dds_pads, dds_p) # adc channels are reversed on Sampler - for i, j, k, l in zip(reversed(self.adc.data), self.iir.adc, - self.iir.dds, self.dds.profile): - self.comb += j.eq(i), l.eq(k) - - t_adc = (adc_p.t_cnvh + adc_p.t_conv + adc_p.t_rtt + - adc_p.channels*adc_p.width//adc_p.lanes) + 1 - t_iir = ((1 + 4 + 1) << iir_p.channel) + 1 - t_dds = (dds_p.width*2 + 1)*dds_p.clk + 1 - - t_cycle = max(t_adc, t_iir, t_dds) - assert t_iir + (2 << iir_p.channel) < t_cycle, "need shifting time" + for iir, adc in zip(self.iir.adc, reversed(self.adc.data)): + self.comb += iir.eq(adc) + for dds, iir in zip(self.dds.profile, self.iir.dds): + self.comb += dds.eq(iir) + # If high, a new cycle is started if the current cycle (if any) is + # finished. Consequently, if low, servo iterations cease after the + # current cycle is finished. Don't care while the first step (ADC) + # is active. self.start = Signal() + + # Counter for delay between end of ADC cycle and start of next one, + # depending on the duration of the other steps. t_restart = t_cycle - t_adc + 1 assert t_restart > 1 cnt = Signal(max=t_restart) cnt_done = Signal() active = Signal(3) + + # Indicates whether different steps (0: ADC, 1: IIR, 2: DDS) are + # currently active (exposed for simulation only), with each bit being + # reset once the successor step is launched. Depending on the + # timing details of the different steps, any number can be concurrently + # active (e.g. ADC read from iteration n, IIR computation from iteration + # n - 1, and DDS write from iteration n - 2). + + # Asserted once per cycle when the DDS write has been completed. 
self.done = Signal() + self.sync += [ If(self.dds.done, active[2].eq(0) diff --git a/artiq/gateware/targets/kasli.py b/artiq/gateware/targets/kasli.py index 311028fcb..71ee933a5 100755 --- a/artiq/gateware/targets/kasli.py +++ b/artiq/gateware/targets/kasli.py @@ -135,12 +135,12 @@ class StandaloneBase(MiniSoC, AMPSoC): self.config["HAS_SI5324"] = None self.config["SI5324_SOFT_RESET"] = None - def add_rtio(self, rtio_channels): + def add_rtio(self, rtio_channels, sed_lanes=8): self.submodules.rtio_crg = _RTIOCRG(self.platform) self.csr_devices.append("rtio_crg") fix_serdes_timing_path(self.platform) self.submodules.rtio_tsc = rtio.TSC("async", glbl_fine_ts_width=3) - self.submodules.rtio_core = rtio.Core(self.rtio_tsc, rtio_channels) + self.submodules.rtio_core = rtio.Core(self.rtio_tsc, rtio_channels, lane_count=sed_lanes) self.csr_devices.append("rtio_core") self.submodules.rtio = rtio.KernelInitiator(self.rtio_tsc) self.submodules.rtio_dma = ClockDomainsRenamer("sys_kernel")( @@ -228,9 +228,9 @@ class SUServo(StandaloneBase): ttl_serdes_7series.Output_8X, ttl_serdes_7series.Output_8X) # EEM3/2: Sampler, EEM5/4: Urukul, EEM7/6: Urukul - eem.SUServo.add_std( - self, eems_sampler=(3, 2), - eems_urukul0=(5, 4), eems_urukul1=(7, 6)) + eem.SUServo.add_std(self, + eems_sampler=(3, 2), + eems_urukul=[[5, 4], [7, 6]]) for i in (1, 2): sfp_ctl = self.platform.request("sfp_ctl", i) @@ -375,13 +375,13 @@ class MasterBase(MiniSoC, AMPSoC): self.csr_devices.append("rtio_crg") fix_serdes_timing_path(platform) - def add_rtio(self, rtio_channels): + def add_rtio(self, rtio_channels, sed_lanes=8): # Only add MonInj core if there is anything to monitor if any([len(c.probes) for c in rtio_channels]): self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) self.csr_devices.append("rtio_moninj") - self.submodules.rtio_core = rtio.Core(self.rtio_tsc, rtio_channels) + self.submodules.rtio_core = rtio.Core(self.rtio_tsc, rtio_channels, lane_count=sed_lanes) self.csr_devices.append("rtio_core") self.submodules.rtio = rtio.KernelInitiator(self.rtio_tsc) @@ -608,13 +608,13 @@ class SatelliteBase(BaseSoC): self.csr_devices.append("rtio_crg") fix_serdes_timing_path(platform) - def add_rtio(self, rtio_channels): + def add_rtio(self, rtio_channels, sed_lanes=8): # Only add MonInj core if there is anything to monitor if any([len(c.probes) for c in rtio_channels]): self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) self.csr_devices.append("rtio_moninj") - self.submodules.local_io = SyncRTIO(self.rtio_tsc, rtio_channels) + self.submodules.local_io = SyncRTIO(self.rtio_tsc, rtio_channels, lane_count=sed_lanes) self.comb += self.drtiosat.async_errors.eq(self.local_io.async_errors) self.submodules.cri_con = rtio.CRIInterconnectShared( [self.drtiosat.cri], diff --git a/artiq/gateware/targets/kasli_generic.py b/artiq/gateware/targets/kasli_generic.py index 122c3e0cf..bb822f41e 100755 --- a/artiq/gateware/targets/kasli_generic.py +++ b/artiq/gateware/targets/kasli_generic.py @@ -57,7 +57,8 @@ class GenericStandalone(StandaloneBase): self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) self.rtio_channels.append(rtio.LogChannel()) - self.add_rtio(self.rtio_channels) + self.add_rtio(self.rtio_channels, sed_lanes=description["sed_lanes"]) + if has_grabber: self.config["HAS_GRABBER"] = None self.add_csr_group("grabber", self.grabber_csr_group) @@ -94,7 +95,7 @@ class GenericMaster(MasterBase): self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) self.rtio_channels.append(rtio.LogChannel()) - 
self.add_rtio(self.rtio_channels) + self.add_rtio(self.rtio_channels, sed_lanes=description["sed_lanes"]) if has_grabber: self.config["HAS_GRABBER"] = None self.add_csr_group("grabber", self.grabber_csr_group) @@ -127,7 +128,7 @@ class GenericSatellite(SatelliteBase): self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) self.rtio_channels.append(rtio.LogChannel()) - self.add_rtio(self.rtio_channels) + self.add_rtio(self.rtio_channels, sed_lanes=description["sed_lanes"]) if has_grabber: self.config["HAS_GRABBER"] = None self.add_csr_group("grabber", self.grabber_csr_group) diff --git a/artiq/gateware/targets/kc705.py b/artiq/gateware/targets/kc705.py index 4cec96e87..e04088a92 100755 --- a/artiq/gateware/targets/kc705.py +++ b/artiq/gateware/targets/kc705.py @@ -129,7 +129,7 @@ class _StandaloneBase(MiniSoC, AMPSoC): } mem_map.update(MiniSoC.mem_map) - def __init__(self, gateware_identifier_str=None, **kwargs): + def __init__(self, gateware_identifier_str=None, drtio_100mhz=False, **kwargs): MiniSoC.__init__(self, cpu_type="vexriscv", cpu_bus_width=64, @@ -207,7 +207,7 @@ class _MasterBase(MiniSoC, AMPSoC): } mem_map.update(MiniSoC.mem_map) - def __init__(self, gateware_identifier_str=None, **kwargs): + def __init__(self, gateware_identifier_str=None, drtio_100mhz=False, **kwargs): MiniSoC.__init__(self, cpu_type="vexriscv", cpu_bus_width=64, @@ -236,11 +236,14 @@ class _MasterBase(MiniSoC, AMPSoC): platform.request("sfp"), platform.request("user_sma_mgt") ] - # 1000BASE_BX10 Ethernet compatible, 125MHz RTIO clock + rtio_clk_freq = 100e6 if drtio_100mhz else 125e6 + + # 1000BASE_BX10 Ethernet compatible, 100/125MHz RTIO clock self.submodules.drtio_transceiver = gtx_7series.GTX( clock_pads=platform.request("si5324_clkout"), pads=data_pads, - sys_clk_freq=self.clk_freq) + sys_clk_freq=self.clk_freq, + rtio_clk_freq=rtio_clk_freq) self.csr_devices.append("drtio_transceiver") self.submodules.rtio_tsc = rtio.TSC("async", glbl_fine_ts_width=3) @@ -341,7 +344,7 @@ class _SatelliteBase(BaseSoC): } mem_map.update(BaseSoC.mem_map) - def __init__(self, gateware_identifier_str=None, sma_as_sat=False, **kwargs): + def __init__(self, gateware_identifier_str=None, sma_as_sat=False, drtio_100mhz=False, **kwargs): BaseSoC.__init__(self, cpu_type="vexriscv", cpu_bus_width=64, @@ -369,11 +372,14 @@ class _SatelliteBase(BaseSoC): if sma_as_sat: data_pads = data_pads[::-1] - # 1000BASE_BX10 Ethernet compatible, 125MHz RTIO clock + rtio_clk_freq = 100e6 if drtio_100mhz else 125e6 + + # 1000BASE_BX10 Ethernet compatible, 100/125MHz RTIO clock self.submodules.drtio_transceiver = gtx_7series.GTX( clock_pads=platform.request("si5324_clkout"), pads=data_pads, - sys_clk_freq=self.clk_freq) + sys_clk_freq=self.clk_freq, + rtio_clk_freq=rtio_clk_freq) self.csr_devices.append("drtio_transceiver") self.submodules.rtio_tsc = rtio.TSC("sync", glbl_fine_ts_width=3) @@ -673,6 +679,8 @@ def main(): "(default: %(default)s)") parser.add_argument("--gateware-identifier-str", default=None, help="Override ROM identifier") + parser.add_argument("--drtio100mhz", action="store_true", default=False, + help="DRTIO systems only - use 100MHz RTIO clock") args = parser.parse_args() variant = args.variant.lower() @@ -681,7 +689,7 @@ def main(): except KeyError: raise SystemExit("Invalid variant (-V/--variant)") - soc = cls(gateware_identifier_str=args.gateware_identifier_str, **soc_kc705_argdict(args)) + soc = cls(gateware_identifier_str=args.gateware_identifier_str, drtio_100mhz=args.drtio100mhz, **soc_kc705_argdict(args)) 
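+    # For example (hypothetical invocation, assuming a DRTIO master variant):
+    #   python -m artiq.gateware.targets.kc705 -V master --drtio100mhz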
build_artiq_soc(soc, builder_argdict(args)) diff --git a/artiq/language/environment.py b/artiq/language/environment.py index 7992fe3af..ad7a165cb 100644 --- a/artiq/language/environment.py +++ b/artiq/language/environment.py @@ -1,4 +1,3 @@ -import warnings from collections import OrderedDict from inspect import isclass @@ -331,7 +330,8 @@ class HasEnvironment: @rpc(flags={"async"}) def set_dataset(self, key, value, - broadcast=False, persist=False, archive=True, save=None): + broadcast=False, persist=False, archive=True, + hdf5_options=None): """Sets the contents and handling modes of a dataset. Datasets must be scalars (``bool``, ``int``, ``float`` or NumPy scalar) @@ -343,13 +343,16 @@ class HasEnvironment: broadcast. :param archive: the data is saved into the local storage of the current run (archived as a HDF5 file). - :param save: deprecated. + :param hdf5_options: dict of keyword arguments to pass to + :meth:`h5py.Group.create_dataset`. For example, pass ``{"compression": "gzip"}`` + to enable transparent zlib compression of this dataset in the HDF5 archive. + See the `h5py documentation `_ + for a list of valid options. """ - if save is not None: - warnings.warn("set_dataset save parameter is deprecated, " - "use archive instead", FutureWarning) - archive = save - self.__dataset_mgr.set(key, value, broadcast, persist, archive) + + self.__dataset_mgr.set( + key, value, broadcast, persist, archive, hdf5_options + ) @rpc(flags={"async"}) def mutate_dataset(self, key, index, value): diff --git a/artiq/master/databases.py b/artiq/master/databases.py index 14cfae4cd..8ef71c6a2 100644 --- a/artiq/master/databases.py +++ b/artiq/master/databases.py @@ -35,6 +35,15 @@ class DeviceDB: return desc +def make_dataset(*, persist=False, value=None, hdf5_options=None): + "PYON-serializable representation of a dataset in the DatasetDB" + return { + "persist": persist, + "value": value, + "hdf5_options": hdf5_options or {}, + } + + class DatasetDB(TaskObject): def __init__(self, persist_file, autosave_period=30): self.persist_file = persist_file @@ -44,10 +53,23 @@ class DatasetDB(TaskObject): file_data = pyon.load_file(self.persist_file) except FileNotFoundError: file_data = dict() - self.data = Notifier({k: (True, v) for k, v in file_data.items()}) + self.data = Notifier( + { + k: make_dataset( + persist=True, + value=v["value"], + hdf5_options=v["hdf5_options"] + ) + for k, v in file_data.items() + } + ) def save(self): - data = {k: v[1] for k, v in self.data.raw_view.items() if v[0]} + data = { + k: d + for k, d in self.data.raw_view.items() + if d["persist"] + } pyon.store_file(self.persist_file, data) async def _do(self): @@ -59,20 +81,23 @@ class DatasetDB(TaskObject): self.save() def get(self, key): - return self.data.raw_view[key][1] + return self.data.raw_view[key] def update(self, mod): process_mod(self.data, mod) # convenience functions (update() can be used instead) - def set(self, key, value, persist=None): + def set(self, key, value, persist=None, hdf5_options=None): if persist is None: if key in self.data.raw_view: - persist = self.data.raw_view[key][0] + persist = self.data.raw_view[key]["persist"] else: persist = False - self.data[key] = (persist, value) + self.data[key] = make_dataset( + persist=persist, + value=value, + hdf5_options=hdf5_options, + ) def delete(self, key): del self.data[key] - # diff --git a/artiq/master/worker_db.py b/artiq/master/worker_db.py index 172846145..8a2200e05 100644 --- a/artiq/master/worker_db.py +++ b/artiq/master/worker_db.py @@ -8,9 +8,12 @@ from 
diff --git a/artiq/master/worker_db.py b/artiq/master/worker_db.py
index 172846145..8a2200e05 100644
--- a/artiq/master/worker_db.py
+++ b/artiq/master/worker_db.py
@@ -8,9 +8,12 @@
 from operator import setitem
 import importlib
 import logging
 
+import numpy as np
+
 from sipyco.sync_struct import Notifier
 from sipyco.pc_rpc import AutoTarget, Client, BestEffortClient
 
+from artiq.master.databases import make_dataset
 
 logger = logging.getLogger(__name__)
 
@@ -115,7 +118,8 @@ class DatasetManager:
         self.ddb = ddb
         self._broadcaster.publish = ddb.update
 
-    def set(self, key, value, broadcast=False, persist=False, archive=True):
+    def set(self, key, value, broadcast=False, persist=False, archive=True,
+            hdf5_options=None):
         if key in self.archive:
             logger.warning("Modifying dataset '%s' which is in archive, "
                            "archive will remain untouched",
@@ -125,12 +129,20 @@
             broadcast = True
 
         if broadcast:
-            self._broadcaster[key] = persist, value
+            self._broadcaster[key] = make_dataset(
+                persist=persist,
+                value=value,
+                hdf5_options=hdf5_options,
+            )
         elif key in self._broadcaster.raw_view:
             del self._broadcaster[key]
 
         if archive:
-            self.local[key] = value
+            self.local[key] = make_dataset(
+                persist=persist,
+                value=value,
+                hdf5_options=hdf5_options,
+            )
         elif key in self.local:
             del self.local[key]
 
@@ -138,11 +150,11 @@
         target = self.local.get(key, None)
         if key in self._broadcaster.raw_view:
             if target is not None:
-                assert target is self._broadcaster.raw_view[key][1]
-            return self._broadcaster[key][1]
+                assert target["value"] is self._broadcaster.raw_view[key]["value"]
+            return self._broadcaster[key]["value"]
         if target is None:
             raise KeyError("Cannot mutate nonexistent dataset '{}'".format(key))
-        return target
+        return target["value"]
 
     def mutate(self, key, index, value):
         target = self._get_mutation_target(key)
@@ -158,15 +170,15 @@
 
     def get(self, key, archive=False):
         if key in self.local:
-            return self.local[key]
-
-        data = self.ddb.get(key)
+            return self.local[key]["value"]
+
+        dataset = self.ddb.get(key)
         if archive:
             if key in self.archive:
                 logger.warning("Dataset '%s' is already in archive, "
                                "overwriting", key, stack_info=True)
-            self.archive[key] = data
-        return data
+            self.archive[key] = dataset
+        return dataset["value"]
 
     def write_hdf5(self, f):
         datasets_group = f.create_group("datasets")
@@ -182,7 +194,7 @@
     # Add context to exception message when the user writes a dataset that is
    # not representable in HDF5.
     try:
-        group[k] = v
+        group.create_dataset(k, data=v["value"], **v["hdf5_options"])
     except TypeError as e:
         raise TypeError("Error writing dataset '{}' of type '{}': {}".format(
-            k, type(v), e))
+            k, type(v["value"]), e))
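Reviewer note (not part of the patch): the patched ``_write()`` is equivalent to calling ``create_dataset`` with the stored options unpacked. A self-contained sketch using ``make_dataset()`` from the patched ``databases.py``; the file name and the ``"x"`` key are arbitrary::

    import h5py
    from artiq.master.databases import make_dataset

    d = make_dataset(value=list(range(10)),
                     hdf5_options={"compression": "gzip"})
    # In-memory HDF5 file (core driver, no backing store), mirroring what
    # the patched _write() does for a single dataset entry.
    with h5py.File("example.h5", "w", "core", backing_store=False) as f:
        group = f.create_group("datasets")
        group.create_dataset("x", data=d["value"], **d["hdf5_options"])
        assert f["datasets"]["x"].compression == "gzip"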
diff --git a/artiq/test/lit/embedding/syscall_arg_attrs.py b/artiq/test/lit/embedding/syscall_arg_attrs.py
new file mode 100644
index 000000000..67207dc32
--- /dev/null
+++ b/artiq/test/lit/embedding/syscall_arg_attrs.py
@@ -0,0 +1,30 @@
+# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding +compile %s
+# RUN: OutputCheck %s --file-to-check=%t.ll
+
+from artiq.language.core import *
+from artiq.language.types import *
+
+# Make sure `byval` and `sret` are specified both at the call site and the
+# declaration. This isn't caught by the LLVM IR validator, but mismatches
+# lead to miscompilations (at least in LLVM 11).
+
+
+@kernel
+def entrypoint():
+    # CHECK: call void @accept_str\({ i8\*, i32 }\* nonnull byval
+    accept_str("foo")
+
+    # CHECK: call void @return_str\({ i8\*, i32 }\* nonnull sret
+    return_str()
+
+
+# CHECK: declare void @accept_str\({ i8\*, i32 }\* byval\)
+@syscall
+def accept_str(name: TStr) -> TNone:
+    pass
+
+
+# CHECK: declare void @return_str\({ i8\*, i32 }\* sret\)
+@syscall
+def return_str() -> TStr:
+    pass
diff --git a/artiq/test/test_dataset_db.py b/artiq/test/test_dataset_db.py
new file mode 100644
index 000000000..3d087a806
--- /dev/null
+++ b/artiq/test/test_dataset_db.py
@@ -0,0 +1,111 @@
+"""Test internal dataset representation (persistence, applets)"""
+import unittest
+import tempfile
+
+from artiq.master.databases import DatasetDB
+from sipyco import pyon
+
+KEY1 = "key1"
+KEY2 = "key2"
+KEY3 = "key3"
+DATA = list(range(10))
+COMP = "gzip"
+
+
+class TestDatasetDB(unittest.TestCase):
+    def setUp(self):
+        # empty dataset persistence file
+        self.persist_file = tempfile.NamedTemporaryFile(mode="w+")
+        print("{}", file=self.persist_file, flush=True)
+
+        self.ddb = DatasetDB(self.persist_file.name)
+
+        self.ddb.set(KEY1, DATA, persist=True)
+        self.ddb.set(KEY2, DATA, persist=True, hdf5_options=dict(compression=COMP))
+        self.ddb.set(KEY3, DATA, hdf5_options=dict(shuffle=True))
+
+        self.save_ddb_to_disk()
+
+    def save_ddb_to_disk(self):
+        self.ddb.save()
+        self.persist_file.flush()
+
+    def load_ddb_from_disk(self):
+        return pyon.load_file(self.persist_file.name)
+
+    def test_persist_format(self):
+        data = pyon.load_file(self.persist_file.name)
+
+        for key in [KEY1, KEY2]:
+            self.assertTrue(data[key]["persist"])
+            self.assertEqual(data[key]["value"], DATA)
+
+        self.assertEqual(data[KEY2]["hdf5_options"]["compression"], COMP)
+        self.assertEqual(data[KEY1]["hdf5_options"], dict())
+
+    def test_only_persist_marked_datasets(self):
+        data = self.load_ddb_from_disk()
+
+        with self.assertRaises(KeyError):
+            data[KEY3]
+
+    def test_memory_format(self):
+        ds = self.ddb.get(KEY2)
+        self.assertTrue(ds["persist"])
+        self.assertEqual(ds["value"], DATA)
+        self.assertEqual(ds["hdf5_options"]["compression"], COMP)
+
+        ds = self.ddb.get(KEY3)
+        self.assertFalse(ds["persist"])
+        self.assertEqual(ds["value"], DATA)
+        self.assertTrue(ds["hdf5_options"]["shuffle"])
+
+    def test_delete(self):
+        self.ddb.delete(KEY1)
+        self.save_ddb_to_disk()
+
+        data = self.load_ddb_from_disk()
+
+        with self.assertRaises(KeyError):
+            data[KEY1]
+
+        self.assertTrue(data[KEY2]["persist"])
+
+    def test_update(self):
+        self.assertFalse(self.ddb.get(KEY3)["persist"])
+
+        mod = {
+            "action": "setitem",
+            "path": [KEY3],
+            "key": "persist",
+            "value": True,
+        }
+
+        self.ddb.update(mod)
+        self.assertTrue(self.ddb.get(KEY3)["persist"])
+
+    def test_update_hdf5_options(self):
+        with self.assertRaises(KeyError):
+            self.ddb.get(KEY1)["hdf5_options"]["shuffle"]
+
+        mod = {
+            "action": "setitem",
+            "path": [KEY1, "hdf5_options"],
+            "key": "shuffle",
+            "value": False,
+        }
+
+        self.ddb.update(mod)
+        self.assertFalse(self.ddb.get(KEY1)["hdf5_options"]["shuffle"])
+
+    def test_reset_copies_persist(self):
+        self.assertTrue(self.ddb.get(KEY1)["persist"])
+        self.ddb.set(KEY1, DATA)
+        self.assertTrue(self.ddb.get(KEY1)["persist"])
+
+        self.assertFalse(self.ddb.get(KEY3)["persist"])
+        self.ddb.set(KEY3, DATA)
+        self.assertFalse(self.ddb.get(KEY3)["persist"])
+
+        self.ddb.set(KEY3, DATA, persist=True)
+        self.assertTrue(self.ddb.get(KEY3)["persist"])
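Reviewer note (not part of the patch): ``test_update`` and ``test_update_hdf5_options`` above rely on ``sync_struct`` mods being able to descend into the new per-dataset dict via ``path``. A sketch of that mechanism under the patched representation; the ``"key1"`` name is hypothetical::

    from sipyco.sync_struct import Notifier, process_mod
    from artiq.master.databases import make_dataset

    data = Notifier({"key1": make_dataset(value=[1, 2, 3])})
    mod = {
        "action": "setitem",
        "path": ["key1", "hdf5_options"],  # descend into the dataset entry
        "key": "compression",
        "value": "gzip",
    }
    process_mod(data, mod)  # same call DatasetDB.update() makes
    assert data.raw_view["key1"]["hdf5_options"]["compression"] == "gzip"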
diff --git a/artiq/test/test_datasets.py b/artiq/test/test_datasets.py
index 871568a2a..3fa6d6bb7 100644
--- a/artiq/test/test_datasets.py
+++ b/artiq/test/test_datasets.py
@@ -3,6 +3,9 @@
 import copy
 import unittest
 
+import h5py
+import numpy as np
+
 from sipyco.sync_struct import process_mod
 
 from artiq.experiment import EnvExperiment
@@ -14,7 +17,7 @@ class MockDatasetDB:
         self.data = dict()
 
     def get(self, key):
-        return self.data[key][1]
+        return self.data[key]["value"]
 
     def update(self, mod):
         # Copy mod before applying to avoid sharing references to objects
@@ -82,9 +85,9 @@ class ExperimentDatasetCase(unittest.TestCase):
     def test_append_broadcast(self):
         self.exp.set(KEY, [], broadcast=True)
         self.exp.append(KEY, 0)
-        self.assertEqual(self.dataset_db.data[KEY][1], [0])
+        self.assertEqual(self.dataset_db.data[KEY]["value"], [0])
         self.exp.append(KEY, 1)
-        self.assertEqual(self.dataset_db.data[KEY][1], [0, 1])
+        self.assertEqual(self.dataset_db.data[KEY]["value"], [0, 1])
 
     def test_append_array(self):
         for broadcast in (True, False):
@@ -103,3 +106,44 @@
 
         with self.assertRaises(KeyError):
             self.exp.append(KEY, 0)
+
+    def test_write_hdf5_options(self):
+        data = np.random.randint(0, 1024, 1024)
+        self.exp.set(
+            KEY,
+            data,
+            hdf5_options=dict(
+                compression="gzip",
+                compression_opts=6,
+                shuffle=True,
+                fletcher32=True
+            ),
+        )
+
+        with h5py.File("test.h5", "a", "core", backing_store=False) as f:
+            self.dataset_mgr.write_hdf5(f)
+
+            self.assertTrue(np.array_equal(f["datasets"][KEY][()], data))
+            self.assertEqual(f["datasets"][KEY].compression, "gzip")
+            self.assertEqual(f["datasets"][KEY].compression_opts, 6)
+            self.assertTrue(f["datasets"][KEY].shuffle)
+            self.assertTrue(f["datasets"][KEY].fletcher32)
+
+    def test_write_hdf5_no_options(self):
+        data = np.random.randint(0, 1024, 1024)
+        self.exp.set(KEY, data)
+
+        with h5py.File("test.h5", "a", "core", backing_store=False) as f:
+            self.dataset_mgr.write_hdf5(f)
+            self.assertTrue(np.array_equal(f["datasets"][KEY][()], data))
+            self.assertIsNone(f["datasets"][KEY].compression)
+
+    def test_write_hdf5_invalid_type(self):
+        class CustomType:
+            def __init__(self, x):
+                self.x = x
+
+        self.exp.set(KEY, CustomType(42))
+
+        with h5py.File("test.h5", "w", "core", backing_store=False) as f:
+            with self.assertRaisesRegex(TypeError, "CustomType"):
+                self.dataset_mgr.write_hdf5(f)
diff --git a/artiq/test/test_scheduler.py b/artiq/test/test_scheduler.py
index ad4f243bd..5a8cdb6bc 100644
--- a/artiq/test/test_scheduler.py
+++ b/artiq/test/test_scheduler.py
@@ -7,6 +7,7 @@
 from time import time, sleep
 
 from artiq.experiment import *
 from artiq.master.scheduler import Scheduler
+from artiq.master.databases import make_dataset
 
 
 class EmptyExperiment(EnvExperiment):
@@ -291,8 +292,13 @@ class SchedulerCase(unittest.TestCase):
             nonlocal termination_ok
             self.assertEqual(
                 mod,
-                {"action": "setitem", "key": "termination_ok",
-                 "value": (False, True), "path": []})
+                {
+                    "action": "setitem",
+                    "key": "termination_ok",
+                    "value": make_dataset(value=True),
+                    "path": []
+                }
+            )
             termination_ok = True
         handlers = {
             "update_dataset": check_termination
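Reviewer note (not part of the patch): the scheduler test now expects the broadcast mod to carry the full dataset dict rather than the old ``(persist, value)`` tuple. Concretely, per the ``make_dataset()`` defaults introduced in ``databases.py``::

    from artiq.master.databases import make_dataset

    assert make_dataset(value=True) == {
        "persist": False,   # helper default
        "value": True,
        "hdf5_options": {},  # helper default
    }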
diff --git a/flake.lock b/flake.lock
index b01ba2a41..35c1fb29a 100644
--- a/flake.lock
+++ b/flake.lock
@@ -3,11 +3,11 @@
     "mozilla-overlay": {
       "flake": false,
       "locked": {
-        "lastModified": 1636569584,
-        "narHash": "sha256-iDFogua24bhFJZSxG/jhZbbNxDXuKP9S/pyRIYzrRPM=",
+        "lastModified": 1637337116,
+        "narHash": "sha256-LKqAcdL+woWeYajs02bDQ7q8rsqgXuzhC354NoRaV80=",
         "owner": "mozilla",
         "repo": "nixpkgs-mozilla",
-        "rev": "9f70f86d73fa97e043bebeb58e5676d157069cfb",
+        "rev": "cbc7435f5b0b3d17b16fb1d20cf7b616eec5e093",
         "type": "github"
       },
       "original": {
@@ -18,16 +18,16 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1636552551,
-        "narHash": "sha256-k7Hq/bvUnRlAfFjPGuw3FsSqqspQdRHsCHpgadw6UkQ=",
+        "lastModified": 1638279546,
+        "narHash": "sha256-1KCwN7twjp1dBdp0jPgVdYFztDkCR8+roo0B34J9oBY=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "9e86f5f7a19db6da2445f07bafa6694b556f9c6d",
+        "rev": "96b4157790fc96e70d6e6c115e3f34bba7be490f",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
-        "ref": "nixos-21.05",
+        "ref": "nixos-21.11",
         "repo": "nixpkgs",
         "type": "github"
       }
@@ -61,11 +61,11 @@
     "src-misoc": {
       "flake": false,
       "locked": {
-        "lastModified": 1636527305,
-        "narHash": "sha256-/2XTejqj0Bo81HaTrlTSWwInnWwsuqnq+CURXbpIrkA=",
+        "lastModified": 1638683371,
+        "narHash": "sha256-sm2SxHmEGfE56+V+joDHMjpOaxg8+t3EJEk1d11C1E0=",
         "ref": "master",
-        "rev": "f5203e406520874e15ab5d070058ef642fc57fd9",
-        "revCount": 2417,
+        "rev": "71b74f87b41c56a6c6d767cdfde0356c15a379a7",
+        "revCount": 2418,
         "submodules": true,
         "type": "git",
         "url": "https://github.com/m-labs/misoc.git"
diff --git a/flake.nix b/flake.nix
index 4630d3ec8..f5a29ff4a 100644
--- a/flake.nix
+++ b/flake.nix
@@ -1,7 +1,7 @@
 {
   description = "A leading-edge control system for quantum information experiments";
 
-  inputs.nixpkgs.url = github:NixOS/nixpkgs/nixos-21.05;
+  inputs.nixpkgs.url = github:NixOS/nixpkgs/nixos-21.11;
   inputs.mozilla-overlay = { url = github:mozilla/nixpkgs-mozilla; flake = false; };
   inputs.src-sipyco = { url = github:m-labs/sipyco; flake = false; };
   inputs.src-pythonparser = { url = github:m-labs/pythonparser; flake = false; };
@@ -270,7 +270,6 @@
               ln -s $ARTIQ_PATH/firmware/Cargo.lock .
               cargoSetupPostUnpackHook
               cargoSetupPostPatchHook
-              export TARGET_AR=llvm-ar
               ${buildCommand}
             '';
             doCheck = true;
@@ -384,7 +383,6 @@
             packages.x86_64-linux.vivado
             packages.x86_64-linux.openocd-bscanspi
           ];
-          TARGET_AR="llvm-ar";
         };
 
       hydraJobs = {
@@ -405,17 +403,18 @@
           phases = [ "buildPhase" ];
           buildPhase = ''
+            whoami
             export HOME=`mktemp -d`
             mkdir $HOME/.ssh
-            cp /opt/hydra_id_rsa $HOME/.ssh/id_rsa
-            cp /opt/hydra_id_rsa.pub $HOME/.ssh/id_rsa.pub
+            cp /opt/hydra_id_ed25519 $HOME/.ssh/id_ed25519
+            cp /opt/hydra_id_ed25519.pub $HOME/.ssh/id_ed25519.pub
             echo "rpi-1 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPOBQVcsvk6WgRj18v4m0zkFeKrcN9gA+r6sxQxNwFpv" > $HOME/.ssh/known_hosts
-            chmod 600 $HOME/.ssh/id_rsa
+            chmod 600 $HOME/.ssh/id_ed25519
             LOCKCTL=$(mktemp -d)
             mkfifo $LOCKCTL/lockctl
 
             cat $LOCKCTL/lockctl | ${pkgs.openssh}/bin/ssh \
-              -i $HOME/.ssh/id_rsa \
+              -i $HOME/.ssh/id_ed25519 \
               -o UserKnownHostsFile=$HOME/.ssh/known_hosts \
               rpi-1 \
               'mkdir -p /tmp/board_lock && flock /tmp/board_lock/kc705-1 -c "echo Ok; cat"' \