mirror of
https://github.com/m-labs/artiq.git
synced 2025-02-07 16:15:22 +08:00
Merge branch 'master' into nac3
This commit is contained in:
commit
0daa743aff
@ -6,14 +6,25 @@ Release notes
|
||||
ARTIQ-9 (Unreleased)
|
||||
--------------------
|
||||
|
||||
* Dashboard:
  - Experiment windows can have different colors, selected by the user.
  - Zotino monitoring now displays the values in volts.
  - Schedule display columns can now be reordered and shown/hidden using the table
    header context menu.
  - State files are now automatically backed up upon successful loading.
* afws_client now uses the "happy eyeballs" algorithm (RFC 6555) for a faster and more
|
||||
reliable connection to the server.
|
||||
* The Zadig driver installer was added to the MSYS2 offline installer.
|
||||
* Fastino monitoring with Moninj is now supported.
|
||||
* Qt6 support.
|
||||
* Python 3.12 support.
|
||||
* Compiler can now give automatic suggestions for ``kernel_invariants``.
|
||||
* Idle kernels now restart when written with ``artiq_coremgmt`` and stop when erased/removed from config.
|
||||
* New support for the EBAZ4205 Zynq-SoC control card.
|
||||
* New core device driver for the AD9834 DDS, tested with the ZonRi Technology Co., Ltd. AD9834-Module.
|
||||
* Support for coredevice reflashing through the new ``flash`` tool in ``artiq_coremgmt``.
|
||||
* ``artiq_coremgmt`` now supports configuring satellites.
|
||||
* ``artiq.coredevice.fmcdio_vhdci_eem`` has been removed.
|
||||
|
||||
ARTIQ-8
|
||||
-------
|
||||
|
397
artiq/coredevice/ad9834.py
Normal file
397
artiq/coredevice/ad9834.py
Normal file
@ -0,0 +1,397 @@
|
||||
"""
|
||||
RTIO Driver for the Analog Devices AD9834 DDS via 3-wire SPI interface.
|
||||
"""
|
||||
|
||||
# https://www.analog.com/media/en/technical-documentation/data-sheets/AD9834.pdf
|
||||
# https://www.analog.com/media/en/technical-documentation/app-notes/an-1070.pdf
|
||||
|
||||
from artiq.coredevice import spi2 as spi
|
||||
from artiq.experiment import *
|
||||
from artiq.language.core import *
|
||||
from artiq.language.types import *
|
||||
from artiq.language.units import *
|
||||
|
||||
# Control register bits (see the AD9834 datasheet linked above).
AD9834_B28 = 1 << 13       # frequency word written as two consecutive 14-bit writes
AD9834_HLB = 1 << 12       # with B28 clear: write MSB half (1) or LSB half (0)
AD9834_FSEL = 1 << 11      # software select of the active frequency register
AD9834_PSEL = 1 << 10      # software select of the active phase register
AD9834_PIN_SW = 1 << 9     # hand FSELECT/PSELECT/RESET control to the hardware pins
AD9834_RESET = 1 << 8      # hold the DDS in reset; DAC not operational while set
AD9834_SLEEP1 = 1 << 7     # disable the internal clock
AD9834_SLEEP12 = 1 << 6    # power down the on-chip DAC
AD9834_OPBITEN = 1 << 5    # enable output on the SIGN BIT OUT pin
AD9834_SIGN_PIB = 1 << 4   # route comparator (1) or DAC data MSB (0) to SIGN BIT OUT
AD9834_DIV2 = 1 << 3       # output the MSB directly (1) or MSB/2 (0) on SIGN BIT OUT
AD9834_MODE = 1 << 1       # triangular (1) or sinusoidal (0) waveform output

# Frequency register addresses, encoded in the two address MSBs of the 16-bit word.
AD9834_FREQ_REG_0 = 0b01 << 14
AD9834_FREQ_REG_1 = 0b10 << 14
FREQ_REGS = [AD9834_FREQ_REG_0, AD9834_FREQ_REG_1]

# Phase register addresses: 0b11 address prefix, bit 13 selects register 0 or 1.
AD9834_PHASE_REG = 0b11 << 14
AD9834_PHASE_REG_0 = AD9834_PHASE_REG | (0 << 13)
AD9834_PHASE_REG_1 = AD9834_PHASE_REG | (1 << 13)
PHASE_REGS = [AD9834_PHASE_REG_0, AD9834_PHASE_REG_1]
|
||||
|
||||
|
||||
class AD9834:
    """
    AD9834 DDS driver.

    This class provides control for the DDS AD9834.

    The driver utilizes bit-controlled :const:`AD9834_FSEL`, :const:`AD9834_PSEL`, and
    :const:`AD9834_RESET`. To pin control ``FSELECT``, ``PSELECT``, and ``RESET`` set
    :const:`AD9834_PIN_SW`. The ``ctrl_reg`` attribute is used to maintain the state of
    the control register, enabling persistent management of various configurations.

    :param spi_device: SPI bus device name.
    :param spi_freq: SPI bus clock frequency (default: 10 MHz, max: 40 MHz).
    :param clk_freq: DDS clock frequency (default: 75 MHz).
    :param core_device: Core device name (default: "core").
    """

    kernel_invariants = {"core", "bus", "spi_freq", "clk_freq"}

    def __init__(
        self, dmgr, spi_device, spi_freq=10 * MHz, clk_freq=75 * MHz, core_device="core"
    ):
        self.core = dmgr.get(core_device)
        self.bus = dmgr.get(spi_device)
        assert spi_freq <= 40 * MHz, "SPI frequency exceeds maximum value of 40 MHz"
        self.spi_freq = spi_freq
        self.clk_freq = clk_freq
        self.ctrl_reg = 0x0000  # Reset control register

    @kernel
    def init(self):
        """
        Initialize the AD9834: configure the SPI bus and reset the DDS.

        This method performs the necessary setup for the AD9834 device, including:
        - Configuring the SPI bus parameters (clock polarity, data width, and frequency).
        - Putting the AD9834 into a reset state to ensure proper initialization.

        The SPI bus is configured to use 16 bits of data width with the clock frequency
        provided as a parameter when creating the AD9834 instance. After configuring
        the SPI bus, the method invokes :meth:`enable_reset()` to reset the AD9834.
        This is an essential step to prepare the device for subsequent configuration
        of frequency and phase.

        This method should be called before any other operations are performed
        on the AD9834 to ensure that the device is in a known state.
        """
        self.bus.set_config(spi.SPI_CLK_POLARITY | spi.SPI_END, 16, self.spi_freq, 1)
        self.enable_reset()

    @kernel
    def set_frequency_reg(self, freq_reg, frequency: TFloat):
        """
        Set the frequency for the specified frequency register.

        This method calculates the frequency word based on the provided frequency in Hz
        and writes it to the specified frequency register.

        :param freq_reg: The frequency register to write to, must be one of
            :const:`AD9834_FREQ_REG_0` or :const:`AD9834_FREQ_REG_1`.
        :param frequency: The desired frequency in Hz, which will be converted to a
            frequency word suitable for the AD9834.

        The frequency word is calculated using the formula:

            ``freq_word = (frequency * (1 << 28)) / clk_freq``

        The result is limited to the lower 28 bits for compatibility with the AD9834.

        The method first sets the control register to enable the appropriate settings,
        then sends the fourteen least significant bits LSBs and fourteen most significant
        bits MSBs in two consecutive writes to the specified frequency register.
        """
        if freq_reg not in FREQ_REGS:
            raise ValueError("Invalid frequency register")
        assert frequency <= 37.5 * MHz, "Frequency exceeds maximum value of 37.5 MHz"
        freq_word = int((frequency * (1 << 28)) / self.clk_freq) & 0x0FFFFFFF
        # B28 set: the device assembles the next two 14-bit writes into one 28-bit word.
        self.ctrl_reg |= AD9834_B28
        self.write(self.ctrl_reg)
        lsb = freq_word & 0x3FFF
        msb = (freq_word >> 14) & 0x3FFF
        self.write(freq_reg | lsb)
        self.write(freq_reg | msb)

    @kernel
    def set_frequency_reg_msb(self, freq_reg, word: TInt32):
        """
        Set the fourteen most significant bits MSBs of the specified frequency register.

        This method updates the specified frequency register with the provided MSB value.
        It configures the control register to indicate that the MSB is being set.

        :param freq_reg: The frequency register to update, must be one of
            :const:`AD9834_FREQ_REG_0` or :const:`AD9834_FREQ_REG_1`.
        :param word: The value to be written to the fourteen MSBs of the frequency register.

        The method first clears the appropriate control bits, sets :const:`AD9834_HLB` to
        indicate that the MSB is being sent, and then writes the updated control register
        followed by the MSB value to the specified frequency register.
        """
        if freq_reg not in FREQ_REGS:
            raise ValueError("Invalid frequency register")
        self.ctrl_reg &= ~AD9834_B28
        self.ctrl_reg |= AD9834_HLB
        self.write(self.ctrl_reg)
        self.write(freq_reg | (word & 0x3FFF))

    @kernel
    def set_frequency_reg_lsb(self, freq_reg, word: TInt32):
        """
        Set the fourteen least significant bits LSBs of the specified frequency register.

        This method updates the specified frequency register with the provided LSB value.
        It configures the control register to indicate that the LSB is being set.

        :param freq_reg: The frequency register to update, must be one of
            :const:`AD9834_FREQ_REG_0` or :const:`AD9834_FREQ_REG_1`.
        :param word: The value to be written to the fourteen LSBs of the frequency register.

        The method first clears the appropriate control bits and writes the updated control
        register followed by the LSB value to the specified frequency register.
        """
        if freq_reg not in FREQ_REGS:
            raise ValueError("Invalid frequency register")
        self.ctrl_reg &= ~AD9834_B28
        self.ctrl_reg &= ~AD9834_HLB
        self.write(self.ctrl_reg)
        self.write(freq_reg | (word & 0x3FFF))

    @kernel
    def select_frequency_reg(self, freq_reg):
        """
        Select the active frequency register for the phase accumulator.

        This method chooses between the two available frequency registers in the AD9834 to
        control the frequency of the output waveform. The control register is updated
        to reflect the selected frequency register.

        :param freq_reg: The frequency register to select. Must be one of
            :const:`AD9834_FREQ_REG_0` or :const:`AD9834_FREQ_REG_1`.
        """
        if freq_reg not in FREQ_REGS:
            raise ValueError("Invalid frequency register")
        if freq_reg == FREQ_REGS[0]:
            self.ctrl_reg &= ~AD9834_FSEL
        else:
            self.ctrl_reg |= AD9834_FSEL

        # Software (bit) control: make sure pin control is disabled.
        self.ctrl_reg &= ~AD9834_PIN_SW
        self.write(self.ctrl_reg)

    @kernel
    def set_phase_reg(self, phase_reg, phase: TInt32):
        """
        Set the phase for the specified phase register.

        This method updates the specified phase register with the provided phase value.

        :param phase_reg: The phase register to update, must be one of
            :const:`AD9834_PHASE_REG_0` or :const:`AD9834_PHASE_REG_1`.
        :param phase: The value to be written to the phase register.

        The method masks the phase value to ensure it fits within the 12-bit limit
        and writes it to the specified phase register.
        """
        if phase_reg not in PHASE_REGS:
            raise ValueError("Invalid phase register")
        phase_word = phase & 0x0FFF
        self.write(phase_reg | phase_word)

    @kernel
    def select_phase_reg(self, phase_reg):
        """
        Select the active phase register for the phase accumulator.

        This method chooses between the two available phase registers in the AD9834 to
        control the phase of the output waveform. The control register is updated
        to reflect the selected phase register.

        :param phase_reg: The phase register to select. Must be one of
            :const:`AD9834_PHASE_REG_0` or :const:`AD9834_PHASE_REG_1`.
        """
        if phase_reg not in PHASE_REGS:
            raise ValueError("Invalid phase register")
        if phase_reg == PHASE_REGS[0]:
            self.ctrl_reg &= ~AD9834_PSEL
        else:
            self.ctrl_reg |= AD9834_PSEL

        # Software (bit) control: make sure pin control is disabled.
        self.ctrl_reg &= ~AD9834_PIN_SW
        self.write(self.ctrl_reg)

    @kernel
    def enable_reset(self):
        """
        Enable the DDS reset.

        This method sets :const:`AD9834_RESET`, putting the AD9834 into a reset state.
        While in this state, the digital-to-analog converter (DAC) is not operational.

        This method should be called during initialization or when a reset is required
        to reinitialize the device and ensure proper operation.
        """
        self.ctrl_reg |= AD9834_RESET
        self.write(self.ctrl_reg)

    @kernel
    def output_enable(self):
        """
        Disable the DDS reset and start signal generation.

        This method clears :const:`AD9834_RESET`, allowing the AD9834 to begin generating
        signals. Once this method is called, the device will resume normal operation and
        output the generated waveform.

        This method should be called after configuration of the frequency and phase
        settings to activate the output.
        """
        self.ctrl_reg &= ~AD9834_RESET
        self.write(self.ctrl_reg)

    @kernel
    def sleep(self, dac_pd: bool = False, clk_dis: bool = False):
        """
        Put the AD9834 into sleep mode by selectively powering down the DAC and/or disabling
        the internal clock.

        This method controls the sleep mode behavior of the AD9834 by setting or clearing the
        corresponding bits in the control register. Two independent options can be specified:

        :param dac_pd: Set to ``True`` to power down the DAC (:const:`AD9834_SLEEP12` is set).
            ``False`` will leave the DAC active.
        :param clk_dis: Set to ``True`` to disable the internal clock (:const:`AD9834_SLEEP1` is set).
            ``False`` will keep the clock running.

        Both options can be enabled independently, allowing the DAC and/or clock to be powered down as needed.

        The method updates the control register and writes the changes to the AD9834 device.
        """
        if dac_pd:
            self.ctrl_reg |= AD9834_SLEEP12
        else:
            self.ctrl_reg &= ~AD9834_SLEEP12

        if clk_dis:
            self.ctrl_reg |= AD9834_SLEEP1
        else:
            self.ctrl_reg &= ~AD9834_SLEEP1

        self.write(self.ctrl_reg)

    @kernel
    def awake(self):
        """
        Exit sleep mode and restore normal operation.

        This method brings the AD9834 out of sleep mode by clearing any DAC power-down or
        internal clock disable settings. It calls :meth:`sleep()` with no arguments,
        effectively setting both ``dac_pd`` and ``clk_dis`` to ``False``.

        The device will resume generating output based on the current frequency and phase
        settings.
        """
        self.sleep()

    @kernel
    def config_sign_bit_out(
        self,
        high_z: bool = False,
        msb_2: bool = False,
        msb: bool = False,
        comp_out: bool = False,
    ):
        """
        Configure the ``SIGN BIT OUT`` pin for various output modes.

        This method sets the output mode for the ``SIGN BIT OUT`` pin of the AD9834 based on the provided flags.
        The user can enable one of several modes, including high impedance, MSB/2 output, MSB output,
        or comparator output. These modes are mutually exclusive, and passing ``True`` to one flag will
        configure the corresponding mode, while other flags should be left as ``False``.

        :param high_z: Set to ``True`` to place the ``SIGN BIT OUT`` pin in high impedance (disabled) mode.
        :param msb_2: Set to ``True`` to output DAC Data MSB divided by 2 on the ``SIGN BIT OUT`` pin.
        :param msb: Set to ``True`` to output DAC Data MSB on the ``SIGN BIT OUT`` pin.
        :param comp_out: Set to ``True`` to output the comparator signal on the ``SIGN BIT OUT`` pin.

        Only one flag should be set to ``True`` at a time. If no valid mode is selected, the ``SIGN BIT OUT``
        pin will default to high impedance mode.

        The method updates the control register with the appropriate configuration and writes it to the AD9834.
        """
        # Flags are checked in priority order; the first True flag wins.
        if high_z:
            self.ctrl_reg &= ~AD9834_OPBITEN
        elif msb_2:
            self.ctrl_reg |= AD9834_OPBITEN
            self.ctrl_reg &= ~AD9834_MODE
            self.ctrl_reg &= ~AD9834_SIGN_PIB
            self.ctrl_reg &= ~AD9834_DIV2
        elif msb:
            self.ctrl_reg |= AD9834_OPBITEN
            self.ctrl_reg &= ~AD9834_MODE
            self.ctrl_reg &= ~AD9834_SIGN_PIB
            self.ctrl_reg |= AD9834_DIV2
        elif comp_out:
            self.ctrl_reg |= AD9834_OPBITEN
            self.ctrl_reg &= ~AD9834_MODE
            self.ctrl_reg |= AD9834_SIGN_PIB
            self.ctrl_reg |= AD9834_DIV2
        else:
            self.ctrl_reg &= ~AD9834_OPBITEN

        self.write(self.ctrl_reg)

    @kernel
    def enable_triangular_waveform(self):
        """
        Enable triangular waveform generation.

        This method configures the AD9834 to output a triangular waveform. It does so
        by clearing :const:`AD9834_OPBITEN` in the control register and setting :const:`AD9834_MODE`.
        Once this method is called, the AD9834 will begin generating a triangular waveform
        at the frequency set for the selected frequency register.

        This method should be called when a triangular waveform is desired for signal
        generation. Ensure that the frequency is set appropriately before invoking this method.
        """
        self.ctrl_reg &= ~AD9834_OPBITEN
        self.ctrl_reg |= AD9834_MODE
        self.write(self.ctrl_reg)

    @kernel
    def disable_triangular_waveform(self):
        """
        Disable triangular waveform generation.

        This method disables the triangular waveform output by clearing :const:`AD9834_MODE`.
        After invoking this method, the AD9834 will cease generating a triangular waveform.
        The device can then be configured to output other waveform types if needed.

        This method should be called when switching to a different waveform type or
        when the triangular waveform is no longer required.
        """
        self.ctrl_reg &= ~AD9834_MODE
        self.write(self.ctrl_reg)

    @kernel
    def write(self, data: TInt32):
        """
        Write a 16-bit word to the AD9834.

        This method sends a 16-bit data word to the AD9834 via the SPI bus. The input
        data is left-shifted by 16 bits to ensure proper alignment for the SPI controller,
        allowing for accurate processing of the command by the AD9834.

        This method is used internally by other methods to update the control registers
        and frequency settings of the AD9834. It should not be called directly unless
        low-level register manipulation is required.

        :param data: The 16-bit word to be sent to the AD9834.
        """
        self.bus.write(data << 16)
|
@ -220,7 +220,7 @@ class AD9910:
|
||||
when changing frequency or phase. The DDS phase is the sum of the
|
||||
phase accumulator and the phase offset. The only discontinuous
|
||||
changes in the DDS output phase come from changes to the phase
|
||||
offset. This mode is also knows as "relative phase mode".
|
||||
offset. This mode is also known as "relative phase mode".
|
||||
:math:`\phi(t) = q(t^\prime) + p + (t - t^\prime) f`
|
||||
|
||||
* :const:`PHASE_MODE_ABSOLUTE`: the phase accumulator is reset when
|
||||
|
@ -1,5 +1,7 @@
|
||||
from enum import Enum
|
||||
import binascii
|
||||
import logging
|
||||
import io
|
||||
import struct
|
||||
|
||||
from sipyco.keepalive import create_connection
|
||||
@ -23,6 +25,8 @@ class Request(Enum):
|
||||
|
||||
DebugAllocator = 8
|
||||
|
||||
Flash = 9
|
||||
|
||||
|
||||
class Reply(Enum):
|
||||
Success = 1
|
||||
@ -46,15 +50,17 @@ class LogLevel(Enum):
|
||||
|
||||
|
||||
class CommMgmt:
|
||||
def __init__(self, host, port=1380):
|
||||
def __init__(self, host, port=1380, drtio_dest=0):
|
||||
self.host = host
|
||||
self.port = port
|
||||
self.drtio_dest = drtio_dest
|
||||
|
||||
def open(self):
|
||||
if hasattr(self, "socket"):
|
||||
return
|
||||
self.socket = create_connection(self.host, self.port)
|
||||
self.socket.sendall(b"ARTIQ management\n")
|
||||
self._write_int8(self.drtio_dest)
|
||||
endian = self._read(1)
|
||||
if endian == b"e":
|
||||
self.endian = "<"
|
||||
@ -194,3 +200,22 @@ class CommMgmt:
|
||||
|
||||
def debug_allocator(self):
|
||||
self._write_header(Request.DebugAllocator)
|
||||
|
||||
def flash(self, bin_paths):
|
||||
self._write_header(Request.Flash)
|
||||
|
||||
with io.BytesIO() as image_buf:
|
||||
for filename in bin_paths:
|
||||
with open(filename, "rb") as fi:
|
||||
bin_ = fi.read()
|
||||
if (len(bin_paths) > 1):
|
||||
image_buf.write(
|
||||
struct.pack(self.endian + "I", len(bin_)))
|
||||
image_buf.write(bin_)
|
||||
|
||||
crc = binascii.crc32(image_buf.getvalue())
|
||||
image_buf.write(struct.pack(self.endian + "I", crc))
|
||||
|
||||
self._write_bytes(image_buf.getvalue())
|
||||
|
||||
self._read_expect(Reply.RebootImminent)
|
||||
|
@ -55,6 +55,8 @@ class Core:
|
||||
(optional).
|
||||
:param analyze_at_run_end: automatically trigger the core device analyzer
|
||||
proxy after the Experiment's run stage finishes.
|
||||
:param report_invariants: report variables which are not changed inside
|
||||
kernels and are thus candidates for KernelInvariant annotation
|
||||
"""
|
||||
ref_period: KernelInvariant[float]
|
||||
ref_multiplier: KernelInvariant[int32]
|
||||
@ -64,7 +66,8 @@ class Core:
|
||||
host, ref_period,
|
||||
analyzer_proxy=None, analyze_at_run_end=False,
|
||||
ref_multiplier=8,
|
||||
target="rv32g", satellite_cpu_targets={}):
|
||||
target="rv32g", satellite_cpu_targets={},
|
||||
report_invariants=False):
|
||||
self.ref_period = ref_period
|
||||
self.ref_multiplier = ref_multiplier
|
||||
|
||||
@ -86,6 +89,7 @@ class Core:
|
||||
self.analyzer_proxy_name = analyzer_proxy
|
||||
self.analyze_at_run_end = analyze_at_run_end
|
||||
self.analyzer_proxy = None
|
||||
self.report_invariants = report_invariants
|
||||
|
||||
def notify_run_end(self):
|
||||
if self.analyze_at_run_end:
|
||||
@ -116,6 +120,7 @@ class Core:
|
||||
obj = method
|
||||
name = ""
|
||||
|
||||
# NAC3TODO: handle self.report_invariants
|
||||
if file_output is None:
|
||||
return self.compiler.compile_method_to_mem(obj, name, args, embedding_map)
|
||||
else:
|
||||
|
@ -508,7 +508,7 @@ class Channel:
|
||||
def get_y_mu(self, profile: int32) -> int32:
|
||||
"""Get a profile's IIR state (filter output, Y0) in machine units.
|
||||
|
||||
The IIR state is also know as the "integrator", or the DDS amplitude
|
||||
The IIR state is also known as the "integrator", or the DDS amplitude
|
||||
scale factor. It is 17 bits wide and unsigned.
|
||||
|
||||
This method does not advance the timeline but consumes all slack.
|
||||
@ -526,7 +526,7 @@ class Channel:
|
||||
def get_y(self, profile: int32) -> float:
|
||||
"""Get a profile's IIR state (filter output, Y0).
|
||||
|
||||
The IIR state is also know as the "integrator", or the DDS amplitude
|
||||
The IIR state is also known as the "integrator", or the DDS amplitude
|
||||
scale factor. It is 17 bits wide and unsigned.
|
||||
|
||||
This method does not advance the timeline but consumes all slack.
|
||||
@ -544,7 +544,7 @@ class Channel:
|
||||
def set_y_mu(self, profile: int32, y: int32):
|
||||
"""Set a profile's IIR state (filter output, Y0) in machine units.
|
||||
|
||||
The IIR state is also know as the "integrator", or the DDS amplitude
|
||||
The IIR state is also known as the "integrator", or the DDS amplitude
|
||||
scale factor. It is 17 bits wide and unsigned.
|
||||
|
||||
This method must not be used when the servo could be writing to the
|
||||
@ -564,7 +564,7 @@ class Channel:
|
||||
def set_y(self, profile: int32, y: float) -> int32:
|
||||
"""Set a profile's IIR state (filter output, Y0).
|
||||
|
||||
The IIR state is also know as the "integrator", or the DDS amplitude
|
||||
The IIR state is also known as the "integrator", or the DDS amplitude
|
||||
scale factor. It is 17 bits wide and unsigned.
|
||||
|
||||
This method must not be used when the servo could be writing to the
|
||||
|
@ -28,6 +28,7 @@ class _ArgumentEditor(EntryTreeWidget):
|
||||
def __init__(self, manager, dock, expurl):
|
||||
self.manager = manager
|
||||
self.expurl = expurl
|
||||
self.dock = dock
|
||||
|
||||
EntryTreeWidget.__init__(self)
|
||||
|
||||
@ -78,6 +79,18 @@ class _ArgumentEditor(EntryTreeWidget):
|
||||
argument["desc"] = procdesc
|
||||
argument["state"] = state
|
||||
self.update_argument(name, argument)
|
||||
self.dock.apply_colors()
|
||||
|
||||
def apply_color(self, palette, color):
|
||||
self.setPalette(palette)
|
||||
for child in self.findChildren(QtWidgets.QWidget):
|
||||
child.setPalette(palette)
|
||||
child.setAutoFillBackground(True)
|
||||
items = self.findItems("*",
|
||||
QtCore.Qt.MatchFlag.MatchWildcard | QtCore.Qt.MatchFlag.MatchRecursive)
|
||||
for item in items:
|
||||
for column in range(item.columnCount()):
|
||||
item.setBackground(column, QtGui.QColor(color))
|
||||
|
||||
# Hooks that allow user-supplied argument editors to react to imminent user
|
||||
# actions. Here, we always keep the manager-stored submission arguments
|
||||
@ -92,6 +105,19 @@ class _ArgumentEditor(EntryTreeWidget):
|
||||
log_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
|
||||
|
||||
|
||||
class _ColoredTitleBar(QtWidgets.QProxyStyle):
|
||||
def __init__(self, color, style=None):
|
||||
super().__init__(style)
|
||||
self.color = color
|
||||
|
||||
def drawComplexControl(self, control, option, painter, widget=None):
|
||||
if control == QtWidgets.QStyle.ComplexControl.CC_TitleBar:
|
||||
option = QtWidgets.QStyleOptionTitleBar(option)
|
||||
option.palette.setColor(QtGui.QPalette.ColorRole.Window, QtGui.QColor(self.color))
|
||||
option.palette.setColor(QtGui.QPalette.ColorRole.Highlight, QtGui.QColor(self.color))
|
||||
self.baseStyle().drawComplexControl(control, option, painter, widget)
|
||||
|
||||
|
||||
class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||
sigClosed = QtCore.pyqtSignal()
|
||||
|
||||
@ -187,6 +213,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||
devarg_override.lineEdit().setPlaceholderText("Override device arguments")
|
||||
devarg_override.lineEdit().setClearButtonEnabled(True)
|
||||
devarg_override.insertItem(0, "core:analyze_at_run_end=True")
|
||||
devarg_override.insertItem(1, "core:report_invariants=True")
|
||||
self.layout.addWidget(devarg_override, 2, 3)
|
||||
|
||||
devarg_override.setCurrentText(options["devarg_override"])
|
||||
@ -302,14 +329,61 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||
self.argeditor = editor_class(self.manager, self, self.expurl)
|
||||
self.layout.addWidget(self.argeditor, 0, 0, 1, 5)
|
||||
self.argeditor.restore_state(argeditor_state)
|
||||
self.apply_colors()
|
||||
|
||||
def contextMenuEvent(self, event):
|
||||
menu = QtWidgets.QMenu(self)
|
||||
select_title_bar_color = menu.addAction("Select title bar color")
|
||||
select_window_color = menu.addAction("Select window color")
|
||||
reset_colors = menu.addAction("Reset to default colors")
|
||||
menu.addSeparator()
|
||||
reset_sched = menu.addAction("Reset scheduler settings")
|
||||
action = menu.exec(self.mapToGlobal(event.pos()))
|
||||
if action == reset_sched:
|
||||
if action == select_title_bar_color:
|
||||
self.select_color("title_bar")
|
||||
elif action == select_window_color:
|
||||
self.select_color("window")
|
||||
elif action == reset_colors:
|
||||
self.reset_colors()
|
||||
elif action == reset_sched:
|
||||
asyncio.ensure_future(self._recompute_sched_options_task())
|
||||
|
||||
def select_color(self, key):
|
||||
color = QtWidgets.QColorDialog.getColor(
|
||||
title=f"Select {key.replace('_', ' ').title()} color")
|
||||
if color.isValid():
|
||||
self.manager.set_color(self.expurl, key, color.name())
|
||||
self.apply_colors()
|
||||
|
||||
def apply_colors(self):
|
||||
colors = self.manager.get_colors(self.expurl)
|
||||
if colors is None:
|
||||
palette = QtWidgets.QApplication.palette()
|
||||
colors = {
|
||||
"window": palette.color(QtGui.QPalette.ColorRole.Window).name(),
|
||||
"title_bar": palette.color(QtGui.QPalette.ColorRole.Highlight).name(),
|
||||
}
|
||||
self.manager.colors[self.expurl] = colors
|
||||
colors["window_text"] = "#000000" if QtGui.QColor(
|
||||
colors["window"]).lightness() > 128 else "#FFFFFF"
|
||||
self.modify_palette(colors)
|
||||
self.setStyle(_ColoredTitleBar(colors["title_bar"]))
|
||||
self.argeditor.apply_color(self.palette(), (colors["window"]))
|
||||
|
||||
def modify_palette(self, colors):
|
||||
palette = self.palette()
|
||||
palette.setColor(QtGui.QPalette.ColorRole.Window, QtGui.QColor(colors["window"]))
|
||||
palette.setColor(QtGui.QPalette.ColorRole.Base, QtGui.QColor(colors["window"]))
|
||||
palette.setColor(QtGui.QPalette.ColorRole.Button, QtGui.QColor(colors["window"]))
|
||||
palette.setColor(QtGui.QPalette.ColorRole.Text, QtGui.QColor(colors["window_text"]))
|
||||
palette.setColor(QtGui.QPalette.ColorRole.ButtonText, QtGui.QColor(colors["window_text"]))
|
||||
palette.setColor(QtGui.QPalette.ColorRole.WindowText, QtGui.QColor(colors["window_text"]))
|
||||
self.setPalette(palette)
|
||||
|
||||
def reset_colors(self):
|
||||
self.manager.reset_colors(self.expurl)
|
||||
self.apply_colors()
|
||||
|
||||
async def _recompute_sched_options_task(self):
|
||||
try:
|
||||
expdesc, _ = await self.manager.compute_expdesc(self.expurl)
|
||||
@ -456,6 +530,7 @@ class ExperimentManager:
|
||||
self.submission_options = dict()
|
||||
self.submission_arguments = dict()
|
||||
self.argument_ui_names = dict()
|
||||
self.colors = dict()
|
||||
|
||||
self.datasets = dict()
|
||||
dataset_sub.add_setmodel_callback(self.set_dataset_model)
|
||||
@ -482,6 +557,18 @@ class ExperimentManager:
|
||||
def set_schedule_model(self, model):
|
||||
self.schedule = model.backing_store
|
||||
|
||||
def set_color(self, expurl, key, value):
|
||||
if expurl not in self.colors:
|
||||
self.colors[expurl] = {}
|
||||
self.colors[expurl][key] = value
|
||||
|
||||
def get_colors(self, expurl):
|
||||
return self.colors.get(expurl)
|
||||
|
||||
def reset_colors(self, expurl):
|
||||
if expurl in self.colors:
|
||||
del self.colors[expurl]
|
||||
|
||||
def resolve_expurl(self, expurl):
|
||||
if expurl[:5] == "repo:":
|
||||
expinfo = self.explist[expurl[5:]]
|
||||
@ -591,6 +678,7 @@ class ExperimentManager:
|
||||
self.open_experiments[expurl] = dock
|
||||
dock.setAttribute(QtCore.Qt.WidgetAttribute.WA_DeleteOnClose)
|
||||
self.main_window.centralWidget().addSubWindow(dock)
|
||||
dock.apply_colors()
|
||||
dock.show()
|
||||
dock.sigClosed.connect(partial(self.on_dock_closed, expurl))
|
||||
if expurl in self.dock_states:
|
||||
@ -707,7 +795,8 @@ class ExperimentManager:
|
||||
"arguments": self.submission_arguments,
|
||||
"docks": self.dock_states,
|
||||
"argument_uis": self.argument_ui_names,
|
||||
"open_docks": set(self.open_experiments.keys())
|
||||
"open_docks": set(self.open_experiments.keys()),
|
||||
"colors": self.colors
|
||||
}
|
||||
|
||||
def restore_state(self, state):
|
||||
@ -718,6 +807,7 @@ class ExperimentManager:
|
||||
self.submission_options = state["options"]
|
||||
self.submission_arguments = state["arguments"]
|
||||
self.argument_ui_names = state.get("argument_uis", {})
|
||||
self.colors = state.get("colors", {})
|
||||
for expurl in state["open_docks"]:
|
||||
self.open_experiment(expurl)
|
||||
|
||||
|
@ -847,7 +847,7 @@ class _MonInjDock(QDockWidgetCloseDetect):
|
||||
delete_action = QtGui.QAction("Delete widget", menu)
|
||||
delete_action.triggered.connect(partial(self.delete_widget, index))
|
||||
menu.addAction(delete_action)
|
||||
menu.exec_(self.flow.mapToGlobal(pos))
|
||||
menu.exec(self.flow.mapToGlobal(pos))
|
||||
|
||||
def delete_all_widgets(self):
|
||||
for index in reversed(range(self.flow.count())):
|
||||
|
@ -6,6 +6,7 @@ import logging
|
||||
from PyQt6 import QtCore, QtWidgets, QtGui
|
||||
|
||||
from artiq.gui.models import DictSyncModel
|
||||
from artiq.gui.tools import SelectableColumnTableView
|
||||
from artiq.tools import elide
|
||||
|
||||
|
||||
@ -66,7 +67,7 @@ class ScheduleDock(QtWidgets.QDockWidget):
|
||||
|
||||
self.schedule_ctl = schedule_ctl
|
||||
|
||||
self.table = QtWidgets.QTableView()
|
||||
self.table = SelectableColumnTableView()
|
||||
self.table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectionBehavior.SelectRows)
|
||||
self.table.setSelectionMode(QtWidgets.QAbstractItemView.SelectionMode.SingleSelection)
|
||||
self.table.verticalHeader().setSectionResizeMode(
|
||||
@ -104,6 +105,9 @@ class ScheduleDock(QtWidgets.QDockWidget):
|
||||
h.resizeSection(6, 20 * cw)
|
||||
h.resizeSection(7, 20 * cw)
|
||||
|
||||
# Allow user to reorder or disable columns.
|
||||
h.setSectionsMovable(True)
|
||||
|
||||
def set_model(self, model):
|
||||
self.table_model = model
|
||||
self.table.setModel(self.table_model)
|
||||
@ -154,4 +158,9 @@ class ScheduleDock(QtWidgets.QDockWidget):
|
||||
return bytes(self.table.horizontalHeader().saveState())
|
||||
|
||||
def restore_state(self, state):
|
||||
self.table.horizontalHeader().restoreState(QtCore.QByteArray(state))
|
||||
h = self.table.horizontalHeader()
|
||||
h.restoreState(QtCore.QByteArray(state))
|
||||
|
||||
# The state includes the sectionsMovable property, so set it again to be able to
|
||||
# deal with pre-existing save files from when we used not to enable it.
|
||||
h.setSectionsMovable(True)
|
||||
|
4
artiq/firmware/Cargo.lock
generated
4
artiq/firmware/Cargo.lock
generated
@ -513,6 +513,7 @@ dependencies = [
|
||||
"board_misoc",
|
||||
"build_misoc",
|
||||
"byteorder",
|
||||
"crc",
|
||||
"cslice",
|
||||
"dyld",
|
||||
"eh",
|
||||
@ -553,10 +554,13 @@ dependencies = [
|
||||
"board_artiq",
|
||||
"board_misoc",
|
||||
"build_misoc",
|
||||
"byteorder",
|
||||
"crc",
|
||||
"cslice",
|
||||
"eh",
|
||||
"io",
|
||||
"log",
|
||||
"logger_artiq",
|
||||
"proto_artiq",
|
||||
"riscv",
|
||||
]
|
||||
|
@ -37,6 +37,14 @@ fn recv<R, F: FnOnce(&Message) -> R>(f: F) -> R {
|
||||
result
|
||||
}
|
||||
|
||||
fn try_recv<F: FnOnce(&Message)>(f: F) {
|
||||
let msg_ptr = mailbox::receive();
|
||||
if msg_ptr != 0 {
|
||||
f(unsafe { &*(msg_ptr as *const Message) });
|
||||
mailbox::acknowledge();
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! recv {
|
||||
($p:pat => $e:expr) => {
|
||||
recv(move |request| {
|
||||
@ -473,7 +481,15 @@ extern "C-unwind" fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) {
|
||||
|
||||
|
||||
extern "C-unwind" fn subkernel_load_run(id: u32, destination: u8, run: bool) {
|
||||
send(&SubkernelLoadRunRequest { id: id, destination: destination, run: run });
|
||||
let timestamp = unsafe {
|
||||
((csr::rtio::now_hi_read() as u64) << 32) | (csr::rtio::now_lo_read() as u64)
|
||||
};
|
||||
send(&SubkernelLoadRunRequest {
|
||||
id: id,
|
||||
destination: destination,
|
||||
run: run,
|
||||
timestamp: timestamp,
|
||||
});
|
||||
recv!(&SubkernelLoadRunReply { succeeded } => {
|
||||
if !succeeded {
|
||||
raise!("SubkernelError",
|
||||
@ -601,6 +617,15 @@ pub unsafe fn main() {
|
||||
},
|
||||
Ok(library) => {
|
||||
send(&LoadReply(Ok(())));
|
||||
// Master kernel would just acknowledge kernel load
|
||||
// Satellites may send UpdateNow
|
||||
try_recv(move |msg| match msg {
|
||||
UpdateNow(timestamp) => unsafe {
|
||||
csr::rtio::now_hi_write((*timestamp >> 32) as u32);
|
||||
csr::rtio::now_lo_write(*timestamp as u32);
|
||||
}
|
||||
_ => unreachable!()
|
||||
});
|
||||
library
|
||||
}
|
||||
}
|
||||
|
@ -187,6 +187,24 @@ unsafe fn align_comma() {
|
||||
}
|
||||
}
|
||||
|
||||
pub unsafe fn align_wordslip(trx_no: u8) -> bool {
|
||||
csr::eem_transceiver::transceiver_sel_write(trx_no);
|
||||
|
||||
for slip in 0..=1 {
|
||||
csr::eem_transceiver::wordslip_write(slip as u8);
|
||||
clock::spin_us(1);
|
||||
csr::eem_transceiver::comma_align_reset_write(1);
|
||||
clock::spin_us(100);
|
||||
|
||||
if csr::eem_transceiver::comma_read() == 1 {
|
||||
debug!("comma alignment completed with {} wordslip", slip);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
pub fn init() {
|
||||
for trx_no in 0..csr::CONFIG_EEM_DRTIO_COUNT {
|
||||
unsafe {
|
||||
@ -211,9 +229,6 @@ pub fn init() {
|
||||
}
|
||||
});
|
||||
|
||||
unsafe {
|
||||
align_comma();
|
||||
csr::eem_transceiver::rx_ready_write(1);
|
||||
}
|
||||
unsafe { align_comma(); }
|
||||
}
|
||||
}
|
||||
|
@ -114,7 +114,17 @@ pub unsafe fn write(mut addr: usize, mut data: &[u8]) {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(soc_platform = "kasli", soc_platform = "kc705"))]
|
||||
pub unsafe fn flash_binary(origin: usize, payload: &[u8]) {
|
||||
assert!((origin & (SECTOR_SIZE - 1)) == 0);
|
||||
let mut offset = 0;
|
||||
while offset < payload.len() {
|
||||
erase_sector(origin + offset);
|
||||
offset += SECTOR_SIZE;
|
||||
}
|
||||
write(origin, payload);
|
||||
}
|
||||
|
||||
#[cfg(any(soc_platform = "kasli", soc_platform = "kc705", soc_platform = "efc"))]
|
||||
pub unsafe fn reload () -> ! {
|
||||
csr::icap::iprog_write(1);
|
||||
loop {}
|
||||
|
@ -120,13 +120,32 @@ pub enum Packet {
|
||||
|
||||
SubkernelAddDataRequest { destination: u8, id: u32, status: PayloadStatus, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] },
|
||||
SubkernelAddDataReply { succeeded: bool },
|
||||
SubkernelLoadRunRequest { source: u8, destination: u8, id: u32, run: bool },
|
||||
SubkernelLoadRunRequest { source: u8, destination: u8, id: u32, run: bool, timestamp: u64 },
|
||||
SubkernelLoadRunReply { destination: u8, succeeded: bool },
|
||||
SubkernelFinished { destination: u8, id: u32, with_exception: bool, exception_src: u8 },
|
||||
SubkernelExceptionRequest { source: u8, destination: u8 },
|
||||
SubkernelException { destination: u8, last: bool, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] },
|
||||
SubkernelMessage { source: u8, destination: u8, id: u32, status: PayloadStatus, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] },
|
||||
SubkernelMessageAck { destination: u8 },
|
||||
|
||||
CoreMgmtGetLogRequest { destination: u8, clear: bool },
|
||||
CoreMgmtClearLogRequest { destination: u8 },
|
||||
CoreMgmtSetLogLevelRequest { destination: u8, log_level: u8 },
|
||||
CoreMgmtSetUartLogLevelRequest { destination: u8, log_level: u8 },
|
||||
CoreMgmtConfigReadRequest { destination: u8, length: u16, key: [u8; MASTER_PAYLOAD_MAX_SIZE] },
|
||||
CoreMgmtConfigReadContinue { destination: u8 },
|
||||
CoreMgmtConfigWriteRequest { destination: u8, last: bool, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] },
|
||||
CoreMgmtConfigRemoveRequest { destination: u8, length: u16, key: [u8; MASTER_PAYLOAD_MAX_SIZE] },
|
||||
CoreMgmtConfigEraseRequest { destination: u8 },
|
||||
CoreMgmtRebootRequest { destination: u8 },
|
||||
CoreMgmtAllocatorDebugRequest { destination: u8 },
|
||||
CoreMgmtFlashRequest { destination: u8, payload_length: u32 },
|
||||
CoreMgmtFlashAddDataRequest { destination: u8, last: bool, length: u16, data: [u8; MASTER_PAYLOAD_MAX_SIZE] },
|
||||
CoreMgmtDropLinkAck { destination: u8 },
|
||||
CoreMgmtDropLink,
|
||||
CoreMgmtGetLogReply { last: bool, length: u16, data: [u8; SAT_PAYLOAD_MAX_SIZE] },
|
||||
CoreMgmtConfigReadReply { last: bool, length: u16, value: [u8; SAT_PAYLOAD_MAX_SIZE] },
|
||||
CoreMgmtReply { succeeded: bool },
|
||||
}
|
||||
|
||||
impl Packet {
|
||||
@ -354,7 +373,8 @@ impl Packet {
|
||||
source: reader.read_u8()?,
|
||||
destination: reader.read_u8()?,
|
||||
id: reader.read_u32()?,
|
||||
run: reader.read_bool()?
|
||||
run: reader.read_bool()?,
|
||||
timestamp: reader.read_u64()?
|
||||
},
|
||||
0xc5 => Packet::SubkernelLoadRunReply {
|
||||
destination: reader.read_u8()?,
|
||||
@ -404,6 +424,115 @@ impl Packet {
|
||||
destination: reader.read_u8()?
|
||||
},
|
||||
|
||||
0xd0 => Packet::CoreMgmtGetLogRequest {
|
||||
destination: reader.read_u8()?,
|
||||
clear: reader.read_bool()?,
|
||||
},
|
||||
0xd1 => Packet::CoreMgmtClearLogRequest {
|
||||
destination: reader.read_u8()?,
|
||||
},
|
||||
0xd2 => Packet::CoreMgmtSetLogLevelRequest {
|
||||
destination: reader.read_u8()?,
|
||||
log_level: reader.read_u8()?,
|
||||
},
|
||||
0xd3 => Packet::CoreMgmtSetUartLogLevelRequest {
|
||||
destination: reader.read_u8()?,
|
||||
log_level: reader.read_u8()?,
|
||||
},
|
||||
0xd4 => {
|
||||
let destination = reader.read_u8()?;
|
||||
let length = reader.read_u16()?;
|
||||
let mut key: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
||||
reader.read_exact(&mut key[0..length as usize])?;
|
||||
Packet::CoreMgmtConfigReadRequest {
|
||||
destination: destination,
|
||||
length: length,
|
||||
key: key,
|
||||
}
|
||||
},
|
||||
0xd5 => Packet::CoreMgmtConfigReadContinue {
|
||||
destination: reader.read_u8()?,
|
||||
},
|
||||
0xd6 => {
|
||||
let destination = reader.read_u8()?;
|
||||
let last = reader.read_bool()?;
|
||||
let length = reader.read_u16()?;
|
||||
let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
||||
reader.read_exact(&mut data[0..length as usize])?;
|
||||
Packet::CoreMgmtConfigWriteRequest {
|
||||
destination: destination,
|
||||
last: last,
|
||||
length: length,
|
||||
data: data,
|
||||
}
|
||||
},
|
||||
0xd7 => {
|
||||
let destination = reader.read_u8()?;
|
||||
let length = reader.read_u16()?;
|
||||
let mut key: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
||||
reader.read_exact(&mut key[0..length as usize])?;
|
||||
Packet::CoreMgmtConfigRemoveRequest {
|
||||
destination: destination,
|
||||
length: length,
|
||||
key: key,
|
||||
}
|
||||
},
|
||||
0xd8 => Packet::CoreMgmtConfigEraseRequest {
|
||||
destination: reader.read_u8()?,
|
||||
},
|
||||
0xd9 => Packet::CoreMgmtRebootRequest {
|
||||
destination: reader.read_u8()?,
|
||||
},
|
||||
0xda => Packet::CoreMgmtAllocatorDebugRequest {
|
||||
destination: reader.read_u8()?,
|
||||
},
|
||||
0xdb => Packet::CoreMgmtFlashRequest {
|
||||
destination: reader.read_u8()?,
|
||||
payload_length: reader.read_u32()?,
|
||||
},
|
||||
0xdc => {
|
||||
let destination = reader.read_u8()?;
|
||||
let last = reader.read_bool()?;
|
||||
let length = reader.read_u16()?;
|
||||
let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
||||
reader.read_exact(&mut data[0..length as usize])?;
|
||||
Packet::CoreMgmtFlashAddDataRequest {
|
||||
destination: destination,
|
||||
last: last,
|
||||
length: length,
|
||||
data: data,
|
||||
}
|
||||
},
|
||||
0xdd => Packet::CoreMgmtDropLinkAck {
|
||||
destination: reader.read_u8()?,
|
||||
},
|
||||
0xde => Packet::CoreMgmtDropLink,
|
||||
0xdf => {
|
||||
let last = reader.read_bool()?;
|
||||
let length = reader.read_u16()?;
|
||||
let mut data: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
|
||||
reader.read_exact(&mut data[0..length as usize])?;
|
||||
Packet::CoreMgmtGetLogReply {
|
||||
last: last,
|
||||
length: length,
|
||||
data: data,
|
||||
}
|
||||
},
|
||||
0xe0 => {
|
||||
let last = reader.read_bool()?;
|
||||
let length = reader.read_u16()?;
|
||||
let mut value: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
|
||||
reader.read_exact(&mut value[0..length as usize])?;
|
||||
Packet::CoreMgmtConfigReadReply {
|
||||
last: last,
|
||||
length: length,
|
||||
value: value,
|
||||
}
|
||||
},
|
||||
0xe1 => Packet::CoreMgmtReply {
|
||||
succeeded: reader.read_bool()?,
|
||||
},
|
||||
|
||||
ty => return Err(Error::UnknownPacket(ty))
|
||||
})
|
||||
}
|
||||
@ -647,12 +776,13 @@ impl Packet {
|
||||
writer.write_u8(0xc1)?;
|
||||
writer.write_bool(succeeded)?;
|
||||
},
|
||||
Packet::SubkernelLoadRunRequest { source, destination, id, run } => {
|
||||
Packet::SubkernelLoadRunRequest { source, destination, id, run, timestamp } => {
|
||||
writer.write_u8(0xc4)?;
|
||||
writer.write_u8(source)?;
|
||||
writer.write_u8(destination)?;
|
||||
writer.write_u32(id)?;
|
||||
writer.write_bool(run)?;
|
||||
writer.write_u64(timestamp)?;
|
||||
},
|
||||
Packet::SubkernelLoadRunReply { destination, succeeded } => {
|
||||
writer.write_u8(0xc5)?;
|
||||
@ -691,6 +821,108 @@ impl Packet {
|
||||
writer.write_u8(0xcc)?;
|
||||
writer.write_u8(destination)?;
|
||||
},
|
||||
|
||||
Packet::CoreMgmtGetLogRequest { destination, clear } => {
|
||||
writer.write_u8(0xd0)?;
|
||||
writer.write_u8(destination)?;
|
||||
writer.write_bool(clear)?;
|
||||
},
|
||||
Packet::CoreMgmtClearLogRequest { destination } => {
|
||||
writer.write_u8(0xd1)?;
|
||||
writer.write_u8(destination)?;
|
||||
},
|
||||
Packet::CoreMgmtSetLogLevelRequest { destination, log_level } => {
|
||||
writer.write_u8(0xd2)?;
|
||||
writer.write_u8(destination)?;
|
||||
writer.write_u8(log_level)?;
|
||||
},
|
||||
Packet::CoreMgmtSetUartLogLevelRequest { destination, log_level } => {
|
||||
writer.write_u8(0xd3)?;
|
||||
writer.write_u8(destination)?;
|
||||
writer.write_u8(log_level)?;
|
||||
},
|
||||
Packet::CoreMgmtConfigReadRequest {
|
||||
destination,
|
||||
length,
|
||||
key,
|
||||
} => {
|
||||
writer.write_u8(0xd4)?;
|
||||
writer.write_u8(destination)?;
|
||||
writer.write_u16(length)?;
|
||||
writer.write_all(&key[0..length as usize])?;
|
||||
},
|
||||
Packet::CoreMgmtConfigReadContinue { destination } => {
|
||||
writer.write_u8(0xd5)?;
|
||||
writer.write_u8(destination)?;
|
||||
},
|
||||
Packet::CoreMgmtConfigWriteRequest {
|
||||
destination,
|
||||
last,
|
||||
length,
|
||||
data,
|
||||
} => {
|
||||
writer.write_u8(0xd6)?;
|
||||
writer.write_u8(destination)?;
|
||||
writer.write_bool(last)?;
|
||||
writer.write_u16(length)?;
|
||||
writer.write_all(&data[0..length as usize])?;
|
||||
},
|
||||
Packet::CoreMgmtConfigRemoveRequest {
|
||||
destination,
|
||||
length,
|
||||
key,
|
||||
} => {
|
||||
writer.write_u8(0xd7)?;
|
||||
writer.write_u8(destination)?;
|
||||
writer.write_u16(length)?;
|
||||
writer.write_all(&key[0..length as usize])?;
|
||||
},
|
||||
Packet::CoreMgmtConfigEraseRequest { destination } => {
|
||||
writer.write_u8(0xd8)?;
|
||||
writer.write_u8(destination)?;
|
||||
},
|
||||
Packet::CoreMgmtRebootRequest { destination } => {
|
||||
writer.write_u8(0xd9)?;
|
||||
writer.write_u8(destination)?;
|
||||
},
|
||||
Packet::CoreMgmtAllocatorDebugRequest { destination } => {
|
||||
writer.write_u8(0xda)?;
|
||||
writer.write_u8(destination)?;
|
||||
},
|
||||
Packet::CoreMgmtFlashRequest { destination, payload_length } => {
|
||||
writer.write_u8(0xdb)?;
|
||||
writer.write_u8(destination)?;
|
||||
writer.write_u32(payload_length)?;
|
||||
},
|
||||
Packet::CoreMgmtFlashAddDataRequest { destination, last, length, data } => {
|
||||
writer.write_u8(0xdc)?;
|
||||
writer.write_u8(destination)?;
|
||||
writer.write_bool(last)?;
|
||||
writer.write_u16(length)?;
|
||||
writer.write_all(&data[..length as usize])?;
|
||||
},
|
||||
Packet::CoreMgmtDropLinkAck { destination } => {
|
||||
writer.write_u8(0xdd)?;
|
||||
writer.write_u8(destination)?;
|
||||
},
|
||||
Packet::CoreMgmtDropLink =>
|
||||
writer.write_u8(0xde)?,
|
||||
Packet::CoreMgmtGetLogReply { last, length, data } => {
|
||||
writer.write_u8(0xdf)?;
|
||||
writer.write_bool(last)?;
|
||||
writer.write_u16(length)?;
|
||||
writer.write_all(&data[0..length as usize])?;
|
||||
},
|
||||
Packet::CoreMgmtConfigReadReply { last, length, value } => {
|
||||
writer.write_u8(0xe0)?;
|
||||
writer.write_bool(last)?;
|
||||
writer.write_u16(length)?;
|
||||
writer.write_all(&value[0..length as usize])?;
|
||||
},
|
||||
Packet::CoreMgmtReply { succeeded } => {
|
||||
writer.write_u8(0xe1)?;
|
||||
writer.write_bool(succeeded)?;
|
||||
},
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@ -724,7 +956,8 @@ impl Packet {
|
||||
Packet::DmaAddTraceReply { .. } | Packet::DmaRemoveTraceReply { .. } |
|
||||
Packet::DmaPlaybackReply { .. } | Packet::SubkernelLoadRunReply { .. } |
|
||||
Packet::SubkernelMessageAck { .. } | Packet::DmaPlaybackStatus { .. } |
|
||||
Packet::SubkernelFinished { .. } => false,
|
||||
Packet::SubkernelFinished { .. } | Packet::CoreMgmtDropLinkAck { .. } |
|
||||
Packet::InjectionRequest { .. } => false,
|
||||
_ => true
|
||||
}
|
||||
}
|
||||
|
@ -103,7 +103,7 @@ pub enum Message<'a> {
|
||||
SpiReadReply { succeeded: bool, data: u32 },
|
||||
SpiBasicReply { succeeded: bool },
|
||||
|
||||
SubkernelLoadRunRequest { id: u32, destination: u8, run: bool },
|
||||
SubkernelLoadRunRequest { id: u32, destination: u8, run: bool, timestamp: u64 },
|
||||
SubkernelLoadRunReply { succeeded: bool },
|
||||
SubkernelAwaitFinishRequest { id: u32, timeout: i64 },
|
||||
SubkernelAwaitFinishReply,
|
||||
@ -112,6 +112,8 @@ pub enum Message<'a> {
|
||||
SubkernelMsgRecvReply { count: u8 },
|
||||
SubkernelError(SubkernelStatus<'a>),
|
||||
|
||||
UpdateNow(u64),
|
||||
|
||||
Log(fmt::Arguments<'a>),
|
||||
LogSlice(&'a str)
|
||||
}
|
||||
|
@ -16,7 +16,9 @@ pub enum Error<T> {
|
||||
#[fail(display = "invalid UTF-8: {}", _0)]
|
||||
Utf8(Utf8Error),
|
||||
#[fail(display = "{}", _0)]
|
||||
Io(#[cause] IoError<T>)
|
||||
Io(#[cause] IoError<T>),
|
||||
#[fail(display = "drtio error")]
|
||||
DrtioError,
|
||||
}
|
||||
|
||||
impl<T> From<IoError<T>> for Error<T> {
|
||||
@ -65,6 +67,8 @@ pub enum Request {
|
||||
|
||||
Reboot,
|
||||
|
||||
Flash { image: Vec<u8> },
|
||||
|
||||
DebugAllocator,
|
||||
}
|
||||
|
||||
@ -123,6 +127,10 @@ impl Request {
|
||||
|
||||
8 => Request::DebugAllocator,
|
||||
|
||||
9 => Request::Flash {
|
||||
image: reader.read_bytes()?,
|
||||
},
|
||||
|
||||
ty => return Err(Error::UnknownPacket(ty))
|
||||
})
|
||||
}
|
||||
|
@ -16,6 +16,7 @@ build_misoc = { path = "../libbuild_misoc" }
|
||||
failure = { version = "0.1", default-features = false }
|
||||
failure_derive = { version = "0.1", default-features = false }
|
||||
byteorder = { version = "1.0", default-features = false }
|
||||
crc = { version = "1.7", default-features = false }
|
||||
cslice = { version = "0.3" }
|
||||
log = { version = "=0.4.14", default-features = false }
|
||||
managed = { version = "^0.7.1", default-features = false, features = ["alloc", "map"] }
|
||||
|
@ -194,14 +194,15 @@ pub mod subkernel {
|
||||
}
|
||||
|
||||
pub fn load(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &RoutingTable,
|
||||
id: u32, run: bool) -> Result<(), Error> {
|
||||
id: u32, run: bool, timestamp: u64) -> Result<(), Error> {
|
||||
let _lock = subkernel_mutex.lock(io)?;
|
||||
let subkernel = unsafe { SUBKERNELS.get_mut(&id).unwrap() };
|
||||
if subkernel.state != SubkernelState::Uploaded {
|
||||
error!("for id: {} expected Uploaded, got: {:?}", id, subkernel.state);
|
||||
return Err(Error::IncorrectState);
|
||||
}
|
||||
drtio::subkernel_load(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, id, subkernel.destination, run)?;
|
||||
drtio::subkernel_load(io, aux_mutex, ddma_mutex, subkernel_mutex,
|
||||
routing_table, id, subkernel.destination, run, timestamp)?;
|
||||
if run {
|
||||
subkernel.state = SubkernelState::Running;
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
#![feature(lang_items, panic_info_message, const_btree_new, iter_advance_by, never_type)]
|
||||
#![no_std]
|
||||
|
||||
extern crate crc;
|
||||
extern crate dyld;
|
||||
extern crate eh;
|
||||
#[macro_use]
|
||||
@ -29,9 +30,10 @@ extern crate riscv;
|
||||
extern crate tar_no_std;
|
||||
|
||||
use alloc::collections::BTreeMap;
|
||||
use core::cell::RefCell;
|
||||
use core::cell::{RefCell, Cell};
|
||||
use core::convert::TryFrom;
|
||||
use smoltcp::wire::HardwareAddress;
|
||||
use urc::Urc;
|
||||
|
||||
use board_misoc::{csr, ident, clock, spiflash, config, net_settings, pmp, boot};
|
||||
#[cfg(has_ethmac)]
|
||||
@ -196,6 +198,7 @@ fn startup() {
|
||||
|
||||
let ddma_mutex = sched::Mutex::new();
|
||||
let subkernel_mutex = sched::Mutex::new();
|
||||
let restart_idle = Urc::new(Cell::new(false));
|
||||
|
||||
let mut scheduler = sched::Scheduler::new(interface);
|
||||
let io = scheduler.io();
|
||||
@ -205,15 +208,22 @@ fn startup() {
|
||||
}
|
||||
|
||||
rtio_mgt::startup(&io, &aux_mutex, &drtio_routing_table, &up_destinations, &ddma_mutex, &subkernel_mutex);
|
||||
|
||||
io.spawn(4096, mgmt::thread);
|
||||
{
|
||||
let restart_idle = restart_idle.clone();
|
||||
let aux_mutex = aux_mutex.clone();
|
||||
let ddma_mutex = ddma_mutex.clone();
|
||||
let subkernel_mutex = subkernel_mutex.clone();
|
||||
let drtio_routing_table = drtio_routing_table.clone();
|
||||
io.spawn(4096, move |io| { mgmt::thread(io, &restart_idle, &aux_mutex, &ddma_mutex, &subkernel_mutex, &drtio_routing_table) });
|
||||
}
|
||||
{
|
||||
let aux_mutex = aux_mutex.clone();
|
||||
let drtio_routing_table = drtio_routing_table.clone();
|
||||
let up_destinations = up_destinations.clone();
|
||||
let ddma_mutex = ddma_mutex.clone();
|
||||
let subkernel_mutex = subkernel_mutex.clone();
|
||||
io.spawn(32768, move |io| { session::thread(io, &aux_mutex, &drtio_routing_table, &up_destinations, &ddma_mutex, &subkernel_mutex) });
|
||||
let restart_idle = restart_idle.clone();
|
||||
io.spawn(32768, move |io| { session::thread(io, &aux_mutex, &drtio_routing_table, &up_destinations, &ddma_mutex, &subkernel_mutex, &restart_idle) });
|
||||
}
|
||||
#[cfg(any(has_rtio_moninj, has_drtio))]
|
||||
{
|
||||
|
@ -1,10 +1,10 @@
|
||||
use log::{self, LevelFilter};
|
||||
use core::cell::{Cell, RefCell};
|
||||
|
||||
use io::{Write, ProtoWrite, Error as IoError};
|
||||
use board_misoc::{config, spiflash};
|
||||
use logger_artiq::BufferLogger;
|
||||
use board_artiq::drtio_routing::RoutingTable;
|
||||
use io::{ProtoRead, Write, Error as IoError};
|
||||
use mgmt_proto::*;
|
||||
use sched::{Io, TcpListener, TcpStream, Error as SchedError};
|
||||
use sched::{Io, Mutex, TcpListener, TcpStream, Error as SchedError};
|
||||
use urc::Urc;
|
||||
|
||||
impl From<SchedError> for Error<SchedError> {
|
||||
fn from(value: SchedError) -> Error<SchedError> {
|
||||
@ -12,121 +12,660 @@ impl From<SchedError> for Error<SchedError> {
|
||||
}
|
||||
}
|
||||
|
||||
fn worker(io: &Io, stream: &mut TcpStream) -> Result<(), Error<SchedError>> {
|
||||
mod local_coremgmt {
|
||||
use alloc::{string::String, vec::Vec};
|
||||
use byteorder::{ByteOrder, NativeEndian};
|
||||
use crc::crc32;
|
||||
use log::LevelFilter;
|
||||
|
||||
use board_misoc::{config, mem, spiflash};
|
||||
use io::ProtoWrite;
|
||||
use logger_artiq::BufferLogger;
|
||||
|
||||
use super::*;
|
||||
|
||||
|
||||
pub fn get_log(io: &Io, stream: &mut TcpStream) -> Result<(), Error<SchedError>> {
|
||||
BufferLogger::with(|logger| {
|
||||
let mut buffer = io.until_ok(|| logger.buffer())?;
|
||||
Reply::LogContent(buffer.extract()).write_to(stream)
|
||||
})?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn clear_log(io: &Io, stream: &mut TcpStream) -> Result<(), Error<SchedError>> {
|
||||
BufferLogger::with(|logger| -> Result<(), IoError<SchedError>> {
|
||||
let mut buffer = io.until_ok(|| logger.buffer())?;
|
||||
Ok(buffer.clear())
|
||||
})?;
|
||||
|
||||
Reply::Success.write_to(stream)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn pull_log(io: &Io, stream: &mut TcpStream) -> Result<(), Error<SchedError>> {
|
||||
BufferLogger::with(|logger| -> Result<(), IoError<SchedError>> {
|
||||
loop {
|
||||
// Do this *before* acquiring the buffer, since that sets the log level
|
||||
// to OFF.
|
||||
let log_level = log::max_level();
|
||||
|
||||
let mut buffer = io.until_ok(|| logger.buffer())?;
|
||||
if buffer.is_empty() { continue }
|
||||
|
||||
stream.write_string(buffer.extract())?;
|
||||
|
||||
if log_level == LevelFilter::Trace {
|
||||
// Hold exclusive access over the logger until we get positive
|
||||
// acknowledgement; otherwise we get an infinite loop of network
|
||||
// trace messages being transmitted and causing more network
|
||||
// trace messages to be emitted.
|
||||
//
|
||||
// Any messages unrelated to this management socket that arrive
|
||||
// while it is flushed are lost, but such is life.
|
||||
stream.flush()?;
|
||||
}
|
||||
|
||||
// Clear the log *after* flushing the network buffers, or we're just
|
||||
// going to resend all the trace messages on the next iteration.
|
||||
buffer.clear();
|
||||
}
|
||||
})?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn set_log_filter(_io: &Io, stream: &mut TcpStream, level: LevelFilter) -> Result<(), Error<SchedError>> {
|
||||
info!("changing log level to {}", level);
|
||||
log::set_max_level(level);
|
||||
Reply::Success.write_to(stream)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn set_uart_log_filter(_io: &Io, stream: &mut TcpStream, level: LevelFilter) -> Result<(), Error<SchedError>> {
|
||||
info!("changing UART log level to {}", level);
|
||||
BufferLogger::with(|logger|
|
||||
logger.set_uart_log_level(level));
|
||||
Reply::Success.write_to(stream)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn config_read(_io: &Io, stream: &mut TcpStream, key: &String) -> Result<(), Error<SchedError>>{
|
||||
config::read(key, |result| {
|
||||
match result {
|
||||
Ok(value) => Reply::ConfigData(&value).write_to(stream),
|
||||
Err(_) => Reply::Error.write_to(stream)
|
||||
}
|
||||
})?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn config_write(io: &Io, stream: &mut TcpStream, key: &String, value: &Vec<u8>, restart_idle: &Urc<Cell<bool>>) -> Result<(), Error<SchedError>> {
|
||||
match config::write(key, value) {
|
||||
Ok(_) => {
|
||||
if key == "idle_kernel" {
|
||||
io.until(|| !restart_idle.get())?;
|
||||
restart_idle.set(true);
|
||||
}
|
||||
Reply::Success.write_to(stream)
|
||||
},
|
||||
Err(_) => Reply::Error.write_to(stream)
|
||||
}?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn config_remove(io: &Io, stream: &mut TcpStream, key: &String, restart_idle: &Urc<Cell<bool>>) -> Result<(), Error<SchedError>> {
|
||||
match config::remove(key) {
|
||||
Ok(()) => {
|
||||
if key == "idle_kernel" {
|
||||
io.until(|| !restart_idle.get())?;
|
||||
restart_idle.set(true);
|
||||
}
|
||||
Reply::Success.write_to(stream)
|
||||
},
|
||||
Err(_) => Reply::Error.write_to(stream)
|
||||
}?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn config_erase(io: &Io, stream: &mut TcpStream, restart_idle: &Urc<Cell<bool>>) -> Result<(), Error<SchedError>> {
|
||||
match config::erase() {
|
||||
Ok(()) => {
|
||||
io.until(|| !restart_idle.get())?;
|
||||
restart_idle.set(true);
|
||||
Reply::Success.write_to(stream)
|
||||
},
|
||||
Err(_) => Reply::Error.write_to(stream)
|
||||
}?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn reboot(_io: &Io, stream: &mut TcpStream) -> Result<(), Error<SchedError>> {
|
||||
Reply::RebootImminent.write_to(stream)?;
|
||||
stream.close()?;
|
||||
stream.flush()?;
|
||||
|
||||
warn!("restarting");
|
||||
unsafe { spiflash::reload(); }
|
||||
}
|
||||
|
||||
pub fn debug_allocator(_io: &Io, _stream: &mut TcpStream) -> Result<(), Error<SchedError>> {
|
||||
unsafe { println!("{}", ::ALLOC) }
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn flash(_io: &Io, stream: &mut TcpStream, image: &[u8]) -> Result<(), Error<SchedError>> {
|
||||
let (expected_crc, mut image) = {
|
||||
let (image, crc_slice) = image.split_at(image.len() - 4);
|
||||
(NativeEndian::read_u32(crc_slice), image)
|
||||
};
|
||||
|
||||
let actual_crc = crc32::checksum_ieee(image);
|
||||
|
||||
if actual_crc == expected_crc {
|
||||
let bin_origins = [
|
||||
("gateware" , 0 ),
|
||||
("bootloader", mem::ROM_BASE ),
|
||||
("firmware" , mem::FLASH_BOOT_ADDRESS),
|
||||
];
|
||||
|
||||
for (name, origin) in bin_origins {
|
||||
info!("Flashing {} binary...", name);
|
||||
let size = NativeEndian::read_u32(&image[..4]) as usize;
|
||||
image = &image[4..];
|
||||
|
||||
let (bin, remaining) = image.split_at(size);
|
||||
image = remaining;
|
||||
|
||||
unsafe { spiflash::flash_binary(origin, bin) };
|
||||
}
|
||||
|
||||
reboot(_io, stream)?;
|
||||
} else {
|
||||
error!("CRC failed, images have not been written to flash.\n(actual {:08x}, expected {:08x})", actual_crc, expected_crc);
|
||||
Reply::Error.write_to(stream)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(has_drtio)]
|
||||
mod remote_coremgmt {
|
||||
use alloc::{string::String, vec::Vec};
|
||||
use log::LevelFilter;
|
||||
|
||||
use board_artiq::{drtioaux, drtioaux::Packet};
|
||||
use io::ProtoWrite;
|
||||
use rtio_mgt::drtio;
|
||||
use proto_artiq::drtioaux_proto::MASTER_PAYLOAD_MAX_SIZE;
|
||||
|
||||
use super::*;
|
||||
|
||||
impl From<drtio::Error> for Error<SchedError> {
|
||||
fn from(_value: drtio::Error) -> Error<SchedError> {
|
||||
Error::DrtioError
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_log(io: &Io, aux_mutex: &Mutex,
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
|
||||
routing_table: &RoutingTable, linkno: u8,
|
||||
destination: u8, stream: &mut TcpStream) -> Result<(), Error<SchedError>> {
|
||||
let mut buffer = String::new();
|
||||
loop {
|
||||
let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&Packet::CoreMgmtGetLogRequest { destination, clear: false }
|
||||
);
|
||||
|
||||
match reply {
|
||||
Ok(Packet::CoreMgmtGetLogReply { last, length, data }) => {
|
||||
buffer.push_str(
|
||||
core::str::from_utf8(&data[..length as usize]).map_err(|_| Error::DrtioError)?);
|
||||
if last {
|
||||
Reply::LogContent(&buffer).write_to(stream)?;
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
Ok(packet) => {
|
||||
error!("received unexpected aux packet: {:?}", packet);
|
||||
Reply::Error.write_to(stream)?;
|
||||
return Err(drtio::Error::UnexpectedReply.into());
|
||||
}
|
||||
Err(e) => {
|
||||
error!("aux packet error ({})", e);
|
||||
Reply::Error.write_to(stream)?;
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn clear_log(io: &Io, aux_mutex: &Mutex,
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
|
||||
routing_table: &RoutingTable, linkno: u8,
|
||||
destination: u8, stream: &mut TcpStream) -> Result<(), Error<SchedError>> {
|
||||
let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&Packet::CoreMgmtClearLogRequest { destination }
|
||||
);
|
||||
|
||||
match reply {
|
||||
Ok(Packet::CoreMgmtReply { succeeded: true }) => {
|
||||
Reply::Success.write_to(stream)?;
|
||||
Ok(())
|
||||
}
|
||||
Ok(packet) => {
|
||||
error!("received unexpected aux packet: {:?}", packet);
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(drtio::Error::UnexpectedReply.into())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("aux packet error ({})", e);
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(e.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn pull_log(io: &Io, aux_mutex: &Mutex,
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
|
||||
routing_table: &RoutingTable, linkno: u8,
|
||||
destination: u8, stream: &mut TcpStream) -> Result<(), Error<SchedError>> {
|
||||
let mut buffer = Vec::new();
|
||||
loop {
|
||||
let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&Packet::CoreMgmtGetLogRequest { destination, clear: true }
|
||||
);
|
||||
|
||||
match reply {
|
||||
Ok(Packet::CoreMgmtGetLogReply { last, length, data }) => {
|
||||
buffer.extend(&data[..length as usize]);
|
||||
|
||||
if last {
|
||||
stream.write_bytes(&buffer)?;
|
||||
buffer.clear();
|
||||
}
|
||||
}
|
||||
Ok(packet) => {
|
||||
error!("received unexpected aux packet: {:?}", packet);
|
||||
return Err(drtio::Error::UnexpectedReply.into());
|
||||
}
|
||||
Err(e) => {
|
||||
error!("aux packet error ({})", e);
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_log_filter(io: &Io, aux_mutex: &Mutex,
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
|
||||
routing_table: &RoutingTable, linkno: u8,
|
||||
destination: u8, stream: &mut TcpStream, level: LevelFilter) -> Result<(), Error<SchedError>> {
|
||||
let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&Packet::CoreMgmtSetLogLevelRequest { destination, log_level: level as u8 }
|
||||
);
|
||||
|
||||
match reply {
|
||||
Ok(Packet::CoreMgmtReply { succeeded: true }) => {
|
||||
Reply::Success.write_to(stream)?;
|
||||
Ok(())
|
||||
}
|
||||
Ok(packet) => {
|
||||
error!("received unexpected aux packet: {:?}", packet);
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(drtio::Error::UnexpectedReply.into())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("aux packet error ({})", e);
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(e.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_uart_log_filter(io: &Io, aux_mutex: &Mutex,
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
|
||||
routing_table: &RoutingTable, linkno: u8,
|
||||
destination: u8, stream: &mut TcpStream, level: LevelFilter) -> Result<(), Error<SchedError>> {
|
||||
let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&Packet::CoreMgmtSetUartLogLevelRequest { destination, log_level: level as u8 }
|
||||
);
|
||||
|
||||
match reply {
|
||||
Ok(Packet::CoreMgmtReply { succeeded: true }) => {
|
||||
Reply::Success.write_to(stream)?;
|
||||
Ok(())
|
||||
}
|
||||
Ok(packet) => {
|
||||
error!("received unexpected aux packet: {:?}", packet);
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(drtio::Error::UnexpectedReply.into())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("aux packet error ({})", e);
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(e.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn config_read(io: &Io, aux_mutex: &Mutex,
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
|
||||
routing_table: &RoutingTable, linkno: u8,
|
||||
destination: u8, stream: &mut TcpStream, key: &String) -> Result<(), Error<SchedError>> {
|
||||
let mut config_key: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
||||
let len = key.len();
|
||||
config_key[..len].clone_from_slice(key.as_bytes());
|
||||
|
||||
let mut reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&Packet::CoreMgmtConfigReadRequest {
|
||||
destination: destination,
|
||||
length: len as u16,
|
||||
key: config_key,
|
||||
}
|
||||
);
|
||||
|
||||
let mut buffer = Vec::<u8>::new();
|
||||
loop {
|
||||
match reply {
|
||||
Ok(Packet::CoreMgmtConfigReadReply { length, last, value }) => {
|
||||
buffer.extend(&value[..length as usize]);
|
||||
|
||||
if last {
|
||||
Reply::ConfigData(&buffer).write_to(stream)?;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&Packet::CoreMgmtConfigReadContinue {
|
||||
destination: destination,
|
||||
}
|
||||
);
|
||||
}
|
||||
Ok(packet) => {
|
||||
error!("received unexpected aux packet: {:?}", packet);
|
||||
Reply::Error.write_to(stream)?;
|
||||
return Err(drtio::Error::UnexpectedReply.into());
|
||||
}
|
||||
Err(e) => {
|
||||
error!("aux packet error ({})", e);
|
||||
Reply::Error.write_to(stream)?;
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn config_write(io: &Io, aux_mutex: &Mutex,
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
|
||||
routing_table: &RoutingTable, linkno: u8,
|
||||
destination: u8, stream: &mut TcpStream, key: &String, value: &Vec<u8>,
|
||||
_restart_idle: &Urc<Cell<bool>>) -> Result<(), Error<SchedError>> {
|
||||
let mut message = Vec::with_capacity(key.len() + value.len() + 4 * 2);
|
||||
message.write_string(key).unwrap();
|
||||
message.write_bytes(value).unwrap();
|
||||
|
||||
match drtio::partition_data(&message, |slice, status, len: usize| {
|
||||
let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&Packet::CoreMgmtConfigWriteRequest {
|
||||
destination: destination, length: len as u16, last: status.is_last(), data: *slice});
|
||||
match reply {
|
||||
Ok(Packet::CoreMgmtReply { succeeded: true }) => Ok(()),
|
||||
Ok(packet) => {
|
||||
error!("received unexpected aux packet: {:?}", packet);
|
||||
Err(drtio::Error::UnexpectedReply)
|
||||
}
|
||||
Err(e) => {
|
||||
error!("aux packet error ({})", e);
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}) {
|
||||
Ok(()) => {
|
||||
Reply::Success.write_to(stream)?;
|
||||
Ok(())
|
||||
},
|
||||
Err(e) => {
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(e.into())
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn config_remove(io: &Io, aux_mutex: &Mutex,
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
|
||||
routing_table: &RoutingTable, linkno: u8,
|
||||
destination: u8, stream: &mut TcpStream, key: &String,
|
||||
_restart_idle: &Urc<Cell<bool>>) -> Result<(), Error<SchedError>> {
|
||||
let mut config_key: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
||||
let len = key.len();
|
||||
config_key[..len].clone_from_slice(key.as_bytes());
|
||||
|
||||
let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&Packet::CoreMgmtConfigRemoveRequest {
|
||||
destination: destination,
|
||||
length: key.len() as u16,
|
||||
key: config_key,
|
||||
});
|
||||
|
||||
match reply {
|
||||
Ok(Packet::CoreMgmtReply { succeeded: true }) => {
|
||||
Reply::Success.write_to(stream)?;
|
||||
Ok(())
|
||||
}
|
||||
Ok(packet) => {
|
||||
error!("received unexpected aux packet: {:?}", packet);
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(drtio::Error::UnexpectedReply.into())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("aux packet error ({})", e);
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(e.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn config_erase(io: &Io, aux_mutex: &Mutex,
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
|
||||
routing_table: &RoutingTable, linkno: u8,
|
||||
destination: u8, stream: &mut TcpStream, _restart_idle: &Urc<Cell<bool>>) -> Result<(), Error<SchedError>> {
|
||||
let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&Packet::CoreMgmtConfigEraseRequest {
|
||||
destination: destination,
|
||||
});
|
||||
|
||||
match reply {
|
||||
Ok(Packet::CoreMgmtReply { succeeded: true }) => {
|
||||
Reply::Success.write_to(stream)?;
|
||||
Ok(())
|
||||
}
|
||||
Ok(packet) => {
|
||||
error!("received unexpected aux packet: {:?}", packet);
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(drtio::Error::UnexpectedReply.into())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("aux packet error ({})", e);
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(e.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn reboot(io: &Io, aux_mutex: &Mutex,
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
|
||||
routing_table: &RoutingTable, linkno: u8,
|
||||
destination: u8, stream: &mut TcpStream) -> Result<(), Error<SchedError>> {
|
||||
let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&Packet::CoreMgmtRebootRequest {
|
||||
destination: destination,
|
||||
});
|
||||
|
||||
match reply {
|
||||
Ok(Packet::CoreMgmtReply { succeeded: true }) => {
|
||||
Reply::RebootImminent.write_to(stream)?;
|
||||
Ok(())
|
||||
}
|
||||
Ok(packet) => {
|
||||
error!("received unexpected aux packet: {:?}", packet);
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(drtio::Error::UnexpectedReply.into())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("aux packet error ({})", e);
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(e.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn debug_allocator(io: &Io, aux_mutex: &Mutex,
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
|
||||
routing_table: &RoutingTable, linkno: u8,
|
||||
destination: u8, _stream: &mut TcpStream) -> Result<(), Error<SchedError>> {
|
||||
let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&Packet::CoreMgmtAllocatorDebugRequest {
|
||||
destination: destination,
|
||||
});
|
||||
|
||||
match reply {
|
||||
Ok(Packet::CoreMgmtReply { succeeded: true }) => Ok(()),
|
||||
Ok(packet) => {
|
||||
error!("received unexpected aux packet: {:?}", packet);
|
||||
Err(drtio::Error::UnexpectedReply.into())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("aux packet error ({})", e);
|
||||
Err(e.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn flash(io: &Io, aux_mutex: &Mutex,
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
|
||||
routing_table: &RoutingTable, linkno: u8,
|
||||
destination: u8, stream: &mut TcpStream, image: &[u8]) -> Result<(), Error<SchedError>> {
|
||||
|
||||
let alloc_reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&Packet::CoreMgmtFlashRequest {
|
||||
destination: destination,
|
||||
payload_length: image.len() as u32,
|
||||
});
|
||||
|
||||
match alloc_reply {
|
||||
Ok(Packet::CoreMgmtReply { succeeded: true }) => Ok(()),
|
||||
Ok(packet) => {
|
||||
error!("received unexpected aux packet: {:?}", packet);
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(drtio::Error::UnexpectedReply)
|
||||
}
|
||||
Err(e) => {
|
||||
error!("aux packet error ({})", e);
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(e)
|
||||
}
|
||||
}?;
|
||||
|
||||
match drtio::partition_data(&image, |slice, status, len: usize| {
|
||||
let reply = drtio::aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&Packet::CoreMgmtFlashAddDataRequest {
|
||||
destination: destination, length: len as u16, last: status.is_last(), data: *slice});
|
||||
match reply {
|
||||
Ok(Packet::CoreMgmtReply { succeeded: true }) => Ok(()),
|
||||
Ok(Packet::CoreMgmtDropLink) => {
|
||||
if status.is_last() {
|
||||
drtioaux::send(
|
||||
linkno, &Packet::CoreMgmtDropLinkAck { destination: destination }
|
||||
).map_err(|_| drtio::Error::AuxError)
|
||||
} else {
|
||||
error!("received unexpected drop link packet");
|
||||
Err(drtio::Error::UnexpectedReply)
|
||||
}
|
||||
}
|
||||
Ok(packet) => {
|
||||
error!("received unexpected aux packet: {:?}", packet);
|
||||
Err(drtio::Error::UnexpectedReply)
|
||||
}
|
||||
Err(e) => {
|
||||
error!("aux packet error ({})", e);
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}) {
|
||||
Ok(()) => {
|
||||
Reply::RebootImminent.write_to(stream)?;
|
||||
Ok(())
|
||||
},
|
||||
Err(e) => {
|
||||
Reply::Error.write_to(stream)?;
|
||||
Err(e.into())
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(has_drtio)]
|
||||
macro_rules! process {
|
||||
($io:ident, $aux_mutex:ident, $ddma_mutex:ident, $subkernel_mutex:ident, $routing_table:ident, $tcp_stream:ident, $destination: ident, $func:ident $(, $param:expr)*) => {{
|
||||
let hop = $routing_table.0[$destination as usize][0];
|
||||
if hop == 0 {
|
||||
local_coremgmt::$func($io, $tcp_stream, $($param, )*)
|
||||
} else {
|
||||
let linkno = hop - 1;
|
||||
remote_coremgmt::$func($io, $aux_mutex, $ddma_mutex, $subkernel_mutex, $routing_table, linkno, $destination, $tcp_stream, $($param, )*)
|
||||
}
|
||||
}}
|
||||
}
|
||||
|
||||
#[cfg(not(has_drtio))]
|
||||
macro_rules! process {
|
||||
($io:ident, $aux_mutex:ident, $ddma_mutex:ident, $subkernel_mutex:ident, $routing_table:ident, $tcp_stream:ident, $_destination: ident, $func:ident $(, $param:expr)*) => {{
|
||||
local_coremgmt::$func($io, $tcp_stream, $($param, )*)
|
||||
}}
|
||||
}
|
||||
|
||||
fn worker(io: &Io, stream: &mut TcpStream, restart_idle: &Urc<Cell<bool>>,
|
||||
_aux_mutex: &Mutex, _ddma_mutex: &Mutex, _subkernel_mutex: &Mutex,
|
||||
_routing_table: &RoutingTable) -> Result<(), Error<SchedError>> {
|
||||
read_magic(stream)?;
|
||||
let _destination = stream.read_u8()?;
|
||||
Write::write_all(stream, "e".as_bytes())?;
|
||||
info!("new connection from {}", stream.remote_endpoint());
|
||||
|
||||
loop {
|
||||
match Request::read_from(stream)? {
|
||||
Request::GetLog => {
|
||||
BufferLogger::with(|logger| {
|
||||
let mut buffer = io.until_ok(|| logger.buffer())?;
|
||||
Reply::LogContent(buffer.extract()).write_to(stream)
|
||||
})?;
|
||||
}
|
||||
Request::ClearLog => {
|
||||
BufferLogger::with(|logger| -> Result<(), Error<SchedError>> {
|
||||
let mut buffer = io.until_ok(|| logger.buffer())?;
|
||||
Ok(buffer.clear())
|
||||
})?;
|
||||
|
||||
Reply::Success.write_to(stream)?;
|
||||
}
|
||||
Request::PullLog => {
|
||||
BufferLogger::with(|logger| -> Result<(), Error<SchedError>> {
|
||||
loop {
|
||||
// Do this *before* acquiring the buffer, since that sets the log level
|
||||
// to OFF.
|
||||
let log_level = log::max_level();
|
||||
|
||||
let mut buffer = io.until_ok(|| logger.buffer())?;
|
||||
if buffer.is_empty() { continue }
|
||||
|
||||
stream.write_string(buffer.extract())?;
|
||||
|
||||
if log_level == LevelFilter::Trace {
|
||||
// Hold exclusive access over the logger until we get positive
|
||||
// acknowledgement; otherwise we get an infinite loop of network
|
||||
// trace messages being transmitted and causing more network
|
||||
// trace messages to be emitted.
|
||||
//
|
||||
// Any messages unrelated to this management socket that arrive
|
||||
// while it is flushed are lost, but such is life.
|
||||
stream.flush()?;
|
||||
}
|
||||
|
||||
// Clear the log *after* flushing the network buffers, or we're just
|
||||
// going to resend all the trace messages on the next iteration.
|
||||
buffer.clear();
|
||||
}
|
||||
})?;
|
||||
}
|
||||
Request::SetLogFilter(level) => {
|
||||
info!("changing log level to {}", level);
|
||||
log::set_max_level(level);
|
||||
Reply::Success.write_to(stream)?;
|
||||
}
|
||||
Request::SetUartLogFilter(level) => {
|
||||
info!("changing UART log level to {}", level);
|
||||
BufferLogger::with(|logger|
|
||||
logger.set_uart_log_level(level));
|
||||
Reply::Success.write_to(stream)?;
|
||||
}
|
||||
|
||||
Request::ConfigRead { ref key } => {
|
||||
config::read(key, |result| {
|
||||
match result {
|
||||
Ok(value) => Reply::ConfigData(&value).write_to(stream),
|
||||
Err(_) => Reply::Error.write_to(stream)
|
||||
}
|
||||
})?;
|
||||
}
|
||||
Request::ConfigWrite { ref key, ref value } => {
|
||||
match config::write(key, value) {
|
||||
Ok(_) => Reply::Success.write_to(stream),
|
||||
Err(_) => Reply::Error.write_to(stream)
|
||||
}?;
|
||||
}
|
||||
Request::ConfigRemove { ref key } => {
|
||||
match config::remove(key) {
|
||||
Ok(()) => Reply::Success.write_to(stream),
|
||||
Err(_) => Reply::Error.write_to(stream)
|
||||
}?;
|
||||
|
||||
}
|
||||
Request::ConfigErase => {
|
||||
match config::erase() {
|
||||
Ok(()) => Reply::Success.write_to(stream),
|
||||
Err(_) => Reply::Error.write_to(stream)
|
||||
}?;
|
||||
}
|
||||
|
||||
Request::Reboot => {
|
||||
Reply::RebootImminent.write_to(stream)?;
|
||||
stream.close()?;
|
||||
stream.flush()?;
|
||||
|
||||
warn!("restarting");
|
||||
unsafe { spiflash::reload(); }
|
||||
}
|
||||
|
||||
Request::DebugAllocator =>
|
||||
unsafe { println!("{}", ::ALLOC) },
|
||||
};
|
||||
Request::GetLog => process!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, stream, _destination, get_log),
|
||||
Request::ClearLog => process!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, stream, _destination, clear_log),
|
||||
Request::PullLog => process!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, stream, _destination, pull_log),
|
||||
Request::SetLogFilter(level) => process!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, stream, _destination, set_log_filter, level),
|
||||
Request::SetUartLogFilter(level) => process!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, stream, _destination, set_uart_log_filter, level),
|
||||
Request::ConfigRead { ref key } => process!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, stream, _destination, config_read, key),
|
||||
Request::ConfigWrite { ref key, ref value } => process!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, stream, _destination, config_write, key, value, restart_idle),
|
||||
Request::ConfigRemove { ref key } => process!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, stream, _destination, config_remove, key, restart_idle),
|
||||
Request::ConfigErase => process!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, stream, _destination, config_erase, restart_idle),
|
||||
Request::Reboot => process!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, stream, _destination, reboot),
|
||||
Request::DebugAllocator => process!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, stream, _destination, debug_allocator),
|
||||
Request::Flash { ref image } => process!(io, _aux_mutex, _ddma_mutex, _subkernel_mutex, _routing_table, stream, _destination, flash, &image[..]),
|
||||
}?;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn thread(io: Io) {
|
||||
pub fn thread(io: Io, restart_idle: &Urc<Cell<bool>>, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex, routing_table: &Urc<RefCell<RoutingTable>>) {
|
||||
let listener = TcpListener::new(&io, 8192);
|
||||
listener.listen(1380).expect("mgmt: cannot listen");
|
||||
info!("management interface active");
|
||||
|
||||
loop {
|
||||
let restart_idle = restart_idle.clone();
|
||||
let aux_mutex = aux_mutex.clone();
|
||||
let ddma_mutex = ddma_mutex.clone();
|
||||
let subkernel_mutex = subkernel_mutex.clone();
|
||||
let routing_table = routing_table.clone();
|
||||
let stream = listener.accept().expect("mgmt: cannot accept").into_handle();
|
||||
io.spawn(4096, move |io| {
|
||||
io.spawn(16384, move |io| {
|
||||
let routing_table = routing_table.borrow();
|
||||
let mut stream = TcpStream::from_handle(&io, stream);
|
||||
match worker(&io, &mut stream) {
|
||||
match worker(&io, &mut stream, &restart_idle, &aux_mutex, &ddma_mutex, &subkernel_mutex, &routing_table) {
|
||||
Ok(()) => (),
|
||||
Err(Error::Io(IoError::UnexpectedEnd)) => (),
|
||||
Err(err) => error!("aborted: {}", err)
|
||||
|
@ -16,6 +16,8 @@ const ASYNC_ERROR_SEQUENCE_ERROR: u8 = 1 << 2;
|
||||
pub mod drtio {
|
||||
use super::*;
|
||||
use alloc::vec::Vec;
|
||||
#[cfg(has_drtio_eem)]
|
||||
use board_artiq::drtio_eem;
|
||||
use drtioaux;
|
||||
use proto_artiq::drtioaux_proto::{MASTER_PAYLOAD_MAX_SIZE, PayloadStatus};
|
||||
use rtio_dma::remote_dma;
|
||||
@ -24,6 +26,9 @@ pub mod drtio {
|
||||
use kernel::subkernel;
|
||||
use sched::Error as SchedError;
|
||||
|
||||
#[cfg(has_drtio_eem)]
|
||||
const DRTIO_EEM_LINKNOS: core::ops::Range<usize> = (csr::DRTIO.len()-csr::CONFIG_EEM_DRTIO_COUNT as usize)..csr::DRTIO.len();
|
||||
|
||||
#[derive(Fail, Debug)]
|
||||
pub enum Error {
|
||||
#[fail(display = "timed out")]
|
||||
@ -73,6 +78,18 @@ pub mod drtio {
|
||||
|
||||
fn link_rx_up(linkno: u8) -> bool {
|
||||
let linkno = linkno as usize;
|
||||
#[cfg(has_drtio_eem)]
|
||||
if DRTIO_EEM_LINKNOS.contains(&linkno) {
|
||||
let eem_trx_no = linkno - DRTIO_EEM_LINKNOS.start;
|
||||
unsafe {
|
||||
csr::eem_transceiver::transceiver_sel_write(eem_trx_no as u8);
|
||||
csr::eem_transceiver::comma_align_reset_write(1);
|
||||
}
|
||||
clock::spin_us(100);
|
||||
return unsafe {
|
||||
csr::eem_transceiver::comma_read() == 1
|
||||
};
|
||||
}
|
||||
unsafe {
|
||||
(csr::DRTIO[linkno].rx_up_read)() == 1
|
||||
}
|
||||
@ -414,9 +431,27 @@ pub mod drtio {
|
||||
} else {
|
||||
info!("[LINK#{}] link is down", linkno);
|
||||
up_links[linkno as usize] = false;
|
||||
|
||||
#[cfg(has_drtio_eem)]
|
||||
if DRTIO_EEM_LINKNOS.contains(&(linkno as usize)) {
|
||||
unsafe { csr::eem_transceiver::rx_ready_write(0); }
|
||||
// Clear DRTIOAUX buffer
|
||||
while !matches!(drtioaux::recv(linkno), Ok(None)) {}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
/* link was previously down */
|
||||
#[cfg(has_drtio_eem)]
|
||||
if DRTIO_EEM_LINKNOS.contains(&(linkno as usize)) {
|
||||
let eem_trx_no = linkno - DRTIO_EEM_LINKNOS.start as u8;
|
||||
if !unsafe { drtio_eem::align_wordslip(eem_trx_no) } {
|
||||
continue;
|
||||
}
|
||||
unsafe {
|
||||
csr::eem_transceiver::rx_ready_write(1);
|
||||
}
|
||||
}
|
||||
|
||||
if link_rx_up(linkno) {
|
||||
info!("[LINK#{}] link RX became up, pinging", linkno);
|
||||
let ping_count = ping_remote(&io, aux_mutex, linkno);
|
||||
@ -471,7 +506,7 @@ pub mod drtio {
|
||||
}
|
||||
}
|
||||
|
||||
fn partition_data<F>(data: &[u8], send_f: F) -> Result<(), Error>
|
||||
pub fn partition_data<F>(data: &[u8], send_f: F) -> Result<(), Error>
|
||||
where F: Fn(&[u8; MASTER_PAYLOAD_MAX_SIZE], PayloadStatus, usize) -> Result<(), Error> {
|
||||
let mut i = 0;
|
||||
while i < data.len() {
|
||||
@ -594,11 +629,14 @@ pub mod drtio {
|
||||
}
|
||||
|
||||
pub fn subkernel_load(io: &Io, aux_mutex: &Mutex, ddma_mutex: &Mutex, subkernel_mutex: &Mutex,
|
||||
routing_table: &drtio_routing::RoutingTable, id: u32, destination: u8, run: bool
|
||||
routing_table: &drtio_routing::RoutingTable, id: u32, destination: u8, run: bool, timestamp: u64
|
||||
) -> Result<(), Error> {
|
||||
let linkno = routing_table.0[destination as usize][0] - 1;
|
||||
let reply = aux_transact(io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, linkno,
|
||||
&drtioaux::Packet::SubkernelLoadRunRequest{ id: id, source: 0, destination: destination, run: run })?;
|
||||
&drtioaux::Packet::SubkernelLoadRunRequest{
|
||||
id: id, source: 0, destination: destination,
|
||||
run: run, timestamp: timestamp
|
||||
})?;
|
||||
match reply {
|
||||
drtioaux::Packet::SubkernelLoadRunReply { destination: 0, succeeded: true } => Ok(()),
|
||||
drtioaux::Packet::SubkernelLoadRunReply { destination: 0, succeeded: false } =>
|
||||
|
@ -402,16 +402,18 @@ impl<'a> TcpListener<'a> {
|
||||
socket.may_send() || socket.may_recv()
|
||||
})?;
|
||||
|
||||
let accepted = self.handle.get();
|
||||
let accepted = TcpStream {
|
||||
io: self.io,
|
||||
handle: self.handle.get(),
|
||||
};
|
||||
accepted.with_lower(|s| s.set_nagle_enabled(false));
|
||||
|
||||
self.handle.set(Self::new_lower(self.io, self.buffer_size.get()));
|
||||
match self.listen(self.endpoint.get()) {
|
||||
Ok(()) => (),
|
||||
_ => unreachable!()
|
||||
}
|
||||
Ok(TcpStream {
|
||||
io: self.io,
|
||||
handle: accepted
|
||||
})
|
||||
Ok(accepted)
|
||||
}
|
||||
|
||||
pub fn close(&self) {
|
||||
|
@ -661,9 +661,9 @@ fn process_kern_message(io: &Io, aux_mutex: &Mutex,
|
||||
}
|
||||
}
|
||||
#[cfg(has_drtio)]
|
||||
&kern::SubkernelLoadRunRequest { id, destination: _, run } => {
|
||||
&kern::SubkernelLoadRunRequest { id, destination: _, run, timestamp } => {
|
||||
let succeeded = match subkernel::load(
|
||||
io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, id, run) {
|
||||
io, aux_mutex, ddma_mutex, subkernel_mutex, routing_table, id, run, timestamp) {
|
||||
Ok(()) => true,
|
||||
Err(e) => { error!("Error loading subkernel: {}", e); false }
|
||||
};
|
||||
@ -839,7 +839,7 @@ fn flash_kernel_worker(io: &Io, aux_mutex: &Mutex,
|
||||
routing_table: &drtio_routing::RoutingTable,
|
||||
up_destinations: &Urc<RefCell<[bool; drtio_routing::DEST_COUNT]>>,
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex, congress: &mut Congress,
|
||||
config_key: &str) -> Result<(), Error<SchedError>> {
|
||||
config_key: &str, restart_idle: Option<&Urc<Cell<bool>>>) -> Result<(), Error<SchedError>> {
|
||||
let mut session = Session::new(congress);
|
||||
|
||||
config::read(config_key, |result| {
|
||||
@ -859,11 +859,16 @@ fn flash_kernel_worker(io: &Io, aux_mutex: &Mutex,
|
||||
}
|
||||
})?;
|
||||
kern_run(&mut session)?;
|
||||
|
||||
loop {
|
||||
if !rpc_queue::empty() {
|
||||
unexpected!("unexpected background RPC in flash kernel")
|
||||
}
|
||||
|
||||
if let Some(r_idle) = restart_idle {
|
||||
if r_idle.get() {
|
||||
return Err(Error::KernelNotFound)
|
||||
}
|
||||
}
|
||||
|
||||
if mailbox::receive() != 0 {
|
||||
if process_kern_message(io, aux_mutex, routing_table, up_destinations, ddma_mutex, subkernel_mutex, None, &mut session)? {
|
||||
@ -897,7 +902,7 @@ fn respawn<F>(io: &Io, handle: &mut Option<ThreadHandle>, f: F)
|
||||
pub fn thread(io: Io, aux_mutex: &Mutex,
|
||||
routing_table: &Urc<RefCell<drtio_routing::RoutingTable>>,
|
||||
up_destinations: &Urc<RefCell<[bool; drtio_routing::DEST_COUNT]>>,
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex) {
|
||||
ddma_mutex: &Mutex, subkernel_mutex: &Mutex, restart_idle: &Urc<Cell<bool>>) {
|
||||
let listener = TcpListener::new(&io, 65535);
|
||||
listener.listen(1381).expect("session: cannot listen");
|
||||
info!("accepting network sessions");
|
||||
@ -910,7 +915,7 @@ pub fn thread(io: Io, aux_mutex: &Mutex,
|
||||
let mut congress = congress.borrow_mut();
|
||||
info!("running startup kernel");
|
||||
match flash_kernel_worker(&io, &aux_mutex, &routing_table, &up_destinations,
|
||||
ddma_mutex, subkernel_mutex, &mut congress, "startup_kernel") {
|
||||
ddma_mutex, subkernel_mutex, &mut congress, "startup_kernel", None) {
|
||||
Ok(()) =>
|
||||
info!("startup kernel finished"),
|
||||
Err(Error::KernelNotFound) =>
|
||||
@ -994,11 +999,12 @@ pub fn thread(io: Io, aux_mutex: &Mutex,
|
||||
let congress = congress.clone();
|
||||
let ddma_mutex = ddma_mutex.clone();
|
||||
let subkernel_mutex = subkernel_mutex.clone();
|
||||
let restart_idle = restart_idle.clone();
|
||||
respawn(&io, &mut kernel_thread, move |io| {
|
||||
let routing_table = routing_table.borrow();
|
||||
let mut congress = congress.borrow_mut();
|
||||
match flash_kernel_worker(&io, &aux_mutex, &routing_table, &up_destinations,
|
||||
&ddma_mutex, &subkernel_mutex, &mut *congress, "idle_kernel") {
|
||||
&ddma_mutex, &subkernel_mutex, &mut *congress, "idle_kernel", Some(&restart_idle)) {
|
||||
Ok(()) =>
|
||||
info!("idle kernel finished, standing by"),
|
||||
Err(Error::Protocol(host::Error::Io(
|
||||
@ -1010,7 +1016,8 @@ pub fn thread(io: Io, aux_mutex: &Mutex,
|
||||
}
|
||||
Err(Error::KernelNotFound) => {
|
||||
debug!("no idle kernel found");
|
||||
while io.relinquish().is_ok() {}
|
||||
while !restart_idle.get() && io.relinquish().is_ok() {}
|
||||
restart_idle.set(false);
|
||||
}
|
||||
Err(err) => {
|
||||
error!("idle kernel aborted: {}", err);
|
||||
|
@ -15,9 +15,12 @@ build_misoc = { path = "../libbuild_misoc" }
|
||||
[dependencies]
|
||||
log = { version = "0.4", default-features = false }
|
||||
io = { path = "../libio", features = ["byteorder", "alloc"] }
|
||||
byteorder = { version = "1.0", default-features = false }
|
||||
crc = { version = "1.7", default-features = false }
|
||||
cslice = { version = "0.3" }
|
||||
board_misoc = { path = "../libboard_misoc", features = ["uart_console", "log"] }
|
||||
board_artiq = { path = "../libboard_artiq", features = ["alloc"] }
|
||||
logger_artiq = { path = "../liblogger_artiq" }
|
||||
alloc_list = { path = "../liballoc_list" }
|
||||
riscv = { version = "0.6.0", features = ["inline-asm"] }
|
||||
proto_artiq = { path = "../libproto_artiq", features = ["log", "alloc"] }
|
||||
|
@ -363,17 +363,17 @@ impl Manager {
|
||||
unsafe { self.cache.unborrow() }
|
||||
}
|
||||
|
||||
pub fn run(&mut self, source: u8, id: u32) -> Result<(), Error> {
|
||||
pub fn run(&mut self, source: u8, id: u32, timestamp: u64) -> Result<(), Error> {
|
||||
info!("starting subkernel #{}", id);
|
||||
if self.session.kernel_state != KernelState::Loaded
|
||||
|| self.current_id != id {
|
||||
self.load(id)?;
|
||||
}
|
||||
}
|
||||
self.session.source = source;
|
||||
self.session.kernel_state = KernelState::Running;
|
||||
cricon_select(RtioMaster::Kernel);
|
||||
|
||||
kern_acknowledge()
|
||||
kern_send(&kern::UpdateNow(timestamp))
|
||||
}
|
||||
|
||||
pub fn message_handle_incoming(&mut self, status: PayloadStatus, length: usize, id: u32, slice: &[u8; MASTER_PAYLOAD_MAX_SIZE]) {
|
||||
@ -825,21 +825,21 @@ impl Manager {
|
||||
// ID equal to -1 indicates wildcard for receiving arguments
|
||||
let id = if id == -1 { self.current_id } else { id as u32 };
|
||||
self.session.kernel_state = KernelState::MsgAwait {
|
||||
id: id, max_time: max_time, tags: tags.to_vec() };
|
||||
id, max_time, tags: tags.to_vec() };
|
||||
Ok(())
|
||||
},
|
||||
|
||||
&kern::SubkernelLoadRunRequest { id, destination: sk_destination, run } => {
|
||||
&kern::SubkernelLoadRunRequest { id, destination: sk_destination, run, timestamp } => {
|
||||
self.session.kernel_state = KernelState::SubkernelAwaitLoad;
|
||||
router.route(drtioaux::Packet::SubkernelLoadRunRequest {
|
||||
source: destination, destination: sk_destination, id: id, run: run
|
||||
source: destination, destination: sk_destination, id, run, timestamp
|
||||
}, routing_table, rank, destination);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
&kern::SubkernelAwaitFinishRequest{ id, timeout } => {
|
||||
&kern::SubkernelAwaitFinishRequest { id, timeout } => {
|
||||
let max_time = if timeout > 0 { clock::get_ms() as i64 + timeout } else { timeout };
|
||||
self.session.kernel_state = KernelState::SubkernelAwaitFinish { max_time: max_time, id: id };
|
||||
self.session.kernel_state = KernelState::SubkernelAwaitFinish { max_time, id };
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -6,21 +6,25 @@ extern crate log;
|
||||
#[macro_use]
|
||||
extern crate board_misoc;
|
||||
extern crate board_artiq;
|
||||
extern crate logger_artiq;
|
||||
extern crate riscv;
|
||||
extern crate alloc;
|
||||
extern crate proto_artiq;
|
||||
extern crate byteorder;
|
||||
extern crate crc;
|
||||
extern crate cslice;
|
||||
extern crate io;
|
||||
extern crate eh;
|
||||
|
||||
use core::convert::TryFrom;
|
||||
use board_misoc::{csr, ident, clock, config, uart_logger, i2c, pmp};
|
||||
use board_misoc::{csr, ident, clock, config, i2c, pmp};
|
||||
#[cfg(has_si5324)]
|
||||
use board_artiq::si5324;
|
||||
#[cfg(has_si549)]
|
||||
use board_artiq::si549;
|
||||
#[cfg(soc_platform = "kasli")]
|
||||
use board_misoc::irq;
|
||||
use board_misoc::{boot, spiflash};
|
||||
use board_artiq::{spi, drtioaux, drtio_routing};
|
||||
#[cfg(soc_platform = "efc")]
|
||||
use board_artiq::ad9117;
|
||||
@ -30,6 +34,7 @@ use board_artiq::drtio_eem;
|
||||
use riscv::register::{mcause, mepc, mtval};
|
||||
use dma::Manager as DmaManager;
|
||||
use kernel::Manager as KernelManager;
|
||||
use mgmt::Manager as CoreManager;
|
||||
use analyzer::Analyzer;
|
||||
|
||||
#[global_allocator]
|
||||
@ -41,6 +46,7 @@ mod dma;
|
||||
mod analyzer;
|
||||
mod kernel;
|
||||
mod cache;
|
||||
mod mgmt;
|
||||
|
||||
fn drtiosat_reset(reset: bool) {
|
||||
unsafe {
|
||||
@ -129,7 +135,7 @@ macro_rules! forward {
|
||||
($router:expr, $routing_table:expr, $destination:expr, $rank:expr, $self_destination:expr, $repeaters:expr, $packet:expr) => {}
|
||||
}
|
||||
|
||||
fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmgr: &mut KernelManager,
|
||||
fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmgr: &mut KernelManager, coremgr: &mut CoreManager,
|
||||
_repeaters: &mut [repeater::Repeater], _routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8,
|
||||
router: &mut routing::Router, self_destination: &mut u8, packet: drtioaux::Packet
|
||||
) -> Result<(), drtioaux::Error<!>> {
|
||||
@ -427,7 +433,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
|
||||
drtioaux::send(0,
|
||||
&drtioaux::Packet::SubkernelAddDataReply { succeeded: succeeded })
|
||||
}
|
||||
drtioaux::Packet::SubkernelLoadRunRequest { source, destination: _destination, id, run } => {
|
||||
drtioaux::Packet::SubkernelLoadRunRequest { source, destination: _destination, id, run, timestamp } => {
|
||||
forward!(router, _routing_table, _destination, *rank, *self_destination, _repeaters, &packet);
|
||||
let mut succeeded = kernelmgr.load(id).is_ok();
|
||||
// allow preloading a kernel with delayed run
|
||||
@ -436,7 +442,7 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
|
||||
// cannot run kernel while DDMA is running
|
||||
succeeded = false;
|
||||
} else {
|
||||
succeeded |= kernelmgr.run(source, id).is_ok();
|
||||
succeeded |= kernelmgr.run(source, id, timestamp).is_ok();
|
||||
}
|
||||
}
|
||||
router.send(drtioaux::Packet::SubkernelLoadRunReply {
|
||||
@ -495,6 +501,167 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
|
||||
Ok(())
|
||||
}
|
||||
|
||||
drtioaux::Packet::CoreMgmtGetLogRequest { destination: _destination, clear } => {
|
||||
forward!(router, _routing_table, _destination, *rank, *self_destination, _repeaters, &packet);
|
||||
|
||||
let mut data_slice = [0; SAT_PAYLOAD_MAX_SIZE];
|
||||
if let Ok(meta) = coremgr.log_get_slice(&mut data_slice, clear) {
|
||||
drtioaux::send(
|
||||
0,
|
||||
&drtioaux::Packet::CoreMgmtGetLogReply {
|
||||
last: meta.status.is_last(),
|
||||
length: meta.len as u16,
|
||||
data: data_slice,
|
||||
},
|
||||
)
|
||||
} else {
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: false })
|
||||
}
|
||||
}
|
||||
drtioaux::Packet::CoreMgmtClearLogRequest { destination: _destination } => {
|
||||
forward!(router, _routing_table, _destination, *rank, *self_destination, _repeaters, &packet);
|
||||
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: mgmt::clear_log().is_ok() })
|
||||
}
|
||||
drtioaux::Packet::CoreMgmtSetLogLevelRequest {destination: _destination, log_level } => {
|
||||
forward!(router, _routing_table, _destination, *rank, *self_destination, _repeaters, &packet);
|
||||
|
||||
if let Ok(level_filter) = mgmt::byte_to_level_filter(log_level) {
|
||||
info!("changing log level to {}", level_filter);
|
||||
log::set_max_level(level_filter);
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: true })
|
||||
} else {
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: false })
|
||||
}
|
||||
}
|
||||
drtioaux::Packet::CoreMgmtSetUartLogLevelRequest { destination: _destination, log_level } => {
|
||||
forward!(router, _routing_table, _destination, *rank, *self_destination, _repeaters, &packet);
|
||||
|
||||
if let Ok(level_filter) = mgmt::byte_to_level_filter(log_level) {
|
||||
info!("changing UART log level to {}", level_filter);
|
||||
logger_artiq::BufferLogger::with(|logger|
|
||||
logger.set_uart_log_level(level_filter));
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: true })
|
||||
} else {
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: false })
|
||||
}
|
||||
}
|
||||
drtioaux::Packet::CoreMgmtConfigReadRequest {
|
||||
destination: _destination,
|
||||
length,
|
||||
key,
|
||||
} => {
|
||||
forward!(router, _routing_table, _destination, *rank, *self_destination, _repeaters, &packet);
|
||||
|
||||
let mut value_slice = [0; SAT_PAYLOAD_MAX_SIZE];
|
||||
|
||||
let key_slice = &key[..length as usize];
|
||||
if !key_slice.is_ascii() {
|
||||
error!("invalid key");
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: false })
|
||||
} else {
|
||||
let key = core::str::from_utf8(key_slice).unwrap();
|
||||
if coremgr.fetch_config_value(key).is_ok() {
|
||||
let meta = coremgr.get_config_value_slice(&mut value_slice);
|
||||
drtioaux::send(
|
||||
0,
|
||||
&drtioaux::Packet::CoreMgmtConfigReadReply {
|
||||
length: meta.len as u16,
|
||||
last: meta.status.is_last(),
|
||||
value: value_slice,
|
||||
},
|
||||
)
|
||||
} else {
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: false })
|
||||
}
|
||||
}
|
||||
}
|
||||
drtioaux::Packet::CoreMgmtConfigReadContinue {
|
||||
destination: _destination,
|
||||
} => {
|
||||
forward!(router, _routing_table, _destination, *rank, *self_destination, _repeaters, &packet);
|
||||
|
||||
let mut value_slice = [0; SAT_PAYLOAD_MAX_SIZE];
|
||||
let meta = coremgr.get_config_value_slice(&mut value_slice);
|
||||
drtioaux::send(
|
||||
0,
|
||||
&drtioaux::Packet::CoreMgmtConfigReadReply {
|
||||
length: meta.len as u16,
|
||||
last: meta.status.is_last(),
|
||||
value: value_slice,
|
||||
},
|
||||
)
|
||||
}
|
||||
drtioaux::Packet::CoreMgmtConfigWriteRequest { destination: _destination, last, length, data } => {
|
||||
forward!(router, _routing_table, _destination, *rank, *self_destination, _repeaters, &packet);
|
||||
|
||||
coremgr.add_config_data(&data, length as usize);
|
||||
if last {
|
||||
coremgr.write_config()
|
||||
} else {
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: true })
|
||||
}
|
||||
}
|
||||
drtioaux::Packet::CoreMgmtConfigRemoveRequest { destination: _destination, length, key } => {
|
||||
forward!(router, _routing_table, _destination, *rank, *self_destination, _repeaters, &packet);
|
||||
|
||||
let key = core::str::from_utf8(&key[..length as usize]).unwrap();
|
||||
let succeeded = config::remove(key)
|
||||
.map_err(|err| warn!("error on removing config: {:?}", err))
|
||||
.is_ok();
|
||||
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded })
|
||||
}
|
||||
drtioaux::Packet::CoreMgmtConfigEraseRequest { destination: _destination } => {
|
||||
forward!(router, _routing_table, _destination, *rank, *self_destination, _repeaters, &packet);
|
||||
|
||||
let succeeded = config::erase()
|
||||
.map_err(|err| warn!("error on erasing config: {:?}", err))
|
||||
.is_ok();
|
||||
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded })
|
||||
}
|
||||
drtioaux::Packet::CoreMgmtRebootRequest { destination: _destination } => {
|
||||
forward!(router, _routing_table, _destination, *rank, *self_destination, _repeaters, &packet);
|
||||
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: true })?;
|
||||
warn!("restarting");
|
||||
unsafe { spiflash::reload(); }
|
||||
}
|
||||
drtioaux::Packet::CoreMgmtFlashRequest { destination: _destination, payload_length } => {
|
||||
forward!(router, _routing_table, _destination, *rank, *self_destination, _repeaters, &packet);
|
||||
|
||||
coremgr.allocate_image_buffer(payload_length as usize);
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: true })
|
||||
}
|
||||
drtioaux::Packet::CoreMgmtFlashAddDataRequest { destination: _destination, last, length, data } => {
|
||||
forward!(router, _routing_table, _destination, *rank, *self_destination, _repeaters, &packet);
|
||||
|
||||
coremgr.add_image_data(&data, length as usize);
|
||||
if last {
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtDropLink)
|
||||
} else {
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: true })
|
||||
}
|
||||
}
|
||||
drtioaux::Packet::CoreMgmtDropLinkAck { destination: _destination } => {
|
||||
forward!(router, _routing_table, _destination, *rank, *self_destination, _repeaters, &packet);
|
||||
|
||||
#[cfg(not(has_drtio_eem))]
|
||||
unsafe {
|
||||
csr::gt_drtio::txenable_write(0);
|
||||
}
|
||||
|
||||
#[cfg(has_drtio_eem)]
|
||||
unsafe {
|
||||
csr::eem_transceiver::txenable_write(0);
|
||||
}
|
||||
|
||||
coremgr.flash_image();
|
||||
warn!("restarting");
|
||||
unsafe { spiflash::reload(); }
|
||||
}
|
||||
|
||||
_ => {
|
||||
warn!("received unexpected aux packet");
|
||||
Ok(())
|
||||
@ -503,13 +670,13 @@ fn process_aux_packet(dmamgr: &mut DmaManager, analyzer: &mut Analyzer, kernelmg
|
||||
}
|
||||
|
||||
fn process_aux_packets(dma_manager: &mut DmaManager, analyzer: &mut Analyzer,
|
||||
kernelmgr: &mut KernelManager, repeaters: &mut [repeater::Repeater],
|
||||
kernelmgr: &mut KernelManager, coremgr: &mut CoreManager, repeaters: &mut [repeater::Repeater],
|
||||
routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8, router: &mut routing::Router,
|
||||
destination: &mut u8) {
|
||||
let result =
|
||||
drtioaux::recv(0).and_then(|packet| {
|
||||
if let Some(packet) = packet.or_else(|| router.get_local_packet()) {
|
||||
process_aux_packet(dma_manager, analyzer, kernelmgr,
|
||||
process_aux_packet(dma_manager, analyzer, kernelmgr, coremgr,
|
||||
repeaters, routing_table, rank, router, destination, packet)
|
||||
} else {
|
||||
Ok(())
|
||||
@ -664,6 +831,27 @@ fn sysclk_setup() {
|
||||
}
|
||||
}
|
||||
|
||||
fn setup_log_levels() {
|
||||
match config::read_str("log_level", |r| r.map(|s| s.parse())) {
|
||||
Ok(Ok(log_level_filter)) => {
|
||||
info!("log level set to {} by `log_level` config key",
|
||||
log_level_filter);
|
||||
log::set_max_level(log_level_filter);
|
||||
}
|
||||
_ => info!("log level set to INFO by default")
|
||||
}
|
||||
match config::read_str("uart_log_level", |r| r.map(|s| s.parse())) {
|
||||
Ok(Ok(uart_log_level_filter)) => {
|
||||
info!("UART log level set to {} by `uart_log_level` config key",
|
||||
uart_log_level_filter);
|
||||
logger_artiq::BufferLogger::with(|logger|
|
||||
logger.set_uart_log_level(uart_log_level_filter));
|
||||
}
|
||||
_ => info!("UART log level set to INFO by default")
|
||||
}
|
||||
}
|
||||
|
||||
static mut LOG_BUFFER: [u8; 1<<17] = [0; 1<<17];
|
||||
|
||||
#[no_mangle]
|
||||
pub extern fn main() -> i32 {
|
||||
@ -683,12 +871,21 @@ pub extern fn main() -> i32 {
|
||||
irq::enable(csr::WRPLL_INTERRUPT);
|
||||
|
||||
clock::init();
|
||||
uart_logger::ConsoleLogger::register();
|
||||
unsafe {
|
||||
logger_artiq::BufferLogger::new(&mut LOG_BUFFER[..]).register(||
|
||||
boot::start_user(startup as usize));
|
||||
}
|
||||
|
||||
0
|
||||
}
|
||||
|
||||
fn startup() {
|
||||
info!("ARTIQ satellite manager starting...");
|
||||
info!("software ident {}", csr::CONFIG_IDENTIFIER_STR);
|
||||
info!("gateware ident {}", ident::read(&mut [0; 64]));
|
||||
|
||||
setup_log_levels();
|
||||
|
||||
#[cfg(has_i2c)]
|
||||
i2c::init().expect("I2C initialization failed");
|
||||
#[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))]
|
||||
@ -752,7 +949,7 @@ pub extern fn main() -> i32 {
|
||||
io_expander.service().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(soc_platform = "efc"))]
|
||||
#[cfg(not(has_drtio_eem))]
|
||||
unsafe {
|
||||
csr::gt_drtio::txenable_write(0xffffffffu32 as _);
|
||||
}
|
||||
@ -777,7 +974,12 @@ pub extern fn main() -> i32 {
|
||||
});
|
||||
|
||||
#[cfg(has_drtio_eem)]
|
||||
drtio_eem::init();
|
||||
{
|
||||
drtio_eem::init();
|
||||
unsafe {
|
||||
csr::eem_transceiver::rx_ready_write(1)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(has_drtio_routing)]
|
||||
let mut repeaters = [repeater::Repeater::default(); csr::DRTIOREP.len()];
|
||||
@ -829,6 +1031,7 @@ pub extern fn main() -> i32 {
|
||||
let mut dma_manager = DmaManager::new();
|
||||
let mut analyzer = Analyzer::new();
|
||||
let mut kernelmgr = KernelManager::new();
|
||||
let mut coremgr = CoreManager::new();
|
||||
|
||||
cricon_select(RtioMaster::Drtio);
|
||||
drtioaux::reset(0);
|
||||
@ -838,7 +1041,7 @@ pub extern fn main() -> i32 {
|
||||
while drtiosat_link_rx_up() {
|
||||
drtiosat_process_errors();
|
||||
process_aux_packets(&mut dma_manager, &mut analyzer,
|
||||
&mut kernelmgr, &mut repeaters, &mut routing_table,
|
||||
&mut kernelmgr, &mut coremgr, &mut repeaters, &mut routing_table,
|
||||
&mut rank, &mut router, &mut destination);
|
||||
for rep in repeaters.iter_mut() {
|
||||
rep.service(&routing_table, rank, destination, &mut router);
|
||||
|
149
artiq/firmware/satman/mgmt.rs
Normal file
149
artiq/firmware/satman/mgmt.rs
Normal file
@ -0,0 +1,149 @@
|
||||
use alloc::vec::Vec;
|
||||
use byteorder::{ByteOrder, NativeEndian};
|
||||
use crc::crc32;
|
||||
|
||||
use routing::{Sliceable, SliceMeta};
|
||||
use board_artiq::drtioaux;
|
||||
use board_misoc::{mem, config, spiflash};
|
||||
use log::LevelFilter;
|
||||
use logger_artiq::BufferLogger;
|
||||
use io::{Cursor, ProtoRead, ProtoWrite};
|
||||
use proto_artiq::drtioaux_proto::SAT_PAYLOAD_MAX_SIZE;
|
||||
|
||||
|
||||
pub fn clear_log() -> Result<(), ()> {
|
||||
BufferLogger::with(|logger| {
|
||||
let mut buffer = logger.buffer()?;
|
||||
Ok(buffer.clear())
|
||||
}).map_err(|()| error!("error on clearing log buffer"))
|
||||
}
|
||||
|
||||
pub fn byte_to_level_filter(level_byte: u8) -> Result<LevelFilter, ()> {
|
||||
Ok(match level_byte {
|
||||
0 => LevelFilter::Off,
|
||||
1 => LevelFilter::Error,
|
||||
2 => LevelFilter::Warn,
|
||||
3 => LevelFilter::Info,
|
||||
4 => LevelFilter::Debug,
|
||||
5 => LevelFilter::Trace,
|
||||
lv => {
|
||||
error!("unknown log level: {}", lv);
|
||||
return Err(());
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub struct Manager {
|
||||
config_payload: Cursor<Vec<u8>>,
|
||||
image_payload: Cursor<Vec<u8>>,
|
||||
last_value: Sliceable,
|
||||
last_log: Sliceable,
|
||||
}
|
||||
|
||||
impl Manager {
|
||||
pub fn new() -> Manager {
|
||||
Manager {
|
||||
config_payload: Cursor::new(Vec::new()),
|
||||
image_payload: Cursor::new(Vec::new()),
|
||||
last_value: Sliceable::new(0, Vec::new()),
|
||||
last_log: Sliceable::new(0, Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn fetch_config_value(&mut self, key: &str) -> Result<(), ()> {
|
||||
config::read(key, |result| result.map(
|
||||
|value| self.last_value = Sliceable::new(0, value.to_vec())
|
||||
)).map_err(|_err| warn!("read error: no such key"))
|
||||
}
|
||||
|
||||
pub fn log_get_slice(&mut self, data_slice: &mut [u8; SAT_PAYLOAD_MAX_SIZE], consume: bool) -> Result<SliceMeta, ()> {
|
||||
// Populate buffer if depleted
|
||||
if self.last_log.at_end() {
|
||||
BufferLogger::with(|logger| {
|
||||
let mut buffer = logger.buffer()?;
|
||||
self.last_log = Sliceable::new(0, buffer.extract().as_bytes().to_vec());
|
||||
if consume {
|
||||
buffer.clear();
|
||||
}
|
||||
Ok(())
|
||||
}).map_err(|()| error!("error on getting log buffer"))?;
|
||||
}
|
||||
|
||||
Ok(self.last_log.get_slice_satellite(data_slice))
|
||||
}
|
||||
|
||||
pub fn get_config_value_slice(&mut self, data_slice: &mut [u8; SAT_PAYLOAD_MAX_SIZE]) -> SliceMeta {
|
||||
self.last_value.get_slice_satellite(data_slice)
|
||||
}
|
||||
|
||||
pub fn add_config_data(&mut self, data: &[u8], data_len: usize) {
|
||||
self.config_payload.write_all(&data[..data_len]).unwrap();
|
||||
}
|
||||
|
||||
pub fn clear_config_data(&mut self) {
|
||||
self.config_payload.get_mut().clear();
|
||||
self.config_payload.set_position(0);
|
||||
}
|
||||
|
||||
pub fn write_config(&mut self) -> Result<(), drtioaux::Error<!>> {
|
||||
let key = match self.config_payload.read_string() {
|
||||
Ok(key) => key,
|
||||
Err(err) => {
|
||||
self.clear_config_data();
|
||||
error!("error on reading key: {:?}", err);
|
||||
return drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded: false });
|
||||
}
|
||||
};
|
||||
|
||||
let value = self.config_payload.read_bytes().unwrap();
|
||||
|
||||
let succeeded = config::write(&key, &value).map_err(|err| {
|
||||
error!("error on writing config: {:?}", err);
|
||||
}).is_ok();
|
||||
|
||||
self.clear_config_data();
|
||||
|
||||
drtioaux::send(0, &drtioaux::Packet::CoreMgmtReply { succeeded })
|
||||
}
|
||||
|
||||
pub fn allocate_image_buffer(&mut self, image_size: usize) {
|
||||
self.image_payload = Cursor::new(Vec::with_capacity(image_size));
|
||||
}
|
||||
|
||||
pub fn add_image_data(&mut self, data: &[u8], data_len: usize) {
|
||||
self.image_payload.write_all(&data[..data_len]).unwrap();
|
||||
}
|
||||
|
||||
pub fn flash_image(&self) {
|
||||
let image = &self.image_payload.get_ref()[..];
|
||||
|
||||
let (expected_crc, mut image) = {
|
||||
let (image, crc_slice) = image.split_at(image.len() - 4);
|
||||
(NativeEndian::read_u32(crc_slice), image)
|
||||
};
|
||||
|
||||
let actual_crc = crc32::checksum_ieee(image);
|
||||
|
||||
if actual_crc == expected_crc {
|
||||
let bin_origins = [
|
||||
("gateware" , 0 ),
|
||||
("bootloader", mem::ROM_BASE ),
|
||||
("firmware" , mem::FLASH_BOOT_ADDRESS),
|
||||
];
|
||||
|
||||
for (name, origin) in bin_origins {
|
||||
info!("flashing {} binary...", name);
|
||||
let size = NativeEndian::read_u32(&image[..4]) as usize;
|
||||
image = &image[4..];
|
||||
|
||||
let (bin, remaining) = image.split_at(size);
|
||||
image = remaining;
|
||||
|
||||
unsafe { spiflash::flash_binary(origin, bin) };
|
||||
}
|
||||
|
||||
} else {
|
||||
panic!("CRC failed, images have not been written to flash.\n(actual {:08x}, expected {:08x})", actual_crc, expected_crc);
|
||||
}
|
||||
}
|
||||
}
|
@ -4,6 +4,7 @@ use board_artiq::{drtioaux, drtio_routing};
|
||||
use board_misoc::csr;
|
||||
use core::cmp::min;
|
||||
use proto_artiq::drtioaux_proto::PayloadStatus;
|
||||
use SAT_PAYLOAD_MAX_SIZE;
|
||||
use MASTER_PAYLOAD_MAX_SIZE;
|
||||
|
||||
/* represents data that has to be sent with the aux protocol */
|
||||
@ -56,6 +57,7 @@ impl Sliceable {
|
||||
self.data.extend(data);
|
||||
}
|
||||
|
||||
get_slice_fn!(get_slice_satellite, SAT_PAYLOAD_MAX_SIZE);
|
||||
get_slice_fn!(get_slice_master, MASTER_PAYLOAD_MAX_SIZE);
|
||||
}
|
||||
|
||||
|
@ -79,13 +79,16 @@ class Client:
|
||||
self.writer.write((" ".join(command) + "\n").encode())
|
||||
|
||||
async def read_line(self):
|
||||
return (await self.reader.readline()).decode("ascii")
|
||||
line = (await self.reader.readline()).decode("ascii")
|
||||
if not line and self.reader.at_eof():
|
||||
raise ConnectionError("connection was closed unexpectedly")
|
||||
return line
|
||||
|
||||
async def read_reply(self):
|
||||
return (await self.reader.readline()).decode("ascii").split()
|
||||
return (await self.read_line()).split()
|
||||
|
||||
async def read_json(self):
|
||||
return json.loads((await self.reader.readline()).decode("ascii"))
|
||||
return json.loads((await self.read_line()))
|
||||
|
||||
async def login(self, username, password):
|
||||
await self.send_command("LOGIN", username, password)
|
||||
|
@ -25,6 +25,9 @@ def get_argparser():
|
||||
help="Simulation - does not connect to device")
|
||||
parser.add_argument("core_addr", metavar="CORE_ADDR",
|
||||
help="hostname or IP address of the core device")
|
||||
parser.add_argument("-s", "--drtio-dest", default=0,
|
||||
metavar="DRTIO_DEST", type=int,
|
||||
help="specifies the DRTIO destination")
|
||||
return parser
|
||||
|
||||
|
||||
@ -39,7 +42,7 @@ async def get_logs_sim(host):
|
||||
log_with_name("firmware.simulation", logging.INFO, "hello " + host)
|
||||
|
||||
|
||||
async def get_logs(host):
|
||||
async def get_logs(host, drtio_dest):
|
||||
try:
|
||||
reader, writer = await async_open_connection(
|
||||
host,
|
||||
@ -49,6 +52,7 @@ async def get_logs(host):
|
||||
max_fails=3,
|
||||
)
|
||||
writer.write(b"ARTIQ management\n")
|
||||
writer.write(drtio_dest.to_bytes(1))
|
||||
endian = await reader.readexactly(1)
|
||||
if endian == b"e":
|
||||
endian = "<"
|
||||
@ -96,7 +100,7 @@ def main():
|
||||
signal_handler.setup()
|
||||
try:
|
||||
get_logs_task = asyncio.ensure_future(
|
||||
get_logs_sim(args.core_addr) if args.simulation else get_logs(args.core_addr),
|
||||
get_logs_sim(args.core_addr) if args.simulation else get_logs(args.core_addr, args.drtio_dest),
|
||||
loop=loop)
|
||||
try:
|
||||
server = Server({"corelog": PingTarget()}, None, True)
|
||||
|
@ -75,7 +75,7 @@ def get_argparser():
|
||||
parser_add.add_argument("file", metavar="FILE",
|
||||
help="file containing the experiment to run")
|
||||
parser_add.add_argument("arguments", metavar="ARGUMENTS", nargs="*",
|
||||
help="run arguments")
|
||||
help="run arguments, use format KEY=VALUE")
|
||||
|
||||
parser_delete = subparsers.add_parser("delete",
|
||||
help="delete an experiment "
|
||||
|
@ -1,7 +1,10 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import struct
|
||||
import tempfile
|
||||
import atexit
|
||||
|
||||
from sipyco import common_args
|
||||
|
||||
@ -9,6 +12,7 @@ from artiq import __version__ as artiq_version
|
||||
from artiq.master.databases import DeviceDB
|
||||
from artiq.coredevice.comm_kernel import CommKernel
|
||||
from artiq.coredevice.comm_mgmt import CommMgmt
|
||||
from artiq.frontend.flash_tools import bit2bin, fetch_bin
|
||||
|
||||
|
||||
def get_argparser():
|
||||
@ -85,6 +89,20 @@ def get_argparser():
|
||||
t_boot = tools.add_parser("reboot",
|
||||
help="reboot the running system")
|
||||
|
||||
# flashing
|
||||
t_flash = tools.add_parser("flash",
|
||||
help="flash the running system")
|
||||
|
||||
p_directory = t_flash.add_argument("directory",
|
||||
metavar="DIRECTORY", type=str,
|
||||
help="directory that contains the "
|
||||
"binaries")
|
||||
|
||||
p_srcbuild = t_flash.add_argument("--srcbuild",
|
||||
help="board binaries directory is laid "
|
||||
"out as a source build tree",
|
||||
default=False, action="store_true")
|
||||
|
||||
# misc debug
|
||||
t_debug = tools.add_parser("debug",
|
||||
help="specialized debug functions")
|
||||
@ -95,6 +113,12 @@ def get_argparser():
|
||||
p_allocator = subparsers.add_parser("allocator",
|
||||
help="show heap layout")
|
||||
|
||||
# manage target
|
||||
p_drtio_dest = parser.add_argument("-s", "--drtio-dest", default=0,
|
||||
metavar="DRTIO_DEST", type=int,
|
||||
help="specify DRTIO destination that "
|
||||
"receives this command")
|
||||
|
||||
return parser
|
||||
|
||||
|
||||
@ -107,7 +131,7 @@ def main():
|
||||
core_addr = ddb.get("core", resolve_alias=True)["arguments"]["host"]
|
||||
else:
|
||||
core_addr = args.device
|
||||
mgmt = CommMgmt(core_addr)
|
||||
mgmt = CommMgmt(core_addr, drtio_dest=args.drtio_dest)
|
||||
|
||||
if args.tool == "log":
|
||||
if args.action == "set_level":
|
||||
@ -138,6 +162,39 @@ def main():
|
||||
if args.action == "erase":
|
||||
mgmt.config_erase()
|
||||
|
||||
if args.tool == "flash":
|
||||
retrieved_bins = []
|
||||
bin_dict = {
|
||||
"zynq":[
|
||||
["boot"]
|
||||
],
|
||||
"riscv": [
|
||||
["gateware"],
|
||||
["bootloader"],
|
||||
["runtime", "satman"],
|
||||
],
|
||||
}
|
||||
|
||||
for bin_list in bin_dict.values():
|
||||
try:
|
||||
bins = []
|
||||
for bin_name in bin_list:
|
||||
bins.append(fetch_bin(
|
||||
args.directory, bin_name, args.srcbuild))
|
||||
retrieved_bins.append(bins)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
if retrieved_bins is None:
|
||||
raise FileNotFoundError("neither risc-v nor zynq binaries were found")
|
||||
|
||||
if len(retrieved_bins) > 1:
|
||||
raise ValueError("both risc-v and zynq binaries were found, "
|
||||
"please clean up your build directory. ")
|
||||
|
||||
bins = retrieved_bins[0]
|
||||
mgmt.flash(bins)
|
||||
|
||||
if args.tool == "reboot":
|
||||
mgmt.reboot()
|
||||
|
||||
|
@ -14,7 +14,7 @@ from sipyco import common_args
|
||||
|
||||
from artiq import __version__ as artiq_version
|
||||
from artiq.remoting import SSHClient, LocalClient
|
||||
from artiq.frontend.bit2bin import bit2bin
|
||||
from artiq.frontend.flash_tools import artifact_path, bit2bin, fetch_bin
|
||||
|
||||
|
||||
def get_argparser():
|
||||
@ -302,46 +302,19 @@ def main():
|
||||
|
||||
programmer = config["programmer"](client, preinit_script=args.preinit_command)
|
||||
|
||||
def artifact_path(this_binary_dir, *path_filename):
|
||||
if args.srcbuild:
|
||||
# source tree - use path elements to locate file
|
||||
return os.path.join(this_binary_dir, *path_filename)
|
||||
else:
|
||||
# flat tree - all files in the same directory, discard path elements
|
||||
*_, filename = path_filename
|
||||
return os.path.join(this_binary_dir, filename)
|
||||
|
||||
def convert_gateware(bit_filename):
|
||||
bin_handle, bin_filename = tempfile.mkstemp(
|
||||
prefix="artiq_", suffix="_" + os.path.basename(bit_filename))
|
||||
with open(bit_filename, "rb") as bit_file, open(bin_handle, "wb") as bin_file:
|
||||
bit2bin(bit_file, bin_file)
|
||||
atexit.register(lambda: os.unlink(bin_filename))
|
||||
return bin_filename
|
||||
|
||||
for action in args.action:
|
||||
if action == "gateware":
|
||||
gateware_bin = convert_gateware(
|
||||
artifact_path(binary_dir, "gateware", "top.bit"))
|
||||
gateware_bin = fetch_bin(binary_dir, ["gateware"], args.srcbuild)
|
||||
programmer.write_binary(*config["gateware"], gateware_bin)
|
||||
elif action == "bootloader":
|
||||
bootloader_bin = artifact_path(binary_dir, "software", "bootloader", "bootloader.bin")
|
||||
bootloader_bin = fetch_bin(binary_dir, ["bootloader"], args.srcbuild)
|
||||
programmer.write_binary(*config["bootloader"], bootloader_bin)
|
||||
elif action == "storage":
|
||||
storage_img = args.storage
|
||||
programmer.write_binary(*config["storage"], storage_img)
|
||||
elif action == "firmware":
|
||||
firmware_fbis = []
|
||||
for firmware in "satman", "runtime":
|
||||
filename = artifact_path(binary_dir, "software", firmware, firmware + ".fbi")
|
||||
if os.path.exists(filename):
|
||||
firmware_fbis.append(filename)
|
||||
if not firmware_fbis:
|
||||
raise FileNotFoundError("no firmware found")
|
||||
if len(firmware_fbis) > 1:
|
||||
raise ValueError("more than one firmware file, please clean up your build directory. "
|
||||
"Found firmware files: {}".format(" ".join(firmware_fbis)))
|
||||
programmer.write_binary(*config["firmware"], firmware_fbis[0])
|
||||
firmware_fbi = fetch_bin(binary_dir, ["satman", "runtime"], args.srcbuild)
|
||||
programmer.write_binary(*config["firmware"], firmware_fbi)
|
||||
elif action == "load":
|
||||
gateware_bit = artifact_path(binary_dir, "gateware", "top.bit")
|
||||
programmer.load(gateware_bit, 0)
|
||||
|
@ -116,7 +116,7 @@ def get_argparser(with_file=True):
|
||||
parser.add_argument("file", metavar="FILE",
|
||||
help="file containing the experiment to run")
|
||||
parser.add_argument("arguments", metavar="ARGUMENTS", nargs="*",
|
||||
help="run arguments")
|
||||
help="run arguments, use format KEY=VALUE")
|
||||
|
||||
return parser
|
||||
|
||||
|
@ -1,69 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright 2014-2017 Robert Jordens <jordens@gmail.com>
|
||||
# after
|
||||
# https://github.com/mfischer/fpgadev-zynq/blob/master/top/python/bit_to_zynq_bin.py
|
||||
|
||||
import struct
|
||||
|
||||
|
||||
def flip32(data):
|
||||
sl = struct.Struct("<I")
|
||||
sb = struct.Struct(">I")
|
||||
b = memoryview(data)
|
||||
d = bytearray(len(data))
|
||||
for offset in range(0, len(data), sl.size):
|
||||
sb.pack_into(d, offset, *sl.unpack_from(b, offset))
|
||||
return d
|
||||
|
||||
|
||||
def bit2bin(bit, bin, flip=False):
|
||||
l, = struct.unpack(">H", bit.read(2))
|
||||
if l != 9:
|
||||
raise ValueError("Missing <0009> header, not a bit file")
|
||||
_ = bit.read(l) # unknown data
|
||||
l, = struct.unpack(">H", bit.read(2))
|
||||
if l != 1:
|
||||
raise ValueError("Missing <0001> header, not a bit file")
|
||||
|
||||
while True:
|
||||
key = bit.read(1).decode()
|
||||
if not key:
|
||||
break
|
||||
if key in "abcd":
|
||||
d = bit.read(*struct.unpack(">H", bit.read(2)))
|
||||
assert d.endswith(b"\x00")
|
||||
d = d[:-1].decode()
|
||||
name = {
|
||||
"a": "Design",
|
||||
"b": "Part name",
|
||||
"c": "Date",
|
||||
"d": "Time"
|
||||
}[key]
|
||||
print("{}: {}".format(name, d))
|
||||
elif key == "e":
|
||||
l, = struct.unpack(">I", bit.read(4))
|
||||
print("Bitstream payload length: {:#x}".format(l))
|
||||
d = bit.read(l)
|
||||
if flip:
|
||||
d = flip32(d)
|
||||
bin.write(d)
|
||||
else:
|
||||
d = bit.read(*struct.unpack(">H", bit.read(2)))
|
||||
print("Unexpected key: {}: {}".format(key, d))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Convert FPGA bit files to raw bin format "
|
||||
"suitable for flashing")
|
||||
parser.add_argument("-f", "--flip", dest="flip", action="store_true",
|
||||
default=False, help="Flip 32-bit endianess (needed for Zynq)")
|
||||
parser.add_argument("bitfile", metavar="BITFILE",
|
||||
help="Input bit file name")
|
||||
parser.add_argument("binfile", metavar="BINFILE",
|
||||
help="Output bin file name")
|
||||
args = parser.parse_args()
|
||||
|
||||
with open(args.bitfile, "rb") as f, open(args.binfile, "wb") as g:
|
||||
bit2bin(f, g, args.flip)
|
112
artiq/frontend/flash_tools.py
Normal file
112
artiq/frontend/flash_tools.py
Normal file
@ -0,0 +1,112 @@
|
||||
import atexit
|
||||
import os
|
||||
import tempfile
|
||||
import struct
|
||||
|
||||
|
||||
def artifact_path(this_binary_dir, *path_filename, srcbuild=False):
|
||||
if srcbuild:
|
||||
# source tree - use path elements to locate file
|
||||
return os.path.join(this_binary_dir, *path_filename)
|
||||
else:
|
||||
# flat tree - all files in the same directory, discard path elements
|
||||
*_, filename = path_filename
|
||||
return os.path.join(this_binary_dir, filename)
|
||||
|
||||
|
||||
def fetch_bin(binary_dir, components, srcbuild=False):
|
||||
def convert_gateware(bit_filename):
|
||||
bin_handle, bin_filename = tempfile.mkstemp(
|
||||
prefix="artiq_", suffix="_" + os.path.basename(bit_filename))
|
||||
with open(bit_filename, "rb") as bit_file, open(bin_handle, "wb") as bin_file:
|
||||
bit2bin(bit_file, bin_file)
|
||||
atexit.register(lambda: os.unlink(bin_filename))
|
||||
return bin_filename
|
||||
|
||||
if len(components) > 1:
|
||||
bins = []
|
||||
for option in components:
|
||||
try:
|
||||
bins.append(fetch_bin(binary_dir, [option], srcbuild))
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
if bins is None:
|
||||
raise FileNotFoundError("multiple components not found: {}".format(
|
||||
" ".join(components)))
|
||||
|
||||
if len(bins) > 1:
|
||||
raise ValueError("more than one file, "
|
||||
"please clean up your build directory. "
|
||||
"Found files: {}".format(
|
||||
" ".join(bins)))
|
||||
|
||||
return bins[0]
|
||||
|
||||
else:
|
||||
component = components[0]
|
||||
path = artifact_path(binary_dir, *{
|
||||
"gateware": ["gateware", "top.bit"],
|
||||
"boot": ["boot.bin"],
|
||||
"bootloader": ["software", "bootloader", "bootloader.bin"],
|
||||
"runtime": ["software", "runtime", "runtime.fbi"],
|
||||
"satman": ["software", "satman", "satman.fbi"],
|
||||
}[component], srcbuild=srcbuild)
|
||||
|
||||
if not os.path.exists(path):
|
||||
raise FileNotFoundError("{} not found".format(component))
|
||||
|
||||
if component == "gateware":
|
||||
path = convert_gateware(path)
|
||||
|
||||
return path
|
||||
|
||||
|
||||
# Copyright 2014-2017 Robert Jordens <jordens@gmail.com>
|
||||
# after
|
||||
# https://github.com/mfischer/fpgadev-zynq/blob/master/top/python/bit_to_zynq_bin.py
|
||||
|
||||
def flip32(data):
|
||||
sl = struct.Struct("<I")
|
||||
sb = struct.Struct(">I")
|
||||
b = memoryview(data)
|
||||
d = bytearray(len(data))
|
||||
for offset in range(0, len(data), sl.size):
|
||||
sb.pack_into(d, offset, *sl.unpack_from(b, offset))
|
||||
return d
|
||||
|
||||
|
||||
def bit2bin(bit, bin, flip=False):
|
||||
l, = struct.unpack(">H", bit.read(2))
|
||||
if l != 9:
|
||||
raise ValueError("Missing <0009> header, not a bit file")
|
||||
_ = bit.read(l) # unknown data
|
||||
l, = struct.unpack(">H", bit.read(2))
|
||||
if l != 1:
|
||||
raise ValueError("Missing <0001> header, not a bit file")
|
||||
|
||||
while True:
|
||||
key = bit.read(1).decode()
|
||||
if not key:
|
||||
break
|
||||
if key in "abcd":
|
||||
d = bit.read(*struct.unpack(">H", bit.read(2)))
|
||||
assert d.endswith(b"\x00")
|
||||
d = d[:-1].decode()
|
||||
name = {
|
||||
"a": "Design",
|
||||
"b": "Part name",
|
||||
"c": "Date",
|
||||
"d": "Time"
|
||||
}[key]
|
||||
print("{}: {}".format(name, d))
|
||||
elif key == "e":
|
||||
l, = struct.unpack(">I", bit.read(4))
|
||||
print("Bitstream payload length: {:#x}".format(l))
|
||||
d = bit.read(l)
|
||||
if flip:
|
||||
d = flip32(d)
|
||||
bin.write(d)
|
||||
else:
|
||||
d = bit.read(*struct.unpack(">H", bit.read(2)))
|
||||
print("Unexpected key: {}: {}".format(key, d))
|
@ -398,7 +398,7 @@ class _CompleterDelegate(QtWidgets.QStyledItemDelegate):
|
||||
# case, but causes unnecessary flickering and trashing of the user
|
||||
# selection when datasets are modified due to Qt's naive handler.
|
||||
# Doing this is of course convoluted due to Qt's arrogance
|
||||
# about private fields and not letting users knows what
|
||||
# about private fields and not letting users know what
|
||||
# slots are connected to signals, but thanks to the complicated
|
||||
# model system there is a short dirty hack in this particular case.
|
||||
nodatachanged_model = QtCore.QIdentityProxyModel()
|
||||
|
@ -151,15 +151,8 @@
|
||||
inkscape:connector-curvature="0" /></g><path
|
||||
d="M 28.084,368.98 0,429.872 v 1.124 h 14.16 l 4.202,-8.945 H 43.57 l 4.195,8.945 h 14.16 v -1.124 L 33.753,368.98 Z m -5.438,41.259 8.215,-19.134 8.424,19.134 z"
|
||||
id="path493"
|
||||
style="fill:#ffffff;fill-opacity:1"
|
||||
inkscape:connector-curvature="0" /><g
|
||||
id="text3371"
|
||||
style="font-style:normal;font-variant:normal;font-weight:600;font-stretch:expanded;font-size:45px;line-height:125%;font-family:'Novecento sans wide';-inkscape-font-specification:'Novecento sans wide, Semi-Bold Expanded';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"><g
|
||||
transform="translate(-1.7346398,0.84745763)"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:96px;line-height:25px;font-family:'Droid Sans Thai';-inkscape-font-specification:'Droid Sans Thai, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none"
|
||||
id="text861"
|
||||
aria-label="7"><path
|
||||
style="font-size:53.3333px;line-height:13.8889px;font-family:Intro;-inkscape-font-specification:Intro;fill:#ffffff"
|
||||
d="m 325.20597,358.80118 c 5.38667,-5.86667 2.18667,-16.74666 -9.43999,-16.74666 -11.62666,0 -14.87999,10.77333 -9.44,16.69332 -8.90666,6.4 -5.75999,21.91999 9.44,21.91999 15.09332,0 18.23999,-15.41332 9.43999,-21.86665 z m -9.43999,13.81332 c -6.4,0 -6.4,-9.27999 0,-9.27999 6.45333,0 6.45333,9.27999 0,9.27999 z m 0,-17.06665 c -4.64,0 -4.64,-5.97333 0,-5.97333 4.58666,0 4.58666,5.97333 0,5.97333 z"
|
||||
id="text248"
|
||||
aria-label="8" /></g> </g></svg>
|
||||
style="fill:#ffffff;fill-opacity:1" /><path
|
||||
d="m 313.73062,383.49498 10.87999,-18.07999 c 1.49333,-2.18667 1.97333,-4.8 1.97333,-7.36 0,-8.79999 -6.18666,-12.79999 -14.34666,-12.79999 -8.21332,0 -14.18665,4 -14.18665,12.79999 0,7.09333 5.11999,12.53333 13.01332,11.94666 l -7.94666,13.49333 z m -1.49334,-30.07999 c 2.93334,0 4.69333,2.02667 4.69333,4.64 0,2.88 -1.59999,4.69333 -4.69333,4.69333 -2.82666,0 -4.69333,-1.97333 -4.69333,-4.69333 0,-2.61333 1.86667,-4.64 4.69333,-4.64 z"
|
||||
id="text1"
|
||||
style="font-size:53.3333px;font-family:Intro;-inkscape-font-specification:Intro;fill:#ffffff"
|
||||
aria-label="9" /></svg>
|
||||
|
Before Width: | Height: | Size: 15 KiB After Width: | Height: | Size: 14 KiB |
@ -84,20 +84,20 @@ class DictSyncModel(QtCore.QAbstractTableModel):
|
||||
key=lambda k: self.sort_key(k, self.backing_store[k]))
|
||||
QtCore.QAbstractTableModel.__init__(self)
|
||||
|
||||
def rowCount(self, parent):
|
||||
def rowCount(self, parent=QtCore.QModelIndex()):
|
||||
return len(self.backing_store)
|
||||
|
||||
def columnCount(self, parent):
|
||||
def columnCount(self, parent=QtCore.QModelIndex()):
|
||||
return len(self.headers)
|
||||
|
||||
def data(self, index, role):
|
||||
def data(self, index, role=QtCore.Qt.ItemDataRole.DisplayRole):
|
||||
if not index.isValid() or role != QtCore.Qt.ItemDataRole.DisplayRole:
|
||||
return None
|
||||
else:
|
||||
k = self.row_to_key[index.row()]
|
||||
return self.convert(k, self.backing_store[k], index.column())
|
||||
|
||||
def headerData(self, col, orientation, role):
|
||||
def headerData(self, col, orientation, role=QtCore.Qt.ItemDataRole.DisplayRole):
|
||||
if (orientation == QtCore.Qt.Orientation.Horizontal and
|
||||
role == QtCore.Qt.ItemDataRole.DisplayRole):
|
||||
return self.headers[col]
|
||||
|
@ -1,7 +1,7 @@
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
from PyQt6 import QtCore, QtWidgets
|
||||
from PyQt6 import QtCore, QtGui, QtWidgets
|
||||
|
||||
|
||||
class DoubleClickLineEdit(QtWidgets.QLineEdit):
|
||||
@ -88,6 +88,51 @@ class LayoutWidget(QtWidgets.QWidget):
|
||||
self.layout.addWidget(item, row, col, rowspan, colspan)
|
||||
|
||||
|
||||
class SelectableColumnTableView(QtWidgets.QTableView):
|
||||
"""A QTableView packaged up with a header row context menu that allows users to
|
||||
show/hide columns using checkable entries.
|
||||
|
||||
By default, all columns are shown. If only one shown column remains, the entry is
|
||||
disabled to prevent a situation where no columns are shown, which might be confusing
|
||||
to the user.
|
||||
|
||||
Qt considers whether columns are shown to be part of the header state, i.e. it is
|
||||
included in saveState()/restoreState().
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
self.horizontalHeader().setContextMenuPolicy(
|
||||
QtCore.Qt.ContextMenuPolicy.CustomContextMenu)
|
||||
self.horizontalHeader().customContextMenuRequested.connect(
|
||||
self.show_header_context_menu)
|
||||
|
||||
def show_header_context_menu(self, pos):
|
||||
menu = QtWidgets.QMenu(self)
|
||||
|
||||
num_columns_total = self.model().columnCount()
|
||||
num_columns_shown = sum(
|
||||
(not self.isColumnHidden(i)) for i in range(num_columns_total))
|
||||
for i in range(num_columns_total):
|
||||
name = self.model().headerData(i, QtCore.Qt.Orientation.Horizontal)
|
||||
action = QtGui.QAction(name, self)
|
||||
action.setCheckable(True)
|
||||
|
||||
is_currently_hidden = self.isColumnHidden(i)
|
||||
action.setChecked(not is_currently_hidden)
|
||||
if not is_currently_hidden:
|
||||
if num_columns_shown == 1:
|
||||
# Don't allow hiding of the last visible column.
|
||||
action.setEnabled(False)
|
||||
|
||||
action.triggered.connect(
|
||||
lambda checked, i=i: self.setColumnHidden(i, not checked))
|
||||
menu.addAction(action)
|
||||
|
||||
menu.exec(self.horizontalHeader().mapToGlobal(pos))
|
||||
|
||||
|
||||
async def get_open_file_name(parent, caption, dir, filter):
|
||||
"""like QtWidgets.QFileDialog.getOpenFileName(), but a coroutine"""
|
||||
dialog = QtWidgets.QFileDialog(parent, caption, dir, filter)
|
||||
|
321
artiq/test/coredevice/test_ad9834.py
Normal file
321
artiq/test/coredevice/test_ad9834.py
Normal file
@ -0,0 +1,321 @@
|
||||
from artiq.coredevice.ad9834 import (
|
||||
AD9834_B28,
|
||||
AD9834_DIV2,
|
||||
AD9834_FSEL,
|
||||
AD9834_HLB,
|
||||
AD9834_MODE,
|
||||
AD9834_OPBITEN,
|
||||
AD9834_PIN_SW,
|
||||
AD9834_PSEL,
|
||||
AD9834_RESET,
|
||||
AD9834_SIGN_PIB,
|
||||
AD9834_SLEEP1,
|
||||
AD9834_SLEEP12,
|
||||
FREQ_REGS,
|
||||
PHASE_REGS,
|
||||
)
|
||||
from artiq.experiment import *
|
||||
from artiq.language.units import MHz
|
||||
from artiq.test.hardware_testbench import ExperimentCase
|
||||
|
||||
|
||||
class AD9834Exp(EnvExperiment):
|
||||
def build(self, runner):
|
||||
self.setattr_device("core")
|
||||
self.dev = self.get_device("dds0")
|
||||
self.runner = runner
|
||||
|
||||
def run(self):
|
||||
getattr(self, self.runner)()
|
||||
|
||||
@kernel
|
||||
def instantiate(self):
|
||||
pass
|
||||
|
||||
@kernel
|
||||
def init(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.init()
|
||||
self.set_dataset("spi_freq", self.dev.spi_freq)
|
||||
self.set_dataset("clk_freq", self.dev.clk_freq)
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def set_frequency_reg_fail1(self):
|
||||
self.core.break_realtime()
|
||||
frequency = 10 * MHz
|
||||
self.dev.set_frequency_reg(19, frequency)
|
||||
|
||||
@kernel
|
||||
def set_frequency_reg_fail2(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.set_frequency_reg(FREQ_REGS[0], 37.6 * MHz)
|
||||
|
||||
@kernel
|
||||
def set_frequency_reg(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.init()
|
||||
self.dev.set_frequency_reg(FREQ_REGS[1], 19 * MHz)
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def set_frequency_reg_msb(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.init()
|
||||
self.dev.ctrl_reg |= AD9834_B28
|
||||
self.dev.set_frequency_reg_msb(FREQ_REGS[0], 0x1111)
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def set_frequency_reg_lsb(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.init()
|
||||
self.dev.ctrl_reg |= AD9834_B28 | AD9834_HLB
|
||||
self.dev.set_frequency_reg_lsb(FREQ_REGS[1], 0xFFFF)
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def select_frequency_reg_0(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.ctrl_reg |= AD9834_FSEL | AD9834_PIN_SW
|
||||
self.dev.select_frequency_reg(FREQ_REGS[0])
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def select_frequency_reg_1(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.ctrl_reg |= AD9834_PIN_SW
|
||||
self.dev.select_frequency_reg(FREQ_REGS[1])
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def set_phase_reg_fail(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.set_phase_reg(19, 0x123)
|
||||
|
||||
@kernel
|
||||
def set_phase_reg(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.init()
|
||||
self.dev.set_phase_reg(PHASE_REGS[0], 0x123)
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def select_phase_reg_0(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.ctrl_reg |= AD9834_PSEL | AD9834_PIN_SW
|
||||
self.dev.select_phase_reg(PHASE_REGS[0])
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def select_phase_reg_1(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.ctrl_reg |= AD9834_PIN_SW
|
||||
self.dev.select_phase_reg(PHASE_REGS[1])
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def enable_reset(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.enable_reset()
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def output_enable(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.ctrl_reg |= AD9834_RESET
|
||||
self.dev.output_enable()
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def sleep_dac_powerdown(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.sleep(dac_pd=True)
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def sleep_internal_clk_disable(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.sleep(clk_dis=True)
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def sleep(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.sleep(dac_pd=True, clk_dis=True)
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def awake(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.ctrl_reg |= AD9834_SLEEP1 | AD9834_SLEEP12
|
||||
self.dev.sleep()
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def sign_bit_high_z(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.ctrl_reg |= AD9834_OPBITEN
|
||||
self.dev.config_sign_bit_out()
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def sign_bit_msb_2(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.ctrl_reg |= AD9834_MODE | AD9834_SIGN_PIB | AD9834_DIV2
|
||||
self.dev.config_sign_bit_out(msb_2=True)
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def sign_bit_msb(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.ctrl_reg |= AD9834_MODE | AD9834_SIGN_PIB
|
||||
self.dev.config_sign_bit_out(msb=True)
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def sign_bit_comp_out(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.ctrl_reg |= AD9834_MODE
|
||||
self.dev.config_sign_bit_out(comp_out=True)
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def enable_triangular_waveform(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.ctrl_reg |= AD9834_OPBITEN
|
||||
self.dev.enable_triangular_waveform()
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
@kernel
|
||||
def disable_triangular_waveform(self):
|
||||
self.core.break_realtime()
|
||||
self.dev.ctrl_reg |= AD9834_MODE
|
||||
self.dev.disable_triangular_waveform()
|
||||
self.set_dataset("ctrl_reg", self.dev.ctrl_reg)
|
||||
|
||||
|
||||
class AD9834Test(ExperimentCase):
|
||||
def test_instantiate(self):
|
||||
self.execute(AD9834Exp, "instantiate")
|
||||
|
||||
def test_init(self):
|
||||
self.execute(AD9834Exp, "init")
|
||||
spi_freq = self.dataset_mgr.get("spi_freq")
|
||||
clk_freq = self.dataset_mgr.get("clk_freq")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(spi_freq, 10 * MHz)
|
||||
self.assertEqual(clk_freq, 75 * MHz)
|
||||
self.assertEqual(ctrl_reg, 0x0000 | AD9834_RESET)
|
||||
|
||||
def test_set_frequency_reg_fail(self):
|
||||
with self.assertRaises(ValueError):
|
||||
self.execute(AD9834Exp, "set_frequency_reg_fail1")
|
||||
with self.assertRaises(AssertionError):
|
||||
self.execute(AD9834Exp, "set_frequency_reg_fail2")
|
||||
|
||||
def test_set_frequency_reg(self):
|
||||
self.execute(AD9834Exp, "set_frequency_reg")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000 | AD9834_RESET | AD9834_B28)
|
||||
|
||||
def test_set_frequency_reg_msb(self):
|
||||
self.execute(AD9834Exp, "set_frequency_reg_msb")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000 | AD9834_RESET | AD9834_HLB)
|
||||
|
||||
def test_set_frequency_reg_lsb(self):
|
||||
self.execute(AD9834Exp, "set_frequency_reg_lsb")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000 | AD9834_RESET)
|
||||
|
||||
def test_select_frequency_reg_0(self):
|
||||
self.execute(AD9834Exp, "select_frequency_reg_0")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000)
|
||||
|
||||
def test_select_frequency_reg_1(self):
|
||||
self.execute(AD9834Exp, "select_frequency_reg_1")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000 | AD9834_FSEL)
|
||||
|
||||
def test_set_phase_reg_fail(self):
|
||||
with self.assertRaises(ValueError):
|
||||
self.execute(AD9834Exp, "set_phase_reg_fail")
|
||||
|
||||
def test_set_phase_reg(self):
|
||||
self.execute(AD9834Exp, "set_phase_reg")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000 | AD9834_RESET)
|
||||
|
||||
def test_select_phase_reg_0(self):
|
||||
self.execute(AD9834Exp, "select_phase_reg_0")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000)
|
||||
|
||||
def test_select_phase_reg_1(self):
|
||||
self.execute(AD9834Exp, "select_phase_reg_1")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000 | AD9834_PSEL)
|
||||
|
||||
def test_enable_reset(self):
|
||||
self.execute(AD9834Exp, "enable_reset")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000 | AD9834_RESET)
|
||||
|
||||
def test_output_enable(self):
|
||||
self.execute(AD9834Exp, "output_enable")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000)
|
||||
|
||||
def test_sleep_dac_powerdown(self):
|
||||
self.execute(AD9834Exp, "sleep_dac_powerdown")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000 | AD9834_SLEEP12)
|
||||
|
||||
def test_sleep_internal_clk_disable(self):
|
||||
self.execute(AD9834Exp, "sleep_internal_clk_disable")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000 | AD9834_SLEEP1)
|
||||
|
||||
def test_sleep(self):
|
||||
self.execute(AD9834Exp, "sleep")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000 | AD9834_SLEEP1 | AD9834_SLEEP12)
|
||||
|
||||
def test_awake(self):
|
||||
self.execute(AD9834Exp, "awake")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000)
|
||||
|
||||
def test_sign_bit_high_z(self):
|
||||
self.execute(AD9834Exp, "sign_bit_high_z")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000)
|
||||
|
||||
def test_sign_bit_msb_2(self):
|
||||
self.execute(AD9834Exp, "sign_bit_msb_2")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000 | AD9834_OPBITEN)
|
||||
|
||||
def test_sign_bit_msb(self):
|
||||
self.execute(AD9834Exp, "sign_bit_msb")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000 | AD9834_OPBITEN | AD9834_DIV2)
|
||||
|
||||
def test_sign_bit_comp_out(self):
|
||||
self.execute(AD9834Exp, "sign_bit_comp_out")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(
|
||||
ctrl_reg, 0x0000 | AD9834_OPBITEN | AD9834_SIGN_PIB | AD9834_DIV2
|
||||
)
|
||||
|
||||
def test_enble_triangular_waveform(self):
|
||||
self.execute(AD9834Exp, "enable_triangular_waveform")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000 | AD9834_MODE)
|
||||
|
||||
def test_disble_triangular_waveform(self):
|
||||
self.execute(AD9834Exp, "disable_triangular_waveform")
|
||||
ctrl_reg = self.dataset_mgr.get("ctrl_reg")
|
||||
self.assertEqual(ctrl_reg, 0x0000)
|
19
artiq/test/lit/exceptions/finally_catch_try.py
Normal file
19
artiq/test/lit/exceptions/finally_catch_try.py
Normal file
@ -0,0 +1,19 @@
|
||||
# RUN: %python -m artiq.compiler.testbench.jit %s >%t
|
||||
# RUN: OutputCheck %s --file-to-check=%t
|
||||
# REQUIRES: exceptions
|
||||
|
||||
def doit():
|
||||
try:
|
||||
try:
|
||||
raise RuntimeError("Error")
|
||||
except ValueError:
|
||||
print("ValueError")
|
||||
except RuntimeError:
|
||||
print("Caught")
|
||||
finally:
|
||||
print("Cleanup")
|
||||
|
||||
doit()
|
||||
|
||||
# CHECK-L: Caught
|
||||
# CHECK-NEXT-L: Cleanup
|
20
artiq/test/lit/exceptions/finally_try.py
Normal file
20
artiq/test/lit/exceptions/finally_try.py
Normal file
@ -0,0 +1,20 @@
|
||||
# RUN: %python -m artiq.compiler.testbench.jit %s >%t
|
||||
# RUN: OutputCheck %s --file-to-check=%t
|
||||
# REQUIRES: exceptions
|
||||
|
||||
def doit():
|
||||
try:
|
||||
try:
|
||||
raise RuntimeError("Error")
|
||||
except ValueError:
|
||||
print("ValueError")
|
||||
finally:
|
||||
print("Cleanup")
|
||||
|
||||
try:
|
||||
doit()
|
||||
except RuntimeError:
|
||||
print("Caught")
|
||||
|
||||
# CHECK-L: Cleanup
|
||||
# CHECK-NEXT-L: Caught
|
@ -61,18 +61,6 @@
|
||||
id="path319"
|
||||
d="m 69.975,234.695 c -3.226,0.31 -6.272,0.602 -9.352,0.754 -1.868,0.093 -3.594,0.139 -5.278,0.139 -7.127,0 -13.339,-0.867 -18.903,-2.646 -0.794,-0.254 -1.576,-0.526 -2.345,-0.817 -11.329,-4.29 -16.078,-12.875 -13.733,-24.827 2.135,-10.872 7.632,-19.988 13.253,-28.221 1.117,-1.634 2.315,-3.259 3.474,-4.83 0.454,-0.616 0.909,-1.233 1.364,-1.857 L 28.098,162.386 c -7.526,9.307 -16.644,21.933 -20.824,37.338 -3.192,11.767 -2.23,22.453 2.783,30.906 5.009,8.446 13.909,14.409 25.738,17.245 6.106,1.465 12.57,2.177 19.76,2.177 3.754,-0.001 7.687,-0.192 12.023,-0.588 2.495,-0.227 4.928,-0.557 7.504,-0.906 0.973,-0.132 1.95,-0.265 2.934,-0.392 l -3.897,-13.857 c -1.413,0.124 -2.791,0.256 -4.144,0.386 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path321"
|
||||
d="m 55.345,235.588 c 1.684,0 3.41,-0.046 5.278,-0.139 -1.868,0.093 -3.594,0.139 -5.278,0.139 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path323"
|
||||
d="m 36.442,232.942 c 5.564,1.778 11.776,2.646 18.903,2.646 -7.127,0 -13.339,-0.867 -18.903,-2.646 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path325"
|
||||
d="m 55.555,250.052 c -7.19,0 -13.654,-0.712 -19.76,-2.177 6.106,1.465 12.57,2.177 19.76,2.177 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path327"
|
||||
d="m 67.578,249.464 c -4.336,0.396 -8.269,0.587 -12.023,0.588 3.754,0 7.686,-0.193 12.023,-0.588 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path329"
|
||||
d="m 180.738,111.633 c -0.625,-4.189 -1.226,-8.218 -1.868,-12.238 -0.325,-2.036 -5.861,-6.224 -8.227,-6.224 -0.157,0 -0.292,0.02 -0.402,0.058 -4.172,1.46 -8.243,3.096 -12.552,4.827 -1.419,0.57 -2.854,1.146 -4.316,1.727 l 28,16.088 -0.635,-4.238 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
@ -82,9 +70,6 @@
|
||||
id="path333"
|
||||
d="m 116.53,81.312 0.102,-0.053 c 3.387,-1.754 6.785,-3.483 10.385,-5.316 l 4.048,-2.062 -17.233,-6.35 -3.985,13.866 c 1.804,0.81 2.685,1.17 3.452,1.17 0.585,0 1.174,-0.223 2.061,-0.658 0.341,-0.168 0.722,-0.364 1.17,-0.597 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path335"
|
||||
d="m 115.359,81.909 c 0.341,-0.168 0.723,-0.364 1.172,-0.597 l 0.101,-0.053 -0.102,0.053 c -0.448,0.233 -0.829,0.429 -1.171,0.597 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path337"
|
||||
d="M 231.324,171.353 221.01,161.63 c -0.9,2.513 -2.059,14.3 -1.457,19.737 l 11.771,-10.014 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
@ -103,11 +88,11 @@
|
||||
d="m -688.687,-1452.053 -28.083,60.892 0,1.124 14.16,0 4.202,-8.945 25.208,0 4.195,8.945 14.16,0 0,-1.124 -28.173,-60.892 -5.669,0 z m -5.437,41.26 8.215,-19.134 8.424,19.134 -16.639,0 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path349"
|
||||
d="m -601.487,-1412.259 c 7.498,-5.083 10.755,-15.119 7.922,-24.407 -2.772,-9.089 -10.521,-14.536 -20.727,-14.573 l -26.715,0 0,61.202 14.157,0 0,-18.602 9.978,0 10.836,18.602 16.479,0 0,-1.06 -12.726,-20.623 0.796,-0.539 z m -12.802,-8.73 -12.562,0 0,-17.809 1.003,0 c 1.255,0 2.529,-0.01 3.811,-0.02 2.599,-0.021 5.226,-0.041 7.771,0.02 5.329,0.052 7.74,4.621 7.719,8.845 -0.02,4.454 -2.687,8.964 -7.742,8.964 z" /><polygon
|
||||
d="m -601.487,-1412.259 c 7.498,-5.083 10.755,-15.119 7.922,-24.407 -2.772,-9.089 -10.521,-14.536 -20.727,-14.573 l -26.715,0 0,61.202 14.157,0 0,-18.602 9.978,0 10.836,18.602 16.479,0 0,-1.06 -12.726,-20.623 0.796,-0.539 z m -12.802,-8.73 -12.562,0 0,-17.809 1.003,0 c 1.255,0 2.529,-0.01 3.811,-0.02 2.599,-0.021 5.226,-0.041 7.771,0.02 5.329,0.052 7.74,4.621 7.719,8.845 -0.02,4.454 -2.687,8.964 -7.742,8.964 z" /><path
|
||||
id="polygon351"
|
||||
points="-564.883,-1438.798 -564.883,-1390.037 -550.906,-1390.037 -550.906,-1438.798 -535.466,-1438.798 -535.466,-1451.239 -580.414,-1451.239 -580.414,-1438.798 " /><polygon
|
||||
d="m -564.883,-1438.798 v 48.761 h 13.977 v -48.761 h 15.44 v -12.441 h -44.948 v 12.441 z" /><path
|
||||
id="polygon353"
|
||||
points="-524.427,-1451.239 -524.427,-1439.159 -517.022,-1439.159 -517.022,-1402.208 -525.059,-1402.208 -525.059,-1390.037 -495.01,-1390.037 -495.01,-1402.208 -503.046,-1402.208 -503.046,-1439.159 -495.642,-1439.159 -495.642,-1451.239 " /><path
|
||||
d="m -524.427,-1451.239 v 12.08 h 7.405 v 36.951 h -8.037 v 12.171 h 30.049 v -12.171 h -8.036 v -36.951 h 7.404 v -12.08 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path355"
|
||||
d="m -424.689,-1402.396 c 3.252,-5.109 4.9,-11.218 4.9,-18.153 0,-21.831 -16.284,-31.774 -31.414,-31.774 -15.13,0 -31.415,9.943 -31.415,31.774 0,21.683 16.173,31.56 31.199,31.56 5.542,0 10.948,-1.321 15.636,-3.82 l 0.886,-0.472 1.724,3.244 14.828,0 0,-0.794 -6.679,-11.039 0.335,-0.526 z m -26.514,0.053 c -8.094,0 -16.806,-5.697 -16.806,-18.206 0,-12.415 8.712,-18.069 16.806,-18.069 8.094,0 16.806,5.654 16.806,18.069 0,12.509 -8.712,18.206 -16.806,18.206 z" /></g><path
|
||||
@ -116,12 +101,10 @@
|
||||
d="m 165.162,221.757 0.005,-0.025 C 140.75,212.523 116.912,200.021 94.188,184.476 69.636,167.679 51.561,151.284 37.304,132.88 28.589,121.633 23.537,112.163 20.924,102.175 c -3.068,-11.729 0.105,-20.54 9.177,-25.482 2.277,-1.241 4.833,-2.269 7.596,-3.054 7.576,-2.153 15.72,-2.812 25.2,-2.015 1.245,0.104 2.521,0.217 3.806,0.332 1.402,0.123 2.801,0.242 4.208,0.368 l 3.177,0.281 3.845,-13.919 c -0.947,-0.121 -1.893,-0.245 -2.83,-0.37 -2.538,-0.337 -4.935,-0.656 -7.25,-0.857 -4.688,-0.406 -8.803,-0.604 -12.578,-0.604 -8.74,0 -16.341,1.076 -23.239,3.29 -14.58,4.68 -23.05,13.281 -25.893,26.297 -1.944,8.9 -0.569,18.38 4.327,29.833 6.099,14.267 15.625,27.692 29.978,42.251 31.706,32.162 69.878,56.911 116.697,75.662 3.183,1.274 6.384,2.416 9.773,3.624 1.433,0.511 2.888,1.029 4.368,1.568 l 2.397,-8.365 -8.521,-9.258 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path359"
|
||||
d="m 279.656,208.102 c -0.147,-0.262 -0.314,-0.56 -0.358,-0.905 -0.992,-8.005 -3.834,-16.142 -8.689,-24.875 -7.945,-14.297 -18.829,-27.683 -34.25,-42.126 -3.812,-3.572 -7.724,-6.949 -11.864,-10.523 -1.677,-1.448 -3.376,-2.915 -5.096,-4.419 -0.006,0.032 -0.011,0.062 -0.017,0.092 -0.062,0.355 -0.097,0.551 -0.09,0.713 l 0.149,3.794 c 0.176,4.559 0.358,9.272 0.669,13.896 0.046,0.706 0.615,1.672 1.521,2.583 2.133,2.144 4.345,4.286 6.484,6.358 3.806,3.687 7.742,7.5 11.388,11.467 11.612,12.634 19.076,24.245 23.489,36.543 2.048,5.705 2.706,10.802 2.011,15.581 -1.146,7.896 -6.144,13.235 -15.281,16.322 -2.455,0.829 -5.003,1.474 -7.658,1.956 l 9.738,12.6 c 1.551,-0.468 3.08,-0.975 4.576,-1.562 12.387,-4.858 19.753,-12.956 22.521,-24.758 l 0.87,-3.686 0,-8.847 c -0.036,-0.067 -0.075,-0.135 -0.113,-0.204 z" /><g
|
||||
id="g361"
|
||||
transform="translate(716.77,1821.033)"><path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path363"
|
||||
d="m -528.273,-1731.087 c 0.581,4.945 1.224,9.971 1.846,14.831 1.416,11.057 2.879,22.489 3.712,33.785 0.808,10.944 0.86,22.254 0.165,34.1 l 13,16.818 c 0.335,-3.384 0.644,-6.817 0.903,-10.349 1.854,-25.214 1.066,-50.093 -2.342,-73.945 -0.708,-4.964 -1.549,-9.816 -2.438,-14.955 -0.378,-2.185 -0.759,-4.387 -1.133,-6.617 l -14.161,3.555 c 0.044,0.257 0.086,0.5 0.128,0.734 0.129,0.741 0.242,1.39 0.32,2.043 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path365"
|
||||
d="m -534.181,-1580.343 c -2.321,7.348 -4.98,14.184 -8.042,20.678 -3.967,8.416 -9.193,17.993 -17.877,25.219 -9.296,7.733 -19.083,7.701 -28.365,-0.092 -5.935,-4.982 -10.921,-11.633 -15.692,-20.929 -6.63,-12.926 -11.46,-27.311 -15.661,-46.642 l -0.072,-0.342 c -0.174,-0.828 -0.411,-1.962 -0.892,-2.284 -4.153,-2.786 -8.356,-5.448 -12.807,-8.267 -1.067,-0.677 -2.146,-1.359 -3.239,-2.054 0.164,0.969 0.321,1.911 0.475,2.834 0.433,2.596 0.842,5.047 1.304,7.478 4.702,24.702 10.704,42.76 19.462,58.551 7.542,13.604 17.86,28.05 37.208,32.08 l 8.319,0 c 17.949,-3.632 27.887,-16.568 35.241,-28.748 1.953,-3.234 3.717,-6.507 5.244,-9.726 2.388,-5.035 4.556,-10.249 6.533,-15.655 l -11.139,-12.101 z" /></g></svg>
|
||||
d="m 279.656,208.102 c -0.147,-0.262 -0.314,-0.56 -0.358,-0.905 -0.992,-8.005 -3.834,-16.142 -8.689,-24.875 -7.945,-14.297 -18.829,-27.683 -34.25,-42.126 -3.812,-3.572 -7.724,-6.949 -11.864,-10.523 -1.677,-1.448 -3.376,-2.915 -5.096,-4.419 -0.006,0.032 -0.011,0.062 -0.017,0.092 -0.062,0.355 -0.097,0.551 -0.09,0.713 l 0.149,3.794 c 0.176,4.559 0.358,9.272 0.669,13.896 0.046,0.706 0.615,1.672 1.521,2.583 2.133,2.144 4.345,4.286 6.484,6.358 3.806,3.687 7.742,7.5 11.388,11.467 11.612,12.634 19.076,24.245 23.489,36.543 2.048,5.705 2.706,10.802 2.011,15.581 -1.146,7.896 -6.144,13.235 -15.281,16.322 -2.455,0.829 -5.003,1.474 -7.658,1.956 l 9.738,12.6 c 1.551,-0.468 3.08,-0.975 4.576,-1.562 12.387,-4.858 19.753,-12.956 22.521,-24.758 l 0.87,-3.686 0,-8.847 c -0.036,-0.067 -0.075,-0.135 -0.113,-0.204 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path363"
|
||||
d="m 188.497,89.946 c 0.581,4.945 1.224,9.971 1.846,14.831 1.416,11.057 2.879,22.489 3.712,33.785 0.808,10.944 0.86,22.254 0.165,34.1 l 13,16.818 c 0.335,-3.384 0.644,-6.817 0.903,-10.349 1.854,-25.214 1.066,-50.093 -2.342,-73.945 -0.708,-4.964 -1.549,-9.816 -2.438,-14.955 -0.378,-2.185 -0.759,-4.387 -1.133,-6.617 l -14.161,3.555 c 0.044,0.257 0.086,0.5 0.128,0.734 0.129,0.741 0.242,1.39 0.32,2.043 z" /><path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path365"
|
||||
d="m 182.589,240.69 c -2.321,7.348 -4.98,14.184 -8.042,20.678 -3.967,8.416 -9.193,17.993 -17.877,25.219 -9.296,7.733 -19.083,7.701 -28.365,-0.092 -5.935,-4.982 -10.921,-11.633 -15.692,-20.929 -6.63,-12.926 -11.46,-27.311 -15.661,-46.642 l -0.072,-0.342 c -0.174,-0.828 -0.411,-1.962 -0.892,-2.284 -4.153,-2.786 -8.356,-5.448 -12.807,-8.267 -1.067,-0.677 -2.146,-1.359 -3.239,-2.054 0.164,0.969 0.321,1.911 0.475,2.834 0.433,2.596 0.842,5.047 1.304,7.478 4.702,24.702 10.704,42.76 19.462,58.551 7.542,13.604 17.86,28.05 37.208,32.08 h 8.319 c 17.949,-3.632 27.887,-16.568 35.241,-28.748 1.953,-3.234 3.717,-6.507 5.244,-9.726 2.388,-5.035 4.556,-10.249 6.533,-15.655 z" /></svg>
|
||||
|
Before Width: | Height: | Size: 13 KiB After Width: | Height: | Size: 12 KiB |
@ -33,13 +33,13 @@ Download and run the official installer. If using NixOS, note that this will req
|
||||
System description file
|
||||
-----------------------
|
||||
|
||||
ARTIQ gateware and firmware binaries are dependent on the system configuration. In other words, a specific set of ARTIQ binaries is bound to the exact arrangement of real-time hardware it was generated for: the core device itself, its role in a DRTIO context (master, satellite, or standalone), the (real-time) peripherals in use, the physical EEM ports they will be connected to, and various other basic specifications. This information is normally provided to the software in the form of a JSON file called the system description or system configuration file.
|
||||
ARTIQ gateware and firmware binaries are dependent on the system configuration. In other words, a specific set of ARTIQ binaries is bound to the exact arrangement of real-time hardware it was generated for: the core device itself, its role in a DRTIO context (master, satellite, or standalone), the (real-time) peripherals in use, the physical EEM ports they will be connected to, and various other basic specifications. This information is normally provided to the software in the form of a JSON file called the system description file.
|
||||
|
||||
.. warning::
|
||||
|
||||
System configuration files are only used with Kasli and Kasli-SoC boards. KC705 and ZC706 ARTIQ configurations, due to their relative rarity and specialization, are handled on a case-by-case basis and selected through a variant name such as ``nist_clock``, with no system description file necessary. See below in :ref:`building` for where to find the list of supported variants. Writing new KC705 or ZC706 variants is not a trivial task, and not particularly recommended, unless you are an FPGA developer and know what you're doing.
|
||||
Not all core devices use system description files. Devices that use system description files for configuration are referred to as JSON variants (see :ref:`JSON variant devices <devices-table>`). Some rare or specialized boards use hardcoded variants, selected by a variant name such as ``nist_clock``, without needing a system description file (see :ref:`Hardcoded variant devices <devices-table>`). For the list of supported variants, see the :ref:`building` section. Writing new hardcoded variants is not a trivial task and is generally not recommended unless you are an experienced FPGA developer.
|
||||
|
||||
If you already have your system configuration file on hand, you can edit it to reflect any changes in configuration. If you purchased your original system from M-Labs, or recently purchased new hardware to add to it, you can obtain your up-to-date system configuration file through AFWS at any time using the command ``$ afws_client get_json`` (see :ref:`AFWS client<afws-client>`). If you are starting from scratch, a close reading of ``coredevice_generic.schema.json`` in ``artiq/coredevice`` will be helpful.
|
||||
If you already have your system description file on hand, you can edit it to reflect any changes in configuration. If you purchased your original system from M-Labs, or recently purchased new hardware to add to it, you can obtain your up-to-date system description file through AFWS at any time using the command ``$ afws_client get_json`` (see :ref:`AFWS client<afws-client>`). If you are starting from scratch, a close reading of ``coredevice_generic.schema.json`` in ``artiq/coredevice`` will be helpful.
|
||||
|
||||
System descriptions do not need to be very complex. At its most basic, a system description looks something like: ::
|
||||
|
||||
@ -82,11 +82,31 @@ Nix development environment
|
||||
---------------------------
|
||||
|
||||
* Install `Nix <http://nixos.org/nix/>`_ if you haven't already. Prefer a single-user installation for simplicity.
|
||||
* Enable flakes in Nix, for example by adding ``experimental-features = nix-command flakes`` to ``nix.conf``; see the `NixOS Wiki on flakes <https://nixos.wiki/wiki/flakes>`_ for details and more options.
|
||||
* Clone `the ARTIQ Git repository <https://github.com/m-labs/artiq>`_, or `the ARTIQ-Zynq repository <https://git.m-labs.hk/M-Labs/artiq-zynq>`__ for Zynq devices (Kasli-SoC or ZC706). By default, you are working with the ``master`` branch, which represents the beta version and is not stable (see :doc:`releases`). Checkout the most recent release (``git checkout release-[number]``) for a stable version.
|
||||
* Configure Nix to support building ARTIQ:
|
||||
|
||||
- Enable flakes, for example by adding ``experimental-features = nix-command flakes`` to ``nix.conf``. See also the `NixOS Wiki on flakes <https://nixos.wiki/wiki/flakes>`_.
|
||||
  - Add ``/opt`` (or your Vivado location) as a Nix sandbox, for example by adding ``extra-sandbox-paths = /opt`` to ``nix.conf``.
|
||||
- Create a file called ``trusted-settings.json`` in ``~/.local/share/nix/``, if it doesn't exist already. Make sure it contains the following:
|
||||
|
||||
::
|
||||
|
||||
{
|
||||
"extra-sandbox-paths":{
|
||||
"/opt":true
|
||||
},
|
||||
"extra-substituters":{
|
||||
"https://nixbld.m-labs.hk":true
|
||||
},
|
||||
"extra-trusted-public-keys":{
|
||||
"nixbld.m-labs.hk-1:5aSRVA5b320xbNvu30tqxVPXpld73bhtOeH6uAjRyHc=":true
|
||||
}
|
||||
}
|
||||
|
||||
- If using NixOS, instead make the equivalent changes to your ``configuration.nix``.
|
||||
|
||||
* Clone `the ARTIQ Git repository <https://github.com/m-labs/artiq>`_, or `the ARTIQ-Zynq repository <https://git.m-labs.hk/M-Labs/artiq-zynq>`__ for :ref:`Zynq devices <devices-table>` (Kasli-SoC, ZC706, or EBAZ4205). By default, you are working with the ``master`` branch, which represents the beta version and is not stable (see :doc:`releases`). Checkout the most recent release (``git checkout release-[number]``) for a stable version.
|
||||
* If your Vivado installation is not in its default location ``/opt``, open ``flake.nix`` and edit it accordingly (note that the edits must be made in the main ARTIQ flake, even if you are working with Zynq, see also tip below).
|
||||
* Run ``nix develop`` at the root of the repository, where ``flake.nix`` is.
|
||||
* Answer ``y``/'yes' to any Nix configuration questions if necessary, as in :ref:`installing-troubleshooting`.
|
||||
|
||||
.. note::
|
||||
You can also target legacy versions of ARTIQ; use Git to checkout older release branches. Note however that older releases of ARTIQ required different processes for developing and building, which you are broadly more likely to figure out by (also) consulting the corresponding older versions of the manual.
|
||||
@ -120,7 +140,7 @@ The parallel command does exist for ARTIQ-Zynq: ::
|
||||
|
||||
but if you are building ARTIQ-Zynq without intention to change the source, it is not actually necessary to enter the development environment at all; Nix is capable of accessing the official flake remotely for the build itself, eliminating the requirement for any particular environment.
|
||||
|
||||
This is equally possible for original ARTIQ, but not as useful, as the development environment (specifically the ``#boards`` shell) is still the easiest way to access the necessary tools for flashing the board. On the other hand, with Zynq, it is normally recommended to boot from SD card, which requires no further special tools. As long as you have a functioning Nix installation with flakes enabled, you can progress directly to the building instructions below.
|
||||
This is equally possible for original ARTIQ, but not as useful, as the development environment (specifically the ``#boards`` shell) is still the easiest way to access the necessary tools for flashing the board. On the other hand, Zynq boards can also be flashed by writing to the SD card directly, which requires no further special tools. As long as you have a functioning Nix/Vivado installation with flakes enabled, you can progress directly to the building instructions below.
|
||||
|
||||
.. _building:
|
||||
|
||||
@ -140,8 +160,9 @@ With KC705, use: ::
|
||||
|
||||
$ python -m artiq.gateware.targets.kc705 -V <variant>
|
||||
|
||||
This will create a directory ``artiq_kasli`` or ``artiq_kc705`` containing the binaries in a subdirectory named after your description file or variant. Flash the board as described in :ref:`writing-flash`, adding the option ``--srcbuild``, e.g., assuming your board is already connected by JTAG USB: ::
|
||||
This will create a directory ``artiq_kasli`` or ``artiq_kc705`` containing the binaries in a subdirectory named after your description file or variant. Flash the board as described in :ref:`writing-flash`, adding the option ``--srcbuild``, e.g., assuming your board is connected by network or JTAG USB respectively: ::
|
||||
|
||||
$ artiq_coremgmt flash --srcbuild artiq_<board>/<variant>
|
||||
$ artiq_flash --srcbuild [-t kc705] -d artiq_<board>/<variant>
|
||||
|
||||
.. note::
|
||||
@ -151,10 +172,10 @@ This will create a directory ``artiq_kasli`` or ``artiq_kc705`` containing the b
|
||||
|
||||
Look for the option ``-V VARIANT, --variant VARIANT``.
|
||||
|
||||
Kasli-SoC or ZC706 (ARTIQ on Zynq)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Kasli-SoC, ZC706 or EBAZ4205 (ARTIQ on Zynq)
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The building process for Zynq devices is a little more complex. The easiest method is to leverage ``nix build`` and the ``makeArtiqZynqPackage`` utility provided by the official flake. The ensuing command is rather long, because it uses a multi-clause expression in the Nix language to describe the desired result; it can be executed piece-by-piece using the `Nix REPL <https://nix.dev/manual/nix/2.18/command-ref/new-cli/nix3-repl.html>`_, but ``nix build`` provides a lot of useful conveniences.
|
||||
The building process for :ref:`Zynq devices <devices-table>` is a little more complex. The easiest method is to leverage ``nix build`` and the ``makeArtiqZynqPackage`` utility provided by the official flake. The ensuing command is rather long, because it uses a multi-clause expression in the Nix language to describe the desired result; it can be executed piece-by-piece using the `Nix REPL <https://nix.dev/manual/nix/2.18/command-ref/new-cli/nix3-repl.html>`_, but ``nix build`` provides a lot of useful conveniences.
|
||||
|
||||
For Kasli-SoC, run: ::
|
||||
|
||||
@ -162,32 +183,40 @@ For Kasli-SoC, run: ::
|
||||
|
||||
Replace ``<variant>`` with ``master``, ``satellite``, or ``standalone``, depending on your targeted DRTIO role. Remove ``?ref=release-[number]`` to use the current beta version rather than a numbered release. If you have cloned the repository and prefer to use your local copy of the flake, replace the corresponding clause with ``builtins.getFlake "/absolute/path/to/your/artiq-zynq"``.
|
||||
|
||||
For ZC706, you can use a command of the same form: ::
|
||||
For ZC706 or EBAZ4205, you can use a command of the same form (replace ``<target>`` with ``zc706`` or ``ebaz4205``): ::
|
||||
|
||||
$ nix build --print-build-logs --impure --expr 'let fl = builtins.getFlake "git+https://git.m-labs.hk/m-labs/artiq-zynq?ref=release-[number]"; in (fl.makeArtiqZynqPackage {target="zc706"; variant="<variant>";}).zc706-<variant>-sd'
|
||||
$ nix build --print-build-logs --impure --expr 'let fl = builtins.getFlake "git+https://git.m-labs.hk/m-labs/artiq-zynq?ref=release-[number]"; in (fl.makeArtiqZynqPackage {target="<target>"; variant="<variant>";}).<target>-<variant>-sd'
|
||||
|
||||
or you can use the more direct version: ::
|
||||
|
||||
$ nix build --print-build-logs git+https://git.m-labs.hk/m-labs/artiq-zynq\?ref=release-[number]#zc706-<variant>-sd
|
||||
$ nix build --print-build-logs git+https://git.m-labs.hk/m-labs/artiq-zynq\?ref=release-[number]#<target>-<variant>-sd
|
||||
|
||||
(which is possible for ZC706 because there is no need to be able to specify a system description file in the arguments.)
|
||||
(which is possible for ZC706 and EBAZ4205 because there is no need to be able to specify a system description file in the arguments.)
|
||||
|
||||
.. note::
|
||||
To see supported ZC706 variants, you can run the following at the root of the repository: ::
|
||||
  To see supported variants for ZC706 or EBAZ4205, you can run the following at the root of the repository: ::
|
||||
|
||||
$ src/gateware/zc706.py --help
|
||||
$ src/gateware/<target>.py --help
|
||||
|
||||
Look for the option ``-V VARIANT, --variant VARIANT``. If you have not cloned the repository or are not in the development environment, try: ::
|
||||
|
||||
$ nix flake show git+https://git.m-labs.hk/m-labs/artiq-zynq\?ref=release-[number] | grep "package 'zc706.*sd"
|
||||
$ nix flake show git+https://git.m-labs.hk/m-labs/artiq-zynq\?ref=release-[number] | grep "package '<target>.*sd"
|
||||
|
||||
to see the list of suitable build targets directly.
|
||||
|
||||
Any of these commands should produce a directory ``result`` which contains a file ``boot.bin``. As described in :ref:`writing-flash`, if your core device is currently accessible over the network, it can be flashed with :mod:`~artiq.frontend.artiq_coremgmt`. If it is not connected to the network:
|
||||
Any of these commands should produce a directory ``result`` which contains a file ``boot.bin``. If your core device is accessible by network, flash with: ::
|
||||
|
||||
$ artiq_coremgmt flash result
|
||||
|
||||
Otherwise:
|
||||
|
||||
1. Power off the board, extract the SD card and load ``boot.bin`` onto it manually.
|
||||
2. Insert the SD card back into the board.
|
||||
3. Ensure that the DIP switches (labeled BOOT MODE) are set correctly, to SD.
|
||||
3. Set to boot from SD card:
|
||||
|
||||
- For Kasli-SoC or ZC706, ensure that the DIP switches (labeled BOOT MODE) are set correctly, to SD.
|
||||
- For EBAZ4205, set up the `boot select resistor <https://github.com/xjtuecho/EBAZ4205>`_ to boot from SD card.
|
||||
|
||||
4. Power the board back on.
|
||||
|
||||
Optionally, the SD card may also be loaded at the same time with an additional file ``config.txt``, which can contain preset configuration values in the format ``key=value``, one per line. The keys are those used with :mod:`~artiq.frontend.artiq_coremgmt`. This allows e.g. presetting an IP address and any other configuration information.
|
||||
@ -199,7 +228,7 @@ After a successful boot, the "FPGA DONE" light should be illuminated and the boa
|
||||
Booting over JTAG/Ethernet
|
||||
""""""""""""""""""""""""""
|
||||
|
||||
It is also possible to boot Zynq devices over USB and Ethernet. Flip the DIP switches to JTAG. The scripts ``remote_run.sh`` and ``local_run.sh`` in the ARTIQ-Zynq repository, intended for use with a remote JTAG server or a local connection to the core device respectively, are used at M-Labs to accomplish this. Both make use of the netboot tool ``artiq_netboot``, see also its source `here <https://git.m-labs.hk/M-Labs/artiq-netboot>`__, which is included in the ARTIQ-Zynq development environment. Adapt the relevant script to your system or read it closely to understand the options and the commands being run; note for example that ``remote_run.sh`` as written only supports ZC706.
|
||||
It is also possible to boot :ref:`Zynq devices <devices-table>` over USB and Ethernet (EBAZ4205 not currently supported). Flip the DIP switches to JTAG. The scripts ``remote_run.sh`` and ``local_run.sh`` in the ARTIQ-Zynq repository, intended for use with a remote JTAG server or a local connection to the core device respectively, are used at M-Labs to accomplish this. Both make use of the netboot tool ``artiq_netboot``, see also its source `here <https://git.m-labs.hk/M-Labs/artiq-netboot>`__, which is included in the ARTIQ-Zynq development environment. Adapt the relevant script to your system or read it closely to understand the options and the commands being run; note for example that ``remote_run.sh`` as written only supports ZC706.
|
||||
|
||||
You will need to generate the gateware, firmware and bootloader first, either through ``nix build`` or incrementally as below. After an incremental build add the option ``-i`` when running either of the scripts. If using ``nix build``, note that target names of the form ``<board>-<variant>-jtag`` (run ``nix flake show`` to see all targets) will output the three necessary files without combining them into ``boot.bin``.
|
||||
|
||||
@ -220,16 +249,13 @@ For Kasli-SoC:
|
||||
$ gateware/kasli_soc.py -g ../build/gateware <description.json>
|
||||
$ make TARGET=kasli_soc GWARGS="path/to/description.json" <fw-type>
|
||||
|
||||
For ZC706:
|
||||
For ZC706 or EBAZ4205:
|
||||
::
|
||||
|
||||
$ gateware/zc706.py -g ../build/gateware -V <variant>
|
||||
$ make TARGET=zc706 GWARGS="-V <variant>" <fw-type>
|
||||
$ gateware/<target>.py -g ../build/gateware -V <variant>
|
||||
$ make TARGET=<target> GWARGS="-V <variant>" <fw-type>
|
||||
|
||||
where ``fw-type`` is ``runtime`` for standalone or DRTIO master builds and ``satman`` for DRTIO satellites. Both the gateware and the firmware will generate into the ``../build`` destination directory. At this stage you can :ref:`boot from JTAG <zynq-jtag-boot>`; either of the ``*_run.sh`` scripts will expect the gateware and firmware files at their default locations, and the ``szl.elf`` bootloader is retrieved automatically.
|
||||
|
||||
.. warning::
|
||||
Note that in between runs of ``make`` it is necessary to manually clear ``build``, even for different targets, or ``make`` will do nothing.
|
||||
where ``fw-type`` is ``runtime`` for standalone or DRTIO master builds and ``satman`` for DRTIO satellites. Both the gateware and the firmware will generate into the ``../build`` destination directory. At this stage, if supported, you can :ref:`boot from JTAG <zynq-jtag-boot>`; either of the ``*_run.sh`` scripts will expect the gateware and firmware files at their default locations, and the ``szl.elf`` bootloader is retrieved automatically.
|
||||
|
||||
If you prefer to boot from SD card, you will need to construct your own ``boot.bin``. Build ``szl.elf`` from source by running a command of the form: ::
|
||||
|
||||
|
@ -48,35 +48,37 @@ ARTIQ types
|
||||
|
||||
Python/NumPy types correspond to ARTIQ types as follows:
|
||||
|
||||
+---------------+-------------------------+
|
||||
| Python | ARTIQ |
|
||||
+===============+=========================+
|
||||
| NoneType | TNone |
|
||||
+---------------+-------------------------+
|
||||
| bool | TBool |
|
||||
+---------------+-------------------------+
|
||||
| int | TInt32 or TInt64 |
|
||||
+---------------+-------------------------+
|
||||
| float | TFloat |
|
||||
+---------------+-------------------------+
|
||||
| str | TStr |
|
||||
+---------------+-------------------------+
|
||||
| bytes | TBytes |
|
||||
+---------------+-------------------------+
|
||||
| bytearray | TByteArray |
|
||||
+---------------+-------------------------+
|
||||
| list of T | TList(T) |
|
||||
+---------------+-------------------------+
|
||||
| NumPy array | TArray(T, num_dims) |
|
||||
+---------------+-------------------------+
|
||||
| range | TRange32, TRange64 |
|
||||
+---------------+-------------------------+
|
||||
| numpy.int32 | TInt32 |
|
||||
+---------------+-------------------------+
|
||||
| numpy.int64 | TInt64 |
|
||||
+---------------+-------------------------+
|
||||
| numpy.float64 | TFloat |
|
||||
+---------------+-------------------------+
|
||||
+------------------------+-------------------------+
|
||||
| Python | ARTIQ |
|
||||
+========================+=========================+
|
||||
| NoneType | TNone |
|
||||
+------------------------+-------------------------+
|
||||
| bool | TBool |
|
||||
+------------------------+-------------------------+
|
||||
| int | TInt32 or TInt64 |
|
||||
+------------------------+-------------------------+
|
||||
| float | TFloat |
|
||||
+------------------------+-------------------------+
|
||||
| str | TStr |
|
||||
+------------------------+-------------------------+
|
||||
| bytes | TBytes |
|
||||
+------------------------+-------------------------+
|
||||
| bytearray | TByteArray |
|
||||
+------------------------+-------------------------+
|
||||
| list of T | TList(T) |
|
||||
+------------------------+-------------------------+
|
||||
| NumPy array | TArray(T, num_dims) |
|
||||
+------------------------+-------------------------+
|
||||
| tuple of (T1, T2, ...) | TTuple([T1, T2, ...]) |
|
||||
+------------------------+-------------------------+
|
||||
| range | TRange32, TRange64 |
|
||||
+------------------------+-------------------------+
|
||||
| numpy.int32 | TInt32 |
|
||||
+------------------------+-------------------------+
|
||||
| numpy.int64 | TInt64 |
|
||||
+------------------------+-------------------------+
|
||||
| numpy.float64 | TFloat |
|
||||
+------------------------+-------------------------+
|
||||
|
||||
Integers are 32-bit by default but may be converted to 64-bit with ``numpy.int64``.
|
||||
|
||||
@ -87,8 +89,12 @@ The ARTIQ compiler can be thought of as overriding all built-in Python types, an
|
||||
|
||||
Multidimensional arrays are allowed (using NumPy syntax). Element-wise operations (e.g. ``+``, ``/``), matrix multiplication (``@``) and multidimensional indexing are supported; slices and views (currently) are not.
|
||||
|
||||
Tuples may contain a mixture of different types. They cannot be iterated over or dynamically indexed, although they may be indexed by constants and multiple assignment is supported.
|
||||
|
||||
User-defined classes are supported, provided their attributes are of other supported types (attributes that are not used in the kernel are ignored and thus unrestricted). When several instances of a user-defined class are referenced from the same kernel, every attribute must have the same type in every instance of the class.
|
||||
|
||||
.. _basic-artiq-python:
|
||||
|
||||
Basic ARTIQ Python
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
@ -99,6 +105,14 @@ Kernel code can call host functions without any additional ceremony. However, su
|
||||
def return_four() -> TInt32:
|
||||
return 4
|
||||
|
||||
.. tip::
|
||||
Multiple variables of different types can be sent in one RPC call by returning a tuple, e.g. ::
|
||||
|
||||
def return_many() -> TTuple([TInt32, TFloat, TStr]):
|
||||
return (4, 12.34, "hello",)
|
||||
|
||||
    These values can be retrieved from a kernel with ``(a, b, c) = return_many()``.
|
||||
|
||||
Kernels can freely modify attributes of objects shared with the host. However, by necessity, these modifications are actually applied to local copies of the objects, as the latency of immediate writeback would be unsupportable in a real-time environment. Instead, modifications are written back *when the kernel completes;* notably, this means RPCs called by a kernel itself will only have access to the unmodified host version of the object, as the kernel hasn't finished execution yet. In some cases, accessing data on the host is better handled by calling RPCs specifically to make the desired modifications.
|
||||
|
||||
.. warning::
|
||||
@ -117,7 +131,7 @@ Kernels can freely modify attributes of objects shared with the host. However, b
|
||||
# results in memory corruption
|
||||
return func([1, 2, 3])
|
||||
|
||||
will compile, **but corrupts at runtime.** On the other hand, lists, arrays, or strings can and should be used as inputs for RPCs, and this is the preferred method of returning data to the host. In this way the data is inherently read and sent before the kernel completes and there are no allocation issues.
|
||||
will compile, **but corrupts at runtime.** On the other hand, lists, arrays, or strings can and should be used as inputs for RPCs, and this is the preferred method of returning data to the host. In this way the data is sent before the kernel completes and there are no allocation issues.
|
||||
|
||||
Available built-in functions
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
@ -253,3 +267,7 @@ In the synthetic example above, the compiler will be able to detect that the res
|
||||
for _ in range(100):
|
||||
delay_mu(precomputed_delay_mu)
|
||||
self.worker.work()
|
||||
|
||||
Kernel invariants are defined for every object by the ``kernel_invariants`` attribute, which is a set containing the names of every invariant attribute of this object.
|
||||
|
||||
At compile time it is possible to automatically detect attributes that are never altered in a kernel, and thus may be good candidates for inclusion into ``kernel_invariants``. This is done by specifying ``report_invariants=True`` when initializing the core device driver (in the dashboard you can do this using the "Override device arguments" option).
|
@ -30,7 +30,7 @@ mock_modules = ["artiq.gui.waitingspinnerwidget",
|
||||
"artiq.coredevice.jsondesc",
|
||||
"nac3artiq",
|
||||
"qasync", "lmdb", "dateutil.parser", "prettytable", "PyQt6",
|
||||
"h5py", "llvmlite", "pythonparser", "tqdm", "jsonschema"]
|
||||
"h5py", "llvmlite", "pythonparser", "tqdm", "jsonschema", "platformdirs"]
|
||||
|
||||
for module in mock_modules:
|
||||
sys.modules[module] = Mock()
|
||||
@ -147,6 +147,10 @@ nitpick_ignore_regex = [
|
||||
(r'py:.*', r'artiq.gateware.*'),
|
||||
('py:mod', r'artiq.test.*'),
|
||||
('py:mod', r'artiq.applets.*'),
|
||||
# we can't use artiq.master.* because we shouldn't ignore the scheduler
|
||||
('py:class', r'artiq.master.experiments.*'),
|
||||
('py:class', r'artiq.master.databases.*'),
|
||||
('py:.*', r'artiq.master.worker.*'),
|
||||
('py:class', 'dac34H84'),
|
||||
('py:class', 'trf372017'),
|
||||
('py:class', r'list(.*)'),
|
||||
|
@ -6,6 +6,9 @@ Networking and configuration
|
||||
Setting up core device networking
|
||||
---------------------------------
|
||||
|
||||
.. note::
|
||||
Satellite core devices (in a DRTIO setting, see :doc:`using_drtio_subkernels`) do not support independent networking and this section does not apply to them. Follow the instructions on this page for your master core device, and proceed to :ref:`configuring-satellite` once DRTIO communications are established.
|
||||
|
||||
For Kasli, insert a SFP/RJ45 transceiver (normally included with purchases from M-Labs and QUARTIQ) into the SFP0 port and connect it to an Ethernet port in your network. If the port is 10Mbps or 100Mbps and not 1000Mbps, make sure that the SFP/RJ45 transceiver supports the lower rate. Many SFP/RJ45 transceivers only support the 1000Mbps rate. If you do not have a SFP/RJ45 transceiver that supports 10Mbps and 100Mbps rates, you may instead use a gigabit Ethernet switch in the middle to perform rate conversion.
|
||||
|
||||
You can also insert other types of SFP transceivers into Kasli if you wish to use it directly in e.g. an optical fiber Ethernet network. Kasli-SoC already directly features RJ45 10/100/1000 Ethernet.
|
||||
@ -55,6 +58,9 @@ For Kasli-SoC:
|
||||
For ZC706:
|
||||
If the ``ip`` config is not set, ZC706 firmware defaults to using the IP address ``192.168.1.52``.
|
||||
|
||||
For EBAZ4205:
|
||||
If the ``ip`` config is not set, EBAZ4205 firmware defaults to using the IP address ``192.168.1.57``.
|
||||
|
||||
For Kasli or KC705:
|
||||
If the ``ip`` config field is not set or set to ``use_dhcp``, the device will attempt to obtain an IP address and default gateway using DHCP. The chosen IP address will be in log output, which can be accessed via the :ref:`UART log <connecting-UART>`.
|
||||
|
||||
@ -127,3 +133,16 @@ Load the DRTIO routing table
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If you are using DRTIO and the default routing table (for a star topology) is not suitable to your needs, you will first need to prepare and load a different routing table. See :ref:`Using DRTIO <drtio-routing>`.
|
||||
|
||||
.. _configuring-satellite:
|
||||
|
||||
Configuring DRTIO satellites
|
||||
----------------------------
|
||||
|
||||
Once DRTIO communications are online, any satellite devices can be accessed as normal using :mod:`~artiq.frontend.artiq_coremgmt`, e.g.: ::
|
||||
|
||||
$ artiq_coremgmt -s <destination_number> log
|
||||
|
||||
The destination number corresponds to the number assigned to that satellite both in the device database and, earlier, in the system configuration file. See the notes in :ref:`drtio-routing` if you are not sure what destination number to use.
|
||||
|
||||
It is also possible to set configuration values, reflash, or reboot the device. Notably, :ref:`event spreading <sed-event-spreading>` is a per-device setting considered particularly useful on satellites. Most other configuration settings, e.g. networking, clocking, will not be used in practice in a satellite context -- satellites do not support direct network connections and are always bound to the master's clock.
|
@ -39,7 +39,7 @@ The core device reserves some storage space (either flash or directly on SD card
|
||||
``device_map``
|
||||
If set, allows the core log to connect RTIO channels to device names and use device names as well as channel numbers in log output. A correctly formatted table can be automatically generated with :mod:`~artiq.frontend.artiq_rtiomap`, see :ref:`Utilities<rtiomap-tool>`.
|
||||
``net_trace``
|
||||
If set to ``1``, will activate net trace (print all packets sent and received to UART and core log). This will considerably slow down all network response from the core. Not applicable for ARTIQ-Zynq (Kasli-SoC, ZC706).
|
||||
If set to ``1``, will activate net trace (print all packets sent and received to UART and core log). This will considerably slow down all network response from the core. Not applicable for ARTIQ-Zynq (see :ref:`Zynq devices <devices-table>`).
|
||||
``panic_reset``
|
||||
If set to ``1``, core device will restart automatically. Not applicable for ARTIQ-Zynq.
|
||||
``no_flash_boot``
|
||||
@ -106,6 +106,27 @@ If not using WRPLL, PLL can also be bypassed entirely with the options
|
||||
|
||||
Bypassing the PLL ensures the skews between input clock, downstream clock outputs, and RTIO clock are deterministic across reboots of the system. This is useful when phase determinism is required in situations where the reference clock fans out to other devices before reaching the master.
|
||||
|
||||
.. _types-of-boards:
|
||||
|
||||
Types of boards
|
||||
---------------
|
||||
|
||||
To clarify the terminology used in ARTIQ, we can divide the boards into a few key groups. There are two primary ways to categorize them. The first is based on the ARTIQ platform itself: either ARTIQ or ARTIQ-Zynq. ARTIQ-Zynq boards specifically refer to those that feature a Xilinx Zynq FPGA. The second distinction is based on how the boards are configured: some use a :ref:`JSON system description file <system-description>`, while others do not.
|
||||
|
||||
Below are the current groups of boards:
|
||||
|
||||
.. _devices-table:
|
||||
|
||||
+------------------------------+------------------------------+
|
||||
| **Device Type** | **Devices** |
|
||||
+==============================+==============================+
|
||||
| Zynq devices | Kasli-SoC, ZC706, EBAZ4205 |
|
||||
+------------------------------+------------------------------+
|
||||
| JSON variant devices | Kasli, Kasli-SoC |
|
||||
+------------------------------+------------------------------+
|
||||
| Hardcoded variant devices | KC705, ZC706, EBAZ4205 |
|
||||
+------------------------------+------------------------------+
|
||||
|
||||
Board details
|
||||
-------------
|
||||
|
||||
@ -119,7 +140,7 @@ Kasli and Kasli-SoC
|
||||
|
||||
`Kasli <https://github.com/sinara-hw/Kasli/wiki>`_ and `Kasli-SoC <https://github.com/sinara-hw/Kasli-SOC/wiki>`_ are versatile core devices designed for ARTIQ as part of the open-source `Sinara <https://github.com/sinara-hw/meta/wiki>`_ family of boards. All support interfacing to various EEM daughterboards (TTL, DDS, ADC, DAC...) through twelve onboard EEM ports. Kasli is based on a Xilinx Artix-7 FPGA, and Kasli-SoC, which runs on a separate `Zynq port <https://git.m-labs.hk/M-Labs/artiq-zynq>`_ of the ARTIQ firmware, is based on a Zynq-7000 SoC, notably including an ARM CPU allowing for much heavier software computations at high speeds. They are architecturally very different but supply similar feature sets. Kasli itself exists in two versions, of which the improved Kasli v2.0 is now in more common use, but the original v1.0 remains supported by ARTIQ.
|
||||
|
||||
Kasli can be connected to the network using a 10000Base-X SFP module, installed into the SFP0 cage. Kasli-SoC features a built-in Ethernet port to use instead. If configured as a DRTIO satellite, both boards instead reserve SFP0 for the upstream DRTIO connection; remaining SFP cages are available for downstream connections. Equally, if used as a DRTIO master, all free SFP cages are available for downstream connections (i.e. all but SFP0 on Kasli, all four on Kasli-SoC).
|
||||
Kasli can be connected to the network using a 1000Base-X SFP module, installed into the SFP0 cage. Kasli-SoC features a built-in Ethernet port to use instead. If configured as a DRTIO satellite, both boards instead reserve SFP0 for the upstream DRTIO connection; remaining SFP cages are available for downstream connections. Equally, if used as a DRTIO master, all free SFP cages are available for downstream connections (i.e. all but SFP0 on Kasli, all four on Kasli-SoC).
|
||||
|
||||
The DRTIO line rate depends upon the RTIO clock frequency running, e.g., at 125MHz the line rate is 2.5Gbps, at 150MHz 3.0Gbps, etc. See below for information on RTIO clocks.
|
||||
|
||||
@ -140,6 +161,46 @@ VADJ
|
||||
|
||||
With the NIST CLOCK and QC2 adapters, for safe operation of the DDS buses (to prevent damage to the IO banks of the FPGA), the FMC VADJ rail of the KC705 should be changed to 3.3V. Plug the Texas Instruments USB-TO-GPIO PMBus adapter into the PMBus connector in the corner of the KC705 and use the Fusion Digital Power Designer software to configure (requires Windows). Write to chip number U55 (address 52), channel 4, which is the VADJ rail, to make it 3.3V instead of 2.5V. Power cycle the KC705 board to check that the startup voltage on the VADJ rail is now 3.3V.
|
||||
|
||||
EBAZ4205
|
||||
^^^^^^^^
|
||||
|
||||
The `EBAZ4205 <https://github.com/xjtuecho/EBAZ4205>`_ Zynq-SoC control card, originally used in the Ebit E9+ BTC miner, is a low-cost development board (around $20-$30 USD), making it an ideal option for experimenting with ARTIQ. To use the EBAZ4205, it's important to carefully follow the board documentation to configure it to boot from the SD card, as network booting via ``artiq_netboot`` is currently unsupported. This is because the Ethernet PHY is routed through the EMIO, requiring the FPGA to be programmed before the board can establish a network connection.
|
||||
|
||||
.. note::
|
||||
Although both ``int_100`` and ``int_125`` are supported, ``int_150`` -- used to synthesize a 150MHz RTIO clock -- is not currently compatible with the EBAZ4205.
|
||||
|
||||
SD BOOT
|
||||
"""""""
|
||||
|
||||
To enable the EBAZ4205 to boot from an SD card, you will need to modify the board's boot select resistors. By default, the board is set to boot from NAND, with a resistor placed on ``R2584``. To change the boot mode to SD card, move the resistor from ``R2584`` to ``R2577``. Be sure to carefully consult the `EBAZ4205 documentation <https://github.com/xjtuecho/EBAZ4205>`_ to confirm resistor locations and proper handling of the board.
|
||||
|
||||
AD9834 DDS
|
||||
""""""""""
|
||||
|
||||
One useful application of the EBAZ4205 is controlling external devices like the AD9834 DDS Module from ZonRi Technology Co., Ltd. To establish communication between the EBAZ4205 and the AD9834 module, proper configuration of the SPI interface pins is essential. The board's flexibility allows for straightforward control of the DDS once the correct pinout is known. The table below details the necessary connections between the EBAZ4205 and the AD9834 module, including power, ground, and SPI signals.
|
||||
|
||||
+--------------------------+---------------------+----------------------------+
|
||||
| Pin on AD9834 Module | Chip Function | Connection on EBAZ4205 |
|
||||
+==========================+=====================+============================+
|
||||
| SCLK | SCLK | CLK: DATA3-19 (Pin V20) |
|
||||
+--------------------------+---------------------+----------------------------+
|
||||
| DATA | SDATA | MOSI: DATA3-17 (Pin U20) |
|
||||
+--------------------------+---------------------+----------------------------+
|
||||
| SYNC | FSYNC | CS_N: DATA3-15 (Pin P19) |
|
||||
+--------------------------+---------------------+----------------------------+
|
||||
| FSE (Tied to GND) | FSELECT | N/A: Bit Controlled |
|
||||
+--------------------------+---------------------+----------------------------+
|
||||
| PSE (Tied to GND) | PSELECT | N/A: Bit Controlled |
|
||||
+--------------------------+---------------------+----------------------------+
|
||||
| GND | Ground | GND: J8-1, J8-3 |
|
||||
+--------------------------+---------------------+----------------------------+
|
||||
| VIN | AVDD/DVDD | 3.3V: J8-2 |
|
||||
+--------------------------+---------------------+----------------------------+
|
||||
| RESET (Unused) | RESET | N/A: Bit Controlled |
|
||||
+--------------------------+---------------------+----------------------------+
|
||||
|
||||
For a guide, see the `EBAZ4205 and AD9834 setup guide <https://newell.github.io/projects/ebaz4205>`_.
|
||||
|
||||
Variant details
|
||||
---------------
|
||||
|
||||
|
@ -85,6 +85,12 @@ RF generation drivers
|
||||
.. automodule:: artiq.coredevice.ad9914
|
||||
:members:
|
||||
|
||||
:mod:`artiq.coredevice.ad9834` module
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. automodule:: artiq.coredevice.ad9834
|
||||
:members:
|
||||
|
||||
:mod:`artiq.coredevice.mirny` module
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
|
@ -86,7 +86,7 @@ A dataset may be broadcast (``broadcast=True``), that is, distributed to all cli
|
||||
|
||||
Broadcasted datasets are replaced when a new dataset with the same key (name) is produced. By default, they are erased when the master halts. Broadcasted datasets may be made persistent (``persistent=True``, which also implies ``broadcast=True``), in which case the master stores them in a LMDB database typically called ``dataset_db.mdb``, where they are saved across master restarts.
|
||||
|
||||
By default, datasets are archived in the ``results`` HDF5 output for that run, although this can be opted against (``archive=False``).
|
||||
By default, datasets are archived in the ``results`` HDF5 output for that run, although this can be opted against (``archive=False``). They can be viewed and analyzed with the ARTIQ browser, or with an HDF5 viewer of your choice.
|
||||
|
||||
Datasets and units
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
@ -5,7 +5,7 @@ Extending RTIO
|
||||
|
||||
This page is for users who want to extend or modify ARTIQ RTIO. Broadly speaking, one of the core intentions of ARTIQ is to provide a high-level, easy-to-use interface for experimentation, while the infrastructure handles the technological challenges of the high-resolution, timing-critical operations required. Rather than worrying about the details of timing in hardware, users can outline tasks quickly and efficiently in ARTIQ Python, and trust the system to carry out those tasks in real time. It is not normally, or indeed ever, necessary to make modifications on a gateware level.
|
||||
|
||||
However, ARTIQ is an open-source project, and welcomes interest and agency from its users, as well as from experienced developers. This page is intended to serve firstly as a broad introduction to the internal structure of ARTIQ, and secondly as a tutorial for how RTIO extensions in ARTIQ can be made. Experience with FPGAs or hardware description languages is not strictly necessary, but additional research on the topic will likely be required to make serious modifications of your own.
|
||||
However, ARTIQ is an open-source project, and welcomes innovation and contribution from its users, as well as from experienced developers. This page is intended to serve firstly as a broad introduction to the internal structure of ARTIQ, and secondly as a tutorial for how RTIO extensions in ARTIQ can be made. Experience with FPGAs or hardware description languages is not strictly necessary, but additional research on the topic will likely be required to make serious modifications of your own.
|
||||
|
||||
For instructions on setting up the ARTIQ development environment and on building gateware and firmware binaries, first see :doc:`building_developing` in the main part of the manual.
|
||||
|
||||
@ -37,7 +37,7 @@ Introduction to the ARTIQ internal stack
|
||||
\draw[primary, -Stealth] (gateware) to (firmware);
|
||||
\draw[primary, -Stealth] (hardware) to (gateware);
|
||||
|
||||
Like any other modern piece of software, kernel code running on an ARTIQ core device rests upon a layered infrastructure, starting with the hardware: the physical carrier board and its peripherals. Generally, though not exclusively, this is the `Sinara device family <https://m-labs.hk/experiment-control/sinara-core/>`_, which is designed to work with ARTIQ. Other carrier boards, such as the Xilinx KC705 and ZC706, are also supported.
|
||||
Like any other modern piece of software, kernel code running on an ARTIQ core device rests upon a layered infrastructure, starting with the hardware: the physical carrier board and its peripherals. Generally, though not exclusively, this is the `Sinara device family <https://m-labs.hk/experiment-control/sinara-core/>`_, which is designed to work with ARTIQ. Other carrier boards, such as the :ref:`Hardcoded variant devices <devices-table>`, are also supported.
|
||||
|
||||
All of the ARTIQ core device carrier boards necessarily center around a physical field-programmable gate array, or FPGA. If you have never worked with FPGAs before, it is easiest to understand them as 'rearrangeable' circuits. Ideally, they are capable of approaching the tremendous speed and timing precision advantages of custom-designed, application-specific hardware, while still being reprogrammable, allowing development and revision to continue after manufacturing.
|
||||
|
||||
@ -45,10 +45,12 @@ The 'configuration' of an FPGA, the circuit design it is programmed with, is its
|
||||
|
||||
The low-level software that runs directly on the core device's CPU, softcore or hardcore, is its *firmware.* This is the 'operating system' of the core device. The firmware is tasked, among other things, with handling the low-level communication between the core device and the host machine, as well as between the core devices in a DRTIO setting. It is written in bare-metal `Rust <https://www.rust-lang.org/>`__. There are currently two active versions of the ARTIQ firmware (the version used for ARTIQ-Zynq, NAR3, is more modern than that used on Kasli and KC705, and will likely eventually replace it) but they are functionally equivalent except for internal details.
|
||||
|
||||
Experiment kernels themselves -- ARTIQ Python, processed by the ARTIQ compiler and loaded from the host machine -- rest on top of and are framed and supported by the firmware, in the same sense way that application software on your PC rests on top of an operating system. All together, software kernels communicate with the firmware to set parameters for the gateware, which passes signals directly to the hardware.
|
||||
Experiment kernels themselves -- ARTIQ Python, processed by the ARTIQ compiler and loaded from the host machine -- rest on top of and are framed and supported by the firmware, in the same way that application software on your PC rests on top of an operating system. All together, software kernels communicate with the firmware to set parameters for the gateware, which passes signals directly to the hardware.
|
||||
|
||||
These frameworks are built to be self-contained and extensible. To make additions to the gateware and software, for example, we do not need to make changes to the firmware; we can interact purely with the interfaces provided on either side.
|
||||
|
||||
.. _extending-gateware-logic:
|
||||
|
||||
Extending gateware logic
|
||||
------------------------
|
||||
|
||||
@ -57,10 +59,12 @@ As briefly explained in :doc:`rtio`, when we talk about RTIO infrastructure, we
|
||||
.. warning::
|
||||
Note that FPGA resources are finite, and buffer sizes, lane counts, etc., are generally chosen to maximize available resources already, with different values depending on the core device in use. Depending on the peripherals you include (some are more resource-intensive than others) blanket increases will likely quickly outstrip the capacity of your FPGA and fail to build. Increasing the depth of a particular channel you know to be heavily used is more likely to succeed; the easiest way to find out is to attempt the build and observe what results.
|
||||
|
||||
Gateware in ARTIQ is housed in ``artiq/gateware`` on the main ARTIQ repository and (for Zynq-specific additions) in ``artiq-zynq/src/gateware`` on ARTIQ-Zynq. The starting point for figuring out your changes will often be the *target file*, which is core device-specific and which you may recognize as the primary module called when building gateware. Depending on your core device, simply track down the file named after it, as in ``kasli.py``, ``kasli_soc.py``, and so on. Note that the Kasli and Kasli-SoC targets are designed to take JSON description files as input, whereas their KC705 and ZC706 equivalents work with hardcoded variants instead.
|
||||
Gateware in ARTIQ is housed in ``artiq/gateware`` on the main ARTIQ repository and (for Zynq-specific additions) in ``artiq-zynq/src/gateware`` on ARTIQ-Zynq. The starting point for figuring out your changes will often be the *target file*, which is core device-specific and which you may recognize as the primary module called when building gateware. Depending on your core device, simply track down the file named after it, as in ``kasli.py``, ``kasli_soc.py``, and so on. Note that the Kasli and Kasli-SoC targets are designed to take JSON description files as input (see :ref:`JSON variant devices <devices-table>`), whereas their KC705, ZC706 and EBAZ4205 (see :ref:`Hardcoded variant devices <devices-table>`) equivalents work with hardcoded variants instead.
|
||||
|
||||
To change parameters related to particular peripherals, see also the files ``eem.py`` and ``eem_7series.py``, which describe the core device's interface with other EEM cards in Migen terms, and contain ``add_std`` methods that in turn reference specific gateware modules and assign RTIO channels.
|
||||
|
||||
.. _adding-phy:
|
||||
|
||||
Adding a module to gateware
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
@ -146,7 +150,7 @@ Add the combinatorial block as follows: ::
|
||||
self.comb += [
|
||||
pad0.eq(pad0_o),
|
||||
If(reg,
|
||||
pad1.eq(pad0_k)
|
||||
pad1.eq(pad0_o)
|
||||
)
|
||||
]
|
||||
|
||||
@ -154,6 +158,8 @@ The output ``pad0`` is continuously connected to the value of the ``pad0_o`` reg
|
||||
|
||||
The module is now capable of accepting RTIO output events and applying them to the hardware outputs. What we can't yet do is generate these output events in an ARTIQ kernel. To do that, we need to add a core device driver.
|
||||
|
||||
.. _adding-core-driver:
|
||||
|
||||
Adding a core device driver
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
@ -234,7 +240,7 @@ Now, before you can access your new core device driver from a kernel, it must be
|
||||
.. warning::
|
||||
Channel numbers are assigned sequentially each time ``rtio_channels.append()`` is called. Since we assigned the channel for our linked LEDs in the same location as the old user LEDs, the correct channel number is likely simply the one previously used in your device database for the first LED. In any other case, however, the ``print()`` statement we added to the target file should tell us the exact canonical channel. Search through the console logs produced when generating the gateware to find the line starting with ``Linked LEDs at:``.
|
||||
|
||||
Depending on how your device database was written, note that the channel numbers for other peripherals, if they are present, *will have changed*, and :meth:`~artiq.frontend.artiq_ddb_template` will not generate their numbers correctly unless it is edited to match the new assignments of the user LEDs. For a longer-term gateware change, especially the addition of a new EEM card, ``artiq/frontend/artiq_ddb_template.py`` and ``artiq/coredevice/coredevice_generic.schema`` should be edited accordingly, so that system descriptions and device databases can continue to be parsed and generated correctly.
|
||||
Depending on how your device database was written, note that the channel numbers for other peripherals, if they are present, *will have changed*, and :meth:`~artiq.frontend.artiq_ddb_template` will not generate their numbers correctly unless it is edited to match the new assignments of the user LEDs. For a more long-term gateware change, ``artiq/frontend/artiq_ddb_template.py`` and ``artiq/coredevice/coredevice_generic.schema`` should be edited accordingly, so that system descriptions and device databases can continue to be parsed and generated correctly. See also :ref:`extending-system-description` below.
|
||||
|
||||
Test experiments
|
||||
^^^^^^^^^^^^^^^^
|
||||
@ -269,4 +275,133 @@ and ``linkup.py``: ::
|
||||
|
||||
Run these and observe the results. Congratulations! You have successfully constructed an extension to the ARTIQ RTIO.
|
||||
|
||||
.. + 'Adding custom EEMs' and 'Merging support'
|
||||
Adding a custom EEM
|
||||
-------------------
|
||||
|
||||
.. note::
|
||||
Adding a custom EEM to a Kasli or Kasli-SoC system is not much more difficult than adding new gateware logic for existing hardware, and may in some cases be simpler, if no custom PHY is required. On the other hand, modifying :ref:`Hardcoded variant devices <devices-table>` is a different process, and gateware generation for these boards does not use the files and modules described below. Creating new hardcoded variants is not directly addressed in this tutorial. That said, it would begin and end largely in the respective target file, where the variants are defined.
|
||||
|
||||
Non-realtime hardware which does not need to connect directly to the core device or require gateware support should instead be handled through an NDSP, see :doc:`developing_a_ndsp`. This is a more accessible process in general and does not vary based on core device.
|
||||
|
||||
Extending gateware support
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The first and most important file to look into is ``eem.py``, found in ``artiq/gateware``. This is where the classes for ARTIQ-supported EEM peripherals are defined, and where you can add your own class for a new EEM, following the model of the preexisting classes.
|
||||
|
||||
Your custom EEM class should subclass :class:`artiq.gateware.eem._EEM` and provide the two methods ``io()`` and ``add_std()``. The second, ``add_std()``, will be called to add this EEM to a gateware build. The first is called by ``add_extension()`` in :class:`~artiq.gateware.eem._EEM` itself. Your class should look something like: ::
|
||||
|
||||
class CustomEEM(_EEM):
|
||||
@staticmethod
|
||||
def io(*args, iostandard=default_iostandard, **kwargs):
|
||||
io = [ ... ] # A sequence of pad assignments
|
||||
return io
|
||||
|
||||
@classmethod
|
||||
def add_std(cls, target, *args, **kwargs):
|
||||
cls.add_extension(target, *args, **kwargs) # calls CustomEEM.io(*args, **kwargs)
|
||||
|
||||
# Request IO pads that were added in CustomEEM.io()
|
||||
target.platform.request(...)
|
||||
|
||||
# Add submodule for PHY (pass IO pads in arguments)
|
||||
phy = ...
|
||||
phys.append(phy)
|
||||
target.submodules += phy
|
||||
|
||||
# Add RTIO channel(s) for PHY
|
||||
target.rtio_channels.append(rtio.Channel.from_phy(...))
|
||||
|
||||
Note that the pad assignments ``io()`` returns should be in Migen, usually composed of Migen ``Subsignal`` and ``Pin`` constructs. The predefined :func:`~artiq.gateware.eem._eem_signal` and :func:`~artiq.gateware.eem._eem_pin` functions (also provided in ``eem.py``) may be useful. Note also that ``add_std()`` covers essentially the same territory as the modifications we simply made directly to the target file for the LED tutorial. Depending on your use case, you may need to write a custom PHY for your hardware, or you may be able to make use of the PHYs ARTIQ already makes available. See :ref:`adding-phy`, if you haven't already. A single EEM may also generate several PHYs and/or claim several RTIO channels.
|
||||
|
||||
Now find the file ``eem_7series.py``, also in ``artiq/gateware``. The functions defined in this file mostly serve as wrappers for ``add_std()``, with some additional interpretation and checks on the parameters. Your own ``peripheral`` function should look something like: ::
|
||||
|
||||
def peripheral_custom(module, peripheral):
|
||||
... # (interpret peripheral arguments)
|
||||
CustomEEM.add_std(module, *args, **kwargs)
|
||||
|
||||
Once you have written this function, add it to the ``peripheral_processors`` dictionary at the end of the file, as: ::
|
||||
|
||||
peripheral_processors["custom_eem"] = peripheral_custom
|
||||
|
||||
Now your EEM is fully supported by the ARTIQ gateware infrastructure. All that remains is to add it to a build configuration.
|
||||
|
||||
.. _extending-system-description:
|
||||
|
||||
Target file and system description
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
In the :ref:`extending-gateware-logic` tutorial above, we made modifications directly to the target file, to hardcode a certain PHY for a certain set of pads. This is reasonable to do in the case of the core device LEDs, which are always present and cannot be rearranged. It is theoretically possible to hardcode the addition of your new EEM in the same way. In this case it would not be necessary to make modifications to ``eem.py`` and ``eem_7series.py``; the pad assignments, requisite PHYs, and RTIO channels could all be defined directly in the target file. This is essentially how things are done for :ref:`Hardcoded variant devices <devices-table>`.
|
||||
|
||||
However, with EEM cards, which can be present in different numbers and rearranged at will, it is preferable to be more flexible. This is the reason system description files are used. Assuming you have added your EEM to ``eem.py`` and the ``peripheral_processors`` dictionary, no modifications to the target file are actually necessary. All Kasli and Kasli-SoC targets already contain the line: ::
|
||||
|
||||
eem_7series.add_peripherals(self, description["peripherals"], iostandard=eem_iostandard)
|
||||
|
||||
In other words, your custom EEM will be automatically included if it is in the ``description`` dictionary, which is interpreted directly from the JSON system description. Simply add an entry to your system description: ::
|
||||
|
||||
{
|
||||
"type": "custom_eem",
|
||||
"ports": [0]
|
||||
# any other args to pass to add_std or io later:
|
||||
...
|
||||
}
|
||||
|
||||
Note however that, before a build, system descriptions are always checked against the corresponding JSON schema, which you can find as ``coredevice_generic_schema.json`` in ``artiq/coredevice``. Add the new format for your entry here as well, under ``definition``, ``peripheral``, and ``allOf``: ::
|
||||
|
||||
{
|
||||
"title": "CustomEEM",
|
||||
"if": {
|
||||
"properties": {
|
||||
"type": {
|
||||
"const": "custom_eem"
|
||||
}
|
||||
}
|
||||
},
|
||||
"then": {
|
||||
"properties": {
|
||||
"ports": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
},
|
||||
"minItems": ...,
|
||||
"maxItems": ...
|
||||
},
|
||||
...
|
||||
},
|
||||
"required": ["ports", ...]
|
||||
}
|
||||
},
|
||||
|
||||
Now it should be possible to :doc:`build the binaries <building_developing>`, using your system description and its custom entry.
|
||||
|
||||
Device database and driver
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
As usual, before you can use your hardware from a kernel, you will need to add an entry to your device database. You can use one of the existing ARTIQ core drivers, if applicable, or you can write your own custom driver, as we did in :ref:`adding-core-driver`.
|
||||
|
||||
There are a few options to determine the correct channel number. You can figure it out from the structure of your system description; you can add a print statement to ``add_std()``; or, preferably, you can add support for your custom EEM in :mod:`~artiq.frontend.artiq_ddb_template`, so that the channel number can be handled automatically as it is for other peripherals.
|
||||
|
||||
The relevant file is in ``artiq/frontend``, named simply ``artiq_ddb_template.py``. You will want to add a method within ``PeripheralManager``, in the format: ::
|
||||
|
||||
def process_custom_eem(self, rtio_offset, peripheral):
|
||||
self.gen("""
|
||||
device_db["{name}"] = {{
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.custom_eem",
|
||||
"class": "CustomDriver",
|
||||
"arguments": {{"channel": 0x{channel:06x}}}
|
||||
}}""",
|
||||
name=self.get_name("custom_eem"),
|
||||
channel=rtio_offset + next(channel))
|
||||
return next(channel)
|
||||
|
||||
Further arguments can be passed on through ``arguments`` if necessary. Note that the peripheral manager's ``process`` method chooses which method to use by performing a simple string check, so your ``process_`` method *must* use the same name for your custom hardware as given in the system description's ``"type"``.
|
||||
|
||||
You should now be able to use :mod:`~artiq.frontend.artiq_ddb_template` to generate your device database, and from there, compile and run experiments with your new hardware. Congratulations!
|
||||
|
||||
Merging support
|
||||
---------------
|
||||
|
||||
Being an open-source project, ARTIQ welcomes contributions from outside sources. If you have successfully integrated additional gateware or new hardware into ARTIQ, and you think this might be useful to other ARTIQ users in the community, you might consider merging support -- having your additions incorporated into the canonical ARTIQ codebase. See `this pull request <https://github.com/m-labs/artiq/pull/1800>`_ for one example of such a community addition.
|
||||
|
||||
Merging support also means the opportunity to have your code reviewed by experts, and if your addition is accepted, that maintaining these additions and keeping them up-to-date through new ARTIQ versions may be handled by the developers of ARTIQ directly, instead of being solely your responsibility. Clean up your code, test it well, be sure that it plays well with existing ARTIQ features and interfaces, and follow the `contribution guidelines <https://github.com/m-labs/artiq/blob/master/CONTRIBUTING.rst#contributing-code>`_. Your effort is appreciated!
|
||||
|
@ -27,6 +27,21 @@ Substitute ``artiq-manual-pdf`` to get the LaTeX PDF version. The results will b
|
||||
|
||||
The manual is written in `reStructured Text <https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_; you can find the source files in the ARTIQ repository under ``doc/manual``. If you spot a mistake, a typo, or something that's out of date or missing -- in particular, if you want to add something to this FAQ -- feel free to clone the repository, edit the source RST files, and make a pull request with your version of an improvement. (If you're not a fan of or not familiar with command-line Git, both GitHub and Gitea support making edits and pull requests directly in the web interface; tutorial materials are easy to find online.) The second best thing is to open an issue to make M-Labs aware of the problem.
|
||||
|
||||
roll back to older versions of ARTIQ, or obtain it through other installation methods?
|
||||
--------------------------------------------------------------------------------------
|
||||
|
||||
At all times, three versions of ARTIQ are actively supported by M-Labs, released through the beta, stable, and legacy channels. See :doc:`releases`.
|
||||
|
||||
If you are trying to roll back to stable or legacy, the process should be accordingly simple. See the :doc:`installing` page in the respective version of the manual. If you've previously used the version you are rolling back to, you can likely use the rollback methods described in :ref:`installing-upgrading`; otherwise you can always treat it as a fresh install. Remember that it will also be necessary to reflash core devices with corresponding legacy binaries.
|
||||
|
||||
Regarding pre-legacy releases, note that being actively supported simply means that M-Labs makes prebuilt packages and binaries for these versions available via the supported installation methods and through AFWS. Outdated versions aren't automatically built or offered over these channels, but their source code remains available in the Git repository, and you are free to use it or adapt it in accordance with the terms of the license, including building whatever packages you prefer. In general, though, newer releases of ARTIQ offer more features, more stability, better performance, and better support. The legacy release is supported simply as a convenience for users who haven't been able to upgrade yet. For normal purposes, it is recommended to use the current stable release of ARTIQ if at all possible, or the beta to gain access to new features and improvements that are still in development.
|
||||
|
||||
For more details, see also `Clarifications regarding the ARTIQ release model and AFWS <https://forum.m-labs.hk/d/823-clarifications-regarding-the-artiq-release-model-and-afws>`_.
|
||||
|
||||
.. tip::
|
||||
|
||||
If you're particularly concerned with being able to precisely reproduce older experiments, even when you've moved on to newer ARTIQ versions, upgrade carefully and make your own local backups to be able to rollback to older versions of your system. Make sure to keep copies of older firmware binaries in order to be able to reflash your hardware. Older versions of ARTIQ will always continue working if left untouched, and you won't need to worry about rebuilding from the source if you keep your own prebuilt versions around.
|
||||
|
||||
.. _faq-networking:
|
||||
|
||||
troubleshoot networking problems?
|
||||
@ -60,13 +75,6 @@ Either reflash your core device with a newer version of ARTIQ (see :doc:`flashin
|
||||
|
||||
Minor version mismatches are common, even in stable ARTIQ versions, but should not cause any issues. The ARTIQ release system ensures breaking changes are strictly limited to new release versions, or to the beta branch (which explicitly makes no promises of stability.) Updates that *are* applied to the stable version are usually bug fixes, documentation improvements, or other quality-of-life changes. As long as gateware and software are using the same stable release version of ARTIQ, even if there is a minor mismatch, no warning will be displayed.
|
||||
|
||||
change configuration settings of satellite devices?
|
||||
---------------------------------------------------
|
||||
|
||||
Currently, it is not possible to reach satellites through ``artiq_coremgmt config``, although this is being worked on. On Kasli, use :class:`~artiq.frontend.artiq_mkfs` and :class:`~artiq.frontend.artiq_flash`; on Kasli-SoC, preload the SD card with a ``config.txt``, formatted as a list of ``key=value`` pairs, one per line.
|
||||
|
||||
Don't worry about individually flashing idle or startup kernels. If your idle or startup kernel contains subkernels, it will automatically compile as a ``.tar``, which you only need to flash to the master.
|
||||
|
||||
fix unreliable DRTIO master-satellite links?
|
||||
--------------------------------------------
|
||||
|
||||
@ -106,10 +114,45 @@ fix ``failed to connect to moninj`` in the dashboard?
|
||||
|
||||
This and other similar messages almost always indicate that your device database lists controllers (for example, ``aqctl_moninj_proxy``) that either haven't been started or aren't reachable at the given host and port. See :ref:`mgmt-ctlmgr`, or simply run: ::
|
||||
|
||||
$ artiq_ctlgmr
|
||||
$ artiq_ctlmgr
|
||||
|
||||
to let the controller manager start the necessary controllers automatically.
|
||||
|
||||
fix ``address already in use`` when running ARTIQ commands?
|
||||
-----------------------------------------------------------
|
||||
|
||||
A message like ``OSError: [Errno 98] error while attempting to bind on address ('127.0.0.1', 1067): [errno 98] address already in use`` indicates that the IP address and port number combination you're trying to use is already occupied by some other process. Often this simply means that the ARTIQ process you're trying to start is in fact already running. Note for example that trying to start a controller which is already being run by a controller manager will generally fail for this reason.
|
||||
|
||||
.. note::
|
||||
ARTIQ management system communications, whether distributed or local, run over TCP/IP, using TCP port numbers to identify their destinations. Generally speaking, client processes like the dashboard don't require fixed ports of their own, since they can simply reach out to the master when they want to establish a connection. Running multiple dashboards will never cause a port conflict. On the other hand, server processes like the ARTIQ master have to be 'listening' at a fixed, open port in order to be able to receive incoming connections. For more details, look into `ports in computer networking <https://en.wikipedia.org/wiki/Port_(computer_networking)>`_.
|
||||
|
||||
Most management system processes belong to the second category, and are bound to one or several fixed communication ports while they're running. See also :doc:`default_network_ports`.
|
||||
|
||||
You can use the command ``netstat`` to list the ports currently in use on your system. To check the status of a specific port on Linux, try either of: ::
|
||||
|
||||
$ netstat -anp --inet | grep "<port-number>"
|
||||
$ lsof -i:<port-number>
|
||||
|
||||
On Windows, you can list ports with: ::
|
||||
|
||||
$ netstat -ano -p TCP
|
||||
|
||||
Use your preferred method to search through the output; suitable commands will vary by environment (e.g. ``grep`` in an MSYS2 shell, ``Select-String`` in PowerShell, ``find`` in the Windows command line, etc.)
|
||||
|
||||
In all cases, if there are no results, the port isn't in use and should be free for new processes.
|
||||
|
||||
.. tip::
|
||||
While it is possible to run, for example, two identical ARTIQ controllers on the same machine, they can't be bound to the same port numbers at the same time. If you're intentionally running multiple copies of the same ARTIQ processes, use the command-line ``--port`` options to set alternate ports for at least one of the two. See :doc:`main_frontend_tools` and :doc:`utilities` for exact flags to use. Controllers should have similar flags available and will also require updated :ref:`device database entries <ndsp-integration>`. Note that alternate ports must be consistent to be useful, e.g., a master and dashboard must have the same ``--port-notify`` set in order to communicate with each other!
|
||||
|
||||
Otherwise, either the running process must be stopped, or you'll have to set different port numbers for the process you're trying to start. In some cases it might happen that a process is no longer accessible or has become unresponsive but is still occupying its ports. The easiest way to free the ports is to kill the process manually. On Linux, you can use the ``kill`` command with ``lsof``: ::
|
||||
|
||||
$ kill $(lsof -t -i:<port-number>)
|
||||
|
||||
On Windows, use ``netstat`` again to identify the process ID, and then feed it into ``taskkill``, e.g.: ::
|
||||
|
||||
$ netstat -ano -p TCP
|
||||
$ taskkill /F /PID <process-ID>
|
||||
|
||||
diagnose and fix sequence errors?
|
||||
---------------------------------
|
||||
|
||||
@ -196,6 +239,24 @@ create and use variable-length arrays in kernels?
|
||||
|
||||
You can't, in general; see the corresponding notes under :ref:`compiler-types`. ARTIQ kernels do not support heap allocation, meaning in particular that lists, arrays, and strings must be of constant size. One option is to preallocate everything, as mentioned on the Compiler page; another option is to chunk it and e.g. read 100 events per function call, push them upstream and retry until the gate time closes.
|
||||
|
||||
understand how best to send data between kernel and host?
|
||||
---------------------------------------------------------
|
||||
|
||||
See also :ref:`basic-artiq-python`. Let's run down the options for kernel-host data transfer:
|
||||
|
||||
- Kernels can return single values directly. They *cannot* return lists, arrays or strings, because of the way these values are allocated, which prevents values of these types from outliving the kernel they are created in. This is still true when the values in question are wrapped in functions or objects, in which case they may be missed by lifetime tracking and accepted by the compiler, but will cause memory corruption when run.
|
||||
|
||||
- Kernels can freely make changes to attributes of objects shared with the host, including ``self``. However, these changes will be made to a kernel-owned copy of the object, which is only synchronized with the host copy when the kernel completes. This means that host-side operations executed during the runtime of the kernel, including RPCs, will be handling an unmodified version of the object, and modifications made by those operations will simply be overwritten when the kernel returns.
|
||||
|
||||
.. note::
|
||||
Attribute writeback happens *once per kernel*, that is, if your experiment contains many separate kernels called from the host, modifications will be written back when each separate kernel completes. This is generally not suitable for data transfer, however, as new kernels are costly to create, and experiments often try to avoid doing so. It is also important to specify that kernels called *from* a kernel will not write back to the host upon completion. Attribute writeback is only executed upon return to the host.
|
||||
|
||||
- Kernels can interact with datasets, either as attributes (if :meth:`~artiq.language.environment.HasEnvironment.setattr_dataset` is used) or by RPC of the get and set methods (:meth:`~artiq.language.environment.HasEnvironment.get_dataset`, :meth:`~artiq.language.environment.HasEnvironment.set_dataset`, etc.). In this case note that, like certain other host-side methods, :meth:`~artiq.language.environment.HasEnvironment.get_dataset` will not actually be accepted by the compiler, because its return type is not specified. To call it as an RPC, simply wrap it in another function which *does* specify a return type. :meth:`~artiq.language.environment.HasEnvironment.set_dataset` can be similarly wrapped to make it asynchronous.
|
||||
|
||||
- Kernels can of course also call arbitrary RPCs. When sending data to the host, these can be asynchronous, and this is normally the recommended way of transferring data back to the host, resulting in a relatively minor amount of delay in the kernel. Keep in mind however that asynchronous RPCs may still block execution for some time if the arguments are very large or if many RPCs are submitted in close succession. When receiving data from the host, RPCs must be synchronous, which is still considerably faster than starting a new kernel. Note that if data is being both (asynchronously) sent and received, there is a small possibility of minor race conditions (i.e. retrieved data may not yet show updates sent in an earlier RPC).
|
||||
|
||||
Kernel attributes and data transfer remain somewhat of an open area of development. Many such developments are or will be implemented in `NAC3 <https://forum.m-labs.hk/d/392-nac3-new-artiq-compiler-3-prealpha-release>`_, the next-generation ARTIQ compiler. The overhead for starting new kernels, which is largely dominated by compile time, should be significantly reduced (NAC3 can be expected to complete compilations 6x - 30x faster than currently).
|
||||
|
||||
write part of my experiment as a coroutine/asyncio task/generator?
|
||||
------------------------------------------------------------------
|
||||
|
||||
@ -275,4 +336,4 @@ For more advanced questions, sometimes the `list of publications <https://m-labs
|
||||
- If you're reasonably certain you've identified a bug, or if you'd like to suggest a feature that should be included in future ARTIQ releases, `file a GitHub issue <https://github.com/m-labs/artiq/issues/new/choose>`_ yourself, following one of the provided templates.
|
||||
- In some odd cases, you may want to see the `mailing list archive <https://www.mail-archive.com/artiq@lists.m-labs.hk/>`_; the ARTIQ mailing list was shut down at the end of 2020 and was last regularly used during the time of ARTIQ-2 and 3, but for some older ARTIQ features, or to understand a development thought process, you may still find relevant information there.
|
||||
|
||||
In any situation, if you found the manual unclear or unhelpful, you might consider following the :ref:`directions for contribution <build-documentation>` and editing it to be more helpful for future readers.
|
||||
In any situation, if you found the manual unclear or unhelpful, you might consider following the :ref:`directions for contribution <build-documentation>` and editing it to be more helpful for future readers.
|
||||
|
@ -13,11 +13,11 @@ If you have an active firmware subscription with M-Labs or QUARTIQ, you can obta
|
||||
|
||||
Run the command::
|
||||
|
||||
$ afws_client <username> build <afws_director> <variant>
|
||||
$ afws_client <username> build <afws_directory> <variant>
|
||||
|
||||
Replace ``<username>`` with the login name that was given to you with the subscription, ``<variant>`` with the name of your system variant, and ``<afws_directory>`` with the name of an empty directory, which will be created by the command if it does not exist. Enter your password when prompted and wait for the build (if applicable) and download to finish. If you experience issues with the AFWS client, write to the helpdesk@ email. For more information about :mod:`~artiq.frontend.afws_client` see also the corresponding entry on the :ref:`Utilities <afws-client>` page.
|
||||
|
||||
For certain configurations (KC705 or ZC706 only) it is also possible to source firmware from `the M-Labs Hydra server <https://nixbld.m-labs.hk/project/artiq>`_ (in ``main`` and ``zynq`` respectively).
|
||||
For :ref:`hardcoded variant devices <devices-table>` it is also possible to source firmware from `the M-Labs Hydra server <https://nixbld.m-labs.hk/project/artiq>`_ (in ``main`` and ``zynq``).
|
||||
|
||||
Without a subscription, you may build the firmware yourself from the open source code. See the section :doc:`building_developing`.
|
||||
|
||||
@ -25,7 +25,7 @@ Installing and configuring OpenOCD
|
||||
----------------------------------
|
||||
|
||||
.. warning::
|
||||
These instructions are not applicable to Zynq devices (Kasli-SoC or ZC706), which do not use the utility :mod:`~artiq.frontend.artiq_flash`. If your core device is a Zynq device, skip straight to :ref:`writing-flash`.
|
||||
These instructions are not applicable to :ref:`Zynq devices <devices-table>`, which do not use the utility :mod:`~artiq.frontend.artiq_flash`. If your core device is a Zynq device, skip straight to :ref:`writing-flash`.
|
||||
|
||||
ARTIQ supplies the utility :mod:`~artiq.frontend.artiq_flash`, which uses OpenOCD to write the binary images into an FPGA board's flash memory. For both Nix and MSYS2, OpenOCD are included with the installation by default. Note that in the case of Nix this is the package ``artiq.openocd-bscanspi`` and not ``pkgs.openocd``; the second is OpenOCD from the Nix package collection, which does not support ARTIQ/Sinara boards.
|
||||
|
||||
@ -78,29 +78,25 @@ On Windows
|
||||
Writing the flash
|
||||
-----------------
|
||||
|
||||
First ensure the board is connected to your computer. In the case of Kasli, the JTAG adapter is integrated into the Kasli board; for flashing (and debugging) you can simply connect your computer to the micro-USB connector on the Kasli front panel. For Kasli-SoC, which uses :mod:`~artiq.frontend.artiq_coremgmt` to flash over network, an Ethernet connection and an IP address, supplied either with the ``-D`` option or in your :ref:`device database <device-db>`, are sufficient.
|
||||
If your device is already accessible over the network, all you need is an Ethernet connection and a correct IP address (supplied either with the ``-D`` option or in :ref:`your device database <device-db>`). ::
|
||||
|
||||
For Kasli-SoC or ZC706:
|
||||
::
|
||||
$ artiq_coremgmt [-D IP_address] flash <afws_directory>
|
||||
$ artiq_coremgmt [-D IP_address] reboot
|
||||
|
||||
$ artiq_coremgmt [-D IP_address] config write -f boot <afws_directory>/boot.bin
|
||||
$ artiq_coremgmt reboot
|
||||
If the device is not reachable due to corrupted firmware or networking problems, binaries can be loaded manually. On Kasli or KC705, connect the board directly to your computer by JTAG USB and use :mod:`~artiq.frontend.artiq_flash`, as follows: ::
|
||||
|
||||
If the device is not reachable due to corrupted firmware or networking problems, extract the SD card and copy ``boot.bin`` onto it manually.
|
||||
$ artiq_flash [-t kc705] -d <afws_directory>
|
||||
|
||||
For Kasli:
|
||||
::
|
||||
Note the micro-USB in the Kasli front panel. On KC705, the SW13 switches need to be set to 00001.
|
||||
|
||||
$ artiq_flash -d <afws_directory>
|
||||
For Zynq devices (Kasli-SoC, ZC706 or EBAZ4205), extract the SD card and copy ``boot.bin`` onto it manually.
|
||||
|
||||
For KC705:
|
||||
::
|
||||
Writing to satellite devices
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
$ artiq_flash -t kc705 -d <afws_directory>
|
||||
Satellite devices can at any time be flashed directly through the SD card or :mod:`~artiq.frontend.artiq_flash`, as applicable. Satellite devices do not support individual networking and do not have IP addresses. If your DRTIO system is up and running and the routing table is in place, on the other hand, they can be flashed through the master's network connection: ::
|
||||
|
||||
The SW13 switches need to be set to 00001.
|
||||
|
||||
Flashing over network is also possible for Kasli and KC705, assuming IP networking has already been set up. In this case, the ``-H HOSTNAME`` option is used; see the entry for :mod:`~artiq.frontend.artiq_flash` in the :ref:`Utilities <flashing-loading-tool>` reference.
|
||||
$ artiq_coremgmt [-D IP_address] -s <destination_number> flash <afws_directory>
|
||||
|
||||
.. _connecting-uart:
|
||||
|
||||
|
@ -139,8 +139,8 @@ Experiments may have arguments, values which can be set in the dashboard on subm
|
||||
self.setattr_argument("count", NumberValue(precision=0, step=1))
|
||||
|
||||
def run(self):
|
||||
for i in range(self.count):
|
||||
print("Hello World", i)
|
||||
for i in range(self.count):
|
||||
print("Hello World", i)
|
||||
|
||||
The method :meth:`~artiq.language.environment.HasEnvironment.setattr_argument` acts to set the argument and make its value accessible, similar to the effect of :meth:`~artiq.language.environment.HasEnvironment.setattr_device`. The second input sets the type of the argument; here, :class:`~artiq.language.environment.NumberValue` represents a floating point numerical value. To learn what other types are supported, see :class:`artiq.language.environment` and :class:`artiq.language.scan`.
|
||||
|
||||
@ -171,7 +171,6 @@ Now, in the same dock as 'Explorer', navigate to the tab 'Interactive Args'. You
|
||||
|
||||
In order to request and supply multiple interactive arguments at once, simply place them in the same ``with`` block; see also the example ``interactive.py`` in ``examples/no_hardware``.
|
||||
|
||||
|
||||
.. _master-setting-up-git:
|
||||
|
||||
Setting up Git integration
|
||||
@ -180,7 +179,7 @@ Setting up Git integration
|
||||
So far, we have used the bare filesystem for the experiment repository, without any version control. Using Git to host the experiment repository helps with tracking modifications to experiments and with the traceability to a particular version of an experiment.
|
||||
|
||||
.. note::
|
||||
The workflow we will describe in this tutorial corresponds to a situation where the computer running the ARTIQ master is also used as a Git server to which multiple users may contribute code. The Git setup can be customized according to your needs; the main point to remember is that when scanning or submitting, the ARTIQ master uses the internal Git data (*not* any working directory that may be present) to fetch the latest *fully completed commit* at the repository's head. See the :ref:`Management system <mgmt-git-integration>` page for notes on alternate workflows.
|
||||
The workflow we will describe in this tutorial is designed for a situation where the computer running the ARTIQ master is also used as a Git server to which multiple users may contribute code. This is not the only way Git integration can be useful, and the setup can be customized according to your needs. The main point to remember is that when scanning or submitting, the ARTIQ master uses the internal Git data, *not* any checked-out files that may be present, to fetch the latest *fully completed commit* at the repository's head. See also :ref:`mgmt-git-integration`, especially if you are unfamiliar with Git.
|
||||
|
||||
We will use our current ``repository`` folder as the working directory for making local modifications to the experiments, move it away from the master's data directory, and replace it with a new ``repository`` folder, which will hold only the Git data used by the master. Stop the master with Ctrl+C and enter the following commands: ::
|
||||
|
||||
@ -188,7 +187,7 @@ We will use our current ``repository`` folder as the working directory for makin
|
||||
$ mv repository ~/artiq-work
|
||||
$ mkdir repository
|
||||
$ cd repository
|
||||
$ git init bare
|
||||
$ git init --bare
|
||||
|
||||
Now initialize a regular (non-bare) Git repository in our working directory: ::
|
||||
|
||||
@ -207,7 +206,7 @@ and finally, connect the two repositories and push the commit upstream to the ma
|
||||
$ git push -u origin master
|
||||
|
||||
.. tip::
|
||||
If you are not familiar with command-line Git and would like to understand these commands in more detail, search for some tutorials in basic use of Git; there are many available online.
|
||||
If you are not familiar with command-line Git and would like to understand these commands in more detail, look for tutorials on the basic use of Git; there are many available online.
|
||||
|
||||
Start the master again with the ``-g`` flag, which tells it to treat its ``repository`` folder as a bare Git repository: ::
|
||||
|
||||
@ -233,10 +232,10 @@ Then set its execution permissions: ::
|
||||
|
||||
Let's now make a modification to the experiments. In the working directory ``artiq-work``, open ``mgmt_tutorial.py`` again and add an exclamation mark to the end of "Hello World". Before committing it, check that the experiment can still be executed correctly by submitting it directly from the working directory, using the command-line client: ::
|
||||
|
||||
$ artiq_client submit ~/artiq-work/mgmt_tutorial.py
|
||||
$ artiq_client submit --content ~/artiq-work/mgmt_tutorial.py
|
||||
|
||||
.. note::
|
||||
Alternatively, right-click in the Explorer dock and select the 'Open file outside repository' option for the same effect.
|
||||
The ``--content`` flag submits by content, that is, by sending a raw file rather than selecting an experiment from the master's local environment. Since you are likely running the client and the master on the same machine, it is probably not strictly necessary here. In a distributed setup across multiple machines, the master will not have access to the client's filesystem, and the ``--content`` flag is the only way to run experiments directly from a client file. See also :ref:`submission-details`.
|
||||
|
||||
Verify the log in the GUI. If you are happy with the result, commit the new version and push it into the master's repository: ::
|
||||
|
||||
@ -257,4 +256,4 @@ Arguments to the individual tools (including ``-s`` and ``--bind``) can still be
|
||||
|
||||
$ artiq_session -m=-g
|
||||
|
||||
to start the session with the master in Git mode. See also :mod:`~artiq.frontend.artiq_session`.
|
||||
to start the session with Git integration. See also :mod:`~artiq.frontend.artiq_session`.
|
@ -97,7 +97,7 @@ To find more packages you can browse the `Nix package search <https://search.nix
|
||||
|
||||
$ nix profile remove [index]
|
||||
|
||||
While using flakes, ARTIQ is not 'installed' as such in any permanent way. However, Nix will preserve independent cached packages in ``/nix/store`` for each flake, which over time or with many different flakes and versions can take up large amounts of storage space. To clear this cache, run ``$ nix-garbage-collect``.
|
||||
While using flakes, ARTIQ is not 'installed' as such in any permanent way. However, Nix will preserve independent cached packages in ``/nix/store`` for each flake, which over time or with many different flakes and versions can take up large amounts of storage space. To clear this cache, run ``$ nix-collect-garbage``.
|
||||
|
||||
.. _installing-troubleshooting:
|
||||
|
||||
|
@ -7,19 +7,23 @@ Management system
|
||||
Components
|
||||
----------
|
||||
|
||||
See also :doc:`overview` for a visual idea of the management system.
|
||||
|
||||
Master
|
||||
^^^^^^
|
||||
|
||||
The :ref:`ARTIQ master <frontend-artiq-master>` is responsible for managing the dataset and device databases, the experiment repository, scheduling and running experiments, archiving results, and distributing real-time results. It is a headless component, and one or several clients (command-line or GUI) use the network to interact with it.
|
||||
|
||||
The master expects to be given a directory on startup, the experiment repository, containing these experiments which are automatically tracked and communicated to clients. By default, it simply looks for a directory called ``repository``. The ``-r`` flag can be used to substitute an alternate location.
|
||||
The master expects to be given a directory on startup, the experiment repository, containing the experiments, which are automatically tracked and communicated to clients. By default, it simply looks for a directory called ``repository``. The ``-r`` flag can be used to substitute an alternate location. Subdirectories in ``repository`` are also read, and experiments stored in them are known to the master. They will be displayed as folders in the dashboard's explorer.
|
||||
|
||||
It also expects access to a ``device_db.py``, with a corresponding flag ``--device-db`` to substitute a different file name. Additionally, it will reference or create certain files in the directory it is run in, among them ``dataset_db.mdb``, the LMDB database containing persistent datasets, ``last_rid.pyon``, which simply contains the last used RID, and the ``results`` directory.
|
||||
It also expects access to a ``device_db.py``, with a corresponding flag ``--device-db`` to substitute a different file name. Additionally, it will reference or create certain files in the directory it is run in, among them ``dataset_db.mdb``, the LMDB database containing persistent datasets, ``last_rid.pyon``, which simply holds the last used RID, and the ``results`` directory. For more on the device and dataset databases, see also :doc:`environment`.
|
||||
|
||||
.. note::
|
||||
Because the other parts of the management system all seem to be able to access the information stored in these files, confusion can sometimes result about where it is really stored and how it is distributed. Device databases, datasets, results, and experiments are all solely kept and administered by the master, which communicates information to dashboards, browsers, and clients over the network whenever necessary.
|
||||
Because the other parts of the management system often display knowledge of the information stored in these files, confusion can sometimes result about where it is really stored and how it is distributed. Device databases, datasets, results, and experiments are all solely kept and administered by the master, which communicates with dashboards, clients, and controller managers over the network whenever necessary.
|
||||
|
||||
Notably, clients and dashboards do not send in experiments to the master; they request them from the array of experiments the master knows about, primarily those in ``repository``, but also in the master's local file system, if 'Open file outside repository' is selected. This is true even if ``repository`` is configured as a Git repository and cloned on other machines.
|
||||
Notably, clients and dashboards normally do not *send in* experiments to the master. Rather, they make requests from the list of experiments the master already knows about, primarily those in ``repository``, but also in the master's local file system. This is true even if ``repository`` is configured as a Git repository and cloned onto other machines.
|
||||
|
||||
The only exception is the command line client's ``--content`` flag, which allows submission by content, i.e. sending in experiment files which may be otherwise unknown to the master. This feature however has some important limitations; see below in :ref:`submission-details`.
|
||||
|
||||
The ARTIQ master should not be confused with the 'master' device in a DRTIO system, which is only a designation for the particular core device acting as central node in a distributed configuration of ARTIQ. The two concepts are otherwise unrelated.
|
||||
|
||||
@ -32,31 +36,66 @@ The :ref:`dashboard <frontend-artiq-dashboard>` connects to the master and is th
|
||||
|
||||
The dashboard remembers and restores GUI state (window/dock positions, last values entered by the user, etc.) in between instances. This information is stored in a file called ``artiq_dashboard_{server}_{port}.pyon`` in the configuration directory (e.g. generally ``~/.config/artiq`` for Unix, same as data directory for Windows), distinguished in subfolders by ARTIQ version.
|
||||
|
||||
.. note::
|
||||
|
||||
To find where the configuration files are stored on your machine, try the command: ::
|
||||
|
||||
python -c "from artiq.tools import get_user_config_dir; print(get_user_config_dir())"
|
||||
|
||||
Browser
|
||||
^^^^^^^
|
||||
|
||||
The :ref:`browser <frontend-artiq-browser>` is used to read ARTIQ ``results`` HDF5 files and run experiment :meth:`~artiq.language.environment.Experiment.analyze` functions, in particular to retrieve previous result databases, process them, and display them in ARTIQ applets. The browser also remembers and restores its GUI state; this is stored in a file called simply ``artiq_browser``, kept in the same configuration directory as the dashboard.
|
||||
The :ref:`browser <frontend-artiq-browser>` is used to read ARTIQ ``results`` HDF5 files and run experiment :meth:`~artiq.language.environment.Experiment.analyze` functions, in particular to retrieve previous result databases, process them, and display them in ARTIQ applets. The browser also remembers and restores its GUI state; this is stored in a file called simply ``artiq_browser.pyon``, kept in the same configuration directory as the dashboard.
|
||||
|
||||
The browser *can* connect to the master, specifically in order to be able to access the master's store of datasets and to upload new datasets to it, but it does not require such a connection and can also be run completely standalone. However, it requires filesystem access to the ``results`` files to be of much use.
|
||||
|
||||
Controller manager
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The controller manager is provided in the ``artiq-comtools`` package (which is also made available separately from mainline ARTIQ, to allow independent use with minimal dependencies) and started with the :mod:`~artiq_comtools.artiq_ctlmgr` command. It is responsible for running and stopping controllers on a machine. One controller manager must be run by each network node that runs controllers.
|
||||
|
||||
A controller manager connects to the master and accesses the device database through it to determine what controllers need to be run. The local network address of the connection is used to filter for only those controllers allocated to the current node. Hostname resolution is supported. Changes to the device database are tracked and controllers will be stopped and started accordingly.
|
||||
A controller manager connects to the master and accesses the device database through it to determine what controllers need to be run. The local network address of the connection is used to filter for only those controllers allocated to the current node. Hostname resolution is supported. Changes to the device database are tracked upon rescan and controllers will be stopped and started accordingly.
|
||||
|
||||
.. _mgmt-git-integration:
|
||||
|
||||
Git integration
|
||||
---------------
|
||||
|
||||
The master may use a Git repository to store experiment source code. Using Git has many advantages. For example, each result file (HDF5) contains the commit ID corresponding to the exact source code it was produced by, which helps reproducibility. Although the master also supports non-bare repositories, it is recommended to use a bare repository (e.g. ``git init --bare``) to easily support push transactions from clients.
|
||||
The master may use a Git repository to store experiment source code. Using Git rather than the bare filesystem has many advantages. For example, each HDF5 result file contains the commit ID corresponding to the exact source code it was produced by, making results more reproducible. See also :ref:`master-setting-up-git`. Generally, it is recommended to use a bare repository (i.e. ``git init --bare``), to easily support push transactions from clients, but both bare and non-bare repositories are supported.
|
||||
|
||||
You will want Git to notify the master every time the repository is pushed to (e.g. updated), so that the master knows to rescan the repository for new or changed experiments. This is easiest done with the ``post-receive`` hook, as described in :ref:`master-setting-up-git`.
|
||||
.. tip::
|
||||
If you are not familiar with Git, you may find the idea of the master reading experiment files from a bare repository confusing. A bare repository does not normally contain copies of the objects it stores; that is to say, you won't be able to find your experiment files listed in it. What it *does* contain is Git's internal data structures, i.e., ``hooks``, ``objects``, ``config``, and so forth. Among other things, this structure also stores, in compressed form, the full contents of every commit made to the repository. It is this compressed data which the master has access to and can read the experiments from. It is not meant to be directly edited, but it is updated every time new commits are received.
|
||||
|
||||
.. note::
|
||||
If you plan to run the ARTIQ system entirely on a single machine, you may also consider using a non-bare repository and the ``post-commit`` hook to trigger repository scans every time you commit changes (locally). In this case, note that the ARTIQ master never uses the repository's working directory, but only what is committed. More precisely, when scanning the repository, it fetches the last (atomically) completed commit at that time of repository scan and checks it out in a temporary folder. This commit ID is used by default when subsequently submitting experiments. There is one temporary folder by commit ID currently referenced in the system, so concurrently running experiments from different repository revisions is fully supported by the master.
|
||||
It may be useful to note that a normal Git repository, created with ``git init``, contains all the same internal data, kept in a hidden directory called ``.git`` to protect it from accidental modifications. Unlike a bare repository, it *also* normally contains working copies of all the files tracked by Git. When working with a non-bare repository, it is important to understand that the master still takes its image of the available experiments from the internal data, and *not* from the working copies. This is why, even in a non-bare repository, changes are only reflected once they are committed. The working copies are simply ignored.
|
||||
|
||||
By default, the dashboard runs experiments from the repository, whereas the command-line client (``artiq_client submit``) runs experiments from the raw filesystem (which is useful for iterating rapidly without creating many disorganized commits). In order to run from the raw filesystem when using the dashboard, right-click in the Explorer window and select the option 'Open file outside repository'. In order to run from the repository when using the command-line client, simply pass the ``-R`` flag.
|
||||
Other important files -- the device database, the dataset database, the ``results`` directory, and so on -- are normally kept outside of the experiment repository, and in this case, they are not stored or handled by Git at all. The master accesses them through the regular filesystem, not through Git, and other ARTIQ components access them through the master. This can be seen visualized in the :doc:`overview`.
|
||||
|
||||
With a bare repository, a Git ``post-receive`` hook can be used to trigger a repository scan every time the repository is pushed to (i.e. updated), as described in the tutorial. This removes the need to trigger repository rescans manually. If you plan to run your ARTIQ system from a single PC, without distributed clients, you may also consider using a non-bare repository and the ``post-commit`` hook instead. In this workflow, changes can be drafted directly in the master's repository, but the master continues to submit from the last completed commit until a new commit is made (and the repository is rescanned).
|
||||
|
||||
Behind the scenes, when scanning the repository, the master fetches the last (atomically) completed commit at the time of the repository scan and checks it out in a temporary folder. This commit ID is used by default when subsequently submitting experiments. There is one temporary folder per commit ID currently referenced in the system, so concurrently running experiments from different repository revisions is fully supported by the master.
|
||||
|
||||
The use of the Git backend is triggered when the master is started with the ``-g`` flag. Otherwise the raw filesystem is read and Git-based features will not be available.
|
||||
|
||||
.. _submission-details:
|
||||
|
||||
Submission from the raw filesystem
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
By default, the dashboard runs experiments from the repository, that is, the master's temporary checkout folder, whereas the command-line client (``artiq_client submit``) runs experiments from the raw filesystem. This is convenient in order to be able to run working drafts without first committing them.
|
||||
|
||||
Be careful with this behavior, however, as it is rather particular. *The raw filesystem* means the immediate local filesystem of the running master. If the client is being run remotely, and you want to submit an experiment from the *client's* local filesystem, e.g. an uncommitted draft in a clone of the experiment repository, use the ``--content`` flag. If you would like to submit an experiment from the repository, in the same way the dashboard does, use the flag ``--repository`` / ``-R``.
|
||||
|
||||
To be precise:
|
||||
|
||||
- ``artiq_client submit`` should be given a file path that is relative to the location of the master, that is, if the master is run in the directory above its ``repository``, an experiment can be submitted as ``repository/experiment_file.py``. Keep in mind that when working with a bare repository, there may be no copies of experiment files in the raw local filesystem. In this case, files can still be made accessible to the master by network filesystem share or some other method for testing.
|
||||
|
||||
- ``artiq_client submit --repository`` should be given a file path relative to the root of the repository, that is, if the experiment is directly within ``repository``, it should be submitted as ``experiment_file.py``. Just as in the dashboard, this file is taken from the last completed commit.
|
||||
|
||||
- ``artiq_client submit --content`` should be given a file path that is relative to the location of the client, whether that is local or remote to the master; the contents of the file will be submitted directly to the master to be run. This essentially transfers a raw string, and will not work if the experiment imports or otherwise accesses other files.
|
||||
|
||||
Other flags can also be used, such as ``--class-name`` / ``-c`` to select a class name in an experiment which contains several, or ``--revision`` / ``-r`` to use a particular revision. See the reference of :mod:`~artiq.frontend.artiq_client` in :doc:`main_frontend_tools`.
|
||||
|
||||
In order to run from the raw filesystem when using the dashboard, right-click in the Explorer window and select the option 'Open file outside repository'. This will open a file explorer window displaying the master's local filesystem, which can be used to select and submit experiments outside of the chosen repository directory. There is no GUI support for submission by content. It is recommended to simply use the command-line client for this purpose.
|
||||
|
||||
.. _experiment-scheduling:
|
||||
|
||||
@ -66,30 +105,30 @@ Experiment scheduling
|
||||
Basics
|
||||
^^^^^^
|
||||
|
||||
To make more efficient use of hardware resources, experiments are generally split into three phases and pipelined, such that potentially compute-intensive pre-computation or analysis phases may be executed in parallel with the bodies of other experiments, which access hardware.
|
||||
To make more efficient use of resources, experiments are generally split into three phases and pipelined. While one experiment has control of the specialized hardware, others may carry out pre-computation or post-analysis in parallel. There are three stages of a standard experiment users may write code for:
|
||||
|
||||
1. The **preparation** stage, which pre-fetches and pre-computes any data that is necessary to run the experiment. Users may implement this stage by overloading the :meth:`~artiq.language.environment.Experiment.prepare` method. It is not permitted to access hardware in this stage.
|
||||
|
||||
2. The **run** stage, which corresponds to the body of the experiment. Users *must* implement this stage and overload the :meth:`~artiq.language.environment.Experiment.run` method. In this stage, the experiment has the right to run kernels and access hardware.
|
||||
|
||||
3. The **analysis** stage, where raw results collected in the running stage can be post-processed and/or saved. This stage may be implemented by overloading the :meth:`~artiq.language.environment.Experiment.analyze` method. It is not permitted to access hardware in this stage.
|
||||
|
||||
.. seealso::
|
||||
These steps are implemented in :class:`~artiq.language.environment.Experiment`. However, user-written experiments should usually derive from (sub-class) :class:`artiq.language.environment.EnvExperiment`, which additionally provides access to the methods of :class:`artiq.language.environment.HasEnvironment`.
|
||||
These steps are implemented in :class:`artiq.language.environment.Experiment`. User-written experiments should usually derive from (sub-class) :class:`artiq.language.environment.EnvExperiment`, which additionally provides access to the methods of :class:`~artiq.language.environment.HasEnvironment`.
|
||||
|
||||
There are three stages of a standard experiment users may write code in:
|
||||
Only the :meth:`~artiq.language.environment.Experiment.run` method implementation is mandatory; if the experiment does not fit into the pipelined scheduling model, it can leave one or both of the other methods empty (which is the default). Preparation and analysis stages are forbidden from accessing hardware so as not to interfere with a potential concurrent run stage. Note that they are not *prevented* from doing so, and it is up to the programmer to respect these guidelines.
|
||||
|
||||
1. The **preparation** stage, which pre-fetches and pre-computes any data that is necessary to run the experiment. Users may implement this stage by overloading the :meth:`~artiq.language.environment.Experiment.prepare` method. It is not permitted to access hardware in this stage, as doing so may conflict with other experiments using the same devices.
|
||||
2. The **run** stage, which corresponds to the body of the experiment and generally accesses hardware. Users must implement this stage and overload the :meth:`~artiq.language.environment.Experiment.run` method.
|
||||
3. The **analysis** stage, where raw results collected in the running stage are post-processed and may lead to updates of the parameter database. This stage may be implemented by overloading the :meth:`~artiq.language.environment.Experiment.analyze` method.
|
||||
|
||||
Only the :meth:`~artiq.language.environment.Experiment.run` method implementation is mandatory; if the experiment does not fit into the pipelined scheduling model, it can leave one or both of the other methods empty (which is the default).
|
||||
|
||||
Consecutive experiments are then executed in a pipelined manner by the ARTIQ master's scheduler: first experiment A runs its preparation stage, than experiment A executes its running stage while experiment B executes its preparation stage, and so on.
|
||||
Consecutive experiments are automatically pipelined by the ARTIQ master's scheduler: first experiment A executes its preparation stage, then experiment A executes its running stage while experiment B executes its preparation stage, and so on.
|
||||
|
||||
.. note::
|
||||
The next experiment (B) may start its :meth:`~artiq.language.environment.Experiment.run` before all events placed into (core device) RTIO buffers by the previous experiment (A) have been executed. These events may then execute while experiment B's :meth:`~artiq.language.environment.Experiment.run` is already in progress. Using :meth:`~artiq.coredevice.core.Core.reset` in experiment B will clear the RTIO buffers, discarding pending events, including those left over from A.
|
||||
An experiment A can exit its :meth:`~artiq.language.environment.Experiment.run` method before all its RTIO events have been executed, i.e., while those events are still 'waiting' in the RTIO core buffers. If the next experiment entering the running stage uses :meth:`~artiq.coredevice.core.Core.reset`, those buffers will be cleared, and any remaining events discarded, potentially including those scheduled by A.
|
||||
|
||||
Interactions between events of different experiments can be avoided by preventing the :meth:`~artiq.language.environment.Experiment.run` method of experiment A from returning until all events have been executed. This is discussed in the section on RTIO :ref:`rtio-handover-synchronization`.
|
||||
This is a deliberate feature of seamless handover, but can cause problems if the events scheduled by A were important and should not have been skipped. In those cases, it is recommended to ensure the :meth:`~artiq.language.environment.Experiment.run` method of experiment A does not return until *all* its scheduled events have been executed, or that it is followed only by experiments which do not perform a core reset. See also :ref:`RTIO Synchronization<rtio-handover-synchronization>`.
|
||||
|
||||
Priorities and timed runs
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
When determining what experiment should begin executing next (i.e. enter the preparation stage), the scheduling looks at the following factors, by decreasing order of precedence:
|
||||
When determining what experiment should begin executing next (i.e. enter its preparation stage), the scheduling looks at the following factors, by decreasing order of precedence:
|
||||
|
||||
1. Experiments may be scheduled with a due date. This is considered the *earliest possible* time of their execution (rather than a deadline, or latest possible -- ARTIQ makes no guarantees about experiments being started or completed before any specified time). If a due date is set and it has not yet been reached, the experiment is not eligible for preparation.
|
||||
2. The integer priority value specified by the user.
|
||||
@ -106,11 +145,41 @@ When using multiple pipelines it is the responsibility of the user to ensure tha
|
||||
Pauses
|
||||
^^^^^^
|
||||
|
||||
In the run stage, an experiment may yield to the scheduler by calling the :meth:`pause` method of the scheduler.
|
||||
If there are other experiments with higher priority (e.g. a high-priority experiment has been newly submitted, or reached its due date and become eligible for execution), the higher-priority experiments are executed first, and then :meth:`pause` returns. If there are no such experiments, :meth:`pause` returns immediately. To check whether :meth:`pause` would in fact *not* return immediately, use :meth:`artiq.master.scheduler.Scheduler.check_pause`.
|
||||
In the run stage, an experiment may yield to the scheduler by calling the :meth:`pause` method of the scheduler. If there are other experiments with higher priority (e.g. a high-priority experiment has been newly submitted, or reached its due date and become eligible for execution), the higher-priority experiments are executed first, and then :meth:`pause` returns. If there are no such experiments, :meth:`pause` returns immediately. To check whether :meth:`pause` would in fact *not* return immediately, use :meth:`~artiq.master.scheduler.Scheduler.check_pause`.
|
||||
|
||||
The experiment must place the hardware in a safe state and disconnect from the core device (typically, by calling ``self.core.comm.close()`` from the kernel, which is equivalent to :meth:`artiq.coredevice.core.Core.close`) before calling :meth:`pause`.
|
||||
The experiment must place the hardware in a safe state and disconnect from the core device before calling :meth:`pause` - typically by calling ``self.core.comm.close()``, which is equivalent to :meth:`~artiq.coredevice.core.Core.close`, from the host after completion of the kernel.
|
||||
|
||||
Accessing the :meth:`pause` and :meth:`~artiq.master.scheduler.Scheduler.check_pause` methods is done through a virtual device called ``scheduler`` that is accessible to all experiments. The scheduler virtual device is requested like regular devices using :meth:`~artiq.language.environment.HasEnvironment.get_device` (``self.get_device()``) or :meth:`~artiq.language.environment.HasEnvironment.setattr_device` (``self.setattr_device()``).
|
||||
Accessing the :meth:`pause` and :meth:`~artiq.master.scheduler.Scheduler.check_pause` methods is done through a virtual device called ``scheduler`` that is accessible to all experiments. The scheduler virtual device is requested like any other device, with :meth:`~artiq.language.environment.HasEnvironment.get_device` or :meth:`~artiq.language.environment.HasEnvironment.setattr_device`. See also the detailed reference on the :doc:`mgmt_system_reference` page.
|
||||
|
||||
:meth:`~artiq.master.scheduler.Scheduler.check_pause` can be called (via RPC) from a kernel, but :meth:`pause` must not be.
|
||||
.. note::
|
||||
For maximum compatibility, the ``scheduler`` virtual device can also be accessed when running experiments with :mod:`~artiq.frontend.artiq_run`. However, since there is no :mod:`~artiq.master.scheduler.Scheduler` backend, the methods are replaced by simple dummies, e.g. :meth:`~artiq.master.scheduler.Scheduler.check_pause` simply returns false, and requests are printed into the console. Much the same is true of client control broadcasts (see again :doc:`mgmt_system_reference`).
|
||||
|
||||
:meth:`~artiq.master.scheduler.Scheduler.check_pause` can be called (via RPC) from a kernel, but :meth:`pause` cannot be.
|
||||
|
||||
Scheduler attributes
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The ``scheduler`` virtual device also exposes information about an experiment's scheduling status through the attributes ``rid``, ``pipeline_name``, ``priority``, and ``expid``. This allows e.g. access to an experiment's current RID as ``self.scheduler.rid``.
|
||||
|
||||
Internal details
|
||||
----------------
|
||||
|
||||
Internally, the ARTIQ management system uses Simple Python Communications, or `SiPyCo <https://github.com/m-labs/sipyco>`_, which was originally written as part of ARTIQ and later split away as a generic communications library. The SiPyCo manual is hosted `here <https://m-labs.hk/artiq/sipyco-manual/>`_. The core of the management system is largely contained within ``artiq.master``, which contains the :class:`~artiq.master.scheduler.Scheduler`, the various environment and filesystem databases, and the worker processes that execute the experiments themselves.
|
||||
|
||||
By default, the master communicates with other processes over four network ports, see :doc:`default_network_ports`, for logging, broadcasts, notifications, and control. All four of these can be customized by using the ``--port`` flags, see :ref:`the front-end reference<frontend-artiq-master>`.
|
||||
|
||||
- The logging port is occupied by a :class:`sipyco.logging_tools.Server`, and used only by the worker processes to transmit exceptions and other information to the master.
|
||||
- The broadcast port is occupied by a :class:`sipyco.broadcast.Broadcaster`, which inherits from :class:`sipyco.pc_rpc.AsyncioServer`. Both the dashboard and the client automatically connect to this port, using :class:`sipyco.broadcast.Receiver` to receive logs and CCB messages.
|
||||
- The notification port is occupied by a :class:`sipyco.sync_struct.Publisher`. The dashboard and client automatically connect to this port, using :class:`sipyco.sync_struct.Subscriber`. Several objects are given to the :class:`~sipyco.sync_struct.Publisher` to monitor, among them the experiment schedule, the device database, the dataset database, and the experiment list. It notifies the subscribers whenever these objects are modified.
|
||||
- The control port is occupied by a :class:`sipyco.pc_rpc.Server`, which when running can be queried with :mod:`sipyco.sipyco_rpctool` like any other source of RPC targets. Multiple concurrent calls to target methods are supported. Through this server, the clients are provided with access to control methods to access the various databases and repositories the master handles, through classes like :class:`artiq.master.databases.DeviceDB`, :class:`artiq.master.databases.DatasetDB`, and :class:`artiq.master.experiments.ExperimentDB`.
|
||||
|
||||
The experiment database is supported by :class:`artiq.master.experiments.GitBackend` when Git integration is active, and :class:`artiq.master.experiments.FilesystemBackend` if not.
|
||||
|
||||
Experiment workers
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The :mod:`~artiq.frontend.artiq_run` tool makes use of many of the same databases and handlers as the master (whereas the scheduler and CCB manager are replaced by dummies, as mentioned above), but also directly runs the build, run, and analyze stages of the experiments. On the other hand, within the management system, the master's :class:`~artiq.master.scheduler.Scheduler` spawns a new worker process for each experiment. This allows for the parallelization of stages and pipelines described above in :ref:`experiment-scheduling`.
|
||||
|
||||
The master and the worker processes communicate through IPC, Inter-Process Communication, implemented with :mod:`sipyco.pipe_ipc`. Specifically, it is :mod:`artiq.master.worker_impl` which is spawned as a new process for each experiment, and the class :class:`artiq.master.worker.Worker` which manages the IPC requests of the workers, including access to :class:`~artiq.master.scheduler.Scheduler` but also to devices, datasets, arguments, and CCBs. This allows the worker to support experiment :meth:`~artiq.language.environment.HasEnvironment.build` methods and the :doc:`management system interfaces <mgmt_system_reference>`.
|
||||
|
||||
The worker process also executes the experiment code itself. Within the experiment, kernel decorators -- :class:`~artiq.language.core.kernel`, :class:`~artiq.language.core.subkernel`, etc. -- call the ARTIQ compiler as necessary and trigger core device execution.
|
@ -41,13 +41,15 @@ You can open the result file for this experiment with HDFView, h5dump, or any si
|
||||
.. tip::
|
||||
If you are not familiar with Git, try running ``git log`` in either of your connected Git repositories to see a history of commits in the repository which includes their respective hashes. As long as this history remains intact, you can use a hash of this kind to uniquely identify, and even retrieve, the state of the files in the repository at the time this experiment was run. In other words, when running experiments from a Git repository, it's always possible to retrieve the code that led to a particular set of results.
|
||||
|
||||
A last interesting feature of the result files is that, for experiments with arguments, they also store the values of the arguments used for that iteration of the experiment. Again, this is for reproducibility: if it's ever necessary to find what arguments produced certain results, that information is preserved in the HDF5 file. To repeat an experiment with the exact same arguments as in a previous run, the 'Load HDF5' button in the submission window can be used to take them directly from a result file.
|
||||
|
||||
Applets
|
||||
^^^^^^^
|
||||
|
||||
Most of the time, rather than the HDF dump, we would like to see our result datasets in a readable graphical form, preferably without opening any third-party applications. In the ARTIQ dashboard, this is achieved by programs called "applets". Applets provide simple, modular GUI features; are run independently from the dashboard as separate processes to achieve goals of modularity and resilience. ARTIQ supplies several applets for basic plotting in the :mod:`artiq.applets` module, and provides interfaces so users can write their own.
|
||||
Most of the time, rather than the HDF dump, we would like to see our result datasets in a readable graphical form, preferably without opening any third-party applications. In the ARTIQ dashboard, this is achieved by programs called "applets". Applets provide simple, modular GUI features, and are run independently from the dashboard as separate processes for modularity and resilience. ARTIQ supplies several applets for basic plotting in the :mod:`artiq.applets` module, and provides interfaces so users can write their own.
|
||||
|
||||
.. seealso::
|
||||
When developing your own applets, see also the references provided on the :ref:`Management system reference<applet-references>` page of this manual.
|
||||
Resources for writing your own applets are detailed on the :ref:`Management system reference<applet-references>` page.
|
||||
|
||||
For our ``parabola`` dataset, we will create an XY plot using the provided :mod:`artiq.applets.plot_xy`. Applets are configured with simple command line options. To figure out what configurations are accepted, use the ``-h`` flag, as in: ::
|
||||
|
||||
@ -57,7 +59,7 @@ In our case, we only need to supply our dataset to the applet to be plotted. Nav
|
||||
|
||||
${artiq_applet}plot_xy parabola
|
||||
|
||||
Run the experiment again, and observe how the points are added as they are generated to the plot in the applet window.
|
||||
Run the experiment again, and observe how the points are added to the plot in the applet window as they are generated.
|
||||
|
||||
.. tip::
|
||||
Datasets and applets can both be arranged in groups for organizational purposes. (In fact, so can arguments; see the reference of :meth:`~artiq.language.environment.HasEnvironment.setattr_argument`). For datasets, use a dot (``.``) in names to separate folders. For applets, left-click in the applet list to see the option 'Create Group'. You can drag and drop to move applets in and out of groups, or select a particular group with a click to create new applets in that group. Deselect applets or groups with CTRL+click.
|
||||
@ -68,7 +70,7 @@ Run the experiment again, and observe how the points are added as they are gener
|
||||
The ARTIQ browser
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
ARTIQ also possesses a second GUI, specifically targeted for the manipulation and analysis of datasets, called the ARTIQ browser. It is independent, and does not require either a running master or a core device to operate; a connection to the master is only necessary if you want to upload edited datasets back to the main management system. Open ``results`` in the browser by running: ::
|
||||
ARTIQ also possesses a second GUI, specifically targeted for the manipulation and analysis of datasets, called the ARTIQ browser. It is standalone, and does not require either a running master or a core device to operate; a connection to the master is only necessary if you want to upload edited datasets back to the main management system. Open ``results`` in the browser by running: ::
|
||||
|
||||
$ cd ~/artiq-master
|
||||
$ artiq_browser ./results
|
||||
@ -79,7 +81,7 @@ To open an experiment, click on 'Experiment' at the top left. Observe that inste
|
||||
|
||||
As described later in :ref:`experiment-scheduling`, only :meth:`~artiq.language.environment.Experiment.run` is obligatory for experiments to implement, and only :meth:`~artiq.language.environment.Experiment.run` is permitted to access hardware; the preparation and analysis stages occur before and after, and are limited to the host machine. The browser allows for re-running the post-experiment :meth:`~artiq.language.environment.Experiment.analyze`, potentially with different arguments or an edited algorithm, while accessing the datasets from opened ``results`` files.
|
||||
|
||||
Notably, the browser does not merely act as an HD5 viewer, but also allows the use of ARTIQ applets to plot and view the data. For this, see the lower left dock; applets can be opened, closed, and managed just as they are in the dashboard, once again accessing datasets from ``results``.
|
||||
Notably, the browser does not merely act as an HDF5 viewer, but also allows the use of ARTIQ applets to plot and view the data. For this, see the lower left dock; applets can be opened, closed, and managed just as they are in the dashboard, once again accessing datasets from ``results``.
|
||||
|
||||
.. _mgmt-ctlmgr:
|
||||
|
||||
|
@ -132,6 +132,8 @@ If a subkernel is called on a satellite where a kernel is already running, the n
|
||||
|
||||
If a subkernel is complex and its binary relatively large, the delay between the call and actually running the subkernel may be substantial. If it's necessary to minimize this delay, ``subkernel_preload(function)`` should be used before the call.
|
||||
|
||||
Subkernels receive the value of the timeline cursor ``now_mu`` from the caller at the moment of the call. As there is a delay between calling the subkernel and its actual start, there will be a difference in ``now_mu`` that can be compensated with a delay in the subkernel. Additionally, preloading the subkernel would decrease the difference, as the subkernel does not have to be loaded before running.
|
||||
|
||||
While a subkernel is running, the satellite is disconnected from the RTIO interface of the master. As a result, regardless of what devices the subkernel itself uses, none of the RTIO devices on that satellite will be available to the master, nor will messages be passed on to any further satellites downstream. This applies both to regular RTIO operations and DDMA. While a subkernel is running, a satellite may use its own local DMA, but an attempt by any other device to run DDMA through the satellite will fail. Control is returned to the master when no subkernel is running -- to be sure that a device will be accessible, await before performing any RTIO operations on the affected satellite.
|
||||
|
||||
.. note::
|
||||
|
164
flake.nix
164
flake.nix
@ -1,19 +1,45 @@
|
||||
{
|
||||
description = "A leading-edge control system for quantum information experiments";
|
||||
|
||||
inputs.nac3 = { type = "git"; url = "https://git.m-labs.hk/m-labs/nac3.git"; };
|
||||
inputs.rust-overlay = {
|
||||
inputs = {
|
||||
nac3 = {
|
||||
url = "https://git.m-labs.hk/m-labs/nac3.git";
|
||||
type = "git";
|
||||
};
|
||||
|
||||
rust-overlay = {
|
||||
url = "github:oxalica/rust-overlay?ref=snapshot/2024-08-01";
|
||||
inputs.nixpkgs.follows = "nac3/nixpkgs";
|
||||
};
|
||||
inputs.sipyco.url = github:m-labs/sipyco;
|
||||
inputs.sipyco.inputs.nixpkgs.follows = "nac3/nixpkgs";
|
||||
inputs.artiq-comtools.url = github:m-labs/artiq-comtools;
|
||||
inputs.artiq-comtools.inputs.nixpkgs.follows = "nac3/nixpkgs";
|
||||
inputs.artiq-comtools.inputs.sipyco.follows = "sipyco";
|
||||
};
|
||||
|
||||
inputs.src-migen = { url = github:m-labs/migen; flake = false; };
|
||||
inputs.src-misoc = { type = "git"; url = "https://github.com/m-labs/misoc.git"; submodules = true; flake = false; };
|
||||
artiq-comtools = {
|
||||
url = "github:m-labs/artiq-comtools";
|
||||
inputs.nixpkgs.follows = "nac3/nixpkgs";
|
||||
inputs.sipyco.follows = "sipyco";
|
||||
};
|
||||
|
||||
sipyco = {
|
||||
url = "github:m-labs/sipyco";
|
||||
inputs.nixpkgs.follows = "nac3/nixpkgs";
|
||||
};
|
||||
|
||||
src-migen = {
|
||||
url = "github:m-labs/migen";
|
||||
flake = false;
|
||||
};
|
||||
|
||||
src-misoc = {
|
||||
url = "https://github.com/m-labs/misoc.git";
|
||||
type = "git";
|
||||
submodules = true;
|
||||
flake = false;
|
||||
};
|
||||
|
||||
src-pythonparser = {
|
||||
url = "github:m-labs/pythonparser";
|
||||
flake = false;
|
||||
};
|
||||
};
|
||||
|
||||
outputs = { self, rust-overlay, sipyco, nac3, artiq-comtools, src-migen, src-misoc }:
|
||||
let
|
||||
@ -26,6 +52,14 @@
|
||||
artiqVersion = (builtins.toString artiqVersionMajor) + "." + (builtins.toString artiqVersionMinor) + "+" + artiqVersionId + ".beta";
|
||||
artiqRev = self.sourceInfo.rev or "unknown";
|
||||
|
||||
qtPaths = let
|
||||
inherit (pkgs.qt6) qtbase qtsvg;
|
||||
inherit (qtbase.dev) qtPluginPrefix qtQmlPrefix;
|
||||
in {
|
||||
QT_PLUGIN_PATH = "${qtbase}/${qtPluginPrefix}:${qtsvg}/${qtPluginPrefix}";
|
||||
QML2_IMPORT_PATH = "${qtbase}/${qtQmlPrefix}";
|
||||
};
|
||||
|
||||
rust = pkgs.rust-bin.nightly."2021-09-01".default.override {
|
||||
extensions = [ "rust-src" ];
|
||||
targets = [ ];
|
||||
@ -139,12 +173,12 @@
|
||||
|
||||
asyncserial = pkgs.python3Packages.buildPythonPackage rec {
|
||||
pname = "asyncserial";
|
||||
version = "0.1";
|
||||
version = "1.0";
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "m-labs";
|
||||
repo = "asyncserial";
|
||||
rev = "d95bc1d6c791b0e9785935d2f62f628eb5cdf98d";
|
||||
sha256 = "0yzkka9jk3612v8gx748x6ziwykq5lr7zmr9wzkcls0v2yilqx9k";
|
||||
rev = version;
|
||||
sha256 = "sha256-ZHzgJnbsDVxVcp09LXq9JZp46+dorgdP8bAiTB59K28=";
|
||||
};
|
||||
propagatedBuildInputs = [ pkgs.python3Packages.pyserial ];
|
||||
};
|
||||
@ -343,51 +377,65 @@
|
||||
|
||||
packages.x86_64-w64-mingw32 = import ./windows { inherit sipyco nac3 artiq-comtools; artiq = self; };
|
||||
|
||||
inherit makeArtiqBoardPackage openocd-bscanspi-f;
|
||||
inherit qtPaths makeArtiqBoardPackage openocd-bscanspi-f;
|
||||
|
||||
defaultPackage.x86_64-linux = packages.x86_64-linux.python3-mimalloc.withPackages(ps: [ packages.x86_64-linux.artiq ]);
|
||||
defaultPackage.x86_64-linux = packages.x86_64-linux.python3-mimalloc.withPackages(_: [ packages.x86_64-linux.artiq ]);
|
||||
|
||||
# Main development shell with everything you need to develop ARTIQ on Linux.
|
||||
# The current copy of the ARTIQ sources is added to PYTHONPATH so changes can be tested instantly.
|
||||
# Additionally, executable wrappers that import the current ARTIQ sources for the ARTIQ frontends
|
||||
# are added to PATH.
|
||||
devShells.x86_64-linux.default = pkgs.mkShell {
|
||||
name = "artiq-dev-shell";
|
||||
buildInputs = [
|
||||
(packages.x86_64-linux.python3-mimalloc.withPackages(ps: with packages.x86_64-linux; [ migen misoc ps.paramiko microscope ps.packaging ] ++ artiq.propagatedBuildInputs))
|
||||
rust
|
||||
pkgs.llvmPackages_14.clang-unwrapped
|
||||
pkgs.llvm_14
|
||||
pkgs.lld_14
|
||||
pkgs.git
|
||||
artiq-frontend-dev-wrappers
|
||||
# use the vivado-env command to enter a FHS shell that lets you run the Vivado installer
|
||||
packages.x86_64-linux.vivadoEnv
|
||||
packages.x86_64-linux.vivado
|
||||
packages.x86_64-linux.openocd-bscanspi
|
||||
pkgs.python3Packages.sphinx pkgs.python3Packages.sphinx_rtd_theme pkgs.pdf2svg
|
||||
pkgs.python3Packages.sphinx-argparse pkgs.python3Packages.sphinxcontrib-wavedrom latex-artiq-manual
|
||||
pkgs.python3Packages.sphinxcontrib-tikz
|
||||
];
|
||||
shellHook = ''
|
||||
export QT_PLUGIN_PATH=${pkgs.qt6.qtbase}/${pkgs.qt6.qtbase.dev.qtPluginPrefix}:${pkgs.qt6.qtsvg}/${pkgs.qt6.qtbase.dev.qtPluginPrefix}
|
||||
export QML2_IMPORT_PATH=${pkgs.qt6.qtbase}/${pkgs.qt6.qtbase.dev.qtQmlPrefix}
|
||||
export PYTHONPATH=`git rev-parse --show-toplevel`:$PYTHONPATH
|
||||
'';
|
||||
};
|
||||
devShells.x86_64-linux = {
|
||||
# Main development shell with everything you need to develop ARTIQ on Linux.
|
||||
# The current copy of the ARTIQ sources is added to PYTHONPATH so changes can be tested instantly.
|
||||
# Additionally, executable wrappers that import the current ARTIQ sources for the ARTIQ frontends
|
||||
# are added to PATH.
|
||||
default = pkgs.mkShell {
|
||||
name = "artiq-dev-shell";
|
||||
packages = with pkgs; [
|
||||
git
|
||||
lit
|
||||
lld_14
|
||||
llvm_14
|
||||
llvmPackages_14.clang-unwrapped
|
||||
pdf2svg
|
||||
|
||||
# Lighter development shell optimized for building firmware and flashing boards.
|
||||
devShells.x86_64-linux.boards = pkgs.mkShell {
|
||||
name = "artiq-boards-shell";
|
||||
buildInputs = [
|
||||
(pkgs.python3.withPackages(ps: with packages.x86_64-linux; [ migen misoc artiq ps.packaging ]))
|
||||
rust
|
||||
pkgs.llvmPackages_14.clang-unwrapped
|
||||
pkgs.llvm_14
|
||||
pkgs.lld_14
|
||||
packages.x86_64-linux.vivado
|
||||
packages.x86_64-linux.openocd-bscanspi
|
||||
];
|
||||
python3Packages.sphinx
|
||||
python3Packages.sphinx-argparse
|
||||
python3Packages.sphinxcontrib-tikz
|
||||
python3Packages.sphinxcontrib-wavedrom
|
||||
python3Packages.sphinx_rtd_theme
|
||||
|
||||
(packages.x86_64-linux.python3-mimalloc.withPackages(ps: [ migen misoc microscope ps.packaging ] ++ artiq.propagatedBuildInputs ))
|
||||
] ++
|
||||
[
|
||||
latex-artiq-manual
|
||||
rust
|
||||
artiq-frontend-dev-wrappers
|
||||
|
||||
# use the vivado-env command to enter a FHS shell that lets you run the Vivado installer
|
||||
packages.x86_64-linux.vivadoEnv
|
||||
packages.x86_64-linux.vivado
|
||||
packages.x86_64-linux.openocd-bscanspi
|
||||
];
|
||||
shellHook = ''
|
||||
export QT_PLUGIN_PATH=${qtPaths.QT_PLUGIN_PATH}
|
||||
export QML2_IMPORT_PATH=${qtPaths.QML2_IMPORT_PATH}
|
||||
export PYTHONPATH=`git rev-parse --show-toplevel`:$PYTHONPATH
|
||||
'';
|
||||
};
|
||||
# Lighter development shell optimized for building firmware and flashing boards.
|
||||
boards = pkgs.mkShell {
|
||||
name = "artiq-boards-shell";
|
||||
packages = [
|
||||
rust
|
||||
|
||||
pkgs.llvmPackages_14.clang-unwrapped
|
||||
pkgs.llvm_14
|
||||
pkgs.lld_14
|
||||
|
||||
packages.x86_64-linux.vivado
|
||||
packages.x86_64-linux.openocd-bscanspi
|
||||
|
||||
(pkgs.python3.withPackages(ps: [ migen misoc artiq ps.packaging ]))
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
packages.aarch64-linux = {
|
||||
@ -421,7 +469,11 @@
|
||||
#__impure = true; # Nix 2.8+
|
||||
|
||||
buildInputs = [
|
||||
(pkgs.python3.withPackages(ps: with packages.x86_64-linux; [ artiq ps.paramiko ]))
|
||||
(pkgs.python3.withPackages(ps: with packages.x86_64-linux; [
|
||||
artiq
|
||||
ps.paramiko
|
||||
] ++ ps.paramiko.optional-dependencies.ed25519
|
||||
))
|
||||
pkgs.llvm_14
|
||||
pkgs.openssh
|
||||
packages.x86_64-linux.openocd-bscanspi # for the bscanspi bitstreams
|
||||
|
Loading…
Reference in New Issue
Block a user